using System.Collections.Generic;

int m_nConcatInputSize;  // product of the dimensions after the concat axis

int nNumAxes = colBottom[0].num_axes;

m_log.CHECK_GE(m_nConcatAxis, 0, "Casting concat_dim from uint to int produced a negative result; concat_dim must be >= 0.");
m_log.CHECK_LT(m_nConcatAxis, nNumAxes, "concat_dim out of range.");

List<int> rgTopShape = Utility.Clone<int>(colBottom[0].shape());
m_nNumConcats = colBottom[0].count(0, m_nConcatAxis);
m_nConcatInputSize = colBottom[0].count(m_nConcatAxis + 1);
int nBottomCountSum = colBottom[0].count();

for (int i = 1; i < colBottom.Count; i++)
{
    m_log.CHECK_EQ(nNumAxes, colBottom[i].num_axes, "All inputs must have the same number of axes.");

    for (int j = 0; j < nNumAxes; j++)
    {
        if (j == m_nConcatAxis)
            continue;

        m_log.CHECK_EQ(rgTopShape[j], colBottom[i].shape(j), "All inputs must have the same shape, except at concat_axis. You might try switching between the ONNX (p) and CAFFE (t) pooling sizing methods.");
    }

    nBottomCountSum += colBottom[i].count();
    rgTopShape[m_nConcatAxis] += colBottom[i].shape(m_nConcatAxis);
}

colTop[0].Reshape(rgTopShape);
m_log.CHECK_EQ(nBottomCountSum, colTop[0].count(), "The sum of the bottom counts should equal top[0].count.");

if (colBottom.Count == 1)
{
    colTop[0].ShareData(colBottom[0]);
    colTop[0].ShareDiff(colBottom[0]);
}
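
As a concrete example of the bookkeeping above: with bottoms of shape {32, 3, 28, 28} and {32, 5, 28, 28} concatenated along axis 1, m_nNumConcats collapses the dimensions before the axis (32), m_nConcatInputSize collapses those after it (28 * 28), and only the concat axis grows in the top shape. A standalone sketch over plain arrays (illustrative helpers, not the MyCaffe API):

// Standalone sketch of the Reshape bookkeeping (illustration only).
static void ConcatReshapeSketch()
{
    int nAxis = 1;
    int[][] rgBottoms = { new[] { 32, 3, 28, 28 }, new[] { 32, 5, 28, 28 } };

    // Collapse dims before/after the concat axis, as count(0, axis) and count(axis + 1) do.
    int nNumConcats = 1, nConcatInputSize = 1;
    for (int i = 0; i < nAxis; i++)
        nNumConcats *= rgBottoms[0][i];            // 32
    for (int i = nAxis + 1; i < rgBottoms[0].Length; i++)
        nConcatInputSize *= rgBottoms[0][i];       // 28 * 28 = 784

    // Only the concat axis grows; every other dimension must match.
    int[] rgTopShape = (int[])rgBottoms[0].Clone();
    for (int i = 1; i < rgBottoms.Length; i++)
        rgTopShape[nAxis] += rgBottoms[i][nAxis];  // {32, 8, 28, 28}
}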

if (colBottom.Count == 1)
    return;

long hTopData = colTop[0].mutable_gpu_data;
int nOffsetConcatAxis = 0;
int nTopConcatAxis = colTop[0].shape(m_nConcatAxis);

for (int i = 0; i < colBottom.Count; i++)
{
    long hBottomData = colBottom[i].gpu_data;
    int nBottomConcatAxis = colBottom[i].shape(m_nConcatAxis);
    int nBottomConcatSize = nBottomConcatAxis * m_nConcatInputSize;
    int nCount = nBottomConcatSize * m_nNumConcats;

    m_cuda.concat_fwd(nCount, hBottomData, m_nNumConcats, m_nConcatInputSize, nTopConcatAxis, nBottomConcatAxis, nOffsetConcatAxis, hTopData);
    nOffsetConcatAxis += nBottomConcatAxis;
}
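
The m_cuda.concat_fwd kernel copies each bottom into a strided slice of the top buffer. A CPU sketch of the same indexing (plain float arrays, assumes using System; this illustrates the layout, not the actual CUDA kernel):

// CPU illustration of the forward concat copy (assumed to mirror the GPU
// kernel's indexing; not the actual m_cuda.concat_fwd implementation).
static void ConcatFwdCpu(float[] rgBottom, float[] rgTop, int nNumConcats,
    int nConcatInputSize, int nTopConcatAxis, int nBottomConcatAxis, int nOffsetConcatAxis)
{
    int nBottomConcatSize = nBottomConcatAxis * nConcatInputSize;

    for (int n = 0; n < nNumConcats; n++)
    {
        // Source: the n'th contiguous chunk of this bottom.
        int nSrc = n * nBottomConcatSize;
        // Destination: the same chunk, placed at the running offset along the concat axis.
        int nDst = (n * nTopConcatAxis + nOffsetConcatAxis) * nConcatInputSize;
        Array.Copy(rgBottom, nSrc, rgTop, nDst, nBottomConcatSize);
    }
}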

if (colBottom.Count == 1)
    return;

long hTopDiff = colTop[0].gpu_diff;
int nOffsetConcatAxis = 0;
int nTopConcatAxis = colTop[0].shape(m_nConcatAxis);

for (int i = 0; i < colBottom.Count; i++)
{
    int nBottomConcatAxis = colBottom[i].shape(m_nConcatAxis);

    if (rgbPropagateDown[i])
    {
        long hBottomDiff = colBottom[i].mutable_gpu_diff;
        int nBottomConcatSize = nBottomConcatAxis * m_nConcatInputSize;
        int nCount = nBottomConcatSize * m_nNumConcats;

        m_cuda.concat_bwd(nCount, hTopDiff, m_nNumConcats, m_nConcatInputSize, nTopConcatAxis, nBottomConcatAxis, nOffsetConcatAxis, hBottomDiff);
    }

    nOffsetConcatAxis += nBottomConcatAxis;
}
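
Backward is the mirror image: each bottom's diff is sliced back out of the top diff at the same offsets, and bottoms with rgbPropagateDown[i] == false are skipped but still advance the offset. A CPU sketch of the reverse copy (mirror of ConcatFwdCpu above; not the actual m_cuda.concat_bwd implementation):

// CPU illustration of the backward concat copy (illustration only).
static void ConcatBwdCpu(float[] rgTopDiff, float[] rgBottomDiff, int nNumConcats,
    int nConcatInputSize, int nTopConcatAxis, int nBottomConcatAxis, int nOffsetConcatAxis)
{
    int nBottomConcatSize = nBottomConcatAxis * nConcatInputSize;

    for (int n = 0; n < nNumConcats; n++)
    {
        int nSrc = (n * nTopConcatAxis + nOffsetConcatAxis) * nConcatInputSize;
        int nDst = n * nBottomConcatSize;
        Array.Copy(rgTopDiff, nSrc, rgBottomDiff, nDst, nBottomConcatSize);
    }
}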
The Log class provides general output in text form.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
void CHECK_GE(double df1, double df2, string str)
Test whether one number is greater than or equal to another.
void CHECK_LT(double df1, double df2, string str)
Test whether one number is less than another.
The Utility class provides general utility functions.
The BlobCollection contains a list of Blobs.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
The ConcatLayer takes at least two Blobs and concatenates them along either the num or channel dimension.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: concat
ConcatLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The ConcatLayer constructor.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the concatenation inputs.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override int MinBottomBlobs
Returns the minimum number of required bottom (input) Blobs: input
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
uint? concat_dim
DEPRECATED: alias for 'axis'; does not support negative indexing.
int axis
The axis along which to concatenate; may be negative to index from the end (e.g., -1 for the last axis).
Specifies the base parameter for all layers.
ConcatParameter concat_param
Returns the parameter set when initialized with LayerType.CONCAT (a construction sketch follows this listing).
LayerType
Specifies the layer type.
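
Tying the parameter entries above to the ConcatLayer members listed earlier, a layer is typically built from a LayerParameter of type CONCAT with the axis set on its concat_param. A minimal sketch, assuming an already-initialized cuda connection, log, and pre-filled bottom/top BlobCollection<float> instances:

// Minimal construction sketch (illustrative driver code; 'cuda', 'log',
// 'colBottom', and 'colTop' are assumed to come from the surrounding session).
LayerParameter p = new LayerParameter(LayerParameter.LayerType.CONCAT);
p.concat_param.axis = 1;  // concatenate along the channel axis

ConcatLayer<float> layer = new ConcatLayer<float>(cuda, log, p);
layer.LayerSetUp(colBottom, colTop);
layer.Reshape(colBottom, colTop);
layer.forward(colBottom, colTop);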
The MyCaffe.basecode namespace contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.