using System.Collections.Generic;

// LayerSetUp: only axis = 0 and axis = 1 are supported; cache the gather dimensions.
if (m_nAxis != 0 && m_nAxis != 1)
    m_log.FAIL("Currently only axis = 0 or axis = 1 are supported.");

m_nDim = colBottom[0].count(m_nAxis + 1);        // product of the axes after 'axis'
m_nDimAtAxis = colBottom[0].shape()[m_nAxis];    // size of the gathered axis
m_nM = colBottom[0].count(0, m_nAxis);           // product of the axes before 'axis'
m_nN = colBottom[1].count();                     // number of indices to gather
// Reshape: compute the top shape from the bottom shape, index shape, and index values.
float[] rgIdxf = convertF(colBottom[1].mutable_cpu_data);
List<int> rgTopShape = GatherParameter.Reshape(m_nAxis, colBottom[0].shape(), colBottom[1].shape(), rgIdxf, out m_nDim, out m_nDimAtAxis, out m_nM, out m_nN, out strErr);
if (rgTopShape == null)
    m_log.FAIL(strErr);
// forward: gather slices of the bottom data at the given indices into the top blob.
int nCount = colBottom[0].count();
long hBottom = colBottom[0].gpu_data;
long hIdx = colBottom[1].gpu_data;
long hTop = colTop[0].mutable_gpu_data;
int nExpectedCount = (m_nAxis == 0) ? (m_nN * m_nDim) : (m_nN * m_nM);

m_log.CHECK_EQ(colTop[0].count(), nExpectedCount, "The top count should equal " + nExpectedCount.ToString() + "!");

m_cuda.gather_fwd(nCount, hBottom, hTop, m_nAxis, m_nDim, m_nDimAtAxis, m_nM, m_nN, hIdx);
// backward: route the top gradient back to the bottom gradient at the same indices.
int nCount = colBottom[0].count();
long hBottomDiff = colBottom[0].mutable_gpu_diff;
long hIdx = colBottom[1].gpu_data;
long hTopDiff = colTop[0].gpu_diff;

m_cuda.gather_bwd(nCount, hTopDiff, hBottomDiff, m_nAxis, m_nDim, m_nDimAtAxis, m_nM, m_nN, hIdx);
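To make the dimension arguments concrete, the following is a minimal CPU sketch of the axis = 0 case, not the CUDA kernel behind m_cuda.gather_fwd: each of the nN indices selects one nDim-sized slice of the bottom data, which is copied into the corresponding slice of the top data. The helper name GatherAxis0Cpu is hypothetical and used for illustration only.

// Minimal CPU sketch (illustration only) of an axis = 0 gather.
// rgBottom holds nDimAtAxis * nDim values; rgIdx holds nN indices into axis 0.
static float[] GatherAxis0Cpu(float[] rgBottom, float[] rgIdx, int nDim)
{
    int nN = rgIdx.Length;
    float[] rgTop = new float[nN * nDim];   // matches the nExpectedCount check above

    for (int n = 0; n < nN; n++)
    {
        int nSrc = (int)rgIdx[n];           // index selected along axis 0

        for (int i = 0; i < nDim; i++)      // copy one slice of nDim values
            rgTop[n * nDim + i] = rgBottom[nSrc * nDim + i];
    }

    return rgTop;
}

The same dimension names are passed unchanged to gather_bwd in the backward pass.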
The Log class provides general output in text form.
void FAIL(string str)
Causes a failure that throws an exception with the descriptive text.
void CHECK_EQ(double df1, double df2, string str)
Tests whether one number is equal to another.
The BlobCollection contains a list of Blobs.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
float convertF(T df)
Converts a generic to a float value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The GatherLayer extracts (gathers) data from specified indices along a given axis from the input and copies it to the output.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: gather
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: input and indices.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
GatherLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The GatherLayer constructor.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the gathered inputs (sketched below).
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
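Conceptually, the backward pass is the scatter counterpart of the forward gather: each nDim-sized slice of the top gradient is routed back to the bottom slice its index selected. Below is a minimal CPU sketch of the axis = 0 case, not the m_cuda.gather_bwd kernel; the += accumulation assumes repeated indices should sum their gradients, and the helper name ScatterAxis0Cpu is hypothetical.

// Minimal CPU sketch (illustration only) of the axis = 0 gather gradient:
// each slice of the top diff is added back into the bottom diff at its source index.
static void ScatterAxis0Cpu(float[] rgTopDiff, float[] rgIdx, float[] rgBottomDiff, int nDim)
{
    int nN = rgIdx.Length;

    for (int i = 0; i < rgBottomDiff.Length; i++)   // clear the bottom gradient first
        rgBottomDiff[i] = 0;

    for (int n = 0; n < nN; n++)
    {
        int nDst = (int)rgIdx[n];                   // bottom slice this index selected

        for (int i = 0; i < nDim; i++)
            rgBottomDiff[nDst * nDim + i] += rgTopDiff[n * nDim + i];
    }
}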
Specifies the base parameter for all layers.
GatherParameter gather_param
Returns the parameter set when initialized with LayerType.GATHER
LayerType
Specifies the layer type.
Specifies the parameters for the GatherLayer.
int axis
Specifies the first axis to gather: all preceding axes are retained in the output....
static List< int > Reshape(int nAxis, List< int > rgBtmShape, List< int > rgIdxShape, float[] rgIdxF, out int nDim, out int nDimAtAxis, out int nM, out int nN, out string strErr)
Calculate the reshape array given the parameters.
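As a rough worked example of the four dimension values that Reshape returns through its out parameters, assuming they follow the same definitions used in the layer setup code shown earlier (the shapes below are hypothetical, and the exact top shape list returned by Reshape is not reproduced here):

// Hypothetical example: bottom[0] shape = (4, 3, 2), bottom[1] holds 5 indices, axis = 0.
// nM         = count(0, axis)    = 1    (no axes precede axis 0)
// nDimAtAxis = shape()[axis]     = 4    (size of the gathered axis)
// nDim       = count(axis + 1)   = 6    (3 * 2, the size of one gathered slice)
// nN         = bottom[1].count() = 5    (number of indices)
// For axis = 0 the top count is nN * nDim = 30, matching the forward check shown earlier.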
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers.beta namespace contains all beta stage layers.
The MyCaffe.param.beta parameters are used by the MyCaffe.layers.beta layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.