using System.Collections.Generic;
// Batch indices (along axis 0) selected by the selector blob for forwarding.
List<int> m_rgIndicesToForward = new List<int>();
/// <summary>
/// Reshape the bottom (input) and top (output) blobs.
/// </summary>
/// <param name="colBottom">Specifies the bottom (input) Blobs; the last Blob is the selector.</param>
/// <param name="colTop">Specifies the top (output) Blobs, one per filtered bottom.</param>
protected override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
{
    // The last bottom blob acts as the selector.
    int nSelectorIdx = colBottom.Count - 1;

    // All selector dimensions other than the first must be singletons.
    for (int i = 1; i < colBottom[nSelectorIdx].num_axes; i++)
    {
        m_log.CHECK_EQ(colBottom[nSelectorIdx].shape(i), 1, "Selector blob dimensions must be singletons (1), except the first");
    }

    // Every filtered bottom must share the selector's batch (0th) dimension.
    for (int i = 0; i < colBottom.Count - 1; i++)
    {
        m_log.CHECK_EQ(colBottom[nSelectorIdx].shape(0), colBottom[i].shape(0), "Each bottom should have the same 0th dimension as the selector blob.");
    }

    T[] rgBottomDataSelector = colBottom[nSelectorIdx].update_cpu_data();
    m_rgIndicesToForward = new List<int>();

    // Collect the indices of the batch items whose selector value is non-zero.
    if (typeof(T) == typeof(double))
    {
        double[] rgBottomDataSelectorD = (double[])Convert.ChangeType(rgBottomDataSelector, typeof(double[]));

        for (int i = 0; i < colBottom[nSelectorIdx].shape(0); i++)
        {
            if (rgBottomDataSelectorD[i] != 0.0)
                m_rgIndicesToForward.Add(i);
        }
    }
    else
    {
        float[] rgBottomDataSelectorF = (float[])Convert.ChangeType(rgBottomDataSelector, typeof(float[]));

        for (int i = 0; i < colBottom[nSelectorIdx].shape(0); i++)
        {
            if (rgBottomDataSelectorF[i] != 0.0)
                m_rgIndicesToForward.Add(i);
        }
    }

    int nNewTopsNum = m_rgIndicesToForward.Count;

    // On the very first reshape the selector data is not yet initialized, so
    // size the tops to the full batch.
    // NOTE(review): this guard condition was reconstructed from the m_bFirstShape
    // assignments in the extracted fragment -- confirm against the original file.
    if (m_bFirstShape)
    {
        nNewTopsNum = colBottom[0].shape(0);
        m_bFirstShape = false;
    }

    // Reshape each top to hold only the selected items: axis 0 shrinks to the
    // number of selected items, all remaining axes mirror the bottom.
    for (int t = 0; t < colTop.Count; t++)
    {
        int nNumAxes = colBottom[t].num_axes;
        List<int> rgShape = new List<int>();
        rgShape.Add(nNewTopsNum);

        for (int ts = 1; ts < nNumAxes; ts++)
        {
            rgShape.Add(colBottom[t].shape(ts));
        }

        // NOTE(review): trailing Reshape call reconstructed (the extracted
        // fragment ends before the loop body closes) -- confirm.
        colTop[t].Reshape(rgShape);
    }
}
/// <summary>
/// Forward computation: copies only the selected batch items from each
/// filtered bottom Blob into the corresponding top Blob.
/// </summary>
/// <param name="colBottom">Specifies the bottom (input) Blobs; the last Blob is the selector.</param>
/// <param name="colTop">Specifies the top (output) Blobs receiving the filtered data.</param>
protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
{
    int nNewTopsNum = m_rgIndicesToForward.Count;

    for (int t = 0; t < colTop.Count; t++)
    {
        long hBottomData = colBottom[t].gpu_data;
        long hTopData = colTop[t].mutable_gpu_data;
        // Number of elements in a single batch item of this bottom.
        int nDim = colBottom[t].count() / colBottom[t].shape(0);

        // Copy each selected item (densely packed in the top) from its
        // original batch position in the bottom.
        for (int n = 0; n < nNewTopsNum; n++)
        {
            int nDataOffsetTop = n * nDim;
            int nDataOffsetBottom = m_rgIndicesToForward[n] * nDim;
            m_cuda.copy(nDim, hBottomData, hTopData, nDataOffsetBottom, nDataOffsetTop);
        }
    }
}
/// <summary>
/// Computes the error gradient w.r.t. the forwarded inputs: diffs of items
/// that were forwarded are copied back from the top; diffs of items that
/// were filtered out are set to zero.
/// </summary>
/// <param name="colTop">Specifies the top (output) Blobs whose diffs are propagated.</param>
/// <param name="rgbPropagateDown">Specifies, per bottom, whether to propagate the gradient.</param>
/// <param name="colBottom">Specifies the bottom (input) Blobs; the last Blob is the selector.</param>
protected override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
{
    // Gradients can never flow into the selector (last bottom).
    if (rgbPropagateDown[colBottom.Count - 1])
        m_log.FAIL("Layer cannot backpropagate to filter index inputs.");

    for (int i = 0; i < colTop.Count; i++)
    {
        if (rgbPropagateDown[i])
        {
            // Number of elements in a single batch item.
            int nDim = colTop[i].count() / colTop[i].shape(0);
            int nNextToBackwardOffset = 0;
            int nBatchOffset = 0;
            int nDataOffsetBottom = 0;
            int nDataOffsetTop = 0;

            // Walk the full bottom batch; m_rgIndicesToForward is ascending,
            // so a single cursor (nNextToBackwardOffset) tracks the next
            // forwarded item.
            // NOTE(review): the if/else branching below was reconstructed from
            // gaps in the extracted fragment -- confirm against the original.
            for (int n = 0; n < colBottom[i].shape(0); n++)
            {
                if (nNextToBackwardOffset >= m_rgIndicesToForward.Count)
                {
                    // All forwarded items already visited; zero the remaining diffs.
                    nDataOffsetBottom = n * nDim;
                    m_cuda.set(nDim, colBottom[i].mutable_gpu_diff, m_tZero, -1, nDataOffsetBottom);
                }
                else
                {
                    nBatchOffset = m_rgIndicesToForward[nNextToBackwardOffset];
                    nDataOffsetBottom = n * nDim;

                    if (n != nBatchOffset)
                    {
                        // This item was not forwarded, so its diff is zero.
                        m_cuda.set(nDim, colBottom[i].mutable_gpu_diff, m_tZero, -1, nDataOffsetBottom);
                    }
                    else
                    {
                        // This item was forwarded; copy its diff from the top.
                        nDataOffsetTop = nNextToBackwardOffset * nDim;
                        nNextToBackwardOffset++;
                        m_cuda.copy(nDim, colTop[i].mutable_gpu_diff, colBottom[i].mutable_gpu_diff, nDataOffsetTop, nDataOffsetBottom);
                    }
                }
            }
        }
    }
}
The Log class provides general output in text form.
void FAIL(string str)
Causes a failure which throws an exception with the descriptive text.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
The BlobCollection contains a list of Blobs.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
The FilterLayer takes two+ Blobs, interprets the last Blob as a selector and filters the remaining Blobs accordingly.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation.
override int MinBottomBlobs
Returns the minimum number of required bottom (input) Blobs: input, selector
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the forwarded inputs.
FilterLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The FilterLayer constructor.
override int MinTopBlobs
Returns the minimum number of required top (output) Blobs: filter
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
T m_tZero
Specifies a generic type equal to 0.0.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
Specifies the base parameter for all layers.
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...