using System;
using System.Collections.Generic;
m_blobCounts = new common.Blob<T>(cuda, log);
m_blobBegins = new common.Blob<T>(cuda, log);
m_blobTopIndexes = new common.Blob<T>(cuda, log);
if (m_blobCounts != null)
{
    m_blobCounts.Dispose();
    m_blobCounts = null;
}

if (m_blobBegins != null)
{
    m_blobBegins.Dispose();
    m_blobBegins = null;
}

if (m_blobTopIndexes != null)
{
    m_blobTopIndexes.Dispose();
    m_blobTopIndexes = null;
}
col.Add(m_blobCounts);
col.Add(m_blobBegins);
col.Add(m_blobTopIndexes);
m_log.CHECK_EQ(1, colBottom[1].num_axes, "The bottom[1] should have num_axes = 1.");

// The top takes its first axis from the index blob and all remaining
// axes from bottom[0].
List<int> rgNewShape = new List<int>();
rgNewShape.Add(colBottom[1].shape(0));

for (int i = 1; i < colBottom[0].shape().Count; i++)
{
    rgNewShape.Add(colBottom[0].shape(i));
}

colTop[0].Reshape(rgNewShape);

// Internal blobs: the top-index blob has one entry per index, while the
// counts and begins blobs have one entry per item in the original batch.
List<int> rgShape = new List<int>();
rgShape.Add(colBottom[1].count());
m_blobTopIndexes.Reshape(rgShape);

rgShape[0] = colBottom[0].shape(0);
m_blobBegins.Reshape(rgShape);
m_blobCounts.Reshape(rgShape);
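As a hedged illustration of the shape rule above, here is a stand-alone sketch (the ComputeTopShape helper and its names are hypothetical, not part of the layer) that derives the top shape from plain integer lists:

static List<int> ComputeTopShape(List<int> rgBottomShape, int nIndexCount)
{
    // First axis comes from the number of indexes in bottom[1]; the
    // remaining axes are copied unchanged from bottom[0].
    List<int> rgTopShape = new List<int>();
    rgTopShape.Add(nIndexCount);

    for (int i = 1; i < rgBottomShape.Count; i++)
    {
        rgTopShape.Add(rgBottomShape[i]);
    }

    return rgTopShape;
}

// Example: rgBottomShape = { 5, 3, 8, 8 }, nIndexCount = 10 -> { 10, 3, 8, 8 }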
private void check_batch_reindex(int nInitialNum, int nFinalNum, Blob<T> b)
{
    T[] rgData = b.update_cpu_data();

    if (typeof(T) == typeof(double))
    {
        double[] rgidx_Data = (double[])Convert.ChangeType(rgData, typeof(double[]));

        for (int i = 0; i < nFinalNum; i++)
        {
            m_log.CHECK_GE(rgidx_Data[i], 0, "Index specified for reindex layer was negative.");
            m_log.CHECK_LT(rgidx_Data[i], nInitialNum, "Index specified for reindex layer was greater than batch size.");
        }
    }
    else
    {
        float[] rgidx_Data = (float[])Convert.ChangeType(rgData, typeof(float[]));

        for (int i = 0; i < nFinalNum; i++)
        {
            m_log.CHECK_GE(rgidx_Data[i], 0, "Index specified for reindex layer was negative.");
            m_log.CHECK_LT(rgidx_Data[i], nInitialNum, "Index specified for reindex layer was greater than batch size.");
        }
    }
}
// Verify every index lies within [0, batch size).
check_batch_reindex(colBottom[0].shape(0), colBottom[1].count(), colBottom[1]);

int nCount = colTop[0].count();

// Gather: for each index i in bottom[1], copy the i'th item of bottom[0] into the top.
m_cuda.batchreidx_fwd(nCount,
                      colBottom[0].count() / colBottom[0].shape(0),
                      colBottom[0].gpu_data,
                      colBottom[1].gpu_data,
                      colTop[0].mutable_gpu_data);
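The batchreidx_fwd kernel performs a gather along the batch axis. The following is a minimal CPU sketch of the same computation, assuming nInnerDim equals colBottom[0].count() / colBottom[0].shape(0); the helper name and its float-only signature are hypothetical:

static void BatchReindexForwardCpu(float[] rgBottom, float[] rgIdx, float[] rgTop, int nInnerDim)
{
    // For each requested index, copy one whole item of the input batch
    // into the corresponding slot of the output.
    for (int i = 0; i < rgIdx.Length; i++)
    {
        int nSrc = (int)rgIdx[i];
        Array.Copy(rgBottom, nSrc * nInnerDim, rgTop, i * nInnerDim, nInnerDim);
    }
}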
m_log.CHECK(!rgbPropagateDown[1], "Cannot backprop to index.");

if (!rgbPropagateDown[0])
    return;

List<KeyValuePair<int, int>> rgMapping = new List<KeyValuePair<int, int>>();
T[] rgData = colBottom[1].update_cpu_data();
if (typeof(T) == typeof(double))
{
    double[] rgPerm = (double[])Convert.ChangeType(rgData, typeof(double[]));

    for (int i = 0; i < colBottom[1].count(); i++)
    {
        // Pair each bottom index (Key) with its position in the top (Value).
        rgMapping.Add(new KeyValuePair<int, int>((int)rgPerm[i], i));
    }
}
else
{
    float[] rgPerm = (float[])Convert.ChangeType(rgData, typeof(float[]));

    for (int i = 0; i < colBottom[1].count(); i++)
    {
        rgMapping.Add(new KeyValuePair<int, int>((int)rgPerm[i], i));
    }
}

// Group together all top positions that read from the same bottom item.
rgMapping.Sort(new Comparison<KeyValuePair<int, int>>(sort));
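// Worked example (hypothetical values, not from the source): if
// bottom[1] = { 3, 0, 0 }, the sorted mapping is (0,1), (0,2), (3,0):
// bottom item 0 is read by top positions 1 and 2, bottom item 3 by top
// position 0, and all other bottom items by none.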
// After sorting, the mapping lists top positions grouped by bottom item:
// t_i_data holds the top positions, b_data[n] the offset where bottom
// item n's group begins (-1 if unused), and c_data[n] its group size.
m_blobCounts.SetData(0);
m_blobBegins.SetData(-1);

T[] rgTopIndexes = m_blobTopIndexes.mutable_cpu_data;
T[] rgCounts = m_blobCounts.mutable_cpu_data;
T[] rgBegins = m_blobBegins.mutable_cpu_data;

if (typeof(T) == typeof(double))
{
    double[] t_i_data = (double[])Convert.ChangeType(rgTopIndexes, typeof(double[]));
    double[] c_data = (double[])Convert.ChangeType(rgCounts, typeof(double[]));
    double[] b_data = (double[])Convert.ChangeType(rgBegins, typeof(double[]));

    for (int i = 0; i < rgMapping.Count; i++)
    {
        t_i_data[i] = rgMapping[i].Value;

        if (b_data[rgMapping[i].Key] == -1)
            b_data[rgMapping[i].Key] = i;

        c_data[rgMapping[i].Key] += 1;
    }
}
else
{
    float[] t_i_data = (float[])Convert.ChangeType(rgTopIndexes, typeof(float[]));
    float[] c_data = (float[])Convert.ChangeType(rgCounts, typeof(float[]));
    float[] b_data = (float[])Convert.ChangeType(rgBegins, typeof(float[]));

    for (int i = 0; i < rgMapping.Count; i++)
    {
        t_i_data[i] = rgMapping[i].Value;

        if (b_data[rgMapping[i].Key] == -1)
            b_data[rgMapping[i].Key] = i;

        c_data[rgMapping[i].Key] += 1;
    }
}

m_blobTopIndexes.mutable_cpu_data = rgTopIndexes;
m_blobBegins.mutable_cpu_data = rgBegins;
m_blobCounts.mutable_cpu_data = rgCounts;
int nCount = colBottom[0].count();

// Scatter-sum: each bottom item accumulates the diffs of every top
// position that was gathered from it.
m_cuda.batchreidx_bwd(nCount,
                      colBottom[0].count() / colBottom[0].shape(0),
                      colTop[0].gpu_diff,
                      m_blobTopIndexes.gpu_data,
                      m_blobBegins.gpu_data,
                      m_blobCounts.gpu_data,
                      colBottom[0].mutable_gpu_diff);
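The batchreidx_bwd kernel computes, for each item of bottom[0], the sum of the diffs of every top position that gathered from it, using the counts/begins/top-index layout prepared above. A minimal CPU sketch under the same assumptions (hypothetical helper, float-only for brevity):

static void BatchReindexBackwardCpu(float[] rgTopDiff, float[] rgTopIdx,
    float[] rgBegins, float[] rgCounts, float[] rgBottomDiff, int nInnerDim)
{
    for (int n = 0; n < rgCounts.Length; n++)
    {
        for (int j = 0; j < nInnerDim; j++)
        {
            // Sum the diffs of the rgCounts[n] top positions whose run
            // starts at rgBegins[n] in the sorted top-index list; items
            // never referenced have a count of 0 and receive a 0 diff.
            float fSum = 0;

            for (int k = 0; k < (int)rgCounts[n]; k++)
            {
                int nTopIdx = (int)rgTopIdx[(int)rgBegins[n] + k];
                fSum += rgTopDiff[nTopIdx * nInnerDim + j];
            }

            rgBottomDiff[n * nInnerDim + j] = fSum;
        }
    }
}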
private int sort(KeyValuePair<int, int> a, KeyValuePair<int, int> b)
{
    // Order mappings by the bottom index (the Key).
    return a.Key.CompareTo(b.Key);
}
The Log class provides general output in text form.
void CHECK(bool b, string str)
Test a flag for true.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
void CHECK_GE(double df1, double df2, string str)
Test whether one number is greater than or equal to another.
void CHECK_LT(double df1, double df2, string str)
Test whether one number is less than another.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The Blob is the main holder of data that moves through the Layers of the Net.
void SetData(T[] rgData, int nCount=-1, bool bSetCount=true)
Sets a number of items within the Blob's data.
Blob(CudaDnn< T > cuda, Log log, bool bIncludeDiff=true, bool bUseHalfSize=false)
The Blob constructor.
T[] mutable_cpu_data
Get data from the GPU and bring it over to the host, or set data from the host and send it over to the GPU.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use the Reshape overload that takes a shape list instead.
T[] update_cpu_data()
Update the CPU data by transferring the GPU data over to the Host.
string Name
Get/set the name of the Blob.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
The BatchReindexLayer indexes into the input blob along its first axis; it can be used to select, reorder, or replicate items of a batch.
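For example (hypothetical shapes), given bottom[0] of shape (5, 3, 8, 8) and bottom[1] holding the indexes { 3, 0, 0 }, the top is reshaped to (3, 3, 8, 8) and receives items 3, 0 and 0 of the input batch, in that order.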
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the reordered input.
override int ExactNumBottomBlobs
Returns the exact number of bottom (input) Blobs required: input, index
override void setup_internal_blobs(BlobCollection< T > col)
Derivative layers should add all internal blobs to the 'col' provided.
BatchReindexLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The BatchReindexLayer constructor.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override int ExactNumTopBlobs
Returns the exact number of top (output) Blobs required: batchreidx
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The Forward computation.
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
LayerType
Specifies the layer type.
The MyCaffe.basecode namespace contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.