using System.Collections.Generic;
using System.Linq;   // needed for rgLabels.Min() in forward().

// Member fields: the labeled data cache and its host-side helper buffers.
bool m_bOutputLabels = false;
Blob<T> m_blobLabeledDataCache = null;
int m_nLabelStart = 0;
int m_nLabelCount = 0;
bool m_bBalanceMatches = false;
long m_hCacheCursors = 0;   // host buffer of per-label cache cursors.
long m_hWorkDataHost = 0;   // host work buffer used by the batch/sequence copies.
// dispose() - release the cache blob and free the host buffers.
if (m_blobLabeledDataCache != null)
{
    m_blobLabeledDataCache.Dispose();
    m_blobLabeledDataCache = null;
}

if (m_hCacheCursors != 0)
    m_cuda.FreeHostBuffer(m_hCacheCursors);

if (m_hWorkDataHost != 0)
    m_cuda.FreeHostBuffer(m_hWorkDataHost);
// Number of top (output) blobs: the 2 + k tuplet items, plus one more
// top when labels are also output.
get { return m_nK + 2 + ((m_bOutputLabels) ? 1 : 0); }
// Reshape(colBottom, colTop) - size each top to match the bottom data; the
// optional label top is shaped as {num, 2 + k, 1, 1}. Elided code is marked.
for (int i = 0; i < colBottom.Count; i++)
{
    // ...
}

for (int k = 0; k < m_nK; k++)
{
    // ...
}

int nLabelDim = 2 + m_nK;
colTop[2 + m_nK].Reshape(colBottom[0].num, nLabelDim, 1, 1);
// forward(colBottom, colTop) - on the first pass, build the labeled data
// cache; on every pass, cache the batch by label and output sequenced tuplets.
if (m_blobLabeledDataCache == null)
{
    List<int> rgLabels = new List<int>();

    // When no label count is configured, discover the labels from the batch.
    if (m_nLabelCount == 0)
    {
        foreach (float fLabel in rgfLabels)
        {
            int nLabel = (int)fLabel;
            if (!rgLabels.Contains(nLabel))
                rgLabels.Add(nLabel);
        }

        m_nLabelCount = rgLabels.Count;
        m_nLabelStart = rgLabels.Min();
    }
    else
    {
        for (int i = 0; i < m_nLabelCount; i++)
            rgLabels.Add(m_nLabelStart + i);
    }

    // Allocate m_nCacheSize cache slots per label, plus the host buffers.
    int nNum = rgLabels.Count * m_nCacheSize;
    m_blobLabeledDataCache = new Blob<T>(m_cuda, m_log, nNum, colBottom[0].channels, colBottom[0].height, colBottom[0].width);
    m_blobLabeledDataCache.SetData(0);
    m_hCacheCursors = m_cuda.AllocHostBuffer(rgLabels.Count * 2);
    m_hWorkDataHost = m_cuda.AllocHostBuffer(labels.count());
}

// Copy the current batch into the cache, organized by label.
m_cuda.copy_batch(data.count(), data.num, data.count(1), data.gpu_data,
    labels.gpu_data, m_blobLabeledDataCache.count(),
    m_blobLabeledDataCache.mutable_gpu_data, m_blobLabeledDataCache.mutable_gpu_diff,
    m_nLabelStart, m_nLabelCount, m_nCacheSize, m_hCacheCursors, m_hWorkDataHost);

// Collect the top blob handles, then output the sequenced data tuplets.
List<long> rgTop = new List<long>();
List<int> rgTopCount = new List<int>();

for (int i = 0; i < colTop.Count; i++)
{
    rgTop.Add(colTop[i].mutable_gpu_data);
    rgTopCount.Add(colTop[i].count());
}

m_cuda.copy_sequence(nK, data.num, data.count(1), data.gpu_data, labels.gpu_data,
    m_blobLabeledDataCache.count(), m_blobLabeledDataCache.gpu_data,
    m_nLabelStart, m_nLabelCount, m_nCacheSize, m_hCacheCursors, m_bOutputLabels,
    rgTop, rgTopCount, m_hWorkDataHost, m_bBalanceMatches);
// backward() - not implemented; the DataSequence Layer does not backpropagate.
if (rgbPropagateDown[0])
    throw new NotImplementedException();
The Log class provides general output in text form.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
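A minimal usage sketch for CHECK_EQ; the Log constructor argument and the values compared are illustrative only:

Log log = new Log("DataSequenceLayer test");   // illustrative log name.
double dfExpected = 3;
double dfActual = 2 + 1;                       // e.g., a computed top blob count.
// Raises an error through the log when the two values differ.
log.CHECK_EQ(dfExpected, dfActual, "The top blob count is incorrect.");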
The BlobCollection contains a list of Blobs.
int Count
Returns the number of items in the collection.
void ReshapeLike(BlobCollection< T > src)
Reshapes all blobs in the collection to the sizes of the source.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
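A short sketch of the BlobCollection members above; the shape values and the cuda/log/colBottom instances are assumed to already exist:

BlobCollection<float> colTop = new BlobCollection<float>();
colTop.Add(new Blob<float>(cuda, log));
colTop.Add(new Blob<float>(cuda, log));
int nBlobs = colTop.Count;                      // 2 blobs held.
colTop.Reshape(new int[] { 32, 3, 28, 28 });    // same shape for every blob.
colTop.ReshapeLike(colBottom);                  // or mirror another collection.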
The Blob is the main holder of data that moves through the Layers of the Net.
void SetData(T[] rgData, int nCount=-1, bool bSetCount=true)
Sets a number of items within the Blob's data.
long mutable_gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
T[] update_cpu_data()
Update the CPU data by transferring the GPU data over to the Host.
int count()
Returns the total number of items in the Blob.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
int num
DEPRECATED; legacy shape accessor num: use shape(0) instead.
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
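A hedged sketch tying the Blob members above together; the shape and values are illustrative:

// Allocate a 2x3x4x4 blob, seed a few items, and read the data back.
Blob<float> blob = new Blob<float>(cuda, log, 2, 3, 4, 4);
blob.SetData(new float[] { 1, 2, 3 }, 3);    // set the first three items.
float[] rgData = blob.update_cpu_data();     // GPU -> host transfer.
long hData = blob.gpu_data;                  // handle passed to CudaDnn calls.
int nTotal = blob.count();                   // 2*3*4*4 = 96 items.
blob.Dispose();                              // release GPU and host resources.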
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
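A minimal sketch of opening the connection and managing a host buffer, mirroring the AllocHostBuffer/FreeHostBuffer pairing in dispose() above; the device id is illustrative:

CudaDnn<float> cuda = new CudaDnn<float>(0); // connect to GPU 0.
long hBuffer = cuda.AllocHostBuffer(1024);   // host buffer of 1024 items.
cuda.FreeHostBuffer(hBuffer);                // always freed when non-zero.
cuda.Dispose();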
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
float convertF(T df)
Converts a generic to a float value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
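Inside a Layer<T> subclass, the protected members above are used roughly as follows (an illustrative fragment, not taken from DataSequenceLayer):

// Validate inputs through the log and convert a generic data item to float.
m_log.CHECK_EQ(colBottom.Count, 2, "Expected two bottoms: data and label.");
float fFirstLabel = convertF(colBottom[1].update_cpu_data()[0]);
long hWork = m_cuda.AllocHostBuffer(colBottom[1].count());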
DataSequence Layer - this caches inputs by label and then outputs data item tuplets that include an '...
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: data, label
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override int? MinTopBlobs
Returns the minimum number of required top (output) Blobs: anchor, positive (k > 0),...
DataSequenceLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The DataSequenceLayer constructor.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
During the forward pass, each input data item is cached by label and then sequencing is performed on ...
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Not implemented - the DataSequence Layer does not perform backward.
override void dispose()
Releases all GPU and host resources used by the Layer.
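A hedged end-to-end sketch of the layer's lifecycle; Setup and Forward are assumed to be the public entry points wrapping LayerSetUp/Reshape and forward, and colBottom is assumed to hold the data and label blobs:

LayerParameter p = new LayerParameter(LayerParameter.LayerType.DATA_SEQUENCE);
DataSequenceLayer<float> layer = new DataSequenceLayer<float>(cuda, log, p);
layer.Setup(colBottom, colTop);    // runs LayerSetUp and the initial Reshape.
layer.Forward(colBottom, colTop);  // caches by label, outputs sequenced tuplets.
layer.Dispose();                   // frees the cache blob and host buffers.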
Specifies the base parameter for all layers.
List< bool > propagate_down
Specifies whether or not the LayerParameter (or portions of it) should be backpropagated.
DataSequenceParameter data_sequence_param
Returns the parameter set when initialized with LayerType.DATA_SEQUENCE
LayerType
Specifies the layer type.
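A sketch of configuring the parameter for this layer type; the data_sequence_param field names (cache_size, k, output_labels) are assumptions based on the member fields listed earlier:

LayerParameter p = new LayerParameter(LayerParameter.LayerType.DATA_SEQUENCE);
p.data_sequence_param.cache_size = 256;      // items cached per label (assumed).
p.data_sequence_param.k = 1;                 // extra tuplet items (assumed).
p.data_sequence_param.output_labels = true;  // adds a label top (assumed).
p.propagate_down.Add(false);                 // this layer never backpropagates.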
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers.beta namespace contains all beta stage layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...