2using System.Collections.Generic;
38 long m_hBottomDesc = 0;
41 long m_hDropoutDesc = 0;
42 string m_strBottomSize =
null;
44 ulong m_ulReserved = 0;
61 m_blobRand =
new Blob<T>(cuda, log);
68 if (m_blobRand !=
null)
74 if (m_hDropoutDesc != 0)
76 m_cuda.FreeDropoutDesc(m_hDropoutDesc);
82 m_cuda.FreeMemory(m_hStates);
88 m_cuda.FreeMemory(m_hReserved);
92 if (m_hBottomDesc != 0)
94 m_cuda.FreeTensorDesc(m_hBottomDesc);
100 m_cuda.FreeCuDNN(m_hCuda);
123 base.LayerSetUp(colBottom, colTop);
126 m_log.
CHECK(m_dfThreshold > 0.0,
"Threshold should be > 0");
127 m_log.
CHECK(m_dfThreshold < 1.0,
"Threshold should be < 1");
128 m_dfScale = 1.0 / (1.0 - m_dfThreshold);
129 m_uiThreshold = (uint)(uint.MaxValue * m_dfThreshold);
136 m_hCuda =
m_cuda.CreateCuDNN();
137 m_hBottomDesc =
m_cuda.CreateTensorDesc();
138 m_hDropoutDesc =
m_cuda.CreateDropoutDesc();
148 base.Reshape(colBottom, colTop);
151 m_blobRand.ReshapeLike(colBottom[0]);
156 string strBottomSize = colBottom[0].ToSizeString();
157 m_log.
CHECK(strBottomSize == colTop[0].ToSizeString(),
"The bottom[0] and top[0] must have the same size!");
159 if (strBottomSize != m_strBottomSize)
164 m_cuda.SetTensorDesc(m_hBottomDesc, colBottom[0].num, colBottom[0].channels, colBottom[0].height, colBottom[0].width);
165 m_cuda.GetDropoutInfo(m_hCuda, m_hBottomDesc, out ulStates, out ulReserved);
167 if (ulStates > m_ulStates)
170 m_cuda.FreeMemory(m_hStates);
172 m_hStates =
m_cuda.AllocMemory((
long)ulStates);
173 m_ulStates = ulStates;
176 if (ulReserved > m_ulReserved)
178 if (m_hReserved != 0)
179 m_cuda.FreeMemory(m_hReserved);
181 m_hReserved =
m_cuda.AllocMemory((
long)ulReserved);
182 m_ulReserved = ulReserved;
188 lSeed = DateTime.Now.Ticks;
190 m_cuda.SetDropoutDesc(m_hCuda, m_hDropoutDesc, m_dfThreshold, m_hStates, lSeed);
191 m_strBottomSize = strBottomSize;
256 long hBottomData = colBottom[0].gpu_data;
257 long hTopData = colTop[0].mutable_gpu_data;
258 int nCount = colBottom[0].count();
262 long hMask = m_blobRand.mutable_gpu_data;
266 m_cuda.dropout_fwd(nCount, hBottomData, hMask, m_uiThreshold,
convert(m_dfScale), hTopData);
270 m_cuda.copy(nCount, hBottomData, hTopData);
282 if (!rgbPropagateDown[0])
285 long hTopDiff = colTop[0].gpu_diff;
286 long hBottomDiff = colBottom[0].mutable_gpu_diff;
290 long hMask = m_blobRand.gpu_data;
291 int nCount = colBottom[0].count();
293 m_cuda.dropout_bwd(nCount, hTopDiff, hMask, m_uiThreshold,
convert(m_dfScale), hBottomDiff);
297 m_cuda.copy(colTop[0].count(), hTopDiff, hBottomDiff);
308 long hBottomData = colBottom[0].gpu_data;
309 long hTopData = colTop[0].mutable_gpu_data;
312 m_cuda.DropoutForward(m_hCuda, m_hDropoutDesc, m_hBottomDesc, hBottomData, m_hBottomDesc, hTopData, m_hReserved);
314 m_cuda.copy(colBottom[0].count(), hBottomData, hTopData);
325 if (!rgbPropagateDown[0])
328 long hTopDiff = colTop[0].gpu_diff;
329 long hBottomDiff = colBottom[0].mutable_gpu_diff;
332 m_cuda.DropoutBackward(m_hCuda, m_hDropoutDesc, m_hBottomDesc, hTopDiff, m_hBottomDesc, hBottomDiff, m_hReserved);
334 m_cuda.copy(colTop[0].count(), hTopDiff, hBottomDiff);
The Log class provides general output in text form.
void CHECK(bool b, string str)
Test a flag for true.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
The Blob is the main holder of data that moves through the Layers of the Net.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
During training only, sets a random portion of the input to 0, adjusting the rest of the vector magnitude accordingly.
void backward_cudnn(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Run the Backward computation using the Engine CUDNN mode as specified in the LayerParameter.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void setup_internal_blobs(BlobCollection< T > col)
Add all internal blobs.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Run the Forward computation using either the Engine.CAFFE or Engine.CUDNN mode as specified in the La...
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer for use with both Engine.CAFFE and Engine.CUDNN modes.
DropoutLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The DropoutLayer constructor.
void forward_cudnn(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Run the Forward computation using the Engine CUDNN mode as specified in the LayerParameter.
void forward_caffe(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Run the Forward computation using the Engine CAFFE mode as specified in the LayerParameter.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Run the Backward computation using either the Engine.CAFFE or Engine.CUDNN mode as specified in the L...
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
void backward_caffe(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Run the Backward computation using the Engine CAFFE mode as specified in the LayerParameter.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
bool shareLayerBlob(Blob< T > b, List< int > rgMinShape)
Attempts to share a Layer Blob if another parameter Blob with the same name and acceptable size is fo...
T m_tZero
Specifies a generic type equal to 0.0.
Phase m_phase
Specifies the Phase under which the Layer is run.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The NeuronLayer is an interface for layers that take one blob as input (x) and produce only equally-s...
double dropout_ratio
Specifies the dropout ratio (i.e. the probability that values will be dropped out and set to zero)....
long seed
Specifies the seed used by cuDnn for random number generation.
bool active
Specifies whether or not the dropout is active. When inactive and training,...
bool useCudnn()
Queries whether or not to use NVIDIA's cuDnn.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
LayerType
Specifies the layer type.
DropoutParameter dropout_param
Returns the parameter set when initialized with LayerType.DROPOUT
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
Phase
Defines the Phase under which to run a Net.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...