using System.Collections.Generic;

// MVNLayer constructor: allocate the internal working blobs.
m_blobMean = new common.Blob<T>(cuda, log);
m_blobVariance = new common.Blob<T>(cuda, log);
m_blobTemp = new Blob<T>(cuda, log);
m_blobSumMultiplier = new Blob<T>(cuda, log);
// dispose: release each internal blob.
if (m_blobMean != null) { m_blobMean.Dispose(); m_blobMean = null; }
if (m_blobVariance != null) { m_blobVariance.Dispose(); m_blobVariance = null; }
if (m_blobTemp != null) { m_blobTemp.Dispose(); m_blobTemp = null; }
if (m_blobSumMultiplier != null) { m_blobSumMultiplier.Dispose(); m_blobSumMultiplier = null; }
// setup_internal_blobs: add all internal blobs to the provided collection.
col.Add(m_blobMean);
col.Add(m_blobVariance);
col.Add(m_blobTemp);
col.Add(m_blobSumMultiplier);
// Reshape: one mean/variance value per (num, channel) slice.
m_blobMean.Reshape(colBottom[0].num, colBottom[0].channels, 1, 1);
m_blobVariance.Reshape(colBottom[0].num, colBottom[0].channels, 1, 1);
m_blobTemp.Reshape(colBottom[0].num, colBottom[0].channels, colBottom[0].height, colBottom[0].width);

// The sum multiplier covers exactly the dimensions that get reduced.
if (m_param.mvn_param.across_channels)
    m_blobSumMultiplier.Reshape(1, colBottom[0].channels, colBottom[0].height, colBottom[0].width);
else
    m_blobSumMultiplier.Reshape(1, 1, colBottom[0].height, colBottom[0].width);

m_blobSumMultiplier.SetData(1.0);
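As a worked example of the shapes above (illustrative numbers, not from the source): for a bottom blob of shape (num=2, channels=3, height=4, width=5), m_blobMean and m_blobVariance are reshaped to (2, 3, 1, 1), one statistic per (num, channel) slice. With across_channels false, the sum multiplier is a (1, 1, 4, 5) blob of 20 ones (nDim = 4*5 = 20); with across_channels true, it is a (1, 3, 4, 5) blob of 60 ones (nDim = 3*4*5 = 60).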
// forward: gather the GPU handles and the reduction geometry.
long hBottomData = colBottom[0].gpu_data;
long hTopData = colTop[0].mutable_gpu_data;
int nNum = colBottom[0].num;

if (!m_param.mvn_param.across_channels)
    nNum *= colBottom[0].channels;

int nDim = colBottom[0].count() / nNum;

// Mean: one gemv against the all-ones sum multiplier, scaled by 1/nDim.
m_cuda.gemv(false, nNum, nDim, 1.0 / nDim, hBottomData, m_blobSumMultiplier.gpu_data, 0.0, m_blobMean.mutable_gpu_data);
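The gemv call computes the per-slice mean as a single matrix-vector product: viewing the bottom data as an nNum x nDim matrix X and the sum multiplier as an all-ones vector, the quantity produced can be sketched as (notation mine, not from the source):

    \mu = \frac{1}{D} X \mathbf{1}, \qquad \mu_i = \frac{1}{D} \sum_{j=1}^{D} x_{ij}, \qquad D = \text{nDim}.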
// backward: computes the MVN error gradient w.r.t. the output.
if (!rgbPropagateDown[0])
    return;

long hTopDiff = colTop[0].gpu_diff;
long hTopData = colTop[0].gpu_data;
long hBottomData = colBottom[0].gpu_data;
long hBottomDiff = colBottom[0].mutable_gpu_diff;
int nNum = colBottom[0].num;

if (!m_param.mvn_param.across_channels)
    nNum *= colBottom[0].channels;

int nDim = colBottom[0].count() / nNum;

if (m_param.mvn_param.normalize_variance)
{
    // bottomDiff = y * (per-row sum of y * topDiff, broadcast back over the row).
    m_cuda.mul(m_blobTemp.count(), hTopData, hTopDiff, hBottomDiff);
    m_cuda.gemv(false, nNum, nDim, 1.0, hBottomDiff, m_blobSumMultiplier.gpu_data, 0.0, m_blobMean.mutable_gpu_data);
    m_cuda.gemm(false, false, nNum, nDim, 1, 1.0, m_blobMean.gpu_data, m_blobSumMultiplier.gpu_data, 0.0, hBottomDiff);
    m_cuda.mul(m_blobTemp.count(), hTopData, hBottomDiff, hBottomDiff);

    // bottomDiff += (per-row sum of topDiff, broadcast back over the row).
    m_cuda.gemv(false, nNum, nDim, 1.0, hTopDiff, m_blobSumMultiplier.gpu_data, 0.0, m_blobMean.mutable_gpu_data);
    m_cuda.gemm(false, false, nNum, nDim, 1, 1.0, m_blobMean.gpu_data, m_blobSumMultiplier.gpu_data, 1.0, hBottomDiff);

    // bottomDiff = topDiff - (1/nDim) * bottomDiff; the division by the stored
    // sqrt(variance + eps) follows in listing lines elided from this extract.
    m_cuda.axpby(m_blobTemp.count(), 1.0, hTopDiff, -1.0 / nDim, hBottomDiff);
}
else
{
    // Mean-only case: per-row mean of topDiff.
    m_cuda.gemv(false, nNum, nDim, 1.0 / nDim, hTopDiff, m_blobSumMultiplier.gpu_data, 0.0, m_blobMean.mutable_gpu_data);
}
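Reading the normalize_variance branch as matrix algebra, the mul/gemv/gemm/axpby sequence appears to assemble the standard gradient of per-row standardization; a sketch under that reading (my notation: y is the normalized output, g = dL/dy, and \sigma_i is the per-row sqrt(variance + eps) applied in the elided division):

    \frac{\partial L}{\partial x_{ij}} = \frac{1}{\sigma_i}\Big( g_{ij} - \frac{1}{D}\sum_k g_{ik} - \frac{y_{ij}}{D}\sum_k y_{ik}\, g_{ik} \Big)

The first two gemv/gemm pairs build the broadcast sums, axpby combines them with topDiff, and the gemm beta values (0.0 then 1.0) overwrite and then accumulate into hBottomDiff respectively.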
The Log class provides general output in text form.
The BlobCollection contains a list of Blobs.
void Add(Blob<T> b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void ReshapeLike(BlobCollection<T> src)
Reshapes all blobs in the collection to the sizes of the source.
The Blob is the main holder of data that moves through the Layers of the Net.
void SetData(T[] rgData, int nCount=-1, bool bSetCount=true)
Sets a number of items within the Blob's data.
Blob(CudaDnn<T> cuda, Log log, bool bIncludeDiff=true, bool bUseHalfSize=false)
The Blob constructor.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use the Reshape(rgShape) overload instead.
int count()
Returns the total number of items in the Blob.
string Name
Get/set the name of the Blob.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
An interface for the units of computation which can be composed into a Net.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
CudaDnn<T> m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The "Mean-Variance Normalization" MVNLayer normalizes the input to have 0-mean and/or unit (1) varian...
override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
Reshape the bottom (input) and top (output) blobs.
override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
The forward computation that computes the normalization.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void setup_internal_blobs(BlobCollection<T> col)
Derivative layers should add all internal blobs to the 'col' provided.
MVNLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
The MVNLayer constructor.
override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
Computes the MVN error gradient w.r.t. the output.
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: input.
override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
Setup the layer.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: mvn.
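For reference, the normalization this layer's summary describes can be sketched as (my notation; statistics are taken per (num, channel) slice, or over all channels of a sample when across_channels is set):

    y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}}

with the division omitted when normalize_variance is false, leaving mean subtraction only.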
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
MVNParameter mvn_param
Returns the parameter set when initialized with LayerType.MVN (a configuration sketch follows the parameter list below).
LayerType
Specifies the layer type.
double eps
Specifies a small value to avoid division by zero.
bool across_channels
Specifies whether or not to normalize across channels.
bool normalize_variance
Specifies whether or not to normalize the variance.
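A minimal configuration sketch tying these parameters together (assumes an existing CudaDnn<float> 'cuda' and Log 'log'; the values shown are illustrative, not defaults taken from the source):

    LayerParameter p = new LayerParameter(LayerParameter.LayerType.MVN);
    p.name = "mvn1";
    p.mvn_param.normalize_variance = true;   // subtract the mean and divide by the std
    p.mvn_param.across_channels = false;     // statistics per (num, channel) slice
    p.mvn_param.eps = 1e-9;                  // small value guarding the division

    MVNLayer<float> mvn = new MVNLayer<float>(cuda, log, p);

From here the layer is driven through the documented overrides: LayerSetUp and Reshape with the bottom/top collections, then forward and backward during training.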
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.