using System.Collections.Generic;

// Create the internal work blobs (bIncludeDiff: false -- they hold
// forward/backward intermediates only, so no diff memory is needed).
m_blobSumMultiplier = new common.Blob<T>(cuda, log, false);
m_blobSumMultiplier.Name = m_param.name + " sum mult";
m_blobSquare = new common.Blob<T>(cuda, log, false);
m_blobNorm = new common.Blob<T>(cuda, log, false);
m_blobTempDot = new common.Blob<T>(cuda, log, false);

// dispose: release the internal blobs.
dispose(ref m_blobSumMultiplier);

// setup_internal_blobs: add the internal blobs to the collection provided.
col.Add(m_blobSquare);
col.Add(m_blobTempDot);
col.Add(m_blobSumMultiplier);

// Reshape: the sum multiplier holds a vector of ones (one per channel), while
// the norm and temp-dot blobs hold one value per spatial position.
m_blobSumMultiplier.Reshape(1, colBottom[0].channels, 1, 1);
m_blobSumMultiplier.SetData(1.0);
m_blobNorm.Reshape(colBottom[0].num, 1, colBottom[0].height, colBottom[0].width);
m_blobTempDot.Reshape(colBottom[0].num, 1, colBottom[0].height, colBottom[0].width);
// forward: normalize each channel vector by its L2 norm at each spatial position.
long hBottomData = colBottom[0].gpu_data;
long hTopData = colTop[0].mutable_gpu_data;
// Handles to the internal work blobs; these declarations are inferred from the
// blob members above and were not shown in the original excerpt.
long hSquareData = m_blobSquare.mutable_gpu_data;
long hNormData = m_blobNorm.mutable_gpu_data;

int nCount = colBottom[0].count();
int nNum = colBottom[0].num;
int nChannels = colBottom[0].channels;
int nSpatialDim = colBottom[0].height * colBottom[0].width;

m_cuda.copy(nCount, hBottomData, hTopData);      // top = bottom
m_cuda.copy(nCount, hBottomData, hSquareData);   // square = bottom
m_cuda.powx(nCount, hSquareData, 2.0, hSquareData);          // square = bottom^2
m_cuda.channel_sum(nNum * nSpatialDim, nNum, nChannels, nSpatialDim,
                   hSquareData, hNormData);                  // norm = sum of squares over channels
m_cuda.powx(nNum * nSpatialDim, hNormData, 0.5, hNormData);  // norm = sqrt(norm)
m_cuda.channel_div(nNum * nSpatialDim, nNum, nChannels, nSpatialDim,
                   hNormData, hTopData, 2);                  // top /= norm (broadcast over channels)
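Written out, the steps above compute, for the bottom value x_{c,s} at channel c and spatial position s (the per-position norm is what m_blobNorm holds):

\[
y_{c,s} \;=\; \frac{x_{c,s}}{\lVert x_s \rVert_2},
\qquad
\lVert x_s \rVert_2 \;=\; \Big(\textstyle\sum_{k=1}^{C} x_{k,s}^{2}\Big)^{1/2}
\]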
// backward: propagate the gradient through y = x / ||x||.
long hTopDiff = colTop[0].gpu_diff;
long hTopData = colTop[0].gpu_data;
long hBottomDiff = colBottom[0].mutable_gpu_diff;
long hBottomData = colBottom[0].gpu_data;
long hNormData = m_blobNorm.gpu_data;
// Inferred from the blob members above; hTempData refers to an internal
// temporary blob whose declaration is not shown in this excerpt.
long hTempDotData = m_blobTempDot.mutable_gpu_data;

int nNum = colTop[0].num;
int nChannels = colTop[0].channels;
int nSpatialDim = colTop[0].height * colTop[0].width;
int nCount = colTop[0].count();

m_cuda.copy(nCount, hTopDiff, hBottomDiff);     // bottomDiff = topDiff
m_cuda.copy(nCount, hBottomData, hTempData);    // temp = bottom (i.e. x)
m_cuda.channel_dot(nNum * nSpatialDim, nNum, nChannels, nSpatialDim,
                   hTopDiff, hTopData, hTempDotData);  // tempDot = <topDiff, top> per position
m_cuda.div(nNum * nSpatialDim, hTempDotData, hNormData, hTempDotData);  // tempDot /= norm
m_cuda.div(nNum * nSpatialDim, hTempDotData, hNormData, hTempDotData);  // tempDot /= norm (now /norm^2)
m_cuda.channel_div(nNum * nSpatialDim, nNum, nChannels, nSpatialDim,
                   hNormData, hBottomDiff, 2);         // bottomDiff = topDiff / norm
m_cuda.channel_mul(nNum * nSpatialDim, nNum, nChannels, nSpatialDim,
                   hTempDotData, hTempData, 2);        // temp = x * tempDot
m_cuda.axpy(nCount, -1.0, hTempData, hBottomDiff);     // bottomDiff -= temp
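These steps are the standard L2-normalization gradient. With y_s = x_s / ||x_s|| from the forward pass and dE/dy the top diff, the sequence above evaluates, per spatial position s:

\[
\frac{\partial E}{\partial x_s}
\;=\; \frac{1}{\lVert x_s \rVert}\,\frac{\partial E}{\partial y_s}
\;-\; \frac{x_s}{\lVert x_s \rVert^{2}}\Big\langle \frac{\partial E}{\partial y_s},\, y_s \Big\rangle
\;=\; \frac{1}{\lVert x_s \rVert}\left(\frac{\partial E}{\partial y_s} - y_s\Big\langle \frac{\partial E}{\partial y_s},\, y_s \Big\rangle\right)
\]

where channel_dot produces the inner product, the two div calls supply the 1/||x||^2 factor, and the final axpy performs the subtraction.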
The Log class provides general output in text form.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void ReshapeLike(BlobCollection< T > src)
Reshapes all blobs in the collection to the sizes of the source.
The Blob is the main holder of data that moves through the Layers of the Net.
Blob(CudaDnn< T > cuda, Log log, bool bIncludeDiff=true, bool bUseHalfSize=false)
The Blob constructor.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use ...
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
string Name
Get/set the name of the Blob.
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
An interface for the units of computation which can be composed into a Net.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The GRNLayer performs an L2 normalization across the channels of the input data at each spatial position (a CPU reference sketch follows the member list below).
GlobResNormLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The GRNLayer constructor.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: norm
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void setup_internal_blobs(BlobCollection< T > col)
Derivative layers should add all internal blobs to the 'col' provided.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation.
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: data
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t the inputs.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
LayerType
Specifies the layer type.
The MyCaffe.basecode namespace contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers.beta namespace contains all beta stage layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...