2using System.Collections.Generic;
30 List<bool> m_rgbBiasPropagateDown =
new List<bool>();
55 m_blobSumMultiplier =
new Blob<T>(cuda, log);
57 m_blobSumResult =
new Blob<T>(cuda, log);
59 m_blobTemp =
new Blob<T>(cuda, log);
66 if (m_biasLayer !=
null)
68 m_biasLayer.Dispose();
72 if (m_blobSumMultiplier !=
null)
75 m_blobSumMultiplier =
null;
78 if (m_blobSumResult !=
null)
81 m_blobSumResult =
null;
84 if (m_blobTemp !=
null)
99 col.
Add(m_blobSumMultiplier);
100 col.
Add(m_blobSumResult);
135 base.ReInitializeParameters(target);
148 m_biasLayer.ReInitializeParameters(target);
167 else if (colBottom.
Count == 1)
170 m_nAxis = colBottom[0].CanonicalAxisIndex(p.
axis);
172 m_log.
CHECK_GE(nNumAxes, -1,
"num_axes must be non-negative, or -1 to extend to the end of bottom[0].");
175 m_log.
CHECK_GE(colBottom[0].num_axes, m_nAxis + nNumAxes,
"scale blob's shape extends past bottom[0]'s shape when applied starting with bottom[0] axis = " + m_nAxis.ToString());
179 List<int> rgShape =
new List<int>();
180 int nStart = m_nAxis;
181 int nEnd = (nNumAxes == -1) ? colBottom[0].shape().
Count : nStart + nNumAxes;
183 for (
int i = nStart; i < nEnd; i++)
185 rgShape.Add(colBottom[0].shape(i));
202 filler.
Fill(blobScale);
215 m_colBiasBottomVec.
Add(colBottom[0]);
218 m_biasLayer.Setup(m_colBiasBottomVec, colTop);
246 m_nAxis = (blobScale.
num_axes == 0) ? 0 : colBottom[0].CanonicalAxisIndex(p.
axis);
247 m_log.
CHECK_GE(colBottom[0].num_axes, m_nAxis + blobScale.
num_true_axes,
"scale blob's shape extends past bottom[0]'s shape when applied starting with bottom[0] axis = " + m_nAxis.ToString());
251 m_log.
CHECK_EQ(colBottom[0].shape(m_nAxis + i), blobScale.
shape(i),
"dimension mismatch between bottom[0]->shape(" + (m_nAxis + i).ToString() +
") and scale->shape(" + i.ToString() +
")");
254 m_nOuterDim = colBottom[0].count(0, m_nAxis);
255 m_nScaleDim = blobScale.
count();
256 m_nInnerDim = colBottom[0].count(m_nAxis + blobScale.
num_true_axes);
258 if (colBottom[0] == colTop[0])
263 m_blobSumResult.
Reshape(
new List<int>() { m_nOuterDim * m_nScaleDim });
264 int nSumMultSize = Math.Max(m_nOuterDim, m_nInnerDim);
265 m_blobSumMultiplier.
Reshape(
new List<int>() { nSumMultSize });
266 m_blobSumMultiplier.
SetData(1.0);
268 if (m_biasLayer !=
null)
270 m_colBiasBottomVec[0] = colTop[0];
271 m_biasLayer.
Reshape(m_colBiasBottomVec, colTop);
299 if (colBottom[0] == colTop[0])
308 long hScaleData = (colBottom.
Count > 1) ? colBottom[1].gpu_data :
m_colBlobs[0].gpu_data;
309 long hTopData = colTop[0].mutable_gpu_data;
310 int nCount = colTop[0].count();
311 long hBottomData = colBottom[0].gpu_data;
313 if (m_biasLayer !=
null)
315 long hBiasData =
m_colBlobs[m_nBiasParamId].gpu_data;
316 m_cuda.scale_fwd(nCount, hBottomData, hScaleData, m_nScaleDim, m_nInnerDim, hTopData, hBiasData);
320 m_cuda.scale_fwd(nCount, hBottomData, hScaleData, m_nScaleDim, m_nInnerDim, hTopData);
334 m_biasLayer.Backward(colTop, m_rgbBiasPropagateDown, m_colBiasBottomVec);
336 bool bScaleParam = (colBottom.
Count == 1) ?
true :
false;
341 long hTopDiff = colTop[0].gpu_diff;
342 bool bInPlace = (colBottom[0] == colTop[0]) ?
true :
false;
343 long hBottomData = (bInPlace) ? m_blobTemp.
gpu_data : colBottom[0].gpu_data;
350 bool bIsEltwise = (colBottom[0].count() == blobScale.
count()) ?
true :
false;
352 long hSumMult = m_blobSumMultiplier.
gpu_data;
354 m_cuda.mul(colTop[0].count(), hTopDiff, hBottomData, hProduct);
360 if (m_nInnerDim == 1)
362 hSumResult = hProduct;
364 else if (m_blobSumResult.
count() == 1)
370 T fDot =
m_cuda.dot(m_nInnerDim, hProduct, hSumMult);
372 blobScale.
SetDiff(dfScaleDiff, 0);
376 T fDot =
m_cuda.dot(m_nInnerDim, hProduct, hSumMult);
386 if (m_nOuterDim != 1)
388 if (m_nScaleDim == 1)
394 T fDot =
m_cuda.dot(m_nOuterDim, hSumMult, hSumResult);
396 blobScale.
SetDiff(dfScaleDiff, 0);
400 T fDot =
m_cuda.dot(m_nOuterDim, hSumMult, hSumResult);
413 if (rgbPropagateDown[0])
415 int nCount = colTop[0].count();
416 long hTopDiff = colTop[0].gpu_diff;
417 long hScaleData = blobScale.
gpu_data;
418 long hBottomDiff = colBottom[0].mutable_gpu_diff;
420 m_cuda.scale_fwd(nCount, hTopDiff, hScaleData, m_nScaleDim, m_nInnerDim, hBottomDiff);
The Log class provides general output in text form.
void WriteLine(string str, bool bOverrideEnabled=false, bool bHeader=false, bool bError=false, bool bDisable=false)
Write a line of output.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
void CHECK_GE(double df1, double df2, string str)
Test whether one number is greater than or equal to another.
The Utility class provides general utility functions.
static List< int > Create(int nCount, int nStart, int nInc)
Create a new List and fill it with values starting with start and incrementing by inc.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void ReshapeLike(BlobCollection< T > src)
Reshapes all blobs in the collection to the sizes of the source.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The Blob is the main holder of data that moves through the Layers of the Net.
void SetData(T[] rgData, int nCount=-1, bool bSetCount=true)
Sets a number of items within the Blob's data.
int num_axes
Returns the number of axes in the Blob.
long mutable_gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
T GetDiff(int nIdx)
Returns the diff at a given flat index within the Blob.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
int num_true_axes
Returns the number of true axes, ignoring the trailing ones.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use
BLOB_TYPE type
Returns the BLOB_TYPE of the Blob.
void CopyFrom(Blob< T > src, int nSrcOffset, int nDstOffset, int nCount, bool bCopyData, bool bCopyDiff)
Copy from a source Blob.
List< int > shape()
Returns an array where each element contains the shape of an axis of the Blob.
int count()
Returns the total number of items in the Blob.
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
string Name
Get/set the name of the Blob.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
void SetDiff(double dfVal, int nIdx=-1)
Either sets all of the diff items in the Blob to a given value, or alternatively only sets a single i...
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Abstract Filler class used to fill blobs with values.
void Fill(Blob< T > b)
Fill the blob with values based on the actual filler used.
static Filler< T > Create(CudaDnn< T > cuda, Log log, FillerParameter p)
Create a new Filler instance.
The BiasLayer computes a sum of two input Blobs, with the shape of the latter Blob 'broadcast' to mat...
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
T m_tZero
Specifies a generic type equal to 0.0.
T m_tOne
Specifies a generic type equal to 1.0.
bool shareParameter(Blob< T > b, List< int > rgMinShape, bool bAllowEndsWithComparison=false)
Attempts to share a parameter Blob if another parameter Blob with the same name and acceptable size i...
bool shareLayerBlobs(Layer< T > layer)
Attempts to share the Layer blobs and internal_blobs with matching names and sizes with those in anot...
double convertD(T df)
Converts a generic to a double value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
BlobCollection< T > blobs
Returns the collection of learnable parameter Blobs for the Layer.
BlobCollection< T > m_colBlobs
Specifies the learnable parameter Blobs of the Layer.
DictionaryMap< bool > m_rgbParamPropagateDown
Specifies whether or not to compute the learnable diff of each parameter Blob.
The ScaleLayer computes the elementwise product of two input Blobs, with the shape of the latter Blob...
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation.
ScaleLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The ScaleLayer constructor.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t the inputs.
override void setup_internal_blobs(BlobCollection< T > col)
Derivative layers should add all internal blobs to the 'col' provided.
override int MaxBottomBlobs
Returns the maximum number of required bottom (input) Blobs: firstfactor, secondfactor
override int MinBottomBlobs
Returns the minimum number of required bottom (input) Blobs: firstfactor
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: scale
override bool ReInitializeParameters(WEIGHT_TARGET target)
Re-initialize the parameters of the layer.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
int axis
The first axis of bottom[0] (the first input Blob) along which to apply bottom[1] (the second input B...
int num_axes
(num_axes is ignored unless just one bottom is given and the bias is a learned parameter of the layer...
FillerParameter filler
(filler is ignored unless just one bottom is given and the bias is a learned parameter of the layer....
Specifies the filler parameters used to create each Filler.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
ScaleParameter scale_param
Returns the parameter set when initialized with LayerType.SCALE
BiasParameter bias_param
Returns the parameter set when initialized with LayerType.BIAS
LayerType
Specifies the layer type.
Specifies the parameters for the ScaleLayer.
bool bias_term
Whether to also learn a bias (equivalent to a ScalarLayer + BiasLayer, but may be more efficient).
FillerParameter bias_filler
Filler used for bias filling.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
BLOB_TYPE
Defines the type of data held by a given Blob.
WEIGHT_TARGET
Defines the type of weight to target in re-initializations.
The MyCaffe.fillers namespace contains all fillers including the Filler class.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...