2using System.Collections.Generic;
52 m_blobDiff =
new Blob<T>(cuda, log,
false);
54 m_blobErrors =
new Blob<T>(cuda, log,
false);
57 m_bHasWeights =
false;
63 if (m_blobDiff !=
null)
69 if (m_blobErrors !=
null)
85 col.
Add(m_blobErrors);
130 base.LayerSetUp(colBottom, colTop);
132 if (colBottom.
Count == 3)
133 m_bHasWeights =
true;
143 base.Reshape(colBottom, colTop);
145 m_log.
CHECK_EQ(colBottom[0].channels, colBottom[1].channels,
"The bottom(0) and bottom(1) must have the same channels.");
146 m_log.
CHECK_EQ(colBottom[0].height, colBottom[1].height,
"The bottom(0) and bottom(1) must have the same height.");
147 m_log.
CHECK_EQ(colBottom[0].width, colBottom[1].width,
"The bottom(0) and bottom(1) must have the same width.");
151 m_log.
CHECK_EQ(colBottom[0].channels, colBottom[2].channels,
"The bottom(0) and bottom(2) must have the same channels.");
152 m_log.
CHECK_EQ(colBottom[0].height, colBottom[2].height,
"The bottom(0) and bottom(2) must have the same height.");
153 m_log.
CHECK_EQ(colBottom[0].width, colBottom[2].width,
"The bottom(0) and bottom(2) must have the same width.");
156 m_blobDiff.ReshapeLike(colBottom[0]);
180 int nCount = colBottom[0].count();
183 m_cuda.sub(nCount, colBottom[0].gpu_data, colBottom[1].gpu_data, m_blobDiff.mutable_gpu_data);
187 m_cuda.mul(nCount, colBottom[2].gpu_data, m_blobDiff.gpu_data, m_blobDiff.mutable_gpu_data);
192 colTop[0].
SetData(dfLoss / colBottom[0].num, 0);
227 int nCount = m_blobDiff.count();
229 m_cuda.smoothl1_bwd(nCount, m_blobDiff.gpu_data, m_blobDiff.mutable_gpu_data);
231 for (
int i = 0; i < 2; i++)
233 if (rgbPropagateDown[i])
235 double dfSign = (i == 0) ? 1 : -1;
236 double dfAlpha =
Utility.ConvertVal<T>(colTop[0].GetDiff(0));
238 dfAlpha = dfSign * dfAlpha / colBottom[i].num;
239 m_cuda.axpby(colBottom[i].count(), dfAlpha, m_blobDiff.gpu_data, 0.0, colBottom[i].mutable_gpu_diff);
The Log class provides general output in text form.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
The Utility class provides general utility functions.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
void SetData(double df)
Set all blob data to the value specified.
int Count
Returns the number of items in the collection.
The Blob is the main holder of data that moves through the Layers of the Net.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
T asum_data()
Compute the sum of absolute values (L1 norm) of the data.
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
string Name
Get/set the name of the Blob.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The LossLayer provides an interface for Layers that take two blobs as input – usually (1) prediction...
Fast R-CNN Copyright (c) Microsoft Licensed under The MIT License [see fast-rcnn/LICENSE for details]...
override void setup_internal_blobs(BlobCollection< T > col)
Derived layers should add all internal blobs to the 'col' provided.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the smooth L1 loss error gradient w.r.t the predictions.
SmoothL1LossLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
Constructor.
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs as variable.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The forward computation.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override int MinBottomBlobs
Returns the minimum number of required bottom (input) Blobs: predictions, targets
override int MaxBottomBlobs
Returns the maximum number of required bottom (input) Blobs: predictions, targets, weights
override bool AllowForceBackward(int nBottomIdx)
Unlike most loss layers, in the SmoothL1LossLayer we can backpropagate to both inputs – override to r...
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers.ssd namespace contains all Single-Shot MultiBox (SSD) related layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...