2using System.Collections.Generic;
57 m_blobWork =
new Blob<T>(cuda, log);
63 if (m_blobWork !=
null)
103 base.LayerSetUp(colBottom, colTop);
113 base.Reshape(colBottom, colTop);
144 long hPredicted = colBottom[0].gpu_data;
145 long hTarget = colBottom[1].gpu_data;
146 int nCount = colBottom[0].count();
150 m_log.
CHECK_EQ(nCount, colBottom[1].count(),
"The bottom(0) predicted and bottom(1) target must have the same shapes!");
155 m_cuda.sub(nCount, hTarget, hPredicted, colBottom[0].mutable_gpu_diff);
156 m_cuda.powx(nCount, colBottom[0].gpu_diff, 2.0, colBottom[0].mutable_gpu_diff);
157 dfLoss =
m_cuda.asum_double(nCount, colBottom[0].gpu_diff);
161 m_cuda.sub(nCount, hTarget, hPredicted, colBottom[0].mutable_gpu_diff);
162 m_cuda.abs(nCount, colBottom[0].gpu_diff, colBottom[0].mutable_gpu_diff);
163 dfLoss =
m_cuda.asum_double(nCount, colBottom[0].gpu_diff);
168 colTop[0].
SetData(dfLoss / dfNormalizer, 0);
214 if (!rgbPropagateDown[0])
217 long hPredicted = colBottom[0].gpu_data;
218 long hTarget = colBottom[1].gpu_data;
219 long hBottomDiff = colBottom[0].mutable_gpu_diff;
220 int nCount = colBottom[0].count();
222 m_cuda.mean_error_loss_bwd(nCount, hPredicted, hTarget, hBottomDiff, m_meanType);
224 double dfTopDiff =
convertD(colTop[0].GetDiff(0));
226 double dfLossWeight = dfTopDiff / dfNormalizer;
228 m_cuda.scal(nCount, dfLossWeight, hBottomDiff);
230 if (colBottom.
Count > 1)
232 long hBottomDiff2 = colBottom[1].mutable_gpu_diff;
233 m_cuda.scale(nCount, -1, hBottomDiff, hBottomDiff2);
The Log class provides general output in text form.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
The BlobCollection contains a list of Blobs.
void SetData(double df)
Set all blob data to the value specified.
void SetDiff(double df)
Set all blob diff to the value specified.
int Count
Returns the number of items in the collection.
The Blob is the main holder of data that moves through the Layers of the Net.
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Log m_log
Specifies the Log for output.
double convertD(T df)
Converts a generic to a double value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The LossLayer provides an interface for Layers that take two blobs as input – usually (1) prediction...
int m_nOuterNum
Specifies the outer num, such as the batch count (e.g. count(0, axis)). Each derivative class must se...
int m_nInnerNum
Specifies the inner num, such as the channel + height + width (e.g. count(axis + 1))....
virtual double get_normalizer(LossParameter.NormalizationMode normalization_mode, int nValidCount)
Returns the normalizer used to normalize the loss.
LossParameter.NormalizationMode m_normalization
Specifies the normalization mode used to normalize the loss.
The MeanErrorLossLayer computes losses based on various different Mean Error methods as shown below....
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The forward computation.
MeanErrorLossLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
Constructor.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs as variable.
override int MaxTopBlobs
Returns the maximum number of required top (output) Blobs: loss.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the mean error loss gradient w.r.t. the predictions.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override int MinTopBlobs
Returns the minimum number of required top (output) Blobs: loss.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
Specifies the base parameter for all layers.
MeanErrorLossParameter mean_error_loss_param
Returns the parameter set when initialized with LayerType.MEAN_ERROR_LOSS
LayerType
Specifies the layer type.
MEAN_ERROR mean_error_type
[optional, default = MSE] Specifies the type of mean error to use.
int axis
[optional, default = 1] Specifies the axis of the probability.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
MEAN_ERROR
Defines the type of Mean Error to use.
The MyCaffe.layers.beta namespace contains all beta stage layers.
The MyCaffe.param.beta parameters are used by the MyCaffe.layer.beta layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...