2using System.Collections.Generic;
4using System.Runtime.InteropServices;
26 int? m_nIgnoreLabel =
null;
92 base.LayerSetUp(colBottom, colTop);
103 base.Reshape(colBottom, colTop);
105 m_blobProb.ReshapeLike(colBottom[0]);
111 m_log.
CHECK_EQ(
m_nOuterNum *
m_nInnerNum, colBottom[1].count(),
"Number of labels must match number of predictions; e.g., if nll axis == 1 and prediction shape is (N, C, H, W), label count (number of labels) must be N*H*W, with integer values in {0, 1, ..., C-1}.");
133 long hProbData = colBottom[0].gpu_data;
134 long hLabel = colBottom[1].gpu_data;
140 long hLossData = colBottom[0].mutable_gpu_diff;
144 long hCounts = m_blobProb.mutable_gpu_diff;
147 T fLoss =
m_cuda.asum(nCount, hLossData);
148 double dfValidCount = -1;
157 double dfFinalLoss = dfLoss / dfNormalizer;
159 colTop[0].
SetData(dfFinalLoss, 0);
161 if (colTop.
Count == 2)
162 colTop[1].ShareData(m_blobProb);
206 if (!rgbPropagateDown[0])
209 long hBottomDiff = colBottom[0].mutable_gpu_diff;
210 long hTopData = colTop[0].gpu_data;
212 long hLabel = colBottom[1].gpu_data;
218 long hCounts = m_blobProb.mutable_gpu_diff;
222 double dfValidCount = -1;
229 double dfTopDiff =
convertD(colTop[0].GetDiff(0));
231 double dfLossWeight = dfTopDiff / dfNormalizer;
233 m_cuda.scal(m_blobProb.count(),
convert(dfLossWeight), hBottomDiff);
The Log class provides general output in text form.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
The BlobCollection contains a list of Blobs.
void SetData(double df)
Set all blob data to the value specified.
void SetDiff(double df)
Set all blob diff to the value specified.
int Count
Returns the number of items in the collection.
The Blob is the main holder of data that moves through the Layers of the Net.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
double convertD(T df)
Converts a generic to a double value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The LossLayer provides an interface for Layers that take two blobs as input – usually (1) prediction...
bool m_bIgnoreLabels
Set to true when labels are to be ignored.
int m_nOuterNum
Specifies the outer num, such as the batch count (e.g. count(0, axis)). Each derivative class must se...
int m_nInnerNum
Specifies the inner num, such as the channel + height + width (e.g. count(axis + 1))....
virtual double get_normalizer(LossParameter.NormalizationMode normalization_mode, int nValidCount)
Returns the normalizer used to normalize the loss.
LossParameter.NormalizationMode m_normalization
Specifies the normalization mode used to normalize the loss.
Computes the nll loss for a one-of-many classification task, passing real-valued predictions (from a ...
override void setup_internal_blobs(BlobCollection< T > col)
Derivative layers should add all internal blobs to the 'col' provided.
NLLLossLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
Constructor.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The forward computation.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override int MinTopBlobs
Returns the minimum number of required top (output) Blobs: loss.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the nll loss error gradient w.r.t the predictions.
override void dispose()
Releases all GPU and host resources used by the Layer.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs, which is variable for this layer.
override int MaxTopBlobs
Returns the maximum number of required top (output) Blobs: loss, labels
Specifies the base parameter for all layers.
NLLLossParameter nll_loss_param
Returns the parameter set when initialized with LayerType.NLL_LOSS
LayerType
Specifies the layer type.
LossParameter loss_param
Returns the parameter set when initialized with LayerType.LOSS
Stores the parameters used by loss layers.
NormalizationMode
How to normalize the loss for loss layers that aggregate across batches, spatial dimensions,...
int? ignore_label
If specified, instances with the given label are ignored.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers.gpt namespace contains all GPT related layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...