2using System.Collections.Generic;
26 int? m_nIgnoreLabel =
null;
29 bool m_bDirectLabels =
false;
30 bool m_bEnableSimpleAccuracy =
false;
31 bool m_bEnableLastElementOnly =
false;
48 m_blobNumsBuffer =
new Blob<T>(cuda, log,
false);
49 m_blobAccData =
new Blob<T>(cuda, log);
93 if (!m_bEnableSimpleAccuracy && m_bEnableLastElementOnly)
94 m_log.
WriteLine(
"WARNING: The accuracy layer currently only supports last element accuracy when using the simple accuracy.");
97 m_nIgnoreLabel =
null;
101 m_log.
WriteLine(
"WARNING: The accuracy layer currently only supports a single ignore label.");
105 if (m_bEnableSimpleAccuracy && m_nTopK > 1)
107 m_log.
WriteLine(
"WARNING: The accuracy layer currently only supports top_k = 1 for simple accuracy.");
111 m_bDirectLabels =
false;
121 m_log.
CHECK_LE(m_nTopK, colBottom[0].count() / colBottom[1].count(),
"top_k must be less than or equal to the number of classes.");
124 m_nOuterNum = colBottom[0].count(0, m_nLabelAxis);
125 m_nInnerNum = colBottom[0].count(m_nLabelAxis + 1);
126 int nLabelDim = m_nOuterNum * m_nInnerNum;
130 if (!m_bDirectLabels)
131 m_log.
WriteLine(
"WARNING: Using direct label comparisons where a label is expected in each item (e.g. no Softmax used).");
132 m_bDirectLabels =
true;
136 m_log.
CHECK_EQ(m_nOuterNum * m_nInnerNum, colBottom[1].count(),
"Number of labels must match number of predictions; e.g., if label axis = 1 and prediction shape is (N, C, H, W), label count (number of labels) must be N*H*W, with integer values in {0, 1, ..., C=1}.");
139 List<int> rgTopShape =
new List<int>();
143 if (colTop.
Count > 1)
146 List<int> rgTopShapePerClass =
new List<int>() { colBottom[0].shape(m_nLabelAxis) };
147 colTop[1].
Reshape(rgTopShapePerClass);
148 m_blobNumsBuffer.
Reshape(rgTopShapePerClass);
151 if (m_bEnableSimpleAccuracy)
152 m_blobAccData.
Reshape(m_nOuterNum, 1, 1, 1);
185 if (m_bEnableSimpleAccuracy)
187 else if (m_bDirectLabels)
224 int nDim = colBottom[0].count() / m_nOuterNum;
225 int? nIgnoreLabel =
null;
231 m_log.
WriteLine(
"WARNING: Only the first ignore label recognized when using the simple accuracy layer.");
234 m_cuda.accuracy_fwd(colBottom[0].count(), m_nOuterNum, nDim, colBottom[0].gpu_data, colBottom[1].gpu_data, m_blobAccData.
mutable_gpu_data, m_blobAccData.
mutable_gpu_diff, nIgnoreLabel, m_bEnableLastElementOnly, colBottom[0].num);
238 float fAccuracy = (fTotalCount == 0) ? 0 : fAccCount / fTotalCount;
240 colTop[0].
SetData(fAccuracy, 0);
272 if (typeof(T) == typeof(
double))
273 forward_cpuD(colBottom, colTop);
275 forward_cpuF(colBottom, colTop);
280 double dfAccuracy = 0;
281 double[] rgBottomData =
convertD(colBottom[0].update_cpu_data());
282 double[] rgBottomLabel =
convertD(colBottom[1].update_cpu_data());
283 int nDim = colBottom[0].count() / m_nOuterNum;
284 int nNumLabels = colBottom[0].shape(m_nLabelAxis);
285 double[] rgNumsBuffer =
null;
286 double[] rgTopLabel =
null;
288 if (colTop.
Count > 1)
292 rgTopLabel =
convertD(colTop[1].mutable_cpu_data);
297 bool bNanDetected =
false;
299 for (
int i = 0; i < m_nOuterNum; i++)
301 for (
int j = 0; j < m_nInnerNum; j++)
303 int nLabelValue = (int)rgBottomLabel[i * m_nInnerNum + j];
305 if (m_nIgnoreLabel.HasValue && m_nIgnoreLabel.Value == nLabelValue)
308 m_log.
CHECK_GE(nLabelValue, 0,
"The lable value must be >= 0.");
309 m_log.
CHECK_LT(nLabelValue, nNumLabels,
"The label value must be < " + nNumLabels.ToString() +
". Make sure that the prototxt 'num_outputs' setting is > the highest label number.");
311 if (colTop.
Count > 1)
312 rgNumsBuffer[nLabelValue]++;
314 double prob_of_true_class = rgBottomData[i * nDim
315 + nLabelValue * m_nInnerNum
317 int num_better_predictions = -1;
319 for (
int k = 0; k < nNumLabels && num_better_predictions < m_nTopK; k++)
321 double dfVal = rgBottomData[i * nDim + k * m_nInnerNum + j];
323 if (
double.IsNaN(dfVal) ||
double.IsInfinity(dfVal))
325 else if (dfVal >= prob_of_true_class)
326 num_better_predictions += 1;
330 if (num_better_predictions != -1 && num_better_predictions < m_nTopK)
334 if (colTop.
Count > 1)
335 rgTopLabel[nLabelValue] += 1.0;
346 dfAccuracy = (nCount == 0) ? 0 : (dfAccuracy / nCount);
347 colTop[0].
SetData(dfAccuracy, 0);
350 if (colTop.
Count > 1)
352 for (
int i = 0; i < colTop[1].count(); i++)
356 if (rgNumsBuffer[i] != 0)
357 dfVal = rgTopLabel[i] / rgNumsBuffer[i];
359 rgTopLabel[i] = dfVal;
362 colTop[1].mutable_cpu_data =
convert(rgTopLabel);
370 float dfAccuracy = 0;
371 float[] rgBottomData =
convertF(colBottom[0].update_cpu_data());
372 float[] rgBottomLabel =
convertF(colBottom[1].update_cpu_data());
373 int nDim = colBottom[0].count() / m_nOuterNum;
374 int nNumLabels = colBottom[0].shape(m_nLabelAxis);
375 float[] rgNumsBuffer =
null;
376 float[] rgTopLabel =
null;
378 if (colTop.
Count > 1)
382 rgTopLabel =
convertF(colTop[1].mutable_cpu_data);
387 bool bNanDetected =
false;
389 for (
int i = 0; i < m_nOuterNum; i++)
391 for (
int j = 0; j < m_nInnerNum; j++)
393 int nLabelValue = (int)rgBottomLabel[i * m_nInnerNum + j];
395 if (m_nIgnoreLabel.HasValue && m_nIgnoreLabel.Value == nLabelValue)
398 m_log.
CHECK_GE(nLabelValue, 0,
"The lable value must be >= 0.");
399 m_log.
CHECK_LT(nLabelValue, nNumLabels,
"The label value must be < " + nNumLabels.ToString() +
". Make sure that the prototxt 'num_outputs' setting is > the highest label number.");
401 if (colTop.
Count > 1)
402 rgNumsBuffer[nLabelValue]++;
404 double prob_of_true_class = rgBottomData[i * nDim
405 + nLabelValue * m_nInnerNum
407 int num_better_predictions = -1;
409 for (
int k = 0; k < nNumLabels && num_better_predictions < m_nTopK; k++)
411 double dfVal = rgBottomData[i * nDim + k * m_nInnerNum + j];
413 if (
double.IsNaN(dfVal) ||
double.IsInfinity(dfVal))
415 else if (dfVal >= prob_of_true_class)
416 num_better_predictions += 1;
420 if (num_better_predictions != -1 && num_better_predictions < m_nTopK)
424 if (colTop.
Count > 1)
425 rgTopLabel[nLabelValue] += 1.0f;
436 dfAccuracy = (nCount == 0) ? 0 : (dfAccuracy / nCount);
437 colTop[0].
SetData(dfAccuracy, 0);
440 if (colTop.
Count > 1)
442 for (
int i = 0; i < colTop[1].count(); i++)
446 if (rgNumsBuffer[i] != 0)
447 dfVal = rgTopLabel[i] / rgNumsBuffer[i];
449 rgTopLabel[i] = dfVal;
452 colTop[1].mutable_cpu_data =
convert(rgTopLabel);
484 double dfAccuracy = 0;
485 double[] rgBottomData =
convertD(colBottom[0].update_cpu_data());
486 double[] rgBottomLabel =
convertD(colBottom[1].update_cpu_data());
487 int nNumLabels = colBottom[0].num;
489 bool bNanDetected =
false;
491 for (
int i = 0; i < nNumLabels; i++)
493 double dfDiff = Math.Abs(rgBottomData[i] - rgBottomLabel[i]);
494 if (dfDiff < 0.00001)
501 dfAccuracy = (double)nNumMatches / (
double)nNumLabels;
502 colTop[0].
SetData(dfAccuracy, 0);
505 if (colTop.
Count > 1)
514 if (rgbPropagateDown[0])
515 throw new NotImplementedException();
The Log class provides general output in text form.
void WriteLine(string str, bool bOverrideEnabled=false, bool bHeader=false, bool bError=false, bool bDisable=false)
Write a line of output.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
void CHECK_LE(double df1, double df2, string str)
Test whether one number is less than or equal to another.
void CHECK_GE(double df1, double df2, string str)
Test whether one number is greater than or equal to another.
void CHECK_LT(double df1, double df2, string str)
Test whether one number is less than another.
The BlobCollection contains a list of Blobs.
void SetData(double df)
Set all blob data to the value specified.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The Blob is the main holder of data that moves through the Layers of the Net.
void SetData(T[] rgData, int nCount=-1, bool bSetCount=true)
Sets a number of items within the Blob's data.
long mutable_gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use
T[] update_cpu_data()
Update the CPU data by transferring the GPU data over to the Host.
int count()
Returns the total number of items in the Blob.
long gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
The AccuracyLayer computes the classification accuracy for a one-of-many classification task....
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Not implemented – AccuracyLayer cannot be used as a loss.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void dispose()
Releases all GPU and host resources used by the Layer.
void forward_gpu(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The simple accuracy calculates the total accuracy across all predictions using an argmax comparison w...
void forward_cpu(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation.
AccuracyLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
Constructor.
override int MaxTopBlobs
Returns the maximum number of top blobs: accuracy, labels
override int ExactNumBottomBlobs
Returns the number of bottom blobs used: predicted, label
void forward_cpu_direct(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation.
override int MinTopBlobs
Returns the minimum number of top blobs: accuracy
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation.
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
float convertF(T df)
Converts a generic to a float value.
double convertD(T df)
Converts a generic to a double value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
uint top_k
When computing accuracy, count as correct by comparing the true label to the top_k scoring classes....
List< int > ignore_labels
If specified, ignore instances with the given label(s).
int axis
The 'label' axis of the prediction blob, whose argmax corresponds to the predicted label – may be nega...
bool enable_simple_accuracy
Enables a simple accuracy calculation where the argmax is compared with the actual.
bool enable_last_element_only
When computing accuracy, only count the last element of the prediction blob.
Specifies the base parameter for all layers.
AccuracyParameter accuracy_param
Returns the parameter set when initialized with LayerType.ACCURACY
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
BLOB_TYPE
Defines the type of data held by a given Blob.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...