2using System.Collections.Generic;
// NOTE(review): extraction artifact — the original file's line numbers (92, 93, …) are fused
// into the text, statements are split mid-token, and brace / blank / switch-header lines are
// missing from this view (e.g. the L1-norm loss branch between original lines 142 and 152).
// This fragment is the interior of the HingeLoss forward pass; it is not compilable as-is.
//
// Visible behavior: copies the bottom (prediction) data into the diff buffer, negates the
// score at each sample's true-label index, clamps every entry to max(0, 1 + diff) — the hinge
// margin — and (in the visible branch) computes the loss as dot(diff, diff) / num (L2 norm).
//
// Cache the GPU handles and shape of the prediction blob.
92 long hBottomData = colBottom[0].gpu_data;
93 long hBottomDiff = colBottom[0].mutable_gpu_diff;
94 int nNum = colBottom[0].num;
95 int nCount = colBottom[0].count();
// nDim = number of classes per sample (count / num).
96 int nDim = nCount / nNum;
// Seed the diff buffer with a copy of the prediction data; the margin math below edits it in place.
98 m_cuda.copy(nCount, hBottomData, hBottomDiff);
// Pull the diff and label blobs down to host arrays for the per-element loop.
100 T[] rgBottomDiff = colBottom[0].mutable_cpu_diff;
101 T[] rgLabel = colBottom[1].update_cpu_data();
// Double-precision branch of the generic-type dispatch.
103 if (typeof(T) == typeof(
double))
105 double[] rgBottomDiffD = (
double[])Convert.ChangeType(rgBottomDiff, typeof(
double[]));
106 double[] rgLabelD = (
double[])Convert.ChangeType(rgLabel, typeof(
double[]));
// Negate the score of each sample's true class (label value used as the class index).
108 for (
int i = 0; i < nNum; i++)
110 rgBottomDiffD[i * nDim + (int)rgLabelD[i]] *= -1;
// Hinge: every entry becomes max(0, 1 + diff).
112 for (
int i = 0; i < nNum; i++)
114 for (
int j = 0; j < nDim; j++)
116 int nIdx = i * nDim + j;
117 double dfDiff = rgBottomDiffD[nIdx];
118 rgBottomDiffD[nIdx] = Math.Max(0.0, 1.0 + dfDiff);
// Single-precision branch — identical logic to the double branch above.
// (The 'else' line between original lines 118 and 124 is missing from this extraction.)
124 float[] rgBottomDiffF = (
float[])Convert.ChangeType(rgBottomDiff, typeof(
float[]));
125 float[] rgLabelF = (
float[])Convert.ChangeType(rgLabel, typeof(
float[]));
127 for (
int i = 0; i < nNum; i++)
129 rgBottomDiffF[i * nDim + (int)rgLabelF[i]] *= -1;
131 for (
int i = 0; i < nNum; i++)
133 for (
int j = 0; j < nDim; j++)
135 int nIdx = i * nDim + j;
136 float fDiff = rgBottomDiffF[nIdx];
137 rgBottomDiffF[nIdx] = Math.Max(0.0f, 1.0f + fDiff);
// Push the hinge values back to the blob (and thus back to the GPU diff buffer).
142 colBottom[0].mutable_cpu_diff = rgBottomDiff;
// Visible norm branch: L2 loss = dot(diff, diff) / num.
// NOTE(review): the enclosing switch on hinge_loss_param.norm and the L1 (asum) case
// (original lines ~143-151) are missing from this extraction — confirm against the full file.
152 dfLoss =
convertD(
m_cuda.dot(nCount, hBottomDiff, hBottomDiff)) / nNum;
// Default case of the norm switch: unknown norm is a hard failure (FAIL throws).
156 m_log.
FAIL(
"Unknown norm in HingeLoss!");
// NOTE(review): extraction artifact — original line numbers are fused into the text and the
// brace / switch / case lines are missing; this is the interior of the HingeLoss backward pass
// and is not compilable as-is.
//
// Visible behavior: if gradients are requested for the predictions, re-negates the diff entry
// at each sample's true-label index, then scales the diff by the loss weight according to the
// configured norm (sign + loss_weight/num for the visible L1-style branch; loss_weight*2/num
// for the L2-style branch).
//
// Gradient w.r.t. the labels is not computable; the body of this branch (presumably a
// FAIL/throw) is missing from this extraction — confirm against the full file.
193 if (rgbPropagateDown[1])
// Only do work when the caller wants gradients for the predictions.
196 if (rgbPropagateDown[0])
198 long hBottomDiff = colBottom[0].mutable_gpu_diff;
199 int nNum = colBottom[0].num;
200 int nCount = colBottom[0].count();
// nDim = number of classes per sample.
201 int nDim = nCount / nNum;
// NOTE(review): no copy from bottom data here — this appears to rely on the diff values
// left in place by the forward pass (the hinge margins); TODO confirm.
203 T[] rgBottomDiff = colBottom[0].mutable_cpu_diff;
204 T[] rgLabel = colBottom[1].update_cpu_data();
// Double-precision branch of the generic-type dispatch.
206 if (typeof(T) == typeof(
double))
208 double[] rgBottomDiffD = (
double[])Convert.ChangeType(rgBottomDiff, typeof(
double[]));
209 double[] rgLabelD = (
double[])Convert.ChangeType(rgLabel, typeof(
double[]));
// Flip the sign of the true-class entry for each sample.
211 for (
int i = 0; i < nNum; i++)
213 rgBottomDiffD[i * nDim + (int)rgLabelD[i]] *= -1;
// Single-precision branch — identical logic to the double branch above.
// (The 'else' line between original lines 213 and 218 is missing from this extraction.)
218 float[] rgBottomDiffF = (
float[])Convert.ChangeType(rgBottomDiff, typeof(
float[]));
219 float[] rgLabelF = (
float[])Convert.ChangeType(rgLabel, typeof(
float[]));
221 for (
int i = 0; i < nNum; i++)
223 rgBottomDiffF[i * nDim + (int)rgLabelF[i]] *= -1;
// Write the sign-adjusted values back to the blob/GPU diff buffer.
227 colBottom[0].mutable_cpu_diff = rgBottomDiff;
// Scale by the loss weight supplied via the top blob's diff.
229 double dfLossWeight =
convertD(colTop[0].GetDiff(0));
// Visible norm branch (L1 style): gradient = sign(diff) * loss_weight / num.
// NOTE(review): the enclosing switch on hinge_loss_param.norm and the case labels are
// missing from this extraction — confirm against the full file.
233 m_cuda.sign(nCount, hBottomDiff, hBottomDiff);
234 m_cuda.scal(nCount, dfLossWeight / nNum, hBottomDiff);
// L2-style branch: gradient = diff * loss_weight * 2 / num.
238 m_cuda.scal(nCount, dfLossWeight * 2 / nNum, hBottomDiff);
// Default case of the norm switch: unknown norm is a hard failure (FAIL throws).
242 m_log.
FAIL(
"Unknown norm in HingeLoss!");
The Log class provides general output in text form.
void FAIL(string str)
Causes a failure which throws an exception with the descriptive text.
The BlobCollection contains a list of Blobs.
void SetData(double df)
Set all blob data to the value specified.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
The HingeLossLayer computes the hinge loss for a one-of-many classification task. This layer is initi...
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The forward computation.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the hinge loss error gradient w.r.t the predictions.
HingeLossLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The HingeLoss constructor.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
LayerParameter.LayerType type
Returns the LayerType of this Layer.
double convertD(T df)
Converts a generic to a double value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The LossLayer provides an interface for Layer's that take two blobs as input – usually (1) prediction...
Specifies the parameters for the HingeLossLayer.
Norm norm
Specifies the Norm to use: L1 or L2.
Norm
Defines the type of normalization.
Specifies the base parameter for all layers.
HingeLossParameter hinge_loss_param
Returns the parameter set when initialized with LayerType.HINGE_LOSS
LayerType
Specifies the layer type.
override string ToString()
Returns a string representation of the LayerParameter.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...