From the source file (excerpted), the using directive and the working blobs allocated in the constructor:

using System.Collections.Generic;

List<int> m_rgShape = new List<int>(4);

// Working blobs allocated in the constructor.
m_blobErrors = new Blob<T>(cuda, log);
m_blobTargetsFull = new Blob<T>(cuda, log);
m_blobQuantile1 = new Blob<T>(cuda, log);
m_blobQuantile2 = new Blob<T>(cuda, log);
m_blobDesiredQuantiles = new Blob<T>(cuda, log);
m_blobLoss = new Blob<T>(cuda, log);
m_blobLossSum = new Blob<T>(cuda, log);
m_blobLossSumMean = new Blob<T>(cuda, log);
From dispose(), each working blob is released, for example:

dispose(ref m_blobDesiredQuantiles);
From LayerSetUp (excerpted), the desired-quantile blob is shaped and each quantile q is paired with q - 1:

base.LayerSetUp(colBottom, colTop);

List<int> rgShape = new List<int>(1);
m_blobDesiredQuantiles.Reshape(rgShape);

// Pair each desired quantile q with q - 1 for the pinball max() terms.
for (int i = 0; i < rgDeqQtl1.Length; i++)
    rgDeqQtl2[i] = rgDeqQtl1[i] - 1;
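The q and q - 1 multipliers feed the pinball (quantile) loss. A minimal CPU-side sketch of that computation, using illustrative names that are not part of the layer:

using System;

// Pinball (quantile) loss for one target/prediction pair at quantile q:
// loss = max(q * err, (q - 1) * err), where err = target - predicted.
static double QuantileLoss(double dfTarget, double dfPredicted, double dfQ)
{
    double dfErr = dfTarget - dfPredicted;
    return Math.Max(dfQ * dfErr, (dfQ - 1.0) * dfErr);
}

Under-predictions (err > 0) are weighted by q and over-predictions by 1 - q, so q = 0.9 penalizes under-prediction far more heavily than over-prediction.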
From Reshape (excerpted), the input dimensions are resolved and validated against the target:

base.Reshape(colBottom, colTop);

int nAxes = colBottom[0].num_axes;
m_nCount = colBottom[0].count();
m_nChannels = (nAxes == 2) ? 1 : colBottom[0].channels;
m_nInnerNum = (nAxes == 2) ? colBottom[0].channels : colBottom[0].count(2);

m_log.CHECK_EQ(colBottom[0].num, colBottom[1].num, "Input and target must have same 'num' size.");
m_log.CHECK_EQ(colBottom[0].channels, colBottom[1].channels, "Input and target must have same 'channel' size.");

m_rgShape.Add(m_nChannels);
m_blobLossSum.Reshape(m_rgShape);
m_blobLossSumMean.Reshape(m_rgShape);

if (colTop.Count > 1)
    m_rgShape[0] = m_nChannels;
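To make the dimension handling concrete, a hedged worked example with an assumed 3-axis bottom shape of (16, 30, 3):

// nAxes = 3 for a hypothetical (16, 30, 3) bottom, so:
//   m_nChannels = colBottom[0].channels = 30
//   m_nInnerNum = colBottom[0].count(2) = 3 (one value per desired quantile)
// For a 2-axis bottom such as (16, 3), the channel axis holds the quantiles:
//   m_nChannels = 1 and m_nInnerNum = 3.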
From forward (excerpted), the mean quantile loss is computed and, when a second top blob is present, the q-risk output is normalized by the sum of the target magnitudes:

double dfQLoss = m_blobLossSumMean.mean();

if (colTop.Count > 1)
{
    double dfTargetSum = convertD(colBottom[1].asum_data());
    colTop[1].scale_data(2.0 / dfTargetSum);
}
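The 2.0 / dfTargetSum scaling produces a normalized quantile risk (q-risk) of the form 2 * sum(loss) / sum(|target|). A self-contained sketch, where the helper name and inputs are illustrative rather than part of the layer:

using System;

// q-risk for a single quantile q over a set of samples:
// qrisk = 2 * sum_i max(q * e_i, (q - 1) * e_i) / sum_i |y_i|, with e_i = y_i - yhat_i.
static double QRisk(double[] rgTarget, double[] rgPredicted, double dfQ)
{
    double dfLossSum = 0;
    double dfTargetSum = 0;

    for (int i = 0; i < rgTarget.Length; i++)
    {
        double dfErr = rgTarget[i] - rgPredicted[i];
        dfLossSum += Math.Max(dfQ * dfErr, (dfQ - 1.0) * dfErr);
        dfTargetSum += Math.Abs(rgTarget[i]);
    }

    return 2.0 * dfLossSum / dfTargetSum;
}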
From backward (excerpted), gradients are only produced when requested, scaled by the top diff, and negated into the bottom diff:

if (!rgbPropagateDown[0])
    return;

double dfGrad = convertD(colTop[0].GetDiff(0));
m_cuda.scale(m_nCount, -1.0, m_blobErrors.gpu_diff, colBottom[0].mutable_gpu_diff);
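The scale by -1.0 flips the error-side gradient into the prediction-side gradient. For intuition, a hedged sketch of the per-element pinball derivative (illustrative names, not from the layer):

// d(loss)/d(predicted) for loss = max(q * err, (q - 1) * err), err = target - predicted:
//   err > 0 (under-prediction)  ->  -q
//   err < 0 (over-prediction)   ->  1 - q
static double QuantileLossGrad(double dfTarget, double dfPredicted, double dfQ)
{
    double dfErr = dfTarget - dfPredicted;
    return (dfErr > 0) ? -dfQ : (1.0 - dfQ);
}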
The Log class provides general output in text form.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
The BlobCollection contains a list of Blobs.
void SetData(double df)
Set all blob data to the value specified.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The Blob is the main holder of data that moves through the Layers of the Net.
long mutable_gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
T[] mutable_cpu_diff
Get diff from the GPU and bring it over to the host, or Set diff from the Host and send it over to the GPU.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
T[] mutable_cpu_data
Get data from the GPU and bring it over to the host, or Set data from the Host and send it over to the GPU.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use the Reshape(rgShape) overload instead.
double mean(float[] rgDf=null, bool bDiff=false)
Calculate the mean of the blob data.
int count()
Returns the total number of items in the Blob.
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
string Name
Get/set the name of the Blob.
long gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
void SetDiff(double dfVal, int nIdx=-1)
Either sets all of the diff items in the Blob to a given value, or alternatively only sets a single item at the given index.
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
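A minimal usage sketch of the Blob members listed above, assuming existing CudaDnn<float> cuda and Log log instances:

Blob<float> blob = new Blob<float>(cuda, log);
blob.Name = "example";
blob.Reshape(new List<int>() { 2, 3 });    // 2x3 blob
blob.SetDiff(0);                           // clear every diff element
float[] rgData = blob.mutable_cpu_data;    // copy data GPU -> host
long hData = blob.gpu_data;                // GPU handle for CudaDnn calls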
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
double convertD(T df)
Converts a generic to a double value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The LossLayer provides an interface for Layers that take two Blobs as input, usually (1) predictions and (2) ground-truth labels, and output a singleton Blob representing the loss.
int m_nOuterNum
Specifies the outer num, such as the batch count (e.g. count(0, axis)). Each derivative class must set this value accordingly.
int m_nInnerNum
Specifies the inner num, such as the channel + height + width (e.g. count(axis + 1)). Each derivative class must set this value accordingly.
void callLossEvent(Blob< T > blob)
This method is called by the loss layer to pass the blob data to the OnLoss event (if implemented).
The QuantileLossLayer computes the quantile loss for real-valued regression tasks.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs, which is variable for this layer (see MinTopBlobs and MaxTopBlobs).
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the QuantileLoss error gradient w.r.t. the inputs.
override int MinTopBlobs
Returns the minimum number of required top (output) Blobs: loss.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
QuantileLossLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The QuantileLossLayer constructor
override int MaxTopBlobs
Returns the maximum number of required top (output) Blobs: loss, q_risk.
override bool AllowForceBackward(int nBottomIdx)
Unlike most loss layers, in the QuantileLossLayer we can backpropagate to both inputs; this is overridden to return true and always allow force_backward.
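A hedged construction sketch tying these members together; the desired_quantiles field name on QuantileLossParameter is an assumption, and cuda, log, colBottom, and colTop are assumed to exist:

LayerParameter p = new LayerParameter(LayerParameter.LayerType.QUANTILE_LOSS);
p.name = "qloss";
// Assumed property; consult QuantileLossParameter for the exact field name.
p.quantile_loss_param.desired_quantiles = new List<float>() { 0.1f, 0.5f, 0.9f };

QuantileLossLayer<float> layer = new QuantileLossLayer<float>(cuda, log, p);
layer.LayerSetUp(colBottom, colTop);   // setup the layer
layer.Reshape(colBottom, colTop);      // shape the tops from the bottoms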
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
QuantileLossParameter quantile_loss_param
Returns the parameter set when initialized with LayerType.QUANTILE_LOSS
LayerType
Specifies the layer type.
The MyCaffe.basecode namespace contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
DIR
Defines the direction of data flow.
The MyCaffe.layers namespace contains all layers that have a solidified code base.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.