2using System.Collections.Generic;
22 object m_userState =
null;
23 bool m_bWarningMade =
false;
24 bool m_bEnableLoss =
true;
30 public event EventHandler<MemoryLossLayerGetLossArgs<T>>
OnGetLoss;
61 get {
return m_userState; }
62 set { m_userState = value; }
104 base.LayerSetUp(colBottom, colTop);
111 m_bWarningMade =
false;
121 bool bUniformSize =
true;
122 int nAxis = colBottom[0].CanonicalAxisIndex(1);
123 int nCount = colBottom[0].count(nAxis);
125 for (
int i = 1; i < colBottom.
Count; i++)
127 int nCount1 = colBottom[i].count(nAxis);
128 if (nCount1 != nCount)
130 bUniformSize =
false;
139 m_log.
WriteLine(
"WARNING: The MemoryDataLayer bottoms are not of uniform size, so the normalization will be set to NONE.");
140 m_bWarningMade =
true;
153 List<int> rgLossShape =
new List<int>();
154 colTop[0].
Reshape(rgLossShape);
179 m_log.
FAIL(
"The OnGetLoss event must be implemented. Make sure the SolverParameter 'custom_trainer' points to a trainer that connects the OnGetLoss event.");
205 if (!rgbPropagateDown[0])
213 double dfLoss =
convertD(colTop[0].GetData(0));
215 for (
int i = 0; i < colBottom.
Count; i++)
217 m_cuda.copy(colBottom[i].count(), colBottom[i].gpu_data, colBottom[i].mutable_gpu_diff);
218 m_cuda.mul_scalar(colBottom[i].count(), dfLoss, colBottom[i].mutable_gpu_diff);
224 double dfTopDiff =
convertD(colTop[0].GetDiff(0));
225 double dfLossWeight = dfTopDiff / dfNormalizer;
228 if (dfLossWeight != 1.0)
230 for (
int i = 0; i < colBottom.
Count; i++)
232 m_cuda.scal(colBottom[i].count(),
convert(dfLossWeight), colBottom[i].mutable_gpu_diff);
243 object m_userState =
null;
245 double m_dfNormalizer = 1;
247 bool m_bEnableLossUpdate =
true;
258 m_userState = userState;
259 m_colBottom = colBottom;
268 get {
return m_userState; }
276 get {
return m_colBottom; }
284 get {
return m_dfNormalizer; }
292 get {
return m_dfLoss; }
293 set { m_dfLoss = value; }
301 get {
return m_bEnableLossUpdate; }
302 set { m_bEnableLossUpdate = value; }
310 get {
return m_tag; }
311 set { m_tag = value; }
The Log class provides general output in text form.
void WriteLine(string str, bool bOverrideEnabled=false, bool bHeader=false, bool bError=false, bool bDisable=false)
Write a line of output.
void FAIL(string str)
Causes a failure which throws an exception with the descriptive text.
The BlobCollection contains a list of Blobs.
void SetData(double df)
Set all blob data to the value specified.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
double convertD(T df)
Converts a generic to a double value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The LossLayer provides an interface for Layer's that take two blobs as input – usually (1) prediction...
int m_nOuterNum
Specifies the outer num, such as the batch count (e.g. count(0, axis)). Each derivative class must se...
int m_nInnerNum
Specifies the inner num, such as the channel + height + width (e.g. count(axis + 1))....
virtual double get_normalizer(LossParameter.NormalizationMode normalization_mode, int nValidCount)
Returns the normalizer used to normalize the loss.
LossParameter.NormalizationMode m_normalization
Specifies the normalization mode used to normalize the loss.
The MemoryLossLayerGetLossArgs class is passed to the OnGetLoss event.
bool EnableLossUpdate
Get/set enabling the loss update within the backpropagation pass.
object user_state
Specifies a user-state.
double Normalizer
Specifies the normalizer.
double Loss
Get/set the externally calculated total loss.
BlobCollection< T > Bottom
Specifies the bottom passed in during the forward pass.
MemoryLossLayerGetLossArgs(BlobCollection< T > colBottom, object userState, double dfNormalizer)
The constructor.
object Tag
Get/set a user defined value.
The MemoryLossLayer provides a method of performing a custom loss functionality. Similar to the Memor...
MemoryLossLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
Constructor.
EventHandler< MemoryLossLayerGetLossArgs< T > > OnGetLoss
The OnGetLoss event fires during each forward pass. The value returned is saved, and applied on the b...
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: loss.
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs as variable.
object user_state
Optionally specifies a user-state that is passed to the OnGetLoss event.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Backpropagates the previously acquired (within the forward pass) loss error gradient w....
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The forward computation.
override void dispose()
Releases all GPU and host resources used by the Layer.
override int MaxBottomBlobs
Returns the maximum number of required bottom (input) Blobs: input 1 and 2.
override int MinBottomBlobs
Returns the minimum number of required bottom (input) Blobs: input 1.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
Specifies the base parameter for all layers.
LayerType
Specifies the layer type.
LossParameter loss_param
Returns the parameter set when initialized with LayerType.LOSS
Stores the parameters used by loss layers.
NormalizationMode
How to normalize the loss for loss layers that aggregate across batches, spatial dimensions,...
bool normalize
DEPRECATED. Ignored if normalization is specified. If normalization is not specified,...
NormalizationMode? normalization
Specifies the normalization mode (default = VALID).
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
BLOB_TYPE
Defines the type of data held by a given Blob.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...