using System.Collections.Generic;

public abstract class Layer<T> : IDisposable
private List<List<int>> m_rgrgLastBottomShape = new List<List<int>>();
private List<List<int>> m_rgrgLastTopShape = new List<List<int>>();
private double m_dfForwardTiming = 0;
private double m_dfForwardAverageTiming = 0;
private double m_dfBackwardTiming = 0;
private double m_dfBackwardAverageTiming = 0;
private double m_dfAverageInterval = 20.0;
private Stopwatch m_swTiming = new Stopwatch();
public event EventHandler<GetWorkBlobArgs<T>> OnDebug;
m_rgLoss = new DictionaryMap<double>(0.0);
for (int i = 0; i < p.blobs.Count; i++)
m_tOne = (T)Convert.ChangeType(1, typeof(T));
m_tZero = (T)Convert.ChangeType(0, typeof(T));
get { return false; }
get { return false; }
get { return false; }
get { return false; }
m_rgrgLastBottomShape = new List<List<int>>();
m_rgrgLastTopShape = new List<List<int>>();
if (colBottom != colTop)
for (int i = 0; i < colBottom.Count; i++)
if (i < colTop.Count && colBottom[i] != colTop[i])
colTop[i].CopyParameters(colBottom[i]);
catch (Exception excpt)
throw new Exception("Layer: '" + m_param.name + "' (" + m_param.type.ToString() + ") Error: " + excpt.Message, excpt);
throw new Exception("The OnGetWorkSpace and OnSetWorkspace events must be connected!");
ulong lSize = (ulong)nCount * CudaDnn<T>.basetype_size(false);

bool bConversionNeeded = false;
if (lMaxSize < lSize)
bConversionNeeded = true;
if (!bConversionNeeded)
throw new Exception("The OnGetWorkSpace and OnSetWorkspace events must be connected!");
b.ConvertToHalf(args.WorkspaceData, args.WorkspaceSizeInBytes, true, true);
b.ConvertToBase(args.WorkspaceData, args.WorkspaceSizeInBytes, true, true);

bool bConversionNeeded = false;
if (lMaxSize < lSize)
bConversionNeeded = true;
if (!bConversionNeeded)
b.ConvertToBase(args.WorkspaceData, args.WorkspaceSizeInBytes, true, true);
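The two fragments above are from the half/base conversion helpers: ConvertToHalf and ConvertToBase borrow a scratch GPU buffer through the OnGetWorkspace and OnSetWorkspace events (documented further below). A minimal sketch of that event contract, with a hypothetical WorkspaceHost and an AllocateGpu stand-in for the real CudaDnn allocation:

using System;

// Hypothetical sketch of the OnGetWorkspace/OnSetWorkspace contract used above.
public class WorkspaceArgs : EventArgs
{
    public long WorkspaceData;          // handle to workspace data in GPU memory
    public ulong WorkspaceSizeInBytes;  // workspace memory size in bytes
}

public class WorkspaceHost
{
    long m_hData = 0;
    ulong m_lSize = 0;

    // The Net typically owns the buffer and services both events for its layers.
    public void OnGetWorkspace(object sender, WorkspaceArgs e)
    {
        e.WorkspaceData = m_hData;
        e.WorkspaceSizeInBytes = m_lSize;
    }

    public void OnSetWorkspace(object sender, WorkspaceArgs e)
    {
        // Only grow the shared buffer, mirroring the 'lMaxSize < lSize' test above.
        if (e.WorkspaceSizeInBytes <= m_lSize)
            return;

        m_lSize = e.WorkspaceSizeInBytes;
        m_hData = AllocateGpu(m_lSize); // hypothetical stand-in for a CudaDnn allocation
    }

    static long AllocateGpu(ulong lSize) { return 1; } // placeholder handle
}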
if (rgrg.Count != col.Count)
for (int i = 0; i < col.Count; i++)
int nCount = col[i].shape().Count;
if (rgrg[i].Count != nCount)
for (int j = 0; j < nCount; j++)
if (col[i].shape()[j] != rgrg[i][j])

setShapes(colBottom, ref m_rgrgLastBottomShape);
setShapes(colTop, ref m_rgrgLastTopShape);

if (rgrg.Count != col.Count)
rgrg = new List<List<int>>(col.Count);
for (int i = 0; i < col.Count; i++)
int nCount = col[i].shape().Count;
if (rgrg.Count < col.Count)
rgrg.Add(new List<int>());
else if (rgrg[i].Count != nCount)
rgrg[i] = new List<int>(nCount);
for (int j = 0; j < nCount; j++)
nCount = col[i].shape().Count;
if (rgrg[i].Count < nCount)
rgrg[i].Add(col[i].shape()[j]);
rgrg[i][j] = col[i].shape()[j];
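The compareShapes/setShapes pair above caches the last bottom and top shapes so that reshapeNeeded only triggers a Reshape when a shape actually changed. The same caching idea as a self-contained sketch (the names are illustrative, not MyCaffe API):

using System.Collections.Generic;
using System.Linq;

// Remember the last shapes seen; report a change only when they differ.
public class ShapeCache
{
    List<List<int>> m_rgrgLast = new List<List<int>>();

    public bool HasChanged(List<List<int>> rgrgCurrent)
    {
        if (m_rgrgLast.Count != rgrgCurrent.Count ||
            m_rgrgLast.Where((rg, i) => !rg.SequenceEqual(rgrgCurrent[i])).Any())
        {
            // Snapshot the new shapes (deep copy) for the next comparison.
            m_rgrgLast = rgrgCurrent.Select(rg => new List<int>(rg)).ToList();
            return true;
        }

        return false;
    }
}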
m_swTiming.Restart();
for (int i = 0; i < colTop.Count; i++)
int nCount = colTop[i].count();
long hData = colTop[i].gpu_data;
long hDiff = colTop[i].gpu_diff;
double dfBlobLoss = m_cuda.dot_double(nCount, hData, hDiff);
dfLoss += dfBlobLoss;
m_dfForwardTiming = m_swTiming.Elapsed.TotalMilliseconds;
m_dfForwardAverageTiming = getAveTiming(m_dfAverageInterval, m_dfForwardTiming, m_dfForwardAverageTiming);
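The Forward fragment above accumulates the scalar loss as the GPU dot product of each top blob's data with its diff (m_cuda.dot_double); tops whose diff holds a zero loss weight contribute nothing. The same arithmetic on the CPU, as a sketch:

// CPU sketch (not MyCaffe's GPU path) of the loss accumulation above:
// loss = sum_i dot(top[i].data, top[i].diff), where each top diff was
// pre-seeded with its loss weight by SetLossWeights.
static double ComputeLoss(double[][] rgTopData, double[][] rgTopDiff)
{
    double dfLoss = 0;
    for (int i = 0; i < rgTopData.Length; i++)
        for (int j = 0; j < rgTopData[i].Length; j++)
            dfLoss += rgTopData[i][j] * rgTopDiff[i][j];
    return dfLoss;
}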
Tuple<double, double, double, double> mm_data = b.minmax_data(args.Blob, true);
Tuple<double, double, double, double> mm_diff = b.minmax_diff(args.Blob, true);
if (mm_data.Item3 > 0 || mm_data.Item4 > 0)
throw new Exception("NAN or INF detected in the TOP '" + b.Name + "' Data for layer '" + m_param.name + "' on the forward pass.");
if (mm_diff.Item3 > 0 || mm_diff.Item4 > 0)
throw new Exception("NAN or INF detected in TOP '" + b.Name + "' Diff for layer '" + m_param.name + "' on the forward pass.");
catch (Exception excpt)
throw new Exception("Layer: '" + m_param.name + "' (" + m_param.type.ToString() + ") Error: " + excpt.Message, excpt);
m_swTiming.Restart();
backward(colTop, rgbPropagateDown, colBottom);
m_dfBackwardTiming = m_swTiming.Elapsed.TotalMilliseconds;
m_dfBackwardAverageTiming = getAveTiming(m_dfAverageInterval, m_dfBackwardTiming, m_dfBackwardAverageTiming);

foreach (Blob<T> b in colBottom)
Tuple<double, double, double, double> mm_data = b.minmax_data(args.Blob, true);
Tuple<double, double, double, double> mm_diff = b.minmax_diff(args.Blob, true);
if (mm_data.Item3 > 0 || mm_data.Item4 > 0)
throw new Exception("NAN or INF detected in the BOTTOM '" + b.Name + "' Data for layer '" + m_param.name + "' on the backward pass.");
if (mm_diff.Item3 > 0 || mm_diff.Item4 > 0)
throw new Exception("NAN or INF detected in the BOTTOM '" + b.Name + "' Diff for layer '" + m_param.name + "' on the backward pass.");
catch (Exception excpt)
throw new Exception("Layer: '" + m_param.name + "' (" + m_param.type.ToString() + ") Error: " + excpt.Message, excpt);
public double loss(int nTopIdx)
get { return false; }
get { return false; }
for (int i = 0; i < colTop.Count; i++)
if (dfLossWeight == 0)
colTop[i].SetDiff(dfLossWeight);
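This SetLossWeights fragment seeds each top blob's diff with its configured loss weight (skipping zero weights), which is what lets the dot product in Forward produce a weighted loss. A worked example with made-up values:

// Seeding the diff with the loss weight makes the Forward dot product
// compute a weighted loss; with dfLossWeight = 0 the blob is skipped.
double dfLossWeight = 2.0;
double[] rgTopData = { 0.5, 1.5 };                   // pretend loss values
double[] rgTopDiff = { dfLossWeight, dfLossWeight }; // SetDiff(dfLossWeight)
double dfLoss = rgTopData[0] * rgTopDiff[0] + rgTopData[1] * rgTopDiff[1]; // = 4.0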
if (paramEx == null)
return paramEx.SharedBlobs.Share(b, rgMinShape, false, bAllowEndsWithComparison);
if (paramEx == null)
if (paramEx == null)
for (int i = 0; i < paramEx.SharedLayer.blobs.Count; i++)
if (strSrc != strDst)
m_log.WriteLine("WARNING: Cannot share blob '" + bSrc.Name + "'(" + strSrc + ") with blob '" + bDst.Name + "'(" + strDst + ") because the sizes differ!");
for (int i = 0; i < paramEx.SharedLayer.internal_blobs.Count; i++)
if (strSrc != strDst)
m_log.WriteLine("WARNING: Cannot share internal blob '" + bSrc.Name + "'(" + strSrc + ") with internal blob '" + bDst.Name + "'(" + strDst + ") because the sizes differ!");
get { return m_dfForwardTiming; }
get { return m_dfForwardAverageTiming; }
get { return m_dfBackwardTiming; }
get { return m_dfBackwardAverageTiming; }
for (int i = 0; i < rg.Length; i++)
if (double.IsNaN(rg[i]))
throw new Exception("NAN FOUND!");
return (T)Convert.ChangeType(df, typeof(T));
return (T)Convert.ChangeType(f, typeof(T));
return (double)Convert.ChangeType(df, typeof(double));
return (float)Convert.ChangeType(df, typeof(float));

if (typeof(T) == typeof(double))
return (double[])Convert.ChangeType(rg, typeof(double[]));
double[] rgdf = new double[rg.Length];
Array.Copy(rg, rgdf, rg.Length);

if (typeof(T) == typeof(double))
return (T[])Convert.ChangeType(rg, typeof(T[]));
float[] rgf = new float[rg.Length];
Array.Copy(Array.ConvertAll(rg, p => Convert.ToSingle(p)), rgf, rg.Length);
return (T[])Convert.ChangeType(rgf, typeof(T[]));

if (typeof(T) == typeof(float))
return (float[])Convert.ChangeType(rg, typeof(float[]));
float[] rgf = new float[rg.Length];
Array.Copy(Array.ConvertAll(rg, p => Convert.ToSingle(p)), rgf, rg.Length);

if (typeof(T) == typeof(float))
return (T[])Convert.ChangeType(rg, typeof(T[]));
T[] rgt = new T[rg.Length];
Array.Copy(rg, rgt, rg.Length);
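These helpers rely on Convert.ChangeType returning the value unchanged when it already has the requested type, so the common case (T already matches) costs only a cast. A standalone sketch of the same pattern for the double direction:

using System;

// When T is already double a single cast suffices; otherwise each
// element is converted individually.
static double[] ToDouble<T>(T[] rg)
{
    if (typeof(T) == typeof(double))
        return (double[])Convert.ChangeType(rg, typeof(double[]));

    double[] rgdf = new double[rg.Length];
    for (int i = 0; i < rg.Length; i++)
        rgdf[i] = Convert.ToDouble(rg[i]);
    return rgdf;
}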
return (int)Convert.ChangeType(rg[nIdx], typeof(int));

int nHeight = val_at(rg, 0);
int nWidth = (rg.Length > 1) ? val_at(rg, 1) : nHeight;
return new Size(nWidth, nHeight);
private double getAveTiming(double dfInterval, double dfTiming, double dfAveTiming)
double dfRatio = 1.0 / m_dfAverageInterval;
return (dfAveTiming * (1.0 - dfRatio)) + (dfTiming * dfRatio);
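getAveTiming computes an exponential moving average with ratio 1/m_dfAverageInterval (0.05 for the default interval of 20); note that the dfInterval parameter is ignored by the body shown, which reads the field directly. A worked example:

// Moving-average formula above with the default interval of 20:
double dfRatio = 1.0 / 20.0;  // 0.05
double dfAve = 10.0;          // previous average, in ms
double dfTiming = 30.0;       // newest measurement, in ms
double dfNewAve = (dfAve * (1.0 - dfRatio)) + (dfTiming * dfRatio); // 9.5 + 1.5 = 11.0 ms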
Layer<T> layer = createDynamicLayer(cuda, log, p, db, evtCancel);
log.FAIL("Unknown layer type: " + p.type.ToString());
throw new NotImplementedException("The layer type: " + p.type.ToString() + " is not implemented yet.");
string strDir = System.IO.Path.GetDirectoryName(new System.Uri(System.Reflection.Assembly.GetExecutingAssembly().CodeBase).LocalPath);
string[] rgstrFiles = Directory.GetFiles(strDir, "mycaffe.layers.*.dll");
foreach (string strFile in rgstrFiles)
FileInfo fi = new FileInfo(strFile);
if (fi.Name.ToLower().IndexOf("mycaffe.layers.") == 0 && fi.Extension.ToLower() == ".dll")
if (icreator != null)
if (typeof(T) == typeof(double))

private static ILayerCreator loadCreator(string strPath)
Assembly a = Assembly.LoadFile(strPath);
foreach (Type t in a.GetTypes())
Type iface = t.GetInterface("ILayerCreator");
object obj = Activator.CreateInstance(t);
return (ILayerCreator)obj;
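Here createDynamicLayer probes every mycaffe.layers.*.dll beside the executing assembly, and loadCreator instantiates the first type implementing ILayerCreator. A condensed, self-contained sketch of that reflection pattern (LoadFirstImplementation is an illustrative name, not MyCaffe API):

using System;
using System.Reflection;

// Load an assembly, find the first concrete type implementing the named
// interface, and instantiate it with its parameterless constructor.
static object LoadFirstImplementation(string strPath, string strInterface)
{
    Assembly a = Assembly.LoadFile(strPath);

    foreach (Type t in a.GetTypes())
    {
        if (t.IsAbstract || !t.IsClass)
            continue;

        // GetInterface returns null when the type does not implement it.
        if (t.GetInterface(strInterface) != null)
            return Activator.CreateInstance(t);
    }

    return null;
}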
m_colSharedBlobs = colBlobs;
m_colLayerBlobs = colLayerBlobs;
m_layer = sharedLayer;
get { return m_layer; }
get { return m_colSharedBlobs; }
get { return m_colLayerBlobs; }
return new LayerParameterEx<T>(base.Clone(bCloneBlobs), m_colSharedBlobs, m_colLayerBlobs, m_layer);
The CancelEvent provides an extension to the manual cancel event that allows for overriding the manua...
The Log class provides general output in text form.
void WriteLine(string str, bool bOverrideEnabled=false, bool bHeader=false, bool bError=false, bool bDisable=false)
Write a line of output.
void FAIL(string str)
Causes a failure which throws an exception with the descriptive text.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
void CHECK_LE(double df1, double df2, string str)
Test whether one number is less than or equal to another.
void CHECK_GE(double df1, double df2, string str)
Test whether one number is greater than or equal to another.
Specifies a key-value pair of properties.
The BlobCollection contains a list of Blobs.
void SetDiff(double df)
Set all blob diff to the value specified.
int Count
Returns the number of items in the collection.
The Blob is the main holder of data that moves through the Layers of the Net.
Tuple< double, double, double, double > minmax_data(Blob< T > work, bool bDetectNans=false, bool bUseChunks=false)
Returns the minimum and maximum values in the data of the Blob.
void Share(Blob< T > b)
Share another Blob with this one, by setting the data and diff to the same data and diff of the other...
bool HalfSize
Returns whether or not this blob is using half sizes.
string shape_string
Returns a string describing the Blob's shape.
void ConvertToBase(long hWorkMem, ulong lWorkSize, bool bData, bool bDiff)
Converts this blob from the half type to the base type.
void ConvertToHalf(long hWorkMem, ulong lWorkSize, bool bData, bool bDiff)
Converts this blob from its base type to the half type.
Tuple< double, double, double, double > minmax_diff(Blob< T > work, bool bDetectNans=false, bool bUseChunks=false)
Returns the minimum and maximum values in the diff of the Blob.
ulong GetConversionWorkSize(bool bUseHalfSize)
Returns the amount of memory (in bytes) required to convert from base to half and back.
T[] update_cpu_data()
Update the CPU data by transferring the GPU data over to the Host.
string Name
Get/set the name of the Blob.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
The GetIterationArgs is bubbled up to the solver when a layer needs to know the current training ...
The GetWorkBlobArgs are passed to the Layer::OnGetWorkBlob event which is supported for debugging onl...
The WorkspaceArgs are passed to both the Layer::OnSetWorkspace and Layer::OnGetWorkspace events.
long WorkspaceData
Get/set the handle to workspace data in GPU memory.
ulong WorkspaceSizeInBytes
Get/set the workspace memory size in bytes.
The AbsValLayer computes the absolute value of the input.
The AccuracyLayer computes the classification accuracy for a one-of-many classification task....
The ArgMaxLayer computes the index of the K max values for each datum across all dimensions ....
The Binomial Normal Log Likelihood Layer.
The BatchNormLayer normalizes the input to have 0-mean and/or unit (1) variance across the batch....
The BatchReindexLayer provides an index into the input blob along its first axis.
The BiasLayer computes a sum of two input Blobs, with the shape of the latter Blob 'broadcast' to mat...
The ClipLayer provides a neuron layer that clips the data to fit within the [min,max] range....
The ConcatLayer takes at least two Blobs and concatenates them along either the num or channel dimen...
The ConstantLayer provides a layer that just outputs a constant value. This layer is initialized with...
The ContrastiveLossLayer computes the contrastive loss where . This layer is initialized with the My...
The ConvolutionLayer convolves the input image with a bank of learned filters, and (optionally) adds ...
The CropLayer takes a Blob and crops it to the shape specified by the second input Blob,...
The DataLayer loads data from the IXImageDatabase database. This layer is initialized with the MyCaff...
The DataNormalizerLayer normalizes the input data (and optionally label) based on the normalization o...
The DebugLayer merely stores, up to max_stored_batches, batches of input which are then optionally us...
The DeconvolutionLayer convolves the input with a bank of learned filters, and (optionally) adds bias...
During training only, sets a random portion of the input to 0, adjusting the rest of the vector magnitude accordingly.
The DummyDataLayer provides data to the Net generated by a Filler. This layer is initialized with the...
The ELULayer computes exponential linear unit non-linearity . This layer is initialized with the MyCa...
The EltwiseLayer computes elementwise operations, such as product and sum, along multiple input blobs...
The EmbedLayer is a layer for learning 'embeddings' of one-hot vector input. This layer is initialize...
The EuclideanLossLayer computes the Euclidean (L2) loss for real-valued regression tasks.
The ExpLayer which computes the exponential of the input. This layer is initialized with the MyCaffe....
The FilterLayer takes two+ Blobs, interprets last Blob as a selector and filters remaining Blobs acco...
The FlattenLayer reshapes the input Blob into flat vectors This layer is initialized with the MyCaffe...
The GradientScaleLayer which scales the deltas during the backpropagation. This layer is initialized ...
The HingeLossLayer computes the hinge loss for a one-of-many classification task. This layer is initi...
The Im2ColLayer is a helper layer for image operations that rearranges image regions into column vect...
The ImageDataLayer loads data from the image files located in the root directory specified....
The InfogainLossLayer is a generalization of the SoftmaxWithLossLayer that takes an 'information gain' (...
The InnerProductLayer, also known as a 'fully-connected' layer, computes the inner product with a set ...
The "Local Response Normalization" LRNLayer is used to normalize the input in a local region across o...
The LSTMLayer processes sequential inputs using a 'Long Short-Term Memory' (LSTM) [1] style recurrent...
[DEPRECATED - use LSTMAttentionLayer instead with enable_attention = false] The LSTMSimpleLayer is a...
The LSTMUnitLayer is a helper for LSTMLayer that computes a single timestep of the non-linearity of t...
DEPRECATED (use the DataLayer's DataLabelMappingParameter instead) The LabelMappingLayer converts origi...
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
double backward_timing
Returns the timing of the last backward pass in milliseconds.
float[] convertF(T[] rg)
Converts an array of float values into an array of generic values.
virtual WorkspaceArgs getWorkspace()
Returns the WorkspaceArgs used to share a workspace between Layers.
long convert_to_full(int nCount, long hMem)
Convert half memory to full memory.
double backward_timing_average
Returns the average timing of the backward passes in milliseconds.
virtual void setup_internal_blobs(BlobCollection< T > col)
Derived layers should add all of their internal blobs to the 'col' provided.
GetIterationArgs getCurrentIteration()
Fires the OnGetIteration event to query the current iteration.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void SetNetReshapeRequest()
Called by the Net when requesting a reshape.
EventHandler< WorkspaceArgs > OnGetWorkspace
Specifies the OnGetWorkspace event that fires when the getWorkspace() function is called by a layer t...
void setShapes(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Set the internal shape sizes - used when determining if a Reshape is necessary.
virtual bool SupportsPostProcessing
Should return true when the PostProcessing methods are overridden.
virtual int MinBottomBlobs
Returns the minimum number of bottom (input) Blobs required by the Layer, or -1 if no minimum number ...
virtual void SetOnDebug(EventHandler< GetWorkBlobArgs< T > > fn)
Set the OnDebug event.
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
virtual bool AutoTopBlobs
Returns whether "anonymous" top (output) Blobs are created automatically by the Layer.
virtual void dispose()
Releases all GPU and host resources used by the Layer.
void dispose(ref Blob< T > b)
Helper method used to dispose internal blobs.
int val_at(T[] rg, int nIdx)
Returns the integer value at a given index in a generic array.
virtual List< Tuple< string, int, double > > PostProcessOutput(Blob< T > blobSofmtax, int nK=1)
The PostProcessOutput allows derivative data layers to post-process the results, converting them back...
void set_loss(int nTopIdx, double dfLoss)
Sets the loss associated with a top Blob at a given index.
double loss(int nTopIdx)
Returns the scalar loss associated with the top Blob at a given index.
abstract void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Performs Layer specific setup. Derived layers should override this function as well as the Reshape fu...
bool shareLayerBlob(Blob< T > b, List< int > rgMinShape)
Attempts to share a Layer Blob if another parameter Blob with the same name and acceptable size is fo...
T m_tZero
Specifies a generic type equal to 0.0.
void SetEnablePassthrough(bool bEnable)
Enables/disables the pass-through mode.
void Backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Given the top Blob error gradients, compute the bottom Blob error gradients.
virtual bool setWorkspace(ulong lSizeInBytes)
Sets the workspace size (in bytes) and returns true if set, false otherwise.
T m_tOne
Specifies a generic type equal to 1.0.
void ConvertToBase(BlobCollection< T > col)
ConvertToBase converts any blobs in a collection that are in half size to the base size.
abstract void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
This forward abstract function must be overridden by each derived Layer class to compute the top (outp...
virtual bool ReInitializeParameters(WEIGHT_TARGET target)
Re-initialize the parameters of the layer.
bool shareParameter(Blob< T > b, List< int > rgMinShape, bool bAllowEndsWithComparison=false)
Attempts to share a parameter Blob if another parameter Blob with the same name and acceptable size i...
virtual List< Tuple< string, int, double > > PostProcessLogitsOutput(int nCurIdx, Blob< T > blobLogits, Layer< T > softmax, int nAxis, int nK=1)
The PostProcessLogitsOutput allows derivative data layers to post-process the results,...
bool compareShapes(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Compares the shapes of the top and bottom blobs, returning true if they are the same, otherwise false.
EventHandler< GetIterationArgs > OnGetIteration
Specifies the OnGetIteration event that fires when a layer needs to get the current iteration from th...
LayerParameter.LayerType? parent_layer_type
Optionally, specifies the parent layer type (e.g. LOSS, etc.)
bool shareLayerBlobs(Layer< T > layer)
Attempts to share the Layer blobs and internal_blobs with matching names and sizes with those in anot...
void SetPhase(Phase phase)
Changes the layer's Phase to the one specified.
double Forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Given the bottom (input) Blobs, this function computes the top (output) Blobs and the loss.
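A typical training step pairs Forward and Backward using the signatures documented here; a sketch assuming layer, colBottom and colTop were constructed and shaped earlier:

// One training step through a single layer (setup omitted).
double dfLoss = layer.Forward(colBottom, colTop);

// Propagate gradients back to the (single) bottom blob.
List<bool> rgbPropagateDown = new List<bool>() { true };
layer.Backward(colTop, rgbPropagateDown, colBottom);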
void dispose(ref Layer< T > l)
Helper method used to dispose internal layers.
T[] convert(double[] rg)
Converts an array of double values into an array of generic values.
double forward_timing_average
Returns the average timing of the forward passes in milliseconds.
LayerParameter.LayerType type
Returns the LayerType of this Layer.
virtual string PostProcessOutput(int nIdx)
Convert the index to the word.
virtual int ExactNumTopBlobs
Returns the exact number of top (output) Blobs required by the Layer, or -1 if no exact number is req...
virtual bool AllowForceBackward(int nBottomIdx)
Returns whether to allow force_backward for a given bottom (input) Blob index.
void set_param_propagate_down(int nParamIdx, bool bPropagate)
Sets whether or not the Layer should compute gradients w.r.t. a parameter at a particular index given...
double forward_timing
Returns the timing of the last forward pass in milliseconds.
float convertF(T df)
Converts a generic to a float value.
bool m_bConvertTopOnBwd
Specifies whether or not to convert the top on the backward pass when using half sized memory (typica...
DictionaryMap< double > m_rgLoss
Specifies the loss values that indicate whether each top (output) Blob has a non-zero weight in the objective function.
virtual string PostProcessFullOutput(Blob< T > blobSoftmax)
The PostProcessFullOutput allows derivative data layers to post-process the results,...
T convert(float f)
Converts a float to a generic.
abstract void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Adjust the shapes of top blobs and internal buffers to accommodate the shapes of the bottom blobs.
bool m_bUseHalfSize
Specifies that the half size of the top (if any) should be converted to the base size.
BlobCollection< T > m_colInternalBlobs
Specifies internal blobs used by the layer.
bool m_bEnablePassthrough
Enables/disables the pass-through mode for the layer. Default = false.
double convertD(T df)
Converts a generic to a double value.
LayerParameter.LayerType? m_parentLayerType
Specifies the layer type of the parent.
virtual bool reshapeNeeded(BlobCollection< T > colBottom, BlobCollection< T > colTop, bool bReset=true)
Tests the shapes of both the bottom and top blobs and if they are the same as the previous sizing,...
Size size_at(Blob< T > b)
Returns the Size of a given two element Blob, such as one that stores Blob size information.
virtual int MaxTopBlobs
Returns the maximum number of top (output) Blobs required by the Layer, or -1 if no maximum number is...
T[] convert(float[] rg)
Converts an array of float values into an array of generic values.
void Dispose()
Releases all GPU and host resources used by the Layer.
virtual bool SupportsPreProcessing
Should return true when PreProcessing methods are overriden.
virtual bool EqualNumBottomTopBlobs
Returns true if the Layer requires an equal number of bottom (input) and top (output) Blobs.
virtual bool PreProcessInput(string strEncInput, int? nDecInput, BlobCollection< T > colBottom)
Preprocess the input data for the RUN phase.
bool param_propagate_down(int nParamIdx)
Returns whether or not the Layer should compute gradients w.r.t. a parameter at a particular index gi...
virtual int ExactNumBottomBlobs
Returns the exact number of bottom (input) Blobs required by the Layer, or -1 if no exact number is r...
EventHandler< GetWorkBlobArgs< T > > OnDebug
Specifies the OnGetWorkBlob event that is only supported when debugging to get a work blob from the p...
T convert(double df)
Converts a double to a generic.
virtual int MinTopBlobs
Returns the minimum number of top (output) Blobs required by the Layer, or -1 if no minimum number is...
bool m_bConvertBottom
Specifies whether or not the layer should convert the bottom when using half sized memory.
Phase m_phase
Specifies the Phase under which the Layer is run.
virtual void ResetOnDebug(EventHandler< GetWorkBlobArgs< T > > fn)
Reset the OnDebug event, disabling it.
virtual void SetNetParameterUsed(NetParameter np)
This function allows other layers to gather needed information from the NetParameters if any,...
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
void Setup(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Implements common Layer setup functionality.
void dispose(ref BlobCollection< T > rg, bool bSetToNull=true)
Dispose the blob collection.
void check_nan(Blob< T > b)
Checks a Blob for NaNs and throws an exception if found.
bool m_bReshapeOnForwardNeeded
Specifies whether or not a reshape on the forward pass is needed.
EventHandler< WorkspaceArgs > OnSetWorkspace
Specifies the OnSetWorkspace event that fires when the setWorkspace() function is called by a layer t...
Layer(CudaDnn< T > cuda, Log log, LayerParameter p)
The Layer constructor.
void CheckBlobCounts(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Called by the Layer::Setup function to check the number of bottom (input) and top (output) Blobs prov...
static Layer< T > Create(CudaDnn< T > cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db=null, TransferInput trxinput=null)
Create a new Layer based on the LayerParameter.
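A usage sketch, assuming a LayerParameter constructor that takes the LayerType, and that cuda, log and evtCancel already exist:

// Build the parameter describing the layer (constructor form assumed).
LayerParameter p = new LayerParameter(LayerParameter.LayerType.RELU);
p.name = "relu1";

// Create the layer; db and trxinput default to null.
Layer<float> layer = Layer<float>.Create(cuda, log, p, evtCancel);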
LayerParameter.LayerType m_type
Specifies the Layer type.
void SetLossWeights(BlobCollection< T > colTop)
Called by Layer::Setup to initialize the weights associated with any top (output) Blobs in the loss f...
LayerParameter layer_param
Returns the LayerParameter for this Layer.
virtual void ConnectLoss(LossLayer< T > layer)
Called to connect the loss OnLoss event to a specified layer (typically the data layer).
virtual int MaxBottomBlobs
Returns the maximum number of bottom (input) Blobs required by the Layer, or -1 if no maximum number ...
BlobCollection< T > blobs
Returns the collection of learnable parameter Blobs for the Layer.
virtual bool SupportsPostProcessingLogits
Should return true when the PostProcessingLogits methods are overridden.
BlobCollection< T > internal_blobs
Returns the collection of internal Blobs used by the Layer.
double[] convertD(T[] rg)
Converts an array of generic values into an array of double values.
virtual bool SupportsPostProcessingFullOutput
Should return true when PostProcessingFullOutput is supported.
abstract void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
This backward abstract function must be overridden by each derived Layer class to compute the bottom (...
BlobCollection< T > m_colBlobs
Specifies the learnable parameter Blobs of the Layer.
DictionaryMap< bool > m_rgbParamPropagateDown
Specifies whether or not to compute the learnable diff of each parameter Blob.
LayerParameter convertLayerParam(LayerParameter pChild, LayerParameter pParent)
Called to convert a parent LayerParameterEx, used in blob sharing, with a child layer parameter.
bool m_bConvertTopOnFwd
Specifies whether or not the layer should convert the top on the forward pass when using half sized m...
bool m_bNetReshapeRequest
Specifies whether the reshape is requested from a Net.Reshape call or not.
virtual BlobCollection< T > PreProcessInput(PropertySet customInput, out int nSeqLen, BlobCollection< T > colBottom=null)
The PreprocessInput allows derivative data layers to convert a property set of input data into the bo...
The LayerParameterEx class is used when sharing another Net to conserve GPU memory and extends the La...
BlobCollection< T > SharedBlobs
Returns the shared parameter Blobs.
Layer< T > SharedLayer
Returns the layer in the shared Net that matches this one.
BlobCollection< T > SharedLayerBlobs
Returns the shared Layer Blobs.
override LayerParameter Clone(bool bCloneBlobs)
Creates and returns a new copy of this instance.
LayerParameterEx(LayerParameter p, BlobCollection< T > colBlobs, BlobCollection< T > colLayerBlobs, Layer< T > sharedLayer)
The LayerParameterEx constructor.
The LogLayer computes the log of the input. This layer is initialized with the MyCaffe....
The LossLayer provides an interface for Layers that take two blobs as input – usually (1) prediction...
The "Mean-Variance Normalization" MVNLayer normalizes the input to have 0-mean and/or unit (1) varian...
The MathLayer which computes various mathematical functions of the input. This layer is initialized w...
The MemoryDataLayer provides data to the Net from memory. This layer is initialized with the MyCaffe....
The MemoryLossLayer provides a method of performing a custom loss functionality. Similar to the Memor...
The MultinomialLogisticLossLayer computes the multinomial logistic loss for a one-of-many classificati...
The PReLULayer computes the "Parameterized Rectified Linear Unit" non-linearity. This layer is initia...
The ParameterLayer passes its blob[0] data and diff to the top[0].
The PoolingLayer pools the input image by taking the max, average, etc. within regions....
The PowerLayer computes the power of the input. This layer is initialized with the MyCaffe....
The RNNLayer processes time-varying inputs using a simple recurrent neural network (RNN)....
The ReLULayer computes the "Rectifier Linear Unit" ReLULayer non-linearity, a classic for neural netw...
The ReductionLayer computes the 'reductions' – operations that return a scalar output Blob for an inp...
The ReshapeLayer reshapes the input Blob into an arbitrary-sized output Blob. This layer is initializ...
The SPPLayer does spatial pyramid pooling on the input image by taking the max, average,...
The ScaleLayer computes the elementwise product of two input Blobs, with the shape of the latter Blob...
The SigmoidCrossEntropyLayer computes the cross-entropy (logistic) loss and is often used for predic...
The SigmoidLayer is a neuron layer that calculates the sigmoid function, a classic choice for neural n...
The SilenceLayer ignores bottom blobs while producing no top blobs. (This is useful to suppress outp...
The SliceLayer takes a blob and slices it along either the num or channel dimensions outputting multi...
The SoftmaxCrossEntropy2Layer computes the cross-entropy (logistic) loss and is often used for predi...
The SoftmaxCrossEntropyLossLayer computes the cross-entropy (logistic) loss and is often used for pr...
The SoftmaxLayer computes the softmax function. This layer is initialized with the MyCaffe....
Computes the multinomial logistic loss for a one-of-many classification task, passing real-valued pre...
The SplitLayer creates a 'split' path in the network by copying the bottom blob into multiple top blo...
The SwishLayer provides a novel activation function that tends to work better than ReLU....
The TanhLayer is a neuron layer that calculates the tanh function, popular with auto-encoders....
The ThresholdLayer is a neuron layer that tests whether the input exceeds a threshold: outputs 1 for ...
The TileLayer copies a Blob along specified dimensions. This layer is initialized with the MyCaffe....
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
List< double > loss_weight
Specifies the loss weight.
LayerType type
Specifies the type of this LayerParameter.
Phase phase
Specifies the Phase for which this LayerParameter is run.
LayerType
Specifies the layer type.
override string ToString()
Returns a string representation of the LayerParameter.
List< BlobProto > blobs
Specifies the blobs of the LayerParameter.
virtual LayerParameter Clone(bool bCloneBlobs)
Creates a new copy of this instance of the parameter.
Specifies the parameters used to create a Net.
The ILayerCreator interface is implemented by each MyCaffe.layers.x layer extension dll and is used t...
Layer< double > CreateDouble(CudaDnn< double > cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db)
Create the layers when using the double base type.
Layer< float > CreateSingle(CudaDnn< float > cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db)
Create the layers when using the float base type.
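A skeletal implementation of this interface, as an extension dll might provide it (MyCustomLayer is a hypothetical layer type):

// Each MyCaffe.layers.x extension dll exposes a type like this, which
// createDynamicLayer discovers by reflection and calls for the base type in use.
public class LayerFactory : ILayerCreator
{
    public Layer<double> CreateDouble(CudaDnn<double> cuda, Log log,
        LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db)
    {
        return new MyCustomLayer<double>(cuda, log, p); // hypothetical layer
    }

    public Layer<float> CreateSingle(CudaDnn<float> cuda, Log log,
        LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db)
    {
        return new MyCustomLayer<float>(cuda, log, p); // hypothetical layer
    }
}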
The IXDatabaseBase interface defines the general interface to the in-memory database.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
Phase
Defines the Phase under which to run a Net.
The MyCaffe.common namespace contains common MyCaffe classes.
WEIGHT_TARGET
Defines the type of weight to target in re-initializations.
The MyCaffe.db.image namespace contains all image database related classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...