2using System.Collections.Generic;
52 m_blobIp1 =
new Blob<T>(cuda, log);
54 m_blobIp2 =
new Blob<T>(cuda, log);
56 m_blobMod =
new Blob<T>(cuda, log);
58 m_blobBtm =
new Blob<T>(cuda, log);
120 if (m_ip1Layer ==
null)
133 addBtmTop(colBottom[0], m_blobIp1);
134 m_ip1Layer.Setup(m_colBtm, m_colTop);
135 blobs.Add(m_ip1Layer.blobs);
138 if (m_modLayer ==
null)
140 if (
m_param.
glu_param.modulation == param.tft.GluParameter.MODULATION.SIGMOID)
147 addBtmTop(m_blobIp1, m_blobMod);
148 m_modLayer.
Setup(m_colBtm, m_colTop);
156 if (m_ip2Layer ==
null)
169 addBtmTop(colBottom[0], m_blobIp2);
170 m_ip2Layer.
Setup(m_colBtm, m_colTop);
186 addBtmTop(colBottom[0], m_blobIp1);
187 m_ip1Layer.Reshape(m_colBtm, m_colTop);
189 addBtmTop(m_blobIp1, m_blobMod);
190 m_modLayer.
Reshape(m_colBtm, m_colTop);
192 addBtmTop(colBottom[0], m_blobIp2);
193 m_ip2Layer.
Reshape(m_colBtm, m_colTop);
213 addBtmTop(colBottom[0], m_blobIp1);
214 m_ip1Layer.Forward(m_colBtm, m_colTop);
216 addBtmTop(m_blobIp1, m_blobMod);
217 m_modLayer.
Forward(m_colBtm, m_colTop);
219 addBtmTop(colBottom[0], m_blobIp2);
220 m_ip2Layer.
Forward(m_colBtm, m_colTop);
247 addBtmTop(m_blobBtm, m_blobIp2);
248 m_ip2Layer.
Backward(m_colTop, rgbPropagateDown, m_colBtm);
250 addBtmTop(m_blobIp1, m_blobMod);
251 m_modLayer.
Backward(m_colTop, rgbPropagateDown, m_colBtm);
253 addBtmTop(colBottom[0], m_blobIp1);
254 m_ip1Layer.Backward(m_colTop, rgbPropagateDown, m_colBtm);
257 m_cuda.add(colBottom[0].count(), m_blobBtm.
gpu_diff, colBottom[0].gpu_diff, colBottom[0].mutable_gpu_diff);
The Log class provides general output in text form.
void FAIL(string str)
Causes a failure which throws an exception with the descriptive text.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void Clear(bool bDispose=false)
Remove all items from the collection.
void ReshapeLike(BlobCollection< T > src)
Reshapes all blobs in the collection to the sizes of the source.
The Blob is the main holder of data that moves through the Layers of the Net.
long mutable_gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
void CopyFrom(Blob< T > src, int nSrcOffset, int nDstOffset, int nCount, bool bCopyData, bool bCopyDiff)
Copy from a source Blob.
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
string Name
Get/set the name of the Blob.
long gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void Backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Given the top Blob error gradients, compute the bottom Blob error gradients.
double Forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Given the bottom (input) Blobs, this function computes the top (output) Blobs and the loss.
abstract void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Adjust the shapes of top blobs and internal buffers to accommodate the shapes of the bottom blobs.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
void Setup(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Implements common Layer setup functionality.
static Layer< T > Create(CudaDnn< T > cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db=null, TransferInput trxinput=null)
Create a new Layer based on the LayerParameter.
LayerParameter.LayerType m_type
Specifies the Layer type.
BlobCollection< T > blobs
Returns the collection of learnable parameter Blobs for the Layer.
LayerParameter convertLayerParam(LayerParameter pChild, LayerParameter pParent)
Called to convert a parent LayerParameterEx, used in blob sharing, with a child layer parameter.
The GluLayer implements the Gated Linear Unit layer.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the top (output) blobs.
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: x
override void dispose()
Releases all GPU and host resources used by the Layer.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: y
GluLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The constructor.
override void setup_internal_blobs(BlobCollection< T > col)
Derived layers should add all internal blobs to the 'col' provided.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the stacked embedding numeric and categorical value inputs.
Specifies whether to use the NVIDIA cuDnn version or Caffe version of a given forward/backward operation.
Engine engine
Specifies the Engine in use.
Engine
Defines the type of engine to use.
double sigma_init
Specifies the initialization value for the sigma weight and sigma bias used when 'enable_noise' = true.
FillerParameter weight_filler
The filler for the weights.
int axis
Specifies the first axis to be lumped into a single inner product computation; all preceding axes are retained in the output.
bool enable_noise
Enable/disable noise in the inner-product layer (default = false).
FillerParameter bias_filler
The filler for the bias.
uint num_output
The number of outputs for the layer.
bool bias_term
Whether to have bias terms or not.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
SigmoidParameter sigmoid_param
Returns the parameter set when initialized with LayerType.SIGMOID
GluParameter glu_param
Returns the parameter set when initialized with LayerType.GLU
InnerProductParameter inner_product_param
Returns the parameter set when initialized with LayerType.INNERPRODUCT
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers.tft namespace contains all TFT related layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.