using System.Collections.Generic;
// From the LSTMUnitLayer constructor: allocate the internal blob used to
// cache the activated gate values.
m_blobXActs = new Blob<T>(cuda, log);

// From dispose(): release the internal blob if allocated (standard dispose
// pattern, completed from context).
if (m_blobXActs != null)
{
    m_blobXActs.Dispose();
    m_blobXActs = null;
}
// From Reshape(): validate the bottom blob shapes.
int nNumInstances = colBottom[0].shape(1);

for (int i = 0; i < colBottom.Count; i++)
{
    // bottom[2] holds the sequence continuation indicators (2 axes);
    // the other bottoms hold per-instance data (3 axes).
    if (i == 2)
        m_log.CHECK_EQ(2, colBottom[i].num_axes, "There should be 2 axes at bottom[2]");
    else
        m_log.CHECK_EQ(3, colBottom[i].num_axes, "There should be 3 axes at bottom[" + i.ToString() + "]");

    m_log.CHECK_EQ(1, colBottom[i].shape(0), "The shape(0) at bottom[" + i.ToString() + "] should be 1.");
    m_log.CHECK_EQ(nNumInstances, colBottom[i].shape(1), "The shape(1) at bottom[" + i.ToString() + "] should equal the number of instances (" + nNumInstances.ToString() + ")");
}

m_nHiddenDim = colBottom[0].shape(2);
m_log.CHECK_EQ(4 * m_nHiddenDim, colBottom[1].shape(2), "The bottom[1].shape(2) should equal 4 * the number of hidden dimensions (4 x " + m_nHiddenDim.ToString() + ")");
// From forward(): gather the GPU handles and run the fused LSTM unit kernel.
int nCount = colTop[1].count();
long hC_prev = colBottom[0].gpu_data;
long hX = colBottom[1].gpu_data;
long hCont = colBottom[2].gpu_data;
long hX_acts = m_blobXActs.mutable_gpu_data; // cache for the activated gate values (declaration restored from context)
long hC = colTop[0].mutable_gpu_data;
long hH = colTop[1].mutable_gpu_data;
int nXCount = colBottom[1].count();

m_cuda.lstm_unit_fwd(nCount, m_nHiddenDim, nXCount, hX, hX_acts, hC_prev, hCont, hC, hH);
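For reference, the single-timestep LSTM non-linearity that lstm_unit_fwd evaluates follows the standard Caffe LSTMUnit formulation (a sketch, with x split into the i, f, o, g gate pre-activations along the last axis of bottom[1]):

\[
\begin{aligned}
i_t &= \sigma(x_i), \quad f_t = \sigma(x_f), \quad o_t = \sigma(x_o), \quad g_t = \tanh(x_g),\\
c_t &= \delta_t \, f_t \odot c_{t-1} + i_t \odot g_t,\\
h_t &= o_t \odot \tanh(c_t),
\end{aligned}
\]

where \(\delta_t\) is the sequence continuation indicator from bottom[2], \(\sigma\) is the sigmoid function, and \(\odot\) denotes elementwise multiplication. The activated gate values are cached in m_blobXActs (hX_acts) for reuse in the backward pass.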
// From backward(): compute the gradients w.r.t. the previous cell state and the gate inputs.
m_log.CHECK(!rgbPropagateDown[2], "Cannot backpropagate to sequence indicators.");

if (!rgbPropagateDown[0] && !rgbPropagateDown[1])
    return;

int nCount = colTop[1].count();
long hC_prev = colBottom[0].gpu_data;
long hX_acts = m_blobXActs.gpu_data;
long hCont = colBottom[2].gpu_data;
long hC = colTop[0].gpu_data;
long hH = colTop[1].gpu_data;
long hC_diff = colTop[0].gpu_diff;
long hH_diff = colTop[1].gpu_diff;
long hC_prev_diff = colBottom[0].mutable_gpu_diff;
long hX_acts_diff = m_blobXActs.mutable_gpu_diff; // diff for the cached gate activations (declaration restored from context)
int nXCount = colBottom[1].count();
long hX_diff = colBottom[1].mutable_gpu_diff;

m_cuda.lstm_unit_bwd(nCount, m_nHiddenDim, nXCount, hC_prev, hX_acts, hC, hH, hCont, hC_diff, hH_diff, hC_prev_diff, hX_acts_diff, hX_diff);
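Correspondingly, lstm_unit_bwd applies the chain rule through those forward equations. A sketch of the gradients it is expected to produce (derived here from the formulas above, not quoted from the kernel source):

\[
\begin{aligned}
\tilde{c}'_t &= c'_t + o_t \left(1 - \tanh^2(c_t)\right) h'_t,\\
c'_{t-1} &= \delta_t \, f_t \odot \tilde{c}'_t,\\
i' &= g_t \odot \tilde{c}'_t, \quad
f' = \delta_t \, c_{t-1} \odot \tilde{c}'_t, \quad
o' = \tanh(c_t) \odot h'_t, \quad
g' = i_t \odot \tilde{c}'_t,
\end{aligned}
\]

with each gate diff then scaled by its activation derivative (\(\sigma'\) or \(1 - \tanh^2\)) to form hX_diff.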
The Log class provides general output in text form.
void CHECK(bool b, string str)
Test a flag for true.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
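A brief usage sketch of these two checks; the Log constructor argument and blob references are hypothetical:

Log log = new Log("test");                  // hypothetical log source name
log.CHECK(colBottom.Count == 3, "LSTMUnitLayer expects exactly 3 bottom blobs.");
log.CHECK_EQ(1, colBottom[0].shape(0), "The shape(0) at bottom[0] should be 1.");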
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void ReshapeLike(BlobCollection< T > src)
Reshapes all blobs in the collection to the sizes of the source.
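A minimal sketch of the members listed above; colSrc is a hypothetical collection whose shapes we want to copy:

BlobCollection<float> col = new BlobCollection<float>();
col.Add(new Blob<float>(cuda, log));   // append a new blob
int nItems = col.Count;                // number of blobs held
col.ReshapeLike(colSrc);               // reshape each blob to match colSrc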
The Blob is the main holder of data that moves through the Layers of the Net.
long mutable_gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
string Name
Get/set the name of the Blob.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
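A short sketch contrasting the read-only and mutable handles; the CudaDnn.set call used to zero the diff is an assumption about the CudaDnn interface, not taken from this page:

long hData = blob.gpu_data;           // read-only data handle
long hDiff = blob.mutable_gpu_diff;   // writable diff handle
m_cuda.set(blob.count(), hDiff, 0);   // assumed CudaDnn.set overload: fill the diff with zeros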
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
The LSTMUnitLayer is a helper for LSTMLayer that computes a single timestep of the non-linearity of the LSTM.
override bool AllowForceBackward(int nBottomIdx)
Returns true for all but bottom index = 2, for you can't propagate to the sequence continuation indicators.
LSTMUnitLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The LSTMUnitLayer constructor.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void setup_internal_blobs(BlobCollection< T > col)
Derivative layers should add all internal blobs to the 'col' provided.
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: prevtime, gatein, seqcon
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the LSTMUnit inputs.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation.
override void dispose()
Releases all GPU and host resources used by the Layer.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: cellst, hiddenst
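Putting the members above together, a hypothetical end-to-end sketch; the LSTM_UNIT enum value and the public Forward call follow the conventions used elsewhere in MyCaffe and are assumptions here:

LayerParameter p = new LayerParameter(LayerParameter.LayerType.LSTM_UNIT);  // assumed enum value
p.name = "lstm_unit1";

LSTMUnitLayer<float> layer = new LSTMUnitLayer<float>(cuda, log, p);
layer.LayerSetUp(colBottom, colTop);   // colBottom: prevtime, gatein, seqcon
layer.Reshape(colBottom, colTop);
layer.Forward(colBottom, colTop);      // colTop: cellst, hiddenst
layer.Dispose();                       // release GPU and host resources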
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.