2using System.Collections.Generic;
30 long m_hBottomDesc = 0;
52 if (m_hBottomDesc != 0)
54 m_cuda.FreeTensorDesc(m_hBottomDesc);
60 m_cuda.FreeTensorDesc(m_hTopDesc);
66 m_cuda.FreeCuDNN(m_hCudnn);
87 m_hCudnn =
m_cuda.CreateCuDNN();
88 m_hBottomDesc =
m_cuda.CreateTensorDesc();
89 m_hTopDesc =
m_cuda.CreateTensorDesc();
99 base.Reshape(colBottom, colTop);
106 int nN = colBottom[0].num;
107 int nK = colBottom[0].channels;
108 int nH = colBottom[0].height;
109 int nW = colBottom[0].width;
174 long hBottomData = colBottom[0].gpu_data;
175 long hTopData = colTop[0].mutable_gpu_data;
176 int nCount = colBottom[0].count();
178 m_cuda.tanh_fwd(nCount, hBottomData, hTopData);
202 long hTopData = colTop[0].gpu_data;
203 long hTopDiff = colTop[0].gpu_diff;
204 long hBottomDiff = colBottom[0].mutable_gpu_diff;
205 int nCount = colBottom[0].count();
207 m_cuda.tanh_bwd(nCount, hTopDiff, hTopData, hBottomDiff);
223 long hBottomData = colBottom[0].gpu_data;
224 long hTopData = colTop[0].mutable_gpu_data;
226 m_cuda.TanhForward(m_hCudnn,
m_tOne, m_hBottomDesc, hBottomData,
m_tZero, m_hTopDesc, hTopData);
250 if (!rgbPropagateDown[0])
253 long hTopData = colTop[0].gpu_data;
254 long hTopDiff = colTop[0].gpu_diff;
255 long hBottomData = colBottom[0].gpu_data;
256 long hBottomDiff = colBottom[0].mutable_gpu_diff;
258 m_cuda.TanhBackward(m_hCudnn,
m_tOne, m_hTopDesc, hTopData, m_hTopDesc, hTopDiff, m_hBottomDesc, hBottomData,
m_tZero, m_hBottomDesc, hBottomDiff);
The Log class provides general output in text form.
The BlobCollection contains a list of Blobs.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
T m_tZero
Specifies a generic type equal to 0.0.
T m_tOne
Specifies a generic type equal to 1.0.
bool m_bUseHalfSize
Specifies that the half size of the top (if any) should be converted to the base size.
virtual bool reshapeNeeded(BlobCollection< T > colBottom, BlobCollection< T > colTop, bool bReset=true)
Tests the shapes of both the bottom and top blobs and if they are the same as the previous sizing,...
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The NeuronLayer is an interface for layers that take one blob as input (x) and produce only equally-s...
The TanhLayer is a neuron layer that calculates the tanh function, popular with auto-encoders....
void forward_cudnn(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using the Engine.CUDNN mode.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t the inputs using either the Engine.CAFFE or Engine....
void forward_cuda(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using the Engine.CAFFE mode.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
TanhLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The TanhLayer constructor.
void backward_cuda(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the tanh inputs using the Engine.CAFFE mode.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer to run in either Engine.CAFFE or Engine.CUDNN mode.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using either the Engine.CAFFE or Engine.CUDNN mode.
void backward_cudnn(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the tanh inputs using the Engine.CUDNN mode.
Specifies the base parameter for all layers.
bool use_halfsize
Specifies whether or not to use half-sized memory.
LayerType
Specifies the layer type.
TanhParameter tanh_param
Returns the parameter set when initialized with LayerType.TANH
bool useCudnn()
Queries whether or not to use NVIDIA's cuDnn.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...