using System.Collections.Generic;

// Handle to the cuDNN bottom tensor descriptor (the matching m_hTopDesc and m_hCudnn
// handles are declared alongside and used below).
long m_hBottomDesc = 0;

// From dispose(): free the tensor descriptors and the cuDNN handle.
if (m_hBottomDesc != 0)
    m_cuda.FreeTensorDesc(m_hBottomDesc);

m_cuda.FreeTensorDesc(m_hTopDesc);
m_cuda.FreeCuDNN(m_hCudnn);

// From LayerSetUp(): create the cuDNN handle and the tensor descriptors.
m_hCudnn = m_cuda.CreateCuDNN();
m_hBottomDesc = m_cuda.CreateTensorDesc();
m_hTopDesc = m_cuda.CreateTensorDesc();

// From Reshape(): size both descriptors to match the bottom blob.
base.Reshape(colBottom, colTop);

int nN = colBottom[0].num;
int nK = colBottom[0].channels;
int nH = colBottom[0].height;
int nW = colBottom[0].width;

m_cuda.SetTensorDesc(m_hBottomDesc, nN, nK, nH, nW);
m_cuda.SetTensorDesc(m_hTopDesc, nN, nK, nH, nW);

// From forward_cuda(): run the ELU forward kernel (Engine.CAFFE mode).
int nCount = colTop[0].count();
long hBottomData = colBottom[0].gpu_data;
long hTopData = colTop[0].mutable_gpu_data;

m_cuda.elu_fwd(nCount, hBottomData, hTopData, dfAlpha);

// From backward_cuda(): run the ELU backward kernel (Engine.CAFFE mode).
if (!rgbPropagateDown[0])
    return;

int nCount = colTop[0].count();
long hTopDiff = colTop[0].gpu_diff;
long hTopData = colTop[0].gpu_data;
long hBottomData = colBottom[0].gpu_data;
long hBottomDiff = colBottom[0].mutable_gpu_diff;

m_cuda.elu_bwd(nCount, hTopDiff, hTopData, hBottomData, hBottomDiff, dfAlpha);

// From forward_cudnn(): run the forward pass through cuDNN.
long hBottomData = colBottom[0].gpu_data;
long hTopData = colTop[0].mutable_gpu_data;

m_cuda.EluForward(m_hCudnn,
                  m_tOne, m_hBottomDesc, hBottomData,
                  m_tZero, m_hTopDesc, hTopData);

// From backward_cudnn(): run the backward pass through cuDNN.
if (!rgbPropagateDown[0])
    return;

long hTopData = colTop[0].gpu_data;
long hTopDiff = colTop[0].gpu_diff;
long hBottomData = colBottom[0].gpu_data;
long hBottomDiff = colBottom[0].mutable_gpu_diff;

m_cuda.EluBackward(m_hCudnn,
                   m_tOne, m_hTopDesc, hTopData, m_hTopDesc, hTopDiff, m_hBottomDesc, hBottomData,
                   m_tZero, m_hBottomDesc, hBottomDiff);
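The element-wise math behind the m_cuda.elu_fwd and m_cuda.elu_bwd calls can be written out as a CPU reference. The two helpers below are hypothetical (the real kernels run on the GPU and are addressed through the long memory handles above), but they show what each call computes per element and why the backward pass needs both the top data and the bottom data:

using System;

static class EluReference
{
    // Mirrors elu_fwd: y = x when x > 0, y = alpha * (exp(x) - 1) when x <= 0.
    public static void Forward(float[] rgBottomData, float[] rgTopData, double dfAlpha)
    {
        for (int i = 0; i < rgBottomData.Length; i++)
        {
            float fX = rgBottomData[i];
            rgTopData[i] = (fX > 0) ? fX : (float)(dfAlpha * (Math.Exp(fX) - 1.0));
        }
    }

    // Mirrors elu_bwd: dx = dy when x > 0, dx = dy * (y + alpha) when x <= 0.
    // Because y = alpha * (exp(x) - 1) on the negative branch, (y + alpha) equals
    // alpha * exp(x), which is the derivative of that branch.
    public static void Backward(float[] rgTopDiff, float[] rgTopData, float[] rgBottomData,
                                float[] rgBottomDiff, double dfAlpha)
    {
        for (int i = 0; i < rgTopDiff.Length; i++)
        {
            rgBottomDiff[i] = (rgBottomData[i] > 0) ? rgTopDiff[i]
                : (float)(rgTopDiff[i] * (rgTopData[i] + dfAlpha));
        }
    }
}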
The Log class provides general output in text form.
The BlobCollection contains a list of Blobs.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
The ELULayer computes the exponential linear unit non-linearity. This layer is initialized with the MyCaffe.param.EluParameter.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using either the Engine.CAFFE or Engine.CUDNN mode.
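A minimal sketch of that dispatch, assuming the engine choice is read through m_param.elu_param.useCudnn() (the exact call chain is an assumption for illustration):

// Inside ELULayer<T> (sketch).
protected override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
{
    // Assumption: the cuDNN preference comes from the layer's EluParameter.
    if (!m_param.elu_param.useCudnn())
        forward_cuda(colBottom, colTop);   // Engine.CAFFE path: m_cuda.elu_fwd
    else
        forward_cudnn(colBottom, colTop);  // Engine.CUDNN path: m_cuda.EluForward
}

The backward override dispatches between backward_cuda and backward_cudnn in the same way.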
void forward_cudnn(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The forward computation using cuDNN.
override void dispose()
Releases all GPU and host resources used by the Layer.
void backward_cudnn(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the ELU value inputs.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the inputs using either the Engine.CAFFE or Engine.CUDNN mode.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer to run in either Engine.CAFFE or Engine.CUDNN mode.
ELULayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The ELULayer constructor
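A hedged usage sketch; the cuda, log, and p objects are assumed to come from the hosting code:

// Assumes 'cuda' is an open CudaDnn<float> connection, 'log' is a Log instance, and
// 'p' is a LayerParameter whose type is LayerType.ELU (construction of 'p' not shown).
ELULayer<float> elu = new ELULayer<float>(cuda, log, p);

The cuDNN handle and tensor descriptors shown in the excerpts above are created later in LayerSetUp, not in the constructor.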
void forward_cuda(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The forward computation using Cuda.
void backward_cuda(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the ELU value inputs.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
T m_tZero
Specifies a generic type equal to 0.0.
T m_tOne
Specifies a generic type equal to 1.0.
virtual bool reshapeNeeded(BlobCollection< T > colBottom, BlobCollection< T > colTop, bool bReset=true)
Tests the shapes of both the bottom and top blobs; if they are the same as the previous sizing, no reshape is needed.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The NeuronLayer is an interface for layers that take one blob as input (x) and produce one equally-sized blob as output (y).
bool useCudnn()
Queries whether or not to use NVIDIA's cuDnn.
double alpha
Described in Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) by Clevert, Unterthiner, and Hochreiter (2015).
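For reference, alpha scales the negative branch of the activation and is the value the output saturates to for large negative inputs:

    f(x) = x                      for x > 0
    f(x) = alpha * (exp(x) - 1)   for x <= 0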
Specifies the base parameter for all layers.
EluParameter elu_param
Returns the parameter set when initialized with LayerType.ELU
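A small sketch of reaching the ELU settings through elu_param; treating alpha as a writable property is an assumption here:

// Assumes 'p' is the LayerParameter handed to the ELULayer constructor.
p.elu_param.alpha = 1.0;             // assumed writable; 1.0 is the common ELU default
double dfAlpha = p.elu_param.alpha;  // the value passed to elu_fwd/elu_bwd as dfAlpha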
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.