2using System.Collections.Generic;
30 long m_hBottomDesc = 0;
51 if (m_hBottomDesc != 0)
53 m_cuda.FreeTensorDesc(m_hBottomDesc);
59 m_cuda.FreeTensorDesc(m_hTopDesc);
65 m_cuda.FreeCuDNN(m_hCudnn);
86 m_hCudnn =
m_cuda.CreateCuDNN();
87 m_hBottomDesc =
m_cuda.CreateTensorDesc();
88 m_hTopDesc =
m_cuda.CreateTensorDesc();
98 base.Reshape(colBottom, colTop);
105 int nN = colBottom[0].num;
106 int nK = colBottom[0].channels;
107 int nH = colBottom[0].height;
108 int nW = colBottom[0].width;
169 long hBottomData = colBottom[0].gpu_data;
170 long hTopData = colTop[0].mutable_gpu_data;
171 int nCount = colBottom[0].count();
173 m_cuda.sigmoid_fwd(nCount, hBottomData, hTopData);
195 long hTopData = colTop[0].gpu_data;
196 long hTopDiff = colTop[0].gpu_diff;
197 long hBottomDiff = colBottom[0].mutable_gpu_diff;
198 int nCount = colBottom[0].count();
200 m_cuda.sigmoid_bwd(nCount, hTopDiff, hTopData, hBottomDiff);
215 long hBottomData = colBottom[0].gpu_data;
216 long hTopData = colTop[0].mutable_gpu_data;
218 m_cuda.SigmoidForward(m_hCudnn,
m_tOne, m_hBottomDesc, hBottomData,
m_tZero, m_hTopDesc, hTopData);
240 if (!rgbPropagateDown[0])
243 long hTopData = colTop[0].gpu_data;
244 long hTopDiff = colTop[0].gpu_diff;
245 long hBottomData = colBottom[0].gpu_data;
246 long hBottomDiff = colBottom[0].mutable_gpu_diff;
248 m_cuda.SigmoidBackward(m_hCudnn,
m_tOne, m_hTopDesc, hTopData, m_hTopDesc, hTopDiff, m_hBottomDesc, hBottomData,
m_tZero, m_hBottomDesc, hBottomDiff);
The Log class provides general output in text form.
The BlobCollection contains a list of Blobs.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
T m_tZero
Specifies a generic type equal to 0.0.
T m_tOne
Specifies a generic type equal to 1.0.
bool m_bUseHalfSize
Specifies that the half size of the top (if any) should be converted to the base size.
virtual bool reshapeNeeded(BlobCollection< T > colBottom, BlobCollection< T > colTop, bool bReset=true)
Tests the shapes of both the bottom and top blobs and if they are the same as the previous sizing,...
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The NeuronLayer is an interface for layers that take one blob as input (x) and produce only equally-s...
The SigmoidLayer is a neuron layer that calculates the sigmoid function, a classic choice for neural n...
void forward_cuda(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using the Engine.CAFFE.
SigmoidLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The SigmoidLayer constructor.
override void dispose()
Releases all GPU and host resources used by the Layer.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer to run in either Engine.CAFFE or Engine.CUDNN mode.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using either the Engine.CAFFE or Engine.CUDNN mode.
void backward_cuda(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t the sigmoid inputs using the Engine.CAFFE.
void backward_cudnn(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t the sigmoid inputs using the Engine.CUDNN mode.
void forward_cudnn(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation using the Engine.CUDNN mode.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t the sigmoid inputs using either the Engine.CAFFE or Engine....
Specifies the base parameter for all layers.
SigmoidParameter sigmoid_param
Returns the parameter set when initialized with LayerType.SIGMOID
bool use_halfsize
Specifies whether or not to use half sized memory or not.
LayerType
Specifies the layer type.
bool useCudnn()
Queries whether or not to use NVIDIA's cuDnn.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...