using System.Collections.Generic;

// True when a single slope value is shared across all channels.
bool m_bChannelShared;

// Constructor: create the internal blobs used by the layer.
m_blobMultiplier = new Blob<T>(cuda, log);
m_blobBackwardBuff = new Blob<T>(cuda, log);
m_blobBottomMemory = new Blob<T>(cuda, log);

// setup_internal_blobs: add each internal blob to the collection provided.
col.Add(m_blobMultiplier);
col.Add(m_blobBackwardBuff);
col.Add(m_blobBottomMemory);
base.ReInitializeParameters(target);
// LayerSetUp: validate the input and size the learnable slope blob.
m_log.CHECK_GE(colBottom[0].num_axes, 2, "Number of axes of bottom must be >= 2");

int nChannels = colBottom[0].channels;

// With channel sharing a single slope value is learned, so the slope shape
// stays empty; otherwise one slope is learned per channel.
List<int> rgSlopeShape = new List<int>();
if (!m_bChannelShared)
    rgSlopeShape.Add(nChannels);

blobSlope.Reshape(rgSlopeShape);
filler.Fill(blobSlope);

// Verify the slope blob sizing is consistent with channel sharing.
if (m_bChannelShared)

// Size the helper buffers to one full sample (all elements from axis 1 on).
List<int> rgShape = new List<int>() { colBottom[0].count(1) };
m_blobMultiplier.Reshape(rgShape);
m_blobBackwardBuff.Reshape(rgShape);
// Reshape: validate the input and handle in-place computation.
m_log.CHECK_GE(colBottom[0].num_axes, 2, "Number of axes of bottom blob must be >= 2.");

// In-place computation: size the memory used to save the bottom data.
if (colBottom[0] == colTop[0])
// forward: gather the GPU handles and sizing, then run the PReLU kernel.
long hBottomData = colBottom[0].gpu_data;
long hTopData = colTop[0].mutable_gpu_data;
int nCount = colBottom[0].count();
int nDim = colBottom[0].count(2);
int nChannels = colBottom[0].channels;

// With channel sharing, dividing the channel index by nChannels maps every
// channel onto the single shared slope.
int nDivFactor = m_bChannelShared ? nChannels : 1;

// In-place computation: save the bottom data before it is overwritten.
if (colTop[0] == colBottom[0])

m_cuda.prelu_fwd(nCount, nChannels, nDim, hBottomData, hTopData, hSlopeData, nDivFactor);
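The elementwise rule applied by prelu_fwd can be sketched on the CPU as follows; this is an illustrative reconstruction from the arguments above (the helper name and float element type are assumptions), not the actual CUDA kernel:

static void PReluForwardCpu(float[] rgBottom, float[] rgTop, float[] rgSlope,
                            int nCount, int nChannels, int nDim, int nDivFactor)
{
    for (int i = 0; i < nCount; i++)
    {
        // Map the flat index to its channel; nDivFactor == nChannels
        // collapses every channel onto the single shared slope.
        int nC = (i / nDim) % nChannels / nDivFactor;
        rgTop[i] = (rgBottom[i] > 0) ? rgBottom[i] : rgSlope[nC] * rgBottom[i];
    }
}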
// backward: compute the gradients w.r.t. the slope parameter and the input.
long hBottomData = colBottom[0].gpu_data;
long hTopDiff = colTop[0].gpu_diff;
int nCount = colBottom[0].count();
int nDim = colBottom[0].count(2);
int nChannels = colBottom[0].channels;

// In-place computation: restore the bottom data saved during the forward pass.
if (colTop[0] == colBottom[0])
    hBottomData = m_blobBottomMemory.gpu_data;

long hSlopeDiff = m_colBlobs[0].mutable_gpu_diff;
int nCDim = nChannels * nDim;

m_cuda.prelu_bwd_param(nCDim, colBottom[0].num, colTop[0].offset(1), hTopDiff, hBottomData, m_blobBackwardBuff.mutable_gpu_diff);

// Channel-shared: reduce the buffered per-element diffs to the single shared slope diff.
if (m_bChannelShared)

if (rgbPropagateDown[0])
{
    long hBottomDiff = colBottom[0].mutable_gpu_diff;
    int nDivFactor = m_bChannelShared ? nChannels : 1;

    m_cuda.prelu_bwd(nCount, nChannels, nDim, hTopDiff, hBottomData, hBottomDiff, hSlopeData, nDivFactor);
}
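Both gradients reduce to a simple elementwise rule over the same channel indexing. A minimal CPU sketch under the same assumptions as the forward sketch (rgSlopeDiff is expected to be zero-initialized; in the real layer the reduction is performed by the prelu_bwd_param kernel and the helper buffers):

static void PReluBackwardCpu(float[] rgTopDiff, float[] rgBottomData, float[] rgBottomDiff,
                             float[] rgSlope, float[] rgSlopeDiff,
                             int nCount, int nChannels, int nDim, int nDivFactor)
{
    for (int i = 0; i < nCount; i++)
    {
        int nC = (i / nDim) % nChannels / nDivFactor;
        // dE/dx: pass the gradient through for positive inputs, scale by the slope otherwise.
        rgBottomDiff[i] = rgTopDiff[i] * ((rgBottomData[i] > 0) ? 1.0f : rgSlope[nC]);
        // dE/da: accumulate dE/dy * x over the non-positive inputs.
        if (rgBottomData[i] <= 0)
            rgSlopeDiff[nC] += rgTopDiff[i] * rgBottomData[i];
    }
}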
The Log class provides general output in text form.
void WriteLine(string str, bool bOverrideEnabled=false, bool bHeader=false, bool bError=false, bool bDisable=false)
Write a line of output.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
void CHECK_GE(double df1, double df2, string str)
Test whether one number is greater than or equal to another.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void ReshapeLike(BlobCollection< T > src)
Reshapes all blobs in the collection to the sizes of the source.
The Blob is the main holder of data that moves through the Layers of the Net.
void SetData(T[] rgData, int nCount=-1, bool bSetCount=true)
Sets a number of items within the Blob's data.
long mutable_gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use the List<int> overload of Reshape instead.
BLOB_TYPE type
Returns the BLOB_TYPE of the Blob.
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
string Name
Get/set the name of the Blob.
long gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Abstract Filler class used to fill blobs with values.
void Fill(Blob< T > b)
Fill the blob with values based on the actual filler used.
static Filler< T > Create(CudaDnn< T > cuda, Log log, FillerParameter p)
Create a new Filler instance.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
T m_tOne
Specifies a generic type equal to 1.0.
bool shareParameter(Blob< T > b, List< int > rgMinShape, bool bAllowEndsWithComparison=false)
Attempts to share a parameter Blob if another parameter Blob with the same name and acceptable size is found.
virtual bool reshapeNeeded(BlobCollection< T > colBottom, BlobCollection< T > colTop, bool bReset=true)
Tests the shapes of both the bottom and top blobs and, if they are the same as the previous sizing, indicates that no reshape is needed.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
BlobCollection< T > m_colBlobs
Specifies the learnable parameter Blobs of the Layer.
DictionaryMap< bool > m_rgbParamPropagateDown
Specifies whether or not to compute the learnable diff of each parameter Blob.
The NeuronLayer is an interface for layers that take one blob as input (x) and produce one equally-sized blob as output (y).
The PReLULayer computes the "Parameterized Rectified Linear Unit" non-linearity, where the negative-region slope is a learned parameter. This layer is initialized with the MyCaffe.param.PReLUParameter.
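In symbols, with learned slope a_i (a single shared value, or one per channel):

f(x_i) = \max(0, x_i) + a_i \min(0, x_i), \qquad
\frac{\partial f}{\partial x_i} = \begin{cases} 1, & x_i > 0 \\ a_i, & x_i \le 0 \end{cases}, \qquad
\frac{\partial f}{\partial a_i} = \begin{cases} 0, & x_i > 0 \\ x_i, & x_i \le 0 \end{cases}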
override bool ReInitializeParameters(WEIGHT_TARGET target)
Re-initialize the parameters of the layer.
override void setup_internal_blobs(BlobCollection< T > col)
Derived layers should add all internal blobs to the 'col' provided.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward operation
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the PReLU inputs.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
PReLULayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The PReLULayer constructor.
override void dispose()
Releases all GPU and host resources used by the Layer.
Specifies the filler parameters used to create each Filler.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
LayerType
Specifies the layer type.
PReLUParameter prelu_param
Returns the parameter set when initialized with LayerType.PRELU.
Specifies the parameters for the PReLULayer.
bool channel_shared
Specifies whether or not slope parameters are shared across channels.
FillerParameter filler
Specifies the filler used for the initial value of the slopes a_i. The default sets a_i = 0.25 for all i.
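A minimal construction sketch, assuming a CudaDnn<float> connection 'cuda' and a Log 'log' already exist (the LayerParameter and FillerParameter constructor arguments shown are assumptions for illustration):

LayerParameter p = new LayerParameter(LayerParameter.LayerType.PRELU);
p.prelu_param.channel_shared = false;                          // learn one slope per channel
p.prelu_param.filler = new FillerParameter("constant", 0.25);  // assumed ctor: start all slopes at 0.25
PReLULayer<float> layer = new PReLULayer<float>(cuda, log, p);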
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
BLOB_TYPE
Defines the type of data held by a given Blob.
WEIGHT_TARGET
Defines the type of weight to target in re-initializations.
The MyCaffe.fillers namespace contains all fillers including the Filler class.
The MyCaffe.layers namespace contains all layers that have a solidified code base.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.