2using System.Collections.Generic;
5using System.Threading.Tasks;
63 m_blobSumMultiplier =
new Blob<T>(cuda, log);
72 if (m_blobSumMultiplier !=
null)
75 m_blobSumMultiplier =
null;
85 col.
Add(m_blobSumMultiplier);
127 List<int> rgTopShape =
Utility.Clone<
int>(colBottom[0].shape(), m_nAxis);
129 m_nNum = colBottom[0].count(0, m_nAxis);
130 m_nDim = colBottom[0].count(m_nAxis);
131 m_log.
CHECK_EQ(m_nNum, colTop[0].count(),
"The 'num' should equal the top[0].count!");
136 List<int> rgSumMultShape =
new List<int>() { m_nDim };
137 m_blobSumMultiplier.
Reshape(rgSumMultShape);
138 m_blobSumMultiplier.
SetData(1);
162 long hBottomData = colBottom[0].gpu_data;
165 if (m_blobSumMultiplier.
count() > 0)
166 hMultData = m_blobSumMultiplier.
gpu_data;
168 T[] rgTop = colTop[0].mutable_cpu_data;
172 for (
int i = 0; i < m_nNum; i++)
178 rgTop[i] =
m_cuda.dot(m_nDim, hMultData, hBottomData, 0, nOffset);
182 rgTop[i] =
m_cuda.asum(m_nDim, hBottomData, nOffset);
186 rgTop[i] =
m_cuda.dot(m_nDim, hBottomData, hBottomData, nOffset, nOffset);
190 rgTop[i] =
convert(
m_cuda.min(m_nDim, hBottomData, out lPos, nOffset));
194 rgTop[i] =
convert(
m_cuda.max(m_nDim, hBottomData, out lPos, nOffset));
198 m_log.
FAIL(
"Unknown reduction op: " + m_op.ToString());
205 colTop[0].mutable_cpu_data = rgTop;
207 if (m_dfCoeff != 1.0)
208 m_cuda.scal(m_nNum, m_dfCoeff, colTop[0].mutable_gpu_data);
229 if (!rgbPropagateDown[0])
233 long hBottomData = 0;
237 hBottomData = colBottom[0].gpu_data;
239 T[] rgTopDiff = colTop[0].update_cpu_diff();
240 long hBottomDiff = colBottom[0].mutable_gpu_diff;
243 for (
int i = 0; i < m_nNum; i++)
245 double dfBottomCoeff =
convertD(rgTopDiff[i]) * m_dfCoeff;
251 m_cuda.set(m_nDim, hBottomDiff,
convert(dfBottomCoeff), -1, nOffset);
257 m_cuda.sign(m_nDim, hBottomData, hBottomDiff, nOffset, nOffset);
258 m_cuda.scal(m_nDim, dfBottomCoeff, hBottomDiff, nOffset);
262 m_cuda.scale(m_nDim,
convert(2 * dfBottomCoeff), hBottomData, hBottomDiff, nOffset, nOffset);
266 m_log.
FAIL(
"Unknown reduction op: " + m_op.ToString());
The Log class provides general output in text form.
void FAIL(string str)
Causes a failure which throws an exception with the descriptive text.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
The Utility class provides general utility functions.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The Blob is the main holder of data that moves through the Layers of the Net.
void SetData(T[] rgData, int nCount=-1, bool bSetCount=true)
Sets a number of items within the Blob's data.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use
int count()
Returns the total number of items in the Blob.
string Name
Get/set the name of the Blob.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
double convertD(T df)
Converts a generic to a double value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The ReductionLayer computes the 'reductions' – operations that return a scalar output Blob for an inp...
ReductionLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The ReductionLayer constructor.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the Reduction inputs.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: reduction
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: input
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward operation
override void dispose()
Releases all GPU and host resources used by the Layer.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override void setup_internal_blobs(BlobCollection< T > col)
Derivative layers should add all internal blobs to the 'col' provided.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
ReductionParameter reduction_param
Returns the parameter set when initialized with LayerType.REDUCTION
LayerType
Specifies the layer type.
Specifies the parameters used by ReductionLayer.
int axis
The first axis to reduce to scalar – may be negative index from the end (e.g., -1 for the last axis)....
ReductionOp
Defines the reduction operation.
double coeff
Specifies the coefficient used to scale the output.
ReductionOp operation
Specifies the reduction operation.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...