using System.Collections.Generic;

int m_nUnPooledHeight = -1;
int m_nUnPooledWidth = -1;
bool m_bGlobalPooling;
m_log.CHECK(((p.pad.Count > 0) && p.pad_h.HasValue && p.pad_w.HasValue) || (!p.pad_h.HasValue && !p.pad_w.HasValue),
    "Pad is pad OR pad_h and pad_w are required.");
if (m_bGlobalPooling)
{
    m_nKernelH = colBottom[0].height;
    m_nKernelW = colBottom[0].width;
}

m_log.CHECK_GT(m_nKernelH, 0, "Filter dimensions cannot be zero.");
m_log.CHECK_GT(m_nKernelW, 0, "Filter dimensions cannot be zero.");
if (p.pad.Count > 0)
{
    m_nPadH = (int)p.pad[0];
    m_nPadW = (int)p.pad[0];
}
else
{
    m_nPadH = (p.pad_h.HasValue) ? (int)p.pad_h.Value : 0;
    m_nPadW = (p.pad_w.HasValue) ? (int)p.pad_w.Value : 0;
}

m_nStrideH = (int)p.stride[0];
m_nStrideW = (int)p.stride[0];
if (m_bGlobalPooling)
    m_log.CHECK(m_nPadH == 0 && m_nPadW == 0 && m_nStrideH == 1 && m_nStrideW == 1,
        "With global pooling = true, only pad = 0 and stride = 1 allowed.");
if (m_nPadH != 0 || m_nPadW != 0)
{
    m_log.CHECK_LT(m_nPadH, m_nKernelH, "The pad_h must be less than the kernel_h.");
    m_log.CHECK_LT(m_nPadW, m_nKernelW, "The pad_w must be less than the kernel_w.");
}
m_log.CHECK_EQ(4, colBottom[0].num_axes, "Input must have 4 axes, corresponding to (num, channels, height, width).");

m_nChannels = colBottom[0].channels;
m_nHeight = colBottom[0].height;
m_nWidth = colBottom[0].width;

if (m_bGlobalPooling)
{
    m_nKernelH = colBottom[0].height;
    m_nKernelW = colBottom[0].width;
}
if (m_nUnPooledHeight < 0)
{
    m_nUnPooledHeight = Math.Max((m_nHeight - 1) * m_nStrideH + m_nKernelH - 2 * m_nPadH,
                                 m_nHeight * m_nStrideH - m_nPadH + 1);
}

if (m_nUnPooledWidth < 0)
{
    m_nUnPooledWidth = Math.Max((m_nWidth - 1) * m_nStrideW + m_nKernelW - 2 * m_nPadW,
                                m_nWidth * m_nStrideW - m_nPadW + 1);
}
if (m_nUnPooledHeight <= 0)
{
    m_nUnPooledHeight = 1;
    m_log.WriteLine("WARNING: unpooling height was 0, setting to 1.");
}

if (m_nUnPooledWidth <= 0)
{
    m_nUnPooledWidth = 1;
}

colTop[0].Reshape(colBottom[0].num, m_nChannels, m_nUnPooledHeight, m_nUnPooledWidth);
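As an illustration of the size computation above (the values below are assumed for the example, not taken from the source): with a 3x3 kernel, stride 2, pad 0 and a pooled height of 14, both arguments to Math.Max evaluate to 29, so the layer unpools to a height of 29. The same formula is applied to the width.

int nHeight = 14, nKernelH = 3, nStrideH = 2, nPadH = 0;                            // assumed example values
int nUnPooledHeight = Math.Max((nHeight - 1) * nStrideH + nKernelH - 2 * nPadH,     // (14 - 1) * 2 + 3 - 0 = 29
                               nHeight * nStrideH - nPadH + 1);                     // 14 * 2 - 0 + 1 = 29
// nUnPooledHeight == 29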
int nCount = colBottom[0].count();
long hBottomData = colBottom[0].gpu_data;
long hTopData = colTop[0].mutable_gpu_data;
long hBottomMask = 0;

if (colBottom.Count > 1)
    hBottomMask = colBottom[1].gpu_data;

m_cuda.unpooling_fwd(POOLING_METHOD.MAX, nCount, hBottomData, colBottom[0].num, m_nChannels, m_nHeight, m_nWidth, m_nUnPooledHeight, m_nUnPooledWidth, m_nKernelH, m_nKernelW, m_nStrideH, m_nStrideW, m_nPadH, m_nPadW, hTopData, hBottomMask);

m_cuda.unpooling_fwd(POOLING_METHOD.AVE, nCount, hBottomData, colBottom[0].num, m_nChannels, m_nHeight, m_nWidth, m_nUnPooledHeight, m_nUnPooledWidth, m_nKernelH, m_nKernelW, m_nStrideH, m_nStrideW, m_nPadH, m_nPadW, hTopData, 0);
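For reference, the following is a minimal CPU sketch of what the MAX branch above computes on a single channel plane. It assumes, as in Caffe-style max pooling, that the mask blob stores for each pooled element the flat index of the position it was taken from in the unpooled height-by-width plane; the helper name and signature are illustrative and not part of MyCaffe.

using System;

// Hypothetical CPU reference for MAX unpooling (forward) on one channel plane.
// pooled and mask have the pooled plane's length; unpooled has the unpooled plane's length.
static void MaxUnpoolPlane(float[] pooled, float[] mask, float[] unpooled)
{
    Array.Clear(unpooled, 0, unpooled.Length);   // positions never selected by pooling stay zero

    for (int i = 0; i < pooled.Length; i++)
    {
        int nDst = (int)mask[i];                 // flat index recorded by the max pooling layer (assumed semantics)
        unpooled[nDst] = pooled[i];              // scatter the pooled value back to its source position
    }
}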
if (!rgbPropagateDown[0])
    return;
int nCount = colBottom[0].count();
long hTopDiff = colTop[0].gpu_diff;
long hBottomDiff = colBottom[0].mutable_gpu_diff;
long hBottomMask = 0;

if (colBottom.Count > 1)
    hBottomMask = colBottom[1].gpu_data;

m_cuda.unpooling_bwd(POOLING_METHOD.MAX, nCount, hTopDiff, colTop[0].num, m_nChannels, m_nHeight, m_nWidth, m_nUnPooledHeight, m_nUnPooledWidth, m_nKernelH, m_nKernelW, m_nStrideH, m_nStrideW, m_nPadH, m_nPadW, hBottomDiff, hBottomMask);

m_cuda.unpooling_bwd(POOLING_METHOD.AVE, nCount, hTopDiff, colTop[0].num, m_nChannels, m_nHeight, m_nWidth, m_nUnPooledHeight, m_nUnPooledWidth, m_nKernelH, m_nKernelW, m_nStrideH, m_nStrideW, m_nPadH, m_nPadW, hBottomDiff, 0);
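Symmetrically, a minimal CPU sketch of the MAX backward pass above: each pooled position gathers the gradient from the unpooled position it wrote to during the forward pass. The mask semantics and the helper itself are again assumptions made only for illustration.

// Hypothetical CPU reference for MAX unpooling (backward) on one channel plane.
static void MaxUnpoolBackwardPlane(float[] topDiff, float[] mask, float[] bottomDiff)
{
    for (int i = 0; i < bottomDiff.Length; i++)
        bottomDiff[i] = topDiff[(int)mask[i]];   // gather the gradient from the scattered position
}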
The Log class provides general output in text form.
void CHECK(bool b, string str)
Test whether a flag is true.
void WriteLine(string str, bool bOverrideEnabled=false, bool bHeader=false, bool bError=false, bool bDisable=false)
Write a line of output.
void FAIL(string str)
Causes a failure which throws an exception with the descriptive text.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
void CHECK_GT(double df1, double df2, string str)
Test whether one number is greater than another.
void CHECK_LT(double df1, double df2, string str)
Test whether one number is less than another.
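A brief usage sketch of the Log helpers listed above; the string passed to the Log constructor is an assumption of this sketch, while the method calls themselves are the members documented here.

Log log = new Log("UnPooling sample");                              // constructor argument assumed for illustration
log.CHECK_GT(3, 0, "Filter dimensions cannot be zero.");            // passes: 3 > 0
log.CHECK_EQ(4, 4, "Input must have 4 axes.");                      // passes: 4 == 4
log.CHECK_LT(0, 2, "The pad must be less than the kernel size.");   // passes: 0 < 2
log.WriteLine("All checks passed.");                                // general text output
// log.FAIL("message") throws an exception carrying the descriptive text.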
The BlobCollection contains a list of Blobs.
void SetData(double df)
Set all blob data to the value specified.
void SetDiff(double df)
Set all blob diff to the value specified.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
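A short sketch of the BlobCollection members listed above. The Blob<float> constructor and the Add call are assumptions used only to populate the collection; SetData, SetDiff, Count and Reshape are the members documented here.

BlobCollection<float> col = new BlobCollection<float>();
col.Add(new Blob<float>(cuda, log));            // assumed: cuda/log prepared elsewhere, Add not listed above
col.Reshape(new int[] { 1, 3, 14, 14 });        // reshape every blob in the collection to (num, channels, h, w)
col.SetData(0);                                 // zero the data of every blob
col.SetDiff(0);                                 // zero the diff of every blob
int nBlobs = col.Count;                         // nBlobs == 1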
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Run the Backward computation using the Engine.CAFFE mode only.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer for use with both Engine.CAFFE and Engine.CUDNN modes.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Run the Forward computation using the Engine.CAFFE mode only.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override int MinBottomBlobs
Returns the minimum number of required bottom (input) Blobs: input
override void dispose()
Releases all GPU and host resources used by the Layer.
UnPoolingLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The UnPoolingLayer constructor.
override int? MaxBottomBlobs
Returns the maximum number of required bottom (input) Blobs: input, mask (only when using MAX)
override int ExactNumTopBlobs
Returns the required number of top (output) Blobs: unpool
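A construction and invocation sketch for the layer members listed above. The cuda, log, p, colBottom and colTop instances are assumed to be prepared elsewhere, and the public Setup/Forward/Backward/Dispose wrappers on the base Layer (which dispatch to the LayerSetUp, Reshape, forward, backward and dispose overrides listed here) are an assumption of this sketch.

UnPoolingLayer<float> layer = new UnPoolingLayer<float>(cuda, log, p);
layer.Setup(colBottom, colTop);                                  // assumed public wrapper: runs LayerSetUp and Reshape
layer.Forward(colBottom, colTop);                                // assumed public wrapper: runs the Engine.CAFFE forward pass
layer.Backward(colTop, new List<bool>() { true }, colBottom);    // assumed public wrapper: runs the backward pass
layer.Dispose();                                                 // assumed IDisposable entry point wrapping the dispose() override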
uint? stride_h
The stride height (2D only)
List< uint > kernel_size
Kernel size is given as a single value for equal dimensions in all spatial dimensions, or once per spatial dimension.
uint? stride_w
The stride width (2D only)
uint? pad_h
The padding height (2D only)
uint? kernel_h
The kernel height (2D only)
List< uint > stride
Stride is given as a single value for equal dimensions in all spatial dimensions, or once per spatial dimension.
uint? kernel_w
The kernel width (2D only)
uint? pad_w
The padding width (2D only)
List< uint > pad
Pad is given as a single value for equal dimensions in all spatial dimensions, or once per spatial dimension.
Specifies the base parameter for all layers.
UnPoolingParameter unpooling_param
Returns the parameter set when initialized with LayerType.UNPOOLING
LayerType
Specifies the layer type.
Specifies the parameters for the PoolingLayer.
PoolingMethod
Defines the pooling method.
PoolingMethod pool
Specifies the pooling method.
bool global_pooling
Specifies whether or not to enable global pooling.
Specifies the parameters for the UnPoolingLayer.
List< uint > unpool_size
UnPool size is given as a single value for equal dimensions in all spatial dimensions, or once per spatial dimension.
uint? unpool_h
The unpooling height override (2D only)
uint? unpool_w
The unpooling width override (2D only)
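A configuration sketch for the parameters listed above. The LayerParameter constructor taking a LayerType is an assumption, as is UnPoolingParameter exposing the kernel/stride/pad lists documented earlier (the LayerSetUp code above reads them from the same parameter object); the field names themselves come from these lists.

LayerParameter p = new LayerParameter(LayerParameter.LayerType.UNPOOLING);   // constructor form assumed
p.unpooling_param.kernel_size.Add(2);       // 2x2 kernel in both spatial dimensions
p.unpooling_param.stride.Add(2);            // stride of 2 in both spatial dimensions
p.unpooling_param.pad.Add(0);               // no padding
// Alternatively, force the unpooled output size directly instead of deriving it from kernel/stride/pad:
// p.unpooling_param.unpool_h = 28;
// p.unpooling_param.unpool_w = 28;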
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
POOLING_METHOD
Specifies the pooling method to use when using the Caffe pooling (instead of the pooling from NVIDIA's cuDNN).
The MyCaffe.layers.beta namespace contains all beta stage layers.
The MyCaffe.param.beta parameters are used by the MyCaffe.layer.beta layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.