using System.Collections.Generic;

// dispose() - release the internal offset blob.
m_blobOffsets.Dispose();

// setup_internal_blobs() - register the internal blobs with the collection provided.
col.Add(m_blobOffsets);
col.Add(m_blobSrcStrides);
col.Add(m_blobDstStrides);

// LayerSetUp() - the crop axis must lie within the input dimensions.
int nInputDim = colBottom[0].num_axes;
m_log.CHECK_LT(nStartAxis, nInputDim, "The crop axis is bigger than the input dim.");
// Reshape() - compute the cropped output shape and record the per-axis crop offsets
// and strides used by the GPU kernels.
int nInputDim = colBottom[0].num_axes;

List<int> rgNewShape = Utility.Clone<int>(colBottom[0].shape());
List<int> rgOffsetsShape = new List<int>() { nInputDim };

m_blobOffsets.Reshape(rgOffsetsShape);
float[] rgOffsetData = convertF(m_blobOffsets.mutable_cpu_data);

for (int i = 0; i < nInputDim; i++)
{
    // Axes before the crop axis keep the bottom[0] size and a zero offset; axes at or
    // after it take the size of the reference blob bottom[1] and the configured offset.
    int nNewSize = colBottom[0].shape(i);
    // ...
    nNewSize = colBottom[1].shape(i);
    // ...
    m_log.CHECK_GE(colBottom[0].shape(i) - nCropOffset, colBottom[1].shape(i),
        "The crop for dimension " + i.ToString() +
        " is out-of-bounds with size " + colBottom[0].shape(i).ToString() +
        " and offset " + nCropOffset.ToString());
    // ...
    rgNewShape[i] = nNewSize;
    rgOffsetData[i] = nCropOffset;
}

m_blobOffsets.mutable_cpu_data = convert(rgOffsetData);
// ... (the top blob is reshaped to rgNewShape at this point)

// The source (bottom) and destination (top) strides are the element counts of the trailing axes.
m_blobSrcStrides.Reshape(rgOffsetsShape);
m_blobDstStrides.Reshape(rgOffsetsShape);

float[] rgSrcStrides = convertF(m_blobSrcStrides.mutable_cpu_data);
float[] rgDstStrides = convertF(m_blobDstStrides.mutable_cpu_data);

for (int i = 0; i < nInputDim; i++)
{
    rgSrcStrides[i] = colBottom[0].count(i + 1, nInputDim);
    rgDstStrides[i] = colTop[0].count(i + 1, nInputDim);
}
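To make the shape arithmetic above concrete, here is a minimal standalone sketch of the same idea; the ComputeCropShape helper name and the plain int[] parameters are illustrative assumptions, not part of the layer.

// Hypothetical sketch of how the cropped shape and per-axis offsets are derived: axes before
// nStartAxis keep the bottom[0] size with a zero offset, axes at or after it take the reference
// (bottom[1]) size shifted by the configured offset.
static void ComputeCropShape(int[] rgBottomShape, int[] rgRefShape, int nStartAxis, int[] rgOffsets,
    out int[] rgTopShape, out int[] rgCropOffsets)
{
    int nInputDim = rgBottomShape.Length;
    rgTopShape = (int[])rgBottomShape.Clone();
    rgCropOffsets = new int[nInputDim];

    for (int i = 0; i < nInputDim; i++)
    {
        if (i < nStartAxis)
            continue;

        // No offsets means zero; a single offset applies to every cropped axis;
        // otherwise one offset is given per cropped axis.
        int nOffset = (rgOffsets.Length == 0) ? 0 :
                      (rgOffsets.Length == 1) ? rgOffsets[0] : rgOffsets[i - nStartAxis];

        if (rgBottomShape[i] - nOffset < rgRefShape[i])
            throw new Exception("The crop for dimension " + i.ToString() + " is out-of-bounds.");

        rgTopShape[i] = rgRefShape[i];
        rgCropOffsets[i] = nOffset;
    }
}

Cropping a (1, 3, 224, 224) bottom against a (1, 3, 200, 200) reference with nStartAxis = 2 and offsets { 10, 12 }, for example, would give a top shape of (1, 3, 200, 200) and crop offsets of (0, 0, 10, 12).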
// forward() - copy the cropped region of the bottom blob into the top blob on the GPU.
List<int> rgIndices = Utility.Create<int>(colTop[0].num_axes, 0);
long hBottomData = colBottom[0].gpu_data;
long hTopData = colTop[0].mutable_gpu_data;
int nCount = colTop[0].count();

m_cuda.crop_fwd(nCount, colBottom[0].num_axes, m_blobSrcStrides.gpu_data,
    m_blobDstStrides.gpu_data, m_blobOffsets.gpu_data, hBottomData, hTopData);
// backward() - copy the top gradient back into the corresponding region of the bottom gradient.
if (rgbPropagateDown[0])
{
    long hTopDiff = colTop[0].gpu_diff;
    long hBottomDiff = colBottom[0].mutable_gpu_diff;
    int nCount = colTop[0].count();

    m_cuda.crop_bwd(nCount, colBottom[0].num_axes, m_blobSrcStrides.gpu_data,
        m_blobDstStrides.gpu_data, m_blobOffsets.gpu_data, hBottomDiff, hTopDiff);
}
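crop_fwd and crop_bwd are handles into the low-level CudaDnn kernels, so their bodies are not visible here. As a rough CPU reference, the index mapping crop_fwd performs can be sketched as below; the CropForwardCpu name and the flat float[] buffers are assumptions made for illustration.

// Hypothetical CPU sketch of the crop copy: for every flat index in the (smaller) top blob,
// decode its per-axis coordinates using the destination strides, add the crop offsets, and
// re-encode with the source strides to locate the matching bottom element.
static void CropForwardCpu(float[] rgBottom, float[] rgTop, int[] rgSrcStrides, int[] rgDstStrides, int[] rgOffsets)
{
    int nNumAxes = rgOffsets.Length;

    for (int nDstIdx = 0; nDstIdx < rgTop.Length; nDstIdx++)
    {
        int nRemainder = nDstIdx;
        int nSrcIdx = 0;

        for (int i = 0; i < nNumAxes; i++)
        {
            int nCoord = nRemainder / rgDstStrides[i];   // coordinate along axis i within the top blob
            nRemainder -= nCoord * rgDstStrides[i];
            nSrcIdx += (nCoord + rgOffsets[i]) * rgSrcStrides[i];
        }

        rgTop[nDstIdx] = rgBottom[nSrcIdx];
    }
}

crop_bwd presumably applies the same mapping in the opposite direction, writing each top gradient element into the matching offset position of the bottom gradient, while the bottom positions outside the cropped region carry no gradient.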
The Log class provides general output in text form.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
void CHECK_GE(double df1, double df2, string str)
Test whether one number is greater than or equal to another.
void CHECK_LT(double df1, double df2, string str)
Test whether one number is less than another.
The Utility class provides general utility functions.
static List< int > Create(int nCount, int nStart, int nInc)
Create a new List and fill it with values starting with start and incrementing by inc.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
void SetDiff(double df)
Set all blob diff to the value specified.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The Blob is the main holder of data that moves through the Layers of the Net.
Blob(CudaDnn< T > cuda, Log log, bool bIncludeDiff=true, bool bUseHalfSize=false)
The Blob constructor.
T[] mutable_cpu_data
Get data from the GPU and bring it over to the host, or set data from the host and send it over to the GPU.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use the Reshape overload that takes a shape list instead.
string Name
Get/set the name of the Blob.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
The CropLayer takes a Blob and crops it to the shape specified by the second input Blob, across all dimensions after the specified axis.
override void dispose()
Releases all GPU and host resources used by the Layer.
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: input, shape
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the cropped inputs.
CropLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The CropLayer constructor.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: crop
override void setup_internal_blobs(BlobCollection< T > col)
Derived layers should add all internal blobs to the 'col' provided.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
float convertF(T df)
Converts a generic to a float value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
List< uint > offset
Specifies the offset to set the shift for all/each dimension.
int axis
The axis along which to crop; may be negative to index from the end (e.g., -1 for the last axis).
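To illustrate how these two fields combine, the sketch below configures a crop over the spatial axes only; the way the LayerParameter instance is obtained (a constructor taking the layer type) is an assumption made for illustration.

// Hypothetical configuration: crop only the spatial axes of an N x C x H x W blob.
// Assumes a LayerParameter constructor that takes the layer type.
LayerParameter p = new LayerParameter(LayerParameter.LayerType.CROP);
p.name = "crop1";
p.crop_param.axis = 2;                              // leave N and C untouched, crop H and W
p.crop_param.offset = new List<uint>() { 10, 12 };  // shift the crop window by 10 rows and 12 columns

// With bottom[0] of shape (1, 3, 224, 224) and a reference bottom[1] of shape (1, 3, 200, 200),
// the resulting top blob has shape (1, 3, 200, 200), taken from rows 10..209 and columns 12..211,
// matching the Reshape sketch earlier.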
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
LayerType
Specifies the layer type.
CropParameter crop_param
Returns the parameter set when initialized with LayerType.CROP
The MyCaffe.basecode namespace contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.