using System.Collections.Generic;
73 if (m_colWork1 != null)
79 if (m_colWork2 != null)
129 if (p.steps.Count == 0)
130 m_log.WriteLine("WARNING: No normalization steps are specified, data will just pass through in its normal form.");
135 m_log.CHECK_GT(dfRange, 0, "The output data range must be greater than 0!");
144 for (int i = 0; i < colBottom.Count; i++)
160 for (int i = 0; i < colBottom.Count; i++)
163 m_colWork1[i].ReshapeLike(colBottom[i]);
185 for (int i = 0; i < colBottom.Count; i++)
187 m_colWork1[i].CopyFrom(colBottom[i]);
188 m_colWork2[i].CopyFrom(colBottom[i]);
196 run_step_log(m_colWork1);
200 run_step_center(m_colWork1, m_colWork2);
204 run_step_stdev(m_colWork1, m_colWork2);
209 for (int i = 0; i < colTop.Count; i++)
231 for (int i = 0; i < col1.Count; i++)
233 col1[i].add_scalar(-dfMean);
239 for (int i = 0; i < col1.Count; i++)
244 int nSpatialCount = b1.count(2);
246 if (nSpatialCount > 1)
264 if (dfStdev != 0 && dfStdev != 1)
266 for (int i = 0; i < col1.Count; i++)
268 m_cuda.mul_scalar(col1[i].count(), 1.0 / dfStdev, col1[i].mutable_gpu_data);
274 for (int i = 0; i < col1.Count; i++)
279 int nSpatialCount = b1.count(2);
281 if (nSpatialCount > 1)
302 new NotImplementedException();
The Log class provides general output in text form.
void WriteLine(string str, bool bOverrideEnabled=false, bool bHeader=false, bool bError=false, bool bDisable=false)
Write a line of output.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
void CHECK_NE(double df1, double df2, string str)
Test whether one number is not-equal to another.
void CHECK_GT(double df1, double df2, string str)
Test whether one number is greater than another.
The BlobCollection contains a list of Blobs.
void Dispose()
Release all resource used by the collection and its Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void ReshapeLike(BlobCollection< T > src)
Reshapes all blobs in the collection to the sizes of the source.
void CopyFrom(BlobCollection< T > bSrc, bool bCopyDiff=false)
Copy the data or diff from another BlobCollection into this one.
The Blob is the main holder of data that moves through the Layers of the Net.
int channels
DEPRECATED; legacy shape accessor channels: use shape(1) instead.
long mutable_gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
int count()
Returns the total number of items in the Blob.
long gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
int num
DEPRECATED; legacy shape accessor num: use shape(0) instead.
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
The DataNormalizerLayer normalizes the input data (and optionally label) based on the normalization o...
override int MaxBottomBlobs
Returns the maximum number of bottom blobs required: data, label
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation.
override void dispose()
Clean up any resources used.
DataNormalizerLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
Constructor.
override int MaxTopBlobs
Returns the maximum number of top blobs required: data, label
override int MinBottomBlobs
Returns the minimum number of bottom blobs required: data
override int MinTopBlobs
Returns the minimum number of top blobs required: data
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Not implemented.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
Specifies the parameter for the data normalizer layer.
double output_max
Specifies the maximum data range of the output.
double output_min
Specifies the minimum data range of the output.
List< NORMALIZATION_STEP > steps
Specifies the normalization steps which are performed in the order for which they are listed.
NORMALIZATION_STEP
Specifies the normalization step to run.
double? input_mean
Specifies the input mean, if known. When not specified the input_mean is determined dynamically from ...
double? input_stdev
Specifies the input standard deviation, if known. When not specified input_stdev is determined dynami...
Specifies the base parameter for all layers.
DataNormalizerParameter data_normalizer_param
Returns the parameter set when initialized with LayerType.DATA_NORMALIZER
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...