2using System.Collections.Generic;
23 double m_dfInnerScale;
24 double m_dfOuterScale;
52 base.LayerSetUp(colBottom, colTop);
57 m_log.
CHECK_GT(dfBase, 0,
"base_val must be strictly positive.");
61 double dfLogBase = (dfBase == -1) ? 1 : Math.Log(dfBase);
63 m_log.
CHECK(!
double.IsNaN(dfLogBase),
"NaN result: log(base) == log(" + dfBase.ToString() +
") = " + dfLogBase.ToString());
64 m_log.
CHECK(!
double.IsInfinity(dfLogBase),
"Inf result: log(base) == log(" + dfBase.ToString() +
") = " + dfLogBase.ToString());
69 m_dfInnerScale = dfLogBase * dfInputScale;
70 m_dfOuterScale = (dfInputShift == 0) ? 1 : ((dfBase != -1) ? Math.Pow(dfBase, dfInputShift) : Math.Exp(dfInputShift));
84 int nCount = colBottom[0].count();
85 long hBottomData = colBottom[0].gpu_data;
86 long hTopData = colTop[0].mutable_gpu_data;
88 if (m_dfInnerScale == 1)
90 m_cuda.exp(nCount, hBottomData, hTopData);
94 m_cuda.scale(nCount,
convert(m_dfInnerScale), hBottomData, hTopData);
95 m_cuda.exp(nCount, hTopData, hTopData);
98 if (m_dfOuterScale != 1)
115 if (!rgbPropagateDown[0])
118 int nCount = colBottom[0].count();
119 long hTopData = colTop[0].gpu_data;
120 long hTopDiff = colTop[0].gpu_diff;
121 long hBottomDiff = colBottom[0].mutable_gpu_diff;
123 m_cuda.mul(nCount, hTopData, hTopDiff, hBottomDiff);
125 if (m_dfInnerScale != 1)
The Log class provides general output in text form.
void CHECK(bool b, string str)
Test a flag for true.
void CHECK_GT(double df1, double df2, string str)
Test whether one number is greater than another.
The BlobCollection contains a list of Blobs.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
The ExpLayer computes the exponential of the input. This layer is initialized with the MyCaffe.param.ExpParameter.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
ExpLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The ExpLayer constructor.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the EXP value inputs.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The NeuronLayer is an interface for layers that take one blob as input (x) and produce only equally-sized output blobs (y).
double base_val
Specifies the base to use for the exponent, where \( y = \text{base}^{(\text{shift} + \text{scale} \cdot x)} \), for base > 0.
double shift
Specifies the shift to use for the exponent, where \( y = \text{base}^{(\text{shift} + \text{scale} \cdot x)} \), for base > 0.
double scale
Specifies the scale to use for the exponent, where \( y = \text{base}^{(\text{shift} + \text{scale} \cdot x)} \), for base > 0.
Specifies the base parameter for all layers.
ExpParameter exp_param
Returns the parameter set when initialized with LayerType.EXP
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.