using System;
using System.Collections.Generic;

// --- Excerpt from PowerLayer<T>.LayerSetUp ---
base.LayerSetUp(colBottom, colTop);
// Cache the combined gradient scale: diff_scale = power * scale.
m_dfDiffScale = m_dfPower * m_dfScale;

// --- Excerpt from PowerLayer<T>.forward ---
// When diff_scale == 0 the output is the constant shift^power (or 1 when power == 0).
if (m_dfDiffScale == 0)
    colTop[0].SetData((m_dfPower == 0) ? 1.0 : Math.Pow(m_dfShift, m_dfPower));
// Otherwise start from a copy of the input; the scale, shift and power steps are elided here.
int nCount = colBottom[0].count();
long hTopData = colTop[0].mutable_gpu_data;
long hBottomData = colBottom[0].gpu_data;
m_cuda.copy(nCount, hBottomData, hTopData);

// --- Excerpt from PowerLayer<T>.backward ---
if (!rgbPropagateDown[0])
    return;
int nCount = colBottom[0].count();
long hTopDiff = colTop[0].gpu_diff;
long hBottomDiff = colBottom[0].mutable_gpu_diff;
if (m_dfDiffScale == 0 || m_dfPower == 1)
{
    // dy/dx is the constant diff_scale = power * scale.
    colBottom[0].SetDiff(m_dfDiffScale);
}
else
{
    long hBottomData = colBottom[0].gpu_data;
    // First special-case branch; its opening condition is elided in this excerpt.
    // It adds the constant part of dy/dx, diff_scale * shift, to the bottom diff.
    m_cuda.add_scalar(nCount, convert(m_dfDiffScale * m_dfShift), hBottomDiff);
    else if (m_dfShift == 0)
    {
        // Special case y = (scale * x)^power, where dy/dx = power * y / x.
        long hTopData = colTop[0].gpu_data;
        m_cuda.div(nCount, hTopData, hBottomData, hBottomDiff);
    }
    else
    {
        // General case: dy/dx = diff_scale * y / (shift + scale * x).
        m_cuda.copy(nCount, hBottomData, hBottomDiff);
        if (m_dfScale != 1.0) { /* scale step elided */ }
        if (m_dfShift != 0.0) { /* shift step elided */ }
        long hTopData = colTop[0].gpu_data;
        m_cuda.div(nCount, hTopData, hBottomDiff, hBottomDiff);
        if (m_dfDiffScale != 1.0) { /* diff_scale step elided */ }
    }
}
// Chain rule: multiply the local gradient by the incoming top diff.
m_cuda.mul(nCount, hTopDiff, hBottomDiff, hBottomDiff);
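For readers who want to check the GPU branches above against something simpler, the following is a minimal CPU sketch of the same element-wise computation on plain double arrays. It is not part of MyCaffe; the class and method names (PowerRef, Forward, Backward) are made up for illustration, and only the power/scale/shift formula and its derivative are taken from the layer.

using System;

public static class PowerRef
{
    // y = (shift + scale * x) ^ power, applied element-wise.
    public static double[] Forward(double[] rgX, double dfPower, double dfScale, double dfShift)
    {
        double[] rgY = new double[rgX.Length];
        for (int i = 0; i < rgX.Length; i++)
            rgY[i] = Math.Pow(dfShift + dfScale * rgX[i], dfPower);
        return rgY;
    }

    // bottomDiff = topDiff * dy/dx with dy/dx = power * scale * (shift + scale * x)^(power - 1);
    // the special-case branches in the layer are all evaluations of this same expression.
    public static double[] Backward(double[] rgX, double[] rgTopDiff, double dfPower, double dfScale, double dfShift)
    {
        double dfDiffScale = dfPower * dfScale;   // same quantity as m_dfDiffScale
        double[] rgBottomDiff = new double[rgX.Length];

        for (int i = 0; i < rgX.Length; i++)
        {
            double dfDyDx = (dfDiffScale == 0 || dfPower == 1)
                ? dfDiffScale                      // gradient is a constant
                : dfDiffScale * Math.Pow(dfShift + dfScale * rgX[i], dfPower - 1);

            rgBottomDiff[i] = rgTopDiff[i] * dfDyDx;   // chain rule
        }

        return rgBottomDiff;
    }
}

Running the GPU layer and this sketch on the same inputs should produce matching outputs and gradients, up to floating-point precision.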
The Log class provides general output in text form.
The BlobCollection contains a list of Blobs.
void SetData(double df)
Set all blob data to the value specified.
void SetDiff(double df)
Set all blob diff to the value specified.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
T m_tZero
Specifies a generic type equal to 0.0.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The NeuronLayer is an interface for layers that take one blob as input (x) and produce only equally-sized outputs (y).
The PowerLayer computes the outputs y = (shift + scale * x) ^ power. This layer is initialized with the MyCaffe.param.PowerParameter.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the power inputs (the gradient is written out after this member list).
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
PowerLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The PowerLayer constructor.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The forward computation.
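For reference, the gradient that backward computes can be written out from the forward formula; this is the standard derivation rather than text taken from the MyCaffe documentation:

\[ y = (\mathrm{shift} + \mathrm{scale}\,x)^{\mathrm{power}} \]
\[ \frac{\partial y}{\partial x} = \mathrm{power}\cdot\mathrm{scale}\,(\mathrm{shift} + \mathrm{scale}\,x)^{\mathrm{power}-1} = \frac{\mathrm{diff\_scale}\cdot y}{\mathrm{shift} + \mathrm{scale}\,x}, \qquad \mathrm{diff\_scale} = \mathrm{power}\cdot\mathrm{scale}. \]

The branches in the implementation (diff_scale == 0 or power == 1, shift == 0, and the general case) are all evaluations of this same expression.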
Specifies the base parameter for all layers.
PowerParameter power_param
Returns the parameter set when initialized with LayerType.POWER.
LayerType
Specifies the layer type.
double power
Specifies the power value in the formula y = (shift + scale * x) ^ power.
double scale
Specifies the scale value in the formula y = (shift + scale * x) ^ power.
double shift
Specifies the shift value in the formula y = (shift + scale * x) ^ power.
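As a quick sanity check of how the three parameters enter the formula, take the arbitrarily chosen values shift = 1, scale = 2, power = 2 and an input x = 3:

\[ y = (1 + 2\cdot 3)^2 = 49, \qquad \frac{\partial y}{\partial x} = 2\cdot 2\cdot(1 + 2\cdot 3)^{2-1} = 28. \]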
The MyCaffe.basecode namespace contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.