2using System.Collections.Generic;
26 double m_dfAlpha = 1.0;
27 double m_dfBeta = 1.0;
71 m_log.
CHECK_GT(m_dfAlpha, 0,
"The 'alpha' parameter must be greater than zero.");
72 m_log.
CHECK_GT(m_dfBeta, 0,
"The 'beta' parameter must be greater than zero.");
85 m_nK = colBottom[0].count(nAxis);
89 m_nM = colBottom[0].count(0, nAxis - 1);
92 m_nN = colBottom[0].shape(nAxis - 1);
94 List<int> rgTopShape =
Utility.Clone<
int>(colBottom[0].shape(), nAxis + 1);
95 rgTopShape[nAxis] = m_nN;
111 long hBottomData = colBottom[0].gpu_data;
112 long hTopData = colTop[0].mutable_gpu_data;
115 if (m_dfAlpha != 1.0)
116 fScale = (T)Convert.ChangeType(m_dfAlpha, typeof(T));
118 for (
int i = 0; i < m_nM; i++)
120 m_cuda.gemm(
false,
true, m_nN, m_nN, m_nK, fScale, hBottomData, hBottomData,
m_tZero, hTopData, i * m_nK * m_nN, i * m_nK * m_nN, i * m_nN * m_nN);
124 colTop[0].scale_data(m_dfBeta);
137 if (!rgbPropagateDown[0])
140 long hTopDiff = colTop[0].gpu_diff;
141 long hBottomDiff = colBottom[0].mutable_gpu_diff;
142 long hBottomData = colBottom[0].gpu_data;
147 if (m_dfAlpha != 1.0)
148 fScale = (T)Convert.ChangeType(1.0 / m_dfAlpha, typeof(T));
151 for (
int i = 0; i < m_nM; i++)
153 m_cuda.gemm(
false,
false, m_nN, m_nK, m_nN, fScale, hTopDiff, hBottomData,
m_tZero, hBottomDiff, i * m_nN * m_nN, i * m_nK * m_nN, i * m_nK * m_nN);
154 m_cuda.gemm(
true,
false, m_nN, m_nK, m_nN, fScale, hTopDiff, hBottomData,
m_tOne, hBottomDiff, i * m_nN * m_nN, i * m_nK * m_nN, i * m_nK * m_nN);
160 colBottom[0].scale_diff(1.0 / m_dfBeta);
The Log class provides general output in text form.
void CHECK_GT(double df1, double df2, string str)
Test whether one number is greater than another.
The Utility class provides general utility functions.
The BlobCollection contains a list of Blobs.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
T m_tZero
Specifies a generic type equal to 0.0.
T m_tOne
Specifies a generic type equal to 1.0.
bool m_bUseHalfSize
Specifies that the half size of the top (if any) should be converted to the base size.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The GramLayer computes the Gram matrix used in Neural Style.
GramLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The GramLayer constructor.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override int ExactNumBottomBlobs
Returns the exact number of bottom blobs (e.g. 1)
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the Gram matrix values.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the Gram matrix inputs.
override int ExactNumTopBlobs
Returns the exact number of top blobs (e.g. 1)
Specifies the base parameter for all layers.
bool use_halfsize
Specifies whether or not to use half sized memory or not.
GramParameter gram_param
Returns the parameter set when initialized with LayerType.GRAM
LayerType
Specifies the layer type.
double beta
Specifies the scaling factor applied after the gram operation.
bool disable_scaling_on_gradient
Specifies whether or not to apply the un-scaling of the alpha and beta values during the b...
double alpha
Specifies the scaling factor applied before the gram operation.
int axis
The first axis to be lumped into a single Gram matrix computation; all preceding axes are retained in...
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers.nt namespace contains all Neural Transfer related layers.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param.nt namespace defines the parameters used by the Neural Style Transfer layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...