2using System.Collections.Generic;
41 m_tMinusOne = (T)Convert.ChangeType(-1, typeof(T));
42 m_blobTmp =
new Blob<T>(cuda, log,
false);
43 m_blobTmp.Name = p.
name +
" tmp";
44 m_blobYDiff =
new Blob<T>(cuda, log,
false);
45 m_blobYDiff.
Name = p.
name +
" y diff";
46 m_blobXDiff =
new Blob<T>(cuda, log,
false);
47 m_blobXDiff.
Name = p.
name +
" x diff";
48 m_blobMask =
new Blob<T>(cuda, log,
false);
50 m_blobGradNorm =
new Blob<T>(cuda, log,
false);
51 m_blobGradNorm.
Name = p.
name +
" grad norm";
79 private void create_mask(
int nCount,
int nH,
int nW,
Blob<T> mask)
84 for (
int i = 0; i < nCount; i++)
86 int nUnitPos = i % nSize;
88 if (nUnitPos % nW == nW - 1 || nUnitPos / nW == nH - 1)
104 m_blobTmp.ReshapeLike(colBottom[0]);
110 create_mask(colBottom[0].count(), colBottom[0].shape(-2), colBottom[0].shape(-1), m_blobMask);
113 List<int> rgLossShape =
new List<int>();
114 colTop[0].
Reshape(rgLossShape);
128 int nW = colBottom[0].shape(-1);
129 int nCount = colBottom[0].count();
130 long hBottomData = colBottom[0].gpu_data;
144 double dfAsum =
convertD(m_blobTmp.asum_data());
158 if (!rgbPropagateDown[0])
161 int nW = colBottom[0].shape(-1);
162 int nCount = colBottom[0].count();
163 long hBottomDiff = colBottom[0].mutable_gpu_diff;
176 m_cuda.axpy(nCount - 1, m_tMinusOne, m_blobXDiff.
gpu_data, hBottomDiff, 0, 1);
177 m_cuda.axpy(nCount - nW, m_tMinusOne, m_blobYDiff.
gpu_data, hBottomDiff, 0, nW);
179 double dfTopDiff =
convertD(colTop[0].GetDiff(0));
180 m_cuda.scal(nCount, dfTopDiff, hBottomDiff);
The Log class provides general output in text form.
The BlobCollection contains a list of Blobs.
void SetData(double df)
Set all blob data to the value specified.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The Blob is the main holder of data that moves through the Layers of the Net.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
T[] mutable_cpu_data
Get data from the GPU and bring it over to the host, or set data from the host and send it over to the GPU.
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
string Name
Get/set the name of the Blob.
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
float convertF(T df)
Converts a generic to a float value.
double convertD(T df)
Converts a generic to a double value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The LossLayer provides an interface for Layers that take two blobs as input – usually (1) prediction...
The TVLossLayer computes total variation loss as described by 'Mahendran' et al., and used in Neural ...
override int ExactNumTopBlobs
Returns the exact number of top blobs (e.g. 1)
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the total variation loss values.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the inputs.
override int ExactNumBottomBlobs
Returns the exact number of bottom blobs (e.g. 1)
TVLossLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The TVLossLayer constructor.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
TVLossParameter tv_loss_param
Returns the parameter set when initialized with LayerType.TV_LOSS
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers.nt namespace contains all Neural Transfer related layers.
The MyCaffe.layers namespace contains all layers that have a solidified code base,...
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-...