Excerpt from the OneHotLayer implementation (fields, Reshape, forward and backward):

    using System.Collections.Generic;

    // Working buffers used to build the one-hot output on the host.
    float[] m_rgOneHotVector;
    float[] m_rgTop = null;

    // From Reshape: the bottom must contribute a single value per item at the
    // one-hot axis, and the top takes the bucket count at that axis.
    int nCount = colBottom[0].count(m_nAxis);
    m_log.CHECK_EQ(nCount, 1, "The bottom[0] count at axis " + m_nAxis.ToString() + " must equal 1");

    List<int> rgTopShape = Utility.Clone<int>(colBottom[0].shape());
    rgTopShape[m_nAxis] = m_colBuckets.Count;

    int nTopCount = colTop[0].count();
    if (m_rgTop == null || m_rgTop.Length < nTopCount)
        m_rgTop = new float[nTopCount];

    // From forward: bucket each bottom value, build its one-hot vector and
    // copy the vector into the top buffer.
    float[] rgBottom = convertF(colBottom[0].mutable_cpu_data);
    int nCount = colBottom[0].count(0, m_nAxis);

    for (int i = 0; i < nCount; i++)
    {
        int nIdx = m_colBuckets.Add(rgBottom[i]);

        for (int j = 0; j < m_rgOneHotVector.Length; j++)
        {
            if (j == nIdx)
                m_rgOneHotVector[j] = 1.0f;
            else
                m_rgOneHotVector[j] = 0;
        }

        Array.Copy(m_rgOneHotVector, 0, m_rgTop, i * m_rgOneHotVector.Length, m_rgOneHotVector.Length);
    }

    colTop[0].mutable_cpu_data = convert(m_rgTop);

    // From backward: reduce the diffs of each one-hot vector to a single diff
    // for the bottom value that produced it.
    int nItemCount = colTop[0].count(m_nAxis);
    m_log.CHECK_EQ(nItemCount, m_colBuckets.Count, "The count at the top[axis] is incorrect!");

    int nCount1 = colTop[0].count(0, m_nAxis);
    int nCount2 = colBottom[0].count(0, m_nAxis);
    m_log.CHECK_EQ(nCount1, nCount2, "The top and bottom have incompatible sizes.");

    float[] rgBottomDiff = convertF(colBottom[0].mutable_cpu_diff);
    float[] rgTopData = convertF(colTop[0].mutable_cpu_data);
    float[] rgTopDiff = convertF(colTop[0].mutable_cpu_diff);

    for (int i = 0; i < nCount1; i++)
    {
        int nItemIdx = i * nItemCount;
        float fDiffSum = 0;

        for (int j = 0; j < nItemCount; j++)
        {
            float fDiff = rgTopDiff[nItemIdx + j];

            if (rgTopData[nItemIdx + j] == 0)
            {
                // (handling of the diff at non-hot positions elided in this excerpt)
            }

            fDiffSum += fDiff;
        }

        rgBottomDiff[i] = fDiffSum / nItemCount;
    }

    colBottom[0].mutable_cpu_diff = convert(rgBottomDiff);
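
As a concrete illustration of what forward produces, the following self-contained sketch performs the same bucket-then-one-hot mapping with plain arrays. It does not use the MyCaffe API; the bucket boundaries and all names here are made up for the example:

    using System;

    public static class OneHotSketch
    {
        // Hypothetical bucket upper bounds: (-inf, 0], (0, 0.5], (0.5, 1], (1, +inf).
        static readonly double[] s_rgUpper = { 0.0, 0.5, 1.0, double.PositiveInfinity };

        // Return the index of the first bucket whose range contains dfVal.
        static int FindBucket(double dfVal)
        {
            for (int i = 0; i < s_rgUpper.Length; i++)
            {
                if (dfVal <= s_rgUpper[i])
                    return i;
            }
            return s_rgUpper.Length - 1;
        }

        public static void Main()
        {
            double[] rgBottom = { -0.2, 0.7, 1.3 };
            float[] rgTop = new float[rgBottom.Length * s_rgUpper.Length];

            for (int i = 0; i < rgBottom.Length; i++)
            {
                int nIdx = FindBucket(rgBottom[i]);
                rgTop[i * s_rgUpper.Length + nIdx] = 1.0f;  // one-hot: a single 1 per input value
            }

            // Prints 1,0,0,0,0,0,1,0,0,0,0,1 for the three inputs.
            Console.WriteLine(string.Join(",", rgTop));
        }
    }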
 
The BucketCollection contains a set of Buckets.
 
int Count
Returns the number of Buckets.
 
int Add(double fVal)
Finds the correct Bucket and adds the value to it.
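
The excerpts above rely only on Count and Add. A minimal, self-contained sketch of that contract, assuming evenly spaced bucket ranges over a fixed [min, max) interval (the real BucketCollection may build and store its ranges differently), could look like this:

    using System;

    public class SimpleBucketCollection
    {
        readonly double m_dfMin;
        readonly double m_dfStep;
        readonly int[] m_rgCounts;   // per-bucket item counts

        public SimpleBucketCollection(double dfMin, double dfMax, int nBuckets)
        {
            m_dfMin = dfMin;
            m_dfStep = (dfMax - dfMin) / nBuckets;
            m_rgCounts = new int[nBuckets];
        }

        // Number of buckets (mirrors BucketCollection.Count).
        public int Count
        {
            get { return m_rgCounts.Length; }
        }

        // Find the bucket containing fVal, add the value to it and return its
        // index (mirrors the 'int Add(double fVal)' contract used by OneHotLayer).
        public int Add(double fVal)
        {
            int nIdx = (int)((fVal - m_dfMin) / m_dfStep);
            nIdx = Math.Max(0, Math.Min(Count - 1, nIdx));  // clamp out-of-range values
            m_rgCounts[nIdx]++;
            return nIdx;
        }
    }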
 
The Log class provides general output in text form.
 
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
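
The excerpts above use CHECK_EQ as a guard during setup. Conceptually it behaves like the hypothetical helper below (a sketch of the contract only; the real Log may report failures differently):

    // Hypothetical stand-in for Log.CHECK_EQ: abort with the message when the values differ.
    static void CheckEq(double df1, double df2, string str)
    {
        if (df1 != df2)
            throw new Exception("CHECK_EQ failed: " + str);
    }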
 
The Utility class provides general utility functions.
 
The BlobCollection contains a list of Blobs.
 
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
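
In the Reshape excerpt above, the computed one-hot shape would then be applied to the top blobs with a call along these lines (a sketch; the actual call site is not part of the excerpt):

    colTop.Reshape(rgTopShape.ToArray());   // every top blob takes the bucket count at m_nAxis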
 
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
 
An interface for the units of computation which can be composed into a Net.
 
Log m_log
Specifies the Log for output.
 
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
 
void convert(BlobCollection< T > col)
Convert a collection of blobs from / to half size.
 
float convertF(T df)
Converts a generic to a float value.
 
LayerParameter.LayerType m_type
Specifies the Layer type.
 
The OneHotLayer is a layer for converting real values into a one-hot vector, where a 1 is placed within the vector at the index of the bucket into which the value falls.
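
For example, with m_nAxis = 1, a bottom blob of shape (N, 1) holding one real value per item and a bucket collection containing 8 buckets, Reshape produces a top blob of shape (N, 8), and forward writes a single 1 into each row at the index of the bucket containing the corresponding bottom value.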
 
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the input.
 
override void dispose()
Releases all GPU and host resources used by the Layer.
 
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
 
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: onehot
 
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
 
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: input.
 
OneHotLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The OneHotLayer constructor
 
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
The Forward computation.
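
A usage sketch assembled only from the members listed above. The Setup/Forward call pattern, the LayerParameter constructor overload and the BlobCollection.Add calls follow the usual MyCaffe layer flow and are assumptions here; the cuda, log and blob objects are presumed to exist already:

    // Assumed to exist already: CudaDnn<float> cuda, Log log,
    // Blob<float> blobInput (shape N x 1) and Blob<float> blobOneHot.
    LayerParameter p = new LayerParameter(LayerParameter.LayerType.ONEHOT);
    OneHotLayer<float> layer = new OneHotLayer<float>(cuda, log, p);

    BlobCollection<float> colBottom = new BlobCollection<float>();
    BlobCollection<float> colTop = new BlobCollection<float>();
    colBottom.Add(blobInput);
    colTop.Add(blobOneHot);

    layer.Setup(colBottom, colTop);    // runs LayerSetUp and Reshape
    layer.Forward(colBottom, colTop);  // fills blobOneHot with the one-hot encoding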
 
Specifies the base parameter for all layers.
 
OneHotParameter onehot_param
Returns the parameter set when initialized with LayerType.ONEHOT
 
LayerType
Specifies the layer type.
 
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
 
The MyCaffe.common namespace contains common MyCaffe classes.
 
The MyCaffe.fillers namespace contains all fillers including the Filler class.
 
The MyCaffe.layers.nt namespace contains all Neural Transfer related layers.
 
The MyCaffe.param namespace contains parameters used to create models.
 
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.