using System.Collections.Generic;

// Ring-buffer state for the stored training batches.
int m_nCurrentBatchIdx = 0;
bool m_bBufferFull = false;
int m_nBatchDataCount = 0;
// Constructor: cache the maximum number of stored batches and create the
// workspace blob used for distance comparisons (no diff needed).
m_nMaxBatches = p.knn_param.max_stored_batches;
m_blobCompare = new common.Blob<T>(m_cuda, m_log, false);

// setup_internal_blobs: register the comparison blob with the collection.
col.Add(m_blobCompare);
// LayerSetUp: both 'data' and 'label' bottoms are required.
m_log.CHECK_EQ(2, colBottom.Count, "There should be two bottom items: data (embeddings) and labels.");
m_nBatchSize = colBottom[0].shape(0);

// Create the stored-batch blobs (creation details elided in this excerpt).
for (int i = 0; i < m_nMaxBatches; i++)
{
    // ...
}

for (int i = 0; i < colBottom.Count; i++)
{
    // ...
}

// Reshape: size each stored (data, label) blob pair like the bottoms.
for (int i = 0; i < m_nMaxBatches; i++)
{
    m_colBlobs[1 + (i * 2 + 0)].ReshapeLike(colBottom[0]);

    if (colBottom.Count > 1)
        m_colBlobs[1 + (i * 2 + 1)].ReshapeLike(colBottom[1]);
}

// The comparison blob holds a single embedding; its count is the vector dimension.
List<int> rgShape = Utility.Clone<int>(colBottom[0].shape());
rgShape[0] = 1; // compare one item at a time
m_blobCompare.Reshape(rgShape);
m_nVectorDim = m_blobCompare.count();

// The top outputs one value per class for each item in the batch.
rgShape = new List<int>() { m_nBatchSize, m_nNumOutput, 1, 1 };

m_nBatchDataCount = (m_bBufferFull) ? m_nMaxBatches : (m_nCurrentBatchIdx + 1);
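The 1 + (i * 2 + 0) indexing above interleaves the stored data and label blobs after the first parameter blob, which appears to hold the stored-batch count. A minimal sketch of that layout (the helper class and names below are illustrative, not part of the layer):

// Illustrative only: maps a stored-batch index to its position in m_colBlobs,
// assuming blob 0 holds the batch count and each batch i contributes an
// interleaved (data, label) pair.
static class KnnBlobLayout
{
    public static int DataIndex(int nBatch)  { return 1 + (nBatch * 2 + 0); } // e.g. batch 3 -> blob 7
    public static int LabelIndex(int nBatch) { return 1 + (nBatch * 2 + 1); } // e.g. batch 3 -> blob 8
}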
// forward_test: runs during the Phase.TEST cycle.
int nDataCount = (int)fDataCount; // number of stored batches available

m_log.CHECK_EQ(colBottom.Count, 2, "The KNN Layer is used for testing and expects both the 'data' and 'label' bottom items.");

// Index every stored item by its label as (batch index, item index) pairs.
Dictionary<int, List<Tuple<int, int>>> rgData = new Dictionary<int, List<Tuple<int, int>>>();
float[] rgFullSet = new float[m_nBatchSize * m_nNumOutput];

for (int i = 0; i < nDataCount; i++)
{
    for (int j = 0; j < rgLabels1.Length; j++)
    {
        int nLabel = (int)rgLabels1[j];

        if (!rgData.ContainsKey(nLabel))
            rgData.Add(nLabel, new List<Tuple<int, int>>());

        rgData[nLabel].Add(new Tuple<int, int>(i, j));
    }
}

float[] rgLabels = convertF(colBottom[1].update_cpu_data());
Stopwatch sw = new Stopwatch();

for (int i = 0; i < m_nBatchSize; i++)
{
    int nLabel = (int)rgLabels[i];
    Dictionary<int, float> rgKDist = new Dictionary<int, float>();

    foreach (KeyValuePair<int, List<Tuple<int, int>>> kvItem in rgData.OrderBy(p => p.Key))
    {
        List<float> rgDist = new List<float>();

        foreach (Tuple<int, int> offset in kvItem.Value)
        {
            // Pick the stored data blob for this batch (see the layout above).
            Blob<T> blobData = m_colBlobs[1 + (offset.Item1 * 2 + 0)];

            // Subtract the stored embedding from the current one on the GPU,
            // then take the Euclidean distance (sqrt of the sum of squares).
            m_cuda.sub(m_blobCompare.count(), colBottom[0].gpu_data, blobData.gpu_data, m_blobCompare.mutable_gpu_data, i * m_nVectorDim, offset.Item2 * m_nVectorDim);
            float fDist1 = m_cuda.dot_float(m_blobCompare.count(), m_blobCompare.gpu_data, m_blobCompare.gpu_data); // squared distance via dot product
            float fDist = (float)Math.Sqrt(convertF(m_blobCompare.sumsq_data()));
            rgDist.Add(fDist);
        }

        // Average the k smallest distances for this class.
        rgDist.Sort();
        int k = (m_nK <= 0 || m_nK > rgDist.Count) ? rgDist.Count : m_nK;
        float fTotal = 0;

        for (int j = 0; j < k; j++)
        {
            fTotal += rgDist[j];
        }

        float fAveDist = fTotal / k;
        rgKDist.Add(kvItem.Key, fAveDist);
    }

    // Min-max normalize the per-class average distances into [0, 1].
    List<KeyValuePair<int, float>> rgKDistSorted = rgKDist.OrderBy(p => p.Key).ToList();
    float fMax = rgKDistSorted.Max(p => p.Value);
    float fMin = rgKDistSorted.Min(p => p.Value);

    for (int j = 0; j < rgKDistSorted.Count; j++)
    {
        float fVal = (rgKDistSorted[j].Value - fMin) / (fMax - fMin);
        rgFullSet[i * m_nNumOutput + j] = fVal;
    }

    // Report progress roughly once per second.
    if (sw.Elapsed.TotalMilliseconds > 1000)
    {
        double dfPct = (double)i / (double)m_nBatchSize;
        m_log.WriteLine("KNN testing cycle at " + dfPct.ToString("P") + "...");
        sw.Restart();
    }
}

colTop[0].mutable_cpu_data = convert(rgFullSet.ToArray());
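Stripped of the GPU plumbing, the per-item computation in forward_test reduces to: compute the Euclidean distance to every stored embedding, average the k smallest per class, then min-max normalize across classes. A CPU-only sketch of that logic (the class and method names are hypothetical, not the layer's API):

using System;
using System.Collections.Generic;
using System.Linq;

static class KnnSketch
{
    // For one query embedding, return the min-max normalized average distance
    // to the k nearest stored embeddings of each class (lower = closer).
    public static float[] ClassDistances(float[] rgQuery, Dictionary<int, List<float[]>> rgStoredByClass, int nK)
    {
        var rgAve = new List<float>();

        foreach (var kv in rgStoredByClass.OrderBy(p => p.Key))
        {
            // Euclidean distance to every stored item of this class, ascending.
            List<float> rgDist = kv.Value
                .Select(v => (float)Math.Sqrt(rgQuery.Zip(v, (a, b) => (a - b) * (a - b)).Sum()))
                .OrderBy(d => d)
                .ToList();

            int k = (nK <= 0 || nK > rgDist.Count) ? rgDist.Count : nK;
            rgAve.Add(rgDist.Take(k).Average());
        }

        // Min-max normalize so the closest class approaches 0 (assumes at
        // least two distinct per-class distances).
        float fMin = rgAve.Min();
        float fMax = rgAve.Max();
        return rgAve.Select(f => (f - fMin) / (fMax - fMin)).ToArray();
    }
}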
// forward_save: during training, copy the current batch into the circular
// batch storage held in m_colBlobs.
m_log.CHECK_EQ(2, colBottom.Count, "When training, the bottom must have both 'data' and 'labels'.");

// Wrap the write index and mark the buffer full once capacity is reached.
if (m_nCurrentBatchIdx == m_nMaxBatches)
{
    m_bBufferFull = true;
    m_nCurrentBatchIdx = 0;
}

m_cuda.copy(colBottom[0].count(), colBottom[0].gpu_data, m_colBlobs[1 + (m_nCurrentBatchIdx * 2 + 0)].mutable_gpu_data);
m_cuda.copy(colBottom[1].count(), colBottom[1].gpu_data, m_colBlobs[1 + (m_nCurrentBatchIdx * 2 + 1)].mutable_gpu_data);
m_nCurrentBatchIdx++;

double dfCount = (m_bBufferFull) ? m_nMaxBatches : m_nCurrentBatchIdx;

return m_bBufferFull;
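The save path is a plain ring buffer: write at the current index, and once the index reaches capacity, mark the buffer full and wrap to zero. The same logic in isolation (a standalone sketch, not the layer's code):

// Standalone sketch mirroring forward_save's ring-buffer behavior.
class BatchRingBuffer<T>
{
    private readonly T[][] m_rgBatches;
    private int m_nIdx = 0;
    private bool m_bFull = false;

    public BatchRingBuffer(int nMaxBatches)
    {
        m_rgBatches = new T[nMaxBatches][];
    }

    // Store one batch; returns true once the buffer has wrapped at least once,
    // mirroring forward_save's return of m_bBufferFull.
    public bool Save(T[] rgBatch)
    {
        if (m_nIdx == m_rgBatches.Length)
        {
            m_bFull = true;
            m_nIdx = 0;
        }

        m_rgBatches[m_nIdx] = rgBatch;
        m_nIdx++;

        return m_bFull;
    }

    // Number of valid batches, matching dfCount above.
    public int Count
    {
        get { return m_bFull ? m_rgBatches.Length : m_nIdx; }
    }
}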
// backward: the KNN layer does not back-propagate.
if (rgbPropagateDown[0])
    throw new NotImplementedException();
The Log class provides general output in text form.
void WriteLine(string str, bool bOverrideEnabled=false, bool bHeader=false, bool bError=false, bool bDisable=false)
Write a line of output.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
The Utility class provides general utility functions.
The BlobCollection contains a list of Blobs.
void Add(Blob<T> b)
Add a new Blob to the collection.
void SetData(double df)
Set all blob data to the value specified.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
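A typical interaction with the members above might look like the following (a hedged sketch; the Blob constructor arguments are assumed from the layer code earlier on this page, and cuda and log are assumed to exist):

// Sketch using only the BlobCollection members documented above.
BlobCollection<float> col = new BlobCollection<float>();
col.Add(new Blob<float>(cuda, log));      // add a blob to the collection
col.Reshape(new int[] { 32, 128, 1, 1 }); // reshape every blob it holds
col.SetData(0);                           // set all blob data to one value
int nCount = col.Count;                   // number of blobs held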
The Blob is the main holder of data that moves through the Layers of the Net.
void SetData(T[] rgData, int nCount=-1, bool bSetCount=true)
Sets a number of items within the Blob's data.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use the Reshape(rgShape) overload instead.
T[] update_cpu_data()
Update the CPU data by transferring the GPU data over to the Host.
string Name
Get/set the name of the Blob.
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void convert(BlobCollection<T> col)
Convert a collection of blobs from/to half size.
float convertF(T df)
Converts a generic to a float value.
Phase m_phase
Specifies the Phase under which the Layer is run.
CudaDnn<T> m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
BlobCollection<T> m_colBlobs
Specifies the learnable parameter Blobs of the Layer.
override int MinBottomBlobs
Returns the minimum number of required bottom (input) Blobs: data
override void Reshape(BlobCollection<T> colBottom, BlobCollection<T> colTop)
Reshape the bottom (input) and top (output) blobs.
override int MaxBottomBlobs
Returns the maximum number of required bottom (input) Blobs: data, label
override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
Setup the layer.
void forward_test(BlobCollection<T> colBottom, BlobCollection<T> colTop)
Computes the forward calculation, run during the Phase.TEST cycle to find the closest averaged distance per class.
override void forward(BlobCollection<T> colBottom, BlobCollection<T> colTop)
Computes the forward calculation.
override void backward(BlobCollection<T> colTop, List<bool> rgbPropagateDown, BlobCollection<T> colBottom)
Not implemented - the KNN Layer does not perform backward.
override int MinTopBlobs
Returns the minimum number of required top (output) Blobs: knn
override void dispose()
Releases all GPU and host resources used by the Layer.
bool forward_save(BlobCollection<T> colBottom, BlobCollection<T> colTop)
Save the data in the batch storage.
override void setup_internal_blobs(BlobCollection<T> col)
Derivative layers should add all internal blobs to the 'col' provided.
KnnLayer(CudaDnn<T> cuda, Log log, LayerParameter p)
The KnnLayer constructor.
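Putting these members together, constructing and preparing the layer might look like this (a sketch; the LayerParameter constructor form and the runtime objects cuda, log, colBottom and colTop are assumptions):

// Sketch: configure a KNN layer parameter and instantiate the layer.
LayerParameter p = new LayerParameter(LayerParameter.LayerType.KNN);
p.name = "knn1";
p.knn_param.max_stored_batches = 10; // capacity of the batch ring buffer

KnnLayer<float> layer = new KnnLayer<float>(cuda, log, p);
layer.LayerSetUp(colBottom, colTop); // expects two bottoms: data, label
layer.Reshape(colBottom, colTop);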
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
List<bool> propagate_down
Specifies whether or not the LayerParameter (or portions of it) should be backpropagated.
KnnParameter knn_param
Returns the parameter set when initialized with LayerType.KNN
Phase phase
Specifies the Phase for which this LayerParameter is run.
LayerType
Specifies the layer type.
The MyCaffe.basecode namespace contains all generic types used throughout MyCaffe.
Phase
Defines the Phase under which to run a Net.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers.beta namespace contains all beta stage layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.