using System.Collections.Generic;
// The internal blobs are named after the layer, e.g.:
m_blobDiffAP.Name = m_param.name + ".positive delta";
// dispose(): release the GPU and host resources held by each internal blob.
if (m_blobDiffAP != null)
{
    m_blobDiffAP.Dispose();
    m_blobDiffAP = null;
}

if (m_blobDiffSqAP != null)
{
    m_blobDiffSqAP.Dispose();
    m_blobDiffSqAP = null;
}

if (m_blobDistSqAP != null)
{
    m_blobDistSqAP.Dispose();
    m_blobDistSqAP = null;
}

if (m_blobDiffAN != null)
{
    m_blobDiffAN.Dispose();
    m_blobDiffAN = null;
}

if (m_blobDiffSqAN != null)
{
    m_blobDiffSqAN.Dispose();
    m_blobDiffSqAN = null;
}

if (m_blobDistSqAN != null)
{
    m_blobDistSqAN.Dispose();
    m_blobDistSqAN = null;
}

if (m_blobDiffPN != null)
{
    m_blobDiffPN.Dispose();
    m_blobDiffPN = null;
}

if (m_blobSumVec != null)
{
    m_blobSumVec.Dispose();
    m_blobSumVec = null;
}

if (m_blobLossVec != null)
{
    m_blobLossVec.Dispose();
    m_blobLossVec = null;
}

if (m_blobWork != null)
{
    m_blobWork.Dispose();
    m_blobWork = null;
}

if (m_blobPreGenTargetsPos != null)
{
    m_blobPreGenTargetsPos.Dispose();
    m_blobPreGenTargetsPos = null;
}

if (m_blobPreGenTargetsNeg != null)
{
    m_blobPreGenTargetsNeg.Dispose();
    m_blobPreGenTargetsNeg = null;
}
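The dispose-and-null pattern above repeats for every internal blob. A hypothetical helper, not part of the MyCaffe API and shown only as a sketch, could factor it out:

// Hypothetical helper: dispose a blob and clear the reference in one call.
private void dispose_blob(ref Blob<T> b)
{
    if (b != null)
    {
        b.Dispose();
        b = null;
    }
}

With such a helper, dispose() reduces to one line per blob, e.g. dispose_blob(ref m_blobDiffAP);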
// setup_internal_blobs(): expose the internal blobs through the collection provided.
col.Add(m_blobDiffAP);
col.Add(m_blobDiffSqAP);
col.Add(m_blobDistSqAP);
col.Add(m_blobDiffAN);
col.Add(m_blobDiffSqAN);
col.Add(m_blobDistSqAN);
col.Add(m_blobDiffPN);
col.Add(m_blobSumVec);
col.Add(m_blobLossVec);

if (m_blobPreGenTargetsPos != null)
    col.Add(m_blobPreGenTargetsPos);

if (m_blobPreGenTargetsNeg != null)
    col.Add(m_blobPreGenTargetsNeg);
// LayerSetUp(): when a fifth bottom blob is present, name the pre-generated target blobs.
base.LayerSetUp(colBottom, colTop);

if (colBottom.Count == 5)
{
    m_blobPreGenTargetsNeg.Name = "pregen neg";
    m_blobPreGenTargetsPos.Name = "pregen pos";
}
// Reshape(): the anchor, positive and negative inputs must share one shape;
// the working blobs are then sized from the batch size and embedding dimension.
base.Reshape(colBottom, colTop);

m_log.CHECK(Utility.Compare<int>(colBottom[0].shape(), colBottom[1].shape()), "Inputs must have the same dimension.");
m_log.CHECK(Utility.Compare<int>(colBottom[0].shape(), colBottom[2].shape()), "Inputs must have the same dimension.");

m_blobDiffAP.ReshapeLike(colBottom[0]);

int nNum = colBottom[0].num;
int nDim = colBottom[0].count(1);
m_blobSumVec.Reshape(nDim, 1, 1, 1);
m_blobWork.Reshape(nNum, 1, 1, 1);

// The loss is a scalar, so the top is reshaped with an empty shape list.
List<int> rgLossShape = new List<int>();
colTop[0].Reshape(rgLossShape);

if (m_blobPreGenTargetsNeg != null)
{
    // ...
}

if (m_blobPreGenTargetsPos != null)
{
    // ...
}
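As a concrete illustration of the reshaping above (the shapes and variable names are assumed for the example only): with a batch of 32 triplets of 128-dimensional embeddings, each of the first three bottoms is a 32x128 blob.

// Illustrative only: a 32-triplet batch of 128-dimensional embeddings.
Blob<float> anchor = new Blob<float>(cuda, log);
anchor.Reshape(32, 128, 1, 1);   // positive and negative match this shape.
// After TripletLossLayer.Reshape: m_blobSumVec is 128x1x1x1, m_blobWork is
// 32x1x1x1, and colTop[0] is reshaped with an empty shape list, i.e. a scalar loss.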
// loadPreGenTargets(): track, per label, the remaining candidate negative labels
// and draw from them at random without replacement.
int nLblDim = lbl.count(1);
int nLblNum = tgt.num;
int nDim = tgt.count(1);
Random rand = new Random();
List<int> rgLabelVals = new List<int>();
Dictionary<int, List<int>> rgrgLabelSel = new Dictionary<int, List<int>>();

for (int i = 0; i < tgt.num; i++)
{
    // ...
}

for (int i = 0; i < nNum; i++)
{
    int nLabel = (int)rgLabels[i * nLblDim];
    // ...

    // Refill the candidate list with all other labels once it runs empty.
    if (rgrgLabelSel[nLabel].Count == 0)
    {
        for (int l = 0; l < rgLabelVals.Count; l++)
        {
            if (rgLabelVals[l] != nLabel)
                rgrgLabelSel[nLabel].Add(rgLabelVals[l]);
        }
    }

    // Pick a random negative label and remove it so it is not reused until the refill.
    int nLabelIdx = rand.Next(rgrgLabelSel[nLabel].Count);
    int nLabelX = rgrgLabelSel[nLabel][nLabelIdx];
    rgrgLabelSel[nLabel].Remove(nLabelX);
    // ...
}
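The fragments above amount to sampling, for each anchor label, a negative label without replacement, refilling the candidate list when it empties. A self-contained sketch of just that scheme (the helper name is hypothetical; it assumes using System and using System.Collections.Generic):

// Draw a label different from nLabel without replacement; refill when exhausted.
static int SampleNegativeLabel(int nLabel, List<int> rgLabelVals,
    Dictionary<int, List<int>> rgrgLabelSel, Random rand)
{
    if (!rgrgLabelSel.ContainsKey(nLabel))
        rgrgLabelSel.Add(nLabel, new List<int>());

    // Refill the candidate list with every other label value.
    if (rgrgLabelSel[nLabel].Count == 0)
    {
        foreach (int nVal in rgLabelVals)
        {
            if (nVal != nLabel)
                rgrgLabelSel[nLabel].Add(nVal);
        }
    }

    int nIdx = rand.Next(rgrgLabelSel[nLabel].Count);
    int nLabelX = rgrgLabelSel[nLabel][nIdx];
    rgrgLabelSel[nLabel].Remove(nLabelX);

    return nLabelX;
}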
// forward(): compute the triplet loss over the anchor, positive and negative bottoms.
m_log.CHECK_GE(colBottom.Count, 4, "The bottom must have at least 4 items: anchor, positives, negatives and label.");

int nCount = colBottom[0].count();
int nNum = colBottom[0].num;
int nDim = colBottom[0].count(1);
long hAnchor = colBottom[0].gpu_data;
long hPositive = colBottom[1].gpu_data;
long hNegative = colBottom[2].gpu_data;

m_blobWork.Reshape(nNum, 1, 1, 1);

m_log.CHECK_EQ(colBottom.Count, 4, "Currently, external targets such as centroids are not supported.");

// Anchor-to-positive difference: diffAP = anchor - positive.
m_cuda.sub(nCount, hAnchor, hPositive, m_blobDiffAP.mutable_gpu_data);
// ...
double dfMargin = m_dfAlpha;
// ...
dfLoss /= (nNum * 2.0);
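For reference, the quantity this forward pass evaluates is the standard triplet loss with margin alpha (m_dfAlpha here); the squared Euclidean distances are an assumption suggested by the DiffSq/DistSq blob names, and the final division matches the 1/(2N) factor:

L = \frac{1}{2N} \sum_{i=1}^{N} \max\left(0,\; \lVert a_i - p_i \rVert_2^2 - \lVert a_i - n_i \rVert_2^2 + \alpha \right)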
// backward(): compute the error gradient w.r.t. each propagated bottom.
int nCount = colBottom[0].count();
int nNum = colBottom[0].num;
double dfDiff = convertD(colTop[0].GetDiff(0));
double dfAlpha = dfDiff / (double)nNum;
long hAnchor = colBottom[0].gpu_data;
long hPositive = colBottom[1].gpu_data;
long hNegative = colBottom[2].gpu_data;

if (rgbPropagateDown[0])
{
    // ... (anchor gradient)
}

if (rgbPropagateDown[1])
{
    // Positive gradient: (positive - anchor), masked by the per-triplet loss vector.
    m_cuda.sub(nCount, hPositive, hAnchor, m_blobDiffAP.mutable_gpu_diff);
    m_cuda.mul(nCount, m_blobLossVec.gpu_data, m_blobDiffAP.gpu_diff, colBottom[1].mutable_gpu_diff);
}

if (rgbPropagateDown[2])
{
    // ... (negative gradient)
}
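Differentiating that loss explains the backward fragments: each gradient carries the 1/N scale (dfAlpha = dfDiff / nNum), and the multiplication by m_blobLossVec zeroes triplets whose loss term is inactive. Under the same assumptions as above:

\frac{\partial L}{\partial a_i} = \frac{1}{N}\,(n_i - p_i)\,\mathbf{1}[\ell_i > 0], \quad
\frac{\partial L}{\partial p_i} = \frac{1}{N}\,(p_i - a_i)\,\mathbf{1}[\ell_i > 0], \quad
\frac{\partial L}{\partial n_i} = \frac{1}{N}\,(a_i - n_i)\,\mathbf{1}[\ell_i > 0]

where \ell_i is the per-triplet loss term; the middle expression matches the positive branch shown above, which computes (positive - anchor) and masks it with m_blobLossVec.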
The Log class provides general output in text form.
void CHECK(bool b, string str)
Test a flag for true.
void CHECK_EQ(double df1, double df2, string str)
Test whether one number is equal to another.
void CHECK_GE(double df1, double df2, string str)
Test whether one number is greater than or equal to another.
The Utility class provides general utility functions.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
void SetData(double df)
Set all blob data to the value specified.
int Count
Returns the number of items in the collection.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
The Blob is the main holder of data that moves through the Layers of the Net.
void SetData(T[] rgData, int nCount=-1, bool bSetCount=true)
Sets a number of items within the Blob's data.
long mutable_gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use the shape-based Reshape overload instead.
void scale_data(double df)
Scale the data by a scaling factor.
T[] update_cpu_data()
Update the CPU data by transferring the GPU data over to the Host.
int count()
Returns the total number of items in the Blob.
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
string Name
Get/set the name of the Blob.
long gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
virtual void Dispose(bool bDisposing)
Releases all resources used by the Blob (including both GPU and Host).
int num
DEPRECATED; legacy shape accessor num: use shape(0) instead.
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
float convertF(T df)
Converts a generic to a float value.
double convertD(T df)
Converts a generic to a double value.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
LayerParameter.LayerType m_type
Specifies the Layer type.
The LossLayer provides an interface for Layers that take two blobs as input, usually (1) prediction...
TripletLoss Layer - this is the triplet loss layer used to calculate the triplet loss and gradients u...
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t the inputs.
TripletLossLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The TripletLossLayer constructor.
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
override int ExactNumBottomBlobs
Returns -1, since the number of bottom blobs is variable.
override int MinBottomBlobs
Returns the minimum number of bottom blobs: anchor, positive, negative, label
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override bool AllowForceBackward(int nBottomIdx)
Returns true for all inputs except the labels, since the loss value should be propagated back to the data inputs.
override void setup_internal_blobs(BlobCollection< T > col)
Derived layers should add all of their internal blobs to the 'col' provided.
override int MaxBottomBlobs
Returns the maximum number of bottom blobs: anchor, positive, negative, label, centroids (from decode...
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Computes the forward calculation.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: loss
void loadPreGenTargets(Blob< T > lbl, Blob< T > tgt, Blob< T > tgtNeg, Blob< T > tgtPos)
Loads the pre-gen targets; made public only for testing.
override void dispose()
Releases all GPU and host resources used by the Layer.
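Putting the documented members together, a minimal construction sketch; the alpha property on TripletLossParameter and the public Setup/Forward entry points are assumptions about the surrounding MyCaffe API, not confirmed by this listing:

// Minimal usage sketch (assumes an initialized CudaDnn<float> cuda, Log log,
// and bottom/top BlobCollections holding anchor, positive, negative and label).
LayerParameter p = new LayerParameter(LayerParameter.LayerType.TRIPLET_LOSS);
p.name = "triplet";
p.triplet_loss_param.alpha = 0.5;       // margin; property name assumed.

TripletLossLayer<float> layer = new TripletLossLayer<float>(cuda, log, p);
layer.Setup(colBottom, colTop);         // runs LayerSetUp and Reshape.
layer.Forward(colBottom, colTop);       // colTop[0] now holds the scalar loss.
layer.Dispose();                        // release GPU and host resources.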
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
TripletLossParameter triplet_loss_param
Returns the parameter set when initialized with LayerType.TRIPLET_LOSS
LayerType
Specifies the layer type.
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
The MyCaffe.layers.beta namespace contains all beta stage layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.