using System.Collections.Generic;
using System.Runtime.InteropServices;
Blob<T> m_blobSensorySigmoidW = null;
Blob<T> m_blobSensoryActivationW = null;
Blob<T> m_blobSensoryActivationW1 = null;
Blob<T> m_blobSensoryActivationRev = null;
Blob<T> m_blobSensoryNumeratorW = null;
Blob<T> m_blobSensoryDenominatorW = null;
Blob<T> m_blobSensoryNumeratorW1 = null;
Blob<T> m_blobSensoryDenominatorW1 = null;
int[] m_rgShape = new int[] { 1, 1, 1, 1 };
List<int> rgShape = new List<int>() { nStateSize };
rgShape.Add(nStateSize);
addWeight(blobs, m_colWtsAccum, "erev", rgShape);
rgShape[0] = nSensorySize;
addWeight(blobs, m_colWtsAccum, "sensory_erev", rgShape);
addWeight(blobs, m_colWtsAccum, "input_w", nSensorySize, 1.0);
addWeight(blobs, m_colWtsAccum, "input_b", nSensorySize, 0.0);
m_blobVPre = new Blob<T>(cuda, log);
m_blobWork = new Blob<T>(cuda, log);
m_blobSensoryActivationW.Name = m_param.name + ".sensory_activation_w";
m_blobSensoryActivationW1.Name = m_param.name + ".sensory_activation_w1";
m_blobSensoryActivationRev.Name = m_param.name + ".sensory_activation_erev";
m_blobSensoryDenominatorW.Name = m_param.name + ".sensory_denominator_w";
m_blobSensoryNumeratorW1.Name = m_param.name + ".sensory_numerator_w1";
m_blobSensoryDenominatorW1.Name = m_param.name + ".sensory_denominator_w1";
m_colVPre[i].Name = m_param.name + ".vpre." + i.ToString();
m_colCmt[i].Name = m_param.name + ".cmt." + i.ToString();
m_colMues[i].Name = m_param.name + ".mues." + i.ToString();
m_colSigmoidW[i].Name = m_param.name + ".sigmoid_w." + i.ToString();
m_colActivationW[i].Name = m_param.name + ".activation_w." + i.ToString();
m_colActivationW1.Add(new Blob<T>(cuda, log));
m_colActivationW1[i].Name = m_param.name + ".activation_w1." + i.ToString();
m_colActivationRev.Add(new Blob<T>(cuda, log));
m_colActivationRev[i].Name = m_param.name + ".activation_rev." + i.ToString();
m_colNumeratorW[i].Name = m_param.name + ".numerator_w." + i.ToString();
m_colDenominatorW.Add(new Blob<T>(cuda, log));
m_colDenominatorW[i].Name = m_param.name + ".denominator_w." + i.ToString();
m_colNumerator1[i].Name = m_param.name + ".numerator1." + i.ToString();
m_colNumerator2[i].Name = m_param.name + ".numerator2." + i.ToString();
m_colNumerator[i].Name = m_param.name + ".numerator." + i.ToString();
m_colDenominator[i].Name = m_param.name + ".denominator." + i.ToString();
blobAcc.Name = strName + "_acc";
blobsAcc.Add(blobAcc);
blobAcc.Name = strName + "_acc";
blobsAcc.Add(blobAcc);
List<int> rgShape = new List<int>() { nSize };
blobAcc.Name = strName + "_acc";
blobsAcc.Add(blobAcc);
dispose_internal_blobs();
clear_internal_blobs();
private void dispose_internal_blobs(bool bSetToNull = true)
dispose(ref m_blobSensorySigmoidW);
dispose(ref m_blobSensoryActivationW);
dispose(ref m_blobSensoryActivationW1);
dispose(ref m_blobSensoryActivationRev);
dispose(ref m_blobSensoryNumeratorW);
dispose(ref m_blobSensoryDenominatorW);
dispose(ref m_blobSensoryNumeratorW1);
dispose(ref m_blobSensoryDenominatorW1);
dispose(ref m_colVPre, bSetToNull);
dispose(ref m_colCmt, bSetToNull);
dispose(ref m_colMues, bSetToNull);
dispose(ref m_colSigmoidW, bSetToNull);
dispose(ref m_colActivationW, bSetToNull);
dispose(ref m_colActivationW1, bSetToNull);
dispose(ref m_colActivationRev, bSetToNull);
dispose(ref m_colNumeratorW, bSetToNull);
dispose(ref m_colDenominatorW, bSetToNull);
dispose(ref m_colNumerator, bSetToNull);
dispose(ref m_colNumerator1, bSetToNull);
dispose(ref m_colNumerator2, bSetToNull);
dispose(ref m_colDenominator, bSetToNull);
private void clear_internal_blobs()
m_colSigmoidW.Clear();
m_colActivationW.Clear();
m_colActivationW1.Clear();
m_colActivationRev.Clear();
m_colNumeratorW.Clear();
m_colDenominatorW.Clear();
m_colNumerator.Clear();
m_colNumerator1.Clear();
m_colNumerator2.Clear();
m_colDenominator.Clear();
dispose_internal_blobs(false);
col.Add(blobSensorySigmoidW);
col.Add(blobSensoryActivationW);
blobSensoryActivationW1.Name = m_param.name + ".sensory_activation_w1";
col.Add(blobSensoryActivationW1);
blobSensoryActivationRev.Name = m_param.name + ".sensory_activation_erev";
col.Add(blobSensoryActivationRev);
col.Add(blobSensoryNumeratorW);
blobSensoryDenominatorW.Name = m_param.name + ".sensory_denominator_w";
col.Add(blobSensoryDenominatorW);
col.Add(blobSensoryNumeratorW1);
blobSensoryDenominatorW1.Name = m_param.name + ".sensory_denominator_w1";
col.Add(blobSensoryDenominatorW1);
col.Add(blobSigmoidW_a);
blobActivationW_a.Name = m_param.name + ".activation_w." + i.ToString();
col.Add(blobActivationW_a);
blobActivationW1_a.Name = m_param.name + ".activation_w1." + i.ToString();
col.Add(blobActivationW1_a);
blobActivationRev_a.Name = m_param.name + ".activation_rev." + i.ToString();
col.Add(blobActivationRev_a);
col.Add(blobNumeratorW_a);
blobDenominatorW_a.Name = m_param.name + ".denominator_w." + i.ToString();
col.Add(blobDenominatorW_a);
col.Add(blobNumerator1_a);
col.Add(blobNumerator2_a);
col.Add(blobNumerator_a);
blobDenominator_a.Name = m_param.name + ".denominator." + i.ToString();
col.Add(blobDenominator_a);
m_blobVPre = col[nIdx];
m_blobWork = col[nIdx];
m_blobInputs = col[nIdx];
m_blobMues = col[nIdx];
m_blobSensorySigmoidW = col[nIdx];
m_blobSensoryActivationW = col[nIdx];
m_blobSensoryActivationW1 = col[nIdx];
m_blobSensoryActivationRev = col[nIdx];
m_blobSensoryNumeratorW = col[nIdx];
m_blobSensoryDenominatorW = col[nIdx];
m_blobSensoryNumeratorW1 = col[nIdx];
m_blobSensoryDenominatorW1 = col[nIdx];
m_blobCmt = col[nIdx];
m_blobTs = col[nIdx];
m_colSigmoidW.Clear();
m_colActivationW.Clear();
m_colActivationW1.Clear();
m_colActivationRev.Clear();
m_colNumeratorW.Clear();
m_colDenominatorW.Clear();
m_colNumerator1.Clear();
m_colNumerator2.Clear();
m_colNumerator.Clear();
m_colDenominator.Clear();
m_colVPre.Add(col[nIdx]);
m_colCmt.Add(col[nIdx]);
m_colMues.Add(col[nIdx]);
m_colSigmoidW.Add(col[nIdx]);
m_colActivationW.Add(col[nIdx]);
m_colActivationW1.Add(col[nIdx]);
m_colActivationRev.Add(col[nIdx]);
m_colNumeratorW.Add(col[nIdx]);
m_colDenominatorW.Add(col[nIdx]);
m_colNumerator1.Add(col[nIdx]);
m_colNumerator2.Add(col[nIdx]);
m_colNumerator.Add(col[nIdx]);
m_colDenominator.Add(col[nIdx]);
base.ReInitializeParameters(target);
addBtmTop(m_blobInputs, m_blobSensoryActivationW);
m_sigmoid.Setup(m_colBtm, m_colTop);
m_blobInputs.ReshapeLike(colBottom[0]);
m_rgShape[0] = m_blobInputs.num;
m_rgShape[1] = m_blobInputs.channels;
m_blobSensorySigmoidW.Reshape(m_rgShape);
m_blobSensoryActivationW.Reshape(m_rgShape);
m_blobSensoryActivationW1.Reshape(m_rgShape);
m_blobSensoryActivationRev.Reshape(m_rgShape);
m_rgShape[0] = m_blobSensoryActivationW.num;
m_rgShape[1] = m_blobSensoryActivationW.height;
m_rgShape[2] = m_blobSensoryActivationW.width;
m_blobSensoryNumeratorW.Reshape(m_rgShape);
m_blobSensoryDenominatorW.Reshape(m_rgShape);
m_blobSensoryNumeratorW1.Reshape(m_rgShape);
m_blobSensoryDenominatorW1.Reshape(m_rgShape);
m_colSigmoidW.Reshape(m_rgShape);
m_colActivationW.Reshape(m_rgShape);
m_colActivationW1.Reshape(m_rgShape);
m_colActivationRev.Reshape(m_rgShape);
m_colNumeratorW.Reshape(m_rgShape);
m_colDenominatorW.Reshape(m_rgShape);
m_colNumerator.Reshape(m_rgShape);
m_colNumerator1.Reshape(m_rgShape);
m_colDenominator.Reshape(m_rgShape);
m_colNumerator2.Reshape(m_rgShape);
map_inputs_fwd(colBottom[0], m_blobInputs);
addBtmTop(m_blobInputs, colTop[0]);
m_colBtm.Add(colBottom[1]);
m_colBtm.Add(colBottom[2]);
ode_solver_fwd(m_colBtm, m_colTop);
addBtmTop(m_blobInputs, colTop[0]);
m_colBtm.Add(colBottom[1]);
m_colBtm.Add(colBottom[2]);
ode_solver_bwd(m_colBtm, m_colTop);
map_inputs_bwd(colBottom[0], m_blobInputs);
private void op_fwd(OP op, Blob<T> btm1, Blob<T> btm2, Blob<T> top, int nC = 0, int nN1 = 0, int nSD1 = 0, int nN2 = 0, int nSD2 = 0)
int nN = Math.Max(nN1, nN2);
int nSD = Math.Max(nSD1, nSD2);
int nCount = nN * nC * nSD;
if (nCount != top.count())
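op_fwd treats its two operands as (N, C, SD) volumes that broadcast to a top of shape (max(nN1, nN2), nC, max(nSD1, nSD2)), as the count check above shows. A CPU reference for that broadcasting, written under the assumption, inferred from the call sites below, that any axis of size 1 is repeated to match the other operand; this is an illustration, not the CUDA kernel.

using System;

// Minimal CPU sketch of a broadcast channel multiply over (N, C, SD) volumes.
static float[] ChannelMul(float[] a, int nN1, int nC, int nSD1,
                          float[] b, int nN2, int nSD2)
{
    int nN = Math.Max(nN1, nN2);
    int nSD = Math.Max(nSD1, nSD2);
    float[] top = new float[nN * nC * nSD];
    for (int n = 0; n < nN; n++)
    {
        for (int c = 0; c < nC; c++)
        {
            for (int sd = 0; sd < nSD; sd++)
            {
                // An axis of size 1 indexes its only element (the broadcast).
                float fA = a[((n % nN1) * nC + c) * nSD1 + (sd % nSD1)];
                float fB = b[((n % nN2) * nC + c) * nSD2 + (sd % nSD2)];
                top[(n * nC + c) * nSD + sd] = fA * fB;
            }
        }
    }
    return top;
}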
private void op_bwd_local(OP op, Blob<T> btm1, Blob<T> btm2, Blob<T> top, int nC = 0, int nN1 = 0, int nSD1 = 0, int nN2 = 0, int nSD2 = 0)
int nN = Math.Max(nN1, nN2);
int nSD = Math.Max(nSD1, nSD2);
int nCount = nN * nC * nSD;
if (nCount != top.count())
if (nCount != m_blobWork.count())
if (op == OP.MUL || op == OP.DIV)
int nNa = nN1 * nC * nSD1;
int nCa = nSD2 / nSD1;
int nSDa = nC * nSD1;
int nNc = m_blobWork.num;
int nSDc = top.count(2);
else if (op == OP.MUL)
else if (op == OP.SUB)
int nNb = Math.Max(nN1, nN2);
int nCb = nSD1 / nSD2;
int nSDb = nC * nSD2;
private void op_bwd(OP op, Blob<T> btm1, Blob<T> btm2, Blob<T> top, int nC = 0, int nN1 = 0, int nSD1 = 0, int nN2 = 0, int nSD2 = 0, int nCy = 0, int nSDy = 0)
int nN = Math.Max(nN1, nN2);
int nSD = Math.Max(nSD1, nSD2);
int nCount = nN * nC * nSD;
if (nCount != top.count())
if (nCount != m_blobWork.count())
m_cuda.channel_op_bwd(op, top.count(), nC, nN1, nSD1, nN2, nSD2, nCy, nSDy, btm1.gpu_data, btm2.gpu_data, top.gpu_data, btm1.mutable_gpu_diff, btm2.mutable_gpu_diff, top.gpu_diff, m_blobWork.mutable_gpu_data);
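channel_op_bwd must undo the forward broadcasting: the gradient of an operand that was expanded along an axis is the sum of the top gradient (times the other operand, for MUL) over that axis. A small sketch of that rule for top = a * b where b is shared across the batch (nN2 == 1), as happens with the weight operands below; illustration of the math only, not the kernel.

using System;

// Gradient of the broadcast operand b in top = a * b, b shared over N.
static void MulBwdBroadcastB(float[] a, float[] topDiff, float[] bDiff,
                             int nN, int nC, int nSD)
{
    Array.Clear(bDiff, 0, bDiff.Length);   // bDiff holds nC * nSD elements
    for (int n = 0; n < nN; n++)
        for (int c = 0; c < nC; c++)
            for (int sd = 0; sd < nSD; sd++)
                bDiff[c * nSD + sd] +=
                    topDiff[(n * nC + c) * nSD + sd] * a[(n * nC + c) * nSD + sd];
}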
blobMues = m_colMues[t];
addBtmTop(m_blobX, blobTop);
m_sigmoid.Forward(m_colBtm, m_colTop);
blobMues = m_colMues[t];
addBtmTop(m_blobX, blobTop);
m_sigmoid.Backward(m_colTop, new List<bool>() { true }, m_colBtm);
op_bwd_local(OP.MUL, blobMues, blobSigma, m_blobX, blobMues.channels, blobMues.num, blobMues.count(2), 1, blobSigma.channels);
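These fragments drive the shared m_sigmoid layer through an input blob m_blobX built from mues and sigma. From the op_bwd_local(OP.MUL, blobMues, blobSigma, m_blobX, ...) call it appears that m_blobX = mues * sigma with mues = v_pre - mu, which matches the usual LTC gate sigmoid((v_pre - mu) * sigma); take the exact wiring as an inference from the fragments, not a quote of the source.

using System;

// Assumed LTC synapse gate implied by the fragments above.
static double Gate(double vPre, double mu, double sigma)
{
    double x = (vPre - mu) * sigma;      // m_blobX = mues * sigma
    return 1.0 / (1.0 + Math.Exp(-x));   // m_sigmoid.Forward
}
// Backward: with s = Gate(...), ds/dx = s * (1 - s); the chain rule then sends
// x's gradient to mues (scaled by sigma) and to sigma (scaled by mues), which
// is what the op_bwd_local(OP.MUL, ...) call above distributes.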
op_fwd(OP.MUL, btm, blobs[(int)WEIGHT.INPUT_WT], top);
op_fwd(OP.ADD, top, blobs[(int)WEIGHT.INPUT_BIAS], top);
op_bwd(OP.ADD, top, blobs[(int)WEIGHT.INPUT_BIAS], top);
op_bwd(OP.MUL, btm, blobs[(int)WEIGHT.INPUT_WT], top);
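map_inputs_fwd/bwd therefore implement a per-sensory-channel affine map, top = btm * input_w + input_b, broadcast over the batch; with the input_w = 1.0 and input_b = 0.0 initialization from the addWeight calls above, it starts out as the identity. A CPU sketch of the forward (MapInputsFwd is an illustrative name):

// Per-channel affine input map, weights broadcast over the batch.
static void MapInputsFwd(float[] x, float[] w, float[] b, float[] y, int nN, int nC)
{
    for (int n = 0; n < nN; n++)
        for (int c = 0; c < nC; c++)
            y[n * nC + c] = x[n * nC + c] * w[c] + b[c];
}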
Blob<T> blobInputs = colBtm[0];
int nN = blobInputs.num;
int nCount = blobInputs.count();
addBtmTop(blobInputs, m_blobSensorySigmoidW);
m_colBtm.Add(blobs[(int)WEIGHT.SENSORY_MU]);
m_colBtm.Add(blobs[(int)WEIGHT.SENSORY_SIGMA]);
sigmoid_fwd(m_colBtm, m_colTop);
op_fwd(OP.MUL, m_blobSensorySigmoidW, blobs[(int)WEIGHT.SENSORY_W], m_blobSensoryActivationW, nC, nN, nSD, 1, nSD);
op_fwd(OP.MUL, m_blobSensoryActivationW, blobs[(int)WEIGHT.SENSORY_EREV], m_blobSensoryActivationRev, nC, nN, nSD, 1, nSD);
op_fwd(OP.DIV, blobs[(int)WEIGHT.CM], m_blobTs, m_blobCmt, 1, 1, nSD, nN, 1);
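Before the unfold loop, the sensory pass runs once: gate the inputs, scale by sensory_w, multiply by sensory_erev, and reduce over the sensory axis into per-state numerator/denominator terms, while cmt = cm / ts folds the per-sample timestep into the membrane capacitance. A plain-C# reference of that pass under the gate formulation sketched earlier; the flattened (c, s) weight indexing is an assumption, and SensoryFwd is an illustrative name.

using System;

// Sensory pre-pass: nN batch samples, nC sensory inputs, nSD state units.
static void SensoryFwd(float[] input, float[] mu, float[] sigma, float[] w,
                       float[] erev, int nN, int nC, int nSD,
                       float[] num, float[] den)
{
    for (int n = 0; n < nN; n++)
    {
        for (int s = 0; s < nSD; s++)
        {
            float fNum = 0, fDen = 0;
            for (int c = 0; c < nC; c++)
            {
                // sigmoid((input - sensory_mu) * sensory_sigma) * sensory_w
                float fX = (input[n * nC + c] - mu[c * nSD + s]) * sigma[c * nSD + s];
                float fAct = w[c * nSD + s] / (1.0f + (float)Math.Exp(-fX));
                fNum += fAct * erev[c * nSD + s];  // like the channel_sum of actRev
                fDen += fAct;                      // like the channel_sum of actW
            }
            num[n * nSD + s] = fNum;   // m_blobSensoryNumeratorW
            den[n * nSD + s] = fDen;   // m_blobSensoryDenominatorW
        }
    }
}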
addBtmTop(m_colVPre[t], m_colSigmoidW[t]);
m_colBtm.Add(blobs[(int)WEIGHT.MU]);
m_colBtm.Add(blobs[(int)WEIGHT.SIGMA]);
sigmoid_fwd(m_colBtm, m_colTop, t);
op_fwd(OP.MUL, m_colSigmoidW[t], blobs[(int)WEIGHT.W], m_colActivationW[t], nSD, nN, nSD, 1, nSD);
op_fwd(OP.MUL, m_colActivationW[t], blobs[(int)WEIGHT.EREV], m_colActivationRev[t], nSD, nN, nSD, 1, nSD);
m_cuda.channel_sum(nCount, m_colActivationRev[t].num, m_colActivationRev[t].channels, m_colActivationRev[t].count(2), m_colActivationRev[t].gpu_data, m_colNumeratorW[t].mutable_gpu_data, true);
m_cuda.channel_sum(nCount, m_colActivationW[t].num, m_colActivationW[t].channels, m_colActivationW[t].count(2), m_colActivationW[t].gpu_data, m_colDenominatorW[t].mutable_gpu_data, true);
op_fwd(OP.ADD, m_colNumeratorW[t], m_blobSensoryNumeratorW, m_colNumeratorW[t]);
op_fwd(OP.ADD, m_colDenominatorW[t], m_blobSensoryDenominatorW, m_colDenominatorW[t]);
op_fwd(OP.MUL, m_colCmt[t], m_colVPre[t], m_colNumerator1[t]);
op_fwd(OP.MUL, blobs[(int)WEIGHT.GLEAK], blobs[(int)WEIGHT.VLEAK], m_colNumerator2[t], nSD, 1, 1, 1, 1);
op_fwd(OP.ADD, m_colNumerator1[t], m_colNumerator2[t], m_colNumerator[t], nSD, nN, 1, 1, 1);
op_fwd(OP.ADD, m_colNumerator[t], m_colNumeratorW[t], m_colNumerator[t]);
op_fwd(OP.ADD, m_colCmt[t], blobs[(int)WEIGHT.GLEAK], m_colDenominator[t], nSD, nN, 1, 1, 1);
op_fwd(OP.ADD, m_colDenominator[t], m_colDenominatorW[t], m_colDenominator[t]);
op_fwd(OP.DIV, m_colNumerator[t], m_colDenominator[t], m_blobVPre);
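Read together, the numerator/denominator ops implement one semi-implicit (fused) LTC update per unfold: v_pre is replaced by (cmt * v_pre + gleak * vleak + sum of w_act * erev) / (cmt + gleak + sum of w_act). A scalar sketch for a single state unit; placing epsilon in the denominator is an assumption based on the documented epsilon parameter (default 1e-08), not a quote of the kernel.

// One semi-implicit LTC unfold for one state unit. wNum = sum of w_act * erev
// over synapses, wDen = sum of w_act; both already include the sensory terms.
static double OdeStep(double vPre, double cmt, double gleak, double vleak,
                      double wNum, double wDen, double epsilon)
{
    double numerator = cmt * vPre + gleak * vleak + wNum;
    double denominator = cmt + gleak + wDen;
    return numerator / (denominator + epsilon);  // epsilon guards divide-by-zero
}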
m_cuda.add(dst[(int)wt].count(), src[(int)wt].gpu_diff, dst[(int)wt].gpu_data, dst[(int)wt].mutable_gpu_data);
m_cuda.copy(dst[(int)wt].count(), src[(int)wt].gpu_data, dst[(int)wt].mutable_gpu_diff);
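accumulateGrad sums a weight's freshly computed diff into the accumulator blob's data on each unfold, and copyGrad later moves the accumulated total back into the learnable blob's diff once the loop finishes. The same accumulate-then-copy pattern on plain arrays:

using System;

static void AccumulateGrad(float[] srcDiff, float[] accData)
{
    for (int i = 0; i < accData.Length; i++)
        accData[i] += srcDiff[i];                   // the m_cuda.add(...) above
}
static void CopyGrad(float[] accData, float[] dstDiff)
{
    Array.Copy(accData, dstDiff, dstDiff.Length);   // the m_cuda.copy(...) above
}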
Blob<T> blobInputs = colBtm[0];
int nN = blobInputs.num;
int nCount = blobInputs.count();
foreach (Blob<T> blob in m_colWtsAccum)
op_bwd_local(OP.DIV, m_colNumerator[t], m_colDenominator[t], colTop[0]);
op_bwd_local(OP.DIV, m_colNumerator[t], m_colDenominator[t], m_blobVPre);
op_bwd(OP.ADD, m_colDenominator[t], m_colDenominatorW[t], m_colDenominator[t]);
op_bwd(OP.ADD, m_colCmt[t], blobs[(int)WEIGHT.GLEAK], m_colDenominator[t], nSD, nN, 1, 1, 1);
accumulateGrad(blobs, m_colWtsAccum, WEIGHT.GLEAK);
op_bwd(OP.ADD, m_colNumerator[t], m_colNumeratorW[t], m_colNumerator[t]);
op_bwd(OP.ADD, m_colNumerator1[t], m_colNumerator2[t], m_colNumerator[t], nSD, nN, 1, 1, 1);
op_bwd(OP.MUL, blobs[(int)WEIGHT.GLEAK], blobs[(int)WEIGHT.VLEAK], m_colNumerator2[t], nSD, 1, 1, 1, 1);
accumulateGrad(blobs, m_colWtsAccum, WEIGHT.GLEAK);
accumulateGrad(blobs, m_colWtsAccum, WEIGHT.VLEAK);
op_bwd(OP.MUL, m_colCmt[t], m_colVPre[t], m_colNumerator1[t]);
op_bwd(OP.ADD, m_colDenominatorW[t], m_blobSensoryDenominatorW1, m_colDenominatorW[t]);
op_bwd(OP.ADD, m_colNumeratorW[t], m_blobSensoryNumeratorW1, m_colNumeratorW[t]);
m_cuda.channel_sum(m_colActivationRev[t].count(), m_colActivationRev[t].num, m_colActivationRev[t].channels, m_colActivationRev[t].count(2), m_colActivationRev[t].mutable_gpu_diff, m_colNumeratorW[t].gpu_diff, true, DIR.BWD);
m_cuda.channel_sum(m_colActivationW1[t].count(), m_colActivationW1[t].num, m_colActivationW1[t].channels, m_colActivationW1[t].count(2), m_colActivationW1[t].mutable_gpu_diff, m_colDenominatorW[t].gpu_diff, true, DIR.BWD);
op_bwd_local(OP.MUL, m_colActivationW[t], blobs[(int)WEIGHT.EREV], m_colActivationRev[t], nSD, nN, nSD, 1, nSD);
accumulateGrad(blobs, m_colWtsAccum, WEIGHT.EREV);
m_cuda.add(m_colActivationW[t].count(), m_colActivationW1[t].gpu_diff, m_colActivationW[t].gpu_diff, m_colActivationW1[t].mutable_gpu_diff);
op_bwd_local(OP.MUL, m_colSigmoidW[t], blobs[(int)WEIGHT.W], m_colActivationW1[t], nSD, nN, nSD, 1, nSD);
accumulateGrad(blobs, m_colWtsAccum, WEIGHT.W);
addBtmTop(m_colVPre[t], m_colSigmoidW[t]);
m_colBtm.Add(blobs[(int)WEIGHT.MU]);
m_colBtm.Add(blobs[(int)WEIGHT.SIGMA]);
sigmoid_bwd(m_colBtm, m_colTop, t);
accumulateGrad(blobs, m_colWtsAccum, WEIGHT.MU);
accumulateGrad(blobs, m_colWtsAccum, WEIGHT.SIGMA);
copyGrad(m_colWtsAccum, blobs, WEIGHT.GLEAK);
copyGrad(m_colWtsAccum, blobs, WEIGHT.VLEAK);
copyGrad(m_colWtsAccum, blobs, WEIGHT.EREV);
copyGrad(m_colWtsAccum, blobs, WEIGHT.W);
copyGrad(m_colWtsAccum, blobs, WEIGHT.MU);
copyGrad(m_colWtsAccum, blobs, WEIGHT.SIGMA);
op_bwd(OP.DIV, blobs[(int)WEIGHT.CM], m_blobTs, m_blobCmt, 1, 1, nSD, nN, 1, m_blobCmt.channels, m_blobCmt.count(2));
op_bwd_local(OP.MUL, m_blobSensoryActivationW, blobs[(int)WEIGHT.SENSORY_EREV], m_blobSensoryActivationRev, nC, nN, nSD, 1, nSD);
op_bwd_local(OP.MUL, m_blobSensorySigmoidW, blobs[(int)WEIGHT.SENSORY_W], m_blobSensoryActivationW, nC, nN, nSD, 1, nSD);
addBtmTop(blobInputs, m_blobSensorySigmoidW);
m_colBtm.Add(blobs[(int)WEIGHT.SENSORY_MU]);
m_colBtm.Add(blobs[(int)WEIGHT.SENSORY_SIGMA]);
sigmoid_bwd(m_colBtm, m_colTop);
blobVPre.CopyFrom(m_blobVPre, true);
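For context, a hedged sketch of driving the layer end to end: LTC_UNIT layers are created from a LayerParameter and, per the reference below, take three bottom blobs (input, hx, ts) and produce one top blob. The CudaDnn and Log constructor choices here are assumptions of common usage, not quoted from this page.

CudaDnn<float> cuda = new CudaDnn<float>(0);   // device 0 (assumed ctor)
Log log = new Log("test");
LayerParameter p = new LayerParameter(LayerParameter.LayerType.LTC_UNIT);
p.ltc_unit_param.input_size = 8;    // sensory inputs per step
p.ltc_unit_param.hidden_size = 64;  // state units (default 256)
p.ltc_unit_param.ode_unfolds = 6;   // ODE unfolds per step (default 6)
Layer<float> layer = Layer<float>.Create(cuda, log, p, null);
// Setup/Forward then run with colBottom = { input, hx, ts } and one top blob.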
The Log class provides general output in text form.
The BlobCollection contains a list of Blobs.
void Add(Blob< T > b)
Add a new Blob to the collection.
int Count
Returns the number of items in the collection.
void Clear(bool bDispose=false)
Remove all items from the collection.
void ReshapeLike(BlobCollection< T > src)
Reshapes all blobs in the collection to the sizes of the source.
void Reshape(int[] rgShape)
Reshapes all blobs in the collection to the given shape.
void CopyFrom(BlobCollection< T > bSrc, bool bCopyDiff=false)
Copy the data or diff from another BlobCollection into this one.
The Blob is the main holder of data that moves through the Layers of the Net.
int channels
DEPRECATED; legacy shape accessor channels: use shape(1) instead.
void SetData(T[] rgData, int nCount=-1, bool bSetCount=true)
Sets a number of items within the Blob's data.
int height
DEPRECATED; legacy shape accessor height: use shape(2) instead.
int num_axes
Returns the number of axes in the Blob.
long mutable_gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
long mutable_gpu_data
Returns the data GPU handle used by the CudaDnn connection.
void Reshape(int nNum, int nChannels, int nHeight, int nWidth, bool? bUseHalfSize=null)
DEPRECATED; use the Reshape(rgShape) overload instead.
void CopyFrom(Blob< T > src, int nSrcOffset, int nDstOffset, int nCount, bool bCopyData, bool bCopyDiff)
Copy from a source Blob.
void scale_data(double df)
Scale the data by a scaling factor.
int width
DEPRECATED; legacy shape accessor width: use shape(3) instead.
void add_scalar(double dfVal)
Adds a scalar value to the Blob.
int count()
Returns the total number of items in the Blob.
void ReshapeLike(Blob< T > b, bool? bUseHalfSize=null)
Reshape this Blob to have the same shape as another Blob.
string Name
Get/set the name of the Blob.
long gpu_diff
Returns the diff GPU handle used by the CudaDnn connection.
void scale_diff(double df)
Scale the diff by a scaling factor.
void SetDiff(double dfVal, int nIdx=-1)
Either sets all of the diff items in the Blob to a given value, or alternatively only sets a single item at the given index.
int num
DEPRECATED; legacy shape accessor num: use shape(0) instead.
long gpu_data
Returns the data GPU handle used by the CudaDnn connection.
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Abstract Filler class used to fill blobs with values.
void Fill(Blob< T > b)
Fill the blob with values based on the actual filler used.
static Filler< T > Create(CudaDnn< T > cuda, Log log, FillerParameter p)
Create a new Filler instance.
An interface for the units of computation which can be composed into a Net.
Log m_log
Specifies the Log for output.
LayerParameter m_param
Specifies the LayerParameter describing the Layer.
void Backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Given the top Blob error gradients, compute the bottom Blob error gradients.
double Forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Given the bottom (input) Blobs, this function computes the top (output) Blobs and the loss.
CudaDnn< T > m_cuda
Specifies the CudaDnn connection to Cuda.
void Setup(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Implements common Layer setup functionality.
static Layer< T > Create(CudaDnn< T > cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db=null, TransferInput trxinput=null)
Create a new Layer based on the LayerParameter.
LayerParameter.LayerType m_type
Specifies the Layer type.
BlobCollection< T > blobs
Returns the collection of learnable parameter Blobs for the Layer.
The LnnUnitLayer implements the base class to the Cfc and Ltc Unit layers.
bool m_bOwnInternalBlobs
Specifies member variable used to track whether or not the internal blobs are owned by this layer.
The LtcUnitLayer implements the liquid time constant with ODE solver (LTCCell) layer.
override void backward(BlobCollection< T > colTop, List< bool > rgbPropagateDown, BlobCollection< T > colBottom)
Computes the error gradient w.r.t. the LtcUnit value inputs.
override int ExactNumTopBlobs
Returns the exact number of required top (output) Blobs: attn
override void forward(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Forward computation
override void dispose()
Releases all GPU and host resources used by the Layer.
override bool ReInitializeParameters(WEIGHT_TARGET target)
Re-initialize the parameters of the layer.
LtcUnitLayer(CudaDnn< T > cuda, Log log, LayerParameter p)
The LtcUnitLayer constructor.
override void setup_internal_blobs(BlobCollection< T > col)
Derived layers should add all internal blobs to the 'col' provided.
override void SetInternalSharedBlobs(BlobCollection< T > col)
Set the internal shared blobs to a set of external blobs.
override void Reshape(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Reshape the bottom (input) and top (output) blobs.
override BlobCollection< T > CreateInternalSharedBlobs(int nIdx, CudaDnn< T > cuda, Log log)
Create the internal shared blobs used by the layer for a given index.
override int ExactNumBottomBlobs
Returns the exact number of required bottom (input) Blobs: input, hx, ts
override void LayerSetUp(BlobCollection< T > colBottom, BlobCollection< T > colTop)
Setup the layer.
Specifies whether to use the NVIDIA cuDnn version or Caffe version of a given forward/backward operation.
Engine engine
Specifies the Engine in use.
Engine
Defines the type of engine to use.
Specifies the filler parameters used to create each Filler.
double min
Specifies the minimum value to use with the 'uniform' filler.
double max
Specifies the maximum value to use with the 'uniform' filler.
Specifies the base parameter for all layers.
string name
Specifies the name of this LayerParameter.
LtcUnitParameter ltc_unit_param
Returns the parameter set when initialized with LayerType.LTC_UNIT
SigmoidParameter sigmoid_param
Returns the parameter set when initialized with LayerType.SIGMOID
LayerType
Specifies the layer type.
float sensory_sigma_init_min
Specifies the initial sensory_sigma min value (default = 3.0f).
int hidden_size
Specifies the number of hidden units (default = 256).
float mu_init_max
Specifies the initial mu max value (default = 0.8f).
float w_init_max
Specifies the initial w max value (default = 1.0f).
float cm_init_min
Specifies the initial cm min value (default = 0.4f).
float sensory_mu_init_min
Specifies the initial sensory_mu min value (default = 0.3f).
float sigma_init_max
Specifies the initial sigma max value (default = 8.0f).
float sensory_w_init_min
Specifies the initial sensory_w min value (default = 0.001f).
float vleak_init_max
Specifies the initial vleak max value (default = 0.2f).
float sensory_sigma_init_max
Specifies the initial sensory_sigma max value (default = 8.0f).
float sigma_init_min
Specifies the initial sigma min value (default = 3.0f).
float sensory_mu_init_max
Specifies the initial sensory_mu max value (default = 0.8f).
float w_init_min
Specifies the initial w min value (default = 0.001f).
float gleak_init_min
Specifies the initial gleak min value (default = 0.001f).
float epsilon
Specifies the epsilon used to avoid divide by zero (default = 1e-08).
float cm_init_max
Specifies the initial cm max value (default = 0.6f).
float vleak_init_min
Specifies the initial vleak min value (default = -0.2f).
float mu_init_min
Specifies the initial mu min value (default = 0.3f).
int ode_unfolds
Specifies the number of unfolds run by the ode (default = 6).
float sensory_w_init_max
Specifies the initial sensory_w max value (default = 1.0f).
int input_size
Specifies the input size.
float gleak_init_max
Specifies the initial gleak max value (default = 1.0f).
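The min/max pairs above parameterize the uniform initializers for each weight. Continuing the construction sketch earlier, a few of them can be overridden on the parameter object before creating the layer (field names as listed above):

p.ltc_unit_param.w_init_min = 0.001f;     // keep the documented default range
p.ltc_unit_param.w_init_max = 1.0f;
p.ltc_unit_param.gleak_init_min = 0.001f;
p.ltc_unit_param.gleak_init_max = 1.0f;
p.ltc_unit_param.epsilon = 1e-08f;        // divide-by-zero guard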
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
The MyCaffe.common namespace contains common MyCaffe classes.
OP
Defines the operations performed by the channel_op function.
DIR
Defines the direction of data flow.
WEIGHT_TARGET
Defines the type of weight to target in re-initializations.
The MyCaffe.fillers namespace contains all fillers including the Filler class.
The MyCaffe.layers.lnn namespace contains all Liquid Neural Network (LNN) related layers.
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.