MyCaffe  1.12.2.41
Deep learning software for Windows C# programmers.
LayerFactory.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.db.image;
using MyCaffe.param;

namespace MyCaffe.layers.tft
{
    /// <summary>
    /// The LayerFactory is responsible for creating all layers implemented in the MyCaffe.layers.tft namespace.
    /// </summary>
    public class LayerFactory : ILayerCreator
    {
        /// <summary>
        /// Create the layers when using the double base type.
        /// </summary>
        public Layer<double> CreateDouble(CudaDnn<double> cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db)
        {
            switch (p.type)
            {
                case LayerParameter.LayerType.DATA_TEMPORAL:
                    return new DataTemporalLayer<double>(cuda, log, p, evtCancel, db);

                case LayerParameter.LayerType.CATEGORICAL_TRANS:
                    return new CategoricalTransformationLayer<double>(cuda, log, p);

                case LayerParameter.LayerType.CHANNEL_EMBEDDING:
                    return new ChannelEmbeddingLayer<double>(cuda, log, p);

                case LayerParameter.LayerType.GATEADDNORM:
                    return new GateAddNormLayer<double>(cuda, log, p);

                case LayerParameter.LayerType.GLU:
                    return new GluLayer<double>(cuda, log, p);

                case LayerParameter.LayerType.GRN:
                    return new GrnLayer<double>(cuda, log, p);

                case LayerParameter.LayerType.NUMERIC_TRANS:
                    return new NumericTransformationLayer<double>(cuda, log, p);

                case LayerParameter.LayerType.MULTIHEAD_ATTENTION_INTERP:
                    return new MultiHeadAttentionInterpLayer<double>(cuda, log, p);

                case LayerParameter.LayerType.RESHAPE_TEMPORAL:
                    return new ReshapeTemporalLayer<double>(cuda, log, p);

                case LayerParameter.LayerType.VARSELNET:
                    return new VarSetNetLayer<double>(cuda, log, p);

                case LayerParameter.LayerType.QUANTILE_ACCURACY:
                    return new QuantileAccuracyLayer<double>(cuda, log, p);

                case LayerParameter.LayerType.QUANTILE_LOSS:
                    return new QuantileLossLayer<double>(cuda, log, p);

                default:
                    return null;
            }
        }

        /// <summary>
        /// Create the layers when using the float base type.
        /// </summary>
        public Layer<float> CreateSingle(CudaDnn<float> cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db)
        {
            switch (p.type)
            {
                case LayerParameter.LayerType.DATA_TEMPORAL:
                    return new DataTemporalLayer<float>(cuda, log, p, evtCancel, db);

                case LayerParameter.LayerType.CATEGORICAL_TRANS:
                    return new CategoricalTransformationLayer<float>(cuda, log, p);

                case LayerParameter.LayerType.CHANNEL_EMBEDDING:
                    return new ChannelEmbeddingLayer<float>(cuda, log, p);

                case LayerParameter.LayerType.GATEADDNORM:
                    return new GateAddNormLayer<float>(cuda, log, p);

                case LayerParameter.LayerType.GLU:
                    return new GluLayer<float>(cuda, log, p);

                case LayerParameter.LayerType.GRN:
                    return new GrnLayer<float>(cuda, log, p);

                case LayerParameter.LayerType.NUMERIC_TRANS:
                    return new NumericTransformationLayer<float>(cuda, log, p);

                case LayerParameter.LayerType.MULTIHEAD_ATTENTION_INTERP:
                    return new MultiHeadAttentionInterpLayer<float>(cuda, log, p);

                case LayerParameter.LayerType.RESHAPE_TEMPORAL:
                    return new ReshapeTemporalLayer<float>(cuda, log, p);

                case LayerParameter.LayerType.VARSELNET:
                    return new VarSetNetLayer<float>(cuda, log, p);

                case LayerParameter.LayerType.QUANTILE_ACCURACY:
                    return new QuantileAccuracyLayer<float>(cuda, log, p);

                case LayerParameter.LayerType.QUANTILE_LOSS:
                    return new QuantileLossLayer<float>(cuda, log, p);

                default:
                    return null;
            }
        }
    }
}
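For orientation, here is a minimal usage sketch (not taken from the MyCaffe sources) showing the factory invoked directly with the float base type. The CudaDnn(0), Log("...") and LayerParameter(LayerType) constructor overloads, the MyCaffe.layers namespace for Layer<T> and ILayerCreator, and passing null for the database (only DATA_TEMPORAL uses it) are assumptions; in practice MyCaffe constructs these objects itself when building a Net from a model description.

// Minimal usage sketch (assumptions noted above, not from the MyCaffe sources).
using MyCaffe.basecode;
using MyCaffe.common;
using MyCaffe.layers;      // assumed home of Layer<T> and ILayerCreator
using MyCaffe.layers.tft;
using MyCaffe.param;

public static class LayerFactoryExample
{
    public static void Run()
    {
        CudaDnn<float> cuda = new CudaDnn<float>(0);              // assumed overload: device 0
        Log log = new Log("LayerFactory example");                 // assumed overload: named log
        CancelEvent evtCancel = new CancelEvent();
        LayerParameter p = new LayerParameter(LayerParameter.LayerType.GLU); // assumed ctor

        ILayerCreator factory = new LayerFactory();
        // db may be null here; only DATA_TEMPORAL passes it on to the layer.
        Layer<float> layer = factory.CreateSingle(cuda, log, p, evtCancel, null);

        if (layer == null)
            log.WriteLine("Layer type not handled by this factory.");

        layer?.Dispose();
        cuda.Dispose();
    }
}

Note that the factory returns null for any layer type it does not own, which presumably lets the caller fall back to other registered ILayerCreator extensions or to the built-in layers.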
The CancelEvent provides an extension to the manual cancel event that allows for overriding the manua...
Definition: CancelEvent.cs:17
The Log class provides general output in text form.
Definition: Log.cs:13
The CudaDnn object is the main interface to the Low-Level Cuda C++ DLL.
Definition: CudaDnn.cs:969
An interface for the units of computation which can be composed into a Net.
Definition: Layer.cs:31
The QuantileLossLayer computes the quantile loss for real-valued regression tasks.
The CategoricalTransformationLayer implements the transforming/embeddings for the set of categorical ...
The ChannelEmbeddingLayer implements the transforming/embeddings for both the numeric and categorical...
The DataTemporalLayer implements the data layer used to load the temporal data into the model.
The GateAddNormLayer implements the Dropout, Gated Linear Unit layer, LayerNorm while adding in the r...
The GluLayer implements the Gated Linear Unit layer.
Definition: GluLayer.cs:30
The GrnLayer implements the Gated Residual Network (GRN) layer.
Definition: GrnLayer.cs:28
The LayerFactory is responsible for creating all layers implemented in the MyCaffe.layers.tft namespace.
Definition: LayerFactory.cs:20
Layer< float > CreateSingle(CudaDnn< float > cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db)
Create the layers when using the float base type.
Definition: LayerFactory.cs:84
Layer< double > CreateDouble(CudaDnn< double > cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db)
Create the layers when using the double base type.
Definition: LayerFactory.cs:30
The MultiHeadAttentionInterpLayer implements the Multi-head Attention Interpretive Layer.
The NumericTransformationLayer implements the transforming/embeddings for the set of numeric input va...
The QuantileAccuracyLayer implements the Quantile Accuracy Layer used in TFT models.
The ReshapeTemporalLayer implements the temporal data reshaping used within the TFT model.
The VarSetNetLayer implements the Variable Selection Network.
Specifies the base parameter for all layers.
LayerType type
Specifies the type of this LayerParameter.
LayerType
Specifies the layer type.
The ILayerCreator interface is implemented by each MyCaffe.layers.x layer extension dll and is used t...
Definition: Interfaces.cs:19
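The ILayerCreator entry above is truncated; judging from the two factory methods in this file, the interface plausibly exposes at least the following members (a reconstruction, not the verbatim declaration from Interfaces.cs):

// Plausible shape of ILayerCreator, reconstructed from the CreateDouble/CreateSingle
// signatures in LayerFactory.cs above; the verbatim declaration in Interfaces.cs
// may differ or contain additional members.
public interface ILayerCreator
{
    Layer<double> CreateDouble(CudaDnn<double> cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db);
    Layer<float> CreateSingle(CudaDnn<float> cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXDatabaseBase db);
}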
The IXDatabaseBase interface defines the general interface to the in-memory database.
Definition: Interfaces.cs:444
The MyCaffe.basecode contains all generic types used throughout MyCaffe.
Definition: Annotation.cs:12
The MyCaffe.common namespace contains common MyCaffe classes.
Definition: BatchInput.cs:8
The MyCaffe.db.image namespace contains all image database related classes.
Definition: Database.cs:18
The MyCaffe.layers.tft namespace contains all TFT related layers.
Definition: LayerFactory.cs:15
The MyCaffe.param namespace contains parameters used to create models.
The MyCaffe namespace contains the main body of MyCaffe code that closely tracks the C++ Caffe open-source project.
Definition: Annotation.cs:12