24 #ifndef TMVA_NEURAL_NET
25 #define TMVA_NEURAL_NET
41 #include <type_traits>
59 class IPythonInteractive;
94 inline void add(
T value,
double weight = 1.0)
109 double R = Q*weight/tmpWeight;
116 template <
typename ITERATOR>
117 inline void add (ITERATOR itBegin, ITERATOR itEnd)
119 for (ITERATOR it = itBegin; it != itEnd; ++it)
212 template <
typename T>
215 return (
int)(value & flag) != 0;
240 Batch (
typename std::vector<Pattern>::const_iterator itBegin,
typename std::vector<Pattern>::const_iterator itEnd)
260 template <
typename ItSource,
typename ItWeight,
typename ItTarget>
261 void applyWeights (ItSource itSourceBegin, ItSource itSourceEnd, ItWeight itWeight, ItTarget itTargetBegin, ItTarget itTargetEnd);
265 template <
typename ItSource,
typename ItWeight,
typename ItPrev>
266 void applyWeightsBackwards (ItSource itCurrBegin, ItSource itCurrEnd, ItWeight itWeight, ItPrev itPrevBegin, ItPrev itPrevEnd);
272 template <
typename ItValue,
typename ItFunction>
273 void applyFunctions (ItValue itValue, ItValue itValueEnd, ItFunction itFunction);
276 template <
typename ItValue,
typename ItFunction,
typename ItInverseFunction,
typename ItGradient>
277 void applyFunctions (ItValue itValue, ItValue itValueEnd, ItFunction itFunction, ItInverseFunction itInverseFunction, ItGradient itGradient);
281 template <
typename ItSource,
typename ItDelta,
typename ItTargetGradient,
typename ItGradient>
282 void update (ItSource itSource, ItSource itSourceEnd,
283 ItDelta itTargetDeltaBegin, ItDelta itTargetDeltaEnd,
284 ItTargetGradient itTargetGradientBegin,
285 ItGradient itGradient);
289 template <EnumRegularization Regularization,
typename ItSource,
typename ItDelta,
typename ItTargetGradient,
typename ItGradient,
typename ItWeight>
290 void update (ItSource itSource, ItSource itSourceEnd,
291 ItDelta itTargetDeltaBegin, ItDelta itTargetDeltaEnd,
292 ItTargetGradient itTargetGradientBegin,
293 ItGradient itGradient,
350 double momentum = 0.5,
351 size_t repetitions = 10)
368 template <
typename Function,
typename Weights,
typename PassThrough>
369 double operator() (
Function& fitnessFunction, Weights& weights, PassThrough& passThrough);
397 template <
typename ItOutput,
typename ItTruth,
typename ItDelta,
typename ItInvActFnc>
398 double sumOfSquares (ItOutput itOutputBegin, ItOutput itOutputEnd, ItTruth itTruthBegin, ItTruth itTruthEnd, ItDelta itDelta, ItDelta itDeltaEnd, ItInvActFnc itInvActFnc,
double patternWeight);
402 template <
typename ItProbability,
typename ItTruth,
typename ItDelta,
typename ItInvActFnc>
403 double crossEntropy (ItProbability itProbabilityBegin, ItProbability itProbabilityEnd, ItTruth itTruthBegin, ItTruth itTruthEnd, ItDelta itDelta, ItDelta itDeltaEnd, ItInvActFnc itInvActFnc,
double patternWeight);
408 template <
typename ItOutput,
typename ItTruth,
typename ItDelta,
typename ItInvActFnc>
409 double softMaxCrossEntropy (ItOutput itProbabilityBegin, ItOutput itProbabilityEnd, ItTruth itTruthBegin, ItTruth itTruthEnd, ItDelta itDelta, ItDelta itDeltaEnd, ItInvActFnc itInvActFnc,
double patternWeight);
415 template <
typename ItWeight>
617 template <
typename Iterator>
719 template <
typename LAYERDATA>
720 void forward (
const LAYERDATA& prevLayerData, LAYERDATA& currLayerData);
723 template <
typename LAYERDATA>
724 void backward (LAYERDATA& prevLayerData, LAYERDATA& currLayerData);
727 template <
typename LAYERDATA>
745 size_t _convergenceSteps = 15,
size_t _batchSize = 10,
size_t _testRepetitions = 7,
748 double _learningRate = 1
e-5,
double _momentum = 0.3,
749 int _repetitions = 3,
750 bool _multithreading =
true);
765 template <
typename Iterator>
788 virtual void testSample (
double ,
double ,
double ,
double ) {}
814 virtual void drawSample (
const std::vector<double>& ,
const std::vector<double>& ,
const std::vector<double>& ,
double ) {}
827 void create (std::string histoName,
int bins,
double min,
double max,
int bins2,
double min2,
double max2) {
if (
fMonitoring)
fMonitoring->create (histoName, bins, min, max, bins2, min2, max2); }
909 size_t _convergenceSteps = 15,
size_t _batchSize = 10,
size_t _testRepetitions = 7,
912 double _learningRate = 1
e-5,
double _momentum = 0.3,
int _repetitions = 3,
913 bool _useMultithreading =
true)
914 :
Settings (name, _convergenceSteps, _batchSize, _testRepetitions, _factorWeightDecay,
915 _regularization, _eMinimizerType, _learningRate, _momentum, _repetitions, _useMultithreading)
993 void testSample (
double error,
double output,
double target,
double weight);
999 void setWeightSums (
double sumOfSigWeights,
double sumOfBkgWeights);
1000 void setResultComputation (std::string _fileNameNetConfig, std::string _fileNameResult, std::vector<Pattern>* _resultPatternContainer);
1112 template <
typename WeightsType,
typename DropProbabilities>
1114 const DropProbabilities& drops,
1115 bool inverse =
false);
1125 template <
typename Minimizer>
1126 double train (std::vector<double>& weights,
1127 std::vector<Pattern>& trainPattern,
1128 const std::vector<Pattern>& testPattern,
1136 template <
typename Minimizer>
1137 void preTrain (std::vector<double>& weights,
1138 std::vector<Pattern>& trainPattern,
1139 const std::vector<Pattern>& testPattern,
1152 template <
typename Iterator,
typename Minimizer>
1154 Iterator itPatternBegin, Iterator itPatternEnd,
1158 size_t numWeights (
size_t trainingStartLayer = 0)
const;
1159 size_t numNodes (
size_t trainingStartLayer = 0)
const;
1161 template <
typename Weights>
1162 std::vector<double>
compute (
const std::vector<double>& input,
const Weights& weights)
const;
1164 template <
typename Weights,
typename PassThrough>
1165 double operator() (PassThrough& settingsAndBatch,
const Weights& weights)
const;
1167 template <
typename Weights,
typename PassThrough,
typename OutContainer>
1168 double operator() (PassThrough& settingsAndBatch,
const Weights& weights,
ModeOutput eFetch, OutContainer& outputContainer)
const;
1170 template <
typename Weights,
typename Gradients,
typename PassThrough>
1171 double operator() (PassThrough& settingsAndBatch, Weights& weights, Gradients& gradients)
const;
1173 template <
typename Weights,
typename Gradients,
typename PassThrough,
typename OutContainer>
1174 double operator() (PassThrough& settingsAndBatch, Weights& weights, Gradients& gradients,
ModeOutput eFetch, OutContainer& outputContainer)
const;
1177 template <
typename LayerContainer,
typename DropContainer,
typename ItWeight,
typename ItGradient>
1181 ItWeight itWeightBegin,
1182 ItWeight itWeightEnd,
1183 ItGradient itGradientBegin,
1184 ItGradient itGradientEnd,
1185 size_t& totalNumWeights)
const;
1187 template <
typename LayerContainer>
1189 std::vector<LayerData>& layerData)
const;
1192 template <
typename LayerContainer,
typename LayerPatternContainer>
1194 LayerPatternContainer& layerPatternData,
1195 std::vector<double>& valuesMean,
1196 std::vector<double>& valuesStdDev,
1197 size_t trainFromLayer)
const;
1199 template <
typename OutputContainer>
1202 template <
typename OutputContainer>
1203 void fetchOutput (
const std::vector<LayerData>& layerPatternData, OutputContainer& outputContainer)
const;
1206 template <
typename ItWeight>
1208 std::vector<LayerData>& lastLayerData,
1210 ItWeight itWeightBegin,
1211 ItWeight itWeightEnd)
const;
1213 template <
typename Settings>
1214 void backPropagate (std::vector<std::vector<LayerData>>& layerPatternData,
1216 size_t trainFromLayer,
1217 size_t totalNumWeights)
const;
1225 template <
typename LayerContainer,
typename PassThrough,
typename ItWeight,
typename ItGradient,
typename OutContainer>
1227 ItWeight itWeightBegin, ItWeight itWeightEnd,
1228 ItGradient itGradientBegin, ItGradient itGradientEnd,
1229 size_t trainFromLayer,
1230 OutContainer& outputContainer,
bool fetchOutput)
const;
1242 template <
typename Container,
typename ItWeight>
1246 ItWeight itWeightEnd,
1247 double patternWeight,
1248 double factorWeightDecay,
1265 template <
typename OutIterator>
1267 OutIterator itWeight);
void addPoint(std::string histoName, double x)
for monitoring
void setWeightSums(double sumOfSigWeights, double sumOfBkgWeights)
set the weight sums to be scaled to (preparations for monitoring output)
void testIteration()
callback for monitoring and logging
virtual void startTraining()
ModeErrorFunction m_eErrorFunction
denotes the error function
void setInput(const_iterator_type itInputBegin, const_iterator_type itInputEnd)
change the input iterators
void addLayer(Layer &&layer)
virtual ~ClassificationSettings()
d'tor
virtual void cycle(double progress, TString text)
MinimizerType
< list all the minimizer types
const_iterator_type deltasEnd() const
returns const iterator to the end of the deltas (back-propagation)
void add(ITERATOR itBegin, ITERATOR itEnd)
size_t m_sizeInput
input size of this DNN
std::vector< Layer > & layers()
returns the layers (structure)
void create(std::string histoName, int bins, double min, double max, int bins2, double min2, double max2)
for monitoring
std::vector< char > DropContainer
void setDropOut(Iterator begin, Iterator end, size_t _dropRepetitions)
set the drop-out configuration (layer-wise)
std::string m_fileNameNetConfig
std::shared_ptr< std::function< double(double)> > m_inverseActivationFunction
stores the inverse activation function
iterator_type deltasBegin()
returns iterator to the begin of the deltas (back-propagation)
bool isFlagSet(T flag, T value)
double m_maxProgress
current limits for the progress bar
Steepest Gradient Descent algorithm (SGD)
void forwardBatch(const LayerContainer &_layers, LayerPatternContainer &layerPatternData, std::vector< double > &valuesMean, std::vector< double > &valuesStdDev, size_t trainFromLayer) const
bool useMultithreading() const
is multithreading turned on?
std::pair< iterator_type, iterator_type > begin_end_type
std::vector< std::function< double(double)> > function_container_type
void add(T value, double weight=1.0)
ModeOutputValues modeOutputValues() const
get the mode-output-value (direct, probabilities)
double errorFunction(LayerData &layerData, Container truth, ItWeight itWeight, ItWeight itWeightEnd, double patternWeight, double factorWeightDecay, EnumRegularization eRegularization) const
computes the error of the DNN
bool hasDropOut() const
has this layer drop-out turned on?
void applyFunctions(ItValue itValue, ItValue itValueEnd, ItFunction itFunction)
void backward(LAYERDATA &prevLayerData, LAYERDATA &currLayerData)
backward application of the weights (back-propagation of the error)
double learningRate() const
get the learning rate
int repetitions() const
how many steps have to be gone until the batch is changed
const_iterator_type valuesEnd() const
returns iterator to the end of the (node) values
void setErrorFunction(ModeErrorFunction eErrorFunction)
which error function is to be used
iterator_type valueGradientsEnd()
returns iterator to the end of the gradients of the node values
std::shared_ptr< Monitoring > fMonitoring
const_iterator end() const
const_dropout_iterator dropOut() const
return the begin of the drop-out information
function_container_type::iterator function_iterator_type
std::shared_ptr< std::function< double(double)> > activationFunction() const
double trainCycle(Minimizer &minimizer, std::vector< double > &weights, Iterator itPatternBegin, Iterator itPatternEnd, Settings &settings, DropContainer &dropContainer)
executes one training cycle
double factorWeightDecay() const
get the weight-decay factor
size_t inputSize() const
input size of the DNN
ModeOutputValues operator|(ModeOutputValues lhs, ModeOutputValues rhs)
void plot(std::string histoName, std::string options, int pad, EColor color)
for monitoring
iterator_type m_itGradientBegin
iterator to the first gradient of this layer in the gradient vector
bool m_hasGradients
does this layer have gradients (only if in training mode)
void update(ItSource itSource, ItSource itSourceEnd, ItDelta itTargetDeltaBegin, ItDelta itTargetDeltaEnd, ItTargetGradient itTargetGradientBegin, ItGradient itGradient)
update the gradients
ModeOutputValues m_eModeOutput
stores the output mode (DIRECT, SIGMOID, SOFTMAX)
void setOutputSize(size_t sizeOutput)
set the output size of the DNN
virtual void testIteration()
callback for monitoring and logging
bool m_isInputLayer
is this layer an input layer
std::shared_ptr< std::function< double(double)> > inverseActivationFunction() const
fetch the inverse activation function for this layer
EnumFunction m_activationFunctionType
void applyWeights(ItSource itSourceBegin, ItSource itSourceEnd, ItWeight itWeight, ItTarget itTargetBegin, ItTarget itTargetEnd)
iterator_type valuesEnd()
returns iterator to the end of the (node) values
size_t testRepetitions() const
how often is the test data tested
size_t m_sizeOutput
output size of this DNN
Settings(TString name, size_t _convergenceSteps=15, size_t _batchSize=10, size_t _testRepetitions=7, double _factorWeightDecay=1e-5, TMVA::DNN::EnumRegularization _regularization=TMVA::DNN::EnumRegularization::NONE, MinimizerType _eMinimizerType=MinimizerType::fSteepest, double _learningRate=1e-5, double _momentum=0.3, int _repetitions=3, bool _multithreading=true)
c'tor
size_t outputSize() const
output size of the DNN
std::vector< double > m_valueGradients
stores the gradients of the values (nodes)
std::vector< double > m_significances
size_t numNodes(size_t trainingStartLayer=0) const
returns the number of nodes in this net
size_t convergenceCount() const
returns the current convergence count
virtual void startTestCycle()
callback for monitoring and logging
size_t m_convergenceSteps
number of steps without improvement to consider the DNN to have converged
std::vector< double > compute(const std::vector< double > &input, const Weights &weights) const
compute the net with the given input and the given weights
container_type::const_iterator const_iterator_type
MinimizerType fMinimizerType
double forward_backward(LayerContainer &layers, PassThrough &settingsAndBatch, ItWeight itWeightBegin, ItWeight itWeightEnd, ItGradient itGradientBegin, ItGradient itGradientEnd, size_t trainFromLayer, OutContainer &outputContainer, bool fetchOutput) const
main NN computation function
size_t dropRepetitions() const
double weightDecay(double error, ItWeight itWeight, ItWeight itWeightEnd, double factorWeightDecay, EnumRegularization eRegularization)
compute the weight decay for regularization (L1 or L2)
std::vector< double > m_dropOut
std::vector< Layer > m_layers
layer-structure-data
void SetIpythonInteractive(IPythonInteractive *fI, bool *fE, UInt_t *M, UInt_t *C)
double momentum() const
get the momentum (e.g. for SGD)
size_t m_batchSize
mini-batch size
const_iterator_type m_itConstWeightBegin
const iterator to the first weight of this layer in the weight vector
function_container_type::const_iterator const_function_iterator_type
EnumFunction activationFunctionType() const
get the activation function type for this layer
ModeOutputValues m_eModeOutputValues
do the output values of this layer have to be transformed somehow (e.g. to probabilities) or returned...
void removeLayer()
remove one layer
const std::vector< Layer > & layers() const
returns the layers (structure)
void setInputSize(size_t sizeInput)
set the input size of the DNN
size_t numWeights(size_t trainingStartLayer=0) const
returns the number of weights in this net
const_iterator_type m_itInputBegin
iterator to the first of the nodes in the input node vector
const_iterator_type valuesBegin() const
returns const iterator to the begin of the (node) values
bool m_hasDropOut
dropOut is turned on?
ClassificationSettings(TString name, size_t _convergenceSteps=15, size_t _batchSize=10, size_t _testRepetitions=7, double _factorWeightDecay=1e-5, EnumRegularization _regularization=EnumRegularization::NONE, size_t _scaleToNumEvents=0, MinimizerType _eMinimizerType=MinimizerType::fSteepest, double _learningRate=1e-5, double _momentum=0.3, int _repetitions=3, bool _useMultithreading=true)
c'tor
std::vector< double > m_prevGradients
vector remembers the gradients of the previous step
const_iterator_type gradientsBegin() const
returns const iterator to the begin of the gradients
void function(const Char_t *name_, T fun, const Char_t *docstring=0)
void startTrainCycle()
action to be done when the training cycle is started (e.g.
void create(std::string histoName, int bins, double min, double max)
for monitoring
void dropOutWeightFactor(WeightsType &weights, const DropProbabilities &drops, bool inverse=false)
set the drop out configuration
std::vector< double > m_output
const_iterator_type weightsBegin() const
returns const iterator to the begin of the weights for this layer
void backPropagate(std::vector< std::vector< LayerData >> &layerPatternData, const Settings &settings, size_t trainFromLayer, size_t totalNumWeights) const
LayerData(const_iterator_type itInputBegin, const_iterator_type itInputEnd, ModeOutputValues eModeOutput=ModeOutputValues::DIRECT)
c'tor of LayerData
virtual ~Settings()
d'tor
iterator_type valuesBegin()
returns iterator to the begin of the (node) values
void setMonitoring(std::shared_ptr< Monitoring > ptrMonitoring)
prepared for monitoring
std::vector< Pattern > * m_pResultPatternContainer
std::vector< double > m_deltas
stores the deltas for the DNN training
void initializeWeights(WeightInitializationStrategy eInitStrategy, OutIterator itWeight)
initialize the weights with the given strategy
ModeOutputValues operator&(ModeOutputValues lhs, ModeOutputValues rhs)
double m_minProgress
current limits for the progress bar
iterator_type gradientsBegin()
returns iterator to the begin of the gradients
Layer(size_t numNodes, EnumFunction activationFunction, ModeOutputValues eModeOutputValues=ModeOutputValues::DIRECT)
c'tor for defining a Layer
container_type::iterator iterator_type
std::vector< Pattern >::const_iterator const_iterator
virtual void endTrainCycle(double)
callback for monitoring and logging
RooCmdArg Minimizer(const char *type, const char *alg=0)
LayerData(LayerData &&other)
move c'tor of LayerData
size_t m_convergenceCount
std::vector< double > container_type
double softMaxCrossEntropy(ItOutput itProbabilityBegin, ItOutput itProbabilityEnd, ItTruth itTruthBegin, ItTruth itTruthEnd, ItDelta itDelta, ItDelta itDeltaEnd, ItInvActFnc itInvActFnc, double patternWeight)
soft-max-cross-entropy error function (for mutual exclusive cross-entropy)
Timer m_timer
timer for monitoring
void endTrainCycle(double)
action to be done when the training cycle is ended (e.g.
double m_factorWeightDecay
void forwardPattern(const LayerContainer &_layers, std::vector< LayerData > &layerData) const
LayerData(const LayerData &other)
copy c'tor of LayerData
auto regularization(const typename Architecture_t::Matrix_t &A, ERegularization R) -> decltype(Architecture_t::L1Regularization(A))
Evaluate the regularization functional for a given weight matrix.
virtual void endTestCycle()
action to be done when the test cycle is ended (e.g.
void modeOutputValues(ModeOutputValues eModeOutputValues)
set the mode-output-value
size_t maxConvergenceCount() const
returns the max convergence count so far
void pads(int numPads)
preparation for monitoring
Settings for the training of the neural net.
virtual void startTrainCycle()
virtual void drawSample(const std::vector< double > &, const std::vector< double > &, const std::vector< double > &, double)
callback for monitoring and logging
std::string m_fileNameResult
WeightInitializationStrategy
weight initialization strategies to be chosen from
size_t numNodes() const
return the number of nodes of this layer
const std::vector< double > & dropFractions() const
ModeErrorFunction
error functions to be chosen from
const_iterator begin() const
std::vector< double > m_localGradients
local gradients for reuse in thread.
Layer defines the layout of a layer.
void preTrain(std::vector< double > &weights, std::vector< Pattern > &trainPattern, const std::vector< Pattern > &testPattern, Minimizer &minimizer, Settings &settings)
pre-training for future use
void applyWeightsBackwards(ItSource itCurrBegin, ItSource itCurrEnd, ItWeight itWeight, ItPrev itPrevBegin, ItPrev itPrevEnd)
size_t batchSize() const
mini-batch size
const_dropout_iterator m_itDropOut
iterator to a container indicating if the corresponding node is to be dropped
Steepest(double learningRate=1e-4, double momentum=0.5, size_t repetitions=10)
c'tor
bool m_hasWeights
does this layer have weights (it does not if it is the input layer)
std::vector< double > m_ams
double stdDev_corr() const
EnumRegularization m_regularization
void clearDropOut()
clear the drop-out-data for this layer
ModeOutputValues outputMode() const
returns the output mode
ModeOutputValues operator&=(ModeOutputValues &lhs, ModeOutputValues rhs)
IPythonInteractive * fInteractive
double gaussDouble(double mean, double sigma)
The Batch class encapsulates one mini-batch.
EnumRegularization regularization() const
some regularization of the DNN is turned on?
double m_beta
internal parameter (momentum)
double train(std::vector< double > &weights, std::vector< Pattern > &trainPattern, const std::vector< Pattern > &testPattern, Minimizer &minimizer, Settings &settings)
start the training
container_type::iterator iterator_type
std::vector< double > m_targets
you should not use this method at all Int_t Int_t Double_t Double_t Double_t e
void fillDropContainer(DropContainer &dropContainer, double dropFraction, size_t numNodes) const
prepare the drop-out-container (select the nodes which are to be dropped out)
void forward(const LAYERDATA &prevLayerData, LAYERDATA &currLayerData)
apply the weights (and functions) in forward direction of the DNN
size_t convergenceSteps() const
how many steps until training is deemed to have converged
container_type computeProbabilities() const
compute the probabilities from the node values
void fetchOutput(const LayerData &lastLayerData, OutputContainer &outputContainer) const
double uniformDouble(double minValue, double maxValue)
std::vector< double > m_values
stores the values of the nodes in this layer
std::vector< double > m_weights
void setResultComputation(std::string _fileNameNetConfig, std::string _fileNameResult, std::vector< Pattern > *_resultPatternContainer)
preparation for monitoring output
size_t m_maxConvergenceCount
std::vector< std::vector< LayerData > > prepareLayerData(LayerContainer &layers, Batch &batch, const DropContainer &dropContainer, ItWeight itWeightBegin, ItWeight itWeightEnd, ItGradient itGradientBegin, ItGradient itGradientEnd, size_t &totalNumWeights) const
double operator()(Function &fitnessFunction, Weights &weights, PassThrough &passThrough)
operator to call the steepest gradient descent algorithm
iterator_type deltasEnd()
returns iterator to the end of the deltas (back-propagation)
double operator()(PassThrough &settingsAndBatch, const Weights &weights) const
execute computation of the DNN for one mini-batch (used by the minimizer); no computation of gradient...
void addLayer(Layer &layer)
add a layer (layout)
This class is needed by JsMVA, and it's a helper class for tracking errors during the training in Jup...
const_iterator_type valueGradientsBegin() const
returns const iterator to the begin of the gradients
std::tuple< double, double > computeError(const Settings &settings, std::vector< LayerData > &lastLayerData, Batch &batch, ItWeight itWeightBegin, ItWeight itWeightEnd) const
const_iterator_type m_itInputEnd
iterator to the end of the nodes in the input node vector
container_type probabilities() const
computes the probabilities from the current node values and returns them
std::shared_ptr< std::function< double(double)> > m_activationFunction
activation function for this layer
Double_t(* Function)(Double_t)
std::shared_ptr< std::function< double(double)> > m_inverseActivationFunction
inverse activation function for this layer
size_t size() const
return the size of the layer
std::shared_ptr< std::function< double(double)> > activationFunction() const
fetch the activation function for this layer
virtual void setProgressLimits(double minProgress=0, double maxProgress=100)
void testSample(double error, double output, double target, double weight)
action to be done after the computation of a test sample (e.g.
virtual void startTestCycle()
action to be done when the test cycle is started (e.g.
MinimizerType minimizerType() const
which minimizer shall be used (e.g. SGD)
size_t m_scaleToNumEvents
virtual void endTestCycle()
callback for monitoring and logging
std::vector< double > m_localWeights
local weights for reuse in thread.
void DrawProgressBar(Int_t, const TString &comment="")
draws progress bar in color or B&W caution:
iterator_type valueGradientsBegin()
returns iterator to the begin of the gradients of the node values
const_iterator m_itBegin
iterator denoting the beginning of the batch
std::vector< double > container_type
void clear()
clear the values and the deltas
Settings for classification, used to distinguish between different function signatures.
void clear(std::string histoName)
for monitoring
virtual void computeResult(const Net &, std::vector< double > &)
callback for monitoring and logging
size_t minError() const
returns the smallest error so far
const_iterator_type deltasBegin() const
returns const iterator to the begin of the deltas (back-propagation)
Batch(typename std::vector< Pattern >::const_iterator itBegin, typename std::vector< Pattern >::const_iterator itEnd)
DropContainer::const_iterator const_dropout_iterator
double m_alpha
internal parameter (learningRate)
void setDropOut(Iterator itDrop)
set the drop-out info for this layer
void addPoint(std::string histoName, double x, double y)
for monitoring
ModeOutputValues operator|=(ModeOutputValues &lhs, ModeOutputValues rhs)
double sumOfSquares(ItOutput itOutputBegin, ItOutput itOutputEnd, ItTruth itTruthBegin, ItTruth itTruthEnd, ItDelta itDelta, ItDelta itDeltaEnd, ItInvActFnc itInvActFnc, double patternWeight)
Timing information for training and evaluation of MVA methods.
double crossEntropy(ItProbability itProbabilityBegin, ItProbability itProbabilityEnd, ItTruth itTruthBegin, ItTruth itTruthEnd, ItDelta itDelta, ItDelta itDeltaEnd, ItInvActFnc itInvActFnc, double patternWeight)
cross entropy error function
bool exists(std::string histoName)
for monitoring
virtual bool hasConverged(double testError)
has this training converged already?
LayerData holds the data of one layer.
virtual void testSample(double, double, double, double)
virtual function to be used for monitoring (callback)
Net(const Net &other)
copy c'tor
const_iterator_type valueGradientsEnd() const
returns const iterator to the end of the gradients
size_t numWeights(size_t numInputNodes) const
return the number of weights for this layer (fully connected)
std::tuple< Settings &, Batch &, DropContainer & > pass_through_type
const_iterator m_itEnd
iterator denoting the end of the batch
std::shared_ptr< std::function< double(double)> > inverseActivationFunction() const
int randomInt(int maxValue)
std::shared_ptr< std::function< double(double)> > m_activationFunction
stores the activation function
std::vector< double > m_input