Net.cpp
#include "Net.h"
Net::Net(const vector<unsigned> &topology)
{
    DEBUG_PRINT("Net Constructor");
    unsigned numLayers = topology.size();
    for (unsigned layerNum = 0; layerNum < numLayers; layerNum++) {
        m_layers.push_back(Layer());

        // The output layer has no outgoing connections; every other layer
        // has one connection per neuron in the next layer
        unsigned numOutputs = (layerNum == numLayers - 1) ? 0 : topology[layerNum + 1];

        // We have made a new Layer; now fill it with neurons, and
        // add a bias neuron to the layer (hence <=, one extra neuron)
        for (unsigned neuronNum = 0; neuronNum <= topology[layerNum]; neuronNum++) {
            m_layers.back().push_back(Neuron(numOutputs, neuronNum));
            DEBUG_PRINT("Made Neuron " << neuronNum);
        }

        // Force the bias node's output value to 1.0. It's the last neuron created above
        m_layers.back().back().setOutputVal(1.0);
    }
}
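
// Example: a topology of {2, 4, 1} builds a 2-4-1 network. Because the inner
// loop above runs one extra iteration (<=), every layer also gets a bias
// neuron, so the actual layer sizes stored in m_layers are 3, 5 and 2.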
void Net::feedForward(const Array<double> &inputVals)
{
    DEBUG_PRINT(inputVals.size() << " == " << m_layers[0].size() - 1 << " ??");
    // One input value per input neuron (the extra neuron is the bias)
    assert(inputVals.size() == m_layers[0].size() - 1);
    DEBUG_PRINT("Passed assert");

    // Assign (latch) the input values into the input neurons
    for (unsigned i = 0; i < inputVals.size(); i++) {
        m_layers[0][i].setOutputVal(inputVals[i]);
    }
    DEBUG_PRINT("Latched values to first layer");

    DEBUG_PRINT("Forward propagating...");
    // Forward propagate layer by layer; the bias neuron in each layer is
    // skipped since its output stays fixed at 1.0
    for (unsigned layerNum = 1; layerNum < m_layers.size(); layerNum++) {
        Layer &prevLayer = m_layers[layerNum - 1];
        for (unsigned neuronNum = 0; neuronNum < m_layers[layerNum].size() - 1; neuronNum++) {
            m_layers[layerNum][neuronNum].feedForward(prevLayer);
        }
    }
}
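
// Note: the per-neuron math lives in Neuron::feedForward (declared in Net.h),
// not here. In this style of net each neuron is expected to compute something
// like out = transferFunction(sum_i prevLayer[i].out * weight_i), though the
// exact transfer function is Neuron's choice and is not visible in this file.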
void Net::backProp(const Array<double> &targetVals)
{
    // Calculate the overall net error (Root Mean Square of output neuron
    // errors); the bias neuron is excluded, hence size() - 1
    Layer &outputLayer = m_layers.back();
    m_error = 0.0;
    for (unsigned n = 0; n < outputLayer.size() - 1; n++) {
        double delta = targetVals[n] - outputLayer[n].getOutputVal();
        m_error += delta * delta;
    }
    m_error /= outputLayer.size() - 1; // mean squared error
    m_error = sqrt(m_error);           // root mean squared error
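    // Spelled out, with N real output neurons:
    //   m_error = sqrt( (1/N) * sum_n (target_n - out_n)^2 )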
    // Keep a smoothed running average of the error over recent samples
    m_recentAverageError =
        (m_recentAverageError * m_recentAverageSmoothingFactor + m_error)
        / (m_recentAverageSmoothingFactor + 1.0);
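    // That is, with smoothing factor k = m_recentAverageSmoothingFactor:
    //   recentAvg = (recentAvg * k + m_error) / (k + 1)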
    // Calculate output layer gradients
    for (unsigned n = 0; n < outputLayer.size() - 1; n++) {
        outputLayer[n].calcOutputGradients(targetVals[n]);
    }

    // Calculate gradients on hidden layers, working backwards from the last
    // hidden layer; layer 0 is the input layer and has no gradients
    for (unsigned layerNum = m_layers.size() - 2; layerNum > 0; layerNum--) {
        Layer &hiddenLayer = m_layers[layerNum];
        Layer &nextLayer = m_layers[layerNum + 1];
        for (unsigned n = 0; n < hiddenLayer.size(); n++) {
            hiddenLayer[n].calcHiddenGradients(nextLayer);
        }
    }

    // For all layers from outputs to first hidden layer,
    // update connection weights
    for (unsigned layerNum = m_layers.size() - 1; layerNum > 0; layerNum--) {
        Layer &layer = m_layers[layerNum];
        Layer &prevLayer = m_layers[layerNum - 1];
        for (unsigned n = 0; n < layer.size(); n++) {
            layer[n].updateInputWeights(prevLayer);
        }
    }
}
void Net::getResults(Array<double> &resultVals) const
{
    resultVals.clear();
    // Copy out every output-layer value except the trailing bias neuron
    for (unsigned n = 0; n < m_layers.back().size() - 1; n++) {
        resultVals.push_back(m_layers.back()[n].getOutputVal());
    }
}
ostream& operator<<(ostream& output, Net& n)
{
    for (unsigned i = 0; i < n.m_layers.back().size() - 1; i++) {
        output << "Neuron " << i << ": " << n.m_layers.back()[i].getOutputVal() << endl;
    }
    return output; // declared to return ostream&, so the stream must be returned
}
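
// ---------------------------------------------------------------------------
// Minimal usage sketch, not part of this file: Layer, Neuron and Array are
// assumed to come from Net.h, and the XOR-style sample below is illustrative.
//
//   vector<unsigned> topology;
//   topology.push_back(2);               // 2 inputs
//   topology.push_back(4);               // 4 hidden neurons
//   topology.push_back(1);               // 1 output
//   Net net(topology);
//
//   Array<double> inputs, targets, results;
//   inputs.push_back(1.0); inputs.push_back(0.0);
//   targets.push_back(1.0);              // e.g. expect XOR(1, 0) = 1
//
//   net.feedForward(inputs);             // forward pass
//   net.getResults(results);             // read the output layer
//   net.backProp(targets);               // one training step
// ---------------------------------------------------------------------------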