@@ -22,15 +22,9 @@ def train(inputNetwork, learnRate, epochs, batchSize = 10):
 
     a0 = np.empty((net.inputLength, batchSize))
 
-    w1 = net.layer1
-    b1 = np.stack([net.bias1] * batchSize).transpose()
-
     z1 = np.empty((net.hiddenLength, batchSize))
     a1 = np.empty((net.hiddenLength, batchSize))
 
-    w2 = net.layer2
-    b2 = np.stack([net.bias2] * batchSize).transpose()
-
     z2 = np.empty((net.outputLength, batchSize))
     a2 = np.empty((net.outputLength, batchSize))
 
@@ -50,6 +44,13 @@ def train(inputNetwork, learnRate, epochs, batchSize = 10):
 
     for batchIndex in range(0, nbSamples, batchSize):
+
+        w1 = net.layer1
+        b1 = np.stack([net.bias1] * batchSize).transpose()
+
+        w2 = net.layer2
+        b2 = np.stack([net.bias2] * batchSize).transpose()
+
         batchEndIndex = batchIndex + batchSize
         batchSelection = permut[batchIndex : batchEndIndex]
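In effect, the hunks above rebuild the w1/b1/w2/b2 working copies at the top of every batch instead of once before the loop. np.stack(...) returns a new array (and, depending on how the update step assigns net.layer1 and friends, the weight references can also end up pointing at superseded arrays), so snapshots taken before the loop would keep feeding the initial parameters to every forward pass. Below is a minimal, self-contained sketch of that difference; the Net class, shapes, and the update line are illustrative assumptions, not the project's actual code:

import numpy as np

# Toy stand-in for the network; the real class and update rule are not part
# of this hunk, so everything here is illustrative only.
class Net:
    def __init__(self):
        self.bias1 = np.zeros(3)

net, batchSize = Net(), 4

# Snapshot taken once, before the "loop": a frozen copy of the initial biases.
b1_before_loop = np.stack([net.bias1] * batchSize).transpose()

# Stand-in for a parameter update made while processing an earlier batch.
net.bias1 = net.bias1 + 1.0

# Snapshot rebuilt at the top of the current batch: reflects the update.
b1_inside_loop = np.stack([net.bias1] * batchSize).transpose()

print(b1_before_loop[:, 0])   # [0. 0. 0.]  stale: copied before any update happened
print(b1_inside_loop[:, 0])   # [1. 1. 1.]  fresh: reads the current parameters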