Example 1
 def stochasticGradient(self, train_X, train_y, trainLossVec):
     # Runs back-propagation on one randomly sampled mini-batch and
     # returns the averaged weight and bias gradients
     changeWeights = [0] * (self.L - 1)
     changeBiases = [0] * (self.L - 1)
     lossFuncSum = 0
     for i in range(0, BATCH_SIZE):
         aVec = []
         DVec = []
         deltaVec = []
         k = random.randint(0, len(train_X) - 1)
         xk = train_X[k]
         yk = train_y[k]
         a = hf.flatten(xk)
         aVec.append(a)
         # Forward pass: store each layer's activation and the diagonal
         # matrix of activation derivatives for the backward pass
         for l in range(0, self.L - 1):
             z = np.matmul(self.weights[l], a) + self.biases[l]
             a = hf.relu(z)
             D = np.diag(hf.reluPrim(z))
             # Alternative: sigmoid activation
             # a = hf.sigmoid(z)
             # D = np.diag(hf.sigmoidPrim(z))
             aVec.append(a)
             DVec.append(D)
         # Output-layer error: delta_L = D_L (a_L - y), consistent with
         # the gradient of a quadratic loss
         delta_L = np.matmul(DVec[-1], (a - hf.formatY(yk)))
         deltaVec.append(delta_L)
         # Back-propagate the error through the hidden layers
         for l in reversed(range(-self.L + 1, -1)):
             delta_l = np.matmul(
                 DVec[l],
                 np.matmul(np.transpose(self.weights[l + 1]),
                           deltaVec[l + 1]))
             deltaVec.insert(0, delta_l)
         # Accumulate this sample's gradient contributions
         for l in reversed(range(-self.L + 1, 0)):
             changeBiases[l] += deltaVec[l]
             changeWeights[l] += np.outer(deltaVec[l], aVec[l - 1])
         # Count a correct prediction when the index of the largest
         # output activation matches the label (ndarrays returned by
         # hf.relu have no .index method, so use np.argmax)
         index = int(np.argmax(aVec[-1]))
         if index == int(yk):
             self.correct += 1
         lossFuncSum += hf.lossFunc(aVec[-1], yk)
     trainLossVec.append(lossFuncSum / BATCH_SIZE)
     # Average the accumulated gradients over the mini-batch
     dw = [cw / BATCH_SIZE for cw in changeWeights]
     db = [cb / BATCH_SIZE for cb in changeBiases]
     return dw, db
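
Both examples assume a helper module hf, a module-level BATCH_SIZE constant, and numpy/random imports that are not shown. Below is a minimal sketch of plausible stand-ins, assuming ReLU activations, one-hot targets, and a quadratic loss; the original definitions may differ.

 # Hypothetical stand-ins for the helper module "hf" and constants;
 # the original definitions are not shown in these examples
 import numpy as np

 BATCH_SIZE = 32       # assumed mini-batch size
 NUM_CLASSES = 10      # assumed number of output classes

 def flatten(x):
     # Reshape an input image into a 1-D float vector
     return np.asarray(x, dtype=float).reshape(-1)

 def relu(z):
     return np.maximum(z, 0.0)

 def reluPrim(z):
     # Elementwise derivative of ReLU
     return (z > 0).astype(float)

 def sigmoid(z):
     return 1.0 / (1.0 + np.exp(-z))

 def sigmoidPrim(z):
     s = sigmoid(z)
     return s * (1.0 - s)

 def formatY(y):
     # One-hot encode an integer class label
     out = np.zeros(NUM_CLASSES)
     out[int(y)] = 1.0
     return out

 def lossFunc(a, y):
     # Quadratic loss, matching the (a - y) term in delta_L above
     return 0.5 * np.sum((a - formatY(y)) ** 2)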
Example 2
 def predict(self, image):
     # Forward pass only: feed the flattened image through every
     # layer and return the output activations
     a = hf.flatten(image)
     for l in range(0, self.L - 1):
         a = self.nextLayer(a, l)
     return a
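
predict calls a nextLayer method that neither example defines. Assuming it performs the same affine-plus-activation step as the forward pass in Example 1, a plausible sketch:

 def nextLayer(self, a, l):
     # One forward step: affine transform followed by the activation
     # (assumed to mirror the forward pass in stochasticGradient)
     z = np.matmul(self.weights[l], a) + self.biases[l]
     return hf.relu(z)

A training step would then apply the averaged gradients with some learning rate, for example (hypothetical usage; net, LEARNING_RATE, and the data variables are assumptions, not part of the original code):

 dw, db = net.stochasticGradient(train_X, train_y, trainLossVec)
 for l in range(net.L - 1):
     net.weights[l] = net.weights[l] - LEARNING_RATE * dw[l]
     net.biases[l] = net.biases[l] - LEARNING_RATE * db[l]
 digit = int(np.argmax(net.predict(test_image)))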