def forward_propagation(self, turn):
    """
    Feedforward computation.
    :param turn: index of the sample to propagate through the network
    :return:
    """
    self.i = self.x[turn]
    self.h = sigmoid(np.dot(self.W1, self.i))
    self.o = sigmoid(np.dot(self.W2, self.h))
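The snippets in this example call a sigmoid helper and numpy without showing them; a minimal sketch of the assumed helper:

import numpy as np

def sigmoid(z):
    # Logistic sigmoid, applied element-wise: 1 / (1 + exp(-z)).
    return 1.0 / (1.0 + np.exp(-z))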
def test(self, file_name):
    """
    Test: returns (accuracy, loss) on the data set in file_name.
    :return:
    """
    self.read_data(file_name)
    count = 0
    for i in range(self.n):
        h = sigmoid(np.dot(self.W1, self.x[i]))
        o = sigmoid(np.dot(self.W2, h))
        # Predicted class is the index of the largest output activation;
        # count a hit when it matches the one-hot label.
        predict = np.argmax(o)
        if predict == np.argmax(self.y[i]):
            count += 1
    return count * 1.0 / self.n, self.loss_function()
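For reference, the same pass can be vectorized; a sketch of a hypothetical test_vectorized variant, assuming self.x stacks the samples row-wise and self.y holds one-hot labels:

def test_vectorized(self, file_name):
    # Hypothetical vectorized variant of test(): propagate every sample
    # at once, then compare argmax predictions with the one-hot labels.
    self.read_data(file_name)
    h = sigmoid(np.dot(self.x, self.W1.T))   # (n, hidden_layer_size)
    o = sigmoid(np.dot(h, self.W2.T))        # (n, output_layer_size)
    accuracy = np.mean(np.argmax(o, axis=1) == np.argmax(self.y, axis=1))
    return accuracy, self.loss_function()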
def loss_function(self):
    """
    Loss function: squared error plus L2 weight regularization.
    :return:
    """
    self.loss = 0
    for i in range(self.n):
        h = sigmoid(np.dot(self.W1, self.x[i]))
        o = sigmoid(np.dot(self.W2, h))
        for j in range(self.output_layer_size):
            self.loss += (o[j] - self.y[i][j]) ** 2
    self.loss *= 0.5

    # Add the regularization term for the W1 weights
    for i in range(self.hidden_layer_size):
        for j in range(self.input_layer_size):
            self.loss += self.lambda_ * self.W1[i][j] * self.W1[i][j] / self.n * 0.5

    # Then the regularization term for the W2 weights
    for i in range(self.output_layer_size):
        for j in range(self.hidden_layer_size):
            self.loss += self.lambda_ * self.W2[i][j] * self.W2[i][j] / self.n * 0.5
    return self.loss
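The loops above compute J = 0.5 * sum_i ||o_i - y_i||^2 + (lambda_ / (2 * n)) * (||W1||^2 + ||W2||^2); a vectorized sketch of the same quantity, assuming the attribute names used above:

def loss_function_vectorized(self):
    # Hypothetical vectorized equivalent of loss_function().
    h = sigmoid(np.dot(self.x, self.W1.T))
    o = sigmoid(np.dot(h, self.W2.T))
    data_term = 0.5 * np.sum((o - self.y) ** 2)
    reg_term = 0.5 * self.lambda_ / self.n * (np.sum(self.W1 ** 2)
                                              + np.sum(self.W2 ** 2))
    return data_term + reg_term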
Example #4
def feedforward(self):
    # Feedforward (forward propagation): push the input through three
    # sigmoid layers to produce the predicted outputs.
    self.layer1 = sigmoid(np.dot(self.input, self.weights1))
    self.layer2 = sigmoid(np.dot(self.layer1, self.weights2))
    self.output = sigmoid(np.dot(self.layer2, self.weights3))
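A minimal self-contained run of the same three-layer pass, with illustrative shapes (the sizes and variable names here are assumptions, not from the original):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
x = rng.standard_normal((1, 4))           # one sample with 4 features
weights1 = rng.standard_normal((4, 8))    # input    -> hidden 1
weights2 = rng.standard_normal((8, 8))    # hidden 1 -> hidden 2
weights3 = rng.standard_normal((8, 3))    # hidden 2 -> output

layer1 = sigmoid(np.dot(x, weights1))
layer2 = sigmoid(np.dot(layer1, weights2))
output = sigmoid(np.dot(layer2, weights3))
print(output)    # shape (1, 3), activations in (0, 1)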