Example No. 1
 def feedforward(self, x):
     """Forward pass: compute the network's output for input x."""
     # Compute the output of the first layer of neurons
     sum_layer_1 = np.dot(self.weights_layer_1, x) + self.biases_layer_1
     out_layer_1 = af.Sigmoid(sum_layer_1)
     # Compute the output of the second layer of neurons
     sum_layer_2 = np.dot(self.weights_layer_2, out_layer_1) + self.biases_layer_2
     out_layer_2 = af.Sigmoid(sum_layer_2)
     return out_layer_2
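For reference, the sketch below re-creates the same two-layer forward pass as standalone code with explicit shapes. The concrete layer sizes are illustrative only, and sigmoid is defined inline because the af module used by these examples is not shown; the key assumption is that inputs and biases are column vectors.

 import numpy as np

 def sigmoid(z):
     return 1.0 / (1.0 + np.exp(-z))

 # Illustrative shapes: 2 inputs, 3 hidden neurons, 1 output neuron
 w1, b1 = np.random.randn(3, 2), np.random.randn(3, 1)
 w2, b2 = np.random.randn(1, 3), np.random.randn(1, 1)
 x = np.array([[0.5], [-1.2]])          # input as a column vector, shape (2, 1)
 out1 = sigmoid(np.dot(w1, x) + b1)     # first-layer activation, shape (3, 1)
 out2 = sigmoid(np.dot(w2, out1) + b2)  # network output, shape (1, 1)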
 def backprop(self, x, y_true):
     """Backpropagation: compute the gradient of the loss with respect to every parameter."""
     # Partial derivatives of the loss w.r.t. the parameters:
     # list of weight-matrix gradients for the whole network
     nabla_w = [np.zeros(w.shape) for w in self.weights]
     # List of bias-vector gradients for the whole network
     nabla_b = [np.zeros(b.shape) for b in self.biases]
     # Forward pass
     activation = x  # the input vector is used directly as the input layer's activation
     activation_list = [x]  # stores the activation (output) of every layer
     wx_plus_b_list = []  # stores the pre-activation value (w·x + b) of every layer
     # Iterate over the weight matrix and bias vector of every layer (except the input layer)
     for w, b in zip(self.weights, self.biases):
         wx_plus_b = np.dot(w, activation) + b
         wx_plus_b_list.append(wx_plus_b)
         activation = af.Sigmoid(wx_plus_b)
         activation_list.append(activation)
     # Backward pass (compute the parameter gradients, starting from the output layer)
     # Compute the output-layer error
     delta = self.Deriv_Loss(y_true, activation_list[-1]) * \
             af.Deriv_Sigmoid(wx_plus_b_list[-1])
     # Gradients of the output-layer parameters
     nabla_b[-1] = delta
     nabla_w[-1] = np.dot(delta, activation_list[-2].transpose())
     # Gradients of the hidden-layer parameters
     for l in range(2, self.num_layer):
         delta = np.dot(self.weights[-l + 1].transpose(), delta) * \
                 af.Deriv_Sigmoid(wx_plus_b_list[-l])
         nabla_b[-l] = delta
         nabla_w[-l] = np.dot(delta, activation_list[-l - 1].transpose())
     return (nabla_w, nabla_b)
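The snippet returns the gradients but does not show how they are applied. Below is a minimal sketch of a gradient-descent step for a single training example; the method name update_single_example and the learning rate eta are illustrative and not part of the original code.

 def update_single_example(self, x, y_true, eta=0.1):
     """Sketch only: apply one gradient-descent step using the gradients from backprop."""
     nabla_w, nabla_b = self.backprop(x, y_true)
     self.weights = [w - eta * nw for w, nw in zip(self.weights, nabla_w)]
     self.biases = [b - eta * nb for b, nb in zip(self.biases, nabla_b)]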
Example No. 3
 def backprop(self, x, y_true):
     """Backpropagation: compute the gradients used to optimize the network."""
     # Partial derivatives of the loss w.r.t. the parameters:
     # weight-matrix gradients for each layer of the network
     nabla_weight_layer_1 = np.zeros(self.weights_layer_1.shape)
     nabla_weight_layer_2 = np.zeros(self.weights_layer_2.shape)
     # Bias-vector gradients for each layer of the network
     nabla_biase_layer_1 = np.zeros(self.biases_layer_1.shape)
     nabla_biase_layer_2 = np.zeros(self.biases_layer_2.shape)
     # Forward pass
     activation = x  # output of the input layer, which is also the input to the first hidden layer
     activation_list = [x]  # stores the activation (output) of every layer
     wx_plus_b_list = []  # stores the pre-activation value of every layer
     # Store the intermediate results of the first layer of neurons
     wx_plus_b = np.dot(self.weights_layer_1,
                        activation) + self.biases_layer_1
     wx_plus_b_list.append(wx_plus_b)
     activation = af.Sigmoid(wx_plus_b)
     activation_list.append(activation)
     # Store the intermediate results of the second layer of neurons
     wx_plus_b = np.dot(self.weights_layer_2,
                        activation) + self.biases_layer_2
     wx_plus_b_list.append(wx_plus_b)
     activation = af.Sigmoid(wx_plus_b)
     activation_list.append(activation)
     # Backward pass (compute the parameter gradients, starting from the output layer)
     # Compute the output-layer error
     delta = self.Deriv_Cost(
         y_true, activation_list[-1]) * af.Deriv_Sigmoid(wx_plus_b_list[-1])
     # Gradients of the output-layer parameters
     nabla_biase_layer_2 = delta
     nabla_weight_layer_2 = np.dot(delta, activation_list[-2].transpose())
     # Gradients of the hidden-layer parameters
     delta = np.dot(self.weights_layer_2.transpose(), delta) * \
             af.Deriv_Sigmoid(wx_plus_b_list[-2])
     nabla_biase_layer_1 = delta
     nabla_weight_layer_1 = np.dot(delta, activation_list[-3].transpose())
     return (nabla_biase_layer_1, nabla_biase_layer_2, nabla_weight_layer_1,
             nabla_weight_layer_2)
 def feedforward(self, x):
     """Forward pass: feed x through every layer and return the network's output."""
     for w, b in zip(self.weights, self.biases):
         x = af.Sigmoid(np.dot(w, x) + b)
     return x
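Both examples depend on an activation-function helper af and on a cost derivative (self.Deriv_Loss in Example No. 1, self.Deriv_Cost in Example No. 3) that are not shown. A minimal sketch of what these helpers could look like, assuming a sigmoid activation and a squared-error cost, is given below; the examples call the cost derivative as a method on the network class.

 import numpy as np

 class af:  # stand-in for the activation-function module the snippets reference as "af"
     @staticmethod
     def Sigmoid(z):
         return 1.0 / (1.0 + np.exp(-z))

     @staticmethod
     def Deriv_Sigmoid(z):
         s = af.Sigmoid(z)
         return s * (1.0 - s)

 def Deriv_Cost(y_true, y_pred):
     # Derivative of the squared-error cost 0.5 * (y_pred - y_true)**2 w.r.t. y_pred
     return y_pred - y_true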