Code example #1
def forward(network, x):
    # Unpack the weights and biases of the three affine layers.
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    a1 = np.dot(x, W1) + b1       # layer 1: affine
    z1 = af.sigmoid(a1)           # layer 1: sigmoid activation
    a2 = np.dot(z1, W2) + b2      # layer 2: affine
    z2 = af.sigmoid(a2)           # layer 2: sigmoid activation
    a3 = np.dot(z2, W3) + b3      # output layer: affine
    y = af.identity_function(a3)  # identity output (regression-style)
    return y
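Neither np nor the af module is defined by this snippet. Below is a minimal way to run it; the SimpleNamespace stub and init_network are illustrative assumptions, not part of the source:

import numpy as np
from types import SimpleNamespace

# Stand-in for the activation-functions module the snippet uses as `af` (assumption).
af = SimpleNamespace(
    sigmoid=lambda x: 1.0 / (1.0 + np.exp(-x)),
    identity_function=lambda x: x,
)

def init_network():
    # Hypothetical 2-3-2-2 initializer matching the keys forward() reads.
    rng = np.random.default_rng(0)
    return {
        'W1': rng.standard_normal((2, 3)), 'b1': np.zeros(3),
        'W2': rng.standard_normal((3, 2)), 'b2': np.zeros(2),
        'W3': rng.standard_normal((2, 2)), 'b3': np.zeros(2),
    }

y = forward(init_network(), np.array([1.0, 0.5]))  # -> array of shape (2,)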
Code example #2
def forward(network, x):
    # Same three-layer forward pass as example #1, with sigmoid and
    # identity_function used directly instead of through an `af` module.
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    B1, B2, B3 = network['b1'], network['b2'], network['b3']

    A1 = np.dot(x, W1) + B1
    Z1 = sigmoid(A1)
    A2 = np.dot(Z1, W2) + B2
    Z2 = sigmoid(A2)
    A3 = np.dot(Z2, W3) + B3
    Y = identity_function(A3)

    return Y
Code example #3
def predict(self):
    # Forward pass of a two-layer net: affine -> sigmoid -> affine -> softmax.
    W1, W2 = self.params['W1'], self.params['W2']
    b1, b2 = self.params['b1'], self.params['b2']
    a1 = np.dot(self.x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    y = softmax(a2)
    return y
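softmax is not defined in this snippet. A common, numerically stable definition (an assumption; it also handles the batched rows used in code example #8):

def softmax(a):
    # Subtracting the row max avoids overflow in exp without changing the result.
    a = a - np.max(a, axis=-1, keepdims=True)
    exp_a = np.exp(a)
    return exp_a / np.sum(exp_a, axis=-1, keepdims=True)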
Code example #4
    def evaluate(self, x_test_, y_test_):
        """
        Evaluate on the validation set.
        :return: mean MSE loss over all validation samples
        """
        x_test_ = x_test_.reshape(x_test_.shape[0], -1)  # flatten each sample
        loss = 0
        for i in range(x_test_.shape[0]):
            x = x_test_[i].reshape(-1, 1)  # column-vector input
            y = y_test_[i].reshape(-1, 1)  # column-vector target
            # Hidden layer: affine (note the bias is subtracted) + sigmoid.
            x = sigmoid(
                np.dot(self.w_input_to_hidden, x).reshape(-1, 1) -
                self.hidden_bias)
            # Output layer: affine + sigmoid.
            pred = sigmoid(
                np.dot(self.w_hidden_to_output, x).reshape(-1, 1) -
                self.output_bias)
            loss += mse(y, pred)

        return loss / x_test_.shape[0]
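The sigmoid and mse helpers are not shown. Plausible definitions consistent with how evaluate() calls them (an assumption, not the project's actual code):

def sigmoid(z):
    # Element-wise logistic function.
    return 1.0 / (1.0 + np.exp(-z))

def mse(y, pred):
    # Mean squared error between the target and the prediction.
    return np.mean((y - pred) ** 2)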
Code example #5
File: network.py  Project: zhongzebin/network
def linear_activation_forward(self, A_prev, W, b, activation, alpha=None):
    # Affine transform followed by the requested activation.
    if activation == "sigmoid":
        Z = self.linear_forward(A_prev, W, b)
        A = sigmoid(Z)
    elif activation == "relu":
        Z = self.linear_forward(A_prev, W, b)
        A = relu(Z)
    elif activation == "prelu":
        Z = self.linear_forward(A_prev, W, b)
        A = prelu(Z, alpha)
    else:
        # Without this branch, an unknown activation leaves Z and A undefined.
        raise ValueError(f"unknown activation: {activation}")
    return Z, A
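linear_forward, relu, and prelu are not part of this snippet. Minimal sketches consistent with how they are called; the W @ A_prev column-vector convention is an assumption:

def linear_forward(self, A_prev, W, b):
    # Affine step: each column of A_prev is treated as one sample.
    return np.dot(W, A_prev) + b

def relu(Z):
    return np.maximum(0, Z)

def prelu(Z, alpha):
    # Parametric ReLU: slope alpha on the negative half-axis.
    return np.where(Z > 0, Z, alpha * Z)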
Code example #6
    def fit(self, x_train_, y_train_, epochs=100, lr=0.1):
        """
        Train the model with per-sample (stochastic) gradient descent.
        :param x_train_: training inputs, one sample per row
        :param y_train_: training targets, one sample per row
        :param epochs: number of passes over the training set
        :param lr: learning rate
        :return: None
        """

        for epoch in range(epochs):

            for i in range(x_train_.shape[0]):
                x = x_train_[i].reshape(-1, 1)  # (4, 1)
                y = y_train_[i].reshape(-1, 1)  # (3, 1)

                # Forward pass (biases are subtracted, matching evaluate()).
                hidden_layer_input = np.dot(self.w_input_to_hidden, x).reshape(
                    -1, 1) - self.hidden_bias
                hidden_layer_output = sigmoid(hidden_layer_input)  # nonlinearity

                output_layer_input = np.dot(self.w_hidden_to_output,
                                            hidden_layer_output).reshape(
                                                -1, 1) - self.output_bias
                output_layer_output = sigmoid(output_layer_input)

                # Backward pass: output-layer error signal.
                theta = (y - output_layer_output) * sigmoid_grad(
                    output_layer_input)  # (3, 1)
                # Hidden-layer error must use the *pre-update* output weights,
                # so compute it before updating w_hidden_to_output.
                beta = np.dot(self.w_hidden_to_output.T,
                              theta) * sigmoid_grad(hidden_layer_input)

                self.w_hidden_to_output += lr * np.dot(theta,
                                                       hidden_layer_output.T)
                self.output_bias += -lr * theta  # minus sign: bias enters negated
                self.w_input_to_hidden += lr * np.dot(beta, x.T)
                self.hidden_bias += -lr * beta

            loss = self.evaluate(x_train_, y_train_)  # track loss each epoch
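fit() depends on a sigmoid_grad helper that is not shown. A plausible definition, consistent with the pre-activation values it is passed (an assumption):

def sigmoid_grad(x):
    # Derivative of the logistic function: sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)).
    s = 1.0 / (1.0 + np.exp(-x))
    return s * (1.0 - s)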
Code example #7
def encode(self, x_input):
    """
    Map new inputs to their hidden-layer activations.
    :param x_input: input samples, one per row
    :return: float32 array of hidden-layer encodings
    """
    x_test_ = x_input.reshape(x_input.shape[0], -1)
    rst = []
    for i in range(x_test_.shape[0]):
        x = x_test_[i]
        # Hidden layer only: affine (bias subtracted) + sigmoid.
        x = sigmoid(
            np.dot(self.w_input_to_hidden, x).reshape(-1, 1) -
            self.hidden_bias)
        rst.append(np.squeeze(x, axis=-1))  # drop the trailing column axis
    return np.array(rst).astype('float32')
Code example #8
    def gradient(self):
        """Analytic gradients of the loss, computed by backpropagation."""
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}

        batch_num = self.x.shape[0]

        # forward
        a1 = np.dot(self.x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        # backward
        # (y - t) / batch_num is the gradient of softmax + cross-entropy loss.
        dy = (y - self.t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)

        da1 = np.dot(dy, W2.T)
        dz1 = sigmoid_grad(a1) * da1
        grads['W1'] = np.dot(self.x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)

        return grads
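A hedged usage sketch: assuming the object (called net here, an illustrative name) holds a mini-batch in self.x and one-hot targets in self.t, a plain SGD step would be:

grads = net.gradient()
for key in ('W1', 'b1', 'W2', 'b2'):
    net.params[key] -= lr * grads[key]  # vanilla gradient-descent update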