def forward(self, x, t):
    """
    Forward pass of a softmax-with-loss layer.

    :param x: raw scores (logits)
    :param t: targets, either class indices or one-hot rows
    :return: cross-entropy loss
    """
    self.y = softmax(x)
    self.t = t
    # Sizes match only when t is one-hot with the same shape as y;
    # in that case reduce it to class-index form.
    if self.t.size == self.y.size:
        self.t = self.t.argmax(axis=1)
    return cross_entropy_error(self.y, self.t)
    def predict(self, x):
        """
        Run the two-layer network forward and return softmax probabilities.

        :param x: input batch
        :return: output probabilities, one row per sample
        """
        params = self.params
        # hidden layer: affine transform followed by sigmoid
        hidden = sigmoid(np.dot(x, params['W1']) + params['b1'])
        # output layer: affine transform followed by softmax
        scores = np.dot(hidden, params['W2']) + params['b2']
        return softmax(scores)
    def loss(self, x, t):
        """
        Compute the cross-entropy loss of the network's prediction.

        :param x: input batch
        :param t: correct labels
        :return: scalar cross-entropy loss
        """
        # BUG FIX: predict() already applies softmax to its output, so the
        # original code ran softmax twice, distorting the probability
        # distribution fed to cross_entropy_error. Use the prediction directly.
        y = self.predict(x)
        return cross_entropy_error(y, t)
# Example #4
    def get_result_matrix(self, x):
        """
        Return the activation matrix of the output layer.

        :param x: input matrix, shape (n, m)
        :return: output activation matrix, shape (n, N)
        """
        # Prepend a bias column of ones before feeding each layer.
        next_layer_input = np.append(np.ones((x.shape[0], 1), dtype=int), x, axis=1)
        for layer in self.layers[:-1]:
            activations = layer.process_input(next_layer_input)
            next_layer_input = np.append(np.ones((len(activations), 1), dtype=int), activations, axis=1)
        # Drive the output layer so its intermediate_sums are populated.
        self.layers[-1].process_input(next_layer_input)
        # Softmax each row of the output layer's pre-activation sums;
        # a comprehension replaces the original manual append loop.
        return np.array([softmax(vect) for vect in self.layers[-1].intermediate_sums])
# Example #5
def predict(network, x):
    """
    Run a three-layer network forward and return per-label probabilities.

    :param network: dict holding weights 'W1'..'W3' and biases 'b1'..'b3'
    :param x: input batch
    :return: softmax probabilities over the labels
    """
    weights = (network['W1'], network['W2'], network['W3'])
    biases = (network['b1'], network['b2'], network['b3'])

    # Hidden layers: affine transform + sigmoid, applied in sequence.
    activation = x
    for w, b in zip(weights[:-1], biases[:-1]):
        activation = sigmoid(np.dot(activation, w) + b)

    # Output layer: affine transform + softmax yields the probabilities.
    return softmax(np.dot(activation, weights[-1]) + biases[-1])