Example No. 1
    def _prop(self, inputs: Inputs):
        # forward pass: keep every layer's activation in the `forward` list
        forward = [np.array(inputs.inputs)]

        # hidden layers: affine transform followed by ReLU
        for ind in range(2):
            forward.append(np.dot(forward[-1], self.weights[ind]))
            forward[-1] = funcs.ReLU(forward[-1])

        # output layer: squash the last hidden activation with a sigmoid
        forward.append(funcs.sigmoid(forward[-1]))

        # normalize the output into a probability distribution
        output = funcs.softmax(forward[-1])

        return output
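
The `funcs` helpers used by `_prop` are not part of the snippet. Below is a minimal sketch of what they are assumed to compute; the project's actual definitions may differ.

import numpy as np

def ReLU(x):
    # element-wise rectified linear unit
    return np.maximum(0, x)

def sigmoid(x):
    # element-wise logistic function
    return 1.0 / (1.0 + np.exp(-x))

def softmax(x):
    # subtract the max before exponentiating for numerical stability
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)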
Example No. 2
    # slice out the next mini-batch of `batch` samples, wrapping around the
    # 60000-sample training set; each image is flattened to row * row pixels
    if ((n + 1) * batch) % 60000 != 0:
        learn = np.reshape(X[(n * batch) % 60000:((n + 1) * batch) % 60000], (batch, row * row)).T
        answer = Y[(n * batch) % 60000:((n + 1) * batch) % 60000]
    else:
        learn = np.reshape(X[(n * batch) % 60000:60000], (batch, row * row)).T
        answer = Y[(n * batch) % 60000:60000]

    # hidden layer
    midin = weight1.dot(learn) + b1
    midout = funcs.ReLU(midin)

    # output layer
    finin = weight2.dot(midout) + b2
    finout = funcs.softmax(finin)

    # predicted class for each sample and its difference from the label
    indexmax = finout.argmax(axis=0)
    power = indexmax - answer

    # print("answer")
    # print(answer)
    # print("indexmax")
    # print(indexmax)
    # print("power")
    # print(power)

    # cross-entropy loss for this batch
    entropy = funcs.cross(finout, answer, end)
    if (n * batch) % 60000 == 0:
        print("iteration " + str(n))
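
This snippet also relies on a `funcs` module that is not shown, and it keeps samples in columns (`learn` has shape `(row * row, batch)`), so its softmax has to normalize along axis 0. The sketch below is an assumed implementation of `funcs.softmax` and `funcs.cross` under those conventions; in particular, the `end` argument is never defined in the snippet and is treated here as the batch size.

import numpy as np

def softmax(x):
    # column-wise softmax: every column of x holds one sample's logits
    shifted = x - np.max(x, axis=0, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=0, keepdims=True)

def cross(probs, labels, end):
    # average cross-entropy over the batch; labels are integer class ids
    idx = np.asarray(labels, dtype=int)
    picked = probs[idx, np.arange(probs.shape[1])]
    return -np.sum(np.log(picked + 1e-12)) / end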
Example No. 3
    def forward(self, x, y_one_hot):
        # softmax-with-loss layer: cache the targets and the predicted
        # probabilities so a later backward pass can reuse them
        self.y_one_hot = y_one_hot
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.y_one_hot)

        return self.loss
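
`softmax` and `cross_entropy_error` are imported from elsewhere in that project. A plausible definition of the loss helper, assuming `y` and `y_one_hot` both have shape `(batch, classes)`:

import numpy as np

def cross_entropy_error(y, y_one_hot):
    # mean cross-entropy over the batch; the small constant avoids log(0)
    batch_size = y.shape[0]
    return -np.sum(y_one_hot * np.log(y + 1e-7)) / batch_size

Pairing softmax with this loss keeps the backward pass simple: the gradient of the loss with respect to the pre-softmax input is (self.y - self.y_one_hot) / batch_size, which is the same prediction-minus-target form that appears as d2 = final_out - Y in Example No. 4 below.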
Example No. 4
        for batch_count in range(1, BATCH_SIZE + 1):
            try:
                trainImageList.append(next(train_image))
                trainLabelList.append(int(next(train_label)[0]))
            except StopIteration:
                reachedEnd = True
                break
        if len(trainImageList) == 0:
            break

        Y = oneHot(trainLabelList)
        X = createInputArray(trainImageList)

        # now we have our input matrix X and one-hot target matrix Y; run forward propagation
        l1_out = funcs.vec_sig(np.dot(X, w1) + b1)
        final_out = funcs.softmax(np.dot(l1_out, w2) + b2)

        # gradients: softmax + cross-entropy gives (final_out - Y) at the output,
        # and the hidden layer uses the sigmoid derivative l1_out * (1 - l1_out)
        d2 = final_out - Y
        d1 = l1_out * (1 - l1_out) * np.dot(d2, w2.T)

        # update weights and biases with gradient descent

        w2 = w2 - LEARN_RATE * np.dot(l1_out.T, d2)
        b2 = b2 - LEARN_RATE * d2.sum(axis=0)

        w1 = w1 - LEARN_RATE * np.dot(X.T, d1)
        b1 = b1 - LEARN_RATE * d1.sum(axis=0)

        # get batchLoss and update epoLoss
        batchLoss = -1 * np.sum(Y * np.log(final_out))
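
`oneHot` and `funcs.vec_sig` are defined outside the snippet. One plausible sketch of each is shown here; the ten-class default matches MNIST-style labels but is an assumption, not something the snippet states.

import numpy as np

def oneHot(labels, num_classes=10):
    # build a (batch, num_classes) one-hot target matrix from integer labels
    encoded = np.zeros((len(labels), num_classes))
    encoded[np.arange(len(labels)), labels] = 1.0
    return encoded

def vec_sig(x):
    # element-wise sigmoid applied to the whole activation matrix at once
    return 1.0 / (1.0 + np.exp(-x))

Because the output layer pairs softmax with a cross-entropy loss, d2 = final_out - Y is the full output-layer gradient, and l1_out * (1 - l1_out) is the sigmoid derivative, so the four update lines implement standard two-layer backpropagation.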