Example #1
import renom as rm
from renom import Variable
from renom.cuda import set_cuda_active


def test_gpu_node_sigmoid_cross_entropy(a, b):
    # Run the forward/backward pass on the GPU first.
    set_cuda_active(True)

    g1 = Variable(a)
    g2 = Variable(b)

    # GPU forward pass; take the gradient with respect to g1.
    g3 = rm.sigmoid_cross_entropy(g1, g2)
    g = g3.grad()
    g_g1 = g.get(g1)
    g3.to_cpu()
    g_g1.to_cpu()

    # Repeat the same computation on the CPU.
    set_cuda_active(False)
    c3 = rm.sigmoid_cross_entropy(g1, g2)
    c = c3.grad()
    c_g1 = c.get(g1)

    # Forward results and gradients must match between GPU and CPU.
    close(g3, c3)
    close(c_g1, g_g1)
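
The `close` helper is not shown in this snippet; a minimal sketch, assuming it is a tolerance-based array comparison (the name matches the test above, but the tolerances here are placeholders):

import numpy as np

def close(a, b, rtol=1e-3, atol=1e-5):
    # Coerce to plain arrays so renom Nodes compare like ndarrays.
    assert np.allclose(np.asarray(a), np.asarray(b), rtol=rtol, atol=atol)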
Example #2
def fit(self, x, y):
    # Mini-batch training loop: one-hot encode the labels, then iterate
    # over shuffled mini-batches for self.epoch epochs.
    N = len(x)
    labels = self.lb.transform(y)
    for i in range(self.epoch):
        perm = np.random.permutation(N)
        for j in range(N // self.batch):
            train_batch = x[perm[j * self.batch:(j + 1) * self.batch]]
            labels_batch = labels[perm[j * self.batch:(j + 1) * self.batch]]
            # Build the graph inside train() so gradients can be taken.
            with self.network.train():
                z = self.network(train_batch)
                loss = rm.sigmoid_cross_entropy(z, labels_batch)
            loss.grad().update(self.optimizer)
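
The method above belongs to a wrapper class that the listing omits; a hypothetical minimal constructor showing the attributes `fit` relies on (the class name, defaults, and the `LabelBinarizer` choice are assumptions, not part of the original):

import renom as rm
from sklearn.preprocessing import LabelBinarizer

class SigmoidClassifier:  # hypothetical name; the original class is not shown
    def __init__(self, network, optimizer, epoch=10, batch=64):
        self.network = network      # a renom Model instance
        self.optimizer = optimizer  # e.g. rm.Sgd()
        self.epoch = epoch
        self.batch = batch
        self.lb = LabelBinarizer()  # assumed; must be fitted before fit() calls transform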
Example #3
        # ... (rest of the Mnist model definition is truncated in the original)
        return t3


# X and y (the training data) are assumed to be loaded earlier in the script.
import numpy as np
import matplotlib.pyplot as plt
import renom as rm
from renom.optimizer import Sgd

epoch = 50
batch = 1  # batch size 1: pure stochastic gradient descent
N = len(X)

optimizer = Sgd()

network = Mnist()
learning_curve = []

for i in range(epoch):
    perm = np.random.permutation(N)
    loss = 0
    for j in range(0, N // batch):
        train_batch = X[perm[j * batch:(j + 1) * batch]]
        response_batch = y[perm[j * batch:(j + 1) * batch]]
        with network.train():
            result = network(train_batch)
            l = rm.sigmoid_cross_entropy(result, response_batch)
        grad = l.grad()
        grad.update(optimizer)
        # Detach to a plain ndarray so the computational graph is not kept alive.
        loss += l.as_ndarray()
    train_loss = loss / (N // batch)
    learning_curve.append(train_loss)
    print("train_loss:{}".format(train_loss))
print(network(X))
plt.plot(learning_curve, linewidth=3, label="train")
plt.legend()
plt.show()
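
The `return t3` fragment at the top of this example is the tail of the Mnist model definition, which the listing truncates; a sketch of what a matching ReNom model could look like (the layer sizes and attribute names are assumptions):

import renom as rm

class Mnist(rm.Model):
    # Hypothetical reconstruction: only "return t3" survives from the
    # original definition, so the architecture below is an assumption.
    def __init__(self):
        super(Mnist, self).__init__()
        self._layer1 = rm.Dense(100)
        self._layer2 = rm.Dense(50)
        self._layer3 = rm.Dense(10)

    def forward(self, x):
        t1 = rm.relu(self._layer1(x))
        t2 = rm.relu(self._layer2(t1))
        t3 = self._layer3(t2)
        return t3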
Example #4
def func(node, x):
    # With reduce_sum=False the per-element losses are returned and
    # summed manually into a scalar for gradient checking.
    return sum(rm.sigmoid_cross_entropy(node, x, reduce_sum=False))
Example #5
def func(node, x):
    # Default reduction: a single scalar loss over the whole batch.
    return rm.sigmoid_cross_entropy(node, x)
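
Examples #4 and #5 differ only in the reduction: with `reduce_sum=False` the per-element losses come back and are summed manually, while the default returns a single reduced value. A NumPy sketch of the underlying formula (the divide-by-batch-size reduction is an assumption about ReNom's default, not confirmed by the snippets):

import numpy as np

def sigmoid_cross_entropy_ref(z, t, reduce_sum=True):
    # Binary cross entropy on logits z against targets t:
    # -[t*log(s) + (1-t)*log(1-s)] with s = sigmoid(z).
    s = 1.0 / (1.0 + np.exp(-z))
    loss = -(t * np.log(s) + (1.0 - t) * np.log(1.0 - s))
    if reduce_sum:
        # Assumed reduction: sum over all elements, divided by batch size.
        return np.sum(loss) / len(z)
    return loss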