# Esempio n. 1 (Example no. 1) — scraper artifact kept as a comment so the file parses
# Split the full dataset into train/test at index N
# (N and x_all are assumed to be defined earlier in the file -- TODO confirm).
x_train, x_test = np.split(x_all, [N])
N_test = x_test.shape[0]

# Build the model: a 784 -> 100 denoising autoencoder with
# salt-and-pepper input corruption.
da = DenoisingAutoencoder(784, 100, noise=SaltAndPepperNoise())

# Training loop
all_loss = []
for epoch in xrange(n_epoch):
    print 'epoch', epoch
    # Reshuffle the training indices each epoch for SGD.
    indexes = np.random.permutation(N)
    losses = []
    sum_loss = 0
    for i in xrange(0, N, batchsize):
        x_batch = x_train[indexes[i:i+batchsize]]
        loss = da.train(x_batch)
        # Weight the (mean) batch loss by batchsize so that
        # sum_loss / N below is the per-sample epoch mean.
        sum_loss += float(loss.data) * batchsize
        if epoch == 0 and i == 0:
            # Dump the computation graph (Graphviz .dot) exactly once,
            # on the first batch of the first epoch.
            with open('../output/da/graph.dot', 'w') as o:
                o.write(computational_graph.build_computational_graph((loss, )).dump())
            with open('../output/da/graph.wo_split.dot', 'w') as o:
                g = computational_graph.build_computational_graph((loss, ), remove_split=True)
                o.write(g.dump())
    print 'train mean loss={}'.format(sum_loss / N)
    losses += [sum_loss / N]
    # Evaluation on the held-out set.
    # NOTE(review): this chunk looks truncated here -- the test-loss print
    # and all_loss bookkeeping that appear in the second example are
    # missing after this loop; confirm against the original file.
    sum_loss = 0
    for i in xrange(0, N_test, batchsize):
        x_batch = x_test[i:i+batchsize]
        loss = da.test(x_batch)
        sum_loss += float(loss.data) * batchsize
# Esempio n. 2 (Example no. 2) — scraper artifact kept as a comment so the file parses
# 1層目
da1_filename = '../output/sda/model_da1.pkl'
try:
    da1 = pickle.load(open(da1_filename))
except IOError:
    da1 = DenoisingAutoencoder(784, 100, noise=SaltAndPepperNoise())
    n_epoch = 30
    all_loss = []
    for epoch in xrange(n_epoch):
        print 'epoch', epoch
        indexes = np.random.permutation(N)
        losses = []
        sum_loss = 0
        for i in xrange(0, N, batchsize):
            x_batch = x_train[indexes[i:i+batchsize]]
            loss = da1.train(x_batch)
            sum_loss += float(loss.data) * batchsize
        print 'train mean loss={}'.format(sum_loss / N)
        losses += [sum_loss / N]
        # 評価
        sum_loss = 0
        for i in xrange(0, N_test, batchsize):
            x_batch = x_test[i:i+batchsize]
            loss = da1.test(x_batch)
            sum_loss += float(loss.data) * batchsize
        print 'test mean loss={}'.format(sum_loss / N_test)
        losses += [sum_loss / N_test]
        all_loss.append(losses)
        # 可視化
        h1 = MinMaxScaler().fit_transform(da1.model.encoder.W)
        visualize(h1, '../img/sda/da1/sda_da1_{0:04d}.jpg'.format(epoch + 1), (8, 8), (10, 10))