Example #1
def _main():
    data, _ = mnist.MNIST("train",
                          path="../../machine-learning/data/mnist/",
                          data_size=40,
                          batch_size=20,
                          reshape=False,
                          one_hot=False,
                          binarize=True).to_ndarray()
    test_data, _ = mnist.MNIST("test",
                               path="../../machine-learning/data/mnist/",
                               data_size=40,
                               batch_size=20,
                               reshape=False,
                               one_hot=False,
                               binarize=True).to_ndarray()

    max_epoch = 1

    # Layer 1
    print("----- Layer 1 -----")
    layer_i = rbm.RBM(train_data=data, num_hidden=1000)
    layer_i.train(max_epoch=max_epoch)
    # layer_i_param = (layer_i.weight, layer_i.visible_bias, layer_i.hidden_bias)

    # Layer 2
    print("----- Layer 2 -----")
    layer_ii = rbm.RBM(train_data=layer_i.hidden_data, num_hidden=500)
    layer_ii.train(max_epoch=max_epoch)
    # layer_ii_param = (layer_ii.weight, layer_ii.visible_bias, layer_ii.hidden_bias)

    # Layer 3
    print("----- Layer 3 -----")
    layer_iii = rbm.RBM(train_data=layer_ii.hidden_data, num_hidden=250)
    layer_iii.train(max_epoch=max_epoch)
    # layer_iii_param = (layer_iii.weight, layer_iii.visible_bias, layer_iii.hidden_bias)

    # Layer 4
    print("----- Layer 4 -----")
    layer_iv = rbm.RBM(train_data=layer_iii.hidden_data, num_hidden=30)
    layer_iv.train(max_epoch=max_epoch)
    # layer_iv_param = (layer_iv.weight, layer_iv.visible_bias, layer_iv.hidden_bias)

    # Backpropagation
    print("\n=============== Backpropagation ===============\n")
    bp.backpropagation(layers=[layer_i, layer_ii, layer_iii, layer_iv],
                       train_data=data,
                       test_data=test_data,
                       max_epoch=2)
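
The snippet above relies on project-local modules that are not shown. A minimal sketch of an entry point for running it, with the import names inferred from usage (the module behind `bp` in particular is an assumption):

# Assumed project-local imports (names inferred from usage, not confirmed by the source):
# import mnist                    # MNIST loader exposing .to_ndarray()
# import rbm                      # provides rbm.RBM
# import backpropagation as bp    # hypothetical module exposing bp.backpropagation

if __name__ == "__main__":
    _main()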
Example #2
File: test_mnist.py Project: Jthon/wgan-gp
import load_mnist
import config as cfg
import numpy as np
import cv2
mnist_dataset = load_mnist.MNIST(cfg.params["mnist_image"],
                                 cfg.params["mnist_label"])
for i in range(mnist_dataset.datanum):
    signal = False  # set to True when Esc is pressed, to stop browsing entirely
    print("num=%d" % int(mnist_dataset.labels[i]))
    # Show the digit first, then poll the keyboard until Enter or Esc is pressed.
    cv2.imshow("image", np.array(mnist_dataset.images[i], np.uint8))
    while True:
        key = cv2.waitKey(5)
        if key == 13:  # Enter: advance to the next digit
            break
        if key == 27:  # Esc: quit
            signal = True
            break
    if signal:
        break
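
For non-interactive inspection, the waitKey loop above can be replaced by writing a tiled grid of digits to disk. A minimal sketch, assuming each `mnist_dataset.images[i]` is an equally sized uint8 array as in the snippet; `save_digit_grid` is a hypothetical helper, not part of the project:

import numpy as np
import cv2

def save_digit_grid(dataset, path="digits.png", rows=8, cols=8):
    # Tile the first rows*cols digits into a single image and write it as a PNG.
    tiles = [np.array(dataset.images[i], np.uint8) for i in range(rows * cols)]
    grid = np.vstack([np.hstack(tiles[r * cols:(r + 1) * cols]) for r in range(rows)])
    cv2.imwrite(path, grid)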
Example #3
###################################################################################################
# Prepare to train
transform = transforms.Compose([
    # transforms.RandomCrop(32, padding=4),
    # transforms.RandomHorizontalFlip(),
    transforms.Resize(opt.imageSize),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

num_classes = opt.num_classes

trainset = dset.MNIST(root='./data',
                      train=True,
                      out_digit=opt.out_digit,
                      download=True,
                      transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=opt.batchSize,
                                          shuffle=True,
                                          num_workers=opt.workers)
testset = dset.MNIST(root='./data',
                     train=False,
                     out_digit=opt.out_digit,
                     download=True,
                     transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=opt.batchSize,
                                         shuffle=False,
                                         num_workers=opt.workers)
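
Note that `out_digit` is not an argument of torchvision's standard MNIST dataset, so `dset` here is a project-specific class. A minimal sketch of consuming the loaders with a throwaway linear classifier; the model, loss, and optimizer below are illustrative assumptions, not part of the original project (nn.LazyLinear requires PyTorch 1.8+):

import torch
import torch.nn as nn

net = nn.Sequential(nn.Flatten(), nn.LazyLinear(num_classes))
images0, _ = next(iter(trainloader))
net(images0)  # run one batch to materialize the lazy layer before building the optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

for images, labels in trainloader:
    optimizer.zero_grad()
    loss = criterion(net(images), labels)
    loss.backward()
    optimizer.step()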
Example #4
                    os.path.join(
                        samples_dir,
                        'iteration_%d.png' % (i / train_data.batch_num)))
                print('Saved samples.')
            batch_x, _ = train_data.next_batch()
            sess.run(step, feed_dict={x: batch_x, rbm.lr: 0.1})
            cost = sess.run(pl, feed_dict={x: batch_x})
            mean_cost.append(cost)
            # Save model
            if i != 0 and train_data.batch_index == 0:
                checkpoint_path = os.path.join(logs_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=epoch + 1)
                print('Saved Model.')
            # Print pseudo likelihood
            if i != 0 and train_data.batch_index == 0:
                print('Epoch %d Cost %g' % (epoch, np.mean(mean_cost)))
                mean_cost = []
                epoch += 1
        print('Test')
        samples = sess.run(sampler, feed_dict={x: noise_x})
        samples = samples.reshape([train_data.batch_size, 28, 28])
        save_images(samples, [8, 8], os.path.join(samples_dir, 'test.png'))
        print('Saved samples.')


data_path = "../../machine-learning/data/mnist/"
train_data = mnist.MNIST("train", data_path, data_size=256, batch_size=64)
# test_data = mnist.MNIST("test", data_path)

train(train_data, 10)
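
The loop above calls a project-specific `save_images(images, size, path)` helper whose definition is not included in this excerpt. A plausible stand-in, assuming the samples are floats in [0, 1] to be tiled into a size[0] x size[1] grid:

import numpy as np
import imageio

def save_images(images, size, path):
    # images: array of shape [rows*cols, H, W]; tile into a rows x cols grid and save as a PNG.
    rows, cols = size
    grid = np.vstack([np.hstack(images[r * cols:(r + 1) * cols]) for r in range(rows)])
    imageio.imwrite(path, (grid * 255).astype(np.uint8))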