Example #1
def get_dataset(dataset_name):
    if dataset_name == 'mnist':
        return get_mnist(ndim=3)
    if dataset_name == 'binary-mnist':
        train, test = get_mnist(ndim=3)
        train._datasets[0][train._datasets[0] != 0] = 1
        train._datasets[0][train._datasets[0] == 0] = -1
        test._datasets[0][test._datasets[0] != 0] = 1
        test._datasets[0][test._datasets[0] == 0] = -1
        return train, test
    if dataset_name == 'cifar10':
        return get_cifar10(ndim=3)
    raise NameError('{}'.format(dataset_name))
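Example #1 rewrites the private `_datasets` arrays of the returned TupleDataset in place. A non-mutating sketch of the same binarization, using chainer's TransformDataset (the `binarize` helper here is ours, not from the original):

import numpy as np
from chainer.datasets import get_mnist, TransformDataset

def binarize(example):
    # map nonzero pixels to 1 and zero pixels to -1, as in the code above
    x, t = example
    return np.where(x != 0, 1, -1).astype(np.float32), t

train, test = get_mnist(ndim=3)
train = TransformDataset(train, binarize)
test = TransformDataset(test, binarize)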
Example #2
    def __init__(self, src='train', rotate=(0, 15, 30, 45, 60, 75), return_domain=True):
        if src == 'train':
            data, _ = get_mnist(ndim=3)
        elif src == 'test':
            _, data = get_mnist(ndim=3)
        else:
            raise ValueError("src must be 'train' or 'test'")

        self.data = data
        self.n_domain = len(rotate)
        self.rotate = rotate
        self.return_domain = return_domain
        self.src = src
Example #3
    def __init__(self, svhn_path=dataset_path, n_mnist=1000, n_svhn=5):
        mat = io.loadmat(os.path.join(svhn_path, 'train_32x32.mat'))
        # mat['X'] has shape (H, W, C, N); average over channels for grayscale
        svhn_x = mat['X'].transpose(2, 0, 1, 3).mean(axis=0)
        svhn_y = mat['y'][:, 0]
        train, test = get_mnist(ndim=3)

        self.svhn = {c: [] for c in range(self.n_classes)}
        self.mnist = {c: [] for c in range(self.n_classes)}

        n, i = 0, 0
        while n < n_mnist * 10:
            x, y = train[i]
            if len(self.mnist[y]) < n_mnist:
                self.mnist[y].append(x)
                n += 1
            i += 1

        n, i = 0, 0
        while n < n_svhn * 10:
            x = svhn_x[..., i]
            # SVHN labels run 1..10 with 10 meaning digit 0; map them to 0..9
            y = svhn_y[i] % 10
            if len(self.svhn[y]) < n_svhn:
                xr = imresize(x, self.img_size)[np.newaxis, ...]
                self.svhn[y].append(xr)
                n += 1
            i += 1

        self.n_mnist = n_mnist
        self.n_svhn = n_svhn
Example #4
def get_dataset(dataset):

    if dataset == "mnist":
        # labels 0-9
        n_class = 10
        # load MNIST
        train, test = get_mnist(ndim=3)

        # normally you would feed this through an iterator, but split
        # into data and labels here for clarity
        train_dataset, test_dataset = split_dataset(train, test)

    elif dataset == "cifar10":
        # labels 0-9
        n_class = 10
        # load CIFAR-10
        train, test = get_cifar10()

        # normally you would feed this through an iterator, but split
        # into data and labels here for clarity
        train_dataset, test_dataset = split_dataset(train, test)

    elif dataset == "cifar100":
        # labels 0-99
        n_class = 100
        # load CIFAR-100
        train, test = get_cifar100()

        # normally you would feed this through an iterator, but split
        # into data and labels here for clarity
        train_dataset, test_dataset = split_dataset(train, test)

    else:
        raise RuntimeError('Invalid dataset choice.')

    return n_class, train_dataset, test_dataset
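`split_dataset` here comes from `vision.utils` (see Example #37) and its source is not shown. A minimal reimplementation sketch, assuming it just separates each TupleDataset into an (images, labels) pair, could use chainer's concat_examples:

from chainer.dataset import concat_examples

def split_dataset(train, test):
    # hypothetical helper matching the usage above: batch every example
    # of each TupleDataset into a single (images, labels) array pair
    train_pair = concat_examples(train[:])
    test_pair = concat_examples(test[:])
    return train_pair, test_pair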
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize', '-b', type=int, default=128)
    parser.add_argument('--epoch', '-e', type=int, default=20)
    parser.add_argument('--out', '-o', default='result')
    args = parser.parse_args()

    model = L.Classifier(MLP(128, 10))

    optimizer = optimizers.Adam()
    optimizer.setup(model)

    train, test = datasets.get_mnist()

    train_iter = iterators.SerialIterator(train, args.batchsize)
    test_iter = iterators.SerialIterator(
        test, args.batchsize, repeat=False, shuffle=False)

    updater = StandardUpdater(train_iter, optimizer, device=-1)
    trainer = Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    trainer.extend(extensions.Evaluator(test_iter, model, device=-1))

    trainer.extend(JsonlReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time'],
        log_report='JsonlReport',
    ))
    trainer.extend(extensions.ProgressBar())

    trainer.run()
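`JsonlReport` is a project-specific extension whose source is not shown. A rough sketch of a compatible extension, assuming it behaves like LogReport but also appends each aggregated entry as one JSON line:

import json
import os

from chainer.training import extensions

class JsonlReport(extensions.LogReport):
    # sketch: LogReport variant that mirrors its entries to a JSON-lines file
    def __init__(self, jsonl_name='log.jsonl', **kwargs):
        super(JsonlReport, self).__init__(**kwargs)
        self._jsonl_name = jsonl_name

    def __call__(self, trainer):
        n_before = len(self.log)
        super(JsonlReport, self).__call__(trainer)
        # append whatever entries this call aggregated
        with open(os.path.join(trainer.out, self._jsonl_name), 'a') as f:
            for entry in self.log[n_before:]:
                f.write(json.dumps(entry) + '\n')

Because it subclasses LogReport, PrintReport(..., log_report='JsonlReport') can read its aggregated entries as usual.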
Example #7
def main(args):
    # Load trained model
    model = Inception()
    serializers.load_hdf5(args.model, model)

    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()

    # Load images (change this constant to 1 to use CIFAR-10 instead of MNIST)
    if 0:
        train, test = datasets.get_cifar10(ndim=3,
                                           withlabel=False,
                                           scale=255.0)
    else:
        train, test = datasets.get_mnist(ndim=3,
                                         rgb_format=True,
                                         scale=255.0,
                                         withlabel=False)

    # Use all 60,000 images unless a sample count is specified
    ims = np.concatenate((train, test))
    if args.samples > 0:
        ims = ims[:args.samples]

    mean, std = inception_score(model, ims)

    print('Inception score mean:', mean)
    print('Inception score std:', std)
Example #8
def dataset(name):
    from chainer.datasets import get_mnist, get_cifar10, get_cifar100
    from datasets import get_imagenet

    # (image_colors, class_labels) plus a lazy loader for each dataset
    sets = {
        "mnist": {
            "attr": (1, 10),
            "data": lambda: get_mnist(ndim=3)
        },
        "cifar10": {
            "attr": (3, 10),
            "data": lambda: get_cifar10()
        },
        "cifar100": {
            "attr": (3, 100),
            "data": lambda: get_cifar100()
        },
        "imagenet": {
            "attr": (3, 1000),
            "data": lambda: get_imagenet()
        }
    }

    if name not in sets:
        raise RuntimeError('Invalid dataset choice.')

    print('using {} dataset.'.format(name))
    return sets[name]
Example #9
    def __init__(self, data_name):
        # get data from chainer
        # images are normalized to [0.0, 1.0]
        if data_name == 'mnist':
            train_tuple, test_tuple = datasets.get_mnist(ndim=3)
        elif data_name == 'fmnist':
            train_tuple, test_tuple = get_fmnist(withlabel=True,
                                                 ndim=3,
                                                 scale=1.0)
        elif data_name == 'cifar10':
            train_tuple, test_tuple = datasets.get_cifar10()
        else:
            raise ValueError('Invalid data')

        self.data_name = data_name

        # preprocess
        # convert to array
        train_image, train_label = concat_examples(train_tuple)
        test_image, test_label = concat_examples(test_tuple)

        # set images to [-0.5, 0.5]
        self.train_image = np.array(train_image, dtype=np.float32) - 0.5
        self.train_label = np.array(train_label, dtype=np.int32)
        self.test_image = np.array(test_image, dtype=np.float32) - 0.5
        self.test_label = np.array(test_label, dtype=np.int32)

        # re-convert to TupleDataset
        self.train_tuple = datasets.TupleDataset(self.train_image,
                                                 self.train_label)
        self.test_tuple = datasets.TupleDataset(self.test_image,
                                                self.test_label)
Example #10
def main(args):
    train, test = datasets.get_mnist(withlabel=True, ndim=3)
    train_iter = iterators.SerialIterator(train, args.batchsize)
    test_iter = iterators.SerialIterator(test,
                                         args.batchsize,
                                         repeat=False,
                                         shuffle=False)

    model = L.Classifier(CNN())

    if args.gpu >= 0:
        cuda.check_cuda_available()
        cuda.get_device(args.gpu).use()
        model.to_gpu()

    optimizer = optimizers.Adam()
    optimizer.setup(model)

    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)

    trainer = training.Trainer(updater, (args.epochs, 'epoch'))
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    trainer.extend(extensions.LogReport())  # Default log report
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'main/accuracy', 'validation/main/loss',
            'validation/main/accuracy'
        ]))
    trainer.extend(extensions.ProgressBar())
    trainer.run()
Example #11
def main():
    model = L.Classifier(MnistCNN())
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    train, test = datasets.get_mnist()
    train_iter = iterators.SerialIterator(train, Config.batchsize)
    test_iter = iterators.SerialIterator(test,
                                         Config.batchsize,
                                         repeat=False,
                                         shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer)

    trainer = training.Trainer(updater, (Config.epoch, 'epoch'))
    trainer.extend(extensions.Evaluator(test_iter, model))
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PlotReport(['main/loss', 'validation/main/loss'],
                              'epoch',
                              file_name='loss.png'))
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))
    trainer.extend(extensions.ProgressBar())

    trainer.run()
Example #12
def main():
    model = NeuralNetwork(n_nodes, 10)
    optimizer = AdaGrad()
    optimizer.setup(model)

    train, test = get_mnist()
    xs, ts = train._datasets
    print(xs[0].shape)
    print(ts[0])
    txs, tts = test._datasets

    for i in range(n_iter):
        # 600 minibatches per epoch assumes batch_size = 100 (60,000 images)
        for j in range(600):
            model.cleargrads()
            x = xs[(j * batch_size):((j + 1) * batch_size)]
            t = ts[(j * batch_size):((j + 1) * batch_size)]
            t = Variable(np.array(t, "i"))
            y = model(x)
            loss = softmax_cross_entropy(y, t)
            loss.backward()
            optimizer.update()

        accuracy_train, loss_train = calc_accuracy(model, xs, ts)
        accuracy_test, _ = calc_accuracy(model, txs, tts)

        print("Epoch {}: Acc.(train) = {:.4f}, Acc.(test) = {:.4f}".format(
            i + 1, accuracy_train, accuracy_test))
Example #13
def main():
    # data
    _, test = get_mnist(withlabel=False)
    n_x = test.shape[1]

    # model
    model = create_sample_model(n_x)
    test = test.astype('f')[:25]

    for i in range(25):
        plt.subplot(5, 5, i + 1)
        plt.imshow(test[i].reshape(28, 28), cmap='gray_r')
    plt.savefig('./result/ans.png')
    plt.close()

    pattern = re.compile(r'.+\.npz$')
    for fname in sorted(os.listdir('./result')):
        if not pattern.match(fname):
            continue
        out = './result/{}.png'.format(fname)
        if os.path.exists(out):
            continue

        print(fname)
        load_npz(os.path.join('./result', fname), model)

        gen_x = model.generate(test)
        gen_x = gen_x.reshape(-1, 28, 28)
        for i in range(25):
            plt.subplot(5, 5, i + 1)
            plt.imshow(gen_x[i], cmap='gray_r')
        plt.savefig(out)
        plt.close()
Example #14
def main():
    # data
    train, test = get_mnist(withlabel=False)
    n_x = train.shape[1]

    # model
    model = create_sample_model(n_x)

    n_batch = 256
    train_iter = SerialIterator(train, n_batch)
    # TODO: report test loss
    # test_iter = SerialIterator(test, n_batch)

    optimizer = Adam()
    optimizer.setup(model)
    gpu = 0
    updater = StandardUpdater(train_iter, optimizer, device=gpu)

    n_epoch = 50
    trainer = Trainer(updater, (n_epoch, 'epoch'))
    trainer.extend(
        snapshot_object(
            model, filename='snapshot_epoch_{.updater.epoch:03d}.npz'),
        trigger=(1, 'epoch'))
    trainer.extend(LogReport())
    trainer.extend(PrintReport([
        'epoch', 'main/loss', 'main/iaf_loss', 'main/rec_loss',
    ]))

    trainer.run()
Example #15
def get_dataset():
    train, test = datasets.get_mnist()
    train = datasets.TransformDataset(train, lambda x: _transform(x, True))
    test = datasets.TransformDataset(test, _transform)
    return {
        'train': train,
        'test': test,
    }
Example #16
def run(input_data):
    i = np.array(json.loads(input_data)['data'])

    _, test = datasets.get_mnist()
    x = Variable(np.asarray([test[i][0]]))
    y = model(x)

    return np.ndarray.tolist(y.data.argmax(axis=1))
Example #17
def get_dataset():
    train, test = datasets.get_mnist()
    validation, train = datasets.split_dataset_random(train, 5000)
    train = datasets.TransformDataset(train, _transform)
    return {
        'train': train,
        'validation': validation,
        'test': test,
    }
Example #18
def mnist_orig_score():
    from chainer import datasets
    from chainer import serializers
    from models import MNISTClassifier
    model = MNISTClassifier()
    serializers.load_hdf5('./mnist.model', model)
    train, _ = datasets.get_mnist(withlabel=False, ndim=3, scale=255)
    mean, std = inception_score(model, train)
    return mean, std
Example #19
    def __init__(self, source, bs, opt, sample_shape, random_seed=None):
        _ = np.random.get_state()  # save the current RNG state
        if random_seed is not None:
            np.random.seed(random_seed)
        self.random_state = np.random.get_state()
        np.random.set_state(_)  # restore the saved state
        self.source = source
        self.bs = bs
        self.opt = opt
        self.sample_shape = list(sample_shape) if isinstance(
            sample_shape, tuple) else sample_shape
        if self.source == 'random_normal':
            self.sample_num = self.bs
        elif self.source in [
                'mnist_train_x', 'mnist_train_t', 'mnist_test_x',
                'mnist_test_t'
        ]:
            # load self.data
            mnist_train, mnist_test = datasets.get_mnist()
            if self.source == 'mnist_train_x':
                self.data = np.array([d[0] for d in mnist_train],
                                     dtype=np.float32)
            if self.source == 'mnist_train_t':
                self.data = np.array([d[1] for d in mnist_train],
                                     dtype=np.int32)
            if self.source == 'mnist_test_x':
                self.data = np.array([d[0] for d in mnist_test],
                                     dtype=np.float32)
            if self.source == 'mnist_test_t':
                self.data = np.array([d[1] for d in mnist_test],
                                     dtype=np.int32)
            self.sample_num = len(self.data)

        elif self.source in [
                'fashion_mnist_train_x', 'fashion_mnist_train_t',
                'fashion_mnist_test_x', 'fashion_mnist_test_t'
        ]:
            # load self.data
            fashion_mnist_train, fashion_mnist_test = \
                datasets.get_fashion_mnist()
            if self.source == 'fashion_mnist_train_x':
                self.data = np.array([d[0] for d in fashion_mnist_train],
                                     dtype=np.float32)
            if self.source == 'fashion_mnist_train_t':
                self.data = np.array([d[1] for d in fashion_mnist_train],
                                     dtype=np.int32)
            if self.source == 'fashion_mnist_test_x':
                self.data = np.array([d[0] for d in fashion_mnist_test],
                                     dtype=np.float32)
            if self.source == 'fashion_mnist_test_t':
                self.data = np.array([d[1] for d in fashion_mnist_test],
                                     dtype=np.int32)
            self.sample_num = len(self.data)
        else:
            raise NotImplementedError
        self.epoch = 0
        self.sample_cnt = 0
Example #20
def get_mnist_num(dig_list: list, train=True) -> np.ndarray:
    """
    指定した数字の画像だけ返す
    """
    mnist_dataset = datasets.get_mnist(ndim=3)[0 if train else 1]  # MNISTデータ取得
    mnist_dataset = [
        img for img, label in mnist_dataset[:] if label in dig_list
    ]
    mnist_dataset = np.stack(mnist_dataset)
    return mnist_dataset
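For instance (a usage sketch):

# collect only the 3s and 7s from the MNIST test split
imgs = get_mnist_num([3, 7], train=False)
print(imgs.shape)  # (N, 1, 28, 28), where N is the number of 3s and 7s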
Example #21
    def __init__(self, validation=False, convolutional=True, batch_size=32):

        if validation:
            data = datasets.get_mnist()[1]
        else:
            data = datasets.get_mnist()[0]

        X = data._datasets[0].astype('float32')
        T = data._datasets[1].astype('int32')

        if convolutional:
            X = X.reshape(X.shape[0], 1, 28, 28)
            self.nin = [1, 28, 28]
        else:
            self.nin = X.shape[1]

        self.nout = (np.max(T) + 1)

        super(MNISTData, self).__init__(X, T, batch_size)
Example #22
def show_dataset():
    from matplotlib import pyplot as plt
    train, test = datasets.get_mnist(ndim=3)
    for t in train[:10]:
        img, label = t
        img = (255*img.reshape(img.shape[1:])).astype(np.uint8)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.imshow(img, cmap='gray', vmin=0, vmax=255)
        ax.set_title(str(label))
        plt.show()
Example #23
    def __init__(self, validation, trial_length, pnoise, batch_size=32, c_noise=True):
        self.batch_size = batch_size
        self.pnoise = pnoise
        self.trial_length = trial_length
        self.predict = 0
        self.c_noise = c_noise

        if validation:
            data = datasets.get_mnist()[1]
        else:
            data = datasets.get_mnist()[0]

        self.X = data._datasets[0].astype('float32')
        self.T = data._datasets[1].astype('int32')

        self.X = np.tile(np.expand_dims(self.X, 1), (1, self.trial_length, 1)).astype('float32')
        self.T = np.tile(np.expand_dims(self.T, 1), (1, self.trial_length)).astype('int32')

        self.batch_ind = np.reshape(np.random.permutation(self.X.shape[0]), (self.batch_size, -1))

        super(MNISTDataSilvan, self).__init__()
Example #24
def main():
    gpu_device = 0
    epoch = 30
    batch_size = 512
    frequency = -1

    train, test = datasets.get_mnist(ndim=3)

    model = L.Classifier(CNN())

    if gpu_device != -1:
        chainer.cuda.get_device_from_id(gpu_device).use()
        model.to_gpu()

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train_iter = chainer.iterators.SerialIterator(train, batch_size)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 batch_size,
                                                 repeat=False,
                                                 shuffle=False)

    updater = training.StandardUpdater(train_iter,
                                       optimizer,
                                       device=gpu_device)
    trainer = training.Trainer(updater, (epoch, 'epoch'))

    trainer.extend(extensions.Evaluator(test_iter, model, device=gpu_device))
    trainer.extend(extensions.dump_graph('main/loss'))

    frequency = epoch if frequency == -1 else max(1, frequency)

    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PlotReport(['main/loss', 'validation/main/loss'],
                              'epoch',
                              file_name='loss.png'))
    trainer.extend(
        extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'],
                              'epoch',
                              file_name='accuracy.png'))
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))

    trainer.run()

    model.to_cpu()
    chainer.serializers.save_npz("result/CNN.model", model)
Example #25
def test_mnist_nn(epoch_num=1000,
                  batch_size=100,
                  dropout_r=0.5,
                  h_cnl=0.51,
                  conv=True,
                  layers=3,
                  residual=True,
                  conv_ksize=9):
    '''
    Given the hyperparameters, train on MNIST and return the error rate.
    '''
    # set up the model and optimizer
    model = mnist_nn(dropout_r, h_cnl, conv, layers, residual, conv_ksize)
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    # fix the random seed so the output is exactly determined by the parameters
    np.random.seed(1234)

    # prepare the data
    train, test = datasets.get_mnist()
    trainn, testn = len(train), len(test)

    # epochs at which to log progress (every 10%)
    logging_num = np.linspace(0, epoch_num, 11).astype(int)

    for epoch in range(epoch_num):
        # build a minibatch
        batch_num = np.random.choice(trainn, batch_size)
        batch = train[batch_num]
        x = batch[0]
        t = batch[1]

        # training step
        model.cleargrads()
        loss = model.loss(x, t)
        loss.backward()
        optimizer.update()

        # log progress
        if epoch in logging_num:
            print(
                str(np.where(logging_num == epoch)[0][0] * 10) + '%',
                '\tcross entropy =', loss.data)

    # evaluate on the test set
    x = test[range(testn)][0].reshape(testn, 1, 28, 28)
    t = test[range(testn)][1]
    res = model(x).data.argmax(axis=1)
    false_p = np.mean(t != res)

    print(false_p)
    return false_p
Example #26
def main():
    xp = cuda.cupy
    # build the model
    model = MyModel()
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        model.to_gpu()  # Copy the model to the GPU
    else:
        xp = np
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    # load MNIST once, with the array dtype matching the device in use
    train, test = datasets.get_mnist(ndim=3, dtype=xp.float32)

    # update the parameters
    iterator = iterators.SerialIterator(train, 1000)
    updater = training.StandardUpdater(iterator, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (10, 'epoch'))

    trainer.run()

    # evaluation
    ok = 0
    for i in range(len(test)):
        x = Variable(xp.array([test[i][0]], dtype=xp.float32))
        t = test[i][1]
        out = model.fwd(x)
        ans = np.argmax(out.data)
        if (ans == t):
            ok += 1

    print((ok * 1.0) / len(test))
Example #27
def check_accuracy(num_loop):
    print("lets check_accuracy")
    train, test = datasets.get_mnist(ndim=3)
    model=CnnModel()
    chainer.serializers.load_npz(os.path.join('result','cnn_{}.npz'.format(num_loop)),model)
    model.to_cpu()
    counter=0
    for t in test:
        img,label=t
        x=Variable(np.array([img]))
        predict = np.argmax(model.fwd(x).data)
        if predict==label:
            counter+=1
    print("done")
    print(counter/len(test))
Example #28
def main():
    train, test = datasets.get_mnist(ndim=3)

    model = L.Classifier(CNN())
    chainer.serializers.load_npz("result/CNN.model", model)

    for i in range(10):
        index = random.randrange(0, len(train))
        data, label = train[index]

        x = chainer.Variable(data.reshape(1, 1, 28, 28))
        result = F.softmax(model.predictor(x))
        print("  input: {}, result: {}".format(label, result.data.argmax()))
Example #29
def load_ims(args):
    """ Return images scaled to [-1, 1]."""
    if args.dataset == 'cifar10':
        # scale=2 yields pixels in [0, 2]; subtracting 1 maps them to [-1, 1]
        ims, ims_test = datasets.get_cifar10(ndim=3, withlabel=False, scale=2)
        ims = numpy.concatenate((ims, ims_test))
        ims -= 1.0
    elif args.dataset == 'mnist':
        ims, ims_test = datasets.get_mnist(ndim=3, withlabel=False, scale=2)
        ims = numpy.concatenate((ims, ims_test))
        ims -= 1.0
    else:
        raise ValueError('Unknown dataset {}'.format(args.dataset))
    if args.samples is not None:
        ims = ims[:args.samples]
    print(ims.shape)
    return ims
Example #30
    def __init__(self,
                 phase,
                 indices=None,
                 withlabel=True,
                 ndim=3,
                 scale=1.,
                 dtype=np.float32,
                 label_dtype=np.int32,
                 rgb_format=False):

        super(Dataset, self).__init__()

        train, test = get_mnist(withlabel, ndim, scale, dtype, label_dtype,
                                rgb_format)

        if phase == 'train':
            dataset = train
        elif phase == 'test':
            dataset = test
        else:
            raise KeyError('`phase` should be `train` or `test`..')

        if indices is not None:
            if isinstance(indices, list):
                indices = np.asarray(indices)
        else:
            indices = np.arange(len(dataset))

        assert len(indices) <= len(dataset)

        dataset = dataset[indices]

        if withlabel:
            images, labels = dataset
        else:
            images, labels = dataset, None

        self._phase = phase
        self._indices = indices
        self._ndim = ndim
        self._scale = scale
        self._dtype = dtype
        self._label_dtype = label_dtype
        self._rgb_format = rgb_format

        self._images = images
        self._labels = labels
Example #31
def main():
    train, test = datasets.get_mnist()
    train_iter = iterators.SerialIterator(train, batch_size=100)
    test_iter = iterators.SerialIterator(test, batch_size=100, repeat=False, shuffle=False)

    model = L.Classifier(MLP())
    model.to_gpu()
    optimizer = optimizers.SGD()
    optimizer.setup(model)

    updater = training.StandardUpdater(train_iter, optimizer, device=0)
    trainer = training.Trainer(updater, (20, 'epoch'), out='result')
    trainer.extend(extensions.Evaluator(test_iter, model, device=0))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(['epoch', 'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar())
    trainer.run()
Example #32
def mnist_test():
    train, test = datasets.get_mnist()
    train_iter = iterators.SerialIterator(train, batch_size=100, shuffle=True)
    test_iter = iterators.SerialIterator(test, batch_size=100, repeat=False, shuffle=False)

    class MLP(Chain):
        def __init__(self, n_units, n_out):
            super(MLP, self).__init__()
            with self.init_scope():
                self.l1 = L.Linear(None, n_units)
                self.l2 = L.Linear(None, n_units)
                self.l3 = L.Linear(None, n_out)

        def __call__(self, x):
            h1 = F.relu(self.l1(x))
            h2 = F.relu(self.l2(h1))
            y = self.l3(h2)
            return y

    class Classifier(Chain):
        def __init__(self, predictor):
            super(Classifier, self).__init__()
            with self.init_scope():
                self.predictor = predictor

        def __call__(self, x, t):
            y = self.predictor(x)
            loss = F.softmax_cross_entropy(y, t)
            accuracy = F.accuracy(y, t)
            report({'loss': loss, 'accuracy': accuracy}, self)
            return loss

    model = Classifier(MLP(100, 10))
    optimizer = optimizers.SGD()
    optimizer.setup(model)

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (6, 'epoch'), out='result')

    trainer.extend(extensions.Evaluator(test_iter, model))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(['epoch', 'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar())

    trainer.run()
Example #33
def main():
    parser = argparse.ArgumentParser(description='DCGAN_MNIST')
    parser.add_argument('--batchsize', '-b', type=int, default=200, help='Size of each minibatch')
    parser.add_argument('--epoch', '-e', type=int, default=20, help='Number of training epochs')
    parser.add_argument('--gpu', '-g', type=int, default=-1, help='GPU ID (-1 for CPU)')
    args = parser.parse_args()

    z_dim = 2
    batch_size = args.batchsize
    epoch = args.epoch
    device = args.gpu
    output = "result{}".format(device)

    print("GPU: {}".format(device))
    print("BatchSize: {}".format(batch_size))
    print("Epoch: {}".format(epoch))

    gen = Generator(z_dim)
    dis = Discriminator()
    if device >= 0:
        gen.to_gpu()
        dis.to_gpu()

    # note: the generator's Adam uses a negative alpha, presumably so the same
    # loss can be descended by the discriminator and ascended by the generator
    opt = {'gen': optimizers.Adam(alpha=-0.001, beta1=0.5),
           'dis': optimizers.Adam(alpha=0.001, beta1=0.5)}
    opt['gen'].setup(gen)
    opt['dis'].setup(dis)

    train, test = datasets.get_mnist(withlabel=False, ndim=3)
    train_iter = iterators.SerialIterator(train, batch_size=batch_size)

    updater = GAN_Updater(train_iter, gen, dis, opt, device=device, z_dim=z_dim)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out=output)

    trainer.extend(extensions.dump_graph('loss'))
    trainer.extend(extensions.snapshot(), trigger=(epoch, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(['epoch', 'loss', 'loss_gen', 'loss_data']))
    trainer.extend(extensions.ProgressBar(update_interval=100))

    trainer.run()
Example #34
def train(num_loop):
    chainer.cuda.get_device_from_id(0).use()
    model = CnnModel()
    model.to_gpu()

    optimizer = optimizers.Adam()
    optimizer.setup(model)
    minibatch_size = 1000
    train, test = datasets.get_mnist(ndim=3)
    iterator = iterators.SerialIterator(train, minibatch_size)
    updater = training.StandardUpdater(iterator, optimizer, device=0)
    loops = (num_loop, 'epoch')
    if not os.path.exists('result'):
        os.mkdir('result')
    trainer = training.Trainer(updater, loops, out='result')
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.snapshot_object(
        model, 'cnn_{.updater.epoch}.npz'), trigger=(1, 'epoch'))
    print('starting training')
    trainer.run()
    print('finished training')
Example #35
def main():
    # get mnist dataset as TupleDataset
    train, test = datasets.get_mnist(ndim=3)
    model = MnistModel()
    optimizer = optimizers.Adam()
    optimizer.setup(model)
    minibatch_size = 1000
    iterator = iterators.SerialIterator(train, minibatch_size)
    updater = training.StandardUpdater(iterator, optimizer)
    loops = (10, 'epoch')
    trainer = training.Trainer(updater, loops)
    trainer.extend(extensions.ProgressBar())
    trainer.run()

    counter = 0

    for t in test:
        img, label = t
        x = Variable(img[np.newaxis])  # add a batch dimension for the model
        predict = np.argmax(model.fwd(x).data)
        if predict == label:
            counter += 1

    print(counter/len(test))
Example #36
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
0 .. 9までのMNISTデータを1つづつ、標準出力に可視化してみよ。
"""

from chainer import datasets

train, test = datasets.get_mnist()


for label_i_want in range(10):
    for labeled_image in train:
        image, label = labeled_image
        if label != label_i_want:
            continue
        print(label)
        for i in range(28):
            # str(n)[2] is the first decimal digit of the pixel value in [0, 1]
            print("".join([str(n)[2] for n in image[28*i:28*(i+1)]]).replace("0", " "))
        break
Example #37
from chainer.optimizer import GradientClipping
from chainer.optimizers import Adam
from chainer.datasets import get_cifar10, get_cifar100, get_mnist

import numpy as np

from vision.resnet.resnet import ResNet
from vision.utils import plot_loss, draw_image, draw_layer, split_dataset, draw_graph

dataset = "cifar10"

if dataset == "mnist":
    # labels 0-9
    n_class = 10
    # load MNIST
    train, test = get_mnist(ndim=3)

    # normally you would feed this through an iterator, but split
    # into data and labels here for clarity
    train_dataset, test_dataset = split_dataset(train, test)

    train_x = np.array(train_dataset[0])
    train_y = np.array(train_dataset[1])
elif dataset == "cifar10":
    # labels 0-9
    n_class = 10
    # load CIFAR-10
    train, test = get_cifar10()

    # normally you would feed this through an iterator, but split
    # into data and labels here for clarity
    train_dataset, test_dataset = split_dataset(train, test)