Example #1
    def test_reset(self):
        dataset = [1, 2, 3, 4, 5]
        it = iterators.SerialIterator(dataset, 2, repeat=False,
                                      shuffle=self.shuffle,
                                      order_sampler=self.order_sampler)

        for trial in range(4):
            batches = sum([it.next() for _ in range(3)], [])
            self.assertEqual(sorted(batches), dataset)
            for _ in range(2):
                self.assertRaises(StopIteration, it.next)
            it.reset()
Example #2
def train():
    # make training data
    data_maker = DataMaker(steps_per_cycle=STEPS_PER_CYCLE,
                           number_of_cycles=NUMBER_OF_CYCLES)
    data = data_maker.make(LENGTH_OF_SEQUENCE)
    half = len(data) // 2
    train_data = data[:half]
    test_data = data[half:]
    # Iterator
    batchsize = 100
    train_iter = iterators.SerialIterator(train_data, batchsize)
    test_iter = iterators.SerialIterator(test_data,
                                         batchsize,
                                         repeat=False,
                                         shuffle=False)

    # setup model
    model = LSTM(IN_UNITS, HIDDEN_UNITS, OUT_UNITS)

    # setup optimizer
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    updater = training.StandardUpdater(train_iter, optimizer, MyConverter)
    trainer = training.Trainer(updater, (20, 'epoch'), out='result')
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.observe_lr())
    trainer.extend(extensions.Evaluator(test_iter, model, MyConverter),
                   name='val')
    trainer.extend(
        extensions.PrintReport(
            ['epoch', 'main/loss', 'val/main/loss', 'elapsed_time', 'lr']))
    trainer.extend(
        extensions.PlotReport(['main/loss', 'val/main/loss'],
                              x_key='epoch',
                              file_name='loss.png'))
    # trainer.extend(extensions.ProgressBar())

    trainer.run()
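The example above hands MyConverter to StandardUpdater and Evaluator without showing its definition; the real converter lives elsewhere in the source project. Purely as an illustration, a minimal converter for this kind of sequence data could look like the sketch below (the item layout, an (input, target) pair of float32 arrays, is an assumption, not the project's actual code):

# Hypothetical sketch only; the project's real MyConverter may differ.
import numpy as np
from chainer.dataset import to_device

def MyConverter(batch, device=None):
    # Stack the mini-batch into arrays and move them to the target device.
    xs = np.stack([x for x, _ in batch]).astype(np.float32)
    ts = np.stack([t for _, t in batch]).astype(np.float32)
    return to_device(device, xs), to_device(device, ts)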
Example #3
def main():

    # Define hyperparameters
    # number of epochs
    epoch = 100
    # batch size
    batchsize = 500
    # learning rate
    learning_rate = 0.01

    # Load the input files (the poker-hand CSVs are comma-separated)
    train = np.loadtxt('C:/wk/UCI_add2/data/poker-hand-training-true.csv', delimiter=',')
    test = np.loadtxt('C:/wk/UCI_add2/data/poker-hand-testing.csv', delimiter=',')
    
    train_iter = iterators.SerialIterator(train, batchsize)
    test_iter = iterators.SerialIterator(test, batchsize, repeat=False, shuffle=False)

    # Define the model
    model = L.Classifier(Model())
    # Optimizer
    optimizer = optimizers.SGD(learning_rate)     # stochastic gradient descent
    optimizer.setup(model)
    
    # Run the training using a Trainer
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (epoch, 'epoch'))

    # Feed the test data to the trained model and evaluate it
    trainer.extend(extensions.Evaluator(test_iter, model))
    
    # Visualize the training results.
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], file_name='loss.png'))
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss', 'main/accuracy', 
                                           'validation/main/accuracy', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar())
    
    trainer.run()
Example #4
def main():
    conf = get_conf()
    train, val, index2tok, SEQ_LEN = get_Dataset(conf)
    train_iter = iterators.SerialIterator(train, conf.batch_size)
    val_iter = iterators.SerialIterator(val, conf.batch_size)

    N_VOC = len(list(index2tok.keys()))

    if conf.gpu_num > 0:
        for i in range(conf.gpu_num):
            cuda.get_device_from_id(i).use()

    models = build_models(conf, N_VOC, SEQ_LEN)
    opts = build_opts(models, conf.adam_alpha, conf.adam_beta1,
                      conf.adam_beta2)

    updater = Updater(train_iter, models, opts, conf.gpu_num)
    trainer = training.Trainer(updater, (conf.max_epoch, "epoch"),
                               out=conf.save_path)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Example #5
def main():
    max_history = 30
    n_epoch = 20
    batch_size = 128
    n_factor = 30
    n_cold_start_item = 100
    cold_start_click_count = 10
    passes = 10

    n_user, n_item, train, test, cs_test = make_data(max_history, passes,
                                                     n_cold_start_item,
                                                     cold_start_click_count)

    model = LinearWeightAdaption(n_user, n_item + 1, n_factor=n_factor)
    train_iter = iterators.SerialIterator(train, batch_size, shuffle=True)
    test_iter = iterators.SerialIterator(test,
                                         batch_size,
                                         repeat=False,
                                         shuffle=False)
    cs_test_iter = iterators.SerialIterator(cs_test,
                                            batch_size,
                                            repeat=False,
                                            shuffle=False)
    optimizer = optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.003))

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'), out='result')
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.Evaluator(test_iter, model), name='test')
    trainer.extend(extensions.Evaluator(cs_test_iter, model), name='cs_test')
    trainer.extend(
        extensions.PrintReport(entries=[
            'epoch', 'main/loss', 'test/main/loss', 'cs_test/main/loss',
            'elapsed_time'
        ]))
    trainer.extend(extensions.ProgressBar())
    trainer.run()
Example #6
    def learn(self):
        '''
        Run training.
        '''
        dataset = chainer.datasets.TupleDataset(self.spect, self.score)
        p = 0.999
        trainn = int(p * len(dataset))
        print(trainn, len(dataset) - trainn)
        train, test = chainer.datasets.split_dataset_random(dataset, trainn)

        train_iter = iterators.SerialIterator(train,
                                              batch_size=1,
                                              shuffle=True)
        test_iter = iterators.SerialIterator(test,
                                             batch_size=2,
                                             repeat=False,
                                             shuffle=False)

        updater = training.StandardUpdater(train_iter, self.optimizer)
        trainer = training.Trainer(updater, (300000, 'iteration'),
                                   out='result')

        trainer.extend(extensions.Evaluator(test_iter,
                                            self.classifier,
                                            eval_func=self.eval_call),
                       trigger=(500, 'iteration'))
        trainer.extend(extensions.LogReport(trigger=(50, 'iteration')))
        trainer.extend(
            extensions.PrintReport([
                'iteration', 'main/accuracy', 'main/loss',
                'validation/main/accuracy', 'validation/main/loss'
            ]))
        trainer.extend(extensions.ProgressBar(update_interval=5))
        trainer.extend(
            extensions.snapshot_object(self.model,
                                       'model_{.updater.iteration}.npz',
                                       serializers.save_npz),
            trigger=(500, 'iteration'))
        trainer.run()
Example #7
    def test_iterator_serialize(self):
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.SerialIterator(dataset,
                                      2,
                                      shuffle=self.shuffle,
                                      order_sampler=self.order_sampler)

        self.assertEqual(it.epoch, 0)
        self.assertAlmostEqual(it.epoch_detail, 0 / 6)
        self.assertIsNone(it.previous_epoch_detail)
        batch1 = it.next()
        self.assertEqual(len(batch1), 2)
        self.assertIsInstance(batch1, list)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 2 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
        batch2 = it.next()
        self.assertEqual(len(batch2), 2)
        self.assertIsInstance(batch2, list)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 4 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)

        target = dict()
        it.serialize(serializers.DictionarySerializer(target))

        it = iterators.SerialIterator(dataset, 2)
        it.serialize(serializers.NpzDeserializer(target))
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 4 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)

        batch3 = it.next()
        self.assertEqual(len(batch3), 2)
        self.assertIsInstance(batch3, list)
        self.assertTrue(it.is_new_epoch)
        self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
        self.assertAlmostEqual(it.epoch_detail, 6 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
Example #8
    def setUp(self):
        self.data = [
            numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)
        ]

        self.iterator = iterators.SerialIterator(self.data,
                                                 1,
                                                 repeat=False,
                                                 shuffle=False)
        self.target = DummyModel(self)
        self.evaluator = extensions.Evaluator(self.iterator, {},
                                              eval_func=self.target,
                                              progress_bar=True)
Example #9
def train_model(X_train, y_train):
    print('training...')
    loss_sum = 0
    acc_sum = 0
    train_count = X_train.shape[0]
    model = VegeCNN(55)
    optimizer = optimizers.Adam()
    optimizer.setup(model)
#    optimizer.add_hook(chainer.optimizer.WeightDecay(5e-4))
    
    
    train_iter_X = iterators.SerialIterator(X_train, batch_num)
    train_iter_y = iterators.SerialIterator(y_train, batch_num)
    
    start = time.time()
#    while train_iter.epoch < EPOCH:
    batch_X = np.array(train_iter_X.next())
    batch_y = np.array(train_iter_y.next())
    X = Variable(batch_X)
    t = Variable(batch_y)
    y = model(X)
    
    loss = F.softmax_cross_entropy(y, t)
    acc  = F.accuracy(y, t)
    model.cleargrads()
    loss.backward()
    optimizer.update()
    
    loss_sum += float(loss.data)
    acc_sum += float(acc.data)
#    if train_iter.is_new_epoch:
#    print('epoch: ', 1)
    print('train mean loss: {:.2f}, accuracy: {:.2f}'.format(loss_sum / train_count, acc_sum / train_count))
             

    end = time.time()
    elapsed_time = end - start
    print("elapsed_time:{0}".format(elapsed_time))
    return model
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--pretrained-model', default='coco')
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    model = LightHeadRCNNResNet101(
        n_fg_class=len(coco_bbox_label_names),
        pretrained_model=args.pretrained_model)
    model.use_preset('evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = COCOBboxDataset(
        split='minival', use_crowded=True,
        return_crowded=True, return_area=True)
    iterator = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_areas, gt_crowdeds = rest_values

    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds)

    keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]

    print('')
    for key in keys:
        print('{:s}: {:f}'.format(key, result[key]))
Example #11
    def test_iterator_not_repeat_not_even(self):
        dataset = [1, 2, 3, 4, 5]
        it = iterators.SerialIterator(dataset, 2, repeat=False, shuffle=False)

        self.assertAlmostEqual(it.epoch_detail, 0 / 5)
        self.assertEqual(it.next(), [1, 2])
        self.assertAlmostEqual(it.epoch_detail, 2 / 5)
        self.assertEqual(it.next(), [3, 4])
        self.assertAlmostEqual(it.epoch_detail, 4 / 5)
        self.assertEqual(it.next(), [5])
        self.assertTrue(it.is_new_epoch)
        self.assertEqual(it.epoch, 1)
        self.assertAlmostEqual(it.epoch_detail, 5 / 5)
        self.assertRaises(StopIteration, it.next)
Example #12
def train():
    model_gen = Generator()
    model_dis = Discriminator()

    if DEVICE >= 0:
        chainer.cuda.get_device_from_id(DEVICE).use()
        chainer.cuda.check_cuda_available()
        model_gen.to_gpu(DEVICE)
        model_dis.to_gpu(DEVICE)

    images = []

    fs = os.listdir('train')
    for f in fs:
        img = Image.open('train/' + f).convert('RGB').resize(
            (IMAGE_SIZE, IMAGE_SIZE))
        hpix = np.array(img, dtype=np.float32) / 255.0
        hpix = hpix.transpose(2, 0, 1)
        images.append(hpix)

    train_iter = iterators.SerialIterator(images, BATCH_SIZE, shuffle=True)

    optimizer_gen = optimizers.Adam(alpha=0.0002, beta1=0.5)
    optimizer_gen.setup(model_gen)
    optimizer_dis = optimizers.Adam(alpha=0.0002, beta1=0.5)
    optimizer_dis.setup(model_dis)

    updater = Updater(train_iter, {
        'opt_gen': optimizer_gen,
        'opt_dis': optimizer_dis
    }, device=DEVICE)

    trainer = training.Trainer(updater, (100000, 'epoch'), out='result')
    trainer.extend(extensions.ProgressBar())

    snapshot_interval = (5000, 'epoch')
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        model_gen, 'model_gen_epoch_{.updater.epoch}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        model_dis, 'model_dis_epoch_{.updater.epoch}.npz'),
                   trigger=snapshot_interval)
    if RESUME:
        chainer.serializers.load_npz('result/snapshot_epoch_26797.npz',
                                     trainer)
    trainer.run()
Example #13
def train(args):
    nz = args.nz
    batch_size = args.batch_size
    epochs = args.epochs
    gpu = args.gpu

    # CIFAR-10 images in range [-1, 1] (tanh generator outputs)
    train, _ = datasets.get_cifar10(withlabel=True, ndim=3, scale=2)

    train_iter = iterators.SerialIterator(train, batch_size)

    z_iter = RandomNoiseIterator(GaussianNoiseGenerator(0, 1, args.nz),
                                 batch_size)

    optimizer_generator = optimizers.RMSprop(lr=0.00005)
    optimizer_critic = optimizers.RMSprop(lr=0.00005)
    generator = Generator()
    optimizer_generator.setup(generator)
    optimizer_critic.setup(Critic())

    updater = WassersteinGANUpdater(iterator=train_iter,
                                    noise_iterator=z_iter,
                                    optimizer_generator=optimizer_generator,
                                    optimizer_critic=optimizer_critic,
                                    device=gpu)

    trainer = training.Trainer(updater,
                               stop_trigger=(epochs, 'epoch'),
                               out=args.out)
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.LogReport(trigger=(10, 'iteration')))
    trainer.extend(GeneratorSample(), trigger=(1, 'epoch'))
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'iteration', 'critic/loss', 'critic/loss/real',
            'critic/loss/fake', 'generator/loss'
        ]))
    # Take a snapshot at each epoch
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'),
        trigger=(1, 'epoch'))
    trainer.extend(extensions.snapshot_object(generator,
                                              'model_epoch_{.updater.epoch}'),
                   trigger=(1, 'epoch'))

    if args.resume:
        # Resume from a snapshot
        serializers.load_npz(args.resume, trainer)

    trainer.run()
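RandomNoiseIterator and GaussianNoiseGenerator above come from the surrounding WGAN project and are not shown in this excerpt. As a rough, assumption-laden sketch (not the project's actual code), a noise iterator that pairs with SerialIterator in the updater might be written like this:

# Hypothetical stand-ins for the project's noise classes; names and behaviour are assumed.
import numpy as np
from chainer.dataset import Iterator


class GaussianNoiseGenerator:
    def __init__(self, mean, std, size):
        self.mean, self.std, self.size = mean, std, size

    def __call__(self, batch_size):
        # Draw a (batch_size, size) array of N(mean, std) noise.
        return np.random.normal(self.mean, self.std,
                                (batch_size, self.size)).astype(np.float32)


class RandomNoiseIterator(Iterator):
    def __init__(self, generator, batch_size):
        self.generator = generator
        self.batch_size = batch_size

    def __next__(self):
        # Noise can be drawn forever, so this iterator never raises StopIteration.
        return self.generator(self.batch_size)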
Example #14
def grouping_notmnist_cnn(size):
    start = time()
    train = make_tupledata_set(size=size)
    test = make_tupledata_set(size=100)

    class notMnistCNNModel(Chain):
        def __init__(self):
            super(notMnistCNNModel, self).__init__(
                cn1=L.Convolution2D(1, 20, 5),
                cn2=L.Convolution2D(20, 50, 5),
                l1=L.Linear(800, 500),
                l2=L.Linear(500, 10),
            )

        def __call__(self, x, t):
            return F.softmax_cross_entropy(self.fwd(x), t)

        def fwd(self, x):
            h1 = F.max_pooling_2d(F.relu(self.cn1(x)), 2)
            h2 = F.max_pooling_2d(F.relu(self.cn2(h1)), 2)
            h3 = F.dropout(F.relu(self.l1(h2)))
            return self.l2(h3)

    model = notMnistCNNModel()
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    iterator = iterators.SerialIterator(train, 1000)
    updater = training.StandardUpdater(iterator, optimizer)
    trainer = training.Trainer(updater, (10, "epoch"))

    trainer.run()  # start training

    # serializers.save_npz("notMNIST/model/notmnist_cnn.model", model)  # comment this out to continue from the previous run
    # serializers.load_npz("notMNIST/model/notmnist_cnn.model", model)

    # evaluation
    ok = 0
    for i in range(len(test)):
        x = Variable(np.array([test[i][0]], dtype=np.float32))
        t = test[i][1]
        out = model.fwd(x)
        ans = np.argmax(out.data)
        if (ans == t):
            ok += 1
    finish = time()
    print("train:", len(train))
    print("test: ", len(test))
    print((ok / len(test)) * 100, "%")
    print("time: ", int(finish - start), "s", "\n")
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        choices=('faster_rcnn', 'ssd300', 'ssd512'),
                        default='ssd300')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=32)
    args = parser.parse_args()

    if args.model == 'faster_rcnn':
        model = FasterRCNNVGG16(pretrained_model='voc07')
    elif args.model == 'ssd300':
        model = SSD300(pretrained_model='voc0712')
    elif args.model == 'ssd512':
        model = SSD512(pretrained_model='voc0712')

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    model.use_preset('evaluate')

    dataset = VOCDetectionDataset(year='2007',
                                  split='test',
                                  use_difficult=True,
                                  return_difficult=True)
    iterator = iterators.SerialIterator(dataset,
                                        args.batchsize,
                                        repeat=False,
                                        shuffle=False)

    pred_bboxes, pred_labels, pred_scores, gt_values = \
        apply_detection_link(model, iterator, hook=ProgressHook(len(dataset)))
    gt_bboxes, gt_labels, gt_difficults = gt_values

    eval_ = eval_detection_voc(pred_bboxes,
                               pred_labels,
                               pred_scores,
                               gt_bboxes,
                               gt_labels,
                               gt_difficults,
                               use_07_metric=True)

    print()
    print('mAP: {:f}'.format(eval_['map']))
    for l, name in enumerate(voc_detection_label_names):
        if l in eval_:
            print('{:s}: {:f}'.format(name, eval_[l]['ap']))
        else:
            print('{:s}: -'.format(name))
Example #16
def get_test_data_source(ds_metainfo, batch_size):
    predictor_class = ds_metainfo.test_transform
    dataset = ds_metainfo.dataset_class(root=ds_metainfo.root_dir_path,
                                        mode="test",
                                        transform=None)
    iterator = iterators.SerialIterator(dataset=dataset,
                                        batch_size=batch_size,
                                        repeat=False,
                                        shuffle=False)
    return {
        "predictor_class": predictor_class,
        "iterator": iterator,
        "ds_len": len(dataset)
    }
Example #17
def main():
    src_index_text_filepath = args.src_index_sentence_text
    dst_index_text_filepath = args.dst_index_sentence_text

    print("src_index_data: ", src_index_text_filepath)
    print("dst_index_data: ", dst_index_text_filepath)

    src_dataset = load_wordid_text(src_index_text_filepath, reverse=False)
    dst_dataset = load_wordid_text(dst_index_text_filepath, reverse=False)

    n_words_src = max([max(x) for x in src_dataset]) + 1
    n_words_dst = max([max(x) for x in dst_dataset]) + 1

    n_layers = 2
    n_dim = 500

    model = NStepEncDec(n_layers, n_words_src, n_words_dst, n_dim)
    if args.resume != '':
        chainer.serializers.load_hdf5(args.resume, model)
        print(model)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        chainer.cuda.check_cuda_available()
        model.to_gpu()

    optimizer = optimizers.Adam()
    optimizer.setup(model)

    train = TupleDataset(src_dataset, dst_dataset)
    train_iter = iterators.SerialIterator(train,
                                          args.batch_size,
                                          shuffle=False)
    updater = EncDecUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out='result')

    snapshot_interval = (10, 'epoch')
    trainer.extend(extensions.snapshot_object(
        model, 'model_epoch_{.updater.epoch}.hdf',
        chainer.serializers.save_hdf5),
                   trigger=snapshot_interval)
    # trainer.extend(extensions.LogReport())
    # trainer.extend(extensions.PrintReport(['epoch', 'main/loss']))
    trainer.extend(extensions.ProgressBar(update_interval=1))
    trainer.run()

    model.to_cpu()
    chainer.serializers.save_hdf5("result/enc-dec_transmodel.hdf5", model)
Example #18
    def test_iterator_repeat(self):
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.SerialIterator(dataset, 2)
        for i in range(3):
            self.assertEqual(it.epoch, i)
            batch1 = it.next()
            self.assertEqual(len(batch1), 2)
            self.assertFalse(it.is_new_epoch)
            batch2 = it.next()
            self.assertEqual(len(batch2), 2)
            self.assertFalse(it.is_new_epoch)
            batch3 = it.next()
            self.assertEqual(len(batch3), 2)
            self.assertTrue(it.is_new_epoch)
            self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
Example #19
    def train(self):
        # Mini-batch training: use 20 samples per update
        train_iter = iterators.SerialIterator(self.data.train(), 20)

        # Create the updater
        updater = training.StandardUpdater(train_iter, self.optimizer)

        # Train for 20000 epochs
        trainer = training.Trainer(updater, (20000, 'epoch'))

        # Show training progress with a progress bar
        trainer.extend(extensions.ProgressBar())

        # Run the training
        trainer.run()
Example #20
def main():
    X, y = generate_data()
    model = L.Classifier(MakeMoonModel())
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    train_dataset = tuple_dataset.TupleDataset(X, y)
    train_iter = iterators.SerialIterator(train_dataset, batch_size=200)

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (10000, 'epoch'), out='result')
    trainer.extend(extensions.ProgressBar())
    trainer.run()

    visualize(X, y, model)
Example #21
def setup(opt):
    # Iterator setup
    cifar10 = opt.dataset.lower() == 'cifar10'
    train_data = ImageDataset(opt, cifar10, train=True)
    val_data = ImageDataset(opt, cifar10, train=False)
    if opt.debug:
        train_data = chainer.datasets.split_dataset(
            train_data, opt.batch_size)[0]
        val_data = chainer.datasets.split_dataset(val_data, opt.batch_size)[0]
    train_iter = iterators.MultiprocessIterator(
        train_data, opt.batch_size)
    val_iter = iterators.SerialIterator(
        val_data, opt.batch_size, repeat=False, shuffle=False)

    return train_iter, val_iter
Example #22
    def test_iterator_not_repeat_not_even(self):
        dataset = [1, 2, 3, 4, 5]
        it = iterators.SerialIterator(dataset, 2, repeat=False)

        self.assertAlmostEqual(it.epoch_detail, 0 / 5)
        batch1 = it.next()
        self.assertAlmostEqual(it.epoch_detail, 2 / 5)
        batch2 = it.next()
        self.assertAlmostEqual(it.epoch_detail, 4 / 5)
        batch3 = it.next()
        self.assertAlmostEqual(it.epoch_detail, 5 / 5)
        self.assertRaises(StopIteration, it.next)

        self.assertEqual(len(batch3), 1)
        self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
Example #23
    def test_iterator_repeat(self):
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.SerialIterator(dataset, 2, shuffle=False)
        for i in range(3):
            self.assertEqual(it.epoch, i)
            self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
            self.assertEqual(it.next(), [1, 2])
            self.assertFalse(it.is_new_epoch)
            self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
            self.assertEqual(it.next(), [3, 4])
            self.assertFalse(it.is_new_epoch)
            self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
            self.assertEqual(it.next(), [5, 6])
            self.assertTrue(it.is_new_epoch)
            self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
Example #24
    def test_iterator_not_repeat(self):
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.SerialIterator(dataset, 2, repeat=False, shuffle=False)

        self.assertAlmostEqual(it.epoch_detail, 0 / 6)
        self.assertEqual(it.next(), [1, 2])
        self.assertAlmostEqual(it.epoch_detail, 2 / 6)
        self.assertEqual(it.next(), [3, 4])
        self.assertAlmostEqual(it.epoch_detail, 4 / 6)
        self.assertEqual(it.next(), [5, 6])
        self.assertTrue(it.is_new_epoch)
        self.assertEqual(it.epoch, 1)
        self.assertAlmostEqual(it.epoch_detail, 6 / 6)
        for i in range(2):
            self.assertRaises(StopIteration, it.next)
Example #25
    def predict(
            self,
            X,
            dataset_creator=None,
            iterator=lambda x, s: iterators.SerialIterator(
                x, s if s < len(x) else len(x), repeat=False, shuffle=False),
            converter=convert.concat_examples):

        if dataset_creator is None:
            from commonml.skchainer import XyDataset
            dataset_creator = XyDataset

        has_train = 'train' in inspect.getfullargspec(
            self.model.predictor.__call__).args

        def predict_on_predictor(X):
            if has_train:
                return self.model.predictor(X, train=False)
            else:
                return self.model.predictor(X)

        results = None
        batch_size = self.batch_size
        dataset = dataset_creator(X=X, model=self.model)
        while True:
            try:
                dataset_iter = iterator(dataset, batch_size)
                for batch in dataset_iter:
                    in_arrays = converter(batch, self.device)
                    pred = predict_on_predictor(in_arrays[0])
                    if results is None:
                        results = cuda.to_cpu(pred.data)
                    else:
                        results = np.concatenate(
                            (results, cuda.to_cpu(pred.data)), axis=0)
            except RuntimeError as e:
                if 'out of memory' not in str(e):
                    raise e
                results = None
                batch_size = int(batch_size * 0.8)
                if batch_size == 0:
                    raise e
                logger.warning(
                    'Memory shortage. batch_size is changed to %d', batch_size)
                continue
            break

        return self.model.postpredict_y(results)
Example #26
def predict(model, modelfile, test_ids, test_file_ids, is_train, output_dir,
            args, templates):

    # set chainer's global train config flag
    chainer.config.train = False

    serializers.load_npz(modelfile, model)
    log.info("Model loaded: %s", modelfile)

    test_iter = iterators.SerialIterator(test_ids,
                                         batch_size=args['batch_size'],
                                         shuffle=True,
                                         repeat=False)

    test_fscore = 0.0
    test_predictions = []
    new_test_instance_ids = []
    log.info("Generating predictions on test set.")

    totalcount = 0

    while True:
        test_batch = test_iter.next()
        test_prediction, _, count = model(test_batch)
        test_predictions.extend(test_prediction)
        new_test_instance_ids.extend(test_batch)

        totalcount += count

        if test_iter.is_new_epoch:
            dp.generate_prediction_file_for_sbm(
                new_test_instance_ids, test_predictions, templates,
                output_dir + const.TEST_OUTPUT_DIR, test_file_ids,
                args['special_entities'], args['post_filter'])
            if args['scenario'] == 1:
                log.info("scenario 1")
                test_fscore, test_recall, test_precision = postprocess.compute_f_score(
                    output_dir + const.TEST_OUTPUT_DIR, args['train_dir'],
                    args['test_dir'], args['evaluation_script'],
                    args['interpreter'], args['dataset'], is_train, output_dir)
                log.info("\t%.4f\t%.4f\t%.4f", test_precision, test_recall,
                         test_fscore)
            elif args['scenario'] == 0:
                postprocess.convert_tr_to_t(output_dir + const.TEST_OUTPUT_DIR,
                                            output_dir)
            break
    log.info("Finished.")
    log.info("Total Classifications: %s", totalcount)
Example #27
def tictactoe_test():
    train = create_dataset_board_and_result()
    train_iter = iterators.SerialIterator(train, batch_size=100, shuffle=True)

    # test_iter = iterators.SerialIterator(test, batch_size=100, repeat=False, shuffle=False)

    class MLP(Chain):
        def __init__(self, n_units, n_out):
            super(MLP, self).__init__()
            with self.init_scope():
                self.l1 = L.Linear(None, n_units)
                self.l2 = L.Linear(None, n_units)
                self.l3 = L.Linear(None, n_out)

        def __call__(self, x):
            h1 = F.relu(self.l1(x))
            h2 = F.relu(self.l2(h1))
            y = self.l3(h2)
            return y

    class Classifier(Chain):
        def __init__(self, predictor):
            super(Classifier, self).__init__()
            with self.init_scope():
                self.predictor = predictor

        def __call__(self, x, t):
            y = self.predictor(x)
            loss = F.softmax_cross_entropy(y, t)
            accuracy = F.accuracy(y, t)
            report({'loss': loss, 'accuracy': accuracy}, self)
            return loss

    model = Classifier(MLP(100, 2))
    optimizer = optimizers.SGD()
    optimizer.setup(model)

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (6, 'epoch'), out='result')

    # trainer.extend(extensions.Evaluator(test_iter, model))
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PrintReport(
            ['epoch', 'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar())

    trainer.run()
Example #28
    def fit(self, X_train, T_train):
        self.mlp = MLP(self.n_output, self.n_hidden)
        print("start fitting")
        train = list(zip(X_train, T_train))
        batchsize = 100
        max_label = int(max(T_train)) + 1

        train_iter = iterators.SerialIterator(train, batchsize)

        gpu_id = -1  # Set to -1 if you use CPU
        if gpu_id >= 0:
            self.mlp.to_gpu(gpu_id)
        optimizer = optimizers.Adam(alpha=0.001)
        optimizer.setup(self.mlp)

        max_epoch = 30
        while train_iter.epoch < max_epoch:

            # ---------- One iteration of the training loop ----------
            train_batch = train_iter.next()
            image_train, target_train = concat_examples(train_batch, gpu_id)
            image_train = Variable(image_train).data.astype(np.float32)
            target_train = Variable(target_train).data.astype(np.float32)
            OH_T = np.asarray(
                [one_hot(int(x), max_label) for x in target_train])
            OH_T = Variable(OH_T).data.astype(np.float32)
            # Calculate the prediction of the network
            prediction_train = self.mlp(image_train)
            final_pred = np.zeros(shape=(len(prediction_train), ))
            for i in range(len(prediction_train)):
                dummy = list(prediction_train[i].data)
                final_pred[i] = dummy.index(max(dummy))
            # Calculate the loss with MSE
            loss = F.mean_squared_error(prediction_train, OH_T)
            # Calculate the gradients in the network
            self.mlp.cleargrads()
            loss.backward()
            # Update all the trainable parameters
            optimizer.update()

            # --------------------- until here ---------------------

            # Check the validation accuracy of prediction after every epoch
            if train_iter.is_new_epoch:  # If this iteration is the final iteration of the current epoch
                # Display the training loss
                print('epoch:{:02d} train_loss:{:.04f}'.format(
                    train_iter.epoch, float(to_cpu(loss.array))))
        return self.mlp
Example #29
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', choices=('fcis_resnet101',),
        default='fcis_resnet101')
    parser.add_argument('--pretrained-model')
    parser.add_argument('--iou-thresh', type=float, default=0.5)
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    if args.model == 'fcis_resnet101':
        if args.pretrained_model:
            model = FCISResNet101(
                n_fg_class=len(sbd_instance_segmentation_label_names),
                pretrained_model=args.pretrained_model)
        else:
            model = FCISResNet101(pretrained_model='sbd')

    model.use_preset('evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = SBDInstanceSegmentationDataset(split='val')
    iterator = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values
    gt_masks, gt_labels = rest_values

    result = eval_instance_segmentation_voc(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels, args.iou_thresh,
        use_07_metric=True)

    print('')
    print('mAP: {:f}'.format(result['map']))
    for l, name in enumerate(sbd_instance_segmentation_label_names):
        if result['ap'][l]:
            print('{:s}: {:f}'.format(name, result['ap'][l]))
        else:
            print('{:s}: -'.format(name))
Example #30
    def fit(self, intmat):
        coo = coo_matrix(intmat)
        logs = [(target, drug, data)
                for target, drug, data in zip(coo.col, coo.row, coo.data)]
        it = iterators.SerialIterator(self.dataset, 100)
        opt = optimizers.SGD(0.001)
        opt.setup(self.encoder)
        updater = CDLUpdater(self.ctr, self.encoder, logs, it, opt, None,
                             **self.cdl_parameters)
        trainer = training.Trainer(updater,
                                   stop_trigger=(self.stop_epoch, 'epoch'))
        trainer.extend(extensions.LogReport())
        trainer.extend(
            extensions.PrintReport(
                ['epoch', 'elapsed_time', 'main/loss', 'main/recons_loss',
                 'main/code_loss', 'main/regularization_loss',
                 'main/ctr_error'],
                out=sys.stderr,
            ))
        trainer.run()