Example #1
def prepare_data(args):
    print("Begin loading source domain")
    source = datasets.Domain(args.source)
    print("Finish loading source domain")
    print("Begin loading target domain")
    target = datasets.Domain(args.target)
    print("Finish loading target domain")
    s_train = TransformDataset(source.train, utils.train_transform_office)
    t_train = TransformDataset(target.train, utils.train_transform_office)
    s_test = TransformDataset(source.test, utils.test_transform_office)
    t_test = TransformDataset(target.test, utils.test_transform_office)
    s_train = utils.data2iterator(s_train,
                                  args.batchsize,
                                  is_train=True,
                                  multiprocess=args.multiprocess)
    t_train = utils.data2iterator(t_train,
                                  args.batchsize,
                                  is_train=True,
                                  multiprocess=args.multiprocess)
    s_test = utils.data2iterator(s_test,
                                 args.batchsize,
                                 is_train=False,
                                 multiprocess=args.multiprocess)
    t_test = utils.data2iterator(t_test,
                                 args.batchsize,
                                 is_train=False,
                                 multiprocess=args.multiprocess)
    return s_train, t_train, s_test, t_test
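utils.train_transform_office and utils.test_transform_office are not shown above. As a hypothetical sketch (not the repository's actual helpers), the per-example callables that TransformDataset expects often look like this with chainercv: resize, random crop, and flip for training, and a deterministic resize plus center crop for testing.

from chainercv import transforms

def train_transform_office(in_data):
    # Hypothetical train-time augmentation for an Office-style CHW image.
    img, label = in_data
    img = transforms.resize(img, (256, 256))
    img = transforms.random_crop(img, (227, 227))
    img = transforms.random_flip(img, x_random=True)
    return img, label

def test_transform_office(in_data):
    # Hypothetical deterministic test-time preprocessing.
    img, label = in_data
    img = transforms.resize(img, (256, 256))
    img = transforms.center_crop(img, (227, 227))
    return img, label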
Example #2
def split_dataset(fnamesp, fnamesn, index_labelsp, index_labelsn):
    permp = np.random.permutation(len(fnamesp))
    permn = np.random.permutation(len(fnamesn))

    split_p = np.split(np.array(fnamesp)[permp], 2)
    t_indexp = np.split(np.array(index_labelsp)[permp], 2)

    split_n = np.split(np.array(fnamesn)[permn], [int(len(fnamesn) / 2)])
    t_indexn = np.split(
        np.array(index_labelsn)[permn], [int(len(fnamesn) / 2)])

    permt = np.random.permutation(
        len(split_p[0][:9500]) + len(split_n[0][:15000]))
    permv = np.random.permutation(len(split_p[1]) + len(split_n[1]))
    d1 = LabeledImageDataset(
        list(
            zip(list(np.r_[split_p[0][:9500], split_n[0][:15000]][permt]),
                list(np.r_[t_indexp[0][:9500], t_indexn[0][:15000]][permt]))))
    d2 = LabeledImageDataset(
        list(
            zip(list(np.r_[split_p[1], split_n[1]][permv]),
                list(np.r_[t_indexp[1], t_indexn[1]][permv]))))

    train = TransformDataset(d1, transform)
    valid = TransformDataset(d2, transform)

    return train, valid
Example #3
def create_iterator(train, valid, mean, std, pca_sigma, random_angle,
                    x_random_flip, y_random_flip, expand_ratio,
                    random_crop_size, random_erase, output_size, batchsize,
                    transform_fun):
    transform_train = partial(transform_fun,
                              mean=mean,
                              std=std,
                              pca_sigma=pca_sigma,
                              random_angle=random_angle,
                              x_random_flip=x_random_flip,
                              y_random_flip=y_random_flip,
                              expand_ratio=expand_ratio,
                              random_crop_size=random_crop_size,
                              random_erase=random_erase,
                              output_size=output_size,
                              train=True)
    transform_valid = partial(transform_fun,
                              mean=mean,
                              std=std,
                              output_size=output_size,
                              random_crop_size=random_crop_size)

    processed_train = TransformDataset(train, transform_train)
    processed_valid = TransformDataset(valid, transform_valid)

    train_iter = iterators.SerialIterator(processed_train, batchsize)
    valid_iter = iterators.SerialIterator(processed_valid,
                                          batchsize,
                                          repeat=False,
                                          shuffle=False)
    return train_iter, valid_iter
Example #4
def train(network_object, dataset, batchsize=128, gpu_id=0, max_epoch=20, postfix='', base_lr=0.01, lr_decay=None):

    # prepare dataset
    train_size = int(len(dataset) * 0.9)
    train_val, test = chainer.datasets.split_dataset_random(dataset, train_size, seed=0)
    train_size = int(len(train_val) * 0.9)
    train, valid = chainer.datasets.split_dataset_random(train_val, train_size, seed=0)

    # data augmentation
    train_dataset = TransformDataset(train, partial(transform, train=True))
    valid_dataset = TransformDataset(valid, partial(transform, train=False))
    test_dataset = TransformDataset(test, partial(transform, train=False))

    # 2. Iterator
    train_iter = iterators.SerialIterator(train_dataset, batchsize)
    #train_iter = iterators.MultiprocessIterator(train_dataset, batchsize)
    valid_iter = iterators.SerialIterator(valid_dataset, batchsize, False, False)
    #valid_iter = iterators.MultiprocessIterator(valid_dataset, batchsize, False, False)

    # 3. Model
    net = L.Classifier(network_object)

    # 4. Optimizer
    optimizer = optimizers.MomentumSGD(lr=base_lr).setup(net)
    #optimizer = optimizers.Adam().setup(net)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005))

    # 5. Updater
    updater = training.StandardUpdater(train_iter, optimizer, device=gpu_id)

    # 6. Trainer
    trainer = training.Trainer(updater, (max_epoch, 'epoch'), out='{}_HonkanEntrance3_NewSize_{}result'.format(network_object.__class__.__name__, postfix))


    # 7. Trainer extensions
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.snapshot(filename='snapshot_epoch-{.updater.epoch}'))
    trainer.extend(extensions.Evaluator(valid_iter, net, device=gpu_id), name='val')
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy', 'val/main/loss', 'val/main/accuracy', 'l1/W/data/std', 'elapsed_time']))
    trainer.extend(extensions.PlotReport(['l1/W/data/std'], x_key='epoch', file_name='std.png'))
    trainer.extend(extensions.PlotReport(['main/loss', 'val/main/loss'], x_key='epoch', file_name='loss.png'))
    trainer.extend(extensions.PlotReport(['main/accuracy', 'val/main/accuracy'], x_key='epoch', file_name='accuracy.png'))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.ProgressBar())

    if lr_decay is not None:
        trainer.extend(extensions.ExponentialShift('lr', 0.1), trigger=lr_decay)
    trainer.run()
    del trainer

    # 8. Evaluation
    test_iter = iterators.SerialIterator(test_dataset, batchsize, False, False)
    #test_iter = iterators.MultiprocessIterator(test_dataset, batchsize, False, False)
    test_evaluator = extensions.Evaluator(test_iter, net, device=gpu_id)
    results = test_evaluator()
    print('Test accuracy:', results['main/accuracy'])

    return net
Example #5
def get_mnist_vae():
    train, test = mnist.get_mnist(withlabel=False)
    val = train[50000:60000]
    train = train[0:50000]
    train = TransformDataset(train, mnist_transform)
    val = TransformDataset(val, mnist_transform)
    test = TransformDataset(test, mnist_transform)

    return train, val, test
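mnist_transform is not defined in this snippet. A plausible stand-in for a Bernoulli VAE (purely an assumption) is to binarize the flattened pixel values returned by get_mnist(withlabel=False):

import numpy as np

def mnist_transform(x):
    # x is a flattened MNIST image with values in [0, 1]; binarize it.
    return (x > 0.5).astype(np.float32)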
Example #6
def evaluate(model, dataset, hand_param, debug):
    transformed_dataset = TransformDataset(dataset, model.encode)
    avg_distances = []
    max_distances = []
    length = len(transformed_dataset) if not debug else 10

    for idx in tqdm.tqdm(range(length)):
        image, gt_2dj, gt_3dj = transformed_dataset.get_example(idx)
        example = dataset.get_example(idx)
        pred_j = model.predict(np.array([image], dtype=np.float32))
        with chainer.using_config('train', False):
            loss = model.forward(
                np.expand_dims(image, axis=0),
                np.expand_dims(gt_3dj, axis=0),
                np.expand_dims(gt_2dj, axis=0),
            )
        pred_j = pred_j.array.reshape(hand_param["n_joints"], -1)
        dim = pred_j.shape[-1]
        if dim == 5:
            pred_3d = pred_j[:, :3]
            pred_2d = pred_j[:, 3:]
        else:
            pred_3d = pred_j

        logger.debug("> {}".format(pred_j))
        logger.debug("> loss {}".format(loss))
        logger.debug("> visualize pred_joint")

        z_half = hand_param["cube"][0] / 2
        pred_3d = z_half * pred_3d
        gt_3dj = example["rgb_joint"] if hand_param["use_rgb"] else example[
            "depth_joint"]
        gt_3dj = gt_3dj - calc_com(gt_3dj)
        dist = np.sqrt(np.sum(np.square(pred_3d - gt_3dj), axis=1))
        avg_dist = np.mean(dist)
        max_dist = np.max(dist)
        avg_distances.append(avg_dist)
        max_distances.append(max_dist)

    print(np.array(avg_distances).mean())
    max_distances = np.array(max_distances)
    ps = []
    max_threshold = 80
    for threshold in range(3, max_threshold):
        oks = np.sum(max_distances <= threshold)
        percent = 100 * (oks / len(max_distances))
        ps.append(percent)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel("Distance threshold / mm")
    ax.set_ylabel("Fraction of frames with mean below distance / %")
    ax.set_ylim(0, 100)
    ax.set_xlim(0, max_threshold)
    ax.plot(range(3, max_threshold), ps)
    ax.grid(True, linestyle="--")
    plt.savefig("plot.png")
Example #7
def load_dataset(name, dtype='train'):
    if name == 'mnist':
        train, _ = chainer.datasets.get_mnist(withlabel=True, ndim=3)
        dataset = TransformDataset(train, transform=gray2rgb)
        return TransformDataset(dataset, transform=scale)
    elif name == 'mnist_m':
        dataset = get_mnist_m(dtype, withlabel=True)
        return TransformDataset(dataset, transform=scale)
    else:
        raise NotImplementedError
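The gray2rgb and scale helpers are assumed by this snippet but not shown. In the usual MNIST-to-MNIST-M setup they might look roughly like this (an illustrative sketch, not the original code):

import numpy as np

def gray2rgb(in_data):
    # Tile the single MNIST channel to three channels (CHW layout).
    img, label = in_data
    img = np.broadcast_to(img, (3,) + img.shape[1:]).astype(np.float32)
    return img, label

def scale(in_data):
    # Map pixel values from [0, 1] to [-1, 1].
    img, label = in_data
    return img * 2 - 1, label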
Example #8
    def setup_iterator(self):
        train_dataset_transformed = TransformDataset(
            self.train_dataset, self.transform_dataset)
        val_dataset_transformed = TransformDataset(
            self.val_dataset, self.transform_dataset)
        self.train_iterator = chainer.iterators.MultiprocessIterator(
            train_dataset_transformed, batch_size=self.batch_size,
            shared_mem=10 ** 7)
        self.val_iterator = chainer.iterators.MultiprocessIterator(
            val_dataset_transformed, batch_size=self.batch_size,
            shared_mem=10 ** 7, repeat=False, shuffle=False)
Example #9
def mnist():
    target_train, target_test = chainer.datasets.get_mnist(ndim=3,
                                                           rgb_format=True)

    def transform(in_data):
        img, label = in_data
        img = resize(img, (32, 32))
        return img, label

    source_ = TransformDataset(target_train, transform)  # not used below
    source_train = TransformDataset(target_test, transform)

    labels = []
    images = []

    # Keep only digits 5-9, relabel them to 0-4, and convert to HWC layout.
    for i in range(10000):
        print(i)
        image = source_train[i][0]
        label = source_train[i][1]
        if label >= 5:
            image = np.transpose(image, [1, 2, 0])
            images.append(image.astype(np.float32))
            labels.append(label - 5)

    images = np.array(images).astype(np.float32)
    labels = np.squeeze(np.array(labels)).astype(np.int32)

    print(labels[4])
    print(images[4])
    print(labels.shape)
    print(images.shape)

    # Sanity check: display one converted image (pixel values are in [0, 1]).
    print(labels[330])
    PIL.Image.fromarray((images[330] * 255).astype(np.uint8), 'RGB').show()

    np.save('mnist_rgb_images_test', images)
    np.save('mnist_rgb_labels_test', labels)

    lb = sklearn.preprocessing.LabelBinarizer().fit([0, 1, 2, 3, 4])
    labels = lb.transform(labels).astype(np.float32)
    np.save('mnist_rgb_labels_onehot_test', labels)
Example #10
    def setup_iterator(self):
        train_dataset_transformed = TransformDataset(
            self.train_dataset, cmr.datasets.MaskRCNNTransform(self.mask_rcnn))
        val_dataset_transformed = TransformDataset(
            self.val_dataset,
            cmr.datasets.MaskRCNNTransform(self.mask_rcnn, train=False))
        # FIXME: MultiProcessIterator sometimes hangs
        self.train_iterator = chainer.iterators.SerialIterator(
            train_dataset_transformed, batch_size=self.batch_size)
        self.val_iterator = chainer.iterators.SerialIterator(
            val_dataset_transformed, batch_size=self.batch_size,
            repeat=False, shuffle=False)
Example #11
def prepare_dataset(full_data=False):

    train_split = 'trainval' if full_data else 'train'
    train = TransformDataset(KuzushijiRecognitionDataset(split=train_split),
                             Preprocessor(augmentation=True))

    val_raw = split_dataset_random(KuzushijiRecognitionDataset('val'),
                                   first_size=16 * 10,
                                   seed=0)[0]

    val = TransformDataset(val_raw, Preprocessor(augmentation=False))

    return train, val, val_raw
Example #12
def run(batch_size, n_process, prefetch,
        model_name, exits_bn, activation_function, number_filter_list,
        gpu_id, lossfun, learning_rate, max_epoch, out_dir, epoch):
    train, test = get_image()
    train = TransformDataset(train, trans)
    test = TransformDataset(test, trans)


    train_iter = iterators.MultiprocessIterator(train, batch_size, True, True, n_process, prefetch)
    test_iter = iterators.MultiprocessIterator(test, batch_size, False, False, n_process, prefetch)


    model = model_name(exits_bn, activation_function, number_filter_list)
    
    if gpu_id >= 0:
        model.to_gpu(gpu_id)

    # Wrap the model in a classifier link so that the loss calculation happens inside the model;
    # the loss function is passed in explicitly as `lossfun`.

    model = links.Loss_Classifier(model, lossfun)
    # selection of your optimizing method
    optimizer = optimizers.MomentumSGD(lr=learning_rate, momentum=0.9)

    # Give the optimizer a reference to the model
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005))

    # Get an updater that uses the Iterator and Optimizer
    updater = training.updaters.StandardUpdater(train_iter, optimizer, device=gpu_id)

    # Setup a Trainer
    trainer = training.Trainer(updater, (max_epoch, 'epoch'), out='{}'.format(out_dir))

    from chainer.training import extensions

    trainer.extend(extensions.LogReport()) # generate report
    trainer.extend(extensions.snapshot(filename='snapshot_epoch-{.updater.epoch}')) # save updater
    trainer.extend(extensions.snapshot_object(model.predictor, filename='model_epoch-{.updater.epoch}')) # save model
    trainer.extend(extensions.Evaluator(test_iter, model, device=gpu_id)) # validation

    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy', 'validation/main/loss', 'validation/main/accuracy', 'elapsed_time'])) # show loss and accuracy
    trainer.extend(extensions.ProgressBar()) # show training progress
    trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], x_key='epoch', file_name='loss.png')) # loss curve
    trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], x_key='epoch', file_name='accuracy.png')) # accuracy curve
    trainer.extend(extensions.dump_graph('main/loss'))
    if epoch > 0:
        serializers.load_npz('./{}/snapshot_epoch-{}'.format(out_dir, epoch), trainer)
        trainer.updater.get_optimizer('main').lr = learning_rate
    trainer.run()
Example #13
def prepare_dataset(image_size=(64, 64), full_data=False):

    train_split = 'trainval' if full_data else 'train'
    train = TransformDataset(
        RandomSampler(KuzushijiCharCropDataset(split=train_split),
                      virtual_size=20000),
        Preprocess(image_size=image_size, augmentation=True))

    val = TransformDataset(
        split_dataset_random(KuzushijiCharCropDataset(split='val'),
                             first_size=5000,
                             seed=0)[0],
        Preprocess(image_size=image_size, augmentation=False))

    return train, val
Example #14
def train_yolov2():
    """Training yolov2."""
    config = parse_args()
    model = get_model(config["model"])
    devices = parse_devices(config['gpus'], config['updater']['name'])
    train_data, test_data = load_dataset(config["dataset"])

    Transform = Transform_v2 if parse_dict(config, 'version',
                                           '2') == '2' else Transform_v3
    train_data = TransformDataset(
        train_data,
        Transform(0.5,
                  dim=model.dim,
                  max_target=30,
                  anchors=model.anchors,
                  batchsize=config['iterator']['train_batchsize']))

    train_iter, test_iter = create_iterator(train_data, test_data,
                                            config['iterator'], devices,
                                            config['updater']['name'])
    optimizer = create_optimizer(config['optimizer'], model)
    updater = create_updater(train_iter, optimizer, config['updater'], devices)
    trainer = training.Trainer(updater,
                               config['end_trigger'],
                               out=config['results'])
    trainer = create_extension(trainer,
                               test_iter,
                               model,
                               config['extension'],
                               devices=devices)
    trainer.run()
    chainer.serializers.save_npz(os.path.join(config['results'], 'model.npz'),
                                 model)
Example #15
def transform_dataset(dataset, model, train):
    if train:
        transform = Compose(
            RGBAugmentation(["rgb"]),
            Affine(
                rgb_indices=["rgb"],
                mask_indices=["masks"],
                bbox_indices=["bboxes"],
            ),
            ClassIds2FGClassIds(["labels"]),
            AsType(["rgb", "labels", "bboxes"],
                   [np.float32, np.int32, np.float32]),
            HWC2CHW(["rgb"]),
            Dict2Tuple(["rgb", "masks", "labels", "bboxes"]),
            MaskRCNNTransform(800, 1333, model.extractor.mean),
        )
    else:
        transform = Compose(
            ClassIds2FGClassIds(["labels"]),
            AsType(["rgb", "labels", "bboxes"],
                   [np.float32, np.int32, np.float32]),
            HWC2CHW(["rgb"]),
            Dict2Tuple(["rgb", "masks", "labels"]),
        )
    return TransformDataset(dataset, transform)
Example #16
def load_mnist():
    """Load MNSIT handwritten digit images in 32x32.

    Return:
        train dataset, test dataset
        in [n, c, h, w] format.
    """
    train, test = chainer.datasets.get_mnist(ndim=3, rgb_format=False)

    def transform(data):
        img, label = data
        img = resize(img, [32, 32])
        return img, label

    train = TransformDataset(train, transform)
    test = TransformDataset(test, transform)
    return train, test
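A quick usage check (assuming Chainer can download MNIST locally): each example is an (image, label) pair whose image has already been resized by transform.

train, test = load_mnist()
img, label = train[0]
print(img.shape)              # (1, 32, 32)
print(len(train), len(test))  # 60000 10000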
Example #17
def visualize_dataset(config):
    from matplotlib import pyplot as plt
    dataset = select_dataset(config, return_data=["train_set"])
    hand_class = config.get('model_param', 'hand_class').split(",")
    hand_class = [k.strip() for k in hand_class]
    class_converter, flip_converter = create_converter(hand_class)
    logger.info("hand_class = {}".format(hand_class))
    logger.info("done get dataset")

    idx = random.randint(0, len(dataset) - 1)
    logger.info("get example")
    rgb, rgb_bbox, rgb_class = dataset.get_example(idx)
    logger.info("Done get example")
    fig = plt.figure(figsize=(5, 10))
    ax1 = fig.add_subplot(211)
    ax2 = fig.add_subplot(212)

    label = rgb_class
    class_converter = {v: k for k, v in class_converter.items()}
    color = [COLOR_MAP[class_converter[c]] for c in label]
    print(label)
    vis_bbox(
        rgb,
        rgb_bbox,
        instance_colors=color,
        label=label,
        label_names=hand_class,
        ax=ax1,
    )

    model = create_ssd_model()
    transform_dataset = TransformDataset(
        dataset, Transform(model.coder, model.insize, model.mean, train=True))

    img, mb_loc, mb_label = transform_dataset.get_example(idx)
    mb_color = [COLOR_MAP[class_converter[c]] for c in mb_label]
    vis_bbox(
        img,
        mb_loc,
        instance_colors=mb_color,
        label=mb_label,
        label_names=hand_class,
        ax=ax2,
    )
    plt.savefig("vis.png")
    plt.show()
Example #18
    def transform(self, x):
        dataset = LabeledImageDataset(x)

        def normalize(in_data):
            img, label = in_data
            img = img / 255.
            return img, label

        return TransformDataset(dataset, normalize)
Example #19
    def transform(self, x):
        dataset = LabeledImageDataset(x)

        def augmentation(in_data):
            img, label = in_data
            img = self._image_process(img)
            return img, label

        return TransformDataset(dataset, augmentation)
Example #20
def load_svhn():
    """Load grayscaled SVHN digit images.

    Return:
        train dataset, test dataset
        in [n, c, h, w] format.
    """
    train, test = chainer.datasets.get_svhn()

    def transform(data):
        img, label = data
        # Convert RGB to grayscale with the ITU-R BT.601 luma weights.
        img = img[0] * 0.2989 + img[1] * 0.5870 + img[2] * 0.1140
        img = img.reshape(1, 32, 32)
        return img, label

    train = TransformDataset(train, transform)
    test = TransformDataset(test, transform)
    return train, test
Example #21
    def transform(self, x):
        dataset = LabeledImageDataset(x)

        def normalize(in_data):
            img, label = in_data
            img = chainer.links.model.vision.vgg.prepare(img)
            return img, label

        return TransformDataset(dataset, normalize)
Example #22
    def transform(self, x):
        dataset = LabeledImageDataset(x)

        def normalize(in_data):
            img, label = in_data
            img = img / 255
            img = resize(img, (224, 224))
            return img, label

        return TransformDataset(dataset, normalize)
Example #23
def main():
    root_dir = 'dataset directory'
    train, val = dataset.get_mscoco(root_dir)
    vocab_size = len(train.vocab)
    batch_size = 10

    vggenc = model.VGGEncoder()
    vggenc.to_gpu()

    def transform(x):
        image, caption = x
        image = L.model.vision.vgg.prepare(image)
        return image, caption

    train = TransformDataset(train, transform=transform)
    val = TransformDataset(val, transform=transform)

    encode(val, 'val.npy', batch_size, vggenc)
    encode(train, 'train.npy', batch_size, vggenc)
Example #24
    def train(self, epoch_num=40, batch_size=128, gpu=-1):
        train = chainer.datasets.LabeledImageDataset(
            "../dataset/train/info.txt", "../dataset/train")
        test = chainer.datasets.LabeledImageDataset(
            "../dataset/validation/info.txt", "../dataset/validation")

        model = L.Classifier(
            Model(out_size=25))  # loss function, default softmax_cross_entropy
        alpha = 1e-4
        optimizer = optimizers.Adam(alpha=alpha)
        optimizer.setup(model)
        model.predictor.vgg.disable_update()  # do not update the weights of VGG16

        train = TransformDataset(train, self.transform)
        test = TransformDataset(test, self.transform)

        train_iter = chainer.iterators.SerialIterator(train, batch_size)
        test_iter = chainer.iterators.SerialIterator(test,
                                                     batch_size,
                                                     repeat=False,
                                                     shuffle=False)
        #updater = training.StandardUpdater(train_iter, optimizer, device=gpu)
        updater = training.ParallelUpdater(train_iter,
                                           optimizer,
                                           devices=self.gpu_devices)
        trainer = training.Trainer(updater, (epoch_num, 'epoch'), out='result')
        trainer.extend(
            extensions.Evaluator(test_iter,
                                 model,
                                 device=self.gpu_devices['main']))
        trainer.extend(extensions.LogReport())
        trainer.extend(
            extensions.PrintReport([
                'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
                'validation/main/accuracy', 'elapsed_time'
            ]))
        #trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png'))
        #trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png'))
        trainer.run()

        model.to_cpu()
        serializers.save_npz("mymodel.npz", model)
Example #25
    def transform(self, x):
        dataset = LabeledImageDataset(x)

        def _transform(in_data):
            img, label = in_data
            img = random_sized_crop(img, scale_ratio_range=(0.3, 1))
            img = random_flip(img, x_random=True)
            img = chainer.links.model.vision.vgg.prepare(img)
            return img, label

        return TransformDataset(dataset, _transform)
Example #26
def make_dataset(source_bpe_filename,
                 target_bpe_filename,
                 source_vocab,
                 target_vocab,
                 chunk_length=1000):

    d = TextDataset(
        (source_bpe_filename, target_bpe_filename),
        filter_func=lambda x, y: filter_example(x, y, chunk_length - 2))
    return TransformDataset(
        d, TokenTransformer(source_vocab, target_vocab, chunk_length))
Example #27
def load_dataset():
    train = h5py.File(path.join(ROOT_PATH, 'dataset/General100_train.hdf5'), 'r')
    test = h5py.File(path.join(ROOT_PATH, 'dataset/Set14_test.hdf5'), 'r')

    train_x, train_y = np.array(train['x_data']) / 255, np.array(train['y_data']) / 255
    test_x, test_y = np.array(test['x_data']) / 255, np.array(test['y_data']) / 255

    train = TupleDataset(train_x, train_y)
    test = TupleDataset(test_x, test_y)

    train = TransformDataset(train, transform)

    return train, test
Example #28
def create_iterator(train, valid, crop_size, rotate, horizontal_flip,
                    scale_range, batchsize):

    mean = np.array((123.68, 116.779, 103.939), dtype=np.float32)[:, None,
                                                                  None]
    processed_train = TransformDataset(
        train, Transform(mean, crop_size, rotate, horizontal_flip,
                         scale_range))

    train_iter = iterators.SerialIterator(processed_train, batchsize)
    valid_iter = iterators.SerialIterator(valid,
                                          batchsize,
                                          repeat=False,
                                          shuffle=False)
    return train_iter, valid_iter
Example #29
def main(args):
    logging.basicConfig(level=logging.INFO)

    config = configparser.ConfigParser()
    config_path = os.path.join(args.trained, "pose", "config.ini")
    if not os.path.exists(config_path):
        raise Exception("config_path {} was not found".format(config_path))
    logger.info("read {}".format(config_path))
    config.read(config_path, 'UTF-8')

    logger.info("setup devices")
    chainer.global_config.autotune = True
    chainer.config.cudnn_fast_batch_normalization = True

    logger.info("> get dataset {}".format(args.mode))
    mode_dict = {
        "train": "train_set",
        "val": "val_set",
        "test": "test_set",
    }
    return_type = mode_dict[args.mode]

    dataset, hand_param = select_dataset(config, [return_type, "hand_param"])

    logger.info("> hand_param = {}".format(hand_param))
    model = select_model(config, hand_param)
    transformed_dataset = TransformDataset(dataset, model.encode)

    logger.info("> size of dataset is {}".format(len(dataset)))
    model_path = os.path.expanduser(
        os.path.join(args.trained, "pose", "bestmodel.npz"))

    logger.info("> restore model")
    logger.info("> model.device = {}".format(model.device))
    chainer.serializers.load_npz(model_path, model)

    if config["model"]["name"] in ["ppn", "ppn_edge"]:
        if args.evaluate:
            evaluate_ppn(model, dataset, hand_param)
        else:
            predict_ppn(model, dataset, hand_param)
    elif config["model"]["name"] in ["rhd", "hm", "orinet"]:
        predict_heatmap(model, dataset, hand_param)
    elif config["model"]["name"] == "ganerated":
        predict_ganerated(model, dataset, hand_param)
    else:
        predict_sample(model, dataset, hand_param)
Example #30
## read data
data_orig = mydata.read()

win_total = 0
draw_total = 0
lose_total = 0

for year in range(1996, 2019):
    ## network setup
    net = mymodel.create()
    chainer.serializers.load_npz("models/{0}.npz".format(year), net)

    # Predict one year of data with the model trained on data up to the previous year.
    data = data_orig[data_orig[:, 0] == year]
    inputs = TransformDataset(data, mymodel.transform)
    win = 0
    draw = 0
    lose = 0
    for testcase in inputs:
        detected = net.predictor(testcase[0].reshape((1,-1))).data.argmax(axis=1)[0]
        # Play the move that beats the opponent's most likely move.
        mychoice = (detected + 2) % 3 # 0: G, 1: C, 2: P
        schoice = testcase[1]
        if (mychoice + 3 - schoice) % 3 == 0:
            draw += 1
        elif (mychoice + 3 - schoice) % 3 == 1:
            lose += 1
        else:
            win += 1
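The counter-move arithmetic above relies on the cyclic encoding in which each move beats the next one modulo 3 (0 beats 1, 1 beats 2, 2 beats 0), so the move that beats a predicted move y is (y + 2) % 3. A tiny self-check of that rule:

for predicted in range(3):
    counter = (predicted + 2) % 3
    # A difference of 2 corresponds to the "win" branch in the loop above.
    assert (counter + 3 - predicted) % 3 == 2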