Example #1
    def test_sub_dataset(self):
        original = [1, 2, 3, 4, 5]
        subset = datasets.SubDataset(original, 1, 4)
        self.assertEqual(len(subset), 3)
        self.assertEqual(subset[0], 2)
        self.assertEqual(subset[1], 3)
        self.assertEqual(subset[2], 4)
Example #2
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import datasets, training


class MLP(chainer.Chain):

    def __init__(self, n_units, n_out):
        super(MLP, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, n_units)  # n_in -> n_units
            self.l2 = L.Linear(None, n_units)  # n_units -> n_units
            self.l3 = L.Linear(None, n_out)    # n_units -> n_out

    def __call__(self, x):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        return self.l3(h2)


batchsize = 10
train, test = datasets.get_mnist()
train = datasets.SubDataset(train, 0, 100)
test = datasets.SubDataset(test, 0, 100)
train_iter = chainer.iterators.SerialIterator(train, batchsize)
test_iter = chainer.iterators.SerialIterator(test, batchsize,
                                             repeat=False, shuffle=False)

model = L.Classifier(MLP(784, 10))
opt = chainer.optimizers.Adam()
opt.setup(model)

epoch = 2

# Set up a trainer
updater = training.StandardUpdater(train_iter, opt, device=-1)
trainer = training.Trainer(updater, (epoch, 'epoch'), out='/tmp/result')
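The snippet above stops right after the Trainer is created. A minimal continuation sketch, assuming the model, iterators, and trainer defined above and a CPU run (device=-1); the report entries rely on L.Classifier's default loss/accuracy reporting:

from chainer.training import extensions

# Evaluate on the test iterator once per epoch and print basic metrics.
trainer.extend(extensions.Evaluator(test_iter, model, device=-1))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
    ['epoch', 'main/loss', 'main/accuracy',
     'validation/main/loss', 'validation/main/accuracy']))
trainer.run()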
Example #3
def train(settings: dict, output_path: PosixPath):
    """Main."""
    gpu_num = len(settings["gpu_devices"])
    # # make dataset
    # # # read meta info.
    train_df = pd.read_csv(
        config.PROC_DATA /
        "train_add-{}fold-index.csv".format(settings["n_folds"]))

    # # # make label arr
    train_labels_arr = train_df[config.COMP_NAMES].values.astype("i")

    # # # make train set
    if settings["val_fold"] != -1:
        train_dataset = datasets.LabeledImageDataset(
            pairs=list(
                zip((train_df[train_df["fold"] != settings["val_fold"]]
                     ["image_id"] + ".png").tolist(),
                    train_labels_arr[train_df["fold"] != settings["val_fold"],
                                     ...])),
            root=config.TRAIN_IMAGES_DIR.as_posix())
    else:
        train_dataset = datasets.LabeledImageDataset(
            pairs=list(
                zip((train_df["image_id"] + ".png").tolist(),
                    train_labels_arr)),
            root=config.TRAIN_IMAGES_DIR.as_posix())

    train_dataset = datasets.TransformDataset(
        train_dataset,
        nn_training.ImageTransformer(settings["training_transforms"]))

    if gpu_num > 1:
        # # if using multiple GPUs, split the train set into gpu_num subsets.
        train_sub_dataset_list = []
        total_size = len(train_dataset)
        subset_size = (total_size + gpu_num - 1) // gpu_num
        np.random.seed(1086)
        random_order = np.random.permutation(len(train_dataset))
        for i in range(gpu_num):
            start_idx = min(i * subset_size, total_size - subset_size)
            end_idx = min((i + 1) * subset_size, total_size)
            print(i, start_idx, end_idx)
            train_sub_dataset_list.append(
                datasets.SubDataset(train_dataset,
                                    start=start_idx,
                                    finish=end_idx,
                                    order=random_order))
        train_dataset = train_sub_dataset_list

        for i, subset in enumerate(train_dataset):
            print("subset{}: {}".format(i, len(subset)))

    # # # make validation set
    if settings["val_fold"] != -1:
        val_dataset = datasets.LabeledImageDataset(
            pairs=list(
                zip((train_df[train_df["fold"] == settings["val_fold"]]
                     ["image_id"] + ".png").tolist(),
                    train_labels_arr[train_df["fold"] == settings["val_fold"],
                                     ...])),
            root=config.TRAIN_IMAGES_DIR.as_posix())
    else:
        # # when training on all the train data, compute the loss over all of it at the evaluation step.
        val_dataset = datasets.LabeledImageDataset(
            pairs=list(
                zip((train_df["image_id"] + ".png").tolist(),
                    train_labels_arr)),
            root=config.TRAIN_IMAGES_DIR.as_posix())

    val_dataset = datasets.TransformDataset(
        val_dataset,
        nn_training.ImageTransformer(settings["inference_transforms"]))

    print("[make dataset] train: {}, val: {}".format(len(train_dataset),
                                                     len(val_dataset)))

    # # initialize model.
    model = nn_training.ImageClassificationModel(
        extractor=getattr(
            backborn_chains,
            settings["backborn_class"])(**settings["backborn_kwargs"]),
        global_pooling=None if settings["pooling_class"] is None else getattr(
            global_pooling_chains, settings["pooling_class"])(
                **settings["pooling_kwargs"]),
        classifier=getattr(classifer_chains,
                           settings["head_class"])(**settings["head_kwargs"]))
    model.name = settings["model_name"]

    # # set training wrapper.
    train_model = nn_training.CustomClassifier(
        predictor=model,
        lossfun=getattr(
            nn_training,
            settings["loss_function"][0])(**settings["loss_function"][1]),
        evalfun_dict={
            "SCE_{}".format(i): getattr(nn_training, name)(**param)
            for i, (name, param) in enumerate(settings["eval_functions"])
        })

    settings["eval_func_names"] = [
        "SCE_{}".format(i) for i in range(len(settings["eval_functions"]))
    ]

    gc.collect()
    # # training.
    # # # create trainer.
    utils.set_random_seed(settings["seed"])
    trainer = nn_training.create_trainer(settings, output_path.as_posix(),
                                         train_model, train_dataset,
                                         val_dataset)
    trainer.run()

    # # # save the model from the last epoch.
    model = trainer.updater.get_optimizer('main').target.predictor
    serializers.save_npz(output_path / "model_snapshot_last_epoch.npz", model)

    del trainer
    del train_model
    gc.collect()

    # # run inference on the validation data with the last-epoch model.
    _, val_iter, _ = nn_training.create_iterator(settings, None, val_dataset,
                                                 None)
    val_pred, val_label = nn_training.inference_test_data(
        model, val_iter, gpu_device=settings["gpu_devices"][0])
    np.save(output_path / "val_pred_arr_fold{}".format(settings["val_fold"]),
            val_pred)

    # # calc validation score
    score_list = [[] for i in range(2)]

    for i in range(len(config.N_CLASSES)):
        y_pred_subset = val_pred[:, config.COMP_INDEXS[i]:config.
                                 COMP_INDEXS[i + 1]].argmax(axis=1)
        y_true_subset = val_label[:, i]
        score_list[0].append(
            recall_score(y_true_subset,
                         y_pred_subset,
                         average='macro',
                         zero_division=0))
        score_list[1].append(
            recall_score(y_true_subset,
                         y_pred_subset,
                         average='macro',
                         zero_division=1))
    score_list[0].append(np.average(score_list[0], weights=[2, 1, 1]))
    score_list[1].append(np.average(score_list[1], weights=[2, 1, 1]))

    score_df = pd.DataFrame(score_list, columns=config.COMP_NAMES + ["score"])

    print(score_df)
    score_df.to_csv(output_path / "score.csv", index=False)
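As a side note on the multi-GPU split in the example above: chainer.datasets also provides split_dataset_n, which builds one SubDataset per worker from a shared order permutation in a single call. A minimal sketch under stated assumptions (the dataset and gpu_num values are placeholders; unlike the manual loop above, the slices do not overlap when the size does not divide evenly):

import numpy as np
from chainer import datasets

# Hypothetical stand-in for the transformed training set above.
train_dataset = list(range(10))
gpu_num = 3

np.random.seed(1086)
order = np.random.permutation(len(train_dataset))
# One SubDataset per GPU, all sharing the same permutation of base indices.
train_sub_dataset_list = datasets.split_dataset_n(train_dataset, gpu_num, order=order)
print([len(sub) for sub in train_sub_dataset_list])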
Example #4
    def test_permuted_sub_dataset_len_mismatch(self):
        original = [1, 2, 3, 4, 5]
        with self.assertRaises(ValueError):
            datasets.SubDataset(original, 1, 4, [2, 0, 3, 1])
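The ValueError above is raised because the order argument is shorter than the base dataset: order must be a permutation of all base indices, not just of the selected slice. A minimal sketch of a valid call, assuming that semantics:

from chainer import datasets

original = [1, 2, 3, 4, 5]
# order permutes all five base indices; the slice [1, 4) is then taken over it.
subset = datasets.SubDataset(original, 1, 4, order=[2, 0, 3, 1, 4])
print(len(subset))                                # 3
print([subset[i] for i in range(len(subset))])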
Example #5
    def test_sub_dataset_overrun(self):
        original = [1, 2, 3, 4, 5]
        subset = datasets.SubDataset(original, 1, 4)
        with self.assertRaises(IndexError):
            subset[len(subset)]
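For completeness, the valid index range of a SubDataset is 0 .. len(subset) - 1; a tiny sketch iterating over it (the negative index is assumed to count back from the end of the slice, as with the usual DatasetMixin indexing):

from chainer import datasets

original = [1, 2, 3, 4, 5]
subset = datasets.SubDataset(original, 1, 4)
for i in range(len(subset)):    # valid indices: 0, 1, 2
    print(i, subset[i])
print(subset[-1])               # last element of the slice (assumed supported)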
Example #6
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import datasets


class MLP(chainer.Chain):

    def __init__(self, n_units, n_out):
        super(MLP, self).__init__(
            l1=L.Linear(None, n_units),  # n_in -> n_units
            l2=L.Linear(None, n_units),  # n_units -> n_units
            l3=L.Linear(None, n_out),  # n_units -> n_out
        )

    def __call__(self, x):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        return self.l3(h2)


# create model
model = L.Classifier(MLP(100, 10))

# load dataset
train_full, test_full = chainer.datasets.get_mnist()
train = datasets.SubDataset(train_full, 0, 1000)
test = datasets.SubDataset(test_full, 0, 1000)

# Set up a iterator
batchsize = 100
train_iter = chainer.iterators.SerialIterator(train, batchsize)
test_iter = chainer.iterators.SerialIterator(test,
                                             batchsize,
                                             repeat=False,
                                             shuffle=False)

# Set up an optimizer

# Set up an updater

# Set up a trainer
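The optimizer, updater, and trainer steps above are left as empty stubs in the example. A minimal completion sketch, assuming a plain SGD optimizer, a CPU-only StandardUpdater, and a 5-epoch run (these choices are placeholders, not part of the original):

from chainer import optimizers, training
from chainer.training import extensions

# Set up an optimizer
optimizer = optimizers.SGD()
optimizer.setup(model)

# Set up an updater
updater = training.StandardUpdater(train_iter, optimizer, device=-1)

# Set up a trainer
trainer = training.Trainer(updater, (5, 'epoch'), out='result')
trainer.extend(extensions.Evaluator(test_iter, model, device=-1))
trainer.run()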
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpus', type=str, default="-1")
    parser.add_argument('--batchsize', type=int, default=2)
    parser.add_argument('--epoch', type=int, default=10)
    parser.add_argument('--mini', action="store_true")
    args = parser.parse_args()

    gpus = list(filter(lambda x: x >= 0, map(int, args.gpus.split(","))))

    num_class = len(voc_bbox_label_names)

    data_augmentation_transform = DataAugmentationTransform(512)
    center_detection_transform = CenterDetectionTransform(512, num_class, 4)

    train = TransformDataset(
        ConcatenatedDataset(
            VOCBboxDataset(year='2007', split='trainval'),
            VOCBboxDataset(year='2012', split='trainval')
        ),
        data_augmentation_transform
    )
    train = TransformDataset(train, center_detection_transform)
    if args.mini:
        train = datasets.SubDataset(train, 0, 100)
    train_iter = chainer.iterators.MultiprocessIterator(train, args.batchsize)

    test = VOCBboxDataset(
        year='2007', split='test',
        use_difficult=True, return_difficult=True)
    if args.mini:
        test = datasets.SubDataset(test, 0, 20)
    test_iter = chainer.iterators.SerialIterator(
        test, args.batchsize // len(gpus), repeat=False, shuffle=False)

    detector = CenterDetector(HourglassNet, 512, num_class)
    train_chain = CenterDetectorTrain(detector, 1, 0.1, 1)

    gpus.sort()
    first_gpu = gpus[0]
    remain_gpu = gpus[1:]
    train_chain.to_gpu(first_gpu)

    optimizer = Adam(amsgrad=True)
    optimizer.setup(train_chain)

    devices = {
        "main": first_gpu
    }

    for i, gpu in enumerate(remain_gpu):
        devices[f"{i + 2}"] = gpu

    updater = training.updaters.ParallelUpdater(
        train_iter,
        optimizer,
        devices=devices,
    )

    log_interval = 1, 'epoch'
    trainer = Trainer(updater, (args.epoch, 'epoch'))
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(extensions.PrintReport(
        [
            'epoch', 'iteration', 'lr',
            'main/loss', 'main/hm_loss', 'main/wh_loss', 'main/offset_loss',
            'validation/main/map',
        ]),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(
        DetectionVOCEvaluator(
            test_iter, detector, use_07_metric=True,
            label_names=voc_bbox_label_names),
        trigger=(1, 'epoch'))
    trainer.extend(
        extensions.snapshot_object(detector, 'detector{.updater.epoch:03}.npz'),
        trigger=(1, 'epoch')
    )

    trainer.run()


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=2)
    parser.add_argument('--epoch', type=int, default=10)
    parser.add_argument('--mini', action="store_true")
    parser.add_argument('--input_size', type=int, default=512)
    args = parser.parse_args()

    dtype = np.float32

    num_class = len(voc_bbox_label_names)

    data_augmentation_transform = DataAugmentationTransform(args.input_size)
    center_detection_transform = CenterDetectionTransform(args.input_size,
                                                          num_class,
                                                          4,
                                                          dtype=dtype)

    train = TransformDataset(
        ConcatenatedDataset(VOCBboxDataset(year='2007', split='trainval'),
                            VOCBboxDataset(year='2012', split='trainval')),
        data_augmentation_transform)
    train = TransformDataset(train, center_detection_transform)
    if args.mini:
        train = datasets.SubDataset(train, 0, 100)
    train_iter = chainer.iterators.MultiprocessIterator(train, args.batchsize)

    test = VOCBboxDataset(year='2007',
                          split='test',
                          use_difficult=True,
                          return_difficult=True)
    if args.mini:
        test = datasets.SubDataset(test, 0, 20)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    detector = CenterDetector(HourglassNet,
                              args.input_size,
                              num_class,
                              dtype=dtype)
    #detector = CenterDetector(SimpleCNN, args.input_size, num_class)
    train_chain = CenterDetectorTrain(detector, 1, 0.1, 1)
    #train_chain = CenterDetectorTrain(detector, 1, 0, 0)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        train_chain.to_gpu(args.gpu)

    optimizer = Adam(alpha=1.25e-4)
    #optimizer = SGD()
    optimizer.setup(train_chain)

    updater = StandardUpdater(train_iter, optimizer, device=args.gpu)

    log_interval = 1, 'epoch'
    log_interval_mini = 500, 'iteration'
    trainer = Trainer(updater, (args.epoch, 'epoch'), out=f"result{args.gpu}")
    trainer.extend(extensions.LogReport(trigger=log_interval_mini))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'lr',
        'main/loss',
        'main/hm_loss',
        'main/wh_loss',
        'main/offset_loss',
        'main/hm_mae',
        'main/hm_pos_loss',
        'main/hm_neg_loss',
        'validation/main/map',
    ]),
                   trigger=log_interval_mini)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(DetectionVOCEvaluator(test_iter,
                                         detector,
                                         use_07_metric=True,
                                         label_names=voc_bbox_label_names),
                   trigger=(1, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        detector, 'detector{.updater.epoch:03}.npz'),
                   trigger=(1, 'epoch'))

    trainer.run()


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=4)
    parser.add_argument('--epoch', type=int, default=10)
    parser.add_argument('--mini', action="store_true")
    args = parser.parse_args()

    if hasattr(multiprocessing, 'set_start_method'):
        multiprocessing.set_start_method('forkserver')
        p = multiprocessing.Process()
        p.start()
        p.join()

    comm = chainermn.create_communicator('pure_nccl')
    print(comm.size)

    device = comm.intra_rank

    num_class = len(voc_bbox_label_names)

    data_augmentation_transform = DataAugmentationTransform(512)
    center_detection_transform = CenterDetectionTransform(512, num_class, 4)

    train = TransformDataset(
        ConcatenatedDataset(VOCBboxDataset(year='2007', split='trainval'),
                            VOCBboxDataset(year='2012', split='trainval')),
        data_augmentation_transform)

    if comm.rank == 0:
        train = TransformDataset(train, center_detection_transform)
        if args.mini:
            train = datasets.SubDataset(train, 0, 100)
    else:
        train = None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    train_iter = chainer.iterators.MultiprocessIterator(train,
                                                        args.batchsize //
                                                        comm.size,
                                                        n_processes=2)

    if comm.rank == 0:
        test = VOCBboxDataset(year='2007',
                              split='test',
                              use_difficult=True,
                              return_difficult=True)
        if args.mini:
            test = datasets.SubDataset(test, 0, 20)
        test_iter = chainer.iterators.SerialIterator(test,
                                                     args.batchsize,
                                                     repeat=False,
                                                     shuffle=False)

    detector = CenterDetector(HourglassNet, 512, num_class)
    train_chain = CenterDetectorTrain(detector, 1, 0.1, 1, comm=comm)

    chainer.cuda.get_device_from_id(device).use()
    train_chain.to_gpu()

    optimizer = chainermn.create_multi_node_optimizer(Adam(amsgrad=True), comm)
    optimizer.setup(train_chain)

    updater = StandardUpdater(train_iter, optimizer, device=device)

    trainer = Trainer(updater, (args.epoch, 'epoch'))

    if comm.rank == 0:
        log_interval = 1, 'epoch'
        trainer.extend(extensions.LogReport(trigger=log_interval))
        trainer.extend(extensions.observe_lr(), trigger=log_interval)
        trainer.extend(extensions.PrintReport([
            'epoch',
            'iteration',
            'lr',
            'main/loss',
            'main/hm_loss',
            'main/wh_loss',
            'main/offset_loss',
            'validation/main/map',
        ]),
                       trigger=log_interval)
        trainer.extend(extensions.ProgressBar(update_interval=10))
        trainer.extend(DetectionVOCEvaluator(test_iter,
                                             detector,
                                             use_07_metric=True,
                                             label_names=voc_bbox_label_names),
                       trigger=(1, 'epoch'))
        trainer.extend(extensions.snapshot_object(
            detector, 'detector{.updater.epoch:03}.npz'),
                       trigger=(1, 'epoch'))

    trainer.run()
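The snapshot_object extension above saves only the detector weights each epoch. A minimal sketch of loading such a file back for inference, assuming the same constructor arguments as in the example; the file name and the predict() call are illustrative, not taken from the source:

import chainer
from chainer import serializers
from chainercv.datasets import voc_bbox_label_names

# CenterDetector and HourglassNet come from the example's own project code.
num_class = len(voc_bbox_label_names)
detector = CenterDetector(HourglassNet, 512, num_class)

# Load the weights written by snapshot_object (file name is illustrative).
serializers.load_npz('detector010.npz', detector)

with chainer.using_config('train', False), chainer.no_backprop_mode():
    # bboxes, labels, scores = detector.predict([img])  # hypothetical API
    pass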