Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    gpu = args.gpu

    dataset = JskSemantics201607Dataset()
    n_class = len(dataset.target_names)

    # setup model
    pretrained_model = get_vgg16_pretrained_model(n_class)
    model = FCN32s(n_class=n_class)
    fcn.util.copy_chainermodel(pretrained_model, model)
    if gpu != -1:
        model.to_gpu(gpu)

    # setup optimizer
    # optimizer = O.MomentumSGD(lr=1e-8, momentum=0.99)
    optimizer = O.Adam(alpha=1e-5)
    optimizer.setup(model)
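    # clip the global gradient norm at 5 to keep updates stable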
    optimizer.add_hook(chainer.optimizer.GradientClipping(threshold=5))

    # train
    trainer = fcn.Trainer(
        dataset=dataset,
        model=model,
        optimizer=optimizer,
        weight_decay=0.0005,
        test_interval=100,
        max_iter=5000,
        snapshot=100,
        gpu=gpu,
    )
    trainer.train()
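fcn.util.copy_chainermodel is specific to this project; a minimal sketch of what such name-and-shape-matched parameter copying between two Chainer chains might look like (the function name and details here are assumptions, not the package's actual implementation):

def copy_params_by_name(src, dst):
    # copy every parameter whose path and shape match in both chains;
    # anything else (e.g. a differently sized classifier) is left untouched
    dst_params = dict(dst.namedparams())
    for name, param in src.namedparams():
        if name in dst_params and dst_params[name].shape == param.shape:
            dst_params[name].data[...] = param.data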
Example #2
def main():
    gpu = 0

    # setup dataset
    dataset = bleaney.SegmentationClassDataset()
    n_class = len(dataset.target_names)

    # setup model
    # pretrained_model = get_vgg16_pretrained_model()
    model = FCN32s(n_class=n_class)
    # fcn.util.copy_chainermodel(pretrained_model, model)
    if gpu != -1:
        model.to_gpu(gpu)

    # setup optimizer
    optimizer = O.MomentumSGD(lr=1e-10, momentum=0.99)
    optimizer.setup(model)

    # train
    trainer = fcn.Trainer(
        dataset=dataset,
        model=model,
        optimizer=optimizer,
        weight_decay=0.0005,
        test_interval=5000,
        max_iter=100000,
        snapshot=10000,
        gpu=gpu,
    )
    trainer.train()
Example #3
def main():
    gpu = 0

    this_dir = osp.dirname(osp.abspath(__file__))
    db_path = osp.join(this_dir, 'leveldb')

    dataset = apc2015.APC2015(db_path=db_path)
    n_class = len(dataset.target_names)

    # setup model
    pretrained_model = get_vgg16_pretrained_model(n_class)
    model = FCN32s(n_class=n_class)
    fcn.util.copy_chainermodel(pretrained_model, model)
    if gpu != -1:
        model.to_gpu(gpu)

    # setup optimizer
    # optimizer = O.MomentumSGD(lr=1e-8, momentum=0.99)
    optimizer = O.Adam(alpha=1e-5)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.GradientClipping(threshold=5))

    # train
    trainer = fcn.Trainer(
        dataset=dataset,
        model=model,
        optimizer=optimizer,
        weight_decay=0.0005,
        test_interval=1000,
        max_iter=1000000,
        snapshot=4000,
        gpu=gpu,
    )
    trainer.train()
Example #4
def main(gpu):

    # setup dataset
    dataset = bleaney.SegmentationClassDataset()
    n_class = len(dataset.target_names)

    # setup model
    model = FCNbleaney(n_class=n_class)
    if gpu != -1:
        model.to_gpu(gpu)

    # setup optimizer
    optimizer = O.MomentumSGD(lr=1e-10, momentum=0.99)
    optimizer.setup(model)

    # train
    trainer = fcn.Trainer(
        dataset=dataset,
        model=model,
        optimizer=optimizer,
        weight_decay=0.0005,
        test_interval=1000,
        max_iter=10000,
        snapshot=1000,
        gpu=gpu,
    )
    trainer.train()
Example #5
def main(gpu, config_file):
    # 0. config

    config = yaml.safe_load(open(config_file))

    out = osp.splitext(osp.basename(config_file))[0]
    for key, value in sorted(config.items()):
        if key == 'name':
            continue
        if isinstance(value, str):
            value = value.replace('/', 'SLASH')
            value = value.replace(':', 'COLON')
        out += '_{key}-{value}'.format(key=key.upper(), value=value)
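    # 'this_dir' is assumed to be defined at module level, e.g.
    # this_dir = osp.dirname(osp.abspath(__file__))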
    config['out'] = osp.join(this_dir, 'logs', config['name'], out)

    config['config_file'] = osp.realpath(config_file)
    config['timestamp'] = datetime.datetime.now(
        pytz.timezone('Asia/Tokyo')).isoformat()
    if not osp.exists(config['out']):
        os.makedirs(config['out'])
    with open(osp.join(config['out'], 'config.yaml'), 'w') as f:
        yaml.safe_dump(config, f, default_flow_style=False)
    yaml.safe_dump(config, sys.stderr, default_flow_style=False)

    # 1. dataset

    dataset_train = PascalVOC2012SegmentationDataset('train')
    dataset_val = PascalVOC2012SegmentationDataset('val')

    iter_train = chainer.iterators.MultiprocessIterator(
        dataset_train, batch_size=1, shared_mem=10**7)
    iter_valid = chainer.iterators.MultiprocessIterator(
        dataset_val, batch_size=1, shared_mem=10**7,
        repeat=False, shuffle=False)

    # 2. model

    n_class = len(dataset_train.label_names)

    vgg_path = osp.join(chainer.dataset.get_dataset_directory('fcn'),
                        'vgg16.chainermodel')
    vgg = fcn.models.VGG16()
    chainer.serializers.load_hdf5(vgg_path, vgg)

    model = fcn.models.FCN32s(n_class=n_class)
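    # initialize the conv layers from VGG16; copy_fc8 controls whether the
    # final classifier weights are carried over, and init_upscore=False skips
    # (re-)initializing the upsampling layer here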
    model.init_from_vgg16(vgg, copy_fc8=config.get('copy_fc8', True),
                          init_upscore=False)
    model.train = True

    if gpu >= 0:
        cuda.get_device(gpu).use()
        model.to_gpu()

    # 3. optimizer

    optimizer = chainer.optimizers.Adam(alpha=config['lr'])
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))

    # training loop

    trainer = fcn.Trainer(
        device=gpu,
        model=model,
        optimizer=optimizer,
        iter_train=iter_train,
        iter_valid=iter_valid,
        out=config['out'],
        max_iter=config['max_iteration'],
    )
    trainer.train()
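A minimal config file this script could consume; the keys are the ones the code above reads (name, lr, max_iteration, and the optional copy_fc8), while the values are illustrative assumptions:

name: fcn32s
lr: 1.0e-05
max_iteration: 100000
copy_fc8: true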
Example #6
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-g', '--gpu', type=int, required=True, help='GPU id')
    parser.add_argument(
        '--fcn16s-file', default=fcn.models.FCN16s.pretrained_model,
        help='Pretrained model file of FCN16s')
    args = parser.parse_args()

    gpu = args.gpu
    fcn16s_file = args.fcn16s_file

    # 0. config

    cmd = 'git log -n1 --format="%h"'
    vcs_version = subprocess.check_output(cmd, shell=True).decode().strip()
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    out = 'fcn8s_VCS-%s_TIME-%s' % (
        vcs_version,
        timestamp,
    )
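    # 'here' is assumed to be the script directory, defined at module level
    # (osp.dirname(osp.abspath(__file__)))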
    out = osp.join(here, 'logs', out)
    if not osp.exists(out):
        os.makedirs(out)
    with open(osp.join(out, 'config.yaml'), 'w') as f:
        f.write('fcn16s_file: %s\n' % fcn16s_file)

    # 1. dataset

    dataset_train = datasets.SBDClassSeg(split='train')
    dataset_valid = datasets.VOC2011ClassSeg(split='seg11valid')

    iter_train = chainer.iterators.MultiprocessIterator(
        dataset_train, batch_size=1, shared_mem=10 ** 7)
    iter_valid = chainer.iterators.MultiprocessIterator(
        dataset_valid, batch_size=1, shared_mem=10 ** 7,
        repeat=False, shuffle=False)

    # 2. model

    n_class = len(dataset_train.class_names)

    fcn16s = fcn.models.FCN16s()
    chainer.serializers.load_npz(fcn16s_file, fcn16s)

    model = fcn.models.FCN8s(n_class=n_class)
    model.init_from_fcn16s(fcn16s)

    if gpu >= 0:
        cuda.get_device(gpu).use()
        model.to_gpu()

    # 3. optimizer

    optimizer = chainer.optimizers.MomentumSGD(lr=1.0e-14, momentum=0.99)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))
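    # original FCN solver recipe: biases learn at twice the base rate with no
    # momentum, and the bilinear upsampling (deconvolution) layers stay fixed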
    for p in model.params():
        if p.name == 'b':
            p.update_rule = chainer.optimizers.momentum_sgd.MomentumSGDRule(
                lr=optimizer.lr * 2, momentum=0)
    model.upscore2.disable_update()
    model.upscore8.disable_update()
    model.upscore_pool4.disable_update()

    # training loop

    trainer = fcn.Trainer(
        device=gpu,
        model=model,
        optimizer=optimizer,
        iter_train=iter_train,
        iter_valid=iter_valid,
        out=out,
        max_iter=100000,
    )
    trainer.train()
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--out', required=True)
    args = parser.parse_args()

    gpu = args.gpu
    out = args.out

    if not osp.exists(out):
        os.makedirs(out)

    # 1. dataset

    dataset_train = PascalVOC2012SegmentationDataset('train')
    dataset_val = PascalVOC2012SegmentationDataset('val')

    iter_train = chainer.iterators.SerialIterator(dataset_train, batch_size=1)
    iter_val = chainer.iterators.SerialIterator(dataset_val,
                                                batch_size=1,
                                                repeat=False,
                                                shuffle=False)

    # 2. model

    n_class = len(dataset_train.label_names)

    vgg_path = fcn.data.download_vgg16_chainermodel()
    vgg = fcn.models.VGG16()
    chainer.serializers.load_hdf5(vgg_path, vgg)

    model = fcn.models.FCN32s(n_class=n_class)
    model.train = True
    fcn.utils.copy_chainermodel(vgg, model)
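    # turn the fully connected fc6/fc7 weights into convolution kernels by
    # reshaping them ("convolutionalization", as in the FCN paper)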
    for link_name in ['fc6', 'fc7']:
        W1, b1 = getattr(vgg, link_name).params()
        W2, b2 = getattr(model, link_name).params()
        W2.data = W1.data.reshape(W2.shape)
        b2.data = b1.data

    if gpu >= 0:
        cuda.get_device(gpu).use()
        model.to_gpu()

    # 3. optimizer

    optimizer = chainer.optimizers.Adam(alpha=1e-5)
    optimizer.setup(model)

    # training loop

    trainer = fcn.Trainer(
        device=gpu,
        model=model,
        optimizer=optimizer,
        iter_train=iter_train,
        iter_val=iter_val,
        out=out,
    )
    trainer.train(
        max_iter=150000,
        interval_eval=5000,
    )
Example #8
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-g', '--gpu', type=int, required=True, help='GPU id')
    args = parser.parse_args()

    gpu = args.gpu

    # 0. config

    cmd = 'git log -n1 --format="%h"'
    vcs_version = subprocess.check_output(cmd, shell=True).decode().strip()
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    out = 'fcn8s_atonce_VCS-%s_TIME-%s' % (
        vcs_version,
        timestamp,
    )
    out = osp.join(here, 'logs', out)

    # 1. dataset

    dataset_train = datasets.BridgeSeg(split='train',
                                       rcrop=[400, 400],
                                       use_class_weight=False)
    dataset_train_nocrop = datasets.BridgeSeg(split='train',
                                              use_class_weight=False)
    dataset_valid = datasets.BridgeSeg(split='validation',
                                       use_class_weight=False)

    if dataset_train.class_weight is not None:
        print("Using class weigths: ", dataset_train.class_weight)

    iter_train = chainer.iterators.MultiprocessIterator(dataset_train,
                                                        batch_size=1,
                                                        shared_mem=10**7)
    iter_valid = chainer.iterators.MultiprocessIterator(dataset_valid,
                                                        batch_size=1,
                                                        shared_mem=10**7,
                                                        repeat=False,
                                                        shuffle=False)

    # 2. model

    n_class = len(dataset_train.class_names)
    class_weight = dataset_train.class_weight

    vgg = fcn.models.VGG16()
    chainer.serializers.load_npz(vgg.pretrained_model, vgg)

    model = fcn.models.FCN8sAtOnce(n_class=n_class, class_weight=class_weight)
    model.init_from_vgg16(vgg)

    if gpu >= 0:
        cuda.get_device(gpu).use()
        model.to_gpu()

    # 3. optimizer

    optimizer = chainer.optimizers.MomentumSGD(lr=1.0e-10, momentum=0.99)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))
    for p in model.params():
        if p.name == 'b':
            p.update_rule = chainer.optimizers.momentum_sgd.MomentumSGDRule(
                lr=optimizer.lr * 2, momentum=0)
    model.upscore2.disable_update()
    model.upscore8.disable_update()
    model.upscore_pool4.disable_update()

    # training loop

    trainer = fcn.Trainer(
        device=gpu,
        model=model,
        optimizer=optimizer,
        iter_train=iter_train,
        iter_valid=iter_valid,
        out=out,
        max_iter=100000,
    )
    trainer.train()
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--fcn16s', required=True)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--out', required=True)
    parser.add_argument('--dataset', default='v2', choices=['v1', 'v2'])
    args = parser.parse_args()

    fcn16s_path = args.fcn16s
    gpu = args.gpu
    out = args.out
    if args.dataset == 'v1':
        dataset_class = datasets.APC2016DatasetV1
    else:
        dataset_class = datasets.APC2016DatasetV2

    if not osp.exists(out):
        os.makedirs(out)

    # 1. dataset

    dataset_train = dataset_class('train')
    dataset_val = dataset_class('val')

    iter_train = chainer.iterators.SerialIterator(dataset_train, batch_size=1)
    iter_val = chainer.iterators.SerialIterator(dataset_val,
                                                batch_size=1,
                                                repeat=False,
                                                shuffle=False)

    # 2. model

    n_class = len(dataset_train.label_names)

    fcn16s = fcn.models.FCN16s(n_class=n_class)
    chainer.serializers.load_npz(fcn16s_path, fcn16s)

    model = fcn.models.FCN8s(n_class=n_class)
    model.train = True
    fcn.utils.copy_chainermodel(fcn16s, model)

    if gpu >= 0:
        cuda.get_device(gpu).use()
        model.to_gpu()

    # 3. optimizer

    optimizer = chainer.optimizers.Adam(alpha=1e-5)
    optimizer.setup(model)

    # training loop

    trainer = fcn.Trainer(
        device=gpu,
        model=model,
        optimizer=optimizer,
        iter_train=iter_train,
        iter_val=iter_val,
        out=out,
    )
    trainer.train(
        max_iter=150000,
        interval_eval=5000,
    )
Example #10
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-g', '--gpu', type=int, required=True, help='GPU id')
    parser.add_argument('-da', '--data-augmentation', type=int,
                        default=0, choices=(0, 1),
                        help='Enable data augmentation (1) or disable it (0)')
    parser.add_argument('--fcn32s-file',
                        default=fcn.models.FCN32s.pretrained_model,
                        help='Pretrained model file of FCN32s')
    parser.add_argument('-d', '--deck-mask', type=int, default=1,
                        choices=(0, 1),
                        help='Black out non-deck regions (1) or keep the '
                             'full image (0)')
    parser.add_argument('-e', '--epochs', type=int, default=100,
                        help='Number of epochs')
    args = parser.parse_args()

    gpu = args.gpu
    fcn32s_file = args.fcn32s_file

    # 0. config

    cmd = 'git log -n1 --format="%h"'
    vcs_version = subprocess.check_output(cmd, shell=True).decode().strip()
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    out = 'fcn16s_VCS-%s_TIME-%s' % (
        vcs_version,
        timestamp,
    )
    out = osp.join(here, 'logs', out)
    if not osp.exists(out):
        os.makedirs(out)
    with open(osp.join(out, 'config.yaml'), 'w') as f:
        f.write('fcn32s_file: %s\n' % fcn32s_file)

    # 1. dataset
    deck_flag = bool(args.deck_mask)
    data_augmentation = bool(args.data_augmentation)
    class_weight_flag = False
    train_dataset = datasets.BridgeSeg(split='train',
                                       rcrop=[512, 512],
                                       use_class_weight=class_weight_flag,
                                       black_out_non_deck=deck_flag,
                                       use_data_augmentation=data_augmentation)
    train_dataset_nocrop = datasets.BridgeSeg(
        split='train',
        use_class_weight=class_weight_flag,
        black_out_non_deck=deck_flag,
        use_data_augmentation=False)
    test_dataset = datasets.BridgeSeg(split='validation',
                                      use_class_weight=class_weight_flag,
                                      black_out_non_deck=deck_flag,
                                      use_data_augmentation=False)

    iter_train = chainer.iterators.MultiprocessIterator(train_dataset,
                                                        batch_size=1,
                                                        shared_mem=10**8)
    iter_train_nocrop = chainer.iterators.MultiprocessIterator(
        train_dataset_nocrop,
        batch_size=1,
        shared_mem=10**8,
        repeat=False,
        shuffle=False)
    iter_valid = chainer.iterators.MultiprocessIterator(test_dataset,
                                                        batch_size=1,
                                                        shared_mem=10**8,
                                                        repeat=False,
                                                        shuffle=False)

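    # with batch_size=1, one epoch equals len(train_dataset) iterations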
    train_samples = len(train_dataset)
    nbepochs = args.epochs

    # 2. model

    n_class = len(train_dataset.class_names)
    class_weight = train_dataset.class_weight

    fcn32s = fcn.models.FCN32s(n_class=n_class, class_weight=class_weight)
    chainer.serializers.load_npz(fcn32s_file, fcn32s)

    model = fcn.models.FCN16s(n_class=n_class, class_weight=class_weight)
    model.init_from_fcn32s(fcn32s)

    if gpu >= 0:
        cuda.get_device(gpu).use()
        model.to_gpu()

    # 3. optimizer

    optimizer = chainer.optimizers.MomentumSGD(lr=1.0e-12, momentum=0.99)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))
    for p in model.params():
        if p.name == 'b':
            p.update_rule = chainer.optimizers.momentum_sgd.MomentumSGDRule(
                lr=optimizer.lr * 2, momentum=0)
    model.upscore2.disable_update()
    model.upscore16.disable_update()

    # training loop

    trainer = fcn.Trainer(
        device=gpu,
        model=model,
        optimizer=optimizer,
        iter_train=iter_train,
        iter_train_noncrop=iter_train_nocrop,
        iter_valid=iter_valid,
        out=out,
        max_iter=train_samples * nbepochs,
        interval_validate=train_samples,
    )
    trainer.train(fold=0)
Example #11
def main(gpu):
    # 0. config

    cmd = 'git log -n1 --format="%h"'
    vcs_version = subprocess.check_output(cmd, shell=True).decode().strip()
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    out = 'fcn32s_VCS-%s_TIME-%s' % (
        vcs_version,
        timestamp,
    )
    out = osp.join(here, 'logs', out)

    # 1. dataset

    dataset_train = datasets.SBDClassSeg(split='train')
    dataset_valid = datasets.VOC2011ClassSeg(split='seg11valid')

    iter_train = chainer.iterators.MultiprocessIterator(dataset_train,
                                                        batch_size=1,
                                                        shared_mem=10**7)
    iter_valid = chainer.iterators.MultiprocessIterator(dataset_valid,
                                                        batch_size=1,
                                                        shared_mem=10**7,
                                                        repeat=False,
                                                        shuffle=False)

    # 2. model

    n_class = len(dataset_train.class_names)

    vgg_path = fcn.data.download_vgg16_chainermodel(check_md5=False)
    vgg = fcn.models.VGG16()
    chainer.serializers.load_npz(vgg_path, vgg)

    model = fcn.models.FCN32s(n_class=n_class)
    model.init_from_vgg16(vgg)

    if gpu >= 0:
        cuda.get_device(gpu).use()
        model.to_gpu()

    # 3. optimizer

    optimizer = chainer.optimizers.MomentumSGD(lr=1.0e-10, momentum=0.99)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))
    for p in model.params():
        if p.name == 'b':
            p.update_rule = chainer.optimizers.momentum_sgd.MomentumSGDRule(
                lr=optimizer.lr * 2, momentum=0)
    model.upscore.disable_update()

    # training loop

    trainer = fcn.Trainer(
        device=gpu,
        model=model,
        optimizer=optimizer,
        iter_train=iter_train,
        iter_valid=iter_valid,
        out=out,
        max_iter=100000,
    )
    trainer.train()
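Several of these scripts freeze the upscore (deconvolution) layers: in FCN they are initialized to plain bilinear upsampling and left untrained. A sketch of that standard initializer, assuming the fcn package's own version behaves like the FCN reference code (names here are illustrative):

import numpy as np

def bilinear_kernel(channels, ksize):
    # deconvolution weights that perform channel-wise bilinear upsampling
    factor = (ksize + 1) // 2
    center = factor - 1 if ksize % 2 == 1 else factor - 0.5
    og = np.ogrid[:ksize, :ksize]
    filt = (1 - abs(og[0] - center) / factor) * \
           (1 - abs(og[1] - center) / factor)
    W = np.zeros((channels, channels, ksize, ksize), dtype=np.float32)
    W[range(channels), range(channels)] = filt
    return W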