Code example #1
def main():
    # parse command-line arguments
    arg_settings = get_parser().parse_args()

    # create the root directory for saving models, losses and accuracies
    if not os.path.exists(arg_settings.experiment_root):
        os.makedirs(arg_settings.experiment_root)

    # check if GPU is available
    if torch.cuda.is_available() and not arg_settings.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

    # set seed so results can be reproduced
    # the cuDNN switch lives in torch.backends.cudnn, not torch.cuda
    torch.backends.cudnn.enabled = False
    np.random.seed(arg_settings.seed)
    torch.manual_seed(arg_settings.seed)
    torch.cuda.manual_seed(arg_settings.seed)

    # load training, testing and validation datasets
    training_dataloader = DataLoader('train', arg_settings).data_loader
    testing_dataloader = DataLoader('test', arg_settings).data_loader
    validation_dataloader = DataLoader('val', arg_settings).data_loader

    # initialise prototypical network model (utilise GPU if available)
    device = 'cuda:0' if torch.cuda.is_available(
    ) and arg_settings.cuda else 'cpu'
    model = PrototypicalNetwork().to(device)

    # initialise optimizer: Adaptive Moment Estimation (Adam)
    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=arg_settings.learning_rate)

    # initialise learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer=optimizer,
        gamma=arg_settings.lr_scheduler_gamma,
        step_size=arg_settings.lr_scheduler_step)

    # train model, obtain results from training and save the best model
    best_state, best_acc, train_loss, train_acc, val_loss, val_acc = train(
        device=device,
        arg_settings=arg_settings,
        training_dataloader=training_dataloader,
        validation_dataloader=validation_dataloader,
        model=model,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        loss_function=loss_func)

    # test the best model from training
    test(device=device,
         arg_settings=arg_settings,
         testing_dataloader=testing_dataloader,
         model=model,
         loss_function=loss_func)
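Several of the examples below repeat this seeding pattern. As a point of reference, here is a minimal self-contained seeding helper, assuming only the standard random/numpy/torch modules; the name set_seed and the cuDNN determinism flags are illustrative additions, not taken from the project above.

import random

import numpy as np
import torch


def set_seed(seed: int) -> None:
    # seed every RNG a typical training loop draws from
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # trade kernel speed for reproducibility in cuDNN
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False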
Code example #2
File: __main__.py  Project: harzival/uniscan-compose
def main(args=None):

    parser = get_parser()
    args = parser.parse_args(args)
    obj_root_dir = Path(args.input)
    b3dm_root_path = Path(args.output)
    converter = Converter(_obj2gltf_js_path=args._obj2gltf_js_path,
                          _3d_tiles_tools_js_path=args._3d_tiles_tools_js_path,
                          _node_path=args._node_path)
    geom_error_list = [args.max_geom_error]
    geom_error_division_factor = args.geom_error_division_factor
    geom_error_list.append(geom_error_list[0] / geom_error_division_factor)
    root_transform = args.root_transform
    tileset_version = args.tileset_version

    if not b3dm_root_path.exists():
        b3dm_root_path.mkdir()

    lod_list = [Lod(dir) for dir in obj_root_dir.iterdir() if dir.is_dir()]
    lod_list.sort(key=lambda l: l.total_slices())

    for lod in lod_list:
        geom_error_list.append(geom_error_list[-1] /
                               geom_error_division_factor)
        if not (b3dm_root_path / lod.dir.name).exists():
            (b3dm_root_path / lod.dir.name).mkdir()
        for tile in lod.tile_list:
            b3dm_path = b3dm_root_path / lod.dir.name / tile.get_b3dm_name()
            tile.geom_error = geom_error_list[-1]
            obj_path = lod.dir / tile.get_obj_name()
            tile.geom_box = Box.from_obj_geometry(str(obj_path))
            if b3dm_path.exists():
                print("UNISCAN: The '.b3dm' of " + lod.dir.name + "/" +
                      tile.get_name() +
                      " already exists in export location (" +
                      str(b3dm_root_path) + ")")
            else:
                amap_path = lod.dir / tile.get_albedo_map_name()
                nmap_path = lod.dir / tile.get_normal_map_name()
                converter.write_mtl_data(obj_path)
                with tempfile.TemporaryDirectory() as temp_dir:
                    glb_path = Path(temp_dir) / tile.get_glb_name()
                    converter.obj_to_glb(obj_path, glb_path, amap_path,
                                         nmap_path)
                    converter.glb_to_b3dm(glb_path, b3dm_path)

    tile_list = Lod.sort_lods_into_tile_tree(lod_list)
    world_box = Box.from_joining_box_list(
        [tile.geom_box for tile in tile_list])
    root_tile = RootTile(world_box, tile_list, 10000, root_transform)
    tileset = Tileset("1.0", 100000, root_tile)
    with (b3dm_root_path / "tileset.json").open('w') as file:
        json.dump(dict(tileset), file, indent=4)
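The LOD loop above derives one geometric error per level by repeatedly dividing the previous value by a fixed factor. A standalone sketch of that progression (the function name geom_errors is hypothetical):

def geom_errors(max_error, division_factor, lod_count):
    # one root error, plus one extra division before the per-LOD loop,
    # then one further division per LOD level, mirroring the loop above
    errors = [max_error]
    for _ in range(lod_count + 1):
        errors.append(errors[-1] / division_factor)
    return errors


print(geom_errors(100.0, 2.0, 3))  # [100.0, 50.0, 25.0, 12.5, 6.25]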
Code example #3
File: test.py  Project: sxrczh/FETHI
def main():

    # the cuDNN switch lives in torch.backends.cudnn, not torch.cuda
    torch.backends.cudnn.enabled = False
    parser = arg_parser.get_parser()
    opt = parser.parse_args()

    if torch.cuda.is_available() and not opt.cuda:
        print(
            "WARNING: you have a cuda device, so you should probably run with '-cuda' option."
        )

    init_seed(opt)
    model = init_model(opt)
    model.load_state_dict(
        torch.load(open(
            os.path.join(opt.experiment_root, opt.corpus_dir) +
            "best_model.pth", 'rb'),
                   map_location=config.CUDA))

    # test_dataloader = torch.utils.data.DataLoader(init_dataset(opt, "test90p"),
    #                                                 batch_size=opt.batch_size,
    #                                                 collate_fn=dataloader.collate_fn)

    # test_dataloader = torch.utils.data.DataLoader(init_dataset(opt, "test"),
    #                                               batch_size=opt.batch_size,
    #                                               collate_fn=dataloader.collate_fn)

    test_dataloader = torch.utils.data.DataLoader(
        init_dataset(opt, "dev"),
        batch_size=opt.batch_size,
        collate_fn=dataloader.collate_fn)

    maxm = 0
    for i in range(1):
        threshold = 0.05 * i
        test_ma, test_mi, test_str = test(opt=opt,
                                          model=model,
                                          threshold=threshold,
                                          test_dataloader=test_dataloader,
                                          record_result=True,
                                          analysis_result=False,
                                          mode=config.DEV)

        if (test_str[0] + test_mi[0] + test_str[0]) > maxm:
            maxm = test_str[0] + test_mi[0] + test_str[0]
            print(f"new pred_threshold: {threshold}")
            print(
                f"Model acc in test data:\n"
                f" \nmacro: F1: {test_ma[0]*100:.2f}, P: {test_ma[1]*100:.2f}, R: {test_ma[2]*100:.2f}"
                f" \nmicro: F1: {test_mi[0]*100:.2f}, P: {test_mi[1]*100:.2f}, R: {test_mi[2]*100:.2f}"
                f" \nstrict: F1: {test_str[0]*100:.2f}, P: {test_str[1]*100:.2f}, R: {test_str[2]*100:.2f}"
            )
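Note that the loop above runs for a single iteration (range(1)), so only threshold 0.0 is actually evaluated. A minimal sketch of the sweep it appears to be shaped for, keeping the best-scoring threshold; evaluate_at is a hypothetical stand-in for the test(...) call:

def evaluate_at(threshold):
    # hypothetical stand-in for test(...): returns one scalar to maximise
    return 1.0 - abs(threshold - 0.35)


best_score, best_threshold = float("-inf"), None
for i in range(20):
    threshold = 0.05 * i
    score = evaluate_at(threshold)
    if score > best_score:
        best_score, best_threshold = score, threshold
print("best threshold: %.2f, score: %.2f" % (best_threshold, best_score))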
Code example #4
def main():

    chainer.set_debug(True)
    parser = get_parser()
    args = parser.parse_args()

    reset_seed(args.seed)

    # load vocabulary
    source0_ids = load_vocabulary(args.SOURCE_VOCAB0)
    source1_ids = load_vocabulary(args.SOURCE_VOCAB1)
    target_ids = load_vocabulary(args.TARGET_VOCAB)

    corpus = make_data_tuple(source0=(source0_ids, args.SOURCE0),
                             source1=(source1_ids, args.SOURCE1),
                             target=(target_ids, args.TARGET))

    source0_words = {i: w for w, i in source0_ids.items()}
    source1_words = {i: w for w, i in source1_ids.items()}
    target_words = {i: w for w, i in target_ids.items()}

    # Setup model
    model = Seq2seq(args.layer, len(source0_ids), len(source1_ids),
                    len(target_ids), args.unit)
    if args.resume:
        # Resume from a snapshot
        print("Load Model")
        chainer.serializers.load_npz(args.resume, model)

    if args.gpu >= 0:
        chainer.backends.cuda.get_device(args.gpu).use()
        model.to_gpu(args.gpu)

    for i in range(len(corpus)):
        source0, source1, target = corpus[i]
        result = model.translate([model.xp.array(source0)],
                                 [model.xp.array(source1)])[0]
        source0_sentence = ' '.join([source0_words[x] for x in source0])
        source1_sentence = ' '.join([source1_words[x] for x in source1])
        target_sentence = ' '.join([target_words[y] for y in target])
        result_sentence = ' '.join([target_words[y] for y in result])
        print('# source0 : ' + source0_sentence)
        print('# source1 : ' + source1_sentence)
        print('# result : ' + result_sentence)
        print('# expect : ' + target_sentence)
        print("")
Code example #5
File: train.py  Project: rqhappy/FGEC
def main():

    # the cuDNN switch lives in torch.backends.cudnn, not torch.cuda
    torch.backends.cudnn.enabled = False
    parser = arg_parser.get_parser()
    opt = parser.parse_args()

    if torch.cuda.is_available() and not opt.cuda:
        print(
            "WARNING: you have a cuda device, so you should probably run with '-cuda' option."
        )

    init_seed(opt)
    model = init_model(opt)
    optim = init_optim(model, opt)
    lr_scheduler = init_lr_scheduler(opt, optim)
    logger = init_log()

    tr_dataloader = torch.utils.data.DataLoader(
        init_dataset(opt, "train"),
        batch_size=opt.batch_size,
        shuffle=True,
        collate_fn=dataloader.collate_fn)
    dev_dataloader = torch.utils.data.DataLoader(
        init_dataset(opt, "dev"),
        batch_size=opt.batch_size,
        collate_fn=dataloader.collate_fn)
    test_dataloader = torch.utils.data.DataLoader(
        init_dataset(opt, "test"),
        batch_size=opt.batch_size,
        collate_fn=dataloader.collate_fn)

    train(opt=opt,
          tr_dataloader=tr_dataloader,
          model=model,
          optim=optim,
          lr_scheduler=lr_scheduler,
          test_dataloader=test_dataloader,
          dev_dataloader=dev_dataloader,
          logger=logger)
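Each DataLoader here is given a custom collate_fn, which is how variable-length examples are batched together. A minimal sketch of such a function built on pad_sequence, assuming every dataset item is a (token_ids, label) pair; this is illustrative, not the project's actual dataloader.collate_fn:

import torch
from torch.nn.utils.rnn import pad_sequence


def collate_fn(batch):
    # batch is a list of (token_ids, label) pairs with varying sequence lengths
    sequences = [torch.as_tensor(ids) for ids, _ in batch]
    labels = torch.as_tensor([label for _, label in batch])
    # pad to the longest sequence in the batch so the tensors can be stacked
    padded = pad_sequence(sequences, batch_first=True, padding_value=0)
    return padded, labels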
Code example #6
File: Launcher.py  Project: ekunnii/APPIAN
import os
import sys
from Extra.nii2mnc_batch import nii2mnc_batch
from Extra.minc_json_header_batch import create_minc_headers
from scanLevel import run_scan_level
from groupLevel import run_group_level
from arg_parser import get_parser, modify_opts

version = "1.0"

############################################
# Define dictionaries for default settings #
############################################

if __name__ == "__main__":
    parser = get_parser()

    opts = parser.parse_args()
    opts = modify_opts(opts)
    args = opts.args

    ############################################
    # Create BIDS-style header for MINC inputs #
    ############################################
    create_minc_headers(opts.sourceDir)

    #######################################
    ### Convert NII to MINC if necessary. #
    #######################################
    nii2mnc_batch(opts)
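For context, get_parser in a launcher like this usually just assembles an argparse.ArgumentParser. A minimal sketch with option names loosely modelled on the ones used above (sourceDir); the options are assumptions, not APPIAN's real CLI:

import argparse


def get_parser():
    # option names below are illustrative, not the project's actual interface
    parser = argparse.ArgumentParser(description="Run the processing pipeline")
    parser.add_argument("-s", "--source", dest="sourceDir", required=True,
                        help="directory containing the input images")
    parser.add_argument("-t", "--target", dest="targetDir", default="out",
                        help="directory for pipeline outputs")
    parser.add_argument("--version", action="version", version="1.0")
    return parser


opts = get_parser().parse_args(["-s", "scans/"])
print(opts.sourceDir)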
Code example #7
def main():

    chainer.set_debug(True)
    parser = get_parser()
    args = parser.parse_args()

    reset_seed(args.seed)

    # load vocabulary
    source0_ids = load_vocabulary(args.SOURCE_VOCAB0)
    source1_ids = load_vocabulary(args.SOURCE_VOCAB1)
    target_ids = load_vocabulary(args.TARGET_VOCAB)
    print('Source0 vocabulary size: %d' % len(source0_ids))
    print('Source1 vocabulary size: %d' % len(source1_ids))
    print('Target vocabulary size: %d' % len(target_ids))

    train_data = make_data_tuple(source0=(source0_ids, args.SOURCE0),
                                 source1=(source1_ids, args.SOURCE1),
                                 target=(target_ids, args.TARGET))

    source0_words = {i: w for w, i in source0_ids.items()}
    source1_words = {i: w for w, i in source1_ids.items()}
    target_words = {i: w for w, i in target_ids.items()}

    # Setup model
    model = Seq2seq(args.layer, len(source0_ids), len(source1_ids),
                    len(target_ids), args.unit)
    if args.gpu >= 0:
        chainer.backends.cuda.get_device(args.gpu).use()
        model.to_gpu(args.gpu)

    # Setup optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(args.l2))

    # Setup iterator
    train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)

    # Setup updater and trainer
    updater = training.updaters.StandardUpdater(train_iter,
                                                optimizer,
                                                converter=convert,
                                                device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    trainer.extend(
        extensions.LogReport(trigger=(args.log_interval, 'iteration')))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/perp',
        'validation/main/perp', 'validation/main/bleu', 'test/main/bleu',
        'elapsed_time'
    ]),
                   trigger=(args.log_interval, 'iteration'))

    if args.validation_source0 and args.validation_source1 and args.validation_target:
        valid_data = make_data_tuple(
            source0=(source0_ids, args.validation_source0),
            source1=(source1_ids, args.validation_source1),
            target=(target_ids, args.validation_target))

        @chainer.training.make_extension()
        def translate(trainer):
            source0, source1, target = valid_data[numpy.random.choice(
                len(valid_data))]
            result = model.translate([model.xp.array(source0)],
                                     [model.xp.array(source1)])[0]

            source0_sentence = ' '.join([source0_words[x] for x in source0])
            source1_sentence = ' '.join([source1_words[x] for x in source1])
            target_sentence = ' '.join([target_words[y] for y in target])
            result_sentence = ' '.join([target_words[y] for y in result])
            print('# source0 : ' + source0_sentence)
            print('# source1 : ' + source1_sentence)
            print('# result : ' + result_sentence)
            print('# expect : ' + target_sentence)

        trainer.extend(translate,
                       trigger=(args.validation_interval, 'iteration'))
        trainer.extend(CalculateBleu(model,
                                     valid_data,
                                     'validation/main/bleu',
                                     device=args.gpu),
                       trigger=(args.validation_interval, 'iteration'))

        dev_iter = chainer.iterators.SerialIterator(valid_data,
                                                    args.batchsize,
                                                    repeat=False,
                                                    shuffle=False)
        dev_eval = extensions.Evaluator(dev_iter,
                                        model,
                                        device=args.gpu,
                                        converter=convert)
        dev_eval.name = 'valid'
        trainer.extend(dev_eval,
                       trigger=(args.validation_interval, 'iteration'))

    if args.test_source0 and args.test_source1 and args.test_target:
        test_data = make_data_tuple(source0=(source0_ids, args.test_source0),
                                    source1=(source1_ids, args.test_source1),
                                    target=(target_ids, args.test_target))
        trainer.extend(CalculateBleu(model,
                                     test_data,
                                     'test/main/bleu',
                                     device=args.gpu),
                       trigger=(args.test_interval, 'iteration'))

    print('start training')
    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)
    save_args(args, args.out)

    trainer.run()

    if args.save:
        # Save a snapshot
        chainer.serializers.save_npz(args.out + "/trainer.npz", trainer)
        chainer.serializers.save_npz(args.out + "/model.npz", model)
Code example #8
def main(args_):
    """Installs dependencies, loads kube config and starts watch loop
    or deletes generated resources.

    Args:
        args_ (argparse.Namespace): args from ArgumentParser

    Return:
        None
    """
    if args_.dev:
        os.environ["LIBSONNET_PATH"] = "./jsonnet_libs"

    install_dependencies(args_.libsonnet)

    try:
        config.load_kube_config()
    except config.config_exception.ConfigException:
        config.load_incluster_config()

    if args_.delete_resources:
        delete_generated_resources(args_)
    else:
        watch_for_changes(args_)


if __name__ == "__main__":
    args = get_parser().parse_args()
    logger.set_logger(args.log.upper(), args.log_format)
    main(args)
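The try/except in main falls back to the in-cluster configuration when no local kubeconfig is found, the usual pattern for code that runs both on a workstation and inside a pod. A minimal standalone sketch using the official kubernetes Python client; the pod-listing call is only a smoke test, not part of the project above:

from kubernetes import client, config


def load_config():
    # prefer the local kubeconfig; fall back to the in-cluster service account
    try:
        config.load_kube_config()
    except config.config_exception.ConfigException:
        config.load_incluster_config()


if __name__ == "__main__":
    load_config()
    for pod in client.CoreV1Api().list_pod_for_all_namespaces().items:
        print(pod.metadata.namespace, pod.metadata.name)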