def main():

    # get config and arguments
    args, config = get_downstream_args()

    # Fix seed and make backends deterministic
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # benchmarking trades determinism for speed, so keep it off

    # create the experiment directory and back up the config file
    if args.expdir == '':
        if args.name is None: args.name = 'exp_' + str(random.randint(0, 999))
        expdir = os.path.join(
            'result/result_' + args.upstream + '_' + args.task + '/',
            args.name)
    else:
        expdir = args.expdir
    if not os.path.exists(expdir):
        os.makedirs(expdir)
    copyfile(args.config, os.path.join(expdir, args.config.split('/')[-1]))

    # get upstream model
    ######### plug in your upstream pre-trained model here #########
    upstream_model = get_upstream_model(args)

    # get dataloaders
    train_loader, dev_loader, test_loader = get_dataloader(
        args, config['dataloader'])

    # get downstream model
    downstream_model = get_downstream_model(args, upstream_model.out_dim,
                                            train_loader.dataset.class_num,
                                            config)

    # train
    runner = Runner(args=args,
                    runner_config=config['runner'],
                    dataloader={
                        'train': train_loader,
                        'dev': dev_loader,
                        'test': test_loader
                    },
                    upstream=upstream_model,
                    downstream=downstream_model,
                    expdir=expdir)
    runner.set_model()
    runner.train()
    runner.save_results(config['runner']['save_file'])

# Example 2
def main():
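    # Share tensors between DataLoader workers via the filesystem to avoid
    # exhausting file descriptors when many workers are used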
    torch.multiprocessing.set_sharing_strategy('file_system')
    torchaudio.set_audio_backend('sox_io')
    hack_isinstance()

    # get config and arguments
    args, config, backup_files = get_downstream_args()
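    # Optionally redirect torch.hub's cache directory (e.g. for downloaded upstream checkpoints)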
    if args.cache_dir is not None:
        torch.hub.set_dir(args.cache_dir)

    # When torch.distributed.launch is used
    if args.local_rank is not None:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(args.backend)

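    # When resuming a training run, make sure the distributed setup matches the
    # one recorded in the checkpoint (same DDP usage and same world size)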
    if args.mode == 'train' and args.past_exp:
        ckpt = torch.load(args.init_ckpt, map_location='cpu')

        now_use_ddp = is_initialized()
        original_use_ddp = ckpt['Args'].local_rank is not None
        assert now_use_ddp == original_use_ddp, f'{now_use_ddp} != {original_use_ddp}'

        if now_use_ddp:
            now_world = get_world_size()
            original_world = ckpt['WorldSize']
            assert now_world == original_world, f'{now_world} != {original_world}'

    # Save the launch arguments, config and backup files (leader process only)
    if is_leader_process():
        with open(os.path.join(args.expdir, f'args_{get_time_tag()}.yaml'),
                  'w') as file:
            yaml.dump(vars(args), file)

        with open(os.path.join(args.expdir, f'config_{get_time_tag()}.yaml'),
                  'w') as file:
            yaml.dump(config, file)

        for file in backup_files:
            backup(file, args.expdir)

    # Fix seed and make backends deterministic
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

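    # Build the Runner and dispatch to the requested mode (e.g. 'train')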
    runner = Runner(args, config)
    getattr(runner, args.mode)()

# Example 3
def main():
    # get config and arguments
    args, config = get_downstream_args()

    # Fix seed and make backends deterministic
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # benchmarking trades determinism for speed, so keep it off

    runner = Runner(args, config)
    getattr(runner, args.mode)()
    runner.logger.close()
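
# A minimal sketch (not part of the original examples) of how such an entry point
# is typically invoked when the file is run as a script; the helpers called inside
# main() are assumed to be imported from the surrounding project.
if __name__ == '__main__':
    main()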