import json
import os

import torch
import torch.multiprocessing as mp

import utils

# helpers such as get_arguments, get_dataset_inputs, train, test, and set_seed
# are defined (or imported) elsewhere in this package; only the entry points are shown here


def main():
    args = utils.bin_config(get_arguments)

    if args.multi_gpus:  # use all available GPUs in parallel
        assert torch.cuda.is_available()
        args.n_gpus = torch.cuda.device_count()
        # rendezvous address/port read by torch.distributed in the spawned workers
        os.environ['MASTER_ADDR'] = args.master_addr
        os.environ['MASTER_PORT'] = str(args.master_port)
    elif args.gpu_id != -1:  # a single, explicitly chosen GPU
        args.n_gpus = 1
    else:  # CPU only
        args.n_gpus = 0

    with open(args.config_file) as f:
        config = json.load(f)
    assert config['config_target'] == 'naive_psychology'

    # pick the training split: semi-supervised or purely unsupervised inputs
    prefix = 'semisupervised' if args.is_semisupervised else 'unsupervised'
    train_inputs = get_dataset_inputs(prefix, args)
    dev_inputs = get_dataset_inputs('dev', args)
    if args.n_gpus > 1:
        # mp.spawn passes each worker's rank as the first argument to train()
        mp.spawn(train,
                 nprocs=args.n_gpus,
                 args=(train_inputs, dev_inputs, config, args))
    else:
        # single GPU (gpu_id >= 0) or CPU (gpu_id == -1)
        train(args.gpu_id, train_inputs, dev_inputs, config, args)
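

# Hedged sketch (an assumption, not this repo's actual train()): mp.spawn passes
# the worker index as the first positional argument, so a distributed worker would
# typically join the process group advertised via MASTER_ADDR / MASTER_PORT above.
def _example_distributed_worker(rank, train_inputs, dev_inputs, config, args):
    import torch.distributed as dist
    if args.n_gpus > 1:
        dist.init_process_group(backend='nccl', rank=rank, world_size=args.n_gpus)
        torch.cuda.set_device(rank)
    # ... build the model, wrap it in DistributedDataParallel, then run the epochs

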
def load_dataset_inputs(prefix):
    # path to the preprocessed HDF5 inputs for the given split (train/dev/test);
    # relies on the module-level `args` set in the __main__ block below
    input_f = os.path.join(args.input_dir, '{}_inputs.h5'.format(prefix))
    return input_f
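

# Hedged sketch (an assumption, not necessarily this repo's set_seed): seeding every
# RNG in play with the same value keeps all training processes in sync.
def _example_set_seed(gpu_id, seed):
    import random
    import numpy as np
    import torch
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if gpu_id != -1 and torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)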


def main():
    with open(args.config_file) as f:
        config = json.load(f)
    assert config['config_target'] == 'naive_psychology'
    # in distributed training, the seed has to be the same across all processes
    set_seed(args.gpu_id, args.seed)

    if args.mode == 'train':
        train_dinputs = load_dataset_inputs('train')
        dev_dinputs = load_dataset_inputs('dev')
        train(train_dinputs, dev_dinputs)

        # after training, evaluate on the test split using this run's output_dir
        test_dinputs = load_dataset_inputs('test')
        test(test_dinputs, args.output_dir)
    else:  # test only: evaluate from a previously saved checkpoint
        test_dinputs = load_dataset_inputs('test')
        test(test_dinputs, args.from_checkpoint)


if __name__ == "__main__":
    args = utils.bin_config(get_arguments)
    logger = utils.get_root_logger(args)
    main()
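
# Example minimal config (illustrative, an assumption beyond what is shown here):
# the only key checked above is config_target, which must equal 'naive_psychology';
# other keys are model-specific, e.g. {"config_target": "naive_psychology", ...}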