Example #1
def parse_args(argv):
    """Parse arguments @argv and return the flags needed for training."""
    parser = argparse.ArgumentParser(description=__doc__, allow_abbrev=False)

    group = parser.add_argument_group('General Options')
    opts.add_general_flags(group)

    group = parser.add_argument_group('Dataset Options')
    opts.add_dataset_flags(group)

    group = parser.add_argument_group('Model Options')
    opts.add_model_flags(group)

    group = parser.add_argument_group('Soft Label Options')
    opts.add_teacher_flags(group)

    group = parser.add_argument_group('Training Options')
    opts.add_training_flags(group)

    update_parser_defaults_from_yaml(parser)
    args = parser.parse_args(argv)

    # if args.teacher_model is not None and args.teacher_state_file is None:
    #     parser.error("You should set --teacher-state-file if "
    #                  "--teacher-model is set.")

    return args
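
Every example on this page routes its parser through `update_parser_defaults_from_yaml` before calling `parse_args`. As a point of reference, here is a minimal sketch of what such a helper could do, assuming the YAML file is a flat mapping from argument destinations to default values (the actual template_lib implementation may differ):

import argparse
import yaml

def set_defaults_from_yaml(parser, yaml_path):
    # Hypothetical helper: load a flat {dest: value} mapping and register it
    # as parser defaults; explicit command-line values still win at parse time.
    with open(yaml_path, 'r') as f:
        defaults = yaml.safe_load(f) or {}
    parser.set_defaults(**defaults)
    return parser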
Example #2
def main():
    # parse command line
    parser = prepare_parser()

    update_parser_defaults_from_yaml(parser)

    config = vars(parser.parse_args())
    print(get_dict_str(config))
    run(config)
Example #3
def ddp_init():
    """
  use config_cfgnode
  """
    from template_lib.v2.config_cfgnode import update_parser_defaults_from_yaml
    from template_lib.v2.ddp import parser_local_rank

    rank = parser_local_rank()

    parser = update_parser_defaults_from_yaml(parser=None,
                                              append_local_rank=True,
                                              is_main_process=(rank == 0))

    args = parser.parse_args()

    n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = n_gpu > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        # comm.synchronize()
        dist.barrier()

    # eval(args.run_func)()
    return args
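
For context, a sketch of how the args returned by `ddp_init` might be consumed in each process, assuming one process per GPU launched via `torch.distributed.launch` or `torchrun` (which set `WORLD_SIZE` and the local rank); `build_model` here is only a placeholder:

import torch
import torch.distributed as dist

def train_worker():
    args = ddp_init()
    model = build_model().cuda()  # placeholder model factory
    if args.distributed:
        # Wrap the model so gradients are all-reduced across processes.
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)
        is_main = dist.get_rank() == 0  # rank 0 handles logging/checkpoints
    # ... run the training loop ...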
Example #4
def run():

    parser = default_argument_parser()
    update_parser_defaults_from_yaml(parser)
    args = parser.parse_args()

    logger = logging.getLogger('tl')
    logger.info(get_dict_str(vars(args)))

    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args, ),
    )
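
Assuming `default_argument_parser` and `launch` follow the detectron2-style API, `launch` starts `args.num_gpus` worker processes per machine and invokes `main` with the tuple given as `args`; a minimal placeholder for that worker might look like:

def main(args):
    # Hypothetical worker entry point called once per spawned process.
    logger = logging.getLogger('tl')
    logger.info('worker started: %s', get_dict_str(vars(args)))
    # ... build the model / dataloaders and run training here ...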
Example #5
def load_cfg_fom_args(description="Config file options."):
    """Load config from command line arguments and set any specified options."""
    parser = argparse.ArgumentParser(description=description)
    help_s = "Config file location"
    parser.add_argument("--cfg",
                        dest="cfg_file",
                        help=help_s,
                        required=True,
                        type=str)
    help_s = "See pycls/core/config.py for all options"
    parser.add_argument("opts",
                        help=help_s,
                        default=None,
                        nargs=argparse.REMAINDER)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    update_parser_defaults_from_yaml(parser)

    args = parser.parse_args()
    _C.merge_from_file(args.cfg_file)
    _C.merge_from_list(args.opts)
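
The `_C.merge_from_file` / `_C.merge_from_list` calls match the yacs `CfgNode` API, where the trailing `opts` captured with `argparse.REMAINDER` are `KEY VALUE` pairs. A self-contained sketch of that pattern with made-up keys:

from yacs.config import CfgNode as CN

_C = CN()
_C.TRAIN = CN()
_C.TRAIN.LR = 0.01
_C.TRAIN.BATCH_SIZE = 64

# A YAML file overrides the hard-coded defaults ...
# _C.merge_from_file('config.yaml')
# ... and trailing "KEY VALUE" pairs from the command line override both.
_C.merge_from_list(['TRAIN.LR', '0.1'])
assert _C.TRAIN.LR == 0.1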
Example #6
def main():
    from template_lib.v2.config import update_parser_defaults_from_yaml

    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--run_func", type=str)

    update_parser_defaults_from_yaml(parser)

    args = parser.parse_args()

    n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = n_gpu > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        # comm.synchronize()
        dist.barrier()

    # eval(args.run_func)()
    return args
Example #7
def main():

    update_parser_defaults_from_yaml(parser)
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker,
                 nprocs=ngpus_per_node,
                 args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
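
`torch.multiprocessing.spawn` prepends the process index to the tuple passed via `args`, so the `main_worker` used above is expected to accept `(gpu, ngpus_per_node, args)`. A sketch of such a worker, assuming the module-level parser also defines `--rank` holding the node rank, as in the PyTorch ImageNet reference script:

def main_worker(gpu, ngpus_per_node, args):
    # `gpu` is the process index supplied by mp.spawn; use it as the device id.
    if args.distributed:
        # Global rank = node rank * GPUs per node + local GPU index.
        args.rank = args.rank * ngpus_per_node + gpu
        torch.distributed.init_process_group(backend='nccl',
                                             init_method=args.dist_url,
                                             world_size=args.world_size,
                                             rank=args.rank)
    torch.cuda.set_device(gpu)
    # ... build the model and run the training loop for this process ...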
Example #8
def get_args():
    parser = argparse.ArgumentParser()
    # Dataset configuration
    parser.add_argument(
        '--cGAN',
        default=False,
        action='store_true',
        help='to train cGAN, set this ``True``. default: False')
    parser.add_argument(
        '--data_root',
        type=str,
        default='tiny-imagenet-200',
        help='path to dataset root directory. default: tiny-imagenet-200')
    parser.add_argument('--batch_size',
                        '-B',
                        type=int,
                        default=64,
                        help='mini-batch size of training data. default: 64')
    parser.add_argument(
        '--eval_batch_size',
        '-eB',
        default=None,
        help='mini-batch size of evaluation data. default: None')
    parser.add_argument(
        '--num_workers',
        type=int,
        default=8,
        help='Number of workers for training data loader. default: 8')
    # Generator configuration
    parser.add_argument(
        '--gen_num_features',
        '-gnf',
        type=int,
        default=64,
        help=
        'Number of features of generator (a.k.a. nplanes or ngf). default: 64')
    parser.add_argument(
        '--gen_dim_z',
        '-gdz',
        type=int,
        default=128,
        help='Dimension of generator input noise. default: 128')
    parser.add_argument(
        '--gen_bottom_width',
        '-gbw',
        type=int,
        default=4,
        help='Initial size of hidden variable of generator. default: 4')
    parser.add_argument(
        '--gen_distribution',
        '-gd',
        type=str,
        default='normal',
        help='Input noise distribution: normal (default) or uniform.')
    # Discriminator (Critic) configuration
    parser.add_argument(
        '--dis_arch_concat',
        '-concat',
        default=False,
        action='store_true',
        help='If use concat discriminator, set this true. default: False')
    parser.add_argument(
        '--dis_emb',
        type=int,
        default=128,
        help='Parameter for concat discriminator. default: 128')
    parser.add_argument(
        '--dis_num_features',
        '-dnf',
        type=int,
        default=64,
        help=
        'Number of features of discriminator (a.k.a nplanes or ndf). default: 64'
    )
    # Optimizer settings
    parser.add_argument('--lr',
                        type=float,
                        default=0.0002,
                        help='Initial learning rate of Adam. default: 0.0002')
    parser.add_argument('--beta1',
                        type=float,
                        default=0.0,
                        help='beta1 (betas[0]) value of Adam. default: 0.0')
    parser.add_argument('--beta2',
                        type=float,
                        default=0.9,
                        help='beta2 (betas[1]) value of Adam. default: 0.9')
    parser.add_argument(
        '--lr_decay_start',
        '-lds',
        type=int,
        default=50000,
        help='Start point of learning rate decay. default: 50000')
    # Training setting
    parser.add_argument(
        '--seed',
        type=int,
        default=46,
        help='Random seed. default: 46 (derived from Nogizaka46)')
    parser.add_argument(
        '--max_iteration',
        '-N',
        type=int,
        default=100000,
        help='Max iteration number of training. default: 100000')
    parser.add_argument(
        '--n_dis',
        type=int,
        default=5,
        help='Number of discriminator updates per generator update. default: 5'
    )
    parser.add_argument(
        '--num_classes',
        '-nc',
        type=int,
        default=0,
        help='Number of classes in training data. No need to set. default: 0')
    parser.add_argument('--loss_type',
                        type=str,
                        default='hinge',
                        help='loss function name. hinge (default) or dcgan.')
    parser.add_argument('--relativistic_loss',
                        '-relloss',
                        default=False,
                        action='store_true',
                        help='Apply relativistic loss or not. default: False')
    parser.add_argument(
        '--calc_FID',
        default=False,
        action='store_true',
        help='If calculate FID score, set this ``True``. default: False')
    # Log and Save interval configuration
    parser.add_argument('--results_root',
                        type=str,
                        default='results',
                        help='Path to results directory. default: results')
    parser.add_argument(
        '--no_tensorboard',
        action='store_true',
        default=False,
        help='If you dislike tensorboard, set this ``True``. default: False')
    parser.add_argument(
        '--no_image',
        action='store_true',
        default=False,
        help=
        'If you dislike saving images on tensorboard, set this ``True``. default: False'
    )
    parser.add_argument(
        '--checkpoint_interval',
        '-ci',
        type=int,
        default=1000,
        help=
        'Interval of saving checkpoints (model and optimizer). default: 1000')
    parser.add_argument('--log_interval',
                        '-li',
                        type=int,
                        default=100,
                        help='Interval of showing losses. default: 100')
    parser.add_argument(
        '--eval_interval',
        '-ei',
        type=int,
        default=1000,
        help=
        'Interval for evaluation (save images and FID calculation). default: 1000'
    )
    parser.add_argument(
        '--n_eval_batches',
        '-neb',
        type=int,
        default=100,
        help='Number of mini-batches used in evaluation. default: 100')
    parser.add_argument(
        '--n_fid_images',
        '-nfi',
        type=int,
        default=5000,
        help='Number of images to calculate FID. default: 5000')
    parser.add_argument(
        '--test',
        default=False,
        action='store_true',
        help='If testing this python program, set this ``True``. default: False')
    # Resume training
    parser.add_argument('--args_path',
                        default=None,
                        help='Checkpoint args json path. default: None')
    parser.add_argument(
        '--gen_ckpt_path',
        '-gcp',
        default=None,
        help='Generator and optimizer checkpoint path. default: None')
    parser.add_argument(
        '--dis_ckpt_path',
        '-dcp',
        default=None,
        help='Discriminator and optimizer checkpoint path. default: None')

    update_parser_defaults_from_yaml(parser)
    args = parser.parse_args()
    return args
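
All of the flags above declare hard-coded `default=` values, so whatever `update_parser_defaults_from_yaml` injects (presumably via `parser.set_defaults` or equivalent) sits between those defaults and explicit command-line flags. The precedence can be illustrated with plain argparse:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.0002)
parser.set_defaults(lr=0.001)                  # e.g. a value read from YAML
print(parser.parse_args([]).lr)                # 0.001 (YAML beats hard-coded)
print(parser.parse_args(['--lr', '0.01']).lr)  # 0.01  (CLI beats both)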
Example #9
import argparse
import yaml
import torch.backends.cudnn as cudnn
from pytorch_lightning import Trainer
from pytorch_lightning.logging import TestTubeLogger

from template_lib.v2.config import update_parser_defaults_from_yaml, global_cfg
from template_lib.utils import update_config

parser = argparse.ArgumentParser(description='Generic runner for VAE models')
parser.add_argument('--config',
                    '-c',
                    dest="filename",
                    metavar='FILE',
                    help='path to the config file',
                    default='configs/vae.yaml')

update_parser_defaults_from_yaml(parser)

args = parser.parse_args()
with open(args.filename, 'r') as file:
    try:
        config = yaml.safe_load(file)
    except yaml.YAMLError as exc:
        print(exc)

config = update_config(config, global_cfg)

tt_logger = TestTubeLogger(
    save_dir=eval(config['logging_params']['save_dir']),
    name=config['logging_params']['name'],
    debug=False,
    create_git_tag=False,
)
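
`update_config` presumably folds the `global_cfg` overrides into the dictionary loaded from the YAML file. A purely illustrative nested-merge sketch of that kind of behavior (not the actual template_lib implementation):

def merge_dicts(base, overrides):
    # Recursively copy override values into the base config dictionary.
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            merge_dicts(base[key], value)
        else:
            base[key] = value
    return base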
Example #10
def parse_args():
    # Training settings
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('cmd', choices=['train', 'test'])
    parser.add_argument('-d', '--data-dir', default=None, required=True)
    parser.add_argument('-l',
                        '--list-dir',
                        default=None,
                        help='List dir to look for train_images.txt etc. '
                        'It is the same as --data-dir if not set.')
    parser.add_argument('-c', '--classes', default=0, type=int)
    parser.add_argument('-s', '--crop-size', default=0, type=int)
    parser.add_argument('--step', type=int, default=200)
    parser.add_argument('--arch')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--lr-mode', type=str, default='step')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay',
                        '--wd',
                        default=1e-4,
                        type=float,
                        metavar='W',
                        help='weight decay (default: 1e-4)')
    parser.add_argument('-e',
                        '--evaluate',
                        dest='evaluate',
                        action='store_true',
                        help='evaluate model on validation set')
    parser.add_argument('--resume',
                        default='',
                        type=str,
                        metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--pretrained',
                        dest='pretrained',
                        default='',
                        type=str,
                        metavar='PATH',
                        help='use pre-trained model')
    parser.add_argument('--save_path',
                        default='',
                        type=str,
                        metavar='PATH',
                        help='output path for training checkpoints')
    parser.add_argument('--save_iter',
                        default=1,
                        type=int,
                        help='number of training iterations between '
                        'checkpoint history saves')
    parser.add_argument('-j', '--workers', type=int, default=8)
    parser.add_argument('--load-release', dest='load_rel', default=None)
    parser.add_argument('--phase', default='val')
    parser.add_argument('--random-scale', default=0, type=float)
    parser.add_argument('--random-rotate', default=0, type=int)
    parser.add_argument('--bn-sync', action='store_true')
    parser.add_argument('--ms',
                        action='store_true',
                        help='Turn on multi-scale testing')
    parser.add_argument('--with-gt', action='store_true')
    parser.add_argument('--test-suffix', default='', type=str)

    update_parser_defaults_from_yaml(parser)
    args = parser.parse_args()

    assert args.classes > 0

    print(' '.join(sys.argv))
    print(args)

    if args.bn_sync:
        drn.BatchNorm = batchnormsync.BatchNormSync

    return args