Code example #1
def main():
    """Entry point for inference: synthesize samples and/or measure FID."""
    cli = parse_args()

    # Merge the config file with command-line overrides, then attach the
    # run-specific fields directly.
    cfg = parse_config(cli.config)
    cfg = update_config(cfg, cli.options)
    cfg.work_dir = cli.work_dir
    cfg.checkpoint = cli.checkpoint
    cfg.launcher = cli.launcher
    cfg.backend = cli.backend
    if not os.path.isfile(cfg.checkpoint):
        raise FileNotFoundError(f'Checkpoint file `{cfg.checkpoint}` is '
                                f'missing!')

    # CUDNN behaviour: benchmark on, deterministic off, unless the config
    # says otherwise.
    cfg.cudnn_benchmark = cfg.get('cudnn_benchmark', True)
    cfg.cudnn_deterministic = cfg.get('cudnn_deterministic', False)
    torch.backends.cudnn.benchmark = cfg.cudnn_benchmark
    torch.backends.cudnn.deterministic = cfg.cudnn_deterministic

    # Bring up the distributed launcher and record the world size.
    cfg.is_distributed = True
    init_dist(cfg.launcher, backend=cfg.backend)
    cfg.num_gpus = dist.get_world_size()

    # Only rank 0 gets a real logger (and archives the config / commit id);
    # every other rank logs to a no-op sink.
    if dist.get_rank() != 0:
        logger = build_logger('dumb', work_dir=cfg.work_dir)
    else:
        logger = build_logger(cfg.get('logger_type', 'normal'),
                              work_dir=cfg.work_dir)
        shutil.copy(cli.config, os.path.join(cfg.work_dir, 'config.py'))
        commit_id = os.popen('git rev-parse HEAD').readline()
        logger.info(f'Commit ID: {commit_id}')

    # Build the runner and restore model weights only — no optimizer,
    # learning-rate, or running metadata is needed for inference.
    runner = getattr(runners, cfg.runner_type)(cfg, logger)
    runner.load(filepath=cfg.checkpoint,
                running_metadata=False,
                learning_rate=False,
                optimizer=False,
                running_stats=False)

    # Optional synthesis pass.
    if cli.synthesis_num > 0:
        count = cli.synthesis_num
        logger.print()
        logger.info(f'Synthesizing images ...')
        runner.synthesize(count, html_name=f'synthesis_{count}.html')
        logger.info(f'Finish synthesizing {count} images.')

    # Optional FID evaluation pass.
    if cli.fid_num > 0:
        count = cli.fid_num
        logger.print()
        logger.info(f'Testing FID ...')
        fid = runner.fid(count, align_tf=not cli.use_torchvision)
        logger.info(f'Finish testing FID on {count} samples. '
                    f'The result is {fid:.6f}.')
Code example #2
File: train.py — Project: McHz1s/genforce
def main():
    """Entry point for distributed training."""
    cli = parse_args()

    # Merge the config file with command-line overrides.
    cfg = parse_config(cli.config)
    cfg = update_config(cfg, cli.options)
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.gpus

    # Each run writes into a timestamped sub-directory of the work dir.
    now = datetime.datetime.now()
    version = ('%d-%d-%d-%02.0d-%02.0d-%02.0d' %
               (now.year, now.month, now.day,
                now.hour, now.minute, now.second))
    cfg.work_dir = os.path.join(cli.work_dir, version)
    cfg.resume_path = cli.resume_path
    cfg.weight_path = cli.weight_path
    cfg.seed = cli.seed
    cfg.launcher = cli.launcher
    cfg.backend = cli.backend

    # CUDNN behaviour: benchmark on, deterministic off, unless the config
    # says otherwise.
    cfg.cudnn_benchmark = cfg.get('cudnn_benchmark', True)
    cfg.cudnn_deterministic = cfg.get('cudnn_deterministic', False)
    torch.backends.cudnn.benchmark = cfg.cudnn_benchmark
    torch.backends.cudnn.deterministic = cfg.cudnn_deterministic

    # Optionally fix the random seed; doing so forces CUDNN determinism.
    if cfg.seed is not None:
        random.seed(cfg.seed)
        np.random.seed(cfg.seed)
        torch.manual_seed(cfg.seed)
        cfg.cudnn_deterministic = True
        torch.backends.cudnn.deterministic = True
        warnings.warn('Random seed is set for training! '
                      'This will turn on the CUDNN deterministic setting, '
                      'which may slow down the training considerably! '
                      'Unexpected behavior can be observed when resuming from '
                      'checkpoints.')

    # Bring up the distributed launcher and record the world size.
    cfg.is_distributed = True
    init_dist(cfg.launcher, backend=cfg.backend)
    cfg.num_gpus = dist.get_world_size()

    # Only rank 0 gets a real logger (and archives the config / commit id);
    # every other rank logs to a no-op sink.
    if dist.get_rank() != 0:
        logger = build_logger('dumb', work_dir=cfg.work_dir)
    else:
        logger = build_logger(cfg.get('logger_type', 'normal'),
                              work_dir=cfg.work_dir)
        shutil.copy(cli.config, os.path.join(cfg.work_dir, 'config.py'))
        commit_id = os.popen('git rev-parse HEAD').readline()
        logger.info(f'Commit ID: {commit_id}')

    # Build the runner: resuming restores full training state, while a
    # weight path restores model parameters only.  Then train.
    runner = getattr(runners, cfg.runner_type)(cfg, logger)
    if cfg.resume_path:
        runner.load(filepath=cfg.resume_path,
                    running_metadata=True,
                    learning_rate=True,
                    optimizer=True,
                    running_stats=False)
    if cfg.weight_path:
        runner.load(filepath=cfg.weight_path,
                    running_metadata=False,
                    learning_rate=False,
                    optimizer=False,
                    running_stats=False)
    runner.train()
Code example #3
File: train.py — Project: maximilianschaller/genforce
def main():
    """Main function.

    Parses CLI arguments, merges them into the configuration, builds the
    run save-name, fixes the random seed, initializes the distributed
    launcher and logger, then constructs the runner and starts training.
    """
    # Parse arguments.
    args = parse_args()

    # Parse configurations.
    config = parse_config(args.config)
    config = update_config(config, args.options)
    config.work_dir = args.work_dir
    config.resume_path = args.resume_path
    config.weight_path = args.weight_path
    config.seed = args.seed
    config.launcher = args.launcher
    config.backend = args.backend
    # Optional hyper-parameter overrides from the command line
    # (`is not None` per PEP 8, rather than `!= None`).
    if args.adv is not None:
        config.loss['g_loss_kwargs']['adv'] = float(args.adv)
    if args.lamb is not None:
        config.loss['g_loss_kwargs']['lamb'] = float(args.lamb)
    if args.metric is not None:
        config.loss['g_loss_kwargs']['metric'] = args.metric
    if args.baseLR is not None:
        config.modules['generator']['opt']['base_lr'] = float(args.baseLR) / 2
    if args.nethz is not None:
        config.nethz = args.nethz

    # Build a filesystem-safe run name from the hyper-parameter strings.
    # These four values are effectively required here: previously a missing
    # one crashed with an opaque TypeError on string concatenation, so fail
    # with a clear message instead.  NOTE(review): `adv` is kept verbatim
    # (no '.'->'dot' substitution) to preserve the existing name format —
    # confirm whether that asymmetry is intended.
    for field in ('adv', 'lamb', 'metric', 'baseLR'):
        if getattr(args, field) is None:
            raise ValueError(f'Command-line argument `{field}` is required '
                             f'to build the save name.')
    config.savename = '_'.join([
        args.adv,
        args.lamb.replace('.', 'dot'),
        args.metric.replace('.', 'dot'),
        args.baseLR.replace('.', 'dot'),
    ])

    # Dataset roots on the cluster scratch space.
    # NOTE(review): assumes `config.nethz` is provided either by the config
    # file or via `--nethz`; otherwise this lookup fails — confirm.
    config.data['train'][
        'root_dir'] = '/cluster/scratch/' + config.nethz + '/data'
    config.data['val'][
        'root_dir'] = '/cluster/scratch/' + config.nethz + '/data'

    # Set CUDNN.
    config.cudnn_benchmark = config.get('cudnn_benchmark', True)
    config.cudnn_deterministic = config.get('cudnn_deterministic', False)
    torch.backends.cudnn.benchmark = config.cudnn_benchmark
    torch.backends.cudnn.deterministic = config.cudnn_deterministic

    # Set random seed.  Default to 26 for reproducibility, but respect an
    # explicit `--seed` instead of silently overwriting it (the previous
    # hard-coded assignment discarded the CLI value and made the None check
    # dead code).  Seeding forces CUDNN determinism.
    if config.seed is None:
        config.seed = 26
    random.seed(config.seed)
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    config.cudnn_deterministic = True
    torch.backends.cudnn.deterministic = True
    warnings.warn('Random seed is set for training! '
                  'This will turn on the CUDNN deterministic setting, '
                  'which may slow down the training considerably! '
                  'Unexpected behavior can be observed when resuming from '
                  'checkpoints.')

    # Set launcher.
    config.is_distributed = True
    init_dist(config.launcher, backend=config.backend)
    config.num_gpus = dist.get_world_size()

    # Setup logger.  Only rank 0 writes real logs and archives the config
    # and git commit id; other ranks use a no-op logger.
    if dist.get_rank() == 0:
        logger_type = config.get('logger_type', 'normal')
        logger = build_logger(logger_type, work_dir=config.work_dir)
        shutil.copy(args.config, os.path.join(config.work_dir, 'config.py'))
        commit_id = os.popen('git rev-parse HEAD').readline()
        logger.info(f'Commit ID: {commit_id}')
    else:
        logger = build_logger('dumb', work_dir=config.work_dir)

    # Start training.  Resuming restores the full training state, whereas
    # a weight path restores model parameters only.
    runner = getattr(runners, config.runner_type)(config, logger)
    if config.resume_path:
        runner.load(filepath=config.resume_path,
                    running_metadata=True,
                    learning_rate=True,
                    optimizer=True,
                    running_stats=False)
    if config.weight_path:
        runner.load(filepath=config.weight_path,
                    running_metadata=False,
                    learning_rate=False,
                    optimizer=False,
                    running_stats=False)
    runner.train()