Example No. 1
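    # Excerpt from a training entry point; assumes os, random, time and torch
    # are imported, torch.backends.cudnn is bound to cudnn, and Configer, Log
    # and handle_distributed come from the surrounding project.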
    handle_distributed(args_parser,
                       os.path.expanduser(os.path.abspath(__file__)))

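    # Seed Python's and PyTorch's RNGs when a fixed seed is given, for reproducible runs.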
    if args_parser.seed is not None:
        random.seed(args_parser.seed)
        torch.manual_seed(args_parser.seed)

    cudnn.enabled = True
    cudnn.benchmark = args_parser.cudnn

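    # Build the run configuration from the CLI arguments and normalize
    # data_dir into a list of ~-expanded paths.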
    configer = Configer(args_parser=args_parser)
    data_dir = configer.get('data', 'data_dir')
    if isinstance(data_dir, str):
        data_dir = [data_dir]
    abs_data_dir = [os.path.expanduser(x) for x in data_dir]
    configer.update(['data', 'data_dir'], abs_data_dir)

    project_dir = os.path.dirname(os.path.realpath(__file__))
    configer.add(['project_dir'], project_dir)

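    # When logging to a file, append a timestamp to the configured file name;
    # otherwise disable the file handler entirely.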
    if configer.get('logging', 'log_to_file'):
        log_file = configer.get('logging', 'log_file')
        new_log_file = '{}_{}'.format(
            log_file, time.strftime("%Y-%m-%d_%X", time.localtime()))
        configer.update(['logging', 'log_file'], new_log_file)
    else:
        configer.update(['logging', 'logfile_level'], None)

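    # Initialize the global logger from the logging section of the config.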
    Log.init(logfile_level=configer.get('logging', 'logfile_level'),
             stdout_level=configer.get('logging', 'stdout_level'),
             log_file=configer.get('logging', 'log_file'),
             log_format=configer.get('logging', 'log_format'),
             rewrite=configer.get('logging', 'rewrite'),
             dist_rank=configer.get('local_rank'))

Example No. 2

    configer = Configer(args_parser=args_parser)
    cudnn.enabled = True
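    # cudnn benchmark autotuning only pays off with fixed input sizes,
    # so it is disabled when multi-scale data is configured.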
    if configer.get('data', 'multiscale') is None:
        cudnn.benchmark = args_parser.cudnn
    else:
        cudnn.benchmark = False

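    # In the non-distributed case, restrict CUDA to the GPUs listed in the config.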
    if configer.get('gpu') is not None and not configer.get('distributed',
                                                            default=False):
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
            str(gpu_id) for gpu_id in configer.get('gpu'))

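    # Fall back to plain batchnorm when no normalization type is configured.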
    if configer.get('network', 'norm_type') is None:
        configer.update('network.norm_type', 'batchnorm')

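    # With at most one visible GPU, or under distributed training, enable
    # network.gather (presumably gathering outputs onto a single device).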
    if torch.cuda.device_count() <= 1 or configer.get('distributed',
                                                      default=False):
        configer.update('network.gather', True)

    project_dir = os.path.dirname(os.path.realpath(__file__))
    configer.add('project_dir', project_dir)
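    # In test phase, generate a toy dataset and point every data source's label path at it.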
    if configer.get('phase') == 'test':
        from tools.data_generator import DataGenerator
        DataGenerator.gen_toyset(project_dir)
        for source in range(configer.get('data', 'num_data_sources')):
            configer.update('data.src{}_label_path'.format(source),
                            os.path.join(project_dir, 'toyset/label.txt'))

    configer.update('logging.logfile_level', None)
Example No. 3
    args_parser = parser.parse_args()

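    # Set up distributed execution (if requested) from the parsed arguments.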
    from lib.utils.distributed import handle_distributed
    handle_distributed(args_parser,
                       os.path.expanduser(os.path.abspath(__file__)))

    if args_parser.seed is not None:
        random.seed(args_parser.seed)
        torch.manual_seed(args_parser.seed)

    cudnn.enabled = True
    cudnn.benchmark = args_parser.cudnn

    configer = Configer(args_parser=args_parser)
    abs_data_dir = os.path.expanduser(configer.get('data', 'data_dir'))
    configer.update(['data', 'data_dir'], abs_data_dir)

    project_dir = os.path.dirname(os.path.realpath(__file__))
    configer.add(['project_dir'], project_dir)

    if configer.get('logging', 'log_to_file'):
        log_file = configer.get('logging', 'log_file')
        new_log_file = '{}_{}'.format(
            log_file, time.strftime("%Y-%m-%d_%X", time.localtime()))
        configer.update(['logging', 'log_file'], new_log_file)
    else:
        configer.update(['logging', 'logfile_level'], None)

    Log.init(logfile_level=configer.get('logging', 'logfile_level'),
             stdout_level=configer.get('logging', 'stdout_level'),
             log_file=configer.get('logging', 'log_file'),
             log_format=configer.get('logging', 'log_format'),
             rewrite=configer.get('logging', 'rewrite'),
             dist_rank=configer.get('local_rank'))
Example No. 4
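    # Variant that also seeds every CUDA device, offset by the process's
    # local_rank, and forces deterministic cudnn kernels. The enclosing
    # `if args_parser.seed is not None:` block is assumed from the examples above.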
    if args_parser.seed is not None:
        random.seed(args_parser.seed)
        torch.manual_seed(args_parser.seed)
        if args_parser.gpu is not None:
            torch.cuda.manual_seed_all(args_parser.seed +
                                       args_parser.local_rank)

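    # Deterministic cudnn kernels trade some speed for run-to-run reproducibility.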
    cudnn.enabled = True
    cudnn.benchmark = args_parser.cudnn
    cudnn.deterministic = True

    configer = Configer(args_parser=args_parser)
    if configer.get('gpu') is not None and not configer.get('distributed',
                                                            default=False):
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
            str(gpu_id) for gpu_id in configer.get('gpu'))

    if configer.get('network', 'norm_type') is None:
        configer.update('network.norm_type', 'batchnorm')

    if torch.cuda.device_count() <= 1 or configer.get('distributed',
                                                      default=False):
        configer.update('network.gather', True)

    project_dir = os.path.dirname(os.path.realpath(__file__))
    configer.add('project_dir', project_dir)

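    # Initialize the global logger; dist_rank is presumably used to tell the
    # per-process log output apart in distributed runs.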
    Log.init(logfile_level=configer.get('logging', 'logfile_level'),
             stdout_level=configer.get('logging', 'stdout_level'),
             log_file=configer.get('logging', 'log_file'),
             log_format=configer.get('logging', 'log_format'),
             rewrite=configer.get('logging', 'rewrite'),
             dist_rank=configer.get('local_rank'))