Example #1
                        nargs='?',
                        default=True,
                        help='Use CUDNN.')
    parser.add_argument("--local_rank", default=0, type=int)

    args = parser.parse_args()
    configer = Configer(args_parser=args)

    # Seed Python and PyTorch RNGs; offsetting by local_rank gives each
    # distributed worker its own random stream.
    if args.seed is not None:
        random.seed(args.seed + args.local_rank)
        torch.manual_seed(args.seed + args.local_rank)

    cudnn.enabled = True
    cudnn.benchmark = args.cudnn

    abs_data_dir = os.path.expanduser(configer.get('data', 'data_dir'))
    configer.update('data.data_dir', abs_data_dir)

    # Restrict the visible GPUs from the config only for non-distributed runs.
    if configer.get('gpu') is not None and not configer.get(
            'network.distributed', default=False):
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
            str(gpu_id) for gpu_id in configer.get('gpu'))

    if configer.get('network', 'norm_type') is None:
        configer.update('network.norm_type', 'batchnorm')

    if torch.cuda.device_count() <= 1 or configer.get('network.distributed',
                                                      default=False):
        configer.update('network.gather', True)

    project_dir = os.path.dirname(os.path.realpath(__file__))
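
Both fragments begin in the middle of an argparse call, so the imports and the start of the --cudnn option are not shown. The preamble below is a minimal sketch of what the fragment above assumes: the str2bool helper and the argument set are illustrative assumptions rather than torchcv's actual interface, and Configer would be imported from torchcv's own utilities (import omitted here).

import os
import random
import argparse

import torch
import torch.backends.cudnn as cudnn  # used later for cudnn.enabled / cudnn.benchmark


def str2bool(value):
    # Illustrative helper so --cudnn can accept yes/no style values (nargs='?').
    return str(value).lower() in ('yes', 'true', 't', 'y', '1')


parser = argparse.ArgumentParser(description='torchcv entry point (sketch)')
parser.add_argument('--seed', default=None, type=int,
                    help='Random seed for reproducibility.')
parser.add_argument('--cudnn', type=str2bool,
                    nargs='?',
                    default=True,
                    help='Use CUDNN.')
# The fragment above continues from this point with --local_rank and the rest of the setup.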
Example #2
File: main.py  Project: wxwoods/torchcv
                        nargs='?',
                        default=True,
                        help='Use CUDNN.')
    parser.add_argument("--local_rank", default=0, type=int)

    args = parser.parse_args()
    configer = Configer(args_parser=args)

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)

    cudnn.enabled = True
    cudnn.benchmark = args.cudnn

    abs_data_dir = os.path.expanduser(configer.get('data', 'data_dir'))
    configer.update('data.data_dir', abs_data_dir)

    if configer.get('gpu') is not None and not configer.get(
            'network.distributed', default=False):
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
            str(gpu_id) for gpu_id in configer.get('gpu'))

    if configer.get('network', 'norm_type') is None:
        configer.update('network.norm_type', 'batchnorm')

    if torch.cuda.device_count() <= 1 or configer.get('network.distributed',
                                                      default=False):
        configer.update('network.gather', True)

    if configer.get('phase') == 'train':
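
The two examples differ only in seeding: Example #1 offsets the seed by local_rank so every distributed worker draws a different random stream, while Example #2 reuses the same seed in every process. In both, --local_rank is the argument that torch.distributed.launch passes to each process it spawns, so a multi-GPU run of such a script is typically started along these lines (the flags after main.py are placeholders, not torchcv's documented CLI):

python -m torch.distributed.launch --nproc_per_node=4 main.py --cudnn y --seed 42 ...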