Example #1
def main():
    """Build the config from CLI args, construct the three models, optionally
    restore their checkpoints, and dispatch to the phase/stage handler."""
    args = parse_args()
    cfg = Config.fromfile(args.config)

    # set cuda
    cfg.cuda = not args.no_cuda and torch.cuda.is_available()

    # set cudnn_benchmark & cudnn_deterministic
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if cfg.get('cudnn_deterministic', False):
        torch.backends.cudnn.deterministic = True

    # update configs according to args; a work_dir in the config file wins,
    # then the CLI flag, then a default derived from the config file name
    if not hasattr(cfg, 'work_dir'):
        if args.work_dir is not None:
            cfg.work_dir = args.work_dir
        else:
            cfg_name = rm_suffix(os.path.basename(args.config))
            cfg.work_dir = os.path.join('./data/work_dir', cfg_name)
    mkdir_if_no_exists(cfg.work_dir, is_folder=True)
    if not hasattr(cfg, 'stage'):
        cfg.stage = args.stage

    cfg.load_from1 = args.load_from1
    cfg.load_from2 = args.load_from2
    cfg.load_from3 = args.load_from3
    cfg.resume_from = args.resume_from

    cfg.distributed = args.distributed
    cfg.save_output = args.save_output
    cfg.phase = args.phase
    logger = create_logger()

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = [
        build_model(cfg.model1['type'], **cfg.model1['kwargs']),
        build_model(cfg.model2['type'], **cfg.model2['kwargs']),
        build_model(cfg.model3['type'], **cfg.model3['kwargs']),
    ]
    if cfg.phase == 'train':
        # BUG FIX: the original unpacked model1/model2/model3 only inside the
        # `if cfg.load_from1:` branch, so passing load_from2/load_from3 without
        # load_from1 raised NameError. Load each checkpoint independently.
        for i, ckpt in enumerate(
                (cfg.load_from1, cfg.load_from2, cfg.load_from3)):
            if ckpt:
                model[i].load_state_dict(torch.load(ckpt))
    handler = build_handler(args.phase, args.stage)

    handler(model, cfg, logger)
Example #2
def main():
    """Assemble the runtime config from the config file and CLI arguments,
    build the model, and hand control to the phase/stage handler."""
    args = parse_args()
    cfg = Config.fromfile(args.config)

    # Enable CUDA only when it is both requested and actually available.
    cfg.cuda = not args.no_cuda and torch.cuda.is_available()

    # Optional cuDNN tuning flags taken from the config file.
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if cfg.get('cudnn_deterministic', False):
        torch.backends.cudnn.deterministic = True

    # Derive a work dir when the config file does not define one: prefer the
    # CLI flag, otherwise fall back to a path based on the config file name.
    if not hasattr(cfg, 'work_dir'):
        if args.work_dir is None:
            cfg_name = rm_suffix(os.path.basename(args.config))
            cfg.work_dir = os.path.join('./data/work_dir', cfg_name)
        else:
            cfg.work_dir = args.work_dir
    mkdir_if_no_exists(cfg.work_dir, is_folder=True)

    if not hasattr(cfg, 'stage'):
        cfg.stage = args.stage
    if not hasattr(cfg, 'test_batch_size_per_gpu'):
        cfg.test_batch_size_per_gpu = cfg.batch_size_per_gpu

    # Mirror the remaining command-line options onto the config object.
    for opt in ('load_from', 'resume_from', 'pred_iou_score',
                'pred_iop_score', 'gpus', 'det_label', 'distributed',
                'save_output'):
        setattr(cfg, opt, getattr(args, opt))

    logger = create_logger()

    # Seed all RNGs when a seed was supplied.
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_model(cfg.model['type'], **cfg.model['kwargs'])
    handler = build_handler(args.phase, args.stage)

    handler(model, cfg, logger)