Example #1
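# Training entry point: loads the FBNet search-space config and the mmdet model
# config, sets up (optionally distributed) training, builds the detection
# supernet, and launches training.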
def main():
    args = parse_args()
    fb_cfg = mmcv_config.fromfile(args.fb_cfg)
    _space = fb_cfg.search_space
    # base = _space['base']
    # depth = _space['depth']
    # space = _space['space']

    model_cfg = mmcv_config.fromfile(args.model_cfg)
    # set cudnn_benchmark
    if model_cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        model_cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        model_cfg.resume_from = args.resume_from
    model_cfg.gpus = args.gpus
    if model_cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        model_cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                                config=model_cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **model_cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(model_cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
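    # Build the detection supernet over the FBNet search space; theta_txt
    # presumably holds the (searched) architecture parameters to load.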
    model = detection(mmcv_config(model_cfg['model_cfg']),
                      mmcv_config(model_cfg['train_cfg']),
                      mmcv_config(model_cfg['test_cfg']), _space,
                      args.theta_txt)
    print(model)
    train_dataset = get_dataset(model_cfg.data.train)
    train_detector(model,
                   train_dataset,
                   model_cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
Example #2
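# Evaluation entry point: builds the searched detector from the same configs,
# loads a checkpoint, runs single- or multi-GPU inference on the test set, and
# optionally dumps the results and runs COCO-style evaluation.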
def main():
    args = parse_args()
    search_cfg = mmcv_config.fromfile(args.fb_cfg)
    _space = search_cfg.search_space
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    # cfg = mmcv.Config.fromfile(args.config)
    cfg = mmcv.Config.fromfile(args.model_cfg)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
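    # Single-GPU path: build the model directly and run single_test; otherwise
    # fall back to mmdet's parallel_test across the requested GPUs.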
    if args.gpus == 1:
        model = detection(mmcv_config(cfg['model_cfg']),
                          mmcv_config(cfg['train_cfg']),
                          mmcv_config(cfg['test_cfg']), _space, args.theta_txt)
        # model = build_detector(
        #     cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #3
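# Search setup (snippet is truncated): prepares the log directory, builds a COCO
# dataloader, instantiates the FBNet Faster R-CNN supernet, measures per-block
# latency, and constructs a DetectionSearcher with separate weight/arch optimizers.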
if not os.path.exists(args.model_save_path):
    _logger.warning("{} does not exist, creating it".format(args.model_save_path))
    os.makedirs(args.model_save_path)
_set_file(args.model_save_path + 'log.log')

# Build model
from ..models.fbnet_faster_rcnn import FBNetCustomFasterRCNN
from ..search.detection_searcher import DetectionSearcher
from mmdet.datasets import CocoDataset, build_dataloader
coco_dataset = CocoDataset(**data_cfg['train'])
coco_dataset = build_dataloader(coco_dataset,
                                imgs_per_gpu=config.imgs_per_gpu,
                                workers_per_gpu=config.imgs_per_gpu,
                                dist=False,
                                num_gpus=len(args.gpus.split(',')))

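# Build the FBNet Faster R-CNN supernet and profile its per-block latency on
# the first requested GPU (presumably used as the latency term of the search loss).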
model = FBNetCustomFasterRCNN(cfg=mmcv_config(model_cfg),
                              train_cfg=mmcv_config(train_cfg),
                              test_cfg=mmcv_config(test_cfg),
                              channels=model_cfg['neck']['in_channels'])
model.speed_test(torch.randn((1, 3, 224, 224)),
                 verbose=False,
                 device='cuda:' + args.gpus[0])

searcher = DetectionSearcher(model=model,
                             mod_opt_dict={
                                 'type': 'SGD',
                                 'lr': config.w_lr,
                                 'momentum': config.w_mom,
                                 'weight_decay': config.w_wd
                             },
                             arch_opt_dict={
Example #4
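# Search entry point for a face-detection dataset: parses CLI arguments, splits
# the training data into weight- and theta-update subsets, builds the two
# dataloaders and the detection supernet, then runs the FBNet search.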
def main():
    parser = argparse.ArgumentParser(
        description="Train a model with data parallel for the base net "
        "and model parallel for the classify net.")
    #parser.add_argument('--batch-size', type=int, default=256,
    #                    help='training batch size of all devices.')
    #parser.add_argument('--epochs', type=int, default=1000,
    #                    help='number of training epochs.')
    #parser.add_argument('--log-frequence', type=int, default=10,
    #                    help='log frequence, default is 400')
    parser.add_argument('--gpus',
                        type=str,
                        default='0',
                        help='gpus, default is 0')
    parser.add_argument('--fb_cfg', type=str, help='FBNet search space config')
    parser.add_argument('--model_cfg', type=str, help='detector model config')
    parser.add_argument('--speed_txt', type=str, help='per-block latency file')

    args = parser.parse_args()

    search_cfg = mmcv_config.fromfile(args.fb_cfg)
    _space = search_cfg.search_space

    model_cfg = mmcv_config.fromfile(args.model_cfg)
    # dataset settings
    classes = ['background', 'face']
    min_scale = 0
    w_data, t_data = split_data('./data/newlibraf_info/train_imglist',
                                './newlibraf_info/newlibraf_face', classes,
                                min_scale)
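    # split_data presumably partitions the training list into two disjoint
    # subsets: w_data for updating the supernet weights and t_data for updating
    # the architecture parameters (theta), as in the original FBNet recipe.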
    img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                        std=[58.395, 57.12, 57.375],
                        to_rgb=True)
    w_data_cfg = dict(train=dict(
        # ann_file='/home1/zhaoyu/dataset/car_w.pkl',
        ann_file=w_data,
        img_prefix="./data/",
        img_scale=(1000, 216),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0.5,
        with_mask=False,
        with_crowd=True,
        with_label=True))

    t_data_cfg = dict(train=dict(
        # ann_file='/home1/zhaoyu/dataset/car_t.pkl',
        ann_file=t_data,
        img_prefix="./data/",
        img_scale=(1000, 216),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0.5,
        with_mask=False,
        with_crowd=True,
        with_label=True))
    gpus = [int(x) for x in args.gpus.split(",")]
    w_dataset = CustomDataset(**w_data_cfg['train'])
    w_dataset = build_dataloader(w_dataset,
                                 imgs_per_gpu=16,
                                 workers_per_gpu=4,
                                 dist=False,
                                 num_gpus=len(gpus))

    t_dataset = CustomDataset(**t_data_cfg['train'])
    t_dataset = build_dataloader(t_dataset,
                                 imgs_per_gpu=16,
                                 workers_per_gpu=2,
                                 dist=False,
                                 num_gpus=len(gpus))

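    # Build the detection supernet over the search space; speed_txt presumably
    # provides measured per-block latencies for the latency-aware loss.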
    det = detection(mmcv_config(model_cfg['model_cfg']),
                    mmcv_config(model_cfg['train_cfg']),
                    mmcv_config(model_cfg['test_cfg']),
                    _space,
                    speed_txt=args.speed_txt)
    print(det)
    save_result_path = "./theta/" + args.fb_cfg.split(
        '/')[-1][:-3] + '_' + args.model_cfg.split('/')[-1][:-3]
    if not os.path.exists(save_result_path):
        os.makedirs(save_result_path)
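    # FBNet search: SGD optimizes the supernet weights, Adam optimizes the
    # architecture parameters (theta); alpha presumably weights the latency term.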
    searcher = fbnet_search(det,
                            gpus,
                            imgs_per_gpu=8,
                            weight_opt_dict={
                                'type': 'SGD',
                                'lr': 0.01,
                                'momentum': 0.9,
                                'weight_decay': 0.001
                            },
                            theta_opt_dict={
                                'type': 'Adam',
                                'lr': 0.01,
                                'betas': (0.9, 0.99),
                                'weight_decay': 5e-4
                            },
                            weight_lr_sche={
                                'logger': _logger,
                                'T_max': 400,
                                'alpha': 1e-4,
                                'warmup_step': 1000,
                                't_mul': 1.5,
                                'lr_mul': 0.95,
                            },
                            alpha=0.1,
                            save_result_path=save_result_path)
    searcher.search(train_w_ds=w_dataset,
                    train_t_ds=t_dataset,
                    epoch=100,
                    start_w_epoch=5,
                    log_frequence=10)