Example #1
def main():
    args = parse_args()

    assert args.out or args.show, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco)
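The script above assumes an argparse-based parse_args; below is a minimal sketch consistent with the flags it reads (config, checkpoint, --out, --show, --eval, --tmpdir, --launcher). The names are inferred from usage, so treat this as an assumption rather than the original parser.

import argparse

def parse_args():
    # Hypothetical reconstruction of the parser this test script reads from.
    parser = argparse.ArgumentParser(description='MMDet test a detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file (.pkl/.pickle)')
    parser.add_argument('--eval', type=str, nargs='+',
                        help='eval types, e.g. bbox segm proposal_fast')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--tmpdir', help='tmp dir for distributed collection')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    return parser.parse_args()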
Example #2
def main(args):
    

    cfg = Config.fromfile(args.config)
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('MMDetection Version: {}'.format(__version__))
    logger.info('Config: {}'.format(cfg.text))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    # if cfg.weights2d_path[0] is not None and cfg.load_from is None:
    #     expand_sd = pretrain2d_to_3d(model, cfg.weights2d_path[0])
    #     model.load_state_dict(expand_sd, strict=False)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=datasets[0].CLASSES)
    validate = cfg.get('evaluation', False)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=validate,
        logger=logger)
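For reference, the linear scaling rule applied above multiplies the configured learning rate by gpus/8. A worked example, assuming the common 8-GPU baseline value from mmdet configs:

base_lr = 0.02   # assumed 8-GPU baseline lr
gpus = 4
scaled_lr = base_lr * gpus / 8   # 0.01: half the GPUs, half the lr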
Example #3
def main():
    args = parse_args()

    # assert args.show or args.json_out, \
    #     ('Please specify at least one operation (save or show the results) '
    #      'with the argument "--out" or "--show" or "--json_out"')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)

    checkpoint_file = args.checkpoint
    if not checkpoint_file:

        def _epoch_num(name):
            return int(
                re.findall(r'epoch_[0-9]+\.pth',
                           name)[0].replace('epoch_', '').replace('.pth', ''))

        pths = sorted(glob.glob(os.path.join(cfg.work_dir, 'epoch_*.pth')),
                      key=_epoch_num)
        if len(pths) > 0:
            print("Found {}, use it as checkpoint by default.".format(
                pths[-1]))
            checkpoint_file = pths[-1]
    if not checkpoint_file:
        raise ValueError("Checkpoints not found, check work_dir non empty.")
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=args.shuffle)  # TODO: hack shuffle True

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, checkpoint_file, map_location='cpu')

    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    num_evals = args.num_evals
    if num_evals < 0:
        num_evals = len(data_loader)
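    # NOTE: only the single-GPU branch is handled below; `outputs` would be
    # undefined if a distributed launcher were used with this script.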
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, num_evals, args.show)

    rank, _ = get_dist_info()
    if rank == 0:
        gt_bboxes, gt_labels, gt_ignore, dataset_name = get_pascal_gts(
            dataset, num_evals)
        print('\nStarting to evaluate {}'.format(dataset_name))
        eval_map(outputs,
                 gt_bboxes,
                 gt_labels,
                 gt_ignore,
                 scale_ranges=None,
                 iou_thr=0.5,
                 dataset=dataset_name,
                 print_summary=True)

    # Always output to pkl for analysing.
    if args.out is None:
        args.out = osp.join(
            cfg.work_dir,
            args.config.split('/')[-1].replace('.py', '_results.pkl'))
    with open(args.out, 'wb') as f:
        pickle.dump(outputs, f, pickle.HIGHEST_PROTOCOL)
    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
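The _epoch_num key above gives checkpoints a numeric sort, which a plain lexicographic sort would get wrong. A small self-contained illustration with made-up file names:

import re

def _epoch_num(name):
    return int(re.findall(r'epoch_[0-9]+\.pth',
                          name)[0].replace('epoch_', '').replace('.pth', ''))

pths = ['work/epoch_10.pth', 'work/epoch_2.pth', 'work/epoch_1.pth']
print(sorted(pths))                  # lexicographic: epoch_10 before epoch_2
print(sorted(pths, key=_epoch_num))  # numeric: epoch_1, epoch_2, epoch_10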
Example #4
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    cfg.train_cfg.rcnn.sampler.add_gt_as_proposals = False  # Actually it doesn't matter.

    if args.ckpt:
        cfg.resume_from = args.ckpt

    if args.imgs_per_gpu > 0:
        cfg.data.imgs_per_gpu = args.imgs_per_gpu
    if args.nms_thr:
        cfg.test_cfg.rcnn.nms.iou_thr = args.nms_thr

    FOCAL_LENGTH = cfg.get('FOCAL_LENGTH', 1000)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    # train_dataset = get_dataset(cfg.datasets[0].train)
    train_dataset = get_dataset(cfg.datasets[1].train)
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES

    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)

    runner = Runner(model, lambda x: x, optimizer, cfg.work_dir, cfg.log_level)
    runner.resume(cfg.resume_from)
    model = runner.model

    # ONLY FOR DEBUG
    # print('remove DDP for debug!')
    # model = model._modules['module']

    model.eval()

    dataset_cfg = eval_dataset_mapper[args.dataset]
    dataset_cfg.update(cfg.common_val_cfg)
    dataset_cfg.pop('max_samples')
    dataset = get_dataset(dataset_cfg)
    # dataset.debugging = True
    shuffle = args.dataset not in stable_list
    data_loader = build_dataloader_fuse(
        dataset,
        1,
        0,
        cfg.gpus,
        dist=False,
        shuffle=shuffle,
        drop_last=False,
    )

    dump_dir = os.path.join(cfg.work_dir, f'eval_{args.dataset}')
    os.makedirs(dump_dir, exist_ok=True)
    if args.viz_dir:
        os.makedirs(args.viz_dir, exist_ok=True)
    eval_handler = eval_handler_mapper[args.dataset](
        writer=tqdm.write,
        viz_dir=args.viz_dir,
        FOCAL_LENGTH=FOCAL_LENGTH,
        work_dir=cfg.work_dir)  # type: EvalHandler

    with torch.no_grad():
        for i, data_batch in enumerate(tqdm(data_loader)):
            file_name = data_batch['img_meta'].data[0][0]['file_name']
            try:
                bbox_results, pred_results = model(**data_batch,
                                                   return_loss=False,
                                                   use_gt_bboxes=args.use_gt)
                pred_results['bboxes'] = bbox_results
                if args.paper_dir:
                    os.makedirs(args.paper_dir, exist_ok=True)
                    img = denormalize(data_batch['img'].data[0][0].numpy())
                    verts = pred_results['pred_vertices'] + pred_results[
                        'pred_translation']
                    dump_folder = osp.join(args.paper_dir, file_name)
                    os.makedirs(dump_folder, exist_ok=True)
                    plt.imsave(osp.join(dump_folder, 'img.png'), img)
                    for obj_i, vert in enumerate(verts):
                        nr.save_obj(osp.join(dump_folder, f'{obj_i}.obj'),
                                    vert,
                                    torch.tensor(smpl.faces.astype(np.int64)))

                save_pack = eval_handler(data_batch,
                                         pred_results,
                                         use_gt=args.use_gt)
                save_pack.update({'bbox_results': pred_results['bboxes']})
                if args.dump_pkl:
                    with open(
                            osp.join(dump_dir,
                                     f"{save_pack['file_name']}.pkl"),
                            'wb') as f:
                        pickle.dump(save_pack, f)
            except Exception as e:
                tqdm.write(f"Fail on {file_name}")
                tqdm.write(str(e))

    eval_handler.finalize()
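The `# type: EvalHandler` annotation above implies the handlers in eval_handler_mapper share an interface roughly like the following sketch. The class and method bodies are assumptions, not the original definitions:

class EvalHandler:
    """Assumed interface for the per-dataset evaluation handlers."""

    def __init__(self, writer=print, viz_dir='', FOCAL_LENGTH=1000,
                 work_dir=''):
        self.writer = writer
        self.viz_dir = viz_dir
        self.FOCAL_LENGTH = FOCAL_LENGTH
        self.work_dir = work_dir

    def __call__(self, data_batch, pred_results, use_gt=False):
        # Returns a "save_pack" dict carrying at least 'file_name'.
        raise NotImplementedError

    def finalize(self):
        # Aggregate the per-image packs into final metrics.
        raise NotImplementedError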
Example #5
    def set_configuration(self, cfg_in):
        cfg = self.get_configuration()
        cfg.merge_config(cfg_in)

        self._config_file = str(cfg.get_value("config_file"))
        self._seed_weights = str(cfg.get_value("seed_weights"))
        self._train_directory = str(cfg.get_value("train_directory"))
        self._output_directory = str(cfg.get_value("output_directory"))
        self._gpu_count = int(cfg.get_value("gpu_count"))
        self._integer_labels = strtobool(cfg.get_value("integer_labels"))
        self._launcher = str(cfg.get_value("launcher"))
        self._validate = strtobool(cfg.get_value("validate"))

        self._training_data = []

        from mmcv import Config
        self._cfg = Config.fromfile(self._config_file)

        if self._cfg.get('cudnn_benchmark', False):
            torch.backends.cudnn.benchmark = True

        if self._train_directory is not None:
            self._cfg.work_dir = self._train_directory
            self._groundtruth_store = os.path.join(self._train_directory,
                                                   self._tmp_annotation_file)
            if not os.path.exists(self._train_directory):
                os.mkdir(self._train_directory)
        else:
            self._groundtruth_store = self._tmp_annotation_file

        if self._seed_weights is not None:
            self._cfg.resume_from = self._seed_weights

        if self._gpu_count > 0:
            self._cfg.gpus = self._gpu_count
        else:
            self._cfg.gpus = torch.cuda.device_count()

        if self._cfg.checkpoint_config is not None:
            from mmdet import __version__
            self._cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                                    config=self._cfg.text)

        if self._launcher == 'none':
            self._distributed = False
        else:
            self._distributed = True
            from mmdet.apis import init_dist
            init_dist(self._launcher, **self._cfg.dist_params)

        from mmdet.apis import get_root_logger
        self._logger = get_root_logger(self._cfg.log_level)
        self._logger.info('Distributed training: {}'.format(self._distributed))

        if self._random_seed != "none":
            self._logger.info('Set random seed to {}'.format(self._random_seed))
            from mmdet.apis import set_random_seed
            set_random_seed(int(self._random_seed))

        from mmdet.models import build_detector

        self._model = build_detector(self._cfg.model,
                                     train_cfg=self._cfg.train_cfg,
                                     test_cfg=self._cfg.test_cfg)
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    ## uncomment to only eval on first 100 imgs

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    print('load model from {}'.format(cfg.load_from))
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    # set0 = mmcv.load('../liyu_mmdet/set0.pkl')
    # set1 = mmcv.load('../liyu_mmdet/set1.pkl')
    # set2 = mmcv.load('../liyu_mmdet/set2.pkl')
    # set3 = mmcv.load('../liyu_mmdet/set3.pkl')
    # set4 = mmcv.load('../liyu_mmdet/set4.pkl')
    # set5 = mmcv.load('../liyu_mmdet/set5.pkl')
    # set6 = mmcv.load('../liyu_mmdet/set6.pkl')
    # set7 = mmcv.load('../liyu_mmdet/set7.pkl')
    # set0 = mmcv.load('./set0.pkl')
    # set1 = mmcv.load('./set1.pkl')
    # set2 = mmcv.load('./set2.pkl')
    # set3 = mmcv.load('./set3.pkl')
    # set4 = mmcv.load('./set4.pkl')
    # set5 = mmcv.load('./set5.pkl')
    # set6 = mmcv.load('./set6.pkl')
    # set7 = mmcv.load('./set7.pkl')
    # set_combine = set0+set1+set2+set3+set4+set5+set6+set7
    # prefix = 'mrcnnr50_14.3_clshead'
    # set0 = mmcv.load('./{}_set0.pkl'.format(prefix))
    # set1 = mmcv.load('./{}_set1.pkl'.format(prefix))
    # set2 = mmcv.load('./{}_set2.pkl'.format(prefix))
    # set3 = mmcv.load('./{}_set3.pkl'.format(prefix))
    # set_combine = set0+set1+set2+set3

    # prefix = '/mrcnnr50_ag_coco_clshead'
    prefix = 'mrcnnr50_ag_3fc_ft_cocolongtail_cat400_epoch_2'
    prefix = 'mrcnn_r50_ag_cocolt'
    print(prefix)

    set0 = mmcv.load('./{}_set0.pkl'.format(prefix))
    set1 = mmcv.load('./{}_set1.pkl'.format(prefix))
    set2 = mmcv.load('./{}_set2.pkl'.format(prefix))
    set3 = mmcv.load('./{}_set3.pkl'.format(prefix))
    set4 = mmcv.load('./{}_set4.pkl'.format(prefix))
    set5 = mmcv.load('./{}_set5.pkl'.format(prefix))
    set6 = mmcv.load('./{}_set6.pkl'.format(prefix))
    set7 = mmcv.load('./{}_set7.pkl'.format(prefix))

    # set0 = mmcv.load('./set0.pkl')
    # set1 = mmcv.load('./set1.pkl')
    # set2 = mmcv.load('./set2.pkl')
    # set3 = mmcv.load('./set3.pkl')
    # set4 = mmcv.load('./set4.pkl')
    # set5 = mmcv.load('./set5.pkl')
    # set6 = mmcv.load('./set6.pkl')
    # set7 = mmcv.load('./set7.pkl')
    set_combine = set0 + set1 + set2 + set3 + set4 + set5 + set6 + set7

    # set_liyu = mmcv.load('../mmdet_ensemble/results319.pkl')

    # mmcv.dump(set_combine, args.out)
    # result_files = results2json(dataset, set_combine,
    #                             args.out)
    print('pkl result dumped, start eval')
    # result_files = results2json(dataset, set_combine,
    #                             args.out, dump=False)
    #
    # lvis_eval(result_files, args.eval, dataset.lvis)

    result_files = results2json(dataset, set_combine, args.out, dump=False)
    coco_eval(result_files, args.eval, dataset.coco)
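The eight set{i} loads above are the unrolled form of combining per-shard result files. An equivalent loop, assuming each '{prefix}_set{i}.pkl' holds the per-image results of one test-set slice:

import mmcv

prefix = 'mrcnn_r50_ag_cocolt'  # prefix taken from the example above
set_combine = []
for i in range(8):
    set_combine += mmcv.load('./{}_set{}.pkl'.format(prefix, i))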
Example #7
def evaluate_model(model_name, paper_arxiv_id, weights_url, weights_name,
                   paper_results, config):
    print('---')
    print('Now Evaluating %s' % model_name)

    evaluator = COCOEvaluator(root='./.data/vision/coco',
                              model_name=model_name,
                              paper_arxiv_id=paper_arxiv_id,
                              paper_results=paper_results)

    out = 'results.pkl'
    launcher = 'none'

    if out is not None and not out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(config)
    cfg.data.test[
        'ann_file'] = './.data/vision/coco/annotations/instances_val2017.json'
    cfg.data.test['img_prefix'] = './.data/vision/coco/val2017/'

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    local_checkpoint, _ = urllib.request.urlretrieve(
        weights_url, '%s/.cache/torch/%s' % (str(Path.home()), weights_name))

    print(local_checkpoint)

    # '/home/ubuntu/GCNet/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth'
    checkpoint = load_checkpoint(model, local_checkpoint, map_location='cpu')

    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    evaluator.reset_time()

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs, cache_exists = single_gpu_test(model, data_loader, False,
                                                evaluator)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, None)
        cache_exists = False  # the multi-GPU path reports no evaluator cache

    if cache_exists:
        print('Cache exists: %s' % (evaluator.batch_hash))
        evaluator.save()

    else:
        from mmdet.core import results2json

        rank, _ = get_dist_info()
        if out and rank == 0:
            print('\nwriting results to {}'.format(out))
            mmcv.dump(outputs, out)
            eval_types = ['bbox']
            if eval_types:
                print('Starting to evaluate {}'.format(' and '.join(eval_types)))
                if eval_types == ['proposal_fast']:
                    result_file = out
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(dataset, outputs, out)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = out + '.{}'.format(name)
                            result_files = results2json(
                                dataset, outputs_, result_file)
        anns = json.load(open(result_files['bbox']))
        evaluator.detections = []
        evaluator.add(anns)
        evaluator.save()
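A hypothetical invocation of evaluate_model; every value below (model name, arXiv id, URL, file name) is a placeholder, not a reference to real weights:

evaluate_model(
    model_name='Mask R-CNN (R-50-FPN)',
    paper_arxiv_id='1703.06870',
    weights_url='https://example.com/mask_rcnn_r50_fpn_1x.pth',
    weights_name='mask_rcnn_r50_fpn_1x.pth',
    paper_results=None,
    config='configs/mask_rcnn_r50_fpn_1x.py')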
Example #8
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    logger.info('after building detector')

    cfg.past_iteration = args.past_iteration
    # load model to continue training
    if args.past_iteration > 0:
        load_checkpoint(model, os.path.join(args.work_dir, 'epoch_1.pth'))
    logger.info('after loading checkpoint')

    try:
        datasets = eval(args.dataset)
        ann_csvs = eval(args.ann_csv)
        train_dataset = []
        for i in range(len(datasets)):
            cfg.data.train.img_prefix_list = [datasets[i]]
            cfg.data.train.ann_file = ann_csvs[i]
            train_dataset.append(get_dataset(cfg.data.train))
        CLASSES = train_dataset[0].CLASSES
    except Exception:  # args.dataset is a plain path, not a list literal
        cfg.data.train.img_prefix_list = [args.dataset]
        cfg.data.train.ann_file = args.ann_csv
        train_dataset = get_dataset(cfg.data.train)
        CLASSES = train_dataset.CLASSES

    logger.info('after getting dataset')
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = CLASSES
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
    logger.info('after training')
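The eval()-based parsing of args.dataset and args.ann_csv above executes arbitrary expressions. A safer sketch with the same fallback behavior, using only the standard library (the helper name is ours, not the original script's):

import ast

def parse_list_arg(value):
    # Accept either a Python list literal ("['dirA', 'dirB']") or a plain path.
    try:
        parsed = ast.literal_eval(value)
        return parsed if isinstance(parsed, list) else [value]
    except (ValueError, SyntaxError):
        return [value]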
Example #9
def main():
    #os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    args = parse_args()

    assert args.out or args.show, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        result_file = args.out
        #         args = parser.parse_args()
        #         cfg = mmcv.Config.fromfile(args.config)
        #         test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
        #         txt_eval(args.result, test_dataset, args.iou_thr)
        txt_eval(result_file, dataset, iou_thr=args.iou_thr)
Example #10
def main():
    args = parse_args()
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset_type = 'OIDSegDataset'
    data_root = 'gs://oid2019/data/'
    img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                        std=[58.395, 57.12, 57.375],
                        to_rgb=True)
    dataset = get_dataset(
        dict(type=dataset_type,
             ann_file='/home/bo_liu/' + args.ann_file,
             img_prefix=data_root +
             ('val/'
              if args.ann_file == 'seg_val_2844_ann.pkl' else 'OD_test/'),
             img_scale=(1333, 800),
             img_norm_cfg=img_norm_cfg,
             size_divisor=32,
             flip_ratio=0,
             with_mask=True,
             with_label=False,
             test_mode=True))

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)
    # build the model and load checkpoint
    test_cfg = mmcv.ConfigDict(
        dict(
            rpn=dict(nms_across_levels=False,
                     nms_pre=1000,
                     nms_post=1000,
                     max_num=1000,
                     nms_thr=0.7,
                     min_bbox_size=0),
            rcnn=dict(
                score_thr=args.thres,
                # score_thr=0.0,
                nms=dict(type=args.nms_type, iou_thr=0.5),
                max_per_img=args.max_per_img,
                mask_thr_binary=0.5),
            keep_all_stages=False))
    model = build_detector(cfg.model, train_cfg=None, test_cfg=test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('Evaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #11
def main(config,
         work_dir=None,
         resume_from=None,
         validate=False,
         visible_gpus='0,1,2,3',
         gpus=1,
         seed=None,
         launcher='none',
         local_rank=0,
         autoscale_lr=False):

    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(local_rank)

    os.environ['CUDA_VISIBLE_DEVICES'] = visible_gpus

    cfg = Config.fromfile(config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if work_dir is not None:
        cfg.work_dir = work_dir
    if resume_from is not None:
        cfg.resume_from = resume_from
    cfg.gpus = gpus

    if autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if seed is not None:
        logger.info('Set random seed to {}'.format(seed))
        set_random_seed(seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=validate,
                   logger=logger)
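Unlike the other examples, this main takes plain keyword arguments instead of calling parse_args, which suggests a CLI wrapper. One minimal possibility (an assumption, since the original entry point is not shown) is python-fire:

if __name__ == '__main__':
    import fire
    fire.Fire(main)  # e.g. python train.py CONFIG --gpus 2 --validate True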
Example #12
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=datasets[0].CLASSES)

    maybe_init_wandb(cfg)

    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
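The work_dir fallback above derives a default from the config file name; a worked example with an assumed path:

import os.path as osp

config_path = 'configs/faster_rcnn_r50_fpn_1x.py'  # assumed example value
work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(config_path))[0])
# -> './work_dirs/faster_rcnn_r50_fpn_1x'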
Example #13
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    if args.non_inplace:
        convert_non_inplace(model)

    if args.read_zip:
        # cfg.data.train.img_prefix = cfg.data.train.img_prefix[:-1] + '.zip@/' + cfg.data.train.img_prefix.split('/')[-2] + '/'
        # cfg.data.val.img_prefix = cfg.data.val.img_prefix[:-1] + '.zip@/' + cfg.data.val.img_prefix.split('/')[-2] + '/'
        # cfg.data.test.img_prefix = cfg.data.test.img_prefix[:-1] + '.zip@/' + cfg.data.test.img_prefix.split('/')[-2] + '/'
        cfg.data.train.img_prefix = cfg.data.train.img_prefix[:-1] + '.zip@/'
        cfg.data.val.img_prefix = cfg.data.val.img_prefix[:-1] + '.zip@/'
        cfg.data.test.img_prefix = cfg.data.test.img_prefix[:-1] + '.zip@/'

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
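The --read_zip branch above rewrites each image prefix so files are read through a zip archive; what the string surgery does, with an assumed prefix value:

img_prefix = 'data/coco/train2017/'      # assumed example value
img_prefix = img_prefix[:-1] + '.zip@/'  # -> 'data/coco/train2017.zip@/'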
Example #14
def main():
    args = parse_args()

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # Wudi: derive args.out directly from the checkpoint file name and the test image prefix
    if args.horizontal_flip:
        args.out = os.path.join(cfg.work_dir,
                                cfg.data.test.img_prefix.split('/')[-2].replace('_images', '_') +
                                args.checkpoint.split('/')[-1][:-4] + '_horizontal_flip.pkl')
        print('horizontal_flip activated')
    else:
        args.out = os.path.join(cfg.work_dir,
                                cfg.data.test.img_prefix.split('/')[-2].replace('_images', '_') +
                                args.checkpoint.split('/')[-1][:-4] + '.pkl')

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    args.out = '/data/Kaggle/wudi_data/validation_Jan16-09-20.pkl'
    if not os.path.exists(args.out):
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)

        # build the model and load checkpoint
        model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
        # old versions did not save class info in checkpoints, this workaround is
        # for backward compatibility
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            model.CLASSES = dataset.CLASSES

        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show)
        else:
            model = MMDistributedDataParallel(model.cuda())
            outputs = multi_gpu_test(model, data_loader, args.tmpdir)
        mmcv.dump(outputs, args.out)

    else:
        outputs = mmcv.load(args.out)

    if distributed:
        rank, _ = get_dist_info()
        if rank != 0:
            return

    local_rank = args.local_rank
    world_size = args.world_size
    for idx, output in enumerate(outputs):
        #if idx % world_size == local_rank:
        #if output[2]['file_name'].split('/')[-1] == 'ID_0cec00e6a.jpg':
        if True:
            finetune_RT(output, dataset,
                        draw_flag=True,
                        num_epochs=20,
                        loss_grayscale_light=0.02,
                        loss_grayscale_RT=0.02,
                        loss_IoU=0.95,
                        lr=0.05,
                        conf_thresh=0.8,
                        fix_rot=False,
                        tmp_save_dir='/data/Kaggle/wudi_data/tmp_output_grayscale')
Example #15
    def load_network(self):
        train_config = "train_config.py"

        if len(self._train_directory) > 0:
            if not os.path.exists(self._train_directory):
                os.mkdir(self._train_directory)
            train_config = os.path.join(self._train_directory, train_config)

        self.insert_training_params(self._config_file, train_config)

        from mmcv import Config
        self._cfg = Config.fromfile(train_config)

        if self._cfg.get('cudnn_benchmark', False):
            torch.backends.cudnn.benchmark = True

        if self._train_directory is not None:
            self._cfg.work_dir = self._train_directory
            self._groundtruth_store = os.path.join(self._train_directory,
                                                   self._tmp_annotation_file)
            if not os.path.exists(self._train_directory):
                os.mkdir(self._train_directory)
        else:
            self._groundtruth_store = self._tmp_annotation_file

        if self._seed_weights is not None:
            self._cfg.resume_from = self._seed_weights

        if self._gpu_count > 0:
            self._cfg.gpus = self._gpu_count
        else:
            self._cfg.gpus = torch.cuda.device_count()

        if self._cfg.checkpoint_config is not None:
            from mmdet import __version__
            self._cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                                    config=self._cfg.text)

        if self._launcher == 'none':
            self._distributed = False
        else:
            self._distributed = True
            from mmdet.apis import init_dist
            init_dist(self._launcher, **self._cfg.dist_params)

        from mmdet.apis import get_root_logger
        self._logger = get_root_logger(self._cfg.log_level)
        self._logger.info('Distributed training: {}'.format(self._distributed))

        if self._random_seed != 'none':
            self._logger.info('Set random seed to {}'.format(
                self._random_seed))
            from mmdet.apis import set_random_seed
            if isinstance(self._random_seed, int):
                set_random_seed(int(self._random_seed))

        from mmdet.models import build_detector

        if self._cfg.model['pretrained'] is not None:
            if not os.path.exists(self._cfg.model['pretrained']):
                dirname = os.path.dirname(self._config_file)
                relpath = os.path.join(dirname, self._cfg.model['pretrained'])
                if os.path.exists(relpath):
                    self._cfg.model['pretrained'] = relpath

        self._model = build_detector(self._cfg.model,
                                     train_cfg=self._cfg.train_cfg,
                                     test_cfg=self._cfg.test_cfg)
Example #16
def main():
    args = parse_args()

    assert args.out or args.show, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # data_loader = build_dataloader(
    #     dataset,
    #     imgs_per_gpu=1,
    #     workers_per_gpu=cfg.data.workers_per_gpu,
    #     dist=distributed,
    #     shuffle=False)
    #
    # # build the model and load checkpoint
    # model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    # fp16_cfg = cfg.get('fp16', None)
    # if fp16_cfg is not None:
    #     wrap_fp16_model(model)
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # if args.det_ckpt is not None:
    #     print('Loading Detection Models!!!!!!')
    #     det_ckpt = load_checkpoint(model, args.det_ckpt, map_location='cpu')
    # # old versions did not save class info in checkpoints, this workaround is
    # # for backward compatibility
    # if 'CLASSES' in checkpoint['meta']:
    #     model.CLASSES = checkpoint['meta']['CLASSES']
    # else:
    #     model.CLASSES = dataset.CLASSES
    #
    # if not distributed:
    #     model = MMDataParallel(model, device_ids=[0])
    #     outputs = single_gpu_test(model, data_loader, args.show)
    # else:
    #     model = MMDistributedDataParallel(model.cuda())
    #     outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    outputs = mmcv.load(args.out)
    print('Finding video tubes...')
    track_results = finding_video_tubes(outputs, dataset)
    print('Finding video tubes done!')
    # draw_results(track_results, dataset, args.out, cfg)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        # print('\nwriting results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)
        # result_files = results2json(dataset, outputs['bbox_results'], args.out)
        # coco_eval(result_files, ['bbox'], cfg.data.test.ann_file)
        # For tracking
        # result_files = results2json(dataset, track_results['bbox_results'],
        #                             args.out)
        # coco_eval(result_files, ['bbox'], cfg.data.test.ann_file)
        print("Evaluating tracking...")
        mdat_eval(track_results['track_results'], dataset, args.out, cfg)
Example #17
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    ## only eval on the first 20 imgs
    dataset.img_infos = dataset.img_infos[:20]

    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    print('load model from {}'.format(cfg.load_from))
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')

    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES


    def load_ckpt(ncm_model, cal_head):
        print('load cls head {}'.format('{}/{}.pth'.format(cfg.work_dir, cal_head)))
        # epoch = torch.load('{}/{}_epoch.pth'.format(cfg.work_dir, cal_head))
        load_checkpoint(ncm_model, '{}/{}.pth'.format(cfg.work_dir, cal_head))
        # return epoch

    print('use {}'.format(args.cal_head))
    if len(dataset.CLASSES) == 1230:  # lvis
        if '2fc_rand' in args.cal_head:
            calibrated_head = simple2fc().cuda()
        elif '3fc_rand' in args.cal_head or '3fc_ft' in args.cal_head:
            calibrated_head = simple3fc().cuda()
    elif len(dataset.CLASSES) == 80:  # coco
        if '2fc_rand' in args.cal_head:
            calibrated_head = simple2fc(num_classes=81).cuda()
        elif '3fc_rand' in args.cal_head or '3fc_ft' in args.cal_head:
            calibrated_head = simple3fc(num_classes=81).cuda()
    # epoch = load_ckpt(calibrated_head, args.head_ckpt)
    load_ckpt(calibrated_head, args.head_ckpt)
    calibrated_head.eval()


    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, calibrated_head, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        calibrated_head = MMDistributedDataParallel(calibrated_head.cuda())
        outputs = multi_gpu_test(model, data_loader, calibrated_head, args.show, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if len(dataset.CLASSES) == 1230:
            if eval_types:

                if eval_types == ['proposal_fast']:
                    result_file = args.out
                    lvis_eval(result_file, eval_types, dataset.coco)
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(dataset, outputs, args.out, dump=False)
                        print('Starting to evaluate {}'.format(' and '.join(eval_types)))
                        lvis_eval(result_files, eval_types, dataset.lvis)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = args.out + '.{}'.format(name)
                            result_files = results2json(dataset, outputs_,
                                                        result_file)
                            lvis_eval(result_files, eval_types, dataset.coco)
        elif len(dataset.CLASSES) == 80:
            result_files = results2json(dataset, outputs, args.out, dump=False)
            coco_eval(result_files, args.eval, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
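simple2fc and simple3fc are not shown in this example; below is a hypothetical sketch of what such a calibration head could look like. The layer sizes and default class count are guesses, hedged accordingly:

import torch.nn as nn

class simple2fc(nn.Module):
    """Assumed 2-layer MLP calibration head; 1231 = 1230 LVIS classes + bg."""

    def __init__(self, num_classes=1231, in_dim=1024):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(in_dim, in_dim),
            nn.ReLU(inplace=True),
            nn.Linear(in_dim, num_classes))

    def forward(self, x):
        return self.fc(x)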
Example #18
def main():
    args = parse_args()
    assert isinstance(args.input, list)
    N = len(args.input)
    for ii in range(N):
        if args.input[ii] is not None and not args.input[ii].endswith(
            ('.pkl', '.pickle')):
            raise ValueError('The input file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)

    rank, _ = get_dist_info()
    if args.input and rank == 0:
        print('\nresults are {}'.format(args.input))
        root_path = '/'.join(args.out.split('/')[:-1])
        if not os.path.exists(root_path):
            os.makedirs(root_path)
        # mmcv.dump(outputs, args.out)
        outputs = list()
        print("{} models ".format(N))
        for jj in range(N):
            input = mmcv.load(args.input[jj])
            print("{} images".format(len(input)))
            for zz in range(len(input)):
                if jj == 0:
                    outputs.append(input[zz])
                else:
                    assert len(outputs[zz]) == len(input[zz])
                    outputs[zz].extend(input[zz])
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if not args.is_coco:
                # test VisDrone2019
                if args.eval == ['bbox']:
                    print("eval {}".format(args.eval))
                    test_dataset = cfg.data.test
                    eval_visdrone_det(cfg.work_dir, args.out, test_dataset,
                                      args.is_patch, args.show)
            else:
                if eval_types == ['proposal_fast']:
                    result_file = args.out
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    if not isinstance(outputs[0], dict):
                        result_file = args.out + '.json'
                        results2json(dataset, outputs, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = args.out + '.{}.json'.format(name)
                            results2json(dataset, outputs_, result_file)
                            coco_eval(result_file, eval_types, dataset.coco)
Ejemplo n.º 19
0
def main():
    args = parse_args()

    assert args.out or args.show, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.workers == 0:
        args.workers = cfg.data.workers_per_gpu

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed)

    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate'
        ]
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression'
        ]
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = [
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
        ]
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = [
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
        ]
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions

    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for sev_i, corruption_severity in enumerate(args.severities):
            # evaluate severity 0 (= no corruption) only once
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue

            test_data_cfg = copy.deepcopy(cfg.data.test)
            # assign corruption and severity
            if corruption_severity > 0:
                corruption_trans = dict(type='Corrupt',
                                        corruption=corruption,
                                        severity=corruption_severity)
                # TODO: hard coded "1", we assume that the first step is
                # loading images, which needs to be fixed in the future
                test_data_cfg['pipeline'].insert(1, corruption_trans)

            # print info
            print('\nTesting {} at severity {}'.format(corruption,
                                                       corruption_severity))

            # build the dataloader
            # TODO: support multiple images per gpu
            #       (only minor changes are needed)
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(dataset,
                                           imgs_per_gpu=1,
                                           workers_per_gpu=args.workers,
                                           dist=distributed,
                                           shuffle=False)

            # build the model and load checkpoint
            model = build_detector(cfg.model,
                                   train_cfg=None,
                                   test_cfg=cfg.test_cfg)
            fp16_cfg = cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(model,
                                         args.checkpoint,
                                         map_location='cpu')
            # old versions did not save class info in checkpoints,
            # this workaround is for backward compatibility
            if 'CLASSES' in checkpoint['meta']:
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES

            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                outputs = single_gpu_test(model, data_loader, args.show)
            else:
                model = MMDistributedDataParallel(model.cuda())
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)

            rank, _ = get_dist_info()
            if args.out and rank == 0:
                eval_results_filename = (osp.splitext(args.out)[0] +
                                         '_results' +
                                         osp.splitext(args.out)[1])
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                test_dataset = mmcv.runner.obj_from_dict(
                                    cfg.data.test, datasets)
                                mean_ap, eval_results = \
                                    voc_eval_with_return(
                                        args.out, test_dataset,
                                        args.iou_thr, args.summaries)
                                aggregated_results[corruption][
                                    corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation \
                                is supported for pascal voc')
                else:
                    if eval_types:
                        print('Starting evaluate {}'.format(
                            ' and '.join(eval_types)))
                        if eval_types == ['proposal_fast']:
                            result_files = args.out
                        else:
                            if not isinstance(outputs[0], dict):
                                result_files = results2json(
                                    dataset, outputs, args.out)
                            else:
                                for name in outputs[0]:
                                    print('\nEvaluating {}'.format(name))
                                    outputs_ = [out[name] for out in outputs]
                                    result_file = args.out + '.{}'.format(name)
                                    result_files = results2json(
                                        dataset, outputs_, result_file)
                        eval_results = coco_eval_with_return(
                            result_files, eval_types, dataset.coco)
                        aggregated_results[corruption][
                            corruption_severity] = eval_results
                    else:
                        print('\nNo task was selected for evaluation;'
                              '\nUse --eval to select a task')

            # save results after each evaluation
            mmcv.dump(aggregated_results, eval_results_filename)

    # print final results
    print('\nAggregated results:')
    prints = args.final_prints
    aggregate = args.final_prints_aggregate

    if cfg.dataset_type == 'VOCDataset':
        get_results(eval_results_filename,
                    dataset='voc',
                    prints=prints,
                    aggregate=aggregate)
    else:
        get_results(eval_results_filename,
                    dataset='coco',
                    prints=prints,
                    aggregate=aggregate)
Ejemplo n.º 20
0
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.imgs_per_gpu > 0:
        cfg.data.imgs_per_gpu = args.imgs_per_gpu

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    if hasattr(cfg, 'fuse') and cfg.fuse:
        train_dataset = get_dataset(cfg.datasets[0].train)
        if cfg.checkpoint_config is not None:
            # save mmdet version, config file content and class names in
            # checkpoints as meta data
            cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                              config=cfg.text,
                                              CLASSES=train_dataset.CLASSES)
        # add an attribute for visualization convenience
        model.CLASSES = train_dataset.CLASSES
        datasets = list()
        for flow in cfg.workflow:
            mode, epochs = flow
            cur_datasets = list()
            for dataset_cfg in cfg.datasets:
                if hasattr(dataset_cfg, mode):
                    cur_datasets.append(get_dataset(getattr(dataset_cfg,
                                                            mode)))
            datasets.append(ConcatDataset(cur_datasets))
        val_dataset = None
        if cfg.data.train.get('val_every', None):
            val_dataset = list()
            for dataset_cfg in cfg.datasets:
                if hasattr(dataset_cfg, 'val'):
                    val_dataset.append(get_dataset(dataset_cfg.val))
            val_dataset = ConcatDataset(val_dataset)
        if hasattr(cfg.model,
                   'smpl_head') and cfg.model.smpl_head.loss_cfg.get(
                       'adversarial_cfg', False):
            train_adv_smpl_detector(
                model,
                datasets,
                cfg,
                distributed=distributed,
                validate=args.validate,
                logger=logger,
                create_dummy=args.create_dummy,
                val_dataset=val_dataset,
                load_pretrain=args.load_pretrain,
            )
        else:
            train_smpl_detector_fuse(model,
                                     datasets,
                                     cfg,
                                     distributed=distributed,
                                     validate=args.validate,
                                     logger=logger,
                                     create_dummy=args.create_dummy,
                                     val_dataset=val_dataset,
                                     load_pretrain=args.load_pretrain)
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
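        # e.g. a base lr of 0.02 tuned for 8 GPUs becomes 0.02 * 4 / 8 = 0.01
        # when cfg.gpus is 4 (the 0.02 base value here is only illustrative)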

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES

    tune_part = cfg.get('selectp', 0)
    if tune_part == 1:
        print('Train fc_cls only.')
        model = select_training_param(model)
    elif tune_part == 2:
        print('Train bbox head only.')
        model = select_head(model)
    elif tune_part == 3:
        print('Train cascade fc_cls only.')
        model = select_cascade_cls_params(model)
    elif tune_part == 4:
        print('Train bbox and mask head.')
        model = select_mask_params(model)
    else:
        print('Train all params.')

    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)
def main():
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuid

    img_dir = args.img_dir
    out_dir = args.out_dir
    batch_size = args.batch_size

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    if args.img_dir != '':
        file_list = common.load_filepaths(args.img_dir,
                                          suffix=('.jpg', '.png', '.jpeg'),
                                          recursive=True)
    elif args.img_list != '':
        file_list = parse_testfile(args.img_list)
    else:
        raise "Both img_dir and img_list is empty."

    dataset = FilesDataset(file_list, cfg.test_pipeline)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=batch_size,
                                   workers_per_gpu=batch_size,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')

    model = reweight_cls(model, args.tau).cuda()

    model = MMDataParallel(model, device_ids=[0])

    model.eval()
    count = 0
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            # bbox_results, segm_results
            results = model(return_loss=False, rescale=True, **data)

        # batch
        #for result  in results:
        #    file_path = file_list[count]
        #    save_name = file_path.replace('/home/songbai.xb/workspace/projects/TAO/data/TAO/frames/val/', '')
        #    save_path = os.path.join(out_dir, save_name)
        #    common.makedirs(os.path.dirname(save_path))
        #    save_in_tao_format(result, save_path)
        #    count += 1
        file_path = file_list[i]
        save_name = file_path.replace(
            '/home/songbai.xb/workspace/projects/TAO/data/TAO/frames/val/', '')
        save_name = save_name.replace('.jpg', '.pkl').replace('.jpeg', '.pkl')
        save_path = os.path.join(out_dir, save_name)
        common.makedirs(os.path.dirname(save_path))
        save_in_tao_format(results[0], save_path)
Ejemplo n.º 23
0
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if not os.path.exists(cfg.work_dir):
        os.makedirs(cfg.work_dir)
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    train_dataset, dataset_dicts = get_dataset(cfg.data.train)

    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.text,
            CLASSES=train_dataset.CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = train_dataset.CLASSES
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        dataset_dicts=dataset_dicts)
Ejemplo n.º 24
0
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # update gpu num
    if dist.is_initialized():
        cfg.gpus = dist.get_world_size()

    cfg.data.imgs_per_gpu = int(cfg.data.imgs_per_gpu * args.scale_bs)
    cfg.data.workers_per_gpu = int(cfg.data.workers_per_gpu * args.scale_bs)
    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer[
            'lr'] * cfg.gpus / 8 * cfg.data.imgs_per_gpu / 2
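        # e.g. an illustrative base lr of 0.02 (tuned for 8 GPUs x 2 imgs/gpu)
        # becomes 0.02 * 16 / 8 * 4 / 2 = 0.08 on 16 GPUs with 4 imgs per gpu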

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # log cfg
    logger.info('training config:{}\n'.format(pprint.pformat(cfg._cfg_dict)))

    # log git hash
    logger.info('git hash: {}'.format(get_git_hash()))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        datasets.append(build_dataset(cfg.data.val))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=datasets[0].CLASSES,
                                          git_hash=get_git_hash())

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    try:
        logger.info(summary(model, cfg))
    except RuntimeError:
        logger.info('RuntimeError during summary')
        logger.info(str(model))

    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model,
                   datasets,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)

    logger.info('git hash: {}'.format(get_git_hash()))
Ejemplo n.º 25
0
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        if args.job_name == '':
            args.job_name = 'output'
        else:
            args.job_name = time.strftime("%Y%m%d-%H%M%S-") + args.job_name
        cfg.work_dir = osp.join(args.work_dir, args.job_name)
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = '%d' % args.port
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    # logger = get_root_logger(cfg.log_level)
    utils.create_work_dir(cfg.work_dir)
    logger = utils.get_root_logger(cfg.work_dir, cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('Retrain configs: \n' + str(cfg))
    logger.info('Retrain args: \n' + str(args))

    if cfg.checkpoint_config is not None:
        # save mmdet version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text)

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    utils.set_data_path(args.data_path, cfg.data)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    if not hasattr(model, 'neck'):
        model.neck = None

    logger.info('Backbone net config: \n' + cfg.model.backbone.net_config)
    utils.get_network_madds(model.backbone, model.neck, model.bbox_head,
                            cfg.image_size_madds, logger)

    if cfg.use_syncbn:
        model = utils.convert_sync_batchnorm(model)

    train_dataset = get_dataset(cfg.data.train)
    train_detector(model,
                   train_dataset,
                   cfg,
                   distributed=distributed,
                   validate=args.validate,
                   logger=logger)

    logger.info('Backbone net config: \n' + cfg.model.backbone.net_config)
    utils.get_network_madds(model.backbone, model.neck, model.bbox_head,
                            cfg.image_size_madds, logger)
Ejemplo n.º 26
0
def main():  # noqa: C901
    """Start test."""
    args = parse_args()

    if args.work_dir is not None:
        mmcv.mkdir_or_exist(args.work_dir)
        if args.tmpdir is None:
            args.tmpdir = osp.join(args.work_dir, 'tmp_dir')
            mmcv.mkdir_or_exist(args.tmpdir)
        if args.out is None:
            args.out = osp.join(args.work_dir, 'result.pkl')
        if args.checkpoint is None:
            args.checkpoint = osp.join(args.work_dir, 'latest.pth')
        fps_file = osp.join(args.work_dir, 'fps.pkl')
        mAP_file = osp.join(args.work_dir, 'mAP.pkl')
    else:
        mAP_file, fps_file = None, None
        if args.checkpoint is None:
            raise ValueError('Checkpoint file cannot be empty.')

    if args.config.endswith(".json"):
        load_method = mmcv.load
        mmcv.load = json_to_dict
        cfg = mmcv.Config.fromfile(args.config)
        mmcv.load = load_method
    else:
        cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.dist:
        init_dist('pytorch', **cfg.dist_params)

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=True,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    model.CLASSES = dataset.CLASSES
    if args.dist:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
    else:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, fps_file)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            assert not isinstance(outputs[0], dict)
            result_files = results2json(dataset, outputs, args.out)
            coco_eval(result_files,
                      eval_types,
                      dataset.coco,
                      dump_file=mAP_file)
Ejemplo n.º 27
0
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # per_set_img_num = int(len(dataset.img_infos)/args.total_set_num)
    # this_set_start = per_set_img_num*args.set
    # if args.set < args.total_set_num-1:
    #     dataset.img_infos = dataset.img_infos[this_set_start: this_set_start+per_set_img_num]
    # else:
    #     dataset.img_infos = dataset.img_infos[this_set_start:]
    # dataset.img_infos = dataset.img_infos[:100]

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    # print('load from {}'.format(args.checkpoint))
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    print('load model from {}'.format(cfg.load_from))
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES


    # load longtail classifier

    # def load_ncm_ckpt(ncm_model):
    #     if not os.path.exists('./simple3fc.pth'):
    #         print('start training from 0 epoch')
    #         return 0
    #     else:
    #         epoch = torch.load('./simple3fc_epoch.pth')
    #         load_checkpoint(ncm_model, './simple3fc.pth')
    #         return epoch

    # def load_ncm_ckpt(ncm_model):
    #     if not os.path.exists('./simple3fc.pth'):
    #         print('start training from 0 epoch')
    #         return 0
    #     else:
    #         epoch = torch.load('./finetune_simple3fc_epoch.pth')
    #         load_checkpoint(ncm_model, './finetune_simple3fc.pth')
    #         return epoch

    def load_ncm_ckpt(ncm_model):
        if not os.path.exists(
                './exp_randominit_negpossame_finetune_simple3fc_stage2_epoch.pth'
        ):
            print('start training from 0 epoch')
            return 0
        else:
            epoch = torch.load(
                './exp_randominit_negpossame_finetune_simple3fc_stage2_epoch.pth'
            )
            load_checkpoint(
                ncm_model,
                'exp_randominit_negpossame_finetune_simple3fc_stage2.pth')
            return epoch

    # def load_simple2fc_stage0_ckpt(ncm_model):
    #
    #     epoch = torch.load('./finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage0_epoch.pth')
    #     load_checkpoint(ncm_model, './finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage0.pth')
    #     return epoch
    #
    # def load_simple2fc_stage1_ckpt(ncm_model):
    #
    #     epoch = torch.load('./finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage1_epoch.pth')
    #     load_checkpoint(ncm_model, './finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage1.pth')
    #     return epoch
    #
    # def load_simple2fc_stage2_ckpt(ncm_model):
    #
    #     epoch = torch.load('./finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage2_epoch.pth')
    #     load_checkpoint(ncm_model, './finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage2.pth')
    #     return epoch
    #
    #
    # olongtail_model_stage0 = simple2fc().cuda()
    # epoch = load_simple2fc_stage0_ckpt(olongtail_model_stage0)
    # print('load model epoch {}'.format(epoch))
    # olongtail_model_stage0.eval()
    #
    # olongtail_model_stage1 = simple2fc().cuda()
    # epoch = load_simple2fc_stage1_ckpt(olongtail_model_stage1)
    # olongtail_model_stage1.eval()
    #
    # olongtail_model_stage2 = simple2fc().cuda()
    # epoch = load_simple2fc_stage2_ckpt(olongtail_model_stage2)
    # olongtail_model_stage2.eval()
    #
    # olongtail_model_all_stage = [olongtail_model_stage0, olongtail_model_stage1, olongtail_model_stage2]

    prefix = '3fc_ft'

    def load_stage0_ckpt(ncm_model):
        # epoch = torch.load('./work_dirs/htc/{}_stage0_epoch.pth'.format(prefix))
        load_checkpoint(ncm_model,
                        './work_dirs/htc/{}_stage0.pth'.format(prefix))
        # return epoch

    def load_stage1_ckpt(ncm_model):
        # epoch = torch.load('./work_dirs/htc/{}_stage1_epoch.pth'.format(prefix))
        load_checkpoint(ncm_model,
                        './work_dirs/htc/{}_stage1.pth'.format(prefix))
        # return epoch

    def load_stage2_ckpt(ncm_model):
        # epoch = torch.load('./work_dirs/htc/{}_stage2_epoch.pth'.format(prefix))
        load_checkpoint(ncm_model,
                        './work_dirs/htc/{}_stage2.pth'.format(prefix))
        # return epoch

    olongtail_model_stage0 = simple3fc().cuda()
    epoch = load_stage0_ckpt(olongtail_model_stage0)
    # print('load model epoch {}'.format(epoch))
    olongtail_model_stage0.eval()

    olongtail_model_stage1 = simple3fc().cuda()
    epoch = load_stage1_ckpt(olongtail_model_stage1)
    olongtail_model_stage1.eval()

    olongtail_model_stage2 = simple3fc().cuda()
    epoch = load_stage2_ckpt(olongtail_model_stage2)
    olongtail_model_stage2.eval()

    olongtail_model_all_stage = [
        olongtail_model_stage0, olongtail_model_stage1, olongtail_model_stage2
    ]

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader,
                                  olongtail_model_all_stage, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, olongtail_model_all_stage,
                                 args.show, args.tmpdir)

    # mmcv.dump(outputs, args.out)
    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:

            if eval_types == ['proposal_fast']:
                result_file = args.out
                lvis_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset,
                                                outputs,
                                                args.out,
                                                dump=False)
                    print('Starting evaluate {}'.format(
                        ' and '.join(eval_types)))
                    lvis_eval(result_files, eval_types, dataset.lvis)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        lvis_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Ejemplo n.º 28
0
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    datasets = [build_dataset(cfg.data.train)]
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    if cfg.load_from:
        checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')
        model.CLASSES = datasets[0].CLASSES
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                          config=cfg.text,
                                          CLASSES=datasets[0].CLASSES)

    data_loader = build_dataloader(datasets[0],
                                   imgs_per_gpu=cfg.data.imgs_per_gpu,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   num_gpus=cfg.gpus,
                                   dist=False,
                                   shuffle=False)
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    model.train()
    if hasattr(model, 'module'):
        model_load = model.module
    else:
        model_load = model
    optimizer_all = obj_from_dict(cfg.optimizer, torch.optim,
                                  dict(params=model_load.parameters()))
    optimizer = obj_from_dict(cfg.optimizer, torch.optim,
                              dict(params=model_load.agg.parameters()))
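    # `optimizer` covers only the aggregation module's parameters, while
    # `optimizer_all` covers the whole model; the training loop below steps
    # the former for the first 10 epochs and the latter afterwards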
    check_video = None
    start_epoch = 0
    meta = None
    epoch = start_epoch
    vis = visdom.Visdom(env='fuse_c')
    loss_cls_window = vis.line(X=torch.zeros((1, )).cpu(),
                               Y=torch.zeros((1)).cpu(),
                               opts=dict(xlabel='minibatches',
                                         ylabel='Loss of classification',
                                         title='Loss of classification ',
                                         legend=['Loss of classification']))

    loss_init_window = vis.line(X=torch.zeros((1, )).cpu(),
                                Y=torch.zeros((1)).cpu(),
                                opts=dict(xlabel='minibatches',
                                          ylabel='Loss of init reppoint',
                                          title='Loss of init reppoint',
                                          legend=['Loss of init reppoint']))
    loss_refine_window = vis.line(X=torch.zeros((1, )).cpu(),
                                  Y=torch.zeros((1)).cpu(),
                                  opts=dict(xlabel='minibatches',
                                            ylabel='Loss of refine reppoint',
                                            title='Loss of refine reppoint',
                                            legend=['Loss of refine reppoint'
                                                    ]))
    loss_total_window = vis.line(X=torch.zeros((1, )).cpu(),
                                 Y=torch.zeros((1)).cpu(),
                                 opts=dict(xlabel='minibatches',
                                           ylabel='Loss all',
                                           title='Loss all',
                                           legend=['Loss all']))
    loss_trans_window = vis.line(X=torch.zeros((1, )).cpu(),
                                 Y=torch.zeros((1)).cpu(),
                                 opts=dict(xlabel='minibatches',
                                           ylabel='Loss trans',
                                           title='Loss trans',
                                           legend=['Loss trans']))
    training_sample = 0
    for e in range(cfg.total_epochs):
        i = 0
        if epoch % 1 == 0:
            if meta is None:
                meta = dict(epoch=epoch + 1, iter=i)
            else:
                meta.update(epoch=epoch + 1, iter=i)
            checkpoint = {
                'meta': meta,
                'state_dict': weights_to_cpu(model.state_dict())
            }
            print()
            if optimizer_all is not None:
                checkpoint['optimizer'] = optimizer_all.state_dict()
            if not os.path.exists(cfg.work_dir):
                os.mkdir(cfg.work_dir)
            filename = os.path.join(cfg.work_dir, 'epoch_{}.pth'.format(epoch))
            torch.save(checkpoint, filename)
        for i, data in enumerate(data_loader):
            # if len(data['gt_bboxes'].data[0][0]) == 0:
            #	 continue
            optimizer.zero_grad()
            optimizer_all.zero_grad()
            reference_id = (data['img_meta'].data[0][0]['filename'].split('/')
                            [-1]).split('.')[0]
            video_id = data['img_meta'].data[0][0]['filename'].split('/')[-2]
            print('start image:', data['img_meta'].data[0][0]['filename'])
            print('end image:', data['img_meta'].data[-1][-1]['filename'])
            # print(len(data['img'].data),len(data['img'].data[0]))
            # exit()
            for m in range(len(data['img_meta'].data)):
                start_name = data['img_meta'].data[m][0]['filename'].split(
                    '/')[-2]
                # print(data['img_meta'].data[m][0]['filename'])
                for n in range(len(data['img_meta'].data[m])):
                    check_name = data['img_meta'].data[m][n]['filename'].split(
                        '/')[-2]
                    # print(data['img_meta'].data[m][n]['filename'])
                    if start_name != check_name:
                        print('end of video')
                        data['img_meta'].data[m][n] = data['img_meta'].data[m][
                            0]
                        data['gt_bboxes'].data[m][n] = data['gt_bboxes'].data[
                            m][0]
                        data['gt_labels'].data[m][n] = data['gt_labels'].data[
                            m][0]
                        data['img'].data[m][n] = data['img'].data[m][0]

            # losses,loss_trans=model(return_loss=True, **data)
            losses = model(return_loss=True, **data)
            # print(losses)
            if isinstance(losses, list):

                loss_all = []
                log = []
                for p in range(len(losses)):
                    # print(p)
                    # print(losses[p])
                    loss, log_var = parse_losses(losses[p])
                    loss_all.append(loss)
                    log.append(log_var)
            else:
                losses, log_vars = parse_losses(losses)
            if isinstance(losses, list):
                losses = loss_all[0] + 0.5 * loss_all[1] + 0.5 * loss_all[
                    2] + 0.5 * loss_all[3]
                losses = losses / 2.5
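                # the weights above sum to 1 + 0.5 * 3 = 2.5, so dividing by
                # 2.5 renormalizes the combined loss to the scale of one term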
            # print(loss_trans.shape)
            # loss_trans=torch.mean(loss_trans)*0.1
            # losses=losses+loss_trans
            # if losses.item()>10:
            #	 losses.backward(retain_graph=False)
            #	 optimizer.zero_grad()
            #	 continue

            losses.backward()
            if epoch < 10:
                optimizer.step()
            else:
                optimizer_all.step()
            # if training_sample<700:
            # 	optimizer.step()
            # else:
            # 	optimizer_all.step()
            # print('transform kernel check',model.module.agg.trans_kernel.sum().item())
            log_vars = log[0]

            vis.line(X=torch.ones(1).cpu() * training_sample,
                     Y=(log_vars['loss_cls']) * torch.ones(1).cpu(),
                     win=loss_cls_window,
                     update='append')
            vis.line(X=torch.ones(1).cpu() * training_sample,
                     Y=(log_vars['loss_pts_init']) * torch.ones(1).cpu(),
                     win=loss_init_window,
                     update='append')
            vis.line(X=torch.ones(1).cpu() * training_sample,
                     Y=(log_vars['loss_pts_refine']) * torch.ones(1).cpu(),
                     win=loss_refine_window,
                     update='append')
            vis.line(X=torch.ones(1).cpu() * training_sample,
                     Y=(losses).item() * torch.ones(1).cpu(),
                     win=loss_total_window,
                     update='append')
            # vis.line(
            #		 X=torch.ones(1).cpu() * training_sample,
            #		 Y=loss_trans.item() * torch.ones(1).cpu(),
            #		 win=loss_trans_window,
            #		 update='append')

            print('agg')
            print('epoch:', epoch, 'index:', i, 'video_id:', video_id,
                  'reference_id:', reference_id,
                  'loss_cls:', log_vars['loss_cls'],
                  'loss_init_box:', log_vars['loss_pts_init'],
                  'loss_refine_box:', log_vars['loss_pts_refine'])
            log_vars = log[1]
            print('refer')
            print('epoch:', epoch, 'index:', i, 'video_id:', video_id,
                  'reference_id:', reference_id,
                  'loss_cls:', log_vars['loss_cls'],
                  'loss_init_box:', log_vars['loss_pts_init'],
                  'loss_refine_box:', log_vars['loss_pts_refine'])
            log_vars = log[2]
            print('support')
            print('epoch:', epoch, 'index:', i, 'video_id:', video_id,
                  'reference_id:', reference_id,
                  'loss_cls:', log_vars['loss_cls'],
                  'loss_init_box:', log_vars['loss_pts_init'],
                  'loss_refine_box:', log_vars['loss_pts_refine'])
            training_sample += 1
            # if i % 300 == 0:
            # 	if meta is None:
            # 		meta = dict(epoch=epoch + 1, iter=i)
            # 	else:
            # 		meta.update(epoch=epoch + 1, iter=i)
            # 	checkpoint = {
            # 		'meta': meta,
            # 		'state_dict': weights_to_cpu(model.state_dict())
            # 	}

            # 	if optimizer_all is not None:
            # 		checkpoint['optimizer'] = optimizer_all.state_dict()
            # 	if not os.path.exists(cfg.work_dir):
            # 		os.mkdir(cfg.work_dir)
            # 	filename=os.path.join(cfg.work_dir,'epoch_{}_{}.pth'.format(epoch,i))
            # 	torch.save(checkpoint,filename)
        epoch += 1
Ejemplo n.º 29
0
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # split the test set into shards so each job evaluates one subset
    per_set_img_num = int(len(dataset.img_infos) / args.total_set_num)
    this_set_start = per_set_img_num * args.set
    # uncomment to only eval on the first 100 imgs
    # dataset.img_infos = dataset.img_infos[:100]
    dataset.img_infos = dataset.img_infos[this_set_start:this_set_start +
                                          per_set_img_num]

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    print('load model from {}'.format(cfg.load_from))
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES


    # use retrained cls head

    if hasattr(dataset, 'coco'):
        if '2fc_rand' in args.cal_head:
            calibrated_head = simple2fc(num_classes=81).cuda()
        elif '3fc_rand' in args.cal_head or '3fc_ft' in args.cal_head:
            calibrated_head = simple3fc(num_classes=81).cuda()

    elif hasattr(dataset, 'lvis'):
        if '2fc_rand' in args.cal_head:
            calibrated_head = simple2fc(num_classes=1231).cuda()
        elif '3fc_rand' in args.cal_head or '3fc_ft' in args.cal_head:
            calibrated_head = simple3fc(num_classes=1231).cuda()

    def load_ckpt(ncm_model, cal_head):
        print('load cls head {}'.format('{}/{}.pth'.format(
            cfg.work_dir, cal_head)))
        epoch = torch.load('{}/{}_epoch.pth'.format(cfg.work_dir, cal_head))
        load_checkpoint(ncm_model, '{}/{}.pth'.format(cfg.work_dir, cal_head))
        return epoch

    epoch = load_ckpt(calibrated_head, args.head_ckpt)
    calibrated_head.eval()

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, calibrated_head,
                                  build_dataset(cfg.data.train), args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    mmcv.dump(outputs, args.out)
def main():
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuid

    img_dir = args.img_dir
    out_dir = args.out_dir
    batch_size = args.batch_size

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    file_list = common.load_filepaths(img_dir,
                                      suffix=('.jpg', '.png', '.jpeg'),
                                      recursive=True)
    print(file_list[:10])
    print('imgs: ', len(file_list))
    #
    print(file_list[0].replace(img_dir, ''))

    dataset = FilesDataset(file_list, cfg.test_pipeline)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=batch_size,
                                   workers_per_gpu=batch_size,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')

    model = reweight_cls(model, args.tau)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, False, cfg)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
        print(type(outputs))
        print(len(outputs))
        print(len(outputs[0]))

    exit()
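    # the early exit above is left in (apparently for debugging), so the
    # saving loop below is unreachable as written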
    # save outputs
    for file_path, mmdet_ret in zip(file_list, outputs):
        save_name = file_path.replace(img_dir, '')
        save_path = os.path.join(out_dir, save_name)
        common.makedirs(os.path.dirname(save_path))

        save_in_tao_format(mmdet_ret, save_path)
Ejemplo n.º 31
0
  def set_configuration( self, cfg_in ):
    cfg = self.get_configuration()
    cfg.merge_config( cfg_in )

    self._config_file = str( cfg.get_value( "config_file" ) )
    self._seed_weights = str( cfg.get_value( "seed_weights" ) )
    self._train_directory = str( cfg.get_value( "train_directory" ) )
    self._output_directory = str( cfg.get_value( "output_directory" ) )
    self._gpu_count = int( cfg.get_value( "gpu_count" ) )
    self._integer_labels = strtobool( cfg.get_value( "integer_labels" ) )
    self._launcher = str( cfg.get_value( "launcher" ) )
    self._validate = strtobool( cfg.get_value( "validate" ) )

    self._training_data = []

    from mmcv import Config
    self._cfg = Config.fromfile( self._config_file )

    if self._cfg.get( 'cudnn_benchmark', False ):
      torch.backends.cudnn.benchmark = True

    if self._train_directory is not None:
      self._cfg.work_dir = self._train_directory
      self._groundtruth_store = os.path.join(
        self._train_directory, self._tmp_annotation_file )
      if not os.path.exists( self._train_directory ):
        os.mkdir( self._train_directory )
    else:
      self._groundtruth_store = self._tmp_annotation_file

    if self._seed_weights is not None:
      self._cfg.resume_from = self._seed_weights

    if self._gpu_count > 0:
      self._cfg.gpus = self._gpu_count
    else:
      self._cfg.gpus = torch.cuda.device_count()

    if self._cfg.checkpoint_config is not None:
      from mmdet import __version__
      self._cfg.checkpoint_config.meta = dict(
        mmdet_version=__version__, config=self._cfg.text )

    if self._launcher == 'none':
      self._distributed = False
    else:
      self._distributed = True
      from mmdet.apis import init_dist
      init_dist( self._launcher, **self._cfg.dist_params )

    from mmdet.apis import get_root_logger
    self._logger = get_root_logger( self._cfg.log_level )
    self._logger.info( 'Distributed training: {}'.format( self._distributed ) )

    if self._random_seed is not "none":
      logger.info( 'Set random seed to {}'.format( self._random_seed ) )
      from mmdet.apis import set_random_seed
      set_random_seed( int( self._random_seed ) )

    from mmdet.models import build_detector

    self._model = build_detector(
      self._cfg.model, train_cfg=self._cfg.train_cfg, test_cfg=self._cfg.test_cfg )