def test_two_stage_forward(cfg_file):
    model, train_cfg, test_cfg = _get_detector_cfg(cfg_file)
    model['pretrained'] = None

    from mmdet.models import build_detector
    detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)

    input_shape = (1, 3, 256, 256)

    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    gt_masks = mm_inputs['gt_masks']
    losses = detector.forward(imgs,
                              img_metas,
                              gt_bboxes=gt_bboxes,
                              gt_labels=gt_labels,
                              gt_masks=gt_masks,
                              return_loss=True)
    assert isinstance(losses, dict)
    from mmdet.apis.train import parse_losses
    total_loss = parse_losses(losses)[0].requires_grad_(True)
    assert float(total_loss.item()) > 0
    total_loss.backward()

    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    gt_masks = mm_inputs['gt_masks']
    losses = detector.forward(imgs,
                              img_metas,
                              gt_bboxes=gt_bboxes,
                              gt_labels=gt_labels,
                              gt_masks=gt_masks,
                              return_loss=True)
    assert isinstance(losses, dict)
    from mmdet.apis.train import parse_losses
    total_loss = parse_losses(losses)[0].requires_grad_(True)
    assert float(total_loss.item()) > 0
    total_loss.backward()

    # Test forward test
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      return_loss=False)
            batch_results.append(result)
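
All of the test snippets on this page call a `_demo_mm_inputs` helper that is not shown here. Below is a minimal sketch of what such a helper could look like, reconstructed from the keys and call signature used above; the real helper in mmdetection's test_forward.py may differ in detail (for example, in how `gt_masks` are wrapped).

import numpy as np
import torch


def _demo_mm_inputs(input_shape=(1, 3, 300, 300), num_items=None, num_classes=10):
    # Hypothetical reconstruction for illustration only.
    (N, C, H, W) = input_shape
    rng = np.random.RandomState(0)
    imgs = rng.rand(*input_shape)

    img_metas = [{
        'img_shape': (H, W, C),
        'ori_shape': (H, W, C),
        'pad_shape': (H, W, C),
        'filename': '<demo>.png',
        'scale_factor': 1.0,
        'flip': False,
    } for _ in range(N)]

    gt_bboxes, gt_labels, gt_masks = [], [], []
    for batch_idx in range(N):
        num_boxes = 3 if num_items is None else num_items[batch_idx]
        # Random boxes in xyxy format, clipped to the image.
        cx, cy, bw, bh = rng.rand(num_boxes, 4).T
        tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
        tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
        br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
        br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
        boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
        gt_bboxes.append(torch.FloatTensor(boxes))
        gt_labels.append(torch.LongTensor(rng.randint(1, num_classes, size=num_boxes)))
        # Plain uint8 masks; the real helper may use a dedicated mask container.
        gt_masks.append(rng.randint(0, 2, size=(num_boxes, H, W), dtype=np.uint8))

    return {
        'imgs': torch.FloatTensor(imgs),
        'img_metas': img_metas,
        'gt_bboxes': gt_bboxes,
        'gt_labels': gt_labels,
        'gt_masks': gt_masks,
    }

The `num_items` list controls how many ground-truth boxes each image in the batch gets, which is how the tests above produce the empty-truth case with `num_items=[0]`.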
Example #2
def test_faster_rcnn_ohem_forward():
    try:
        from torchvision import _C as C  # NOQA
    except ImportError:
        import pytest
        raise pytest.skip('requires torchvision on cpu')

    model, train_cfg, test_cfg = _get_detector_cfg(
        'faster_rcnn_ohem_r50_fpn_1x.py')
    model['pretrained'] = None
    # torchvision roi align supports CPU
    model['bbox_roi_extractor']['roi_layer']['use_torchvision'] = True

    from mmdet.models import build_detector
    detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)

    input_shape = (1, 3, 256, 256)

    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    from mmdet.apis.train import parse_losses
    total_loss = float(parse_losses(losses)[0].item())
    assert total_loss > 0

    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    from mmdet.apis.train import parse_losses
    total_loss = float(parse_losses(losses)[0].item())
    assert total_loss > 0
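
Every example here imports `parse_losses` from `mmdet.apis.train` and relies on two facts about it: it returns a `(loss, log_vars)` pair, and the first element is a single scalar tensor suitable for `backward()`. A sketch consistent with that usage (not the verbatim mmdet implementation) is:

from collections import OrderedDict

import torch


def parse_losses(losses):
    # Sketch: reduce a dict of per-head losses to one scalar plus log values.
    log_vars = OrderedDict()
    for name, value in losses.items():
        if isinstance(value, torch.Tensor):
            log_vars[name] = value.mean()
        elif isinstance(value, list):
            log_vars[name] = sum(v.mean() for v in value)
        else:
            raise TypeError('{} is not a tensor or list of tensors'.format(name))

    # Only keys containing 'loss' contribute to the backpropagated total.
    loss = sum(v for k, v in log_vars.items() if 'loss' in k)
    log_vars['loss'] = loss
    log_vars = OrderedDict((k, v.item()) for k, v in log_vars.items())
    return loss, log_vars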
Example #3
File: test_forward.py  Project: zyg11/TSD
def test_cascade_forward():
    try:
        from torchvision import _C as C  # NOQA
    except ImportError:
        import pytest

        raise pytest.skip("requires torchvision on cpu")

    model, train_cfg, test_cfg = _get_detector_cfg("cascade_rcnn_r50_fpn_1x.py")
    model["pretrained"] = None
    # torchvision roi align supports CPU
    model["bbox_roi_extractor"]["roi_layer"]["use_torchvision"] = True

    from mmdet.models import build_detector

    detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)

    input_shape = (1, 3, 256, 256)

    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop("imgs")
    img_metas = mm_inputs.pop("img_metas")
    gt_bboxes = mm_inputs["gt_bboxes"]
    gt_labels = mm_inputs["gt_labels"]
    losses = detector.forward(
        imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, return_loss=True
    )
    assert isinstance(losses, dict)
    from mmdet.apis.train import parse_losses

    total_loss = float(parse_losses(losses)[0].item())
    assert total_loss > 0

    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop("imgs")
    img_metas = mm_inputs.pop("img_metas")
    gt_bboxes = mm_inputs["gt_bboxes"]
    gt_labels = mm_inputs["gt_labels"]
    losses = detector.forward(
        imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, return_loss=True
    )
    assert isinstance(losses, dict)
    from mmdet.apis.train import parse_losses

    total_loss = float(parse_losses(losses)[0].item())
    assert total_loss > 0
Example #4
    def step_t(self, _optim, **input):
        _optim.zero_grad()
        loss, lateloss = self.model(**input)
        # Reduce the per-head loss dict to a single scalar plus log values.
        loss, self.loss_vars = parse_losses(loss)
        self.lateloss = self.alpha * (lateloss.mean().log().pow(self.beta))
        # The late loss is recorded but deliberately not added to the
        # backpropagated total:
        # loss = loss + self.lateloss
        loss.backward()
        _optim.step()
Example #5
def test_faster_rcnn_ohem_forward():
    model, train_cfg, test_cfg = _get_detector_cfg(
        'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
    model['pretrained'] = None

    from mmdet.models import build_detector
    detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)

    input_shape = (1, 3, 256, 256)

    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(imgs,
                              img_metas,
                              gt_bboxes=gt_bboxes,
                              gt_labels=gt_labels,
                              return_loss=True)
    assert isinstance(losses, dict)
    from mmdet.apis.train import parse_losses
    total_loss = float(parse_losses(losses)[0].item())
    assert total_loss > 0

    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(imgs,
                              img_metas,
                              gt_bboxes=gt_bboxes,
                              gt_labels=gt_labels,
                              return_loss=True)
    assert isinstance(losses, dict)
    from mmdet.apis.train import parse_losses
    total_loss = float(parse_losses(losses)[0].item())
    assert total_loss > 0
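
The OHEM variants above differ from the plain Faster R-CNN setup mainly in the RCNN sampler of the training config. The relevant fragment of such a config looks roughly like the following; the values shown are typical defaults, not copied from the config file, so check the actual file for the exact settings:

# Assumed fragment of an OHEM train_cfg; values are illustrative.
train_cfg = dict(
    rcnn=dict(
        sampler=dict(
            type='OHEMSampler',   # online hard example mining
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True)))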
Example #6
    def _step_forward(self, *args, **kwargs):
        """Perform one forward step.

        Take inputs, return loss.
        Modify some attributes.
        """
        if self.decay_temperature:
            kwargs['tbs_input'] = {'temperature': self.temperature}
        losses = self.mod(*args, **kwargs)

        loss, log_vars = parse_losses(losses)
        self.cur_batch_loss = loss  # sum(loss.values())
        return self.cur_batch_loss
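
`_step_forward` only computes and caches the loss; the backward pass and optimizer update are left to the caller. A hypothetical driver (names invented here), mirroring the `step_t` pattern from Example #4:

def train_one_iter(trainer, optimizer, data_batch):
    # Hypothetical driver: the step function only returns the loss, so the
    # backward pass and parameter update happen here.
    optimizer.zero_grad()
    loss = trainer._step_forward(**data_batch)
    loss.backward()
    optimizer.step()
    return trainer.cur_batch_loss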
Example #7
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.dir is not None:
        if args.dir.startswith('//'):
            cfg.work_dir = args.dir[2:]
        else:
            localhost = get_localhost().split('.')[0]
            # results from server saved to /private
            if 'gpu' in localhost:
                output_dir = '/private/huangchenxi/mmdet/outputs'
            else:
                output_dir = 'work_dirs'

            if args.dir.endswith('-c'):
                args.dir = args.dir[:-2]
                args.resume_from = search_and_delete(os.path.join(
                    output_dir, args.dir),
                                                     prefix=cfg.work_dir,
                                                     suffix=localhost)
            cfg.work_dir += time.strftime("_%m%d_%H%M") + '_' + localhost
            cfg.work_dir = os.path.join(output_dir, args.dir, cfg.work_dir)

    if args.workers_per_gpu != -1:
        cfg.data['workers_per_gpu'] = args.workers_per_gpu

    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus

    if args.profiler or args.speed:
        cfg.data.imgs_per_gpu = 1

    if cfg.resume_from or cfg.load_from:
        cfg.model['pretrained'] = None

    if args.test:
        cfg.data.train['ann_file'] = cfg.data.val['ann_file']
        cfg.data.train['img_prefix'] = cfg.data.val['img_prefix']

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
        num_gpus = args.gpus
        rank = 0
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        num_gpus = torch.cuda.device_count()
        rank, _ = get_dist_info()

    if cfg.optimizer['type'] == 'SGD':
        cfg.optimizer['lr'] *= num_gpus * cfg.data.imgs_per_gpu / 256
    else:
        cfg.optimizer['lr'] *= ((num_gpus / 8) * (cfg.data.imgs_per_gpu / 2))

    # init logger before other steps
    logger = get_root_logger(nlogger, cfg.log_level)
    if rank == 0:
        logger.set_logger_dir(cfg.work_dir, 'd')
    logger.info("Config: ------------------------------------------\n" +
                cfg.text)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    if rank == 0:
        # describe_vars(model)
        writer = set_writer(cfg.work_dir)
        # try:
        #     # describe_features(model.backbone)
        #     writer.add_graph(model, torch.zeros((1, 3, 800, 800)))
        # except (NotImplementedError, TypeError):
        #     logger.warn("Add graph failed.")
        # except Exception as e:
        #     logger.warn("Add graph failed:", e)

    if not args.graph and not args.profiler and not args.speed:
        if distributed:
            model = MMDistributedDataParallel(model.cuda())
        else:
            model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

        if isinstance(cfg.data.train, list):
            for t in cfg.data.train:
                logger.info("loading training set: " + str(t.ann_file))
            train_dataset = [build_dataset(t) for t in cfg.data.train]
            CLASSES = train_dataset[0].CLASSES
        else:
            logger.info("loading training set: " +
                        str(cfg.data.train.ann_file))
            train_dataset = build_dataset(cfg.data.train)
            logger.info("{} images loaded!".format(len(train_dataset)))
            CLASSES = train_dataset.CLASSES
        if cfg.checkpoint_config is not None:
            # save mmdet version, config file content and class names in
            # checkpoints as meta data
            cfg.checkpoint_config.meta = dict(mmdet_version=__version__,
                                              config=cfg.text,
                                              CLASSES=CLASSES)
        # add an attribute for visualization convenience
        if hasattr(model, 'module'):
            model.module.CLASSES = CLASSES
        else:
            model.CLASSES = CLASSES
        train_detector(model,
                       train_dataset,
                       cfg,
                       distributed=distributed,
                       validate=args.validate,
                       logger=logger,
                       runner_attr_dict={'task_name': args.dir})
    else:
        from mmcv.runner.checkpoint import load_checkpoint
        from mmdet.datasets import build_dataloader
        from mmdet.core.utils.model_utils import register_hooks
        from mmdet.apis.train import parse_losses

        model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
        if args.profiler == 'test' or args.speed == 'test':
            model.eval()
            dataset = build_dataset(cfg.data.test)
        else:
            model.train()
            dataset = build_dataset(cfg.data.train)

        if cfg.load_from and (args.profiler or args.speed):
            logger.info('load checkpoint from %s', cfg.load_from)
            load_checkpoint(model,
                            cfg.load_from,
                            map_location='cpu',
                            strict=True)

        data_loader = build_dataloader(dataset,
                                       cfg.data.imgs_per_gpu,
                                       cfg.data.workers_per_gpu,
                                       cfg.gpus,
                                       dist=False,
                                       shuffle=False)

        if args.graph:
            id_dict = {}
            for name, parameter in model.named_parameters():
                id_dict[id(parameter)] = name

        for i, data_batch in enumerate(data_loader):
            if args.graph:
                outputs = model(**data_batch)
                loss, log_vars = parse_losses(outputs)
                get_dot = register_hooks(loss, id_dict)
                loss.backward()
                dot = get_dot()
                dot.save('graph.dot')
                break
            elif args.profiler:
                with torch.autograd.profiler.profile(use_cuda=True) as prof:
                    if args.profiler == 'train':
                        outputs = model(**data_batch)
                        loss, log_vars = parse_losses(outputs)
                        loss.backward()
                    else:
                        with torch.no_grad():
                            model(**data_batch, return_loss=False)

                    if i == 20:
                        prof.export_chrome_trace('./trace.json')
                        logger.info(prof)
                        break
            elif args.speed:
                if args.speed == 'train':
                    start = time.perf_counter()
                    outputs = model(**data_batch)
                    loss, log_vars = parse_losses(outputs)
                    loss.backward()
                    torch.cuda.synchronize()
                    end = time.perf_counter()
                else:
                    start = time.perf_counter()
                    with torch.no_grad():
                        model(**data_batch, return_loss=False)
                    end = time.perf_counter()
                logger.info("{:.3f} s/iter, {:.1f} iters/s".format(
                    end - start, 1. / (end - start)))
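
The optimizer block in `main()` applies the usual linear learning-rate scaling rule. Factored out as a standalone helper (the function name is chosen here for illustration), it amounts to:

def scale_lr(base_lr, optimizer_type, num_gpus, imgs_per_gpu):
    # Linear scaling: the LR grows with the effective (global) batch size.
    if optimizer_type == 'SGD':
        # reference batch size of 256 images
        return base_lr * num_gpus * imgs_per_gpu / 256
    # reference setup of 8 GPUs x 2 images per GPU
    return base_lr * (num_gpus / 8) * (imgs_per_gpu / 2)


# Example: 4 GPUs, 2 images per GPU, base LR 0.02, non-SGD optimizer:
# 0.02 * (4 / 8) * (2 / 2) = 0.01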