Example #1
def evaluate_rcnn(model_name, paper_arxiv_id, cfg_list, model_file):
    evaluator = COCOEvaluator(
        root=COCO_ROOT, model_name=model_name, paper_arxiv_id=paper_arxiv_id
    )
    category_id_to_coco_id = {
        v: k for k, v in COCODetection.COCO_id_to_category_id.items()
    }

    cfg.update_config_from_args(cfg_list)  # TODO backup/restore config
    finalize_configs(is_training=False)
    MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
    predcfg = PredictConfig(
        model=MODEL,
        session_init=SmartInit(model_file),
        input_names=MODEL.get_inference_tensor_names()[0],
        output_names=MODEL.get_inference_tensor_names()[1],
    )
    predictor = OfflinePredictor(predcfg)

    def xyxy_to_xywh(box):
        box[2] -= box[0]
        box[3] -= box[1]
        return box

    df = get_eval_dataflow("coco_val2017")
    df.reset_state()
    for img, img_id in tqdm.tqdm(df, total=len(df)):
        results = predict_image(img, predictor)
        res = [
            {
                "image_id": img_id,
                "category_id": category_id_to_coco_id.get(
                    int(r.class_id), int(r.class_id)
                ),
                "bbox": xyxy_to_xywh([round(float(x), 4) for x in r.box]),
                "score": round(float(r.score), 3),
            }
            for r in results
        ]
        evaluator.add(res)
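        # sotabench caching: stop early if the server already has results
        # for this run's batch hash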
        if evaluator.cache_exists:
            break

    evaluator.save()
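
The helper above mirrors the COCO box convention: result files store [x, y, width, height], while the detector emits corner coordinates [x1, y1, x2, y2]. A minimal, self-contained sketch of that conversion (a non-mutating variant; the version above edits the list in place):

def xyxy_to_xywh(box):
    # [x1, y1, x2, y2] -> [x, y, w, h], the format COCO JSON expects
    x1, y1, x2, y2 = box
    return [x1, y1, x2 - x1, y2 - y1]

assert xyxy_to_xywh([10.0, 20.0, 50.0, 80.0]) == [10.0, 20.0, 40.0, 60.0]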
Example #2
def evaluate_model(model_name, paper_arxiv_id, weights_url, weights_name,
                   paper_results, config):

    evaluator = COCOEvaluator(root='./.data/vision/coco',
                              model_name=model_name,
                              paper_arxiv_id=paper_arxiv_id,
                              paper_results=paper_results)

    out = 'results.pkl'
    launcher = 'none'

    if out is not None and not out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(config)
    cfg.data.test[
        'ann_file'] = './.data/vision/coco/annotations/instances_val2017.json'
    cfg.data.test['img_prefix'] = './.data/vision/coco/val2017/'

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    local_checkpoint, _ = urllib.request.urlretrieve(weights_url, weights_name)

    # '/home/ubuntu/GCNet/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth'
    checkpoint = load_checkpoint(model, local_checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    evaluator.reset_time()

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs, cache_exists = single_gpu_test(model, data_loader, False,
                                                evaluator)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, '')
        cache_exists = False  # multi_gpu_test has no evaluator cache check

    if cache_exists:
        print('Cache exists: %s' % (evaluator.batch_hash))
        evaluator.save()

    else:
        from mmdet.core import results2json

        rank, _ = get_dist_info()
        if out and rank == 0:
            print('\nwriting results to {}'.format(out))
            mmcv.dump(outputs, out)
            eval_types = ['bbox']
            if eval_types:
                print('Starting to evaluate {}'.format(' and '.join(eval_types)))
                if eval_types == ['proposal_fast']:
                    result_file = out
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(dataset, outputs, out)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = out + '.{}'.format(name)
                            result_files = results2json(
                                dataset, outputs_, result_file)

        with open(result_files['bbox']) as f:
            anns = json.load(f)
        evaluator.detections = []
        evaluator.add(anns)
        evaluator.save()
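
A hypothetical invocation of evaluate_model, assuming an mmdetection-style config file and a hosted checkpoint; every value below is an illustrative placeholder, not taken from the example:

evaluate_model(
    model_name='Mask R-CNN (R-50-FPN)',       # leaderboard display name (placeholder)
    paper_arxiv_id='1703.06870',              # Mask R-CNN paper, for comparison
    weights_url='https://example.com/mask_rcnn_r50_fpn_1x.pth',  # placeholder URL
    weights_name='mask_rcnn_r50_fpn_1x.pth',  # local filename for the download
    paper_results=None,                       # optionally a dict of reported metrics
    config='configs/mask_rcnn_r50_fpn_1x.py', # mmdetection config path (placeholder)
)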
Example #3
def test(
        data,
        weights=None,
        batch_size=16,
        imgsz=640,
        conf_thres=0.001,
        iou_thres=0.6,  # for NMS
        save_json=False,
        single_cls=False,
        augment=False,
        verbose=False,
        model=None,
        dataloader=None,
        save_dir='',
        merge=False,
        save_txt=False):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device

    else:  # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)
        merge, save_txt = opt.merge, opt.save_txt  # use Merge NMS, save *.txt labels
        if save_txt:
            out = Path('inference/v5s31_mask')
            if os.path.exists(out):
                shutil.rmtree(out)  # delete v5s31_mask folder
            os.makedirs(out)  # make new v5s31_mask folder

        # Remove previous
        for f in glob.glob(str(Path(save_dir) / 'test_batch*.jpg')):
            os.remove(f)

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size

        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
        #     model = nn.DataParallel(model)

    # Half
    half = device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    check_dataset(data)  # check
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
        _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
        path = data['test'] if opt.task == 'test' else data['val']  # path to val/test images
        dataloader = create_dataloader(path,
                                       imgsz,
                                       batch_size,
                                       model.stride.max(),
                                       opt,
                                       hyp=None,
                                       augment=False,
                                       cache=True,
                                       pad=0.5,
                                       rect=True)[0]

    seen = 0
    names = model.names if hasattr(model, 'names') else model.module.names
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R',
                                 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    evaluator = COCOEvaluator(root=DATA_ROOT,
                              model_name=opt.weights.replace('.pt', ''))
    for batch_i, (img, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width
        whwh = torch.Tensor([width, height, width, height]).to(device)

        # Disable gradients
        with torch.no_grad():
            # Run model
            t = time_synchronized()
            inf_out, train_out = model(
                img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            if training:  # if model has loss hyperparameters
                loss += compute_loss([x.float() for x in train_out], targets,
                                     model)[1][:3]  # box, obj, cls

            # Run NMS
            t = time_synchronized()
            output = non_max_suppression(inf_out,
                                         conf_thres=conf_thres,
                                         iou_thres=iou_thres,
                                         merge=merge)
            t1 += time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                  torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]]  # normalization gain whwh
                x = pred.clone()
                x[:, :4] = scale_coords(img[si].shape[1:], x[:, :4],
                                        shapes[si][0],
                                        shapes[si][1])  # to original
                for *xyxy, conf, cls in x:
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) /
                            gn).view(-1).tolist()  # normalized xywh
                    with open(str(out / Path(paths[si]).stem) + '.txt',
                              'a') as f:
                        f.write(
                            ('%g ' * 5 + '\n') % (cls, *xywh))  # label format

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = Path(paths[si]).stem
                box = pred[:, :4].clone()  # xyxy
                scale_coords(img[si].shape[1:], box, shapes[si][0],
                             shapes[si][1])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    result = {
                        'image_id':
                        int(image_id) if image_id.isnumeric() else image_id,
                        'category_id': coco91class[int(p[5])],
                        'bbox': [round(x, 3) for x in b],
                        'score': round(p[4], 5)
                    }
                    jdict.append(result)

                    #evaluator.add([result])
                    #if evaluator.cache_exists:
                    #    break

            # # Assign all predictions as incorrect
            # correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            # if nl:
            #     detected = []  # target indices
            #     tcls_tensor = labels[:, 0]
            #
            #     # target boxes
            #     tbox = xywh2xyxy(labels[:, 1:5]) * whwh
            #
            #     # Per target class
            #     for cls in torch.unique(tcls_tensor):
            #         ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # prediction indices
            #         pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # target indices
            #
            #         # Search for detections
            #         if pi.shape[0]:
            #             # Prediction to target ious
            #             ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1)  # best ious, indices
            #
            #             # Append detections
            #             detected_set = set()
            #             for j in (ious > iouv[0]).nonzero(as_tuple=False):
            #                 d = ti[i[j]]  # detected target
            #                 if d.item() not in detected_set:
            #                     detected_set.add(d.item())
            #                     detected.append(d)
            #                     correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
            #                     if len(detected) == nl:  # all targets already located in image
            #                         break
            #
            # # Append statistics (correct, conf, pcls, tcls)
            # stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # # Plot images
        # if batch_i < 1:
        #     f = Path(save_dir) / ('test_batch%g_gt.jpg' % batch_i)  # filename
        #     plot_images(img, targets, paths, str(f), names)  # ground truth
        #     f = Path(save_dir) / ('test_batch%g_pred.jpg' % batch_i)
        #     plot_images(img, output_to_target(output, width, height), paths, str(f), names)  # predictions

    evaluator.add(jdict)
    evaluator.save()
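
The JSON-building block converts boxes in two steps: xyxy2xywh yields a center-based [cx, cy, w, h], and the following line shifts the center to the top-left corner that COCO expects. A standalone torch sketch of the same arithmetic (this re-implementation of the YOLOv5 helper is for illustration only):

import torch

def xyxy2xywh(x):
    # [x1, y1, x2, y2] -> [cx, cy, w, h] (center-based), as in YOLOv5
    y = x.clone()
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # center x
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # center y
    y[:, 2] = x[:, 2] - x[:, 0]        # width
    y[:, 3] = x[:, 3] - x[:, 1]        # height
    return y

box = xyxy2xywh(torch.tensor([[10., 20., 50., 80.]]))
box[:, :2] -= box[:, 2:] / 2           # center -> top-left corner (COCO convention)
assert box.tolist() == [[10., 20., 40., 60.]]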
Example #4
def eval_model(model_name, paper_model_name, paper_arxiv_id, batch_size=64, model_description=''):

    # create model
    bench = create_model(
        model_name,
        bench_task='predict',
        pretrained=True,
    )
    bench.eval()
    input_size = bench.config.image_size

    param_count = sum([m.numel() for m in bench.parameters()])
    print('Model %s created, param count: %d' % (model_name, param_count))

    bench = bench.cuda()
    if has_amp:
        print('Using AMP mixed precision.')
        bench = amp.initialize(bench, opt_level='O1')
    else:
        print('AMP not installed, running network in FP32.')

    annotation_path = os.path.join(DATA_ROOT, 'annotations', f'instances_{ANNO_SET}.json')
    evaluator = COCOEvaluator(
        root=DATA_ROOT,
        model_name=paper_model_name,
        model_description=model_description,
        paper_arxiv_id=paper_arxiv_id)

    dataset = CocoDetection(os.path.join(DATA_ROOT, ANNO_SET), annotation_path)

    loader = create_loader(
        dataset,
        input_size=input_size,
        batch_size=batch_size,
        use_prefetcher=True,
        fill_color='mean',
        num_workers=4,
        pin_mem=True)

    iterator = tqdm.tqdm(loader, desc="Evaluation", mininterval=5)
    evaluator.reset_time()

    with torch.no_grad():
        for i, (input, target) in enumerate(iterator):
            output = bench(input, target['img_scale'], target['img_size'])
            output = output.cpu()
            sample_ids = target['img_id'].cpu()
            results = []
            for index, sample in enumerate(output):
                image_id = int(sample_ids[index])
                for det in sample:
                    score = float(det[4])
                    if score < .001:  # stop when below this threshold, scores in descending order
                        break
                    coco_det = dict(
                        image_id=image_id,
                        bbox=det[0:4].tolist(),
                        score=score,
                        category_id=int(det[5]))
                    results.append(coco_det)
            evaluator.add(results)

            if evaluator.cache_exists:
                break

    evaluator.save()
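
All four examples share the same sotabench caching idiom: call evaluator.add(...) per batch, then consult evaluator.cache_exists to short-circuit the run when the server already has results for this batch hash. A minimal sketch of that loop; run_inference is a hypothetical helper standing in for each example's model-specific code:

evaluator.reset_time()                 # exclude model setup from the timing
for batch in dataloader:
    detections = run_inference(batch)  # hypothetical: yields COCO-format dicts
    evaluator.add(detections)
    if evaluator.cache_exists:         # results for this batch hash are cached
        break                          # skip the rest of the dataset
evaluator.save()                       # finalize / upload the results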