Example #1
  def detect( self, image_data ):
    input_image = image_data.asarray().astype( 'uint8' )

    from mmdet.apis import inference_detector

    gpu_string = 'cuda:' + str( self._gpu_index )
    detections = inference_detector( self._model, input_image, self._cfg, device=gpu_string )

    class_names = [ 'fish' ] * 10000

    if isinstance( detections, tuple ):
      bbox_result, segm_result = detections
    else:
      bbox_result, segm_result = detections, None

    if np.size( bbox_result ) > 0:
      bboxes = np.vstack( bbox_result )
    else:
      bboxes = []

    sys.stdout.write( "Detected " + str( len( bbox_result ) ) + " objects\n" )
    sys.stdout.flush()

    # convert segmentation masks
    masks = []
    if segm_result is not None:
      # note: score_thr is undefined in the original snippet; assume no filtering
      score_thr = 0.0
      segms = mmcv.concat_list( segm_result )
      inds = np.where( bboxes[:, -1] > score_thr )[0]
      for i in inds:
        masks.append( maskUtils.decode( segms[i] ).astype( np.bool ) )

    # collect labels
    labels = [
      np.full( bbox.shape[0], i, dtype=np.int32 )
      for i, bbox in enumerate( bbox_result )
    ]

    if np.size( labels ) > 0:
      labels = np.concatenate( labels )
    else:
      labels = []

    # convert to kwiver format, apply threshold
    output = []

    # placeholder loop from the original snippet: nothing is converted here
    # (see Example #28 for the full bbox-to-DetectedObject conversion)
    for entry in []:
      output.append( DetectedObject( BoundingBox( 1,1,2,2 ) ) )

    if np.size( labels ) > 0:
      mmcv.imshow_det_bboxes(
        input_image,
        bboxes,
        labels,
        class_names=class_names,
        score_thr=-100.0,
        show=True)

    return DetectedObjectSet( output )
Example #2
def show_result(img,
                result,
                class_names,
                score_thr=0.3,
                wait_time=0,
                out_file=None):
    """Visualize the detection results on the image.

    Args:
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        class_names (list[str] or tuple[str]): A list of class names.
        score_thr (float): The threshold to visualize the bboxes and masks.
        wait_time (int): Value of waitKey param.
        out_file (str, optional): If specified, the visualization result will
            be written to the out file instead of shown in a window.
    """
    assert isinstance(class_names, (tuple, list))
    img = mmcv.imread(img)
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(np.bool)
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # draw bounding boxes
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    mmcv.imshow_det_bboxes(img.copy(),
                           bboxes,
                           labels,
                           class_names=class_names,
                           score_thr=score_thr,
                           show=out_file is None,
                           wait_time=wait_time,
                           out_file=out_file)
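A minimal usage sketch for the show_result helper above, assuming an mmdet-style model created with init_detector (the config and checkpoint paths are placeholders):

from mmdet.apis import init_detector, inference_detector

model = init_detector('configs/faster_rcnn_r50_fpn_1x.py', 'checkpoints/latest.pth', device='cuda:0')
result = inference_detector(model, 'demo.jpg')
show_result('demo.jpg', result, model.CLASSES, score_thr=0.3, out_file='demo_det.jpg')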
Example #3
def single_predict(filter_cate=None):
    img=cv2.imread("/root/data/gvision/dataset/predict/person/img/14_OCT_Habour_IMG_14_01___0.5___2072___4728.jpg")
    predictor = DefaultPredictor(cfg)
    outputs = predictor(img)
    pre_instances=outputs['instances']
    # # We can use `Visualizer` to draw the predictions on the image.
    # MetadataCatalog.get("pandahead").set(thing_classes=["head"], thing_dataset_id_to_contiguous_id={1: 0})
    train_dicts_metadata = MetadataCatalog.get("pandahead")
    print(train_dicts_metadata )
    # print(train_dicts_metadata)
    # v = Visualizer(img[:, :, ::-1], train_dicts_metadata, scale=1,instance_mode=ColorMode.IMAGE)
    # v = v.draw_instance_predictions(outputs["instances"].to("cpu")) 
    # # print(outputs["instances"])
    # cv2.imwrite("/root/data/gvision/detectron2-master/workdir/output/test/ouput_2.jpg",v.get_image()[:, :, ::-1])
    bboxes=pre_instances.pred_boxes.tensor.cpu().numpy()

    category_ids=pre_instances.pred_classes.cpu().numpy()
    print(category_ids)
    # print(category_ids)
    if filter_cate is not None:
        assert filter_cate-1 in category_ids, f"predictions contain no instances of class {filter_cate}"
    score=pre_instances.scores.cpu().numpy()
    # filter detections down to the requested category
    if filter_cate is not None:
        bboxes=[bboxes[i] for i in range(len(bboxes)) if category_ids[i]==filter_cate-1]
        score=[score[i] for i in range(len(score)) if category_ids[i]==filter_cate-1]
        category_ids=[category_ids[i] for i in range(len(category_ids)) if category_ids[i]==filter_cate-1]
    # class_names=_create_text_labels(category_ids,['visible body', 'full body', 'head', 'vehicle'],score)
    bboxes=[list(bboxes[i]) for i in range(len(bboxes))]
    score=np.resize(score,(len(score),1))
    bboxes=[old+list(new) for old,new in zip(bboxes,score)]
    # print([old+new for old,new in zip(bboxes,score)])
    # print(score)
    # category_ids=[category_ids[i]+1 for i in range(len(category_ids))]
    # mmcv.imshow_bboxes(img,bboxes,top_k=10,show=False,out_file="/root/data/gvision/detectron2-master/demo/ouputmmcv_10_1.jpg")
    # mmcv.imshow_bboxes(img,bboxes,show=False,out_file="/root/data/gvision/1detectron2-master/demo/ouputmmcv2_all_1.jpg")
    # mmcv.imshow_bboxes(img,bboxes,top_k=500,show=False,out_file="/root/data/gvision/detectron2-master/demo/ouputmmcv_500_1.jpg")
    out_file=os.path.join(cfg.OUTPUT_DIR,"visual",f"ouput_c{filter_cate}.jpg")
    os.makedirs(os.path.join(cfg.OUTPUT_DIR,"visual"),exist_ok=True)
    mmcv.imshow_det_bboxes(
        img,
        np.array(bboxes),
        np.array(category_ids),
        class_names=["head"],
        show=False,
        out_file=out_file)
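The list-based score concatenation above can also be written directly with numpy; a small sketch, assuming pre_instances as in the function body:

import numpy as np

boxes = pre_instances.pred_boxes.tensor.cpu().numpy()    # (N, 4) boxes in x1, y1, x2, y2 order
scores = pre_instances.scores.cpu().numpy()              # (N,) confidence scores
dets = np.hstack([boxes, scores[:, None]])                # (N, 5) layout expected by mmcv.imshow_det_bboxes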
Example #4
def main(image_path):
    image = cv2.imread(image_path)
    data = img_pre_process(image)
    with torch.no_grad():
        res = model(return_loss=False, rescale=True, **data)
    bboxes = np.vstack(res)
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(res)
    ]
    labels = np.concatenate(labels)
    mmcv.imshow_det_bboxes(image,
                           bboxes,
                           labels,
                           score_thr=0.3,
                           show=True,
                           out_file="result.jpg")
    Detections = decode_detection(bboxes, labels, classes_det, score_thr=0.3)
Example #5
File: base.py  Project: zyg11/TSD
    def show_result(self, data, result, dataset=None, score_thr=0.3):
        if isinstance(result, tuple):
            bbox_result, segm_result = result
        else:
            bbox_result, segm_result = result, None

        img_tensor = data["img"][0]
        img_metas = data["img_metas"][0].data[0]
        imgs = tensor2imgs(img_tensor, **img_metas[0]["img_norm_cfg"])
        assert len(imgs) == len(img_metas)

        if dataset is None:
            class_names = self.CLASSES
        elif isinstance(dataset, str):
            class_names = get_classes(dataset)
        elif isinstance(dataset, (list, tuple)):
            class_names = dataset
        else:
            raise TypeError(
                "dataset must be a valid dataset name or a sequence"
                " of class names, not {}".format(type(dataset))
            )

        for img, img_meta in zip(imgs, img_metas):
            h, w, _ = img_meta["img_shape"]
            img_show = img[:h, :w, :]

            bboxes = np.vstack(bbox_result)
            # draw segmentation masks
            if segm_result is not None:
                segms = mmcv.concat_list(segm_result)
                inds = np.where(bboxes[:, -1] > score_thr)[0]
                for i in inds:
                    color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
                    mask = maskUtils.decode(segms[i]).astype(np.bool)
                    img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
            # draw bounding boxes
            labels = [
                np.full(bbox.shape[0], i, dtype=np.int32)
                for i, bbox in enumerate(bbox_result)
            ]
            labels = np.concatenate(labels)
            mmcv.imshow_det_bboxes(
                img_show, bboxes, labels, class_names=class_names, score_thr=score_thr
            )
Example #6
def draw_bbox_with_gt(detector,
                      img,
                      gt_bboxes,
                      gt_labels,
                      score_thr=0.3,
                      show=True):
    output_name = os.path.join(f"{out_dir}/", Path(img).name)
    preds = inference_detector(detector, img)
    img = detector.show_result(img, preds, score_thr=score_thr, show=False)
    mmcv.imshow_det_bboxes(
        img,
        gt_bboxes,
        gt_labels,
        show=show,
        out_file=output_name,
        bbox_color='blue',
        text_color='blue',
    )
Example #7
def render(image, pred, person_bbox, bbox_thre=0):
    if pred is None:
        return image

    mmcv.imshow_det_bboxes(image,
                           person_bbox,
                           np.zeros(len(person_bbox)).astype(int),
                           class_names=['person'],
                           score_thr=bbox_thre,
                           show=False,
                           wait_time=0)

    for person_pred in pred:
        for joint_pred in person_pred:
            cv2.circle(image, (int(joint_pred[0]), int(joint_pred[1])), 2,
                       [255, 0, 0], 2)

    return np.uint8(image)
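A usage sketch for render with synthetic inputs (shapes only; the per-person joint layout is an assumption based on how pred is indexed above):

import numpy as np

image = np.zeros((240, 320, 3), dtype=np.uint8)
person_bbox = np.array([[40., 30., 200., 220., 0.9]])          # one (x1, y1, x2, y2, score) box
pred = np.array([[[120., 100.], [130., 140.], [110., 180.]]])  # one person, three (x, y) joints
vis = render(image, pred, person_bbox)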
Example #8
def single_predict(filter_cate=2):
    cfg.MODEL.WEIGHTS=os.path.join(cfg.OUTPUT_DIR,"model_final.pth")
    img=cv2.imread("/root/data/gvision/detectron2-master/demo/2.jpg")
    predictor = DefaultPredictor(cfg)
    outputs = predictor(img)
    pre_instances=outputs['instances']
    # # We can use `Visualizer` to draw the predictions on the image.
    MetadataCatalog.get("pv_train").set(thing_classes=['visible body', 'full body', 'head', 'vehicle'], # 可以选择开启,但是不能显示中文,所以本人关闭
                                        thing_dataset_id_to_contiguous_id={1: 0, 2: 1, 3: 2, 4: 3})
    train_dicts_metadata = MetadataCatalog.get("pv_train")
    # print(train_dicts_metadata)
    v = Visualizer(img[:, :, ::-1], train_dicts_metadata, scale=1,instance_mode=ColorMode.IMAGE)
    v = v.draw_instance_predictions(outputs["instances"].to("cpu")) 
    # print(outputs["instances"])
    cv2.imwrite("/root/data/gvision/detectron2-master/demo/ouput_2.jpg",v.get_image()[:, :, ::-1])
    bboxes=pre_instances.pred_boxes.tensor.cpu().numpy()
    category_ids=pre_instances.pred_classes.cpu().numpy()
    # print(category_ids)
    assert filter_cate-1 in category_ids, f"predictions contain no instances of class {filter_cate}"
    score=pre_instances.scores.cpu().numpy()
    # filter detections down to the requested category
    if filter_cate is not None:
        bboxes=[bboxes[i] for i in range(len(bboxes)) if category_ids[i]==filter_cate-1]
        score=[score[i] for i in range(len(score)) if category_ids[i]==filter_cate-1]
        category_ids=[category_ids[i] for i in range(len(category_ids)) if category_ids[i]==filter_cate-1]
    # class_names=_create_text_labels(category_ids,['visible body', 'full body', 'head', 'vehicle'],score)
    bboxes=[list(bboxes[i]) for i in range(len(bboxes))]
    score=np.resize(score,(len(score),1))
    bboxes=[old+list(new) for old,new in zip(bboxes,score)]
    # print([old+new for old,new in zip(bboxes,score)])
    # print(score)
    # category_ids=[category_ids[i]+1 for i in range(len(category_ids))]
    # mmcv.imshow_bboxes(img,bboxes,top_k=10,show=False,out_file="/root/data/gvision/detectron2-master/demo/ouputmmcv_10_1.jpg")
    # mmcv.imshow_bboxes(img,bboxes,show=False,out_file="/root/data/gvision/1detectron2-master/demo/ouputmmcv2_all_1.jpg")
    # mmcv.imshow_bboxes(img,bboxes,top_k=500,show=False,out_file="/root/data/gvision/detectron2-master/demo/ouputmmcv_500_1.jpg")
    out_file=f"/root/data/gvision/detectron2-master/demo/ouput_2_c{filter_cate}.jpg"
    mmcv.imshow_det_bboxes(
        img,
        np.array(bboxes),
        np.array(category_ids),
        class_names=['visible body', 'full body', 'head', 'vehicle'],
        show=False,
        out_file=out_file)
Example #9
def show_gt(img, result, class_names, score_thr=0.3, out_file=None):
    """Visualize the detection results on the image.

    Args:
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        class_names (list[str] or tuple[str]): A list of class names.
        score_thr (float): The threshold to visualize the bboxes and masks.
        out_file (str, optional): If specified, the visualization result will
            be written to the out file instead of shown in a window.
    """
    val_ann = mmcv.load(
        '/media/bo/Elements/open-images/data/mmdet_anno/seg_val_2844_ann.pkl')
    assert len([x for x in val_ann
                if x['filename'] == os.path.basename(img)]) == 1
    this_ann = [x for x in val_ann
                if x['filename'] == os.path.basename(img)][0]

    assert isinstance(class_names, (tuple, list))
    img = mmcv.imread(img)

    bbs = this_ann['ann']['bboxes']
    bboxes = np.concatenate((bbs, np.ones(bbs.shape[0]).reshape(-1, 1)),
                            axis=1)
    segms = this_ann['ann']['MaskPath']
    for i in range(segms.shape[0]):
        color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
        mask = mmcv.imread(
            '/media/bo/Elements/open-images/data/val_masks/' + segms[i],
            'unchanged')
        mask = (mask > 0).astype('uint8')
        mask = mmcv.imresize(mask,
                             (img.shape[1], img.shape[0])).astype(np.bool)
        img[mask] = img[mask] * 0.5 + color_mask * 0.5
    labels = this_ann['ann']['labels'] - 1

    mmcv.imshow_det_bboxes(img.copy(),
                           bboxes,
                           labels,
                           class_names=class_names,
                           score_thr=score_thr,
                           show=out_file is None,
                           out_file=out_file)
Example #10
def main():
    args = parse_args()
    cfg = retrieve_data_cfg(args.config, args.skip_type)

    dataset = build_dataset(cfg.data.train)

    progress_bar = mmcv.ProgressBar(len(dataset))
    for item in dataset:
        filename = os.path.join(args.output_dir,
                                Path(item['filename']).name
                                ) if args.output_dir is not None else None
        mmcv.imshow_det_bboxes(item['img'],
                               item['gt_bboxes'],
                               item['gt_labels'],
                               class_names=dataset.CLASSES,
                               show=not args.not_show,
                               out_file=filename,
                               wait_time=args.show_interval)
        progress_bar.update()
Example #11
def show_result(img,
                bboxes,
                labels,
                class_names,
                conf_thresh=0.1,
                thickness=1,
                font_scale=0.5,
                bbox_color='green',
                text_color='green'):
    if isinstance(img, str):
        img = mmcv.imread(img)
    mmcv.imshow_det_bboxes(img.copy(),
                           bboxes,
                           labels,
                           class_names=class_names,
                           score_thr=conf_thresh,
                           thickness=thickness,
                           font_scale=font_scale,
                           bbox_color=bbox_color,
                           text_color=text_color)
Example #12
def save_result(img, result, out_file, namesfile, score_thr=0.8):
    class_names = load_classes(namesfile)
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(result)
    ]
    labels = np.concatenate(labels)
    bboxes = np.vstack(result)
    img = mmcv.imread(img)
    mmcv.imshow_det_bboxes(img.copy(),
                           bboxes,
                           labels,
                           class_names=class_names,
                           score_thr=score_thr,
                           bbox_color='red',
                           text_color='blue',
                           thickness=2,
                           font_scale=0.8,
                           show=False,
                           out_file=out_file)
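load_classes is not defined in this snippet; a minimal sketch, assuming a Darknet-style names file with one class name per line:

def load_classes(namesfile):
    # read one class name per line, skipping blank lines (assumed file format)
    with open(namesfile) as f:
        return [line.strip() for line in f if line.strip()]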
Example #13
def show_result_3d(img, result, dataset='coco', score_thr=0.3, out_file=None, font_scale=0.5):
    img_np = np.load(img)    
    class_names = get_classes(dataset)
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(
                0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(np.bool)
            img[mask] = img[mask] * 0.5 + color_mask * 0.5

    bboxes_placeholders = [[] for i in range(0, 160)]
    for bbox in bboxes:
        for z_index in range(int(np.floor(bbox[4])), int(np.ceil(bbox[5])+ 1)):
            bboxes_placeholders[z_index].append([bbox[0], bbox[1], bbox[2], bbox[3], bbox[6]])
    
    for index, boxes in enumerate(bboxes_placeholders):
        if len(boxes) > 0:
            img = img_np[:,:,index]
            img = Image.fromarray(img).convert('RGB')
            img = np.array(img)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

            labels = np.array([0 for i in range(len(boxes))])

            mmcv.imshow_det_bboxes(
                img.copy(),
                np.array(boxes),
                labels,
                class_names=class_names,
                score_thr=score_thr,
                show=out_file is None,
                out_file=out_file.split('.')[-2] + '-{}.png'.format(index),
                font_scale=0)
Example #14
def show_result_yu(img,
                   result,
                   dataset='coco',
                   score_thr=0.3,
                   out_file=None,
                   show=False):
    class_names = ['wheat']
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(result)
    ]
    labels = np.concatenate(labels)
    bboxes = np.vstack(result)
    img = mmcv.imread(img)
    mmcv.imshow_det_bboxes(img.copy(),
                           bboxes,
                           labels,
                           class_names=class_names,
                           score_thr=score_thr,
                           show=show,
                           out_file=out_file)
Example #15
    def show_result(self,
                    img,
                    result,
                    classes=coco.COCO91_CLASSES,
                    score_thr=0.3,
                    wait_time=0,
                    out_file=None):
        """Visualize the detection results on the image.

        Args:
            img (str or np.ndarray): Image filename or loaded image.
            result (tuple[list] or list): The detection result, can be either
                (bbox, segm) or just bbox.
            classes (list[str] or tuple[str]): A list of class names.
            score_thr (float): The threshold to visualize the bboxes and masks.
            wait_time (int): Value of waitKey param.
            out_file (str, optional): If specified, the visualization result will
                be written to the out file instead of shown in a window.
        """
        import mmcv
        img = mmcv.imread(img)
        if isinstance(result, tuple):
            bbox_result, segm_result = result
        else:
            bbox_result, segm_result = result, None

        # draw bounding boxes
        bboxes = bbox_result[:, :-1].cpu().numpy()
        labels = bbox_result[:, -1].cpu().int().numpy()
        #print(bbox_result.shape, bboxes.shape, labels.shape)
        mmcv.imshow_det_bboxes(
            img.copy(),
            bboxes,
            labels,
            class_names=self.CLASSES if classes is None else classes,
            score_thr=score_thr,
            show=out_file is None,
            wait_time=wait_time,
            out_file=out_file)
Example #16
    def show_result(self, data, result, dataset=None, score_thr=0.3):
        # both branches of the original were identical, so no unpacking is needed
        bbox_result = result

        img_tensor = data['img'][0]
        img_metas = data['img_meta'][0].data[0]
        imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
        assert len(imgs) == len(img_metas)

        if dataset is None:
            class_names = self.CLASSES
        elif isinstance(dataset, str):
            class_names = get_classes(dataset)
        elif isinstance(dataset, (list, tuple)):
            class_names = dataset
        else:
            raise TypeError(
                'dataset must be a valid dataset name or a sequence'
                ' of class names, not {}'.format(type(dataset)))

        for img, img_meta in zip(imgs, img_metas):
            h, w, _ = img_meta['img_shape']
            img_show = img[:h, :w, :]

            bboxes = np.vstack(bbox_result)
            # draw bounding boxes
            labels = [
                np.full(bbox.shape[0], i, dtype=np.int32)
                for i, bbox in enumerate(bbox_result)
            ]
            labels = np.concatenate(labels)
            mmcv.imshow_det_bboxes(img_show,
                                   bboxes,
                                   labels,
                                   class_names=class_names,
                                   score_thr=score_thr)
Example #17
def show(full_img,
         det_bboxes,
         det_labels,
         target_class,
         save_dir,
         show_scale=0.05,
         score_thres=0.):
    # show the detection result and save it
    # load full image
    import copy
    if isinstance(det_bboxes, list):
        det_bboxes = np.array(det_bboxes)
    if isinstance(det_labels, list):
        det_labels = np.array(det_labels)
    det_bboxes = copy.deepcopy(det_bboxes)
    det_labels = copy.deepcopy(det_labels)

    full_height, full_width = full_img.shape[:2]
    full_img = mmcv.imresize(
        full_img,
        (int(full_width * show_scale), int(full_height * show_scale)))

    # transfer scale of detection results
    det_bboxes[:, 0:4] *= show_scale

    # save result after NMS
    mmcv.imshow_det_bboxes(
        full_img.copy(),
        det_bboxes,
        det_labels,
        class_names=['unsure', target_class],
        score_thr=score_thres,
        out_file=save_dir,
        show=False,
        wait_time=0,
    )
    return None
Example #18
def show_result(img, result, dataset='coco', score_thr=0.3, out_file=None):
    # ipdb.set_trace()
    img = mmcv.imread(img)
    class_names = get_classes(dataset)  # get the list of class names
    # the returned tuple always contains detections and may additionally contain masks
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)

    # draw segmentation masks
    # at this point the masks are already drawn onto img, so it could be written out directly
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(np.bool)
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # show the mask result directly
    # cv2.imshow('window',img)
    # cv2.waitKey(0)

    # draw bounding boxes
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    # show the boxes
    mmcv.imshow_det_bboxes(img.copy(),
                           bboxes,
                           labels,
                           class_names=class_names,
                           score_thr=score_thr,
                           show=out_file is None)
Example #19
def main(args):

    cocoGt = coco.COCO(args.annotation_path)

    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)

    for image_id in tqdm(cocoGt.getImgIds()):

        img_file_name = cocoGt.loadImgs(ids=[image_id])[0]['file_name']

        img_path = join(args.image_dir, img_file_name)

        #frame = cv2.imread(img_path)

        annotations = cocoGt.loadAnns(cocoGt.getAnnIds(image_id))

        if not annotations:
            continue

        gt_bboxes = []
        gt_labels = []

        for anno in annotations:
            bbox = anno['bbox']
            x, y, w, h = bbox
            category_id = anno.get('category_id', 1)
            gt_bboxes.append([x, y, x + w, y + h])
            gt_labels.append(category_id)

        gt_labels = np.asarray(gt_labels)
        gt_bboxes = np.asarray(gt_bboxes)
        mmcv.imshow_det_bboxes(join(args.image_dir, img_file_name),
                               gt_bboxes,
                               gt_labels,
                               show=False,
                               out_file=join(args.output_dir, img_file_name))
Example #20
def show_result(img,
                result,
                dataset='voc',
                score_thr=0.3,
                out_file=None):  # dataset:[coco, voc, ...]
    img = mmcv.imread(img)
    class_names = get_classes(dataset)
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    if segm_result is not None:  # show the segmentation.
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[
            0]  # select boxes whose score higher than thr.
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3),
                                           dtype=np.uint8)  # RGB-(1,3)
            mask = maskUtils.decode(segms[i]).astype(
                np.bool)  # the mask predicted.
            img[mask] = img[
                mask] * 0.5 + color_mask * 0.5  # color fusion to make it high-lighted!
    # draw bounding boxes
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    mmcv.imshow_det_bboxes(img.copy(),
                           bboxes,
                           labels,
                           class_names=class_names,
                           score_thr=score_thr,
                           show=False,
                           out_file=out_file)
Example #21
def show_result(img,
                result,
                dataset='coco',
                score_thr=0.5,
                out_file=None,
                wait_time=0):
    img = mmcv.imread(img)
    class_names = get_classes(dataset)
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(np.bool)
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # draw bounding boxes
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)

    mmcv.imshow_det_bboxes(img.copy(),
                           bboxes,
                           labels,
                           class_names=class_names,
                           score_thr=score_thr,
                           show=out_file is None,
                           wait_time=wait_time)

    # note: re_bboxes and re_scores are not defined anywhere in this snippet
    return re_bboxes, re_scores
Example #22
    def show_result(self,
                    data,
                    result,
                    img_norm_cfg,
                    dataset='coco',
                    score_thr=0.3):
        img_tensor = data['img'][0]
        # img_tensor_t = datasets['img_t'][0]
        img_metas = data['img_meta'][0].data[0]
        imgs = tensor2imgs(img_tensor, **img_norm_cfg)
        # imgs_t = tensor2imgs(img_tensor_t,**)
        assert len(imgs) == len(img_metas)

        if isinstance(dataset, str):
            class_names = get_classes(dataset)
        elif isinstance(dataset, list):
            class_names = dataset
        else:
            raise TypeError('dataset must be a valid dataset name or a list'
                            ' of class names, not {}'.format(type(dataset)))

        for img, img_meta in zip(imgs, img_metas):
            h, w, _ = img_meta['img_shape']
            img_show = img[:h, :w, :]
            labels = [
                np.full(bbox.shape[0], i, dtype=np.int32)
                for i, bbox in enumerate(result)
            ]
            labels = np.concatenate(labels)
            bboxes = np.vstack(result)
            mmcv.imshow_det_bboxes(
                img_show,
                bboxes,
                labels,
                class_names=class_names,
                score_thr=score_thr)
Example #23
    def show_annotations(self, img_idx, show=False, out_file=None, **kwargs):
        img_info = self.img_infos[img_idx]
        img_id = img_info['id']
        img_path = osp.join(self.img_prefix, img_info['file_name'])
        img = mmcv.imread(img_path)
        annotations = self.lvis.load_anns(
            ids=self.lvis.get_ann_ids(img_ids=[img_id]))

        bboxes = []
        labels = []
        class_names = ['bg'] + list(self.CLASSES)
        for ann in annotations:
            if len(ann['segmentation']) > 0:
                rle = maskUtils.frPyObjects(ann['segmentation'],
                                            img_info['height'],
                                            img_info['width'])
                ann_mask = np.sum(
                    maskUtils.decode(rle), axis=2).astype(np.bool)
                color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
                img[ann_mask] = img[ann_mask] * 0.5 + color_mask * 0.5
            bbox = ann['bbox']
            x, y, w, h = bbox
            bboxes.append([x, y, x + w, y + h])
            labels.append(ann['category_id'])
        bboxes = np.stack(bboxes)
        labels = np.stack(labels)
        mmcv.imshow_det_bboxes(
            img,
            bboxes,
            labels,
            class_names=class_names,
            show=show,
            out_file=out_file,
            **kwargs)
        if not (show or out_file):
            return img
Example #24
def show_result(img, result, dataset='coco', score_thr=0.5, show=True):
    class_names = get_classes(dataset)
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(result)
    ]
    labels = np.concatenate(labels)
    bboxes = np.vstack(result)
    img = mmcv.imread(img)
    anno = mmcv.imshow_det_bboxes(img.copy(),
                                  bboxes,
                                  labels,
                                  show=show,
                                  class_names=class_names,
                                  score_thr=score_thr)
    return anno
Example #25
def show_mask_result(img,
                     result,
                     dataset='coco',
                     score_thr=0.7,
                     with_mask=True):
    segm_result = None
    if with_mask:
        bbox_result, segm_result = result
    else:
        bbox_result = result
    if isinstance(dataset,
                  str):  # add your own data labels to mmdet.core.class_name.py
        class_names = get_classes(dataset)
        # print(class_names)
    elif isinstance(dataset, list):
        class_names = dataset
    else:
        raise TypeError('dataset must be a valid dataset name or a list'
                        ' of class names, not {}'.format(type(dataset)))
    h, w, _ = img.shape
    img_show = img[:h, :w, :]
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    bboxes = np.vstack(bbox_result)
    if with_mask:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(np.bool)
            img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
    result_img = mmcv.imshow_det_bboxes(img_show,
                                        bboxes,
                                        labels,
                                        class_names=class_names,
                                        score_thr=score_thr)
    return result_img
Example #26
    def show_result(self,
                    img,
                    result,
                    score_thr=0.3,
                    bbox_color='green',
                    text_color='green',
                    thickness=1,
                    font_scale=0.5,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None):
        """Draw `result` over `img`.

        Args:
            img (str or Tensor): The image to be displayed.
            result (Tensor or tuple): The results to draw over `img`
                bbox_result or (bbox_result, segm_result).
            score_thr (float, optional): Minimum score of bboxes to be shown.
                Default: 0.3.
            bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
            text_color (str or tuple or :obj:`Color`): Color of texts.
            thickness (int): Thickness of lines.
            font_scale (float): Font scales of texts.
            win_name (str): The window name.
            wait_time (int): Value of waitKey param.
                Default: 0.
            show (bool): Whether to show the image.
                Default: False.
            out_file (str or None): The filename to write the image.
                Default: None.

        Returns:
            img (np.ndarray): The drawn image, returned only when neither `show`
                nor `out_file` is set.
        """
        img = mmcv.imread(img)
        img = img.copy()
        if isinstance(result, tuple):
            bbox_result, segm_result = result
            if isinstance(segm_result, tuple):
                segm_result = segm_result[0]  # ms rcnn
        else:
            bbox_result, segm_result = result, None
        bboxes = np.vstack(bbox_result)
        labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]
        labels = np.concatenate(labels)
        # draw segmentation masks
        if segm_result is not None and len(labels) > 0:  # non empty
            segms = mmcv.concat_list(segm_result)
            inds = np.where(bboxes[:, -1] > score_thr)[0]
            np.random.seed(42)
            color_masks = [
                np.random.randint(0, 256, (1, 3), dtype=np.uint8)
                for _ in range(max(labels) + 1)
            ]
            for i in inds:
                i = int(i)
                color_mask = color_masks[labels[i]]
                mask = segms[i]
                img[mask] = img[mask] * 0.5 + color_mask * 0.5
        # if out_file specified, do not show image in window
        if out_file is not None:
            show = False
        # draw bounding boxes
        mmcv.imshow_det_bboxes(img,
                               bboxes,
                               labels,
                               class_names=self.CLASSES,
                               score_thr=score_thr,
                               bbox_color=bbox_color,
                               text_color=text_color,
                               thickness=thickness,
                               font_scale=font_scale,
                               win_name=win_name,
                               show=show,
                               wait_time=wait_time,
                               out_file=out_file)

        if not (show or out_file):
            warnings.warn('show==False and out_file is not specified, only '
                          'result image will be returned')
            return img
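A short usage sketch for this method, assuming an mmdet 2.x model built with init_detector (paths are placeholders):

from mmdet.apis import init_detector, inference_detector

model = init_detector('configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', 'latest.pth', device='cuda:0')
result = inference_detector(model, 'demo.jpg')
model.show_result('demo.jpg', result, score_thr=0.3, out_file='demo_det.jpg')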
Example #27
def main(args):
    if not args.just_calculate:

        torch.set_default_tensor_type('torch.FloatTensor')

        net = network.Network(args)
        net.cuda('cuda')
        check_point = torch.load(args.model_path)
        net.load_state_dict(check_point['state_dict'])

        os.makedirs(args.output_dir, exist_ok=True)

        retDets = []
        for tmp_id, img_file_name in enumerate(os.listdir(args.image_dir)):

            img_path = join(args.image_dir, img_file_name)
            output_image_dir = None
            if args.save_image:
                output_image_dir = join(args.output_dir, 'predicts')
                os.makedirs(output_image_dir, exist_ok=True)

            np.set_printoptions(precision=2, suppress=True)
            try:
                detections = inference_image_path(
                    args.image_dir,
                    img_file_name,
                    net,
                    category_id=args.category_id)['dtboxes']
            except Exception:
                print('[!] load error ', join(args.image_dir, img_file_name))
                continue

            if args.save_image:
                bboxes, labels = [], []
            for res in detections:

                score = res['score']
                if score < args.confidence:
                    continue
                bbox = res['bbox']
                det = {
                    'image_id': tmp_id,
                    'bbox': bbox,
                    'category_id': args.category_id,
                    'score': score,
                }
                retDets.append(det)

                if args.save_image:
                    labels.append(args.category_id)
                    bboxes.append([
                        bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1],
                        score
                    ])

            if args.save_image:
                dst_img_path = join(output_image_dir, basename(img_path))
                if not bboxes:
                    bboxes = [[0., 0., 0., 0., 0.]]
                    labels = [args.category_id]

                bboxes = np.asarray([
                    xywh2xyxy(det['bbox']) + [det['score']]
                    for det in detections
                ])
                labels = np.asarray([det['category_id'] for det in detections])
                # formatting
                mmcv.imshow_det_bboxes(img_path,
                                       bboxes,
                                       labels,
                                       score_thr=args.confidence,
                                       show=False,
                                       out_file=dst_img_path)

        output_json_path = join(args.output_dir, 'detres.json')
        with open(output_json_path, 'w') as json_fp:
            json_str = json.dumps(retDets)
            json_fp.write(json_str)

    #fpath = os.path.join(args.output_dir, 'dump_with_gt.odgt')
    #misc_utils.save_json_lines(all_results, fpath)

    print('Done')
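xywh2xyxy is not defined in the snippet above; a minimal sketch, assuming COCO-style [x, y, w, h] boxes:

def xywh2xyxy(bbox):
    # convert an assumed [x, y, w, h] box to [x1, y1, x2, y2]
    x, y, w, h = bbox
    return [x, y, x + w, y + h]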
Example #28
    def detect(self, image_data):
        input_image = image_data.asarray().astype('uint8')
        if self._rgb_to_bgr:
            input_image = cv2.cvtColor(input_image, cv2.COLOR_RGB2BGR)

        from mmdet.apis import inference_detector
        detections = inference_detector(self._model, input_image)

        if isinstance(detections, tuple):
            bbox_result, segm_result = detections
        else:
            bbox_result, segm_result = detections, None

        if np.size(bbox_result) > 0:
            bboxes = np.vstack(bbox_result)
        else:
            bboxes = []

        # convert segmentation masks
        masks = []
        if segm_result is not None:
            segms = mmcv.concat_list(segm_result)
            inds = np.where(bboxes[:, -1] > self._thresh)[0]  # score_thr was undefined; use the detector threshold
            for i in inds:
                masks.append(maskUtils.decode(segms[i]).astype(np.bool))

        # collect labels
        labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]

        if np.size(labels) > 0:
            labels = np.concatenate(labels)
        else:
            labels = []

        # convert to kwiver format, apply threshold
        output = DetectedObjectSet()

        for bbox, label in zip(bboxes, labels):
            class_confidence = float(bbox[-1])
            if class_confidence < self._thresh:
                continue

            bbox_int = bbox.astype(np.int32)
            bounding_box = BoundingBoxD(bbox_int[0], bbox_int[1], bbox_int[2],
                                        bbox_int[3])

            class_name = self._labels[label]
            detected_object_type = DetectedObjectType(class_name,
                                                      class_confidence)

            detected_object = DetectedObject(bounding_box,
                                             np.max(class_confidence),
                                             detected_object_type)
            output.add(detected_object)

        if np.size(labels) > 0 and self._display_detections:
            mmcv.imshow_det_bboxes(input_image,
                                   bboxes,
                                   labels,
                                   class_names=self._labels,
                                   score_thr=self._thresh,
                                   show=True)

        return output
Example #29
# -*- coding: utf-8 -*-
'''
MMCV image visualization notes
Reference: https://mmcv.readthedocs.io/en/latest/visualization.html
'''
import mmcv
import numpy as np

# display an image file
mmcv.imshow("cluo.jpg")

# display an image given as a numpy array
img = (np.random.rand(100,100,3)*255).astype(np.uint8)
mmcv.imshow(img)

# display an image with bounding boxes
img = (np.random.rand(100,100,3)*255).astype(np.uint8)
bboxes = np.array([[0,0,50,50],[20,20,60,60]])
mmcv.imshow_bboxes(img,bboxes,colors=["red"],show=False,out_file="cluo_bbox.jpg")

# display an image with bounding boxes and a label for each box
labels = np.array([1,3])
classes = ["class 0","class 1","class 2","class 3"]
mmcv.imshow_det_bboxes(img,bboxes,labels,classes,bbox_color="green",text_color="blue",out_file="cluo_bbox_label.jpg")
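imshow_det_bboxes also accepts (N, 5) boxes whose last column is a confidence score, which is what score_thr filters on in the other examples; a small sketch reusing img and classes from above (the output filename is a placeholder):

bboxes_scored = np.array([[0, 0, 50, 50, 0.9], [20, 20, 60, 60, 0.2]])
labels = np.array([0, 1])
# only the first box survives the 0.5 score threshold
mmcv.imshow_det_bboxes(img, bboxes_scored, labels, classes, score_thr=0.5,
                       show=False, out_file="cluo_bbox_thr.jpg")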

Example #30
def show_result(img,
                result,
                class_names,
                score_thr=0.3,
                wait_time=0,
                show=True,
                out_file=None):
    """Visualize the detection results on the image.

    Args:
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        class_names (list[str] or tuple[str]): A list of class names.
        score_thr (float): The threshold to visualize the bboxes and masks.
        wait_time (int): Value of waitKey param.
        show (bool, optional): Whether to show the image with opencv or not.
        out_file (str, optional): If specified, the visualization result will
            be written to the out file instead of shown in a window.

    Returns:
        np.ndarray or None: If neither `show` nor `out_file` is specified, the
            visualized image is returned, otherwise None is returned.
    """
    assert isinstance(class_names, (tuple, list))
    img = mmcv.imread(img)
    img = img.copy()
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    # draw segmentation masks
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        np.random.seed(42)
        color_masks = [
            np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            for _ in range(max(labels) + 1)
        ]
        for i in inds:
            i = int(i)
            color_mask = color_masks[labels[i]]
            mask = maskUtils.decode(segms[i]).astype(np.bool)
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # draw bounding boxes
    mmcv.imshow_det_bboxes(img,
                           bboxes,
                           labels,
                           class_names=class_names,
                           score_thr=score_thr,
                           show=show,
                           wait_time=wait_time,
                           out_file=out_file)
    if not (show or out_file):
        return img
Example #31
def decode_detection(bboxes, labels, class_names=None, score_thr=0.3):
    # def line reconstructed from the call at the bottom of this example;
    # the original snippet starts mid-function
    assert bboxes.shape[1] == 5

    all_res = {}
    scores = bboxes[:, -1]
    inds = scores > score_thr
    bboxes = bboxes[inds, :]
    labels = labels[inds]
    detection_cnt = 0
    for bbox, label in zip(bboxes, labels):
        res = {}
        bbox_int = bbox.astype(np.int32)
        bbox_int = bbox_int.tolist()
        label_text = class_names[label] if class_names is not None else 'cls {}'.format(label)

        res['label'] = label_text
        res['score'] = str(bbox[-1])
        res['bbox'] = bbox_int[:-1]
        res['label_txt'] = label_text + '|{:.02f}'.format(bbox[-1])

        all_res[str(detection_cnt)] = res
        detection_cnt = detection_cnt + 1

    return all_res

img = cv2.imread(image_path)
result, img_meta = inference_detector(model_detection, img)
bboxes = np.vstack(result)
labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(result)]
labels = np.concatenate(labels)
mmcv.imshow_det_bboxes(img, bboxes , labels, score_thr=0.3, show=True, out_file="result.jpg")
Detections = decode_detection(bboxes, labels, classes_det, score_thr=0.3)
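decode_detection returns a dict keyed by a running detection index, each value holding 'label', 'score', 'bbox', and 'label_txt'; a small sketch of consuming it:

for det_id, det in Detections.items():
    print(det_id, det['label_txt'], det['bbox'])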