Example #1
    def get_visualization(self, input_img, classes, test_cfg):
        vis = dict()
        batch_size = input_img.shape[0]
        img = tensor2imgs(input_img, **self.last_vals['img_metas'][0]
                          ['img_norm_cfg'])[0]  # recover the de-normalized input image
        img_gt = imshow_det_bboxes(
            img.copy(),
            self.last_vals['gt_bboxes'][0].cpu().numpy(),
            self.last_vals['gt_labels'][0].cpu().numpy() - 1,
            class_names=classes,
            show=False)
        vis["img_bbox_gt"] = img_gt
        # predict bboxes
        pred_bboxes, pred_labels = self.get_bboxes(
            cls_scores=self.last_vals['cls_scores'],
            bbox_preds=self.last_vals['bbox_preds'],
            centernesses=self.last_vals['centernesses'],
            img_metas=self.last_vals['img_metas'],
            cfg=test_cfg)[0]
        img_preds = imshow_det_bboxes(img.copy(),
                                      pred_bboxes.cpu().numpy(),
                                      pred_labels.cpu().numpy(),
                                      class_names=classes,
                                      show=False,
                                      score_thr=0.05)
        vis["img_bbox_pred"] = img_preds

        scores_vis = []
        classes_vis = []
        for center_score, cl_score in zip(self.last_vals['centernesses'],
                                          self.last_vals['cls_scores']):
            cls_scores = cl_score[0].permute(1, 2, 0).sigmoid()
            centerness_scores = center_score[0].permute(1, 2, 0).sigmoid()

            final_score = (cls_scores *
                           centerness_scores).detach().cpu().numpy()
            max_final_score = np.max(final_score, axis=-1)
            scores_vis.append(max_final_score)

            # pixels above the score threshold keep their argmax class,
            # everything else becomes background (-1)
            final_classes = np.where(
                max_final_score > test_cfg['score_thr'],
                np.argmax(cls_scores.detach().cpu().numpy(), axis=-1), -1)
            classes_vis.append(final_classes)

        # scale so the score threshold maps to mid-gray (~125/255);
        # mind uint8 overflow for scores far above the threshold
        img_scores = vt.image_pyramid(
            [vis / test_cfg['score_thr'] * 125 for vis in scores_vis],
            img.shape[:-1])
        vis["energy_pred"] = np.expand_dims(img_scores, -1)
        img_classes = vt.image_pyramid(
            vt.colorize_class_preds(classes_vis,
                                    len(classes) + 1), img.shape[:-1])
        img_classes = vt.add_class_legend(img_classes, classes,
                                          vt.get_present_classes(classes_vis))
        vis["classes_pred"] = img_classes

        # ground-truth centerness targets
        reshaped_centers = []
        for tar, vis_class in zip(self.last_vals["bbox_targets"], classes_vis):
            tar[tar < 0] = 0
            tar = self.centerness_target(tar).cpu().numpy()
            tar = self.cut_batch_reshape(tar, vis_class.shape, batch_size)
            tar = np.nan_to_num(tar)
            reshaped_centers.append((tar * 255).astype(np.uint8))
        gt_targets = vt.image_pyramid(reshaped_centers, img.shape[:-1])
        vis["energy_gt"] = np.expand_dims(gt_targets, -1)

        # class targets
        # align with VOC names
        self.last_vals['labels'] = [
            labels - 1 for labels in self.last_vals['labels']
        ]
        reshaped_labels = []
        for labels, vis_class in zip(self.last_vals['labels'], classes_vis):
            labels = labels.cpu().numpy()
            reshaped_labels.append(
                self.cut_batch_reshape(labels, vis_class.shape, batch_size))
        gt_classes = vt.image_pyramid(
            vt.colorize_class_preds(reshaped_labels,
                                    len(classes) + 1), img.shape[:-1])
        gt_classes = vt.add_class_legend(
            gt_classes, classes, vt.get_present_classes(reshaped_labels))
        vis["classes_gt"] = gt_classes
        stitched = vt.stitch_big_image(
            [[vis["img_bbox_gt"], vis["energy_gt"], vis["classes_gt"]],
             [vis["img_bbox_pred"], vis["energy_pred"], vis["classes_pred"]]])

        return [{"name": "stitched_img", "image": stitched}]
Example #2
def single_gpu_test_crop_img(model,
                             data_loader,
                             show=False,
                             out_dir=None,
                             show_score_thr=0.3):
    print('clw: using single_gpu_test_crop_img() !!')
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))

    for i, data in enumerate(
            data_loader):  # data['img'][0]: tensor (1, 3, 6016, 8192)
        img_h = data['img'][0].shape[2]
        img_w = data['img'][0].shape[3]
        with torch.no_grad():
            # if the image fits within 4096x3500, run inference on the full image
            if img_h <= 3500 and img_w <= 4096:
                result = model(return_loss=False, rescale=True, **data)
            else:
                # otherwise, crop the image into 4 overlapping tiles
                ##############################
                overlap_h = 272
                overlap_w = 256

                crop_h = round(
                    (img_h + overlap_h) /
                    2)  # clw note: a size divisible by 32 is preferable
                crop_w = round((img_w + overlap_w) / 2)

                step_h = crop_h - overlap_h
                step_w = crop_w - overlap_w

                nms_iou_thr = model.module.test_cfg['rcnn']['nms'][
                    'iou_threshold']
                results_crop = [[] for _ in range(len(model.module.CLASSES))]
                # treat each crop as a full image in the metas
                data['img_metas'][0].data[0][0]['ori_shape'] = (crop_h, crop_w)
                data['img_metas'][0].data[0][0]['img_shape'] = (crop_h, crop_w)
                data['img_metas'][0].data[0][0]['pad_shape'] = (crop_h, crop_w)
                img_tensor_orig = data['img'][0].clone()
                for start_h in range(0, img_h - crop_h + 1,
                                     step_h):  # step over tile rows
                    if start_h + crop_h > img_h:  # if the remainder is short,
                        start_h = img_h - crop_h  # shift back to keep the crop size fixed

                    for start_w in range(0, img_w - crop_w + 1, step_w):
                        if start_w + crop_w > img_w:  # same trick along the width
                            start_w = img_w - crop_w
                        # crop
                        print(start_h, start_w)  # debug: current tile origin
                        data['img'][0] = img_tensor_orig[
                            :, :, start_h:start_h + crop_h,
                            start_w:start_w + crop_w]

                        # result[0]: one list per class in model.module.CLASSES,
                        # each an (n, 5) ndarray of [x1, y1, x2, y2, score]
                        result = model(return_loss=False, rescale=True, **data)
                        for idx, item in enumerate(result[0]):
                            for row in item:
                                # skip degenerate boxes with zero width/height
                                if row[2] - row[0] == 0 or row[3] - row[1] == 0:
                                    print('clw: skipped a degenerate box')
                                    continue
                                # shift the box from tile to full-image coordinates
                                row[[0, 2]] += start_w
                                row[[1, 3]] += start_h
                                results_crop[idx].append(row)

                results_afternms = []
                for idx, res in enumerate(results_crop):
                    if len(res) == 0:
                        # clw note: the empty placeholder is really important!
                        results_afternms.append(np.array([]))
                        continue
                    prediction = torch.tensor(res)
                    # per-class boxes and scores of the merged tiles
                    boxes, scores = prediction[:, :4], prediction[:, 4]
                    keep = torchvision.ops.boxes.nms(boxes, scores,
                                                     nms_iou_thr)
                    results_afternms.append(prediction[keep].numpy())
                result = [results_afternms]
                ##############################

        batch_size = len(result)
        if show or out_dir:
            if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
                img_tensor = data['img'][0]
            else:
                img_tensor = data['img'][0].data[0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(img_show,
                                         result[i],
                                         show=show,
                                         out_file=out_file,
                                         score_thr=show_score_thr)

        # encode mask results
        if isinstance(result[0], tuple):
            result = [(bbox_results, encode_mask_results(mask_results))
                      for bbox_results, mask_results in result]
        results.extend(result)

        for _ in range(batch_size):
            prog_bar.update()
    return results
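
The tiling loops above can be factored into a reusable generator. A minimal sketch (hypothetical helper, not part of the original code) that yields overlapping crop origins and clamps the last row/column to the image border:

def crop_windows(img_h, img_w, crop_h, crop_w, overlap_h, overlap_w):
    """Yield (start_h, start_w) origins for tiles of size (crop_h, crop_w)."""
    step_h, step_w = crop_h - overlap_h, crop_w - overlap_w
    for start_h in range(0, max(img_h - crop_h, 0) + 1, step_h):
        start_h = min(start_h, img_h - crop_h)  # clamp the last tile row
        for start_w in range(0, max(img_w - crop_w, 0) + 1, step_w):
            start_w = min(start_w, img_w - crop_w)
            yield start_h, start_w

# e.g. with the 2x2 split used above on a 6016x8192 image:
# list(crop_windows(6016, 8192, 3144, 4224, 272, 256))
# -> [(0, 0), (0, 3968), (2872, 0), (2872, 3968)]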
Example #3
def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    show_score_thr=0.3):
    """Test model with single gpu.

    This method tests the model on a single gpu and provides the 'show'
    option. With ``show=True`` the visualization results are displayed;
    with ``out_dir`` set they are saved under ``out_dir``.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (torch.utils.data.DataLoader): Pytorch data loader.
        show (bool): Whether to show visualization results.
            Default: False.
        out_dir (str): The path to save visualization results.
            Default: None.
        show_score_thr (float): Score threshold for drawing predicted
            boxes. Default: 0.3.

    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)

        if show:
            # Visualize the results of MMdetection3D model
            # 'show_results' is MMdetection3D visualization API
            if hasattr(model.module, 'show_results'):
                model.module.show_results(data, result, out_dir)
            # Visualize the results of MMdetection model
            # 'show_result' is MMdetection visualization API
            else:
                batch_size = len(result)
                if batch_size == 1 and isinstance(data['img'][0],
                                                  torch.Tensor):
                    img_tensor = data['img'][0]
                else:
                    img_tensor = data['img'][0].data[0]
                img_metas = data['img_metas'][0].data[0]
                imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
                assert len(imgs) == len(img_metas)

                for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
                    h, w, _ = img_meta['img_shape']
                    img_show = img[:h, :w, :]

                    ori_h, ori_w = img_meta['ori_shape'][:-1]
                    img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                    if out_dir:
                        out_file = os.path.join(out_dir,
                                                img_meta['ori_filename'])
                    else:
                        out_file = None

                    model.module.show_result(img_show,
                                             result[i],
                                             show=show,
                                             out_file=out_file,
                                             score_thr=show_score_thr)
        results.extend(result)

        batch_size = len(result)
        for _ in range(batch_size):
            prog_bar.update()
    return results
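
A typical call site, sketched with the usual mmcv/mmdet conventions (MMDataParallel wrapping is standard; the out_dir value is a placeholder):

from mmcv.parallel import MMDataParallel

# assumes `model` and `data_loader` were already built from a config
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, show=False,
                          out_dir='work_dirs/vis', show_score_thr=0.3)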
Example #4
def single_gpu_test_processed_rect_img(model,
                                       data_loader,
                                       show=False,
                                       out_dir=None,
                                       show_score_thr=0.3):
    print('clw: using single_gpu_test_processed_rect_img() !!')
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)

        # the crop offsets are encoded in the last two '_'-separated fields
        # of the filename, e.g. 'xxx_<x_left>_<y_up>.jpg'
        img_name = data['img_metas'][0].data[0][0]['ori_filename']

        offsets = img_name[:-4].split('_')[-2:]
        x_rect_left = int(offsets[0])
        y_rect_up = int(offsets[1])

        for cls_idx in range(len(result[0])):
            bboxes = result[0][cls_idx][:, :4]  # (n, 4) view into the result array
            if bboxes.size == 0:
                continue
            # shift the boxes from crop coordinates back to the original image
            bboxes[:, [0, 2]] += x_rect_left
            bboxes[:, [1, 3]] += y_rect_up

        batch_size = len(result)
        if show or out_dir:
            if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
                img_tensor = data['img'][0]
            else:
                img_tensor = data['img'][0].data[0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(img_show,
                                         result[i],
                                         show=show,
                                         out_file=out_file,
                                         score_thr=show_score_thr)

        # encode mask results
        if isinstance(result[0], tuple):
            result = [(bbox_results, encode_mask_results(mask_results))
                      for bbox_results, mask_results in result]
        results.extend(result)

        for _ in range(batch_size):
            prog_bar.update()
    return results
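
A quick round trip of the filename convention assumed above (hypothetical values):

# a crop taken at x_left=1024, y_up=2048 from IMG_0001.jpg
crop_name = 'IMG_0001_1024_2048.jpg'
x_left, y_up = (int(v) for v in crop_name[:-4].split('_')[-2:])
assert (x_left, y_up) == (1024, 2048)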
Example #5
def single_gpu_test_processed_rect_crop_img(model,
                                            data_loader,
                                            show=False,
                                            out_dir=None,
                                            show_score_thr=0.3):
    print('clw: using single_gpu_test_processed_rect_crop_img() !!')
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        print(data['img_metas'][0].data[0][0]['ori_filename'])
        img_h = data['img'][0].shape[2]
        img_w = data['img'][0].shape[3]
        with torch.no_grad():
            # crop the image into overlapping tiles
            ##############################
            overlap_h = 256
            overlap_w = 256
            crop_h = 2048
            crop_w = 2048

            step_h = crop_h - overlap_h
            step_w = crop_w - overlap_w

            nms_iou_thr = model.module.test_cfg['rcnn']['nms']['iou_threshold']
            results_crop = [[] for _ in range(len(model.module.CLASSES))]
            # treat each crop as a full image in the metas
            data['img_metas'][0].data[0][0]['ori_shape'] = (crop_h, crop_w)
            data['img_metas'][0].data[0][0]['img_shape'] = (crop_h, crop_w)
            data['img_metas'][0].data[0][0]['pad_shape'] = (crop_h, crop_w)
            img_tensor_orig = data['img'][0].clone()
            for start_h in range(0, img_h - crop_h + 1,
                                 step_h):  # step over tile rows
                if start_h + crop_h > img_h:  # if the remainder is short,
                    start_h = img_h - crop_h  # shift back to keep the crop size fixed

                for start_w in range(0, img_w - crop_w + 1, step_w):
                    if start_w + crop_w > img_w:  # same trick along the width
                        start_w = img_w - crop_w
                    # crop
                    print(start_h, start_w)  # debug: current tile origin
                    data['img'][0] = img_tensor_orig[:, :,
                                                     start_h:start_h + crop_h,
                                                     start_w:start_w + crop_w]

                    # result[0]: one list per class in model.module.CLASSES,
                    # each an (n, 5) ndarray of [x1, y1, x2, y2, score]
                    result = model(return_loss=False, rescale=True, **data)
                    for idx, item in enumerate(result[0]):
                        for row in item:
                            # skip degenerate boxes with zero width/height
                            if row[2] - row[0] == 0 or row[3] - row[1] == 0:
                                print('clw: skipped a degenerate box')
                                continue
                            # shift the box from tile to full-image coordinates
                            row[[0, 2]] += start_w
                            row[[1, 3]] += start_h
                            results_crop[idx].append(row)

            results_afternms = []
            for idx, res in enumerate(results_crop):
                if len(res) == 0:
                    # clw note: the empty placeholder is really important!
                    results_afternms.append(np.array([]))
                    continue
                prediction = torch.tensor(res)
                # per-class boxes and scores of the merged tiles
                boxes, scores = prediction[:, :4], prediction[:, 4]
                keep = torchvision.ops.boxes.nms(boxes, scores, nms_iou_thr)
                results_afternms.append(prediction[keep].numpy())
            result = [results_afternms]
            ##############################

        # the crop offsets are encoded in the last two '_'-separated fields
        # of the filename, e.g. 'xxx_<x_left>_<y_up>.jpg'
        img_name = data['img_metas'][0].data[0][0]['ori_filename']

        offsets = img_name[:-4].split('_')[-2:]
        x_rect_left = int(offsets[0])
        y_rect_up = int(offsets[1])

        for cls_idx in range(len(result[0])):
            if result[0][cls_idx].size == 0:
                continue
            bboxes = result[0][cls_idx][:, :4]  # (n, 4) view into the result array
            # shift the boxes from crop coordinates back to the original image
            bboxes[:, [0, 2]] += x_rect_left
            bboxes[:, [1, 3]] += y_rect_up

        batch_size = len(result)
        if show or out_dir:
            if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
                img_tensor = data['img'][0]
            else:
                img_tensor = data['img'][0].data[0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(img_show,
                                         result[i],
                                         show=show,
                                         out_file=out_file,
                                         score_thr=show_score_thr)

        # encode mask results
        if isinstance(result[0], tuple):
            result = [(bbox_results, encode_mask_results(mask_results))
                      for bbox_results, mask_results in result]
        results.extend(result)

        for _ in range(batch_size):
            prog_bar.update()
    return results
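
Merging tile detections hinges on per-class NMS over the shifted boxes. A self-contained sketch of that step with toy data and the standard torchvision API:

import torch
import torchvision

# two near-duplicate boxes from overlapping tiles plus one distinct box
boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],
                      [50., 50., 60., 60.]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep = torchvision.ops.nms(boxes, scores, iou_threshold=0.5)
print(keep)  # tensor([0, 2]): the lower-scored duplicate is suppressed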
Example #6
def single_gpu_test(model, data_loader, show=False, out_dir=None):
    """Test with single GPU.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (torch.utils.data.DataLoader): Pytorch data loader.
        show (bool): Whether to show results during inference. Default: False.
        out_dir (str, optional): If specified, the results will be dumped
            into the directory to save output results.

    Returns:
        list[list]: The prediction results of the three output heads.
    """

    model.eval()
    results_0 = []
    results_1 = []
    results_2 = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, **data)
        if isinstance(result, list):
            results_0.extend(result[0])
            results_1.extend(result[1])
            results_2.extend(result[2])
        else:
            # non-list results are silently ignored
            pass

        if show or out_dir:
            img_tensor = data['img'][0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for img, img_meta in zip(imgs, img_metas):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file_0 = osp.join(out_dir + '_dir', img_meta['ori_filename'])
                    out_file_1 = osp.join(out_dir + '_sty', img_meta['ori_filename'])
                    out_file_2 = osp.join(out_dir + '_type', img_meta['ori_filename'])
                else:
                    out_file_0 = None
                    out_file_1 = None
                    out_file_2 = None

                model.module.show_result(
                    img_show,
                    result[0],
                    palette=dataset.PALETTE,
                    show=show,
                    out_file=out_file_0)
                model.module.show_result(
                    img_show,
                    result[1],
                    palette=dataset.PALETTE,
                    show=show,
                    out_file=out_file_1)
                model.module.show_result(
                    img_show,
                    result[2],
                    palette=dataset.PALETTE,
                    show=show,
                    out_file=out_file_2)

        batch_size = data['img'][0].size(0)
        for _ in range(batch_size):
            prog_bar.update()
    return [results_0, results_1, results_2]
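
Downstream code indexes the three returned lists by head. A tiny usage sketch (the head names are inferred from the out_dir suffixes above and are only illustrative):

results_dir, results_sty, results_type = single_gpu_test(model, data_loader)
assert len(results_dir) == len(results_sty) == len(results_type)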
Example #7
def single_gpu_test_rotate_rect_img(model,
                                    data_loader,
                                    show=False,
                                    out_dir=None,
                                    show_score_thr=0.3):
    print('clw: using single_gpu_test_rotate_rect_img() !!')
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)

        # the inverse perspective matrix is encoded in the last nine
        # '_'-separated fields of the filename
        img_name = data['img_metas'][0].data[0][0]['ori_filename']

        matrix_vals = [float(v) for v in img_name[:-4].split('_')[-9:]]
        M_perspective_inv = np.array(matrix_vals).reshape(3, 3)

        for cls_idx in range(len(result[0])):
            bboxes = result[0][cls_idx][:, :4]  # (n, 4)
            if bboxes.size == 0:
                continue
            # expand each axis-aligned box into its four corner points
            corners = np.array([((x1, y1), (x1, y2), (x2, y2), (x2, y1))
                                for x1, y1, x2, y2 in bboxes])

            # map the corners back to the original image and take the
            # axis-aligned bounding rect of each warped quadrilateral
            src_pts = cv2.perspectiveTransform(corners, M_perspective_inv)
            mapped = []
            for cnt in src_pts:
                x, y, rect_w, rect_h = cv2.boundingRect(cnt)
                mapped.append(np.array((x, y, x + rect_w, y + rect_h)))
            result[0][cls_idx][:, :4] = np.array(mapped)

        batch_size = len(result)
        if show or out_dir:
            if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
                img_tensor = data['img'][0]
            else:
                img_tensor = data['img'][0].data[0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(img_show,
                                         result[i],
                                         show=show,
                                         out_file=out_file,
                                         score_thr=show_score_thr)

        # encode mask results
        if isinstance(result[0], tuple):
            result = [(bbox_results, encode_mask_results(mask_results))
                      for bbox_results, mask_results in result]
        results.extend(result)

        for _ in range(batch_size):
            prog_bar.update()
    return results
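
The corner-point round trip above relies on cv2.perspectiveTransform taking an (n, 4, 2) float array of quadrilaterals. A standalone sketch with an identity homography (toy values):

import cv2
import numpy as np

# one box as its four corners, shaped (1, 4, 2)
corners = np.array([[(10., 20.), (10., 40.), (30., 40.), (30., 20.)]],
                   dtype=np.float32)
M = np.eye(3)  # identity homography: points map onto themselves
warped = cv2.perspectiveTransform(corners, M)
x, y, w, h = cv2.boundingRect(warped[0])
print(x, y, x + w, y + h)  # recovers (roughly) the original 10, 20, 30, 40 box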
Example #8
def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    show_score_thr=0.3):
    model.eval()
    results = []
    dataset = data_loader.dataset
    PALETTE = getattr(dataset, 'PALETTE', None)
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)

        batch_size = len(result)
        if show or out_dir:
            if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
                img_tensor = data['img'][0]
            else:
                img_tensor = data['img'][0].data[0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(img_show,
                                         result[i],
                                         bbox_color=PALETTE,
                                         text_color=PALETTE,
                                         mask_color=PALETTE,
                                         show=show,
                                         out_file=out_file,
                                         score_thr=show_score_thr)

        # encode mask results
        if isinstance(result[0], tuple):
            result = [(bbox_results, encode_mask_results(mask_results))
                      for bbox_results, mask_results in result]
        # This logic is only used in panoptic segmentation test.
        elif isinstance(result[0], dict) and 'ins_results' in result[0]:
            for j in range(len(result)):
                bbox_results, mask_results = result[j]['ins_results']
                result[j]['ins_results'] = (bbox_results,
                                            encode_mask_results(mask_results))

        results.extend(result)

        for _ in range(batch_size):
            prog_bar.update()
    return results
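
encode_mask_results compresses binary masks before they are accumulated. A sketch of the underlying idea using pycocotools RLE (this mirrors, but is not, the mmdet helper):

import numpy as np
import pycocotools.mask as mask_util

mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 1
# pycocotools expects Fortran-ordered uint8 arrays
rle = mask_util.encode(np.asfortranarray(mask))
assert (mask_util.decode(rle) == mask).all()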
Example #9
def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    efficient_test=False,
                    opacity=0.5):
    """Test with single GPU.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (torch.utils.data.DataLoader): Pytorch data loader.
        show (bool): Whether to show results during inference. Default: False.
        out_dir (str, optional): If specified, the results will be dumped into
            the directory to save output results.
        efficient_test (bool): Whether to save the results as local numpy
            files to save CPU memory during evaluation. Default: False.
        opacity (float): Opacity of the painted segmentation map.
            Must be in the (0, 1] range. Default: 0.5.
    Returns:
        list: The prediction results.
    """

    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, **data)

        if show or out_dir:
            img_tensor = data['img'][0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for img, img_meta in zip(imgs, img_metas):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(img_show,
                                         result,
                                         palette=dataset.PALETTE,
                                         show=show,
                                         out_file=out_file,
                                         opacity=opacity)

        if isinstance(result, list):
            if efficient_test:
                result = [np2tmp(_) for _ in result]
            results.extend(result)
        else:
            if efficient_test:
                result = np2tmp(result)
            results.append(result)

        batch_size = len(result)
        for _ in range(batch_size):
            prog_bar.update()
    return results
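
np2tmp offloads each result array to disk. A minimal sketch of such a helper (assumed behavior, modeled on the mmseg utility of the same name):

import tempfile
import numpy as np

def np2tmp(array, temp_file_name=None, tmpdir=None):
    # save `array` to a temporary .npy file and return its path,
    # trading CPU memory for disk during evaluation
    if temp_file_name is None:
        temp_file_name = tempfile.NamedTemporaryFile(
            suffix='.npy', delete=False, dir=tmpdir).name
    np.save(temp_file_name, array)
    return temp_file_name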
Example #10
def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    fps=3,
                    show_score_thr=0.3):
    """Test model with single gpu.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (torch.utils.data.DataLoader): Pytorch data loader.
        show (bool, optional): If True, visualize the prediction results.
            Defaults to False.
        out_dir (str, optional): Path of directory to save the
            visualization results. Defaults to None.
        fps (int, optional): FPS of the output video.
            Defaults to 3.
        show_score_thr (float, optional): The score threshold of visualization
            (Only used in VID for now). Defaults to 0.3.

    Returns:
        dict[str, list]: The prediction results.
    """
    model.eval()
    results = defaultdict(list)
    dataset = data_loader.dataset
    prev_img_meta = None
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)

        batch_size = data['img'][0].size(0)
        if show or out_dir:
            assert batch_size == 1, 'Only support batch_size=1 when testing.'
            img_tensor = data['img'][0]
            img_meta = data['img_metas'][0].data[0][0]
            img = tensor2imgs(img_tensor, **img_meta['img_norm_cfg'])[0]

            h, w, _ = img_meta['img_shape']
            img_show = img[:h, :w, :]

            ori_h, ori_w = img_meta['ori_shape'][:-1]
            img_show = mmcv.imresize(img_show, (ori_w, ori_h))

            if out_dir:
                out_file = osp.join(out_dir, img_meta['ori_filename'])
            else:
                out_file = None

            model.module.show_result(img_show,
                                     result,
                                     show=show,
                                     out_file=out_file,
                                     score_thr=show_score_thr)

            # Whether need to generate a video from images.
            # The frame_id == 0 means the model starts processing
            # a new video, therefore we can write the previous video.
            # There are two corner cases.
            # Case 1: prev_img_meta == None means there is no previous video.
            # Case 2: i == len(dataset) - 1 means the last frame of the
            # last video (enumerate is zero-based).
            need_write_video = (prev_img_meta is not None
                                and img_meta['frame_id'] == 0
                                or i == len(dataset) - 1)
            if out_dir and need_write_video:
                prev_img_prefix, prev_img_name = prev_img_meta[
                    'ori_filename'].rsplit('/', 1)
                prev_img_idx, prev_img_type = prev_img_name.split('.')
                prev_filename_tmpl = '{:0' + str(
                    len(prev_img_idx)) + 'd}.' + prev_img_type
                prev_img_dirs = f'{out_dir}/{prev_img_prefix}'
                prev_img_names = sorted(os.listdir(prev_img_dirs))
                prev_start_frame_id = int(prev_img_names[0].split('.')[0])
                prev_end_frame_id = int(prev_img_names[-1].split('.')[0])

                mmcv.frames2video(prev_img_dirs,
                                  f'{prev_img_dirs}/out_video.mp4',
                                  fps=fps,
                                  fourcc='mp4v',
                                  filename_tmpl=prev_filename_tmpl,
                                  start=prev_start_frame_id,
                                  end=prev_end_frame_id,
                                  show_progress=False)

            prev_img_meta = img_meta

        for key in result:
            if 'mask' in key:
                result[key] = encode_mask_results(result[key])

        for k, v in result.items():
            results[k].append(v)

        for _ in range(batch_size):
            prog_bar.update()

    return results
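
The video-writing branch reconstructs a zero-padded frame template from the last saved frame name. The string manipulation in isolation (hypothetical file names):

# frames saved as e.g. vis/MOT17-01/000001.jpg ... vis/MOT17-01/000450.jpg
prev_img_prefix, prev_img_name = 'MOT17-01/000450.jpg'.rsplit('/', 1)
prev_img_idx, prev_img_type = prev_img_name.split('.')
filename_tmpl = '{:0' + str(len(prev_img_idx)) + 'd}.' + prev_img_type
print(filename_tmpl)            # {:06d}.jpg
print(filename_tmpl.format(3))  # 000003.jpg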
Example #11
def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    efficient_test=False,
                    opacity=0.5,
                    pre_eval=False,
                    format_only=False,
                    format_args={}):
    """Test with single GPU by progressive mode.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (torch.utils.data.DataLoader): Pytorch data loader.
        show (bool): Whether to show results during inference. Default: False.
        out_dir (str, optional): If specified, the results will be dumped into
            the directory to save output results.
        efficient_test (bool): Whether to save the results as local numpy
            files to save CPU memory during evaluation. Mutually exclusive
            with pre_eval and format_only. Default: False.
        opacity (float): Opacity of the painted segmentation map.
            Must be in the (0, 1] range. Default: 0.5.
        pre_eval (bool): Use dataset.pre_eval() function to generate
            pre-results for metric evaluation. Mutually exclusive with
            efficient_test and format_only. Default: False.
        format_only (bool): Only format the results for submission without
            evaluating them. Mutually exclusive with pre_eval and
            efficient_test. Default: False.
        format_args (dict): The args for format_results. Default: {}.
    Returns:
        list: list of evaluation pre-results or list of save file names.
    """
    if efficient_test:
        warnings.warn(
            'DeprecationWarning: ``efficient_test`` will be deprecated, the '
            'evaluation is CPU memory friendly with pre_eval=True')
        mmcv.mkdir_or_exist('.efficient_test')
    # when none of them is set true, return segmentation results as
    # a list of np.array.
    assert [efficient_test, pre_eval, format_only].count(True) <= 1, \
        '``efficient_test``, ``pre_eval`` and ``format_only`` are mutually ' \
        'exclusive, only one of them can be true.'

    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    # How the data_loader retrieves samples from the dataset:
    # sampler -> batch_sampler -> indices
    # The indices are passed to the dataset fetcher to get data:
    # data_fetcher -> collate_fn(dataset[index]) -> data_sample
    # We use batch_sampler to recover the correct dataset indices.
    loader_indices = data_loader.batch_sampler

    for batch_indices, data in zip(loader_indices, data_loader):
        with torch.no_grad():
            result = model(return_loss=False, **data)

        if efficient_test:
            result = [np2tmp(_, tmpdir='.efficient_test') for _ in result]

        if format_only:
            result = dataset.format_results(result,
                                            indices=batch_indices,
                                            **format_args)
        if pre_eval:
            # TODO: adapt samples_per_gpu > 1.
            # only samples_per_gpu=1 valid now
            result = dataset.pre_eval(result, indices=batch_indices)

        results.extend(result)

        if show or out_dir:
            img_tensor = data['img'][0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for img, img_meta in zip(imgs, img_metas):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(img_show,
                                         result,
                                         palette=dataset.PALETTE,
                                         show=show,
                                         out_file=out_file,
                                         opacity=opacity)

        batch_size = len(result)
        for _ in range(batch_size):
            prog_bar.update()

    return results
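
The zip over loader_indices works because a DataLoader's batch_sampler yields exactly the index lists used to form each batch. A tiny sketch (samples_per_gpu=1, matching the TODO above):

import torch

dataset = list(range(5))  # stand-in dataset
loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)
for batch_indices, batch in zip(loader.batch_sampler, loader):
    # batch_indices lists the dataset indices inside this batch
    print(batch_indices, batch)  # e.g. [0] tensor([0])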
Example #12
def test(model, data_loader, show=False, out_dir=None, do_eval=True):
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    y_pred = []
    y_gt = []
    for idx, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)

        batch_size = len(result)
        if show or out_dir:
            img_tensor = data['img'].data[0]
            img_metas = data['img_metas'].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)
            gt_bboxes = [data['gt_bboxes'].data[0][0].numpy().tolist()]

            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                box_ann_infos = dataset.data_infos[idx]['annotations']
                node_gt = [
                    box_ann_info['label'] for box_ann_info in box_ann_infos
                ]

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                vis_img, node_pred = model.module.show_result(
                    img_show,
                    result[i],
                    gt_bboxes[i],
                    show=show,
                    out_file=out_file)
                if len(node_pred) != len(node_gt):
                    print('Warning: node prediction and GT counts differ')
                y_pred.extend(node_pred)
                y_gt.extend(node_gt)

        for _ in range(batch_size):
            prog_bar.update()

    labels = list(range(1, 31))  # class ids 1..30
    if do_eval:
        print('\nF1 scores of each class:')
        count = 0
        total_F1 = 0
        for label in labels:
            score = eval_marco_F1(
                y_pred=y_pred,
                y_gt=y_gt,
                labels=[label],
            )
            print(str(label).ljust(20), score)
            if score > -10:  # skip classes the helper flags with a sentinel score
                count += 1
                total_F1 += score
        if count:
            print('average F1 over', count, 'classes:', total_F1 / count)
    # note: `results` is never populated; this helper mainly reports F1 scores
    return results
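
eval_marco_F1 is project-specific; a per-class F1 in the same spirit can be sketched with scikit-learn (an assumed equivalent, not the project's helper):

from sklearn.metrics import f1_score

y_gt = [1, 2, 2, 3, 1]
y_pred = [1, 2, 3, 3, 1]
for label in (1, 2, 3):
    # one-vs-rest F1 restricted to a single class id
    score = f1_score(y_gt, y_pred, labels=[label], average='macro')
    print(str(label).ljust(20), score)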