Code example #1
    def represent(self, batch, pred):
        images, labels = batch['image'], batch['label']
        mask = torch.ones(pred.shape[0], dtype=torch.int).to(pred.device)

        for i in range(pred.shape[1]):
            mask = (1 -
                    (pred[:, i] == self.charset.blank).type(torch.int)) * mask
            pred[:, i] = pred[:, i] * mask + self.charset.blank * (1 - mask)

        output = []
        for i in range(labels.shape[0]):
            label_str = self.label_to_string(labels[i])
            pred_str = self.label_to_string(pred[i])
            # Debug branch, deliberately disabled; flip `False` to re-enable
            # the interactive mismatch viewer below.
            if False and label_str != pred_str:
                print('label: %s , pred: %s' % (label_str, pred_str))
                img = (np.clip(
                    images[i].cpu().data.numpy().transpose(1, 2, 0) + 0.5, 0,
                    1) * 255).astype('uint8')
                webcv.imshow(
                    '【 pred: <%s> , label: <%s> 】' % (pred_str, label_str),
                    np.array(img, dtype=np.uint8))
                if webcv.waitKey() == ord('q'):
                    continue
            output.append({'label_string': label_str, 'pred_string': pred_str})

        return output
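
The mask loop above implements blank truncation: a per-sequence flag drops to
zero at the first blank token, and every position from there on is forced to
the blank id. A minimal stand-alone sketch (assuming blank == 0; the tensor
values are made up):

    import torch

    blank = 0
    pred = torch.tensor([[3, 5, 0, 7, 2]])  # one sequence; blank at index 2
    mask = torch.ones(pred.shape[0], dtype=torch.int)
    for i in range(pred.shape[1]):
        mask = (1 - (pred[:, i] == blank).type(torch.int)) * mask
        pred[:, i] = pred[:, i] * mask + blank * (1 - mask)
    print(pred)  # tensor([[3, 5, 0, 0, 0]])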
Code example #2
    def visualize_batch(self, batch, output):
        visualization = dict()
        for index, output_dict in enumerate(output):
            image = batch['image'][index]
            image = NormalizeImage.restore(image)

            mask = output_dict['mask']
            mask = cv2.resize(Visualize.visualize_weights(mask),
                              image.shape[:2][::-1])

            classify = output_dict['classify']
            classify = cv2.resize(
                Visualize.visualize_heatmap(classify, format='CHW'),
                image.shape[:2][::-1])

            canvas = np.concatenate([image, mask, classify], axis=0)
            key = "【%s-%s】" % (output_dict['label_string'],
                               output_dict['pred_string'])
            vis_dict = {key: canvas}

            if self.eager_show:
                for k, v in vis_dict.items():
                    # if output_dict['label_string'] != output_dict['pred_string']:
                    webcv2.imshow(k, v)
            # Note: update() overwrites these keys on every iteration, so only
            # the last sample in the batch ends up in the returned dict.
            visualization.update(mask=mask, classify=classify, image=image)
        if self.eager_show:
            webcv2.waitKey()
        return visualization
Code example #3
 def output(vis, fname):
     if args.show:
         print(fname)
         webcv2.imshow(fname, vis.get_image()[:, :, ::-1])
         webcv2.waitKey()
     else:
         filepath = os.path.join(dirname, fname)
         print("Saving to {} ...".format(filepath))
         vis.save(filepath)
Code example #4
    def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
        '''
        _bitmap: single map with shape (1, H, W),
            whose values are binarized as {0, 1}
        '''
        assert _bitmap.size(0) == 1
        bitmap = _bitmap.data.cpu().numpy()[0]  # The first channel
        pred = pred.cpu().detach().numpy()[0]
        height, width = bitmap.shape
        boxes = []
        # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3
        # but (contours, hierarchy) in OpenCV 4; taking the second-to-last
        # element works with both versions.
        outputs = cv2.findContours(
            (bitmap * 255).astype(np.uint8),
            cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        contours = outputs[-2]

        if self.debug:
            # Cast to uint8 so the float probability map renders as a normal
            # 8-bit image in the debug view.
            bitmap = cv2.cvtColor((pred * 255).astype(np.uint8),
                                  cv2.COLOR_GRAY2BGR)

        for contour in contours[:self.max_candidates]:
            points, sside = self.get_mini_boxes(contour)
            if sside < self.min_size:
                continue
            points = np.array(points)
            score = self.box_score_fast(pred, points.reshape(-1, 2))

            if self.debug:
                points = points.astype(np.int32)
                bitmap = cv2.polylines(
                        bitmap, [points.reshape(-1, 2)], True, (255, 0, 0), 3)
                bitmap = cv2.putText(
                        bitmap, str(round(score, 3)),
                        (points[:, 0].min(), points[:, 1].min()),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
            if self.box_thresh > score:
                continue
            box = self.unclip(points).reshape(-1, 1, 2)
            box, sside = self.get_mini_boxes(box)
            if sside < self.min_size + 2:
                continue
            box = np.array(box)

            if not self.resize:
                dest_width = width
                dest_height = height

            box[:, 0] = np.clip(
                np.round(box[:, 0] / width * dest_width), 0, dest_width)
            box[:, 1] = np.clip(
                np.round(box[:, 1] / height * dest_height), 0, dest_height)
            boxes.append(box.tolist())

        if self.debug:
            webcv2.imshow('mask', bitmap)
        return boxes, bitmap
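
A hedged usage sketch for boxes_from_bitmap: `representer` stands in for the
object that owns the method, and the 0.3 binarization threshold is an
assumption for illustration, not a value from the source.

    pred = model(image_tensor)       # (N, 1, H, W) probability map (assumed)
    bitmap = pred > 0.3              # binarize to {0, 1}
    boxes, debug_map = representer.boxes_from_bitmap(
        pred[0], bitmap[0], dest_width=orig_w, dest_height=orig_h)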
Code example #5
    def single_visualize(self, batch, index, boxes, pred):
        image = batch['image'][index]
        polygons = batch['polygons'][index]
        if isinstance(polygons, torch.Tensor):
            polygons = polygons.cpu().data.numpy()
        ignore_tags = batch['ignore_tags'][index]
        original_shape = batch['shape'][index]
        filename = batch['filename'][index]
        std = np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1)
        mean = np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)
        image = (image.cpu().numpy() * std + mean).transpose(1, 2, 0) * 255
        pred_canvas = image.copy().astype(np.uint8)
        pred_canvas = cv2.resize(pred_canvas,
                                 (original_shape[1], original_shape[0]))

        # Each heatmap below is only bound when its key is present in `pred`;
        # the returns at the end assume the keys they reference exist.
        if isinstance(pred, dict) and 'binary' in pred:
            binary = self._visualize_heatmap(pred['binary'][index])

        if isinstance(pred, dict) and 'thresh' in pred:
            thresh = self._visualize_heatmap(pred['thresh'][index])

        if isinstance(pred, dict) and 'thresh_binary' in pred:
            thresh_binary = self._visualize_heatmap(
                pred['thresh_binary'][index])
            MakeICDARData.polylines(self, thresh_binary, polygons, ignore_tags)

        for box in boxes:
            box = np.array(box).astype(np.int32).reshape(-1, 2)
            cv2.polylines(pred_canvas, [box], True, (0, 255, 0), 2)
            if isinstance(pred, dict) and 'thresh_binary' in pred:
                cv2.polylines(thresh_binary, [box], True, (0, 255, 0), 1)

        if self.eager_show:
            webcv2.imshow(filename + ' output',
                          cv2.resize(pred_canvas, (1024, 1024)))
            if isinstance(pred, dict) and 'thresh' in pred:
                webcv2.imshow(filename + ' thresh',
                              cv2.resize(thresh, (1024, 1024)))
                webcv2.imshow(filename + ' pred',
                              cv2.resize(pred_canvas, (1024, 1024)))
            if isinstance(pred, dict) and 'thresh_binary' in pred:
                webcv2.imshow(filename + ' thresh_binary',
                              cv2.resize(thresh_binary, (1024, 1024)))
            return {}
        else:
            if isinstance(pred, dict) and 'thresh' in pred:
                return {
                    filename + '_output': pred_canvas,
                    filename + '_thresh': thresh,
                    filename + '_pred': thresh_binary,
                    filename + '_binary': binary
                }
            else:
                return {
                    filename + '_output': pred_canvas,
                    filename + '_pred': thresh_binary,
                    filename + '_binary': binary
                }
Code example #6
 def visualize_batch(self, batch, output):
     images, labels, lengths = batch['image'], batch['label'], batch[
         'length']
     for i in range(images.shape[0]):
         image = NormalizeImage.restore(images[i])
         gt = self.charset.label_to_string(labels[i])
         webcv2.imshow(output[i]['pred_string'] + '_' + str(i) + '_' + gt,
                       image)
         # folder = 'images/dropout/lexicon/'
         # np.save(folder + output[i]['pred_string'] + '_' + gt + '_' + batch['data_ids'][i], image)
     webcv2.waitKey()
     return {
         'image': (np.clip(
             batch['image'][0].cpu().data.numpy().transpose(1, 2, 0) + 0.5,
             0, 1) * 255).astype('uint8')
     }
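
NormalizeImage.restore is not defined in these snippets; judging from the
return value this function builds by hand, it plausibly inverts a
[-0.5, 0.5] image normalization. A sketch inferred from this snippet, not
the original implementation:

    import numpy as np

    def restore(image_tensor):
        # Undo CHW float normalization: shift back to [0, 1], scale to uint8 HWC.
        img = image_tensor.cpu().data.numpy().transpose(1, 2, 0)
        return (np.clip(img + 0.5, 0, 1) * 255).astype('uint8')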
Code example #7
def main(input_path, output_path, show=False):
    coco = COCO(input_path)

    if show:
        data_dir = "datasets/coco/train2017/"
        dicts = DatasetCatalog.get("coco_2017_train")
        metadata = MetadataCatalog.get("coco_2017_train")
        for dic in dicts:
            for ann in dic["annotations"]:
                coco.compute_rbox(ann)
                ann["bbox"] = ann["rbox"]
                ann["bbox_mode"] = ann["rbox_mode"]
                print(ann["bbox"])

            image_path = dic["file_name"]
            img = utils.convert_PIL_to_numpy(
                    Image.open(image_path), "RGB")
            visualizer = Visualizer(img, metadata=metadata, scale=1)
            vis = visualizer.draw_dataset_dict(dic)
            webcv2.imshow(image_path+"bbox", vis.get_image()[:, :, ::-1])
            webcv2.waitKey()

    skip = False
    if "val" in input_path:
        skip = True

    anns = []
    for ann_id, ann in tqdm.tqdm(coco.anns.items()):
        if skip and ann["iscrowd"]:
            continue
        ann = coco.compute_rbox(ann)
        ann["bbox"] = ann["rbox"]
        anns.append(ann)

    info = dict(date_created=str(datetime.datetime.now()),
                description="Rbox version of {}.".format(
                    input_path))
    coco_dict = dict(
        info=info,
        categories=coco.dataset["categories"],
        annotations=anns,
        images=coco.dataset["images"],
        license=None)

    with open(output_path, "wt") as writer:
        json.dump(coco_dict, writer)
Code example #8
def vis_boxes(box_1, box_2, steps=100, distance=l2):
    # Interpolation step: divide by `steps`; the original hard-coded 100
    # only matched the default argument value.
    diff = (box_2 - box_1) / steps

    xs = []
    ious = []
    losses = []
    fig, iou_scores = plt.subplots()
    max_val = max(box_1.max(), box_2.max()) * 4
    iou_scores.set_xlim(-0.5, 1.5)
    loss_scores = iou_scores.twinx()
    iou_scores.set_ylabel("IoU with %s" % str(distance))
    loss_scores.set_ylabel("L1 Loss")

    for i in range(steps):
        iou_i = iou(box_1, box_2, distance)
        xs.append(i / steps)
        ious.append(iou_i)
        losses.append(np.abs(box_1 - box_2).sum())

        if i % (steps // 3) == 0:
            patch = plt.Rectangle((i / steps, 0),
                                  width=(box_1[2] - box_1[0]) / max_val,
                                  height=(box_1[3] - box_1[1]) / max_val,
                                  fill=False)
            iou_scores.add_patch(patch)
            patch = plt.Rectangle((i / steps, 0),
                                  width=(box_2[2] - box_2[0]) / max_val,
                                  height=(box_2[3] - box_2[1]) / max_val,
                                  fill=False)
            iou_scores.add_patch(patch)

        box_1 += diff
        # examples.add_patch(patch)

    losses = np.array(losses)
    iou_scores.plot(xs, ious, "r", label="IoU")
    loss_scores.plot(xs, losses, "b", label="L1 loss")
    iou_scores.legend(loc="lower right")
    loss_scores.legend()

    image = fig2image(fig)
    webcv2.imshow("image", image)
    webcv2.waitKey()
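
fig2image, used here and in examples #13 and #14, is not defined in any of
these snippets. A minimal sketch of such a helper, rendering a Matplotlib
figure to a BGR array suitable for webcv2.imshow (an assumption about its
contract, not the original implementation):

    import numpy as np

    def fig2image(fig):
        # Rasterize the figure, then copy its RGBA buffer into an array.
        fig.canvas.draw()
        buf = np.asarray(fig.canvas.buffer_rgba())
        return buf[..., 2::-1]  # RGBA -> BGR for cv2-style display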
Code example #9
def process_single(metadata, dic, args):
    masks = BorderMasks([
        x['segmentation'] for x in dic['annotations']
        if not isinstance(x['segmentation'], dict)
    ])
    img = utils.read_image(dic["file_name"], "RGB")
    borders, centers, sizes = masks.masks(mask_size=img.shape[:2])

    if args.show:
        visualizer = Visualizer(img, metadata=metadata)
        vis = visualizer.draw_dataset_dict(dic)
        webcv2.imshow(dic["file_name"], vis.get_image()[:, :, ::-1])
        webcv2.imshow(dic["file_name"] + "-border", borders * 255)
        webcv2.imshow(dic["file_name"] + "-centers", centers * 255)
        webcv2.imshow(dic["file_name"] + "-sizes",
                      (sizes / sizes.max()).sum(-1) * 255)
        webcv2.waitKey()
    else:
        file_name = os.path.basename(dic["file_name"])
        save(borders, cfg.MASK_DIRECTORY, split_name, "borders", file_name)
        save(centers, cfg.MASK_DIRECTORY, split_name, "centers", file_name)
        save(sizes, cfg.MASK_DIRECTORY, split_name, "sizes", file_name)
Code example #10
    # The opening branch header is missing from this fragment; in the
    # detectron2 visualization script this fragment matches, the first
    # branch guards on the dataset metadata (reconstruction, left as a
    # comment):
    # if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
        def dataset_id_map(ds_id):
            return metadata.thing_dataset_id_to_contiguous_id[ds_id]

    elif "lvis" in args.dataset:
        # LVIS results are in the same format as COCO results, but have a different
        # mapping from dataset category id to contiguous category id in [0, #categories - 1]
        def dataset_id_map(ds_id):
            return ds_id - 1

    else:
        raise ValueError("Unsupported dataset: {}".format(args.dataset))

    os.makedirs(args.output, exist_ok=True)

    for dic in tqdm.tqdm(dicts):
        img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1]
        basename = os.path.basename(dic["file_name"])

        predictions = create_instances(pred_by_image[dic["image_id"]],
                                       img.shape[:2])
        vis = Visualizer(img, metadata)
        vis_pred = vis.draw_instance_predictions(predictions).get_image()

        vis = Visualizer(img, metadata)
        vis_gt = vis.draw_dataset_dict(dic).get_image()

        concat = np.concatenate((vis_pred, vis_gt), axis=1)
        webcv2.imshow(basename, concat[:, :, ::-1])
        webcv2.waitKey()
        cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1])
Code example #11
 def output(vis, fname):
     if args.show:
         print(fname)
         webcv2.imshow("window", vis.get_image()[:, :, ::-1])
         webcv2.waitKey()
Code example #12
        img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1]
        basename = os.path.basename(dic["file_name"])

        vis = Visualizer(img, metadata)
        vis_pred = vis.draw_proposals_separately(
            proposal_by_image[dic["image_id"]], img.shape[:2],
            args.conf_threshold)

        predictions = create_instances(pred_by_image[dic["image_id"]],
                                       img.shape[:2])
        vis = Visualizer(img, metadata)
        pred = vis.draw_instance_predictions(predictions).get_image()
        vis_pred.append(pred)
        vis = Visualizer(img, metadata)
        gt = vis.draw_dataset_dict(dic).get_image()

        concat = vis.smart_concatenate(vis_pred, min_side=1960)
        vis = np.concatenate([pred, gt], axis=1)

        if args.show:
            webcv2.imshow(basename + ' - Press D for details', vis[:, :, ::-1])
            key = webcv2.waitKey()
            if key == ord('d'):
                webcv2.imshow(basename, concat[:, :, ::-1])
                webcv2.waitKey()
        else:
            cv2.imwrite(os.path.join(args.output, basename), vis[:, :, ::-1])
            cv2.imwrite(
                os.path.join(args.output, basename) + '-proposals.jpg',
                concat[:, :, ::-1])
Code example #13
def main(
        result_path="train_log/retinanet/R_50_FPN/baseline/inference/instances_predictions_coco.pkl",
        result_path_2="../train_log/retinanet/R_50_FPN/baseline/inference/coco_objects365_val_with_masks/instances_predictions.pkl"):
    result_path = smart_path(result_path)
    results = pickle.loads(result_path.read_bytes())

    result_path_2 = smart_path(result_path_2)
    results_2 = pickle.loads(result_path_2.read_bytes())
    # shape (T, K, R, A), where T, K, R, A are number of thresholds,
    # classes, ratios, areas, respectively.
    ar = results["ar"]
    stats = results["ar"]["ar-stats"]["recalls"] * 100
    ar_2 = results_2["ar"]
    stats_2 = results_2["ar"]["ar-stats"]["recalls"] * 100
    plt.style.use(['ieee', "no-latex"])
    fig, axs = plt.subplots(3, 3, sharey=True, figsize=(9, 9))
    markers = ['*', '_', '+', 'x']

    # plot overall AR
    xs = np.arange(4) + 1
    ys = np.array([ar["AR@100"],
                   ar["AR- 0  - 1/5@100"],
                   ar["AR-1/5 - 1/3@100"],
                   ar["AR-1/3 - 3/1@100"]])
    scatter_with_markers(axs[0, 0], xs, ys, markers, c="r")
    axs[0, 0].plot(xs, ys, linestyle="dotted")

    axs[0, 0].set_xticks(xs)
    title(axs[0, 0], "all objects")
    axs[0, 0].set_ylabel("mAR-COCO")
    axs[0, 0].set_xlabel("slenderness")
    axs[0, 0].set_xticklabels(["all", "XS", "S", "R"])

    # merge
    [ax.remove() for ax in axs[0, 1:]]
    ax = fig.add_subplot(axs[0, 2].get_gridspec()[0, 1:])
    
    # plot thresholds
    T = stats.shape[0]
    xs = np.array([1])
    x_labels = []
    thresh = 0.5
    stride = 0.05
    xticks = []
    for i in range(T):
        # Average over classes (dropping the last, aggregate entry) for the
        # four slenderness bins, at area bin 0 (all areas, by COCO convention).
        ys = stats[i, :-1, 0:4, 0].mean(0)
        xs = np.arange(4) + xs.max() + 1
        scatter_with_markers(ax, xs, ys, markers, c='r')
        ax.plot(xs, ys, c='black', linestyle="dotted")
        x_labels += ["", str(int(thresh * 100) / 100), "", ""]
        thresh += stride
        xticks.append(xs)

    ax.set_xticks(np.concatenate(xticks))
    ax.set_xticklabels(x_labels)
    title(ax, "all objects")
    ax.set_xlabel("threshold")
    # Use a single color argument; the original passed both `color` and its
    # alias `c`, which is ambiguous (the markers here are red, matching the
    # scatter above).
    ax.legend(
        [Line2D([0], [0], color="r", linewidth=1, linestyle="none", marker=m)
            for m in markers],
        ["all", "XS", "S", "R"], loc="upper right")

    # plot overall AR
    xs = np.arange(4) + 1
    ys = np.array([ar_2["AR@100"],
                   ar_2["AR- 0  - 1/5@100"],
                   ar_2["AR-1/5 - 1/3@100"],
                   ar_2["AR-1/3 - 3/1@100"]])
    ax = axs[1, 0]
    scatter_with_markers(ax, xs, ys, markers, c="g")
    ax.plot(xs, ys, linestyle="dotted")

    ax.set_xticks(xs)
    title(ax, "all objects")
    ax.set_ylabel("mAR-COCO+")
    ax.set_xlabel("slenderness")
    ax.set_xticklabels(["all", "XS", "S", "R"])

    # merge
    [ax.remove() for ax in axs[1, 1:]]
    ax = fig.add_subplot(axs[1, 2].get_gridspec()[1, 1:])
    
    # plot thresholds
    T = stats.shape[0]
    xs = np.array([1])
    x_labels = []
    thresh = 0.5
    stride = 0.05
    xticks = []
    for i in range(T):
        ys = stats_2[i, :-1, 0:4, 0].mean(0)
        xs = np.arange(4) + xs.max() + 1
        scatter_with_markers(ax, xs, ys, markers, c='g')
        ax.plot(xs, ys, c='black', linestyle="dotted")
        x_labels += ["", str(int(thresh * 100) / 100), "", ""]
        thresh += stride
        xticks.append(xs)

    ax.set_xticks(np.concatenate(xticks))
    ax.set_xticklabels(x_labels)
    title(ax, "all objects")
    ax.set_xlabel("threshold")
    # As above, a single color argument replaces the conflicting color/c pair
    # (green here, matching the scatter).
    ax.legend(
        [Line2D([0], [0], color="g", linewidth=1, linestyle="none", marker=m)
            for m in markers],
        ["all", "XS", "S", "R"], loc="upper right")

    # plot small objects
    ax = axs[2, 0]
    xs = np.arange(4)
    ys = np.array([stats[:, :-1, 0, 1].mean(),
        stats[:, :-1, 1, 1].mean(),
        stats[:, :-1, 2, 1].mean(),
        stats[:, :-1, 3, 1].mean()])
    ys_2 = np.array([stats_2[:, :-1, 0, 1].mean(),
        stats_2[:, :-1, 1, 1].mean(),
        stats_2[:, :-1, 2, 1].mean(),
        stats_2[:, :-1, 3, 1].mean()])
    # scatter_with_markers(ax, xs, ys, markers, c='g')
    scatter_with_markers(ax, xs, ys_2, markers, c='g')
    # ax.plot(xs, ys, c="g", linestyle="dotted")
    ax.plot(xs, ys_2, c="black", linestyle="dotted")
    ax.set_xticks(xs)
    title(ax, "small objects")
    ax.set_ylabel("mAR-COCO+")
    ax.set_xlabel("slenderness")
    ax.set_xticklabels(["all", "XS", "S", "R"])
    
    # plot medium objects
    ax = axs[2, 1]
    xs = np.arange(4)
    ys = np.array([stats[:, :-1, 0, 2].mean(),
        stats[:, :-1, 1, 2].mean(),
        stats[:, :-1, 2, 2].mean(),
        stats[:, :-1, 3, 2].mean()])
    ys_2 = np.array([stats_2[:, :-1, 0, 2].mean(),
        stats_2[:, :-1, 1, 2].mean(),
        stats_2[:, :-1, 2, 2].mean(),
        stats_2[:, :-1, 3, 2].mean()])
    # scatter_with_markers(ax, xs, ys, markers, c='g')
    scatter_with_markers(ax, xs, ys_2, markers, c='g')
    # ax.plot(xs, ys, c="g", linestyle="dotted")
    ax.plot(xs, ys_2, c="black", linestyle="dotted")
    ax.set_xticks(xs)
    title(ax, "medium objects")
    ax.set_xlabel("slenderness")
    ax.set_xticklabels(["all", "XS", "S", "R"])

    # plot large objects
    ax = axs[2, 2]
    xs = np.arange(4)
    ys = np.array([stats[:, :-1, 0, 3].mean(),
        stats[:, :-1, 1, 3].mean(),
        stats[:, :-1, 2, 3].mean(),
        stats[:, :-1, 3, 3].mean()])
    ys_2 = np.array([stats_2[:, :-1, 0, 3].mean(),
        stats_2[:, :-1, 1, 3].mean(),
        stats_2[:, :-1, 2, 3].mean(),
        stats_2[:, :-1, 3, 3].mean()])
    # scatter_with_markers(ax, xs, ys, markers, c='g')
    scatter_with_markers(ax, xs, ys_2, markers, c='g')
    # ax.plot(xs, ys, c="g", linestyle="dotted")
    ax.plot(xs, ys_2, c="black", linestyle="dotted")
    ax.set_xticks(xs)
    title(ax, "large objects")
    ax.set_xlabel("slenderness")
    ax.set_xticklabels(["all", "XS", "S", "R"])

    group = fig2image(fig)
    webcv2.imshow("group", group)
    webcv2.waitKey()
Code example #14
def PlotAll(dataset):

    dicts = list(DatasetCatalog.get(dataset))

    metadata = MetadataCatalog.get(dataset)
    labels = metadata.thing_classes
    ratios = {
        "0-1/5": [0, 1 / 5],
        "1/5-1/3": [1 / 5, 1 / 3],
        "1/3-1": [1 / 3, 1]
    }

    bars = dict()
    for key in ratios.keys():
        bars[key] = [0 for _ in range(len(labels))]

    all_ratios = []
    ratio_counts = {k: 0 for k in ratios.keys()}
    for dic in tqdm(dicts):
        for obj in dic["annotations"]:
            ratio = COCO.compute_ratio(obj, oriented=True)["ratio"]
            for key, ratio_range in ratios.items():
                if between(ratio, ratio_range):
                    bars[key][obj["category_id"]] += 1
                    ratio_counts[key] += 1
            all_ratios.append(ratio)

    print("images", len(dicts))
    print("counts", ratio_counts)

    fig, ax = plt.subplots()
    ax.set_yscale("symlog")
    prev = np.zeros((len(labels), ))
    for key, bar in bars.items():
        bar = np.array(bar)
        ax.bar(labels, bar, bottom=prev, label=key)
        prev = prev + bar
    ax.legend()
    fig.set_size_inches(18.5, 10.5)
    ax.set_xticklabels(labels, rotation="vertical")
    group = fig2image(fig)
    # cv2.imwrite('./group.png', group)
    webcv2.imshow("group", group)

    fig, ax = plt.subplots()
    all_ratios = sorted(all_ratios)
    numbers = [0]
    tick = 0.01
    separator = tick
    count = 0

    # Build an empirical CDF sampled every `tick`: record the running count
    # each time the separator passes a ratio. Assumes the ratios lie in
    # (0, 1] so the list lines up with np.arange(0, 1, tick) below.
    for r in all_ratios:
        count += 1
        while separator < r:
            separator += tick
            numbers.append(count)

    ax.plot(np.arange(0, 1, tick), np.array(numbers) / count)
    ax.set_xlabel("slenderness")
    ax.set_title("cumulative distribution function")
    number = fig2image(fig)
    # cv2.imwrite('./number.png', number)
    webcv2.imshow("number", number)
    webcv2.waitKey()
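
The separator loop above is a hand-rolled empirical CDF; an equivalent and
length-safe formulation with numpy, offered as a hedged alternative rather
than the original code:

    import numpy as np

    all_ratios = np.sort(np.asarray(all_ratios))
    ticks = np.arange(0, 1, tick)
    # Fraction of ratios <= each tick value; same curve as the loop above.
    cdf = np.searchsorted(all_ratios, ticks, side="right") / len(all_ratios)
    # ax.plot(ticks, cdf)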
Code example #15
            vis = Visualizer(img, metadata, scale=scale)
            topk_boxes, topk_indices = vis.topk_iou_boxes(
                predictions.pred_boxes,
                Boxes([
                    BoxMode.convert(x["bbox"], BoxMode.XYWH_ABS,
                                    BoxMode.XYXY_ABS)
                    for x in grouped_gt[range_name]
                ]))
            topk_indices = topk_indices.reshape((-1, ))
            # Transform indices to list since shape 1 tensors will be regarded as scalars.
            vis.draw_dataset_dict({"annotations": grouped_gt[range_name]})
            vis_boxes = vis.draw_instance_predictions(
                predictions[topk_indices.tolist()])

            if args.show:
                webcv2.imshow(basename + "-boxes@" + range_name,
                              vis_boxes.get_image()[..., ::-1])
            else:
                save(vis_boxes.get_image()[..., ::-1], args.output, "boxes",
                     basename + "@%s.jpg" % range_name)

            vis_anchor = Visualizer(img, metadata)
            anchors = predictions.anchors.tensor[topk_indices]
            vis_anchor = vis_anchor.overlay_instances(
                boxes=anchors.reshape(-1, 4),
                labels=predictions.scores[topk_indices.reshape(-1).tolist()])

            if args.show:
                webcv2.imshow(basename + "-anchors@" + range_name,
                              vis_anchor.get_image()[..., ::-1])
            else:
                save(vis_anchor.get_image()[..., ::-1], args.output, "anchors",
                     basename + "@%s.jpg" % range_name)