def show_semseg_result(semseg_json_file, categories_json_file, out_image_file):
    sem_by_image = defaultdict(list)

    with open(semseg_json_file, 'r') as f:
        sem_results = json.load(f)

    print("Semantic segmentation:")
    print("\tJSON file: {}".format(semseg_json_file))


    with open(categories_json_file, 'r') as f:
        categories_list = json.load(f)
    categories = {el['id']: el for el in categories_list}


    for sem in sem_results:
        img_id = sem['image_id']  # NB: ends up holding the last image id seen
        sem_by_image[img_id].append(sem)

    id_generator = IdGenerator(categories)

    # pan_segm_id should match the input mask shape; it is hard-coded to
    # 480x640 here
    pan_segm_id = np.zeros((480, 640), dtype=np.uint32)

    # TODO: handle the multiple-annotations case properly. For now a later
    # annotation simply overrides earlier ones; the merge logic can be
    # adapted from the panoptic combine script (see the sketch below).


    for ann in sem_by_image[img_id]:
        mask = COCOmask.decode(ann['segmentation'])
        plt.imshow(mask)
        plt.show()
        segment_id = id_generator.get_id(ann['category_id'])
        print("category_id:", ann['category_id'])
        pan_segm_id[mask == 1] = segment_id
        print("segment_id:", segment_id)
        print(id2rgb(pan_segm_id).shape)
    Image.fromarray(id2rgb(pan_segm_id)).save(
        os.path.join("/home/zhiliu/Documents/Panoptic_Segement/Cocopanopticapi/VanillaPanopticSeg/data/predictions/test_result_for_vox/", out_image_file) 
    )
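The TODO above leaves overlapping annotations to overwrite one another. A minimal non-overriding variant, sketched with the same variables as show_semseg_result and borrowing the pan_segm_id == 0 guard from the combine script in Example #21 below:

# Sketch: write each decoded mask only into still-unclaimed pixels, so a
# later annotation can no longer override an earlier one.
for ann in sem_by_image[img_id]:
    mask = COCOmask.decode(ann['segmentation']) == 1
    free = np.logical_and(pan_segm_id == 0, mask)
    pan_segm_id[free] = id_generator.get_id(ann['category_id'])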
Example #2

            def get_ids_area(masks, scores, dedup=False):
                # Build the final panoptic segmentation image and return the
                # area of each mask that appears on it. Note this is a closure:
                # h, w, stuff_equiv_classes, target_size and to_tuple come
                # from the enclosing scope.

                m_id = masks.transpose(0, 1).softmax(-1)

                if m_id.shape[-1] == 0:
                    # We didn't detect any mask :(
                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
                else:
                    m_id = m_id.argmax(-1).view(h, w)

                if dedup:
                    # Merge the masks corresponding to the same stuff class
                    for equiv in stuff_equiv_classes.values():
                        if len(equiv) > 1:
                            for eq_id in equiv:
                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])

                final_h, final_w = to_tuple(target_size)

                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
                seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)

                np_seg_img = (
                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
                )
                m_id = torch.from_numpy(rgb2id(np_seg_img))

                area = []
                for i in range(len(scores)):
                    area.append(m_id.eq(i).sum().item())
                return area, seg_img
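The RGB round-trip above is lossless: id2rgb and rgb2id are exact inverses for ids below 256**3, and NEAREST resampling never blends colors. A quick sanity check, assuming panopticapi is installed:

import numpy as np
from panopticapi.utils import id2rgb, rgb2id

ids = np.random.randint(0, 2 ** 24, size=(4, 4), dtype=np.uint32)
assert np.array_equal(rgb2id(id2rgb(ids)), ids)  # lossless round-trip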
Example #3
    def process(self, inputs, outputs):
        from panopticapi.utils import id2rgb

        for input, output in zip(inputs, outputs):
            panoptic_img, segments_info = output["panoptic_seg"]
            panoptic_img = panoptic_img.cpu().numpy()

            file_name = os.path.basename(input["file_name"])
            file_name_png = os.path.splitext(file_name)[0] + ".png"
            with io.BytesIO() as out:
                Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG")
                segments_info = [
                    self._convert_category_id(x) for x in segments_info
                ]
                if self.gen_png:
                    png = self.gen_2ch_pngs(panoptic_img, segments_info)
                    png_dir = os.path.join(self.output_dir, '2ch_png')
                    os.makedirs(png_dir, exist_ok=True)
                    Image.fromarray(png).save(
                        os.path.join(png_dir, file_name_png))
                self._predictions.append({
                    "image_id": input["image_id"],
                    "file_name": file_name_png,
                    "png_string": out.getvalue(),
                    "segments_info": segments_info,
                })
Example #4
    def update(self, panoptic, image_filename=None, image_id=None):
        from panopticapi.utils import id2rgb

        if image_filename is None:
            raise ValueError('Need to provide image_filename.')
        if image_id is None:
            raise ValueError('Need to provide image_id.')

        # Change void region.
        panoptic[panoptic == self._void_label] = 0

        segments_info = []
        for pan_lab in np.unique(panoptic):
            pred_class = pan_lab // self._label_divisor
            if self._train_id_to_eval_id is not None:
                pred_class = self._train_id_to_eval_id[pred_class]

            segments_info.append(
                {
                    'id': int(pan_lab),
                    'category_id': int(pred_class),
                }
            )
        save_annotation(id2rgb(panoptic), self._panoptic_dir, image_filename, add_colormap=False)
        self._predictions.append(
            {
                'image_id': int(image_id),
                'file_name': image_filename + '.png',
                'segments_info': segments_info,
            }
        )
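The pan_lab // self._label_divisor decode above assumes panoptic labels are packed as category_id * label_divisor + instance_id. A tiny illustration; the divisor value 1000 is only an assumption here, the real one comes from the evaluator's configuration:

label_divisor = 1000                     # assumed for illustration
pan_lab = 17 * label_divisor + 42        # category 17, instance 42
assert pan_lab // label_divisor == 17    # recover the category
assert pan_lab % label_divisor == 42     # recover the instance index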
Example #5
 def _create_single_category(self, item):
     category = {
         'id': item.id,
         'name': item.class_name,
         'supercategory': item.class_name,
         'isthing': 1,
         'color': id2rgb(item.id)
     }
     return category
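id2rgb(item.id) maps an integer id to a color whose channels are the base-256 digits of the id, least significant byte in R. A minimal scalar sketch of the pair (prefer the real panopticapi.utils functions in production code):

def id2rgb_scalar(id_):
    # emit the base-256 digits of id_, least significant first: [R, G, B]
    color = []
    for _ in range(3):
        color.append(id_ % 256)
        id_ //= 256
    return color

def rgb2id_scalar(color):
    r, g, b = color
    return r + 256 * g + 256 * 256 * b

assert rgb2id_scalar(id2rgb_scalar(300)) == 300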
Example #6
File: reporters.py  Project: w-hc/pcv
    def process(self, inx, model_outputs, inputs, dset):
        from panoptic.entry import gt_tsr_res_reduction
        sem_pd, vote_pd = model_outputs
        sem_pd, vote_pd = F.softmax(sem_pd, dim=1), F.softmax(vote_pd, dim=1)

        model = self.model
        oracle_mode = self.oracle_mode
        oracle_res = 4
        imgMeta, segments_info, img, pan_gt_mask = dset.pan_getitem(
            inx, apply_trans=False)
        if dset.transforms is not None:
            _, trans_pan_gt = dset.transforms(img, pan_gt_mask)
        else:
            trans_pan_gt = pan_gt_mask.copy()

        pan_gt_ann = {
            'image_id': imgMeta['id'],
            # kludge: only the image file name is accessible here, alas
            'file_name': imgMeta['file_name'].split('.')[0] + '.png',
            'segments_info': list(segments_info.values())
        }

        if oracle_mode is not None:
            sem_ora, vote_ora = gt_tsr_res_reduction(oracle_res,
                                                     dset.gt_prod_handle,
                                                     dset.meta, model.pcv,
                                                     trans_pan_gt,
                                                     segments_info)
            if oracle_mode == 'vote':
                pass  # if using model sem pd, maintain stuff pred thresh
            else:  # sem or full oracle, using gt sem pd, do not filter
                self.infer_cfg['stuff_pred_thresh'] = -1

            if oracle_mode == 'sem':
                sem_pd = sem_ora
            elif oracle_mode == 'vote':
                vote_pd = vote_ora
            else:
                sem_pd, vote_pd = sem_ora, vote_ora

        pan_pd_mask, pan_pd_ann = model.stitch_pan_mask(
            self.infer_cfg, sem_pd, vote_pd, pan_gt_mask.size)
        pan_pd_ann['image_id'] = pan_gt_ann['image_id']
        pan_pd_ann['file_name'] = pan_gt_ann['file_name']
        self.overall_pred_meta['annotations'].append(pan_pd_ann)

        self.metric.update(pan_gt_ann, rgb2id(np.array(pan_gt_mask)),
                           pan_pd_ann, pan_pd_mask)

        pan_pd_mask = Image.fromarray(id2rgb(pan_pd_mask))
        fname = osp.join(self.pan_mask_dir, pan_pd_ann['file_name'])
        os.makedirs(osp.dirname(fname), exist_ok=True)  # make region subdir
        pan_pd_mask.save(fname)
Example #7
def coco_panoptic_segmentation_to_sa_pixel(coco_path, images_path):
    coco_json = json.load(open(coco_path))
    annotate_list = coco_json["annotations"]

    cat_id_to_cat = {}
    for cat in coco_json['categories']:
        cat_id_to_cat[cat['id']] = cat['name']

    sa_jsons = {}
    for annotate in tqdm(annotate_list, "Converting"):
        annot_name = Path(annotate["file_name"]).stem
        img_cv = cv2.imread(str(images_path / (annot_name + ".png")))
        if img_cv is None:
            print(
                "'{}' file dosen't exist!".format(
                    images_path / (annot_name + ".png")
                )
            )
            continue

        img = cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB)
        H, W, C = img.shape
        img = img.reshape((H * W, C))
        segments = annotate["segments_info"]
        hex_colors = blue_color_generator(len(segments))

        out_json = []
        for i, seg in enumerate(segments):
            img[np.all(img == id2rgb(seg["id"]),
                       axis=1)] = hex_to_rgb(hex_colors[i])
            dd = {
                "classId": seg["category_id"],
                'className': cat_id_to_cat[seg["category_id"]],
                "probability": 100,
                "visible": True,
                "parts": [{
                    "color": hex_colors[i]
                }],
                "attributes": [],
                "attributeNames": [],
                "imageId": annotate["image_id"]
            }
            out_json.append(dd)

        img = cv2.cvtColor(img.reshape((H, W, C)), cv2.COLOR_RGB2BGR)
        cv2.imwrite(str(images_path / (annot_name + ".jpg___save.png")), img)

        file_name = annot_name + ".jpg___pixel.json"
        sa_jsons[file_name] = out_json
        (images_path / (annot_name + ".png")).unlink()
    return sa_jsons
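The recoloring line works because the image was flattened to shape (H*W, 3): np.all(..., axis=1) then matches complete RGB triples, one per pixel. A self-contained illustration:

import numpy as np

flat = np.array([[1, 0, 0], [2, 0, 0], [1, 0, 0]], dtype=np.uint8)  # three pixels
hit = np.all(flat == np.array([1, 0, 0], dtype=np.uint8), axis=1)   # id2rgb(1) == [1, 0, 0]
flat[hit] = [0, 0, 255]  # recolor every pixel carrying segment id 1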
Example #8
def generate_pred_panoptic(cfg, outputs):
    """
    Take all outputs of a model and save a JSON file with all predictions,
    as well as every panoptic image prediction. This is done so that the
    `pq_compute` function from the panoptic API can be used.
    Args:
    - cfg (Config) : config object
    - outputs (list[dict]) : list of a full epoch of outputs
    """
    # Create the prediction dir if needed
    pred_dir = os.path.join(cfg.DATASET_PATH, cfg.PRED_DIR)
    os.makedirs(pred_dir, exist_ok=True)

    annotations = []
    print("Saving panoptic prediction to compute validation metrics")
    # Loop on each validation output
    for output in tqdm(outputs):
        # Loop on each image of the batch
        for img_panoptic, image_id in zip(output['panoptic'],
                                          output['image_id']):
            img_data = dict()
            img_data['image_id'] = image_id
            # Resize the image to original size
            img_panoptic = F.interpolate(
                img_panoptic.unsqueeze(0).unsqueeze(0).float(),
                size=(1024, 2048),
                mode='nearest')[0, 0, ...]
            # Create segment_info data
            img_data['segments_info'] = []
            img_panoptic = img_panoptic.cpu().numpy()
            for instance in np.unique(img_panoptic):
                if instance == 0:
                    continue
                img_data['segments_info'].append({
                    'id': int(instance),
                    'category_id':
                        int(instance) if instance < 1000 else int(instance / 1000)
                })
            # Save the panoptic prediction
            img_data['file_name'] = "{}_preds_panoptic.png".format(image_id)
            img = id2rgb(img_panoptic)
            img_to_save = Image.fromarray(img)
            img_to_save.save(os.path.join(pred_dir, img_data['file_name']))
            # Add the annotation of one image
            annotations.append(img_data)

    save_json_file(cfg, annotations)
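The category_id expression above encodes the convention that stuff pixels store the raw category id (below 1000), while thing pixels store category_id * 1000 plus an instance index. Decoding both cases under that assumption:

instance = 26005                 # a thing pixel: category 26, instance 5
category_id = int(instance) if instance < 1000 else int(instance // 1000)
assert category_id == 26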
Example #9
def pq_compute_single_core(proc_id, annotation_set, gt_folder, pred_folder,
                           categories):
    pq_stat = PQStat()
    idx = 0
    for gt_ann, pred_ann in annotation_set:
        if idx % 100 == 0:
            print('Core: {}, {} from {} images processed'.format(
                proc_id, idx, len(annotation_set)))
        idx += 1

        pan_gt = np.array(Image.open(
            os.path.join(gt_folder, gt_ann['file_name'])),
                          dtype=np.uint32)
        pan_gt = rgb2id(pan_gt)
        pan_pred = np.array(Image.open(
            os.path.join(pred_folder, pred_ann['file_name'])),
                            dtype=np.uint32)
        pan_pred = rgb2id(pan_pred)

        # downsample the prediction and upsample it back, then drop the
        # segments that get lost in the process
        ratio = 8
        if ratio > 1:
            pan_pred = id2rgb(pan_pred)
            pan_pred = Image.fromarray(pan_pred)
            w, h = pan_pred.size  # PIL's size is (width, height)

            pan_pred = pan_pred\
                .resize((w // ratio, h // ratio), Image.NEAREST)\
                .resize((w, h), Image.NEAREST)
            pan_pred = np.array(pan_pred, dtype=np.uint32)
            pan_pred = rgb2id(pan_pred)
            acc = []
            _p_segs = pred_ann['segments_info']
            for el in _p_segs:
                iid = el['id']
                if iid not in pan_pred:
                    continue
                acc.append(el)
            pred_ann['segments_info'] = acc

        _single_img_stat = pq_compute_single_img(categories, gt_ann, pan_gt,
                                                 pred_ann, pan_pred)
        pq_stat += _single_img_stat

    print('Core: {}, all {} images processed'.format(proc_id,
                                                     len(annotation_set)))
    return pq_stat
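This worker is normally fanned out over CPU cores. A driver sketch in the spirit of panopticapi's pq_compute_multi_core, splitting the matched annotations and reducing the per-core PQStat objects (treat the exact names as illustrative):

import multiprocessing

import numpy as np
from panopticapi.evaluation import PQStat

def pq_compute_multi_core(matched_annotations_list, gt_folder, pred_folder,
                          categories):
    cpu_num = multiprocessing.cpu_count()
    annotations_split = np.array_split(matched_annotations_list, cpu_num)
    with multiprocessing.Pool(cpu_num) as workers:
        results = [
            workers.apply_async(
                pq_compute_single_core,
                (proc_id, annotation_set, gt_folder, pred_folder, categories))
            for proc_id, annotation_set in enumerate(annotations_split)
        ]
        pq_stat = PQStat()
        for r in results:
            pq_stat += r.get()  # merge the per-core statistics
    return pq_stat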
Example #10

def coco_panoptic_segmentation_to_sa_pixel(coco_path, images_path):
    coco_json = json.load(open(coco_path))
    annotate_list = coco_json["annotations"]

    for annotate in tqdm(annotate_list, "Converting"):
        annot_name = os.path.splitext(annotate["file_name"])[0]
        img_cv = cv2.imread(os.path.join(images_path, annot_name + ".png"))
        if img_cv is None:
            print("'{}' file dosen't exist!".format(
                os.path.join(images_path, annot_name + ".png")))
            continue

        img = cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB)
        H, W, C = img.shape
        img = img.reshape((H * W, C))
        segments = annotate["segments_info"]
        hex_colors = _blue_color_generator(len(segments) + 1)

        out_json = []
        for i, seg in enumerate(segments):
            img[np.all(img == id2rgb(seg["id"]),
                       axis=1)] = _hex_to_rgb(hex_colors[i + 1])
            dd = {
                "classId": seg["category_id"],
                "probability": 100,
                "visible": True,
                "parts": [{
                    "color": hex_colors[i + 1]
                }],
                "attributes": [],
                "attributeNames": [],
                "imageId": annotate["image_id"]
            }
            out_json.append(dd)

        with open(os.path.join(images_path, annot_name + ".jpg___pixel.json"),
                  "w") as writer:
            json.dump(out_json, writer, indent=2)

        img = cv2.cvtColor(img.reshape((H, W, C)), cv2.COLOR_RGB2BGR)
        cv2.imwrite(os.path.join(images_path, annot_name + ".jpg___save.png"),
                    img)

        os.remove(os.path.join(images_path, annot_name + ".png"))
Example #11
    def process(self, inputs, outputs):
        from panopticapi.utils import id2rgb

        for input, output in zip(inputs, outputs):
            panoptic_img, segments_info = output["panoptic_seg"]
            panoptic_img = panoptic_img.cpu().numpy()
            if segments_info is None:
                # If "segments_info" is None, we assume "panoptic_img" is a
                # H*W int32 image storing the panoptic_id in the format of
                # category_id * label_divisor + instance_id. We reserve -1 for
                # VOID label, and add 1 to panoptic_img since the official
                # evaluation script uses 0 for VOID label.
                label_divisor = self._metadata.label_divisor
                segments_info = []
                for panoptic_label in np.unique(panoptic_img):
                    if panoptic_label == -1:
                        # VOID region.
                        continue
                    pred_class = panoptic_label // label_divisor
                    isthing = (
                        pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values()
                    )
                    segments_info.append(
                        {
                            "id": int(panoptic_label) + 1,
                            "category_id": int(pred_class),
                            "isthing": bool(isthing),
                        }
                    )
                # Official evaluation script uses 0 for VOID label.
                panoptic_img += 1

            file_name = os.path.basename(input["file_name"])
            file_name_png = os.path.splitext(file_name)[0] + ".png"
            with io.BytesIO() as out:
                Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG")
                segments_info = [self._convert_category_id(x) for x in segments_info]
                self._predictions.append(
                    {
                        "image_id": input["image_id"],
                        "file_name": file_name_png,
                        "png_string": out.getvalue(),
                        "segments_info": segments_info,
                    }
                )
Example #12
    def sa_to_output_format(self):
        out_json = self._create_skeleton()
        out_json['categories'] = self._create_categories(
            os.path.join(self.export_root, 'classes_mapper.json')
        )

        panoptic_root = os.path.join(
            self.dataset_name, "panoptic_{}".format(self.dataset_name)
        )

        images = []
        annotations = []
        id_generator = self._make_id_generator()
        jsons = glob.glob(
            os.path.join(self.export_root, '*pixel.json'), recursive=True
        )

        for id_, json_ in tqdm(enumerate(jsons, 1)):
            res = self._sa_to_coco_single(id_, json_, id_generator)

            panoptic_mask = json_[:-len('___pixel.json')] + '.png'

            Image.fromarray(id2rgb(res[2])).save(panoptic_mask)

            annotation = {
                'image_id': res[0]['id'],
                'file_name': panoptic_mask,
                'segments_info': res[1]
            }
            annotations.append(annotation)

            images.append(res[0])

        out_json['annotations'] = annotations
        out_json['images'] = images
        json_data = json.dumps(out_json, indent=4)
        with open(
            os.path.join(self.output_dir, '{}.json'.format(self.dataset_name)),
            'w+'
        ) as coco_json:
            coco_json.write(json_data)

        self.set_num_converted(len(jsons))
Example #13
    def _pan2json(self, results, outfile_prefix):
        """Convert panoptic results to COCO panoptic json style."""
        label2cat = dict((v, k) for (k, v) in self.cat2label.items())
        pred_annotations = []
        outdir = os.path.join(os.path.dirname(outfile_prefix), 'panoptic')

        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            segm_file = self.data_infos[idx]['segm_file']
            pan = results[idx]

            pan_labels = np.unique(pan)
            segm_info = []
            for pan_label in pan_labels:
                sem_label = pan_label % INSTANCE_OFFSET
                # We reserve the length of self.CLASSES for VOID label
                if sem_label == len(self.CLASSES):
                    continue
                # convert sem_label to json label
                cat_id = label2cat[sem_label]
                is_thing = self.categories[cat_id]['isthing']
                mask = pan == pan_label
                area = mask.sum()
                segm_info.append({
                    'id': int(pan_label),
                    'category_id': cat_id,
                    'isthing': is_thing,
                    'area': int(area)
                })
            # evaluation script uses 0 for VOID label.
            pan[pan % INSTANCE_OFFSET == len(self.CLASSES)] = VOID
            pan = id2rgb(pan).astype(np.uint8)
            mmcv.imwrite(pan[:, :, ::-1], os.path.join(outdir, segm_file))
            record = {
                'image_id': img_id,
                'segments_info': segm_info,
                'file_name': segm_file
            }
            pred_annotations.append(record)
        pan_json_results = dict(annotations=pred_annotations)
        return pan_json_results
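For reference, the pan_label % INSTANCE_OFFSET decode above assumes mmdet's packing sem_label + instance_idx * INSTANCE_OFFSET (INSTANCE_OFFSET is 1000 in mmdet; treated as an assumption here):

INSTANCE_OFFSET = 1000                   # assumed value
pan_label = 3 + 7 * INSTANCE_OFFSET      # semantic label 3, instance 7
assert pan_label % INSTANCE_OFFSET == 3
assert pan_label // INSTANCE_OFFSET == 7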
Example #14

    def sa_to_output_format(self):
        out_json = self._create_skeleton()
        out_json['categories'] = self._create_categories(
            self.export_root / 'classes_mapper.json'
        )

        cat_id_map = json.load(open(self.export_root / 'classes_mapper.json'))

        images = []
        annotations = []
        id_generator = self._make_id_generator()

        jsons = list(self.export_root.glob('*pixel.json'))

        for id_, json_ in tqdm(enumerate(jsons, 1)):
            res = self._sa_to_coco_single(id_, json_, id_generator, cat_id_map)

            panoptic_mask = str(json_)[:-len('___pixel.json')] + '.png'

            Image.fromarray(id2rgb(res[2])).save(panoptic_mask)

            annotation = {
                'image_id': res[0]['id'],
                'file_name': panoptic_mask,
                'segments_info': res[1]
            }
            annotations.append(annotation)

            images.append(res[0])

        out_json['annotations'] = annotations
        out_json['images'] = images
        json_data = json.dumps(out_json, indent=4)
        with open(
            self.output_dir / '{}.json'.format(self.dataset_name), 'w+'
        ) as coco_json:
            coco_json.write(json_data)

        self.set_num_converted(len(jsons))
Example #15
    def process(self, inputs, outputs):
        from panopticapi.utils import id2rgb

        preds = []
        for input, output in zip(inputs, outputs):
            panoptic_img, segments_info = output["panoptic_seg"]
            panoptic_img = panoptic_img.cpu().numpy()

            file_name = os.path.basename(input["file_name"])
            file_name_png = os.path.splitext(file_name)[0] + ".png"
            with io.BytesIO() as out:
                Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG")
                segments_info = [
                    self._convert_category_id(x) for x in segments_info
                ]
                preds.append({
                    "image_id": input["image_id"],
                    "file_name": file_name_png,
                    "png_string": out.getvalue(),
                    "segments_info": segments_info,
                })
        return {self.evaluator_name: preds}
Example #16
def _create_panoptic_gt_annotations(ann_file):
    categories = [{
        'id': 0,
        'name': 'person',
        'supercategory': 'person',
        'isthing': 1
    }, {
        'id': 1,
        'name': 'dog',
        'supercategory': 'dog',
        'isthing': 1
    }, {
        'id': 2,
        'name': 'wall',
        'supercategory': 'wall',
        'isthing': 0
    }]

    images = [{
        'id': 0,
        'width': 80,
        'height': 60,
        'file_name': 'fake_name1.jpg',
    }]

    annotations = [{
        'segments_info': [{
            'id': 1,
            'category_id': 0,
            'area': 400,
            'bbox': [10, 10, 10, 40],
            'iscrowd': 0
        }, {
            'id': 2,
            'category_id': 0,
            'area': 400,
            'bbox': [30, 10, 10, 40],
            'iscrowd': 0
        }, {
            'id': 3,
            'category_id': 1,
            'iscrowd': 0,
            'bbox': [50, 10, 10, 5],
            'area': 50
        }, {
            'id': 4,
            'category_id': 2,
            'iscrowd': 0,
            'bbox': [0, 0, 80, 60],
            'area': 3950
        }],
        'file_name': 'fake_name1.png',
        'image_id': 0
    }]

    gt_json = {
        'images': images,
        'annotations': annotations,
        'categories': categories
    }

    # 4 is the id of the background class annotation.
    gt = np.zeros((60, 80), dtype=np.int64) + 4
    gt_bboxes = np.array([[10, 10, 10, 40], [30, 10, 10, 40], [50, 10, 10, 5]],
                         dtype=np.int64)
    for i in range(3):
        x, y, w, h = gt_bboxes[i]
        gt[y:y + h, x:x + w] = i + 1  # id starts from 1

    gt = id2rgb(gt).astype(np.uint8)
    img_path = osp.join(osp.dirname(ann_file), 'fake_name1.png')
    mmcv.imwrite(gt[:, :, ::-1], img_path)

    mmcv.dump(gt_json, ann_file)
    return gt_json
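The fixture was written with the channels flipped to BGR for mmcv.imwrite, so reading it back needs the reverse flip before rgb2id. A sketch using the img_path from above:

import mmcv
import numpy as np
from panopticapi.utils import rgb2id

bgr = mmcv.imread(img_path)                          # mmcv reads BGR
gt_ids = rgb2id(bgr[:, :, ::-1].astype(np.uint32))   # flip to RGB, decode ids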
Example #17
# panoptic
elif 'panoptic' in str(coco_json_file):
    copy_png()
    rename_png()

    pan_loader = []
    for annot in json_data['annotations']:
        for cat in json_data['categories']:
            for si in annot['segments_info']:

                if cat['id'] == si['category_id']:
                    sa_dict = {
                        'classId': cat['id'],
                        'probability': 100,
                        'parts': [{
                            'color': rgb_to_hex(tuple(id2rgb(si['id'])))
                        }],
                        'attributes': [],
                        'attributeNames': [],
                        'imageId': annot['image_id']
                    }

                    pan_loader.append((sa_dict['imageId'], sa_dict))

    for img in json_data['images']:
        f_loader = []
        for img_id, img_data in pan_loader:
            if img['id'] == img_id:
                f_loader.append(img_data)
                with open(
                        os.path.join(main_dir,  # (snippet truncated in the source)

Example #18

            img_cv = cv2.imread(
                os.path.join(coco_path_folder,
                             os.path.join("panoptic_masks",
                                          img_name + ".png")))
            if img_cv is None:
                print("Error: '{}' file doesn't exist!".format(
                    os.path.join("panoptic_masks", img_name + ".png")))
                break

            img = cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB)
            H, W, C = img.shape
            img = img.reshape((H * W, C))
            segments = annotate["segments_info"]
            hex_colors = blue_color_generator(len(segments))
            out_json = []
            for i, seg in enumerate(segments):
                img[np.all(img == id2rgb(seg["id"]),
                           axis=1)] = hex_to_rgb(hex_colors[i])
                dd = {
                    "classId": seg["category_id"],
                    "probability": 100,
                    "visible": True,
                    "parts": [{
                        "color": hex_colors[i]
                    }],
                    "attributes": [],
                    "attributeNames": [],
                    "imageId": annotate["image_id"]
                }
                out_json.append(dd)
            with open(os.path.join(sa_dir, img_name + ".jpg___pixel.json"),
                      "w") as writer:
Example #19
    def PQ_eval_dirty(
        self, oracle_res=4, save_output=False,
        oracle_mode=None
        # oracle=False, semantic_oracle=False, vote_oracle=False
    ):
        """
        optionally save all predicted files through output writer
        """
        _VALID_ORACLE_MODES = ('full', 'sem', 'vote')

        assert not self.mp_distributed
        model = self.model
        loader = self.val_loader
        dset = loader.dataset
        eval_at = model.curr_epoch - 1
        logger.info("Panoptic Quality eval at epoch {}".format(eval_at))

        # setup space to save pan predictions
        PRED_OUT_NAME = 'pred'
        dump_root = osp.join(self.output_mngr.root, 'pd_dump')
        pred_meta_fname = osp.join(dump_root, '{}.json'.format(PRED_OUT_NAME))
        pred_mask_dir = osp.join(dump_root, PRED_OUT_NAME)
        del dump_root

        overall_pred_meta = {
            'images': list(dset.imgs.values()),
            'categories': list(dset.meta['cats'].values()),
            'annotations': []
        }

        metric = PQMetric(dset.meta)
        timer = CompoundTimer()
        timer.data.tic()

        # sota_sseg = SOTASSeg(dset.name)

        # hack: load external UPSNet predictions for comparison
        upsnet_pred_json = json.load(open('/home-nfs/whc/panout/upsnet/coco/val/pred.json', 'r'))
        upsnet_pred_ann = {_['image_id']: _ for _ in upsnet_pred_json['annotations']}
        ups_thing_cat_ids = [_['id'] for _ in upsnet_pred_json['categories'] if _['isthing']]
        ups_stuff_cat_ids = [_['id'] for _ in upsnet_pred_json['categories'] if _['isthing'] == 0]
        def load_pred_mask(filename):
            return rgb2id(np.array(Image.open(f'/home-nfs/whc/panout/upsnet/coco/val/pred/{filename}').convert('RGB')))

        for i, inputs in enumerate(loader):
            x = inputs[0].cuda()
            del inputs
            if self.debug and (i > 5):
                break

            imgMeta, segments_info, img, pan_gt_mask = dset.pan_getitem(
                i, apply_trans=False)

            # rt change
            # to get ground truth instance centroid
            # lo_pan_mask = pan_gt_mask.resize(
            #     np.array(_.size, dtype=np.int) // 4, resample=Image.NEAREST
            # )
            # tmp_handle = dset.gt_prod_handle(dset.meta, dset.pcv,
            #     lo_pan_mask, segments_info)
            # tmp_handle.generate_gt()
            # gt_ins_centroids = tmp_handle.ins_centroids

            if dset.transforms is not None:
                _, trans_pan_gt = dset.transforms(img, pan_gt_mask)
            else:
                trans_pan_gt = pan_gt_mask.copy()

            pan_gt_ann = {
                'image_id': imgMeta['id'],
                # kludge: only the image file name is accessible here, alas
                'file_name': imgMeta['file_name'].split('.')[0] + '.png',
                'segments_info': list(segments_info.values())
            }

            # torch.cuda.synchronize()
            timer.data.toc()
            timer.compute.tic()

            if oracle_mode != 'full':  # way too ugly
                from fabric.utils.timer import global_timer
                global_timer.network.tic()
                sem_pd, vote_pd = model.infer(x, softmax_normalize=True)
                global_timer.network.toc()

            if oracle_mode is not None:
                assert oracle_mode in _VALID_ORACLE_MODES
                sem_ora, vote_ora = gt_tsr_res_reduction(
                    oracle_res, self.gt_prod_handle,
                    dset.meta, model.pcv, trans_pan_gt, segments_info
                )
                if oracle_mode == 'vote':
                    pass  # if using model sem pd, maintain stuff pred thresh
                else:  # sem or full oracle, using gt sem pd, do not filter
                    model.dset_meta['stuff_pred_thresh'] = -1

                if oracle_mode == 'sem':
                    sem_pd = sem_ora
                elif oracle_mode == 'vote':
                    vote_pd = vote_ora
                else:
                    sem_pd, vote_pd = sem_ora, vote_ora

            pan_pd_mask, pan_pd_ann = model.stitch_pan_mask(
                cfg.pcv, sem_pd, vote_pd, pan_gt_mask.size
            )

            # if oracle:
            #     sem_pd, vote_pd = gt_tsr_res_reduction(
            #         oracle_res, self.gt_prod_handle,
            #         dset.meta, model.pcv, trans_pan_gt, segments_info
            #     )
            #     model.dset_meta['stuff_pred_thresh'] = -1
            #     pan_pd_mask, pan_pd_ann = model.stitch_pan_mask(
            #         sem_pd, vote_pd, pan_gt_mask.size)
            # elif semantic_oracle:
            #     sem_pd, vote_pd = model.infer(x, softmax_normalize=True)
            #     sem_pd, _ = gt_tsr_res_reduction(
            #         oracle_res, self.gt_prod_handle,
            #         dset.meta, model.pcv, trans_pan_gt, segments_info
            #     )
            #     model.dset_meta['stuff_pred_thresh'] = -1
            #     pan_pd_mask, pan_pd_ann = model.stitch_pan_mask(
            #         sem_pd, vote_pd, pan_gt_mask.size)
            # elif vote_oracle:
            #     sem_pd, vote_pd = model.infer(x, softmax_normalize=True)
            #     _, vote_pd = gt_tsr_res_reduction(
            #         oracle_res, self.gt_prod_handle,
            #         dset.meta, model.pcv, trans_pan_gt, segments_info
            #     )
            #     pan_pd_mask, pan_pd_ann = model.stitch_pan_mask(
            #         sem_pd, vote_pd, pan_gt_mask.size)
            # else:
            #     # _sem_pd, _vote_pd = gt_tsr_res_reduction(
            #     #     oracle_res, self.gt_prod_handle,
            #     #     dset.meta, model.pcv, trans_pan_gt, segments_info
            #     # )
            #     sem_pd, vote_pd = model.infer(x, softmax_normalize=True)
            #     # if hasattr(self, 'model2'):
            #     #     _sem_pd, _vote_pd = self.model2.infer(x, softmax_normalize=True)
            #     #     sem_pd = (sem_pd + _sem_pd) / 2
            #     #     vote_pd = (vote_pd + _vote_pd) / 2
            #     # sem_pd, vote_pd = model.flip_infer(x, softmax_normalize=True)
            #     # sem_pd = sota_sseg.get(i, sem_pd.shape[-2:])
            #     # model.dset_meta['tmp_gt_ins_centroids'] = gt_ins_centroids # rt change
            #     pan_pd_mask, pan_pd_ann = model.stitch_pan_mask(
            #         sem_pd, vote_pd, pan_gt_mask.size)
            pan_pd_ann['image_id'] = pan_gt_ann['image_id']
            pan_pd_ann['file_name'] = pan_gt_ann['file_name']
            overall_pred_meta['annotations'].append(pan_pd_ann)

            metric.update(
                pan_gt_ann, rgb2id(np.array(pan_gt_mask)), pan_pd_ann, pan_pd_mask
            )

            if save_output:
                # pan_pd_mask = Image.fromarray(id2rgb(pan_pd_mask))
                # fname = osp.join(pred_mask_dir, pan_pd_ann['file_name'])
                # os.makedirs(osp.dirname(fname), exist_ok=True)  # make region subdir
                # pan_pd_mask.save(fname)
                fname = osp.join(pred_mask_dir, pan_pd_ann['file_name'])
                os.makedirs(osp.dirname(fname), exist_ok=True)  # make region subdir
                os.makedirs(osp.dirname(fname.replace(pred_mask_dir, pred_mask_dir+'_d2')), exist_ok=True)  # make region subdir
                # os.makedirs(osp.dirname(fname.replace(pred_mask_dir, pred_mask_dir+'_d2_ups')), exist_ok=True)  # make region subdir

                from .vis import d2_vis
                im = dset.pan_getitem(i, apply_trans=False)[2]
                if cfg.data.dataset.params['caffe_mode']:
                    im = np.array(im)[:, :, ::-1]
                im = np.array(im)
                Image.fromarray(
                    d2_vis(
                        dset.meta,
                        pan_pd_mask,
                        pan_pd_ann,
                        im
                    )
                ).save(fname.replace(pred_mask_dir, pred_mask_dir+'_d2'))

                # d2_vis and im were already prepared above; reuse them
                tmp_ann = upsnet_pred_ann[pan_gt_ann['image_id']]
                for seg in tmp_ann['segments_info']:
                    if seg['category_id'] in ups_thing_cat_ids:
                        seg['isthing'] = 1
                    else:
                        seg['isthing'] = 0
                Image.fromarray(
                    d2_vis(
                        dset.meta,
                        load_pred_mask(pan_gt_ann['file_name']),
                        tmp_ann,
                        im
                    )
                ).save(fname.replace(pred_mask_dir, pred_mask_dir+'_d2').replace('.png', '_ups.png'))

                pan_pd_mask = Image.fromarray(id2rgb(pan_pd_mask))
                pan_pd_mask.save(fname)

            # torch.cuda.synchronize()
            timer.compute.toc()

            if (i % 50) == 0:
                logger.info(timer)
                logger.info(
                    "iter {}, eta to val end {}".format(i, timer.eta(i, len(loader)))
                )
                print(metric)
            timer.data.tic()

        if save_output:
            with open(pred_meta_fname, 'w') as f:
                json.dump(overall_pred_meta, f)
            self.output_mngr.save_f(
                metric.results, osp.join(self.output_mngr.root, 'score.pkl')
            )
        logger.info("eval complete \n{}".format(metric))
        return metric
Example #20
    def PQ_test(self, save_output=False):
        assert not self.mp_distributed
        model = self.model
        loader = self.test_loader
        dset = loader.dataset
        eval_at = model.curr_epoch - 1
        logger.info("Panoptic Quality eval at epoch {}".format(eval_at))

        # PRED_OUT_NAME = 'panoptic_{}2017_pcv_results'.format(dset.split)
        PRED_OUT_NAME = 'pred'
        dump_root = osp.join(self.output_mngr.root, 'pd_dump')
        pred_meta_fname = osp.join(dump_root, '{}.json'.format(PRED_OUT_NAME))
        pred_mask_dir = osp.join(dump_root, PRED_OUT_NAME)

        overall_pred_meta = {
            'images': list(dset.imgs.values()),
            'categories': list(dset.meta['cats'].values()),
            'annotations': []
        }

        timer = CompoundTimer()
        timer.data.tic()

        for i, inputs in enumerate(loader):
            if self.debug and (i > 5):
                break
            x = inputs[0].cuda()
            del inputs
            imgMeta, _ = dset.get_meta(i)

            timer.data.toc()
            timer.compute.tic()

            sem_pd, vote_pd = model.infer(x, softmax_normalize=True)
            # if hasattr(self, 'model2'):
            #     _sem_pd, _vote_pd = self.model2.infer(x, softmax_normalize=True)
            #     sem_pd = (sem_pd + _sem_pd) / 2
            #     vote_pd = (vote_pd + _vote_pd) / 2
            img_original_size = (imgMeta['width'], imgMeta['height'])
            pan_pd_mask, pan_pd_ann = model.stitch_pan_mask(
                cfg.pcv, sem_pd, vote_pd, img_original_size
            )
            pan_pd_ann['image_id'] = imgMeta['id']
            pan_pd_ann['file_name'] = imgMeta['file_name'].split('.')[0] + '.png'
            overall_pred_meta['annotations'].append(pan_pd_ann)

            if save_output:
                fname = osp.join(pred_mask_dir, pan_pd_ann['file_name'])
                os.makedirs(osp.dirname(fname), exist_ok=True)  # make region subdir
                os.makedirs(osp.dirname(fname.replace(pred_mask_dir, pred_mask_dir+'_d2')), exist_ok=True)  # make region subdir

                from .vis import d2_vis
                im = dset.pan_getitem(i, apply_trans=False)[2]
                if cfg.data.dataset.params['caffe_mode']:
                    im = np.array(im)[:, :, ::-1]
                im = np.array(im)
                Image.fromarray(
                    d2_vis(
                        dset.meta,
                        pan_pd_mask,
                        pan_pd_ann,
                        im
                    )
                ).save(fname.replace(pred_mask_dir, pred_mask_dir+'_d2'))

                pan_pd_mask = Image.fromarray(id2rgb(pan_pd_mask))
                pan_pd_mask.save(fname)

            # torch.cuda.synchronize()
            timer.compute.toc()

            if (i % 50) == 0:
                logger.info(timer)
                logger.info(
                    "iter {}/{}, eta to val end {}".format(
                        i, len(loader), timer.eta(i, len(loader))
                    )
                )
            timer.data.tic()

        if save_output:
            with open(pred_meta_fname, 'w') as f:
                json.dump(overall_pred_meta, f)
        logger.info("test eval complete")
Example #21

def combine_to_panoptic_single_core(proc_id, img_ids, img_id2img,
                                    inst_by_image, sem_by_image,
                                    segmentations_folder, overlap_thr,
                                    stuff_area_limit, categories):
    panoptic_json = []
    id_generator = IdGenerator(categories)

    for idx, img_id in enumerate(img_ids):
        img = img_id2img[img_id]

        if idx % 100 == 0:
            print('Core: {}, {} from {} images processed.'.format(
                proc_id, idx, len(img_ids)))

        pan_segm_id = np.zeros((img['height'], img['width']), dtype=np.uint32)
        used = None
        annotation = {}
        try:
            annotation['image_id'] = int(img_id)
        except Exception:
            annotation['image_id'] = img_id

        annotation['file_name'] = img['file_name'].replace('.jpg', '.png')

        segments_info = []
        for ann in inst_by_image[img_id]:
            area = COCOmask.area(ann['segmentation'])
            if area == 0:
                continue
            if used is None:
                intersect = 0
                used = copy.deepcopy(ann['segmentation'])
            else:
                intersect = COCOmask.area(
                    COCOmask.merge([used, ann['segmentation']],
                                   intersect=True))
            if intersect / area > overlap_thr:
                continue
            used = COCOmask.merge([used, ann['segmentation']], intersect=False)

            mask = COCOmask.decode(ann['segmentation']) == 1
            if intersect != 0:
                mask = np.logical_and(pan_segm_id == 0, mask)
            segment_id = id_generator.get_id(ann['category_id'])
            panoptic_ann = {}
            panoptic_ann['id'] = segment_id
            panoptic_ann['category_id'] = ann['category_id']
            pan_segm_id[mask] = segment_id
            segments_info.append(panoptic_ann)

        for ann in sem_by_image[img_id]:
            mask = COCOmask.decode(ann['segmentation']) == 1
            mask_left = np.logical_and(pan_segm_id == 0, mask)
            if mask_left.sum() < stuff_area_limit:
                continue
            segment_id = id_generator.get_id(ann['category_id'])
            panoptic_ann = {}
            panoptic_ann['id'] = segment_id
            panoptic_ann['category_id'] = ann['category_id']
            pan_segm_id[mask_left] = segment_id
            segments_info.append(panoptic_ann)

        annotation['segments_info'] = segments_info
        panoptic_json.append(annotation)

        Image.fromarray(id2rgb(pan_segm_id)).save(
            os.path.join(segmentations_folder, annotation['file_name']))

    return panoptic_json
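A note on the IdGenerator used above: it hands out segment ids whose id2rgb colors are unique per segment, which is what makes the saved PNG decodable again with rgb2id. A minimal usage sketch; category entries must carry 'isthing' and 'color' fields, as in the COCO panoptic categories JSON:

from panopticapi.utils import IdGenerator, rgb2id

categories = {1: {'id': 1, 'name': 'person', 'isthing': 1, 'color': [220, 20, 60]}}
id_gen = IdGenerator(categories)
segment_id, color = id_gen.get_id_and_color(1)
assert segment_id == rgb2id(color)  # the id is just the packed color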
Example #22
def build_panoptic_area(img_id, output_path, detection_path,
                        segmentation_path):
    """
    Build panoptic segmentation of the specified image.
    Sort segments by area: write first smaller objects to avoid overlapping.
    :param img_id: image identifier (for retrieving file name)
    :param output_path: path to store panoptic segmentation results (png)
    :param detection_path: input path for detection
    :param segmentation_path: input path for semantic segmentation
    :return: the json annotation with class information for each panoptic segment.
    """
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    #Read categories and create IdGenerator (official panopticapi repository)
    categories = load_panoptic_category_info()
    id_generator = IdGenerator(categories)

    #Parameters:
    overlap_thr = 0.5
    stuff_area_limit = 64 * 64

    #read segmentation data
    segm_probs = json.load(
        open(segmentation_path + '/' + img_id + '_prob.json', 'r'))
    segm_labelmap = np.array(
        Image.open(segmentation_path + '/' + img_id + '_0.png'), np.uint8)
    #read detection data
    detection = json.load(
        open(detection_path + '/' + img_id + '_prob.json', 'r'))

    pan_segm_id = np.zeros(segm_labelmap.shape, dtype=np.uint32)
    used = np.full(segm_labelmap.shape, False)

    annotation = {}
    try:
        annotation['image_id'] = int(img_id)
    except Exception:
        annotation['image_id'] = img_id

    annotation['file_name'] = img_id + '.png'

    segments_info = []

    for obj in detection:
        obj_mask = extract_mask_bool(obj['mask'])
        obj_area = np.count_nonzero(obj_mask)
        obj['area'] = obj_area
        obj['mask'] = obj_mask

    detection.sort(key=lambda x: x['area'])  # smaller first, then bigger

    for obj in detection:
        obj_mask = obj['mask']
        obj_area = obj['area']
        if obj_area == 0:
            continue
        #Filter out objects with intersection > 50% with used area
        intersection_mask = used & obj_mask
        intersect_area = np.count_nonzero(intersection_mask)
        if 1.0 * intersect_area / obj_area > overlap_thr:
            continue
        used = used | obj_mask

        segment_id = id_generator.get_id(obj['class'])
        panoptic_ann = {}
        panoptic_ann['id'] = segment_id
        panoptic_ann['category_id'] = obj['class']
        if intersect_area > 0:
            pan_segm_id[obj_mask & (~intersection_mask)] = segment_id
        else:
            pan_segm_id[obj_mask] = segment_id
        segments_info.append(panoptic_ann)

    for segm_class in np.unique(segm_labelmap):
        segm_class = int(segm_class)
        if segm_class == 183:  #void class
            continue

        #Check class: exclude non-stuff objects
        category = categories[segm_class]
        if category['isthing'] == 1:
            continue

        segm_mask = (segm_labelmap == segm_class)
        mask_left = segm_mask & (~used)
        # Filter out segments with small area
        if np.count_nonzero(mask_left) < stuff_area_limit:
            continue
        segment_id = id_generator.get_id(segm_class)
        panoptic_ann = {}
        panoptic_ann['id'] = segment_id
        panoptic_ann['category_id'] = segm_class
        used = used | mask_left
        pan_segm_id[mask_left] = segment_id
        segments_info.append(panoptic_ann)

    annotation['segments_info'] = segments_info

    # Save annotated image
    Image.fromarray(id2rgb(pan_segm_id)).save(
        os.path.join(output_path, annotation['file_name']))

    # Remove segments with zero area
    ids = set(np.unique(pan_segm_id))
    segments_info_cleaned = []
    for seg in segments_info:
        if seg['id'] in ids:
            segments_info_cleaned.append(seg)
    annotation['segments_info'] = segments_info_cleaned

    return annotation
Example #23
def sa_pixel_to_coco_panoptic(dataset_name, export_root, thing_ids):
    os.makedirs(os.path.join(dataset_name, "annotations"), exist_ok=True)

    info = {
        'description': 'This is stable 1.0 version of the ' + dataset_name + ' dataset.',
        'url': 'https://superannotate.ai',
        'version': '1.0',
        'year': 2019,
        'contributor': 'Annotator LLC',
        'date_created': '2019-11-15 11:47:32.67823'
    }

    licences = [
        {
            'url': 'https://superannotate.ai',
            'id': 1,
            'name': 'Superannotate License'
        }
    ]

    categories = []
    dbid_to_catid = {}
    classes = json.load(
        open(os.path.join(export_root, "classes", "classes.json"))
    )
    for idx, dbclass in enumerate(classes, 1):
        category = {
            "id": idx,
            "name": dbclass["name"],
            "supercategory": dbclass["name"],
            "isthing": dbclass["id"] in thing_ids,
            "color": id2rgb(int(dbclass["color"][1:], 16))
        }

        dbid_to_catid[dbclass["id"]] = category["id"]
        categories.append(category)

    print("Converting annotations for {} dataset ...".format(dataset_name))

    id_generator = IdGenerator({cat['id']: cat for cat in categories})
    panoptic_root = os.path.join(
        dataset_name, "panoptic_{}".format(dataset_name)
    )
    os.makedirs(panoptic_root, exist_ok=True)
    jsons = glob.glob(os.path.join(export_root, "*.json"))
    images = []
    annotations = []
    for idx, filepath in tqdm(enumerate(jsons, 1)):
        filename = os.path.basename(filepath)
        imagename = filename[:-len('___pixel.json')] + '___lores.jpg'

        width, height = Image.open(os.path.join(export_root, imagename)).size
        image_info = {
            "id": idx,
            "file_name": imagename,
            "height": height,
            "width": width,
            "license": 1
        }
        images.append(image_info)

        segments_info = []
        sa_ann_json = json.load(open(os.path.join(export_root, filename)))

        sa_bluemask_path = os.path.join(
            export_root, filename[:-len('___pixel.json')] + '___save.png'
        )
        sa_bluemask_rgb = np.asarray(
            Image.open(sa_bluemask_path).convert('RGB'), dtype=np.uint32
        )
        ann_mask = np.zeros((height, width), dtype=np.uint32)
        flat_mask = (sa_bluemask_rgb[:, :, 0] << 16) | \
                    (sa_bluemask_rgb[:, :, 1] << 8) | \
                    sa_bluemask_rgb[:, :, 2]

        for instance in sa_ann_json:
            parts = [int(part["color"][1:], 16) for part in instance["parts"]]
            category_id = dbid_to_catid[instance["classId"]]
            instance_bitmask = np.isin(flat_mask, parts)
            segment_id = id_generator.get_id(category_id)
            ann_mask[instance_bitmask] = segment_id
            coco_instance_mask = cocomask.encode(
                np.asfortranarray(instance_bitmask)
            )
            bbox = cocomask.toBbox(coco_instance_mask).tolist()
            area = int(cocomask.area(coco_instance_mask))

            segment_info = {
                "id": segment_id,
                "category_id": category_id,
                "area": area,
                "bbox": bbox,
                "iscrowd": 0
            }
            segments_info.append(segment_info)
        panopticmask = imagename[:-len("jpg")] + "png"
        Image.fromarray(id2rgb(ann_mask)).save(
            os.path.join(panoptic_root, panopticmask)
        )

        annotation = {
            "image_id": idx,
            "file_name": panopticmask,
            "segments_info": segments_info
        }
        annotations.append(annotation)

    panoptic_data = {
        "info": info,
        "licences": licences,
        "images": images,
        "annotations": annotations,
        "categories": categories
    }

    json_data = json.dumps(panoptic_data, indent=4)
    with open(
        os.path.join(
            dataset_name, "annotations",
            "panoptic_{}.json".format(dataset_name)
        ), "w+"
    ) as coco_json:
        coco_json.write(json_data)
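Note that two different integer packings appear in this converter: hex colors such as '#0000ff' pack as R<<16 | G<<8 | B (matching int(hex_str, 16) and the flat_mask computation above), while panopticapi's rgb2id packs the opposite byte order, R + (G<<8) + (B<<16). A quick check:

import numpy as np
from panopticapi.utils import rgb2id

pixel = np.array([0, 0, 255], dtype=np.uint32)               # pure blue
hex_packed = (pixel[0] << 16) | (pixel[1] << 8) | pixel[2]   # == int('0000ff', 16)
assert hex_packed == 255
assert rgb2id(pixel) == 255 << 16                            # reversed byte order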
Example #24
    def PQ_eval(
        self, oracle_res=4, save_output=False,
        oracle_mode=None
        # oracle=False, semantic_oracle=False, vote_oracle=False
    ):
        """
        optionally save all predicted files through output writer
        """
        _VALID_ORACLE_MODES = ('full', 'sem', 'vote')

        assert not self.mp_distributed
        model = self.model
        loader = self.val_loader
        dset = loader.dataset
        eval_at = model.curr_epoch - 1
        logger.info("Panoptic Quality eval at epoch {}".format(eval_at))

        # setup space to save pan predictions
        PRED_OUT_NAME = 'pred'
        dump_root = osp.join(self.output_mngr.root, 'pd_dump')
        pred_meta_fname = osp.join(dump_root, '{}.json'.format(PRED_OUT_NAME))
        pred_mask_dir = osp.join(dump_root, PRED_OUT_NAME)
        del dump_root

        overall_pred_meta = {
            'images': list(dset.imgs.values()),
            'categories': list(dset.meta['cats'].values()),
            'annotations': []
        }

        metric = PQMetric(dset.meta)
        timer = CompoundTimer()
        timer.data.tic()

        for i, inputs in enumerate(loader):
            x = inputs[0].cuda()
            del inputs
            if self.debug and (i > 5):
                break

            imgMeta, segments_info, img, pan_gt_mask = dset.pan_getitem(
                i, apply_trans=False)

            if dset.transforms is not None:
                _, trans_pan_gt = dset.transforms(img, pan_gt_mask)
            else:
                trans_pan_gt = pan_gt_mask.copy()

            pan_gt_ann = {
                'image_id': imgMeta['id'],
                # kludge: only the image file name is accessible here, alas
                'file_name': imgMeta['file_name'].split('.')[0] + '.png',
                'segments_info': list(segments_info.values())
            }

            # torch.cuda.synchronize()
            timer.data.toc()
            timer.compute.tic()

            if oracle_mode != 'full':  # way too ugly
                # from fabric.utils.timer import global_timer
                # global_timer.network.tic()
                sem_pd, vote_pd = model.infer(x, softmax_normalize=True)
                # global_timer.network.toc()

            if oracle_mode is not None:
                assert oracle_mode in _VALID_ORACLE_MODES
                sem_ora, vote_ora = gt_tsr_res_reduction(
                    oracle_res, self.gt_prod_handle,
                    dset.meta, model.pcv, trans_pan_gt, segments_info
                )
                if oracle_mode == 'vote':
                    pass  # if using model sem pd, maintain stuff pred thresh
                else:  # sem or full oracle, using gt sem pd, do not filter
                    model.dset_meta['stuff_pred_thresh'] = -1

                if oracle_mode == 'sem':
                    sem_pd = sem_ora
                elif oracle_mode == 'vote':
                    vote_pd = vote_ora
                else:
                    sem_pd, vote_pd = sem_ora, vote_ora

            pan_pd_mask, pan_pd_ann = model.stitch_pan_mask(
                cfg.pcv, sem_pd, vote_pd, pan_gt_mask.size
            )

            pan_pd_ann['image_id'] = pan_gt_ann['image_id']
            pan_pd_ann['file_name'] = pan_gt_ann['file_name']
            overall_pred_meta['annotations'].append(pan_pd_ann)

            metric.update(
                pan_gt_ann, rgb2id(np.array(pan_gt_mask)), pan_pd_ann, pan_pd_mask
            )

            if save_output:
                # pan_pd_mask = Image.fromarray(id2rgb(pan_pd_mask))
                # fname = osp.join(pred_mask_dir, pan_pd_ann['file_name'])
                # os.makedirs(osp.dirname(fname), exist_ok=True)  # make region subdir
                # pan_pd_mask.save(fname)
                fname = osp.join(pred_mask_dir, pan_pd_ann['file_name'])
                os.makedirs(osp.dirname(fname), exist_ok=True)  # make region subdir
                pan_pd_mask = Image.fromarray(id2rgb(pan_pd_mask))
                pan_pd_mask.save(fname)

            # torch.cuda.synchronize()
            timer.compute.toc()

            if (i % 50) == 0:
                logger.info(timer)
                logger.info(
                    "iter {}, eta to val end {}".format(i, timer.eta(i, len(loader)))
                )
                print(metric)
            timer.data.tic()

        if save_output:
            with open(pred_meta_fname, 'w') as f:
                json.dump(overall_pred_meta, f)
            self.output_mngr.save_f(
                metric.results, osp.join(self.output_mngr.root, 'score.pkl')
            )
        logger.info("eval complete \n{}".format(metric))
        return metric