Example #1
    def __init__(self, config):
        """Create a quality that uses `nomagic_net` as a quality function."""
        from nomagic_submission import ConvNetModel
        from tensorpack import SaverRestore
        from tensorpack.predict import OfflinePredictor
        from tensorpack.predict.config import PredictConfig

        GraspQualityFunction.__init__(self)

        # Store parameters.
        self._model_path = config["gqcnn_model"]
        self._batch_size = config["batch_size"]
        self._crop_height = config["crop_height"]
        self._crop_width = config["crop_width"]
        self._im_height = config["im_height"]
        self._im_width = config["im_width"]
        self._num_channels = config["num_channels"]
        self._pose_dim = config["pose_dim"]
        self._gripper_mode = config["gripper_mode"]
        self._data_mean = config["data_mean"]
        self._data_std = config["data_std"]

        # Init config.
        model = ConvNetModel()
        self._config = PredictConfig(model=model,
                                     session_init=SaverRestore(
                                         self._model_path),
                                     output_names=["prob"])
        self._predictor = OfflinePredictor(self._config)
Example #2
    def __init__(self, config):
        """Create a grasp quality function that uses nomagic_net as the quality network."""
        from nomagic_submission import ConvNetModel
        from tensorpack.predict.config import PredictConfig
        from tensorpack import SaverRestore
        from tensorpack.predict import OfflinePredictor

        # store parameters
        self._model_path = config['gqcnn_model']
        self._batch_size = config['batch_size']
        self._crop_height = config['crop_height']
        self._crop_width = config['crop_width']
        self._im_height = config['im_height']
        self._im_width = config['im_width']
        self._num_channels = config['num_channels']
        self._pose_dim = config['pose_dim']
        self._gripper_mode = config['gripper_mode']

        # init config
        model = ConvNetModel()
        self._config = PredictConfig(model=model,
                                     session_init=SaverRestore(
                                         self._model_path),
                                     output_names=['prob'])
        self._predictor = OfflinePredictor(self._config)
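
Both constructors above only build the predictor; neither calls it. A tensorpack `OfflinePredictor` is used as a plain callable: it takes one numpy array per model input and returns a list with one array per entry in `output_names`. The sketch below is hypothetical — the batch layout, the normalization step, and the two-argument call are illustrative assumptions, not the original repository's API.

# Minimal usage sketch (hypothetical, not from the original class).
import numpy as np

def score_batch(quality_fn, depth_images, poses):
    # Assumed layouts: depth_images is [N, im_height, im_width, num_channels],
    # poses is [N, pose_dim]; normalization uses the stored statistics.
    images = (depth_images - quality_fn._data_mean) / quality_fn._data_std
    outputs = quality_fn._predictor(images.astype(np.float32),
                                    poses.astype(np.float32))
    return outputs[0]  # the "prob" tensor requested in output_names
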
Example #3
    def run(self):
        predictor = OfflinePredictor(self.gen_pred_config())
        for num, data_dir in enumerate(self.inf_data_list):
            save_dir = os.path.join(self.inf_output_dir, str(num))
            print(save_dir)

            file_list = glob.glob(
                os.path.join(data_dir, "*{}".format(self.inf_imgs_ext)))
            file_list.sort()  # ensure same order

            rm_n_mkdir(save_dir)
            for filename in file_list:
                filename = os.path.basename(filename)
                basename = filename.split(".")[0]
                print(data_dir, basename, end=" ", flush=True)

                ##
                img = cv2.imread(os.path.join(data_dir, filename))
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                ##
                pred_map = self.__gen_prediction(img, predictor)
                sio.savemat(
                    os.path.join(save_dir, "{}.mat".format(basename)),
                    {"result": [pred_map]},
                )
                print(f"Finished. {datetime.now().strftime('%H:%M:%S.%f')}")
Example #4
    def run(self):
        model_path = self.inf_model_path

        MODEL_MAKER = Model_NP_XY if self.model_mode == 'np+xy' else Model_NP_DIST

        pred_config = PredictConfig(
            model=MODEL_MAKER(),
            session_init=get_model_loader(model_path),
            input_names=self.eval_inf_input_tensor_names,
            output_names=self.eval_inf_output_tensor_names)
        predictor = OfflinePredictor(pred_config)

        for norm_target in self.inf_norm_codes:
            norm_dir = '%s/%s/' % (self.inf_norm_root_dir, norm_target)
            norm_save_dir = '%s/%s/' % (self.inf_output_dir, norm_target)

            # TODO: cache list to check later norm dir has same number of files
            file_list = glob.glob('%s/*%s' % (norm_dir, self.inf_imgs_ext))
            file_list.sort()  # ensure same order

            rm_n_mkdir(norm_save_dir)
            for filename in file_list:
                filename = os.path.basename(filename)
                basename = filename.split('.')[0]
                print(basename, norm_target, end=' ', flush=True)

                ##
                img = cv2.imread(norm_dir + filename)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                ##
                pred_map = self.__gen_prediction(img, predictor)
                sio.savemat('%s/%s.mat' % (norm_save_dir, basename),
                            {'result': [pred_map]})
                print('FINISH')
Example #5
def do_visualize(model, model_path, nr_visualize=100, output_dir='output'):
    """
    Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
    """
    df = get_train_dataflow()
    df.reset_state()

    pred = OfflinePredictor(
        PredictConfig(model=model,
                      session_init=SmartInit(model_path),
                      input_names=['image', 'gt_boxes', 'gt_labels'],
                      output_names=[
                          'generate_{}_proposals/boxes'.format(
                              'fpn' if cfg.MODE_FPN else 'rpn'),
                          'generate_{}_proposals/scores'.format(
                              'fpn' if cfg.MODE_FPN else 'rpn'),
                          'fastrcnn_all_scores',
                          'output/boxes',
                          'output/scores',
                          'output/labels',
                      ]))

    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df), nr_visualize):
            img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp[
                'gt_labels']

            rpn_boxes, rpn_scores, all_scores, \
                final_boxes, final_scores, final_labels = pred(
                    img, gt_boxes, gt_labels)

            # draw groundtruth boxes
            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            # draw best proposals for each groundtruth, to show recall
            proposal_viz, good_proposals_ind = draw_proposal_recall(
                img, rpn_boxes, rpn_scores, gt_boxes)
            # draw the scores for the above proposals
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind],
                                         all_scores[good_proposals_ind])

            results = [
                DetectionResult(*args)
                for args in zip(final_boxes, final_scores, final_labels,
                                [None] * len(final_labels))
            ]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches(
                [gt_viz, proposal_viz, score_viz, final_viz], 2, 2)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
Example #6
def test_checkpoint(args):

    model = AttentionOCR()
    predcfg = PredictConfig(model=model,
                            session_init=SmartInit(args.checkpoint_path),
                            input_names=model.get_inferene_tensor_names()[0],
                            output_names=model.get_inferene_tensor_names()[1])

    predictor = OfflinePredictor(predcfg)
    list_dict = []
    with open("result/model-500000-512.txt", "w") as f:
        ned = 0.
        count = 0
        for filename in os.listdir(args.img_folder)[500:]:
            results = {}
            img_path = os.path.join(args.img_folder, filename)
            print("----> image path: ", img_path)
            name = filename.split('_')[0]
            image = cv2.imread(img_path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            height, width = image.shape[:2]
            points = [[0, 0], [width - 1, 0], [width - 1, height - 1],
                      [0, height - 1]]

            image = preprocess(image, points, cfg.image_size)

            before = time.time()
            preds, probs = predictor(np.expand_dims(image, axis=0),
                                     np.ones([1, cfg.seq_len + 1], np.int32),
                                     False, 1.)
            print(preds)
            print(probs)

            after = time.time()
            text, confidence = label2str(preds[0], probs[0], cfg.label_dict)
            print("Text: ", text)
            print("Label: ", name)
            print("confidence: ", confidence)
            print("cal_sim: ", cal_sim(text, name))
            ned += cal_sim(text, name)
            count += 1
            print("-------------------------------")
            f.write("Path: {}".format(img_path))
            f.write("\n")
            f.write("Text: {}".format(text))
            f.write("\n")
            f.write("Label: {}".format(name))
            f.write("\n")
            f.write("Confidence: {}".format(confidence))
            f.write("\n")
            f.write("1-N.E.D: {}".format(cal_sim(text, name)))
            f.write("\n")
            f.write("---------------------------------------------")
            f.write("\n")
        f.write("Total {} Images | Average NED: {}".format(count, ned / count))
Example #7
    def load_model(self):
        print('Loading Model...')
        model_path = self.model_path
        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(self.nr_types, self.input_shape,
                                    self.mask_shape, self.input_norm),
            session_init=get_model_loader(model_path),
            input_names=self.input_tensor_names,
            output_names=self.output_tensor_names)
        self.predictor = OfflinePredictor(pred_config)
Example #8
    def run(self, save_only): 
        if self.inf_auto_find_chkpt:
            chkpt_dirs = [name for name in os.listdir(self.save_dir)
                          if os.path.isdir(os.path.join(self.save_dir, name))]
            self.inf_model_path = os.path.join(
                self.save_dir, str(max(int(x) for x in chkpt_dirs)))
            print(f"Inference model path: <{self.inf_model_path}>")
            print('-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison' % \
                        (self.inf_auto_metric, self.inf_auto_comparator))
            model_path, stat = get_best_chkpts(self.inf_model_path, self.inf_auto_metric, self.inf_auto_comparator)
            print('Selecting: %s' % model_path)
            print('Having Following Statistics:')
            for key, value in stat.items():
                print('\t%s: %s' % (key, value))
        else:
            model_path = self.inf_model_path

        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model        = model_constructor(),
            session_init = get_model_loader(model_path),
            input_names  = self.eval_inf_input_tensor_names,
            output_names = self.eval_inf_output_tensor_names)
        predictor = OfflinePredictor(pred_config)
        
        if save_only:
            exporter = ModelExporter(pred_config)
            rm_n_mkdir(self.model_export_dir)
            print ('{}/compact.pb'.format(self.model_export_dir))
            exporter.export_compact(filename='{}/compact.pb'.format(self.model_export_dir))
            exporter.export_serving(os.path.join(self.model_export_dir, 'serving'), signature_name='serving_default')
            return

        for num, data_dir in enumerate(self.inf_data_list):
            save_dir = os.path.join(self.inf_output_dir, str(num))

            file_list = glob.glob(os.path.join(data_dir, '*{}'.format(self.inf_imgs_ext)))
            file_list.sort() # ensure same order

            rm_n_mkdir(save_dir)
            for filename in file_list:
                filename = os.path.basename(filename)
                basename = filename.split('.')[0]
                print(data_dir, basename, end=' ', flush=True)

                ##
                img = cv2.imread(os.path.join(data_dir, filename))
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                ##
                pred_map = self.__gen_prediction(img, predictor)
                sio.savemat(os.path.join(save_dir,'{}.mat'.format(basename)), {'result':[pred_map]})
                print(f"Finished. {datetime.now().strftime('%H:%M:%S.%f')}")
Example #9
    def load_model(self):
        """Loads the model and checkpoints."""
        print("Loading Model...")
        model_path = self.model_path
        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(
                self.nr_types, self.patch_input_shape,
                self.patch_output_shape, self.input_norm
            ),
            session_init=get_model_loader(model_path),
            input_names=self.input_tensor_names,
            output_names=self.output_tensor_names,
        )
        self.predictor = OfflinePredictor(pred_config)
Example #10
    def load_model(self):
        """
        Loads the model and checkpoints according to the model stated in config.py
        """

        print('Loading Model...')
        model_path = self.model_path
        model_constructor = self.get_model()
        pred_config = PredictConfig(model=model_constructor(
            self.nr_types, self.input_shape, self.mask_shape, self.input_norm),
                                    session_init=get_model_loader(model_path),
                                    input_names=self.input_tensor_names,
                                    output_names=self.output_tensor_names)
        self.predictor = OfflinePredictor(pred_config)
Example #11
    def run(self):

        if self.inf_auto_find_chkpt:
            print('-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison' % \
                        (self.inf_auto_metric, self.inf_auto_comparator))
            model_path, stat = get_best_chkpts(self.save_dir,
                                               self.inf_auto_metric,
                                               self.inf_auto_comparator)
            print('Selecting: %s' % model_path)
            print('Having Following Statistics:')
            for key, value in stat.items():
                print('\t%s: %s' % (key, value))
        else:
            model_path = self.inf_model_path

        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(),
            session_init=get_model_loader(model_path),
            input_names=self.eval_inf_input_tensor_names,
            output_names=self.eval_inf_output_tensor_names)
        predictor = OfflinePredictor(pred_config)

        for data_dir_set in self.inf_data_list:
            data_root_dir = data_dir_set[0]
            data_out_code = data_dir_set[1]

            for subdir in data_dir_set[2:]:
                data_dir = '%s/%s/' % (data_root_dir, subdir)
                save_dir = '%s/%s/%s' % (self.inf_output_dir, data_out_code,
                                         subdir)

                file_list = glob.glob('%s/*%s' % (data_dir, self.inf_imgs_ext))
                file_list.sort()  # ensure same order

                rm_n_mkdir(save_dir)
                for filename in file_list:
                    filename = os.path.basename(filename)
                    basename = filename.split('.')[0]
                    print(data_dir, basename, end=' ', flush=True)

                    ##
                    img = cv2.imread(data_dir + filename)
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                    ##
                    pred_map = self.__gen_prediction(img, predictor)
                    sio.savemat('%s/%s.mat' % (save_dir, basename),
                                {'result': [pred_map]})
                    print('FINISH')
Example #12
    def run(self):

        if self.inf_auto_find_chkpt:
            print(
                '-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison'
                % (self.inf_auto_metric, self.inf_auto_comparator))
            model_path, stat = get_best_chkpts(self.save_dir,
                                               self.inf_auto_metric,
                                               self.inf_auto_comparator)
            print("Selecting: %s" % model_path)
            print("Having Following Statistics:")
            for key, value in stat.items():
                print("\t%s: %s" % (key, value))
        else:
            model_path = self.inf_model_path

        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(),
            session_init=get_model_loader(model_path),
            input_names=self.eval_inf_input_tensor_names,
            output_names=self.eval_inf_output_tensor_names,
            create_graph=False,
        )
        predictor = OfflinePredictor(pred_config)

        for data_dir in self.inf_data_list:
            save_dir = self.inf_output_dir + "/raw/"
            file_list = glob.glob("%s/*%s" % (data_dir, self.inf_imgs_ext))
            file_list.sort()  # ensure same order

            rm_n_mkdir(save_dir)
            for filename in file_list:
                start = time.time()
                filename = os.path.basename(filename)
                basename = filename.split(".")[0]
                print(data_dir, basename, end=" ", flush=True)

                ##
                img = cv2.imread(data_dir + filename)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                pred_map = self.__gen_prediction(img, predictor)

                np.save("%s/%s.npy" % (save_dir, basename), [pred_map])
                end = time.time()
                diff = str(round(end - start, 2))
                print("FINISH. TIME: %s" % diff)
Example #13
def init_predictor():
    register_coco(cfg.DATA.BASEDIR)
    MODEL = ResNetFPNModel()
    finalize_configs(is_training=False)

    predcfg = PredictConfig(
        model=MODEL,
        #session_init=SmartInit("/home/jetson/Documents/trained_model/500000_17/checkpoint"),
        session_init=SmartInit(
            "/home/jetson/Documents/trained_model/255000_04.01/checkpoint"),
        input_names=MODEL.get_inference_tensor_names()[0],
        output_names=MODEL.get_inference_tensor_names()[1])

    predictor = OfflinePredictor(predcfg)

    return predictor
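
`init_predictor` only constructs the predictor. A hypothetical driver is sketched below; it reuses the `predict_image` helper and the `DetectionResult` tuples (box, score, class_id, mask) that appear elsewhere in these examples, and the import path assumes the layout of tensorpack's FasterRCNN example.

import cv2
from eval import predict_image  # assumed: tensorpack examples/FasterRCNN/eval.py

def detect(image_path, predictor):
    # Hypothetical driver, not part of the original snippet.
    img = cv2.imread(image_path, cv2.IMREAD_COLOR)
    results = predict_image(img, predictor)
    for r in results:
        print(int(r.class_id), round(float(r.score), 3),
              [round(float(x), 1) for x in r.box])
    return results
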
Example #14
def inference(input_data, hps, predictor=None):

    # `input_data` is a numpy array of shape [M, T, D];
    # returns softmax probabilities over C classes.
    if predictor is None:
        M = ModelDesc(hps)
        pred_config = PredictConfig(session_init=SmartInit(hps.checkpoint_path),
                                    model=M,
                                    input_names=['x'],
                                    output_names=['predict/y_pred']
                                    )
        predictor = OfflinePredictor(pred_config)

    outputs = predictor(input_data)
    rs = softmax(outputs[0])

    return rs
Example #15
def evaluate_rcnn(model_name, paper_arxiv_id, cfg_list, model_file):
    evaluator = COCOEvaluator(
        root=COCO_ROOT, model_name=model_name, paper_arxiv_id=paper_arxiv_id
    )
    category_id_to_coco_id = {
        v: k for k, v in COCODetection.COCO_id_to_category_id.items()
    }

    cfg.update_config_from_args(cfg_list)  # TODO backup/restore config
    finalize_configs(False)
    MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
    predcfg = PredictConfig(
        model=MODEL,
        session_init=SmartInit(model_file),
        input_names=MODEL.get_inference_tensor_names()[0],
        output_names=MODEL.get_inference_tensor_names()[1],
    )
    predictor = OfflinePredictor(predcfg)

    def xyxy_to_xywh(box):
        box[2] -= box[0]
        box[3] -= box[1]
        return box

    df = get_eval_dataflow("coco_val2017")
    df.reset_state()
    for img, img_id in tqdm.tqdm(df, total=len(df)):
        results = predict_image(img, predictor)
        res = [
            {
                "image_id": img_id,
                "category_id": category_id_to_coco_id.get(
                    int(r.class_id), int(r.class_id)
                ),
                "bbox": xyxy_to_xywh([round(float(x), 4) for x in r.box]),
                "score": round(float(r.score), 3),
            }
            for r in results
        ]
        evaluator.add(res)
        if evaluator.cache_exists:
            break

    evaluator.save()
Example #16
    def _prepare(self):
        disable_layer_logging()
        self.predictor = OfflinePredictor(self.pred_config)
        with self.predictor.graph.as_default():
            vars_to_update = self._params_to_update()
            self.sess_updater = SessionUpdate(
                self.predictor.session, vars_to_update)
        # TODO setup callback for explore?
        self.predictor.graph.finalize()

        self.weight_lock = threading.Lock()

        # start a thread to wait for notification
        def func():
            self.condvar.acquire()
            while True:
                self.condvar.wait()
                self._trigger_evt()
        self.evt_th = threading.Thread(target=func)
        self.evt_th.daemon = True
        self.evt_th.start()
Example #17
    def apply_model(self):
        # Run inference with the tensorpack predictor.
        model_constructor = importlib.import_module("model.graph")
        model_constructor = model_constructor.Model_NP_HV

        pred_config = PredictConfig(
            session_init=SaverRestoreRelaxed(self.inf_model_path),
            model=model_constructor(),
            input_names=self.eval_inf_input_tensor_names,
            output_names=self.eval_inf_output_tensor_names,
        )
        ###
        predictor = OfflinePredictor(pred_config)
        img = cv2.cvtColor(cv2.imread(self.input_img_path), cv2.COLOR_BGR2RGB)
        basename = os.path.basename(self.input_img_path).split(".")[0]
        ###
        pred_map = self.__gen_prediction(img, predictor, compact=False)
        sio.savemat(
            os.path.join(self.save_dir, "{}.mat".format(basename)),
            {"result": [pred_map]},
        )
        print(f"Finished. {datetime.now().strftime('%H:%M:%S.%f')}")
Example #18
def eval(args, filenames, polygons, labels, label_dict=cfg.label_dict):
    Normalized_ED = 0.
    total_num = 0
    total_time = 0

    model = AttentionOCR()
    predcfg = PredictConfig(model=model,
                            session_init=SmartInit(args.checkpoint_path),
                            input_names=model.get_inferene_tensor_names()[0],
                            output_names=model.get_inferene_tensor_names()[1])

    predictor = OfflinePredictor(predcfg)

    for filename, points, label in zip(filenames, polygons, labels):
        image = cv2.imread(filename)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = preprocess(image, points, cfg.image_size)

        before = time.time()
        preds, probs = predictor(np.expand_dims(image, axis=0),
                                 np.ones([1, cfg.seq_len + 1], np.int32),
                                 False, 1.)
        after = time.time()

        total_time += after - before
        preds, probs = label2str(preds[0], probs[0], label_dict)
        print(label)
        print(preds, probs)

        sim = cal_sim(preds, label)

        total_num += 1
        Normalized_ED += sim

    print("total_num: %d, N.E.D: %.4f, average time: %.4f" %
          (total_num, Normalized_ED / total_num, total_time / total_num))
Example #19
    def run(self, data_dir, output_dir, model_path, ambi_path, img_ext='.png'):

        if (not data_dir):
            print('Using Config file path for data_dir.')
            data_dir = self.inf_data_dir
        if (not output_dir):
            print('Using Config file path for output_dir.')
            output_dir = self.inf_output_dir
        if (not model_path):
            print('Using placeholder path for model_dir.')
            model_path = '/home/dm1/shikhar/hover_net_modified/v2_multitask/np_hv/07/model-35854.index'
        if (not img_ext):
            print('Using Config img ext value img_ext.')
            img_ext = self.inf_imgs_ext
        if (not ambi_path):
            # Hard coding path here for single test run.
            ambi_path = '/home/dm1/shikhar/check_sandbox/testing_code/MoNuSAC_testing_data/MoNuSAC_testing_ambiguous_regions'

        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(),
            session_init=get_model_loader(model_path),
            input_names=self.eval_inf_input_tensor_names,
            output_names=self.eval_inf_output_tensor_names)
        predictor = OfflinePredictor(pred_config)

        #file_list = glob.glob('%s/*%s' % (data_dir, img_ext))
        #file_list.sort() # ensure same order
        #if(not file_list):
        # print('No Images found in data_dir! Check script arg-paths')
        # Create Output Directory
        #rm_n_mkdir(output_dir)
        # Expecting MoNuSAC's input data directory tree (Patient Name -> Image Name -> )

        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        os.chdir(output_dir)
        patients = [x[0] for x in os.walk(data_dir)]  # Total patients in the data_path
        print(len(patients))

        for patient_loc in patients:
            patient_name = patient_loc[len(data_dir) + 1:]  #Patient name
            print(patient_name, flush=True)

            ## To make patient's name directory in the destination folder
            try:
                os.mkdir(patient_name)
            except OSError:
                print("\n Creation of the patient's directory %s failed" %
                      patient_name,
                      flush=True)

            sub_images = glob(str(patient_loc) + '/*' + str(img_ext))
            for sub_image_loc in sub_images:
                sub_image_name = sub_image_loc[len(data_dir) +
                                               len(patient_name) + 1:-4]
                print(sub_image_name)

                ## To make sub_image directory under the patient's folder
                sub_image = './' + patient_name + sub_image_name  #Destination path
                try:
                    os.mkdir(sub_image)
                except OSError:
                    print("\n Creation of the patient's directory %s failed" %
                          sub_image)

                image_name = sub_image_loc
                if (img_ext == '.svs'):
                    img = openslide.OpenSlide(image_name)
                    cv2.imwrite(
                        sub_image_loc[:-4] + '.png',
                        np.array(
                            img.read_region((0, 0), 0,
                                            img.level_dimensions[0])))
                    img = cv2.imread(sub_image_loc[:-4] + '.png')
                else:
                    img = cv2.imread(image_name)
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                ## Generate Prediction Map
                pred_map = self.__gen_prediction(img, predictor)
                pred = pred_map
                # Process Prediction Map
                pred_inst = pred[..., self.nr_types:]
                pred_type = pred[..., :self.nr_types]
                pred_inst = np.squeeze(pred_inst)
                pred_type = np.argmax(pred_type, axis=-1)
                pred_inst = postproc.hover.proc_np_hv(pred_inst,
                                                      marker_mode=marker_mode,
                                                      energy_mode=energy_mode,
                                                      rgb=img)
                pred_inst = remap_label(pred_inst, by_size=True)

                # Read Ambiguous Region mask if any
                ambi_mask_final = None
                save_mask = None
                full_ambi_path = ambi_path + '/' + patient_name + '/' + sub_image_name + '/Ambiguous'
                ambi_masks = glob(full_ambi_path + '/*')
                if (ambi_masks):
                    try:
                        ambi_mask_final = cv2.imread(ambi_masks[0])
                        print('Ambiguous Mask Found: ', ambi_mask_final.shape)
                        save_mask = ambi_mask_final
                        gray = cv2.cvtColor(ambi_mask_final,
                                            cv2.COLOR_BGR2GRAY)
                        count, ambi = cv2.connectedComponents(gray)

                        match_iou = 0.01  # Modify parameter experimentally
                        # Remove Ambiguous region
                        pairwise_iou = get_iou(true=ambi, pred=pred_inst)
                        matched_regions = np.array(pairwise_iou >= match_iou,
                                                   np.uint8)
                        matched_region_list = np.nonzero(matched_regions)[1]
                        pred_inst_copy = pred_inst.copy()
                        for id in matched_region_list:
                            region_id = id + 1
                            pred_inst[pred_inst == region_id] = 0

                        # Re-Order Cleaned pred_inst
                        pred_inst = remap_label(pred_inst, by_size=True)

                    except Exception as e:
                        print('\n\t [Warn] Ambiguous Region not removed : ', e)
                else:
                    print('\n\t No Ambiguous Masks for this image: ',
                          full_ambi_path)

                # Map Instances to Labels for creating submission format
                pred_id_list = list(
                    np.unique(pred_inst))[1:]  # exclude background ID
                pred_inst_type = np.full(len(pred_id_list), 0, dtype=np.int32)
                for idx, inst_id in enumerate(pred_id_list):
                    inst_type = pred_type[pred_inst == inst_id]
                    type_list, type_pixels = np.unique(inst_type,
                                                       return_counts=True)
                    type_list = list(zip(type_list, type_pixels))
                    type_list = sorted(type_list,
                                       key=lambda x: x[1],
                                       reverse=True)
                    inst_type = type_list[0][0]
                    if inst_type == 0:  # ! pick the 2nd most dominant if exist
                        if len(type_list) > 1:
                            inst_type = type_list[1][0]
                        else:
                            print('[Warn] Instance has `background` type')
                    pred_inst_type[idx] = inst_type

                # Write Instance Maps based on their Classes/Labels to the folders
                for class_id in range(1, self.nr_types):
                    separated_inst = pred_inst.copy()
                    separated_inst[pred_inst_type[separated_inst -
                                                  1] != [class_id]] = 0
                    separated_inst = separated_inst.astype(np.uint8)
                    # Create directory for each label
                    label = class_id_mapping[class_id]
                    sub_path = sub_image + '/' + label
                    try:
                        os.mkdir(sub_path)
                    except OSError:
                        print("Creation of the directory %s failed" % label)
                    else:
                        print("Successfully created the directory %s " % label)

                    # Check if Mask is empty then write
                    check = np.unique(separated_inst)
                    if ((len(check) == 1) & (check[0] == 0)):
                        print('Empty inst. Not writing.', check)
                    else:
                        sio.savemat(sub_path + '/mask.mat',
                                    {'n_ary_mask': separated_inst})
Example #20
    return rs


if __name__ == "__main__":
    checkpoint_path = './train_log/train/checkpoint'
    hps = get_default_hparams()

    mongo_client = MongoClient('localhost', 27017)
    db = mongo_client.crypto_currency
    collection = db['ohlcv']
    market = 'binance'
    symbol = 'BNB/BTC'
    timewindow = '1h'
    query = {'market': market, 'symbol': symbol, 'timewindow': timewindow}

    df_data = pd.DataFrame(list(collection.find(query)))[-hps.M - 100:]
    df_data = df_data[hps.attributes_normalize_mean]

    model = ModelDesc(hps)
    pred_config = PredictConfig(session_init=SmartInit(hps.checkpoint_path),
                                model=model,
                                input_names=['x'],
                                output_names=['predict/y_pred']
                                )
    predictor = OfflinePredictor(pred_config)

    # input_data = np.random.rand(hps.M, hps.T, hps.D)
    input_data =  test_segment(df_data, hps)

    rs = inference(input_data[-hps.M:], hps=hps, predictor=predictor)
Example #21
    def run(self, data_dir, output_dir, model_path, img_ext='.png'):

        if (not data_dir):
            print('Using Config file path for data_dir.')
            data_dir = self.inf_data_dir
        if (not output_dir):
            print('Using Config file path for output_dir.')
            output_dir = self.inf_output_dir
        if (not model_path):
            print('Using placeholder path for model_dir.')
            model_path = '/home/dm1/shikhar/hover_net_modified/v2_multitask/np_hv/07/model-35854.index'
        if (not img_ext):
            print('Using Config img ext value img_ext.')
            img_ext = self.inf_imgs_ext

        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(),
            session_init=get_model_loader(model_path),
            input_names=self.eval_inf_input_tensor_names,
            output_names=self.eval_inf_output_tensor_names)
        predictor = OfflinePredictor(pred_config)

        #file_list = glob.glob('%s/*%s' % (data_dir, img_ext))
        #file_list.sort() # ensure same order
        #if(not file_list):
        # print('No Images found in data_dir! Check script arg-paths')
        # Create Output Directory
        #rm_n_mkdir(output_dir)
        # Expecting MoNuSAC's input data directory tree (Patient Name -> Image Name -> )

        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        os.chdir(output_dir)
        patients = [x[0] for x in os.walk(data_dir)]  # Total patients in the data_path
        print(len(patients))

        for patient_loc in patients:
            patient_name = patient_loc[len(data_dir) + 1:]  #Patient name
            print(patient_name, flush=True)

            ## To make patient's name directory in the destination folder
            try:
                os.mkdir(patient_name)
            except OSError:
                print("\n Creation of the patient's directory %s failed" %
                      patient_name,
                      flush=True)

            sub_images = glob(str(patient_loc) + '/*' + str(img_ext))
            for sub_image_loc in sub_images:
                sub_image_name = sub_image_loc[len(data_dir) +
                                               len(patient_name) + 1:-4]
                print(sub_image_name)

                ## To make sub_image directory under the patient's folder
                sub_image = './' + patient_name + sub_image_name  #Destination path
                try:
                    os.mkdir(sub_image)
                except OSError:
                    print("\n Creation of the patient's directory %s failed" %
                          sub_image)

                image_name = sub_image_loc
                if (img_ext == '.svs'):
                    img = openslide.OpenSlide(image_name)
                    cv2.imwrite(
                        sub_image_loc[:-4] + '.png',
                        np.array(
                            img.read_region((0, 0), 0,
                                            img.level_dimensions[0])))
                    img = cv2.imread(sub_image_loc[:-4] + '.png')
                else:
                    img = cv2.imread(image_name)
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                ## Generate Prediction Map
                pred_map = self.__gen_prediction(img, predictor)
                pred = pred_map
                # Process Prediction Map
                pred_inst = pred[..., self.nr_types:]
                pred_type = pred[..., :self.nr_types]
                pred_inst = np.squeeze(pred_inst)
                pred_type = np.argmax(pred_type, axis=-1)
                pred_inst = postproc.hover.proc_np_hv(pred_inst,
                                                      marker_mode=marker_mode,
                                                      energy_mode=energy_mode,
                                                      rgb=img)
                pred_inst = remap_label(pred_inst, by_size=True)

                # Map Instances to Labels for creating submission format
                pred_id_list = list(
                    np.unique(pred_inst))[1:]  # exclude background ID
                pred_inst_type = np.full(len(pred_id_list), 0, dtype=np.int32)
                for idx, inst_id in enumerate(pred_id_list):
                    inst_type = pred_type[pred_inst == inst_id]
                    type_list, type_pixels = np.unique(inst_type,
                                                       return_counts=True)
                    type_list = list(zip(type_list, type_pixels))
                    type_list = sorted(type_list,
                                       key=lambda x: x[1],
                                       reverse=True)
                    inst_type = type_list[0][0]
                    if inst_type == 0:  # ! pick the 2nd most dominant if exist
                        if len(type_list) > 1:
                            inst_type = type_list[1][0]
                        else:
                            print('[Warn] Instance has `background` type')
                    pred_inst_type[idx] = inst_type

                # Write Instance Maps based on their Classes/Labels to the folders
                for class_id in range(1, self.nr_types):
                    separated_inst = pred_inst.copy()
                    separated_inst[pred_inst_type[separated_inst -
                                                  1] != [class_id]] = 0
                    # Create directory for each label
                    label = class_id_mapping[class_id]
                    sub_path = sub_image + '/' + label
                    try:
                        os.mkdir(sub_path)
                    except OSError:
                        print("Creation of the directory %s failed" % label)
                    else:
                        print("Successfully created the directory %s " % label)

                    sio.savemat(sub_path + '/maskorempty.mat',
                                {'n_ary_mask': separated_inst})
Example #22
    def run(self):
        if self.inf_auto_find_chkpt:
            print(
                '-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison'
                % (self.inf_auto_metric, self.inf_auto_comparator))
            model_path, stat = get_best_chkpts(self.save_dir,
                                               self.inf_auto_metric,
                                               self.inf_auto_comparator)
            print("Selecting: %s" % model_path)
            print("Having Following Statistics:")
            for key, value in stat.items():
                print("\t%s: %s" % (key, value))
        else:
            model_path = self.inf_model_path
        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(),
            session_init=get_model_loader(model_path),
            input_names=self.eval_inf_input_tensor_names,
            output_names=self.eval_inf_output_tensor_names,
            create_graph=False)
        predictor = OfflinePredictor(pred_config)

        ####
        save_dir = self.inf_output_dir
        predict_list = [["case", "prediction"]]

        file_load_img = HDF5Matrix(
            self.inf_data_list[0] + "camelyonpatch_level_2_split_test_x.h5",
            "x")
        file_load_lab = HDF5Matrix(
            self.inf_data_list[0] + "camelyonpatch_level_2_split_test_y.h5",
            "y")

        true_list = []
        prob_list = []
        pred_list = []

        num_ims = file_load_img.shape[0]
        last_step = math.floor(num_ims / self.inf_batch_size)
        last_step = self.inf_batch_size * last_step
        last_batch = num_ims - last_step
        count = 0
        for start_batch in range(0, last_step + 1, self.inf_batch_size):
            sys.stdout.write("\rProcessed (%d/%d)" % (start_batch, num_ims))
            sys.stdout.flush()
            if start_batch != last_step:
                img = file_load_img[start_batch:start_batch +
                                    self.inf_batch_size]
                img = img.astype("uint8")
                lab = np.squeeze(file_load_lab[start_batch:start_batch +
                                               self.inf_batch_size])
            else:
                img = file_load_img[start_batch:start_batch + last_batch]
                img = img.astype("uint8")
                lab = np.squeeze(file_load_lab[start_batch:start_batch +
                                               last_batch])

            prob, pred = self.__gen_prediction(img, predictor)

            for j in range(prob.shape[0]):
                predict_list.append([str(count), str(prob[j])])
                count += 1

            prob_list.extend(prob)
            pred_list.extend(pred)
            true_list.extend(lab)

        prob_list = np.array(prob_list)
        pred_list = np.array(pred_list)
        true_list = np.array(true_list)
        accuracy = (pred_list == true_list).sum() / np.size(true_list)
        error = (pred_list != true_list).sum() / np.size(true_list)

        print("Accurcy (%): ", 100 * accuracy)
        print("Error (%): ", 100 * error)
        if self.model_mode == "class_pcam":
            auc = roc_auc_score(true_list, prob_list)
            print("AUC: ", auc)

        # Save predictions to csv
        rm_n_mkdir(save_dir)
        with open("%s/predict.csv" % save_dir, "w") as predict_file:
            for result in predict_list:
                predict_file.write("%s,%s\n" % (result[0], result[1]))
Example #23
def predict_unlabeled(model,
                      model_path,
                      nr_visualize=100,
                      output_dir='output_patch_samples'):
    """Predict the pseudo label information of unlabeled data."""

    assert cfg.EVAL.PSEUDO_INFERENCE, 'set cfg.EVAL.PSEUDO_INFERENCE=True'
    df, dataset_size = get_eval_unlabeled_dataflow(cfg.DATA.TRAIN,
                                                   return_size=True)
    df.reset_state()
    predcfg = PredictConfig(
        model=model,
        session_init=SmartInit(model_path),
        input_names=['image'],  # ['image', 'gt_boxes', 'gt_labels'],
        output_names=[
            'generate_{}_proposals/boxes'.format(
                'fpn' if cfg.MODE_FPN else 'rpn'),
            'generate_{}_proposals/scores'.format(
                'fpn' if cfg.MODE_FPN else 'rpn'),
            'fastrcnn_all_scores',
            'output/boxes',
            'output/scores',  # score of the labels
            'output/labels',
        ])
    pred = OfflinePredictor(predcfg)

    if os.path.isdir(output_dir):
        if os.path.isfile(os.path.join(output_dir, 'pseudo_data.npy')):
            os.remove(os.path.join(output_dir, 'pseudo_data.npy'))
        if not os.path.isdir(os.path.join(output_dir, 'vis')):
            os.makedirs(os.path.join(output_dir, 'vis'))
        else:
            shutil.rmtree(os.path.join(output_dir, 'vis'))
            fs.mkdir_p(output_dir + '/vis')
    else:
        fs.mkdir_p(output_dir)
        fs.mkdir_p(output_dir + '/vis')
    logger.warning('-' * 100)
    logger.warning('Write to {}'.format(output_dir))
    logger.warning('-' * 100)

    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df), nr_visualize):
            img, img_id = dp  # dp['image'], dp['img_id']
            rpn_boxes, rpn_scores, all_scores, \
                final_boxes, final_scores, final_labels = pred(img)
            outs = {
                'proposals_boxes': rpn_boxes,  # (?,4)
                'proposals_scores': rpn_scores,  # (?,)
                'boxes': final_boxes,
                'scores': final_scores,
                'labels': final_labels
            }
            ratios = [10,
                      10]  # [top 20% as background, bottom 20% as background]
            bg_ind, fg_ind = custom.find_bg_and_fg_proposals(all_scores,
                                                             ratios=ratios)

            bg_viz = draw_predictions(img, rpn_boxes[bg_ind],
                                      all_scores[bg_ind])

            fg_viz = draw_predictions(img, rpn_boxes[fg_ind],
                                      all_scores[fg_ind])

            results = [
                DetectionResult(*args)
                for args in zip(final_boxes, final_scores, final_labels,
                                [None] * len(final_labels))
            ]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches([bg_viz, fg_viz, final_viz], 2, 2)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            assert cv2.imwrite('{}/vis/{:03d}.png'.format(output_dir, idx),
                               viz)
            pbar.update()
    logger.info('Write {} samples to {}'.format(nr_visualize, output_dir))

    ## Parallel inference over the whole unlabeled data
    pseudo_preds = collections.defaultdict(list)

    num_tower = max(cfg.TRAIN.NUM_GPUS, 1)
    graph_funcs = MultiTowerOfflinePredictor(predcfg, list(
        range(num_tower))).get_predictors()
    dataflows = [
        get_eval_unlabeled_dataflow(cfg.DATA.TRAIN,
                                    shard=k,
                                    num_shards=num_tower)
        for k in range(num_tower)
    ]

    all_results = multithread_predict_dataflow(dataflows, graph_funcs)

    for id, result in tqdm.tqdm(enumerate(all_results)):
        img_id = result['image_id']
        outs = {
            'proposals_boxes':
            result['proposal_box'].astype(np.float16),  # (?,4)
            'proposals_scores':
            result['proposal_score'].astype(np.float16),  # (?,)
            # 'frcnn_all_scores': result['frcnn_score'].astype(np.float16),
            'boxes': result['bbox'].astype(np.float16),  # (?,4)
            'scores': result['score'].astype(np.float16),  # (?,)
            'labels': result['category_id'].astype(np.float16)  # (?,)
        }
        pseudo_preds[img_id] = outs
    logger.warn('Writing to {}'.format(
        os.path.join(output_dir, 'pseudo_data.npy')))
    try:
        dd.io.save(os.path.join(output_dir, 'pseudo_data.npy'), pseudo_preds)
    except RuntimeError:
        logger.error('Save failed. Check reasons manually...')
Example #24
def eval_retrieval(evalargs):
    # Set up evaluation:
    save_dir = evalargs.save_dir
    mkdir_p(evalargs.save_dir)

    model_configs = get_model_config(evalargs.ModelPath)

    # Set up graph:
    pred_config = PredictConfig(
        model=DH3D(model_configs),
        session_init=get_model_loader(evalargs.ModelPath),
        input_names=['pointclouds'],
        output_names=['globaldesc'],  # ['globaldesc'], output_weights
    )
    predictor = OfflinePredictor(pred_config)

    # Data:
    df, totalbatch = get_eval_global_testdata(model_configs,
                                              evalargs.data_path,
                                              evalargs.ref_gt_file)

    # Predict:
    pcdnum = 0
    for [pcds, names] in df:  # pcds is a list, batchsize x numpts x 3
        batch = pcds.shape[0]
        if totalbatch > batch:
            numpts = pcds.shape[1]
            pcddim = pcds.shape[2]
            padzeros = np.zeros([totalbatch - batch, numpts, pcddim],
                                dtype=np.float32)
            pcds = np.vstack([pcds, padzeros])
        results = predictor(pcds)

        global_feats = results[0]

        for i in range(batch):
            pcdnum += 1
            globaldesc = global_feats[i]
            name = names[i]
            savename = os.path.join(evalargs.save_dir, name)
            basedir = os.path.dirname(savename)
            mkdir_p(basedir)
            globaldesc.tofile(savename)

    print('predicted {} pointclouds\n'.format(pcdnum))

    # Evaluation recall:
    if evalargs.eval_recall:
        evaluator = GlobalDesc_eval(
            result_savedir='./',
            desc_dir=save_dir,
            database_file=os.path.join(evalargs.data_path,
                                       evalargs.ref_gt_file),
            query_file=os.path.join(evalargs.data_path, evalargs.qry_gt_file),
            max_num_nn=25)
        evaluator.evaluate()
        print("evaluation finished!\n")

    if evalargs.delete_tmp:
        # delete all the descriptors
        descdirs = [os.path.join(save_dir, f) for f in os.listdir(save_dir)]
        descdirs = [d for d in descdirs if os.path.isdir(d)]
        for d in descdirs:
            shutil.rmtree(d)
Example #25
def eval_child(model_cls, args, log_dir, model_dir, collect_hallu_stats=True):
    """
    Args:
        model_cls (PetridishModel) :
        args :
        log_dir (str): where to log
        model_dir (str) : where to load from
        collect_hallu_stats (bool) : whether to collect hallu stats if there are any.
    Return:
        eval_vals (list) : a list of evaluation related value.
        The first is the vaildation error on the specified validation set;
        it is followed by hallucination stats.
    """
    ckpt = tf.train.latest_checkpoint(model_dir)
    if not ckpt:
        logger.info("No model exists. Do not sort")
        return []
    args.compute_hallu_stats = True
    (model, args, ds_val, insrc_val, output_names,
     output_funcs) = get_training_params(model_cls, args, is_training=False)
    n_outputs = len(output_names)
    logger.info("{} num vals present. Will use the final perf {} as eval score".format(\
        n_outputs, output_names[-1]))
    stats_handlers = [StatCounter() for _ in range(n_outputs)]

    # additional handlers for hallucinations
    if collect_hallu_stats:
        hallu_stats_names = get_net_info_hallu_stats_output_names(
            model.net_info)
        stats_handlers.extend([StatCounter() for _ in hallu_stats_names])
        output_names.extend(hallu_stats_names)
    # Note at this point stats_handlers[n_outputs-1:] contains all
    # the value needed for evaluation.

    # batch size counter
    sample_counter = StatCounter()
    # ignore loading certain variables during inference
    ignore_names = getattr(model, 'load_ignore_var_names', [])
    pred_config = PredictConfig(model=model,
                                input_names=model._input_names,
                                output_names=output_names,
                                session_init=SaverRestore(ckpt,
                                                          ignore=ignore_names))
    predictor = OfflinePredictor(pred_config)

    # two types of input, dataflow or input_source
    if ds_val:
        gen = ds_val.get_data()
        ds_val.reset_state()
        input_sess = None
    else:
        if not insrc_val.setup_done():
            insrc_val.setup(model.get_inputs_desc())
        sess_config = get_default_sess_config()
        sess_config.device_count['GPU'] = 0
        input_tensors = insrc_val.get_input_tensors()
        sess_creater = tf.train.ChiefSessionCreator(config=sess_config)
        input_sess = tf.train.MonitoredSession(sess_creater)

        def _gen_func():
            insrc_val.reset_state()
            for _ in range(insrc_val.size()):
                yield input_sess.run(input_tensors)

        gen = _gen_func()

    for dp_idx, dp in enumerate(gen):
        output = predictor(*dp)
        batch_size = output[n_outputs - 1].shape[0]
        sample_counter.feed(batch_size)
        for o, handler in zip(output, stats_handlers):
            handler.feed(np.sum(o))
        if (args.debug_steps_per_epoch
                and dp_idx + 1 >= args.debug_steps_per_epoch):
            # stop early during debugging
            break
    eval_vals = []
    N = float(sample_counter.sum)
    for hi, handler in enumerate(stats_handlers):
        stat = handler.sum / float(N)
        logger.info('Stat {} has an avg of {}'.format(hi, stat))
        if hi < n_outputs:
            o_func = output_funcs[hi]
            if o_func is not None:
                stat = o_func(stat)
        if hi >= n_outputs - 1:
            # Note that again n_outputs - 1 is the eval val
            # followed by hallu stats.
            eval_vals.append(stat)
    if input_sess:
        input_sess.close()
    logger.info("evaluation_value={}".format(eval_vals))
    return eval_vals
Example #26
    if args.visualize:
        do_visualize(MODEL, args.load)
    else:
        predcfg = PredictConfig(
            model=MODEL,
            session_init=get_model_loader(args.load),
            input_names=MODEL.get_inference_tensor_names()[0],
            output_names=MODEL.get_inference_tensor_names()[1])

        if args.compact:
            ModelExporter(predcfg).export_compact(args.compact, optimize=False)
        elif args.serving:
            ModelExporter(predcfg).export_serving(args.serving, optimize=False)

        if args.predict:
            predictor = OfflinePredictor(predcfg)
            for image_file in args.predict:
                do_predict(predictor, image_file)
        elif args.evaluate:
            assert args.evaluate.endswith('.json'), args.evaluate
            do_evaluate(predcfg, args.evaluate)
        elif args.benchmark:
            df = get_eval_dataflow(cfg.DATA.VAL[0])
            df.reset_state()
            predictor = OfflinePredictor(predcfg)
            for img in tqdm.tqdm(df, total=len(df)):
                # This include post-processing time, which is done on CPU and not optimized
                # To exclude it, modify `predict_image`.
                predict_image(img[0], predictor)
Example #27
    for cat_name in ['airplane', 'chair', 'table']:
        f.write(cat_name)
        f.write('\n')

        tf.reset_default_graph()
        test_dataset = KeypointDataset(args.kpnet_root, name2id[cat_name],
                                       'splits/test.txt')

        cfg = OmegaConf.load('config/config.yaml')
        cfg.cat_name = cat_name

        model_path = os.path.join('outputs', cat_name, 'tflogs', 'checkpoint')

        predictor = OfflinePredictor(
            config=PredictConfig(model=Model(cfg),
                                 input_names=['pc', 'pc_feature'],
                                 output_names=['encoder/z'],
                                 session_init=SaverRestore(model_path)))
        pred_all_iou = {cat_name: {}}
        gt_all = {cat_name: {}}

        for i in range(len(test_dataset.mesh_names)):
            mesh_name = test_dataset.mesh_names[i]
            if mesh_name not in pred_all_iou[cat_name]:
                pred_all_iou[cat_name][mesh_name] = []

            if mesh_name not in gt_all[cat_name]:
                gt_all[cat_name][mesh_name] = []

        for i, data in tqdm(enumerate(test_dataset)):
            mesh_name, pc, feature, label = data