Example 1
def visualize_regression_prediction_i(iou, iou_pred, i):
    """
    visualize the true and the predicted IoU per segment together with the
    ground truth and prediction overlays for one image
    :param iou:      (list/array) true IoU per connected component
    :param iou_pred: (list/array) predicted IoU per connected component
    :param i:        (int) id of the image to be processed
    """
    if os.path.isfile(get_save_path_input_i(i)):

        probs, gt, path = probs_gt_load(i)
        input_image = Image.open(path).convert("RGB")
        input_image = np.asarray(input_image.resize(probs.shape[:2][::-1]))
        components = components_load(i)

        # argmax prediction; map the ignore label 255 to class 0 for the color lookup
        pred = np.asarray(np.argmax(probs, axis=-1), dtype='int')
        gt[gt == 255] = 0
        predc = np.asarray(
            [trainId2label[pred[p, q]].color for p in range(pred.shape[0]) for q in range(pred.shape[1])])
        gtc = np.asarray([trainId2label[gt[p, q]].color for p in range(gt.shape[0]) for q in range(gt.shape[1])])
        predc = predc.reshape(input_image.shape)
        gtc = gtc.reshape(input_image.shape)

        # color-code each segment by its true IoU
        img_iou = visualize_segments(components, iou)

        # blend prediction and ground truth with the input image
        I4 = predc / 2.0 + input_image / 2.0
        I3 = gtc / 2.0 + input_image / 2.0

        # collage: true/predicted IoU on top, gt/prediction overlays below
        img_pred = visualize_segments(components, iou_pred)
        img = np.concatenate((img_iou, img_pred), axis=1)
        img2 = np.concatenate((I3, I4), axis=1)
        img = np.concatenate((img, img2), axis=0)
        image = Image.fromarray(img.astype('uint8'), 'RGB')

        seg_dir = CONFIG.IOU_SEG_VIS_DIR
        if not os.path.exists(seg_dir):
            os.makedirs(seg_dir)
        image.save(seg_dir + "img" + str(i) + ".png")
        plt.close()

        print("stored:", seg_dir + "img" + str(i) + ".png")
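Aside: the color-coding and blending above can be reproduced in isolation. Below is a minimal sketch with a made-up three-class color map standing in for Cityscapes' trainId2label; the vectorized lookup colors[pred] is equivalent to the per-pixel list comprehension used in the function.

import numpy as np

# hypothetical color map: one RGB color per train id (illustrative values)
colors = np.array([[128, 64, 128], [220, 20, 60], [70, 70, 70]], dtype=np.uint8)

pred = np.random.randint(0, 3, size=(4, 6))              # dummy argmax prediction
input_image = np.random.randint(0, 256, size=(4, 6, 3))  # dummy RGB input

predc = colors[pred]                                     # color-coded mask, shape (4, 6, 3)
overlay = predc / 2.0 + input_image / 2.0                # 50/50 blend as in I4 above
print(overlay.shape)                                     # (4, 6, 3)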
Example 2
    def compute_metrics_i(self, i, rule, ground_truth_analysis, alpha):
        """
        perform metrics computation for one image
        :param i: (int) id of the image to be processed
        :param rule: (str) decision rule
        :param ground_truth_analysis: (boolean) if True, compute metrics for gt mask
        :param alpha: (float) degree of interpolation between ML and Bayes
        """
        if os.path.isfile(get_save_path_input_i(i)) and self.rewrite:
            start = time.time()
            probs, gt, _ = probs_gt_load(i)

            # map rider class to person by excluding rider class
            probs[:, :, self.label_rider] = 0
            probs /= np.sum(probs, axis=-1, keepdims=True)
            gt[gt == self.label_rider] = self.label_person
            pred_bay = prediction(probs, gt)
            if "bayes" in rule:
                metrics, components = compute_metrics_components(probs, gt)
                metrics_dump(metrics, i, "bayes")
                components_dump(components, i, "bayes")
                if ground_truth_analysis:
                    metrics_gt_bay, components_gt_bay = compute_metrics_mask(
                        gt, pred_bay)
                    metrics_dump(metrics_gt_bay, i, "gt_bayes")
                    components_dump(components_gt_bay, i, "gt_bayes")
            if "ml" in rule:
                # interpolate between flat priors (alpha = 0, Bayes rule) and the
                # estimated class priors of the ML rule (alpha = 1), then divide
                # them out and renormalize
                priors = (1 - alpha) * np.ones(
                    self.ml_priors.shape,
                    dtype="uint8") + alpha * self.ml_priors
                probs_ml = probs / priors
                probs_ml /= np.sum(probs_ml, axis=-1, keepdims=True)

                # map rider class to person by excluding rider class
                probs_ml[:, :, self.label_rider] = 0
                probs_ml /= np.sum(probs_ml, axis=-1, keepdims=True)

                pred_ml = prediction(probs_ml, gt)
                metrics_ml, components_ml = compute_metrics_components(
                    probs_ml, gt)
                metrics_dump(metrics_ml, i, "ml_" + str(alpha))
                components_dump(components_ml, i, "ml_" + str(alpha))
                if alpha != 0:
                    metrics_bay_ml, _ = compute_metrics_mask(pred_ml, pred_bay)
                    metrics_dump(metrics_bay_ml, i,
                                 "ml_" + str(alpha) + "_bayes")
                if ground_truth_analysis:
                    metrics_gt_ml, components_gt_ml = compute_metrics_mask(
                        gt, pred_ml)
                    metrics_dump(metrics_gt_ml, i, "gt_ml_" + str(alpha))
                    components_dump(components_gt_ml, i, "gt_ml_" + str(alpha))
            print("image", i,
                  "processed in {}s\r".format(round(time.time() - start)))
Example 3
    def compute_metrics_i(self, i, alpha, thresh):
        """
        compute metrics of the fused prediction for one image
        :param i:      (int) id of the image to be processed
        :param alpha:  (str) interpolation degree used in the fusion file tag
        :param thresh: (str) threshold string used in the fusion file tag
        """
        if os.path.isfile(get_save_path_input_i(i)) and self.rewrite:
            start = time.time()
            _, gt, _ = probs_gt_load(i)

            # map rider class to person
            gt[gt == self.label_rider] = self.label_person

            # compare the fused prediction and the ground truth in both directions
            pred_fusion = prediction_load(i, "fusion_" + alpha + thresh)
            ml_fusion, _ = compute_metrics_mask(pred_fusion, gt)
            gt_fusion, _ = compute_metrics_mask(gt, pred_fusion)
            metrics_dump(ml_fusion, i, "ml_fusion_" + alpha + thresh)
            metrics_dump(gt_fusion, i, "gt_fusion_" + alpha + thresh)
            print("image", i,
                  "processed in {}s\r".format(round(time.time() - start)))
Example 4
    def fuse_mask_i(self, i, keep_I, t_string, visualize):
        """
        generate the fused mask of one image after processing by ML and MetaSeg
        :param i: (int) image id
        :param keep_I: (list) global indices of the ML segments to keep in the fusion
        :param t_string: (str) threshold string appended to the fusion file tag
        :param visualize: (boolean) if True, save a collage of the predictions
        """
        start = time.time()
        # restrict keep_I to the segments of image i and convert the global
        # segment indices to per-image component ids
        keep_I = [
            j + 1 - self.start_ml[i] for j in keep_I
            if self.start_ml[i] <= j < self.start_ml[i + 1]
        ]
        probs, gt, _ = probs_gt_load(i)

        # map rider class to person by excluding rider class
        probs[:, :, self.label_rider] = 0
        probs /= np.sum(probs, axis=-1, keepdims=True)
        gt[gt == self.label_rider] = self.label_person

        # interpolate the priors (as in compute_metrics_i) and compute the
        # Bayes and the ML prediction
        priors = (1 - self.alpha) * np.ones(
            self.ml_priors.shape, dtype="uint8") + self.alpha * self.ml_priors
        pred_bay = prediction(probs, gt)
        pred_ml = prediction(probs / priors, gt)
        # components_bay = np.absolute(components_load(i, "ml_0.0"))
        components_ml = np.absolute(components_load(i,
                                                    "ml_" + str(self.alpha)))

        # start from the Bayes prediction and overwrite all pixels that belong
        # to ML components listed in keep_I
        fusion = np.copy(pred_bay)
        fusion[np.isin(components_ml,
                       keep_I)] = pred_ml[np.isin(components_ml, keep_I)]

        # components_fusion = components_bay
        # components_fusion[np.isin(components_ml, keep_I)] = components_ml[np.isin(components_ml, keep_I)]

        if not os.path.exists(
                os.path.dirname(get_save_path_prediction_i(i, "ml_0.0"))):
            prediction_dump(pred_bay, i, "ml_0.0")
        prediction_dump(pred_ml, i, "ml_" + str(self.alpha))
        prediction_dump(fusion, i, "fusion_" + str(self.alpha) + t_string)
        # components_dump(components_fusion, i, "fusion_" + str(self.alpha))

        if visualize:
            # collage: Bayes and ML predictions on top, ground truth and fusion below
            top = np.concatenate((pred_bay, pred_ml), axis=1)
            bot = np.concatenate((gt, fusion), axis=1)
            collage = np.concatenate((top, bot), axis=0)
            save_prediction_mask(
                collage, i,
                "fusion_" + str(self.alpha) + t_string + "/collage")

        print("image", i,
              "processed in {}s\r".format(round(time.time() - start)))
Example 5
    def add_heatmap_as_metric_i(self, heat_dir, key, i):
        """
        derive aggregated heatmap metrics per segment of one image and add them
        to the metrics dictionary
        :param heat_dir:  (str) directory with heatmaps as numpy arrays
        :param key:       (str) new key to access the added metric
        :param i:         (int) id of the image to be processed
        """
        _, _, path = probs_gt_load(i)

        # the heatmap is stored under the input image's file name with a .npy extension
        heat_name = os.path.basename(path)[:-4] + ".npy"
        heatmap = np.load(heat_dir + heat_name)
        metrics = metrics_load(i)
        components = components_load(i)

        # per-segment aggregates: whole segment, interior, boundary and relative variants
        keys = [key, key + "_in", key + "_bd", key + "_rel", key + "_rel_in"]
        heat_metric = {k: [] for k in keys}
        for comp_id in range(1, abs(np.min(components)) + 1):
            values = compute_metrics_from_heatmap(heatmap, components, comp_id)
            for j, k in enumerate(keys):
                heat_metric[k].append(values[j])
        metrics.update(heat_metric)
        metrics_dump(metrics, i)
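compute_metrics_from_heatmap is part of the project; a simplified stand-in that aggregates the heat over a component, its eroded interior and its boundary (mirroring the key, key + "_in" and key + "_bd" entries) might look as follows. The relative variants are omitted and the function is illustrative only.

import numpy as np
from scipy.ndimage import binary_erosion

def mean_heat(heatmap, components, comp_id):
    # mean heat over one component, its interior and its boundary (toy version)
    mask = np.abs(components) == comp_id
    inner = binary_erosion(mask)
    boundary = mask & ~inner

    def agg(m):
        return float(heatmap[m].mean()) if m.any() else 0.0

    return agg(mask), agg(inner), agg(boundary)

heatmap = np.random.rand(5, 5)
components = np.zeros((5, 5), dtype=int)
components[1:4, 1:4] = 1                 # one dummy component with id 1
print(mean_heat(heatmap, components, 1))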