Example #1
    def track(self, model: model_interface.TrackerInterface, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        super().track(model)
        outputs = self._convert(model.get_output())
        targets = self._convert(model.get_labels())
        batch_idx = self._convert(model.get_batch())
        if batch_idx is None:
            raise ValueError(
                "Your model need to set the batch_idx variable in its set_input function."
            )

        nb_batches = batch_idx.max() + 1

        # Compare predictions to the ground-truth classes of each shape (selected by seg_classes[cat])
        for b in range(nb_batches):
            segl = targets[batch_idx == b]
            cat = self._seg_to_class[segl[0]]
            logits = outputs[batch_idx == b, :]  # (num_points, num_classes)
            segp = logits[:, self._class_seg_map[cat]].argmax(1) + self._class_seg_map[cat][0]
            part_ious = np.zeros(len(self._class_seg_map[cat]))
            for l in self._class_seg_map[cat]:
                union = np.sum((segl == l) | (segp == l))
                if union == 0:
                    # Part is not present in this shape, count it as a perfect match
                    part_ious[l - self._class_seg_map[cat][0]] = 1
                else:
                    intersection = np.sum((segl == l) & (segp == l))
                    part_ious[l - self._class_seg_map[cat][0]] = float(intersection) / float(union)
            self._shape_ious[cat].append(np.mean(part_ious))

        self._miou_per_class, self._Cmiou, self._Imiou = self._get_metrics_per_class()
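
A standalone sketch of the per-part IoU convention used above may be helpful: a part absent from both the prediction and the ground truth counts as a perfect score of 1, otherwise IoU = |intersection| / |union|. The names below are illustrative, not part of the tracker.

    import numpy as np

    def part_iou(segl, segp, parts):
        """IoU per part; a part absent from both prediction and ground truth scores 1."""
        ious = np.zeros(len(parts))
        for i, l in enumerate(parts):
            union = np.sum((segl == l) | (segp == l))
            ious[i] = 1.0 if union == 0 else np.sum((segl == l) & (segp == l)) / union
        return ious

    # Three points, two parts; one point of each part is misclassified
    print(part_iou(np.array([0, 0, 1]), np.array([0, 1, 1]), [0, 1]))  # [0.5 0.5]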
Example #2
    def track(self, model: model_interface.TrackerInterface, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        if not self._dataset.has_labels(self._stage):
            return

        super().track(model)

        outputs = model.get_output()
        targets = model.get_labels()

        # Mask ignored label
        mask = targets != self._ignore_label
        outputs = outputs[mask]
        targets = targets[mask]

        outputs = self._convert(outputs)
        targets = self._convert(targets)

        if len(targets) == 0:
            return

        assert outputs.shape[0] == len(targets)
        self._confusion_matrix.count_predicted_batch(targets, np.argmax(outputs, 1))

        self._acc = 100 * self._confusion_matrix.get_overall_accuracy()
        self._macc = 100 * self._confusion_matrix.get_mean_class_accuracy()
        self._miou = 100 * self._confusion_matrix.get_average_intersection_union()
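
All three scores reported here are derived from the confusion matrix. A minimal sketch of the usual derivations (the tracker's ConfusionMatrix class may differ in details):

    import numpy as np

    # C[i, j] = number of points with ground truth i predicted as j (toy values)
    C = np.array([[50.0, 10.0],
                  [5.0, 35.0]])

    overall_acc = np.trace(C) / C.sum()                              # _acc
    mean_class_acc = (np.diag(C) / C.sum(axis=1)).mean()             # _macc
    iou = np.diag(C) / (C.sum(axis=0) + C.sum(axis=1) - np.diag(C))  # per-class IoU
    mean_iou = iou.mean()                                            # _miou
    print(100 * overall_acc, 100 * mean_class_acc, 100 * mean_iou)   # ~85.0 ~85.4 ~73.5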
Example #3

    def track(self, model: model_interface.TrackerInterface, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        super().track(model)
        if self._stage != "train":
            batch_idx, batch_idx_target = model.get_batch()
            batch_xyz, batch_xyz_target = model.get_xyz()  # type: ignore
            batch_ind, batch_ind_target, batch_size_ind = model.get_ind()  # type: ignore
            batch_feat, batch_feat_target = model.get_output()

            nb_batches = batch_idx.max() + 1
            cum_sum = 0
            cum_sum_target = 0
            begin = 0
            end = batch_size_ind[0].item()
            for b in range(nb_batches):
                xyz = batch_xyz[batch_idx == b]
                xyz_target = batch_xyz_target[batch_idx_target == b]
                feat = batch_feat[batch_idx == b]
                feat_target = batch_feat_target[batch_idx_target == b]
                # ind is concatenated across the whole batch, so we subtract the
                # cumulative sum to index each sample independently
                ind = batch_ind[begin:end] - cum_sum
                ind_target = batch_ind_target[begin:end] - cum_sum_target
                if b < nb_batches - 1:
                    begin = end
                    end = begin + batch_size_ind[b + 1].item()
                cum_sum += len(xyz)
                cum_sum_target += len(xyz_target)
                rand = torch.randperm(len(feat))[:self.num_points]
                rand_target = torch.randperm(len(feat_target))[:self.num_points]

                matches_gt = torch.stack([ind, ind_target]).transpose(0, 1)

                T_gt = estimate_transfo(xyz[matches_gt[:, 0]],
                                        xyz_target[matches_gt[:, 1]])

                matches_pred = get_matches(feat[rand],
                                           feat_target[rand_target])
                T_pred = fast_global_registration(
                    xyz[rand][matches_pred[:, 0]],
                    xyz_target[rand_target][matches_pred[:, 1]])

                hit_ratio = compute_hit_ratio(
                    xyz[rand][matches_pred[:, 0]],
                    xyz_target[rand_target][matches_pred[:, 1]],
                    T_gt, self.tau_1)

                trans_error, rot_error = compute_transfo_error(T_pred, T_gt)
                self._hit_ratio.add(hit_ratio.item())
                self._feat_match_ratio.add(float(hit_ratio.item() > self.tau_2))
                self._trans_error.add(trans_error.item())
                self._rot_error.add(rot_error.item())
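
For context, the hit ratio measures how many putative feature matches are geometrically consistent with the ground-truth transform. A minimal sketch of that idea, assuming T_gt is a 4x4 homogeneous transform (the actual compute_hit_ratio may differ):

    import torch

    def hit_ratio_sketch(xyz, xyz_target, T_gt, tau_1=0.1):
        # Move the source points with the ground-truth transform...
        xyz_h = torch.cat([xyz, torch.ones(len(xyz), 1)], dim=1)
        xyz_gt = (T_gt @ xyz_h.T).T[:, :3]
        # ...and count the matched pairs that land within tau_1 of their target
        return (torch.norm(xyz_gt - xyz_target, dim=1) < tau_1).float().mean()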
Example #4

    def track(self,
              model: model_interface.TrackerInterface,
              full_res: bool = False,
              data: Data = None,
              **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        super().track(model)
        self._conv_type = model.conv_type
        outputs = self._convert(model.get_output())
        targets = self._convert(model.get_labels())
        batch_idx = self._convert(model.get_batch())
        if batch_idx is None:
            raise ValueError(
                "Your model need to set the batch_idx variable in its set_input function."
            )

        nb_batches = batch_idx.max() + 1

        if self._stage != "train" and full_res:
            self._add_votes(data, outputs, batch_idx)

        # Compare predictions to the ground-truth classes of each shape (selected by seg_classes[cat])
        for b in range(nb_batches):
            segl = targets[batch_idx == b]
            cat = self._seg_to_class[segl[0]]
            logits = outputs[batch_idx == b, :]  # (num_points, num_classes)
            segp = logits[:, self._class_seg_map[cat]].argmax(1) + self._class_seg_map[cat][0]
            part_ious = self._compute_part_ious(segl, segp, cat)
            self._shape_ious[cat].append(np.mean(part_ious))

        self._miou_per_class, self._Cmiou, self._Imiou = ShapenetPartTracker._get_metrics_per_class(
            self._shape_ious)
Example #5
    def track(self, model: model_interface.TrackerInterface, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        super().track(model)

        outputs = model.get_output()
        targets = model.get_labels()

        (avg_loss_dimensions, avg_loss_epsilons, avg_loss_offsets,
         mae_a1, mae_a2, mae_a3, mae_x0, mae_y0, mae_z0,
         mae_e1, mae_e2) = self.compute_loss_by_components(outputs, targets)

        self._loss_dimension.add(avg_loss_dimensions.detach().cpu().numpy())
        self._loss_epsilon.add(avg_loss_epsilons.detach().cpu().numpy())
        self._loss_offset.add(avg_loss_offsets.detach().cpu().numpy())

        self._loss_mae_a1.add(mae_a1.detach().cpu().numpy())
        self._loss_mae_a2.add(mae_a2.detach().cpu().numpy())
        self._loss_mae_a3.add(mae_a3.detach().cpu().numpy())

        self._loss_mae_x0.add(mae_x0.detach().cpu().numpy())
        self._loss_mae_y0.add(mae_y0.detach().cpu().numpy())
        self._loss_mae_z0.add(mae_z0.detach().cpu().numpy())

        self._loss_mae_e1.add(mae_e1.detach().cpu().numpy())
        self._loss_mae_e2.add(mae_e2.detach().cpu().numpy())
Example #6
    def track(self, model: model_interface.TrackerInterface, full_res=False, data=None, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        super().track(model)

        # Train mode or low res, nothing special to do
        if self._stage == "train" or not full_res:
            return

        # Test mode, compute votes in order to get full res predictions
        if self._test_area is None:
            self._test_area = self._dataset.test_data.clone()
            if self._test_area.y is None:
                raise ValueError("It seems that the test area data does not have labels (attribute y).")
            self._test_area.prediction_count = torch.zeros(self._test_area.y.shape[0], dtype=torch.int)
            self._test_area.votes = torch.zeros((self._test_area.y.shape[0], self._num_classes), dtype=torch.float)
            self._test_area.to(model.device)

        # Gather origin ids and check that it fits with the test set
        inputs = data if data is not None else model.get_input()
        if inputs[SaveOriginalPosId.KEY] is None:
            raise ValueError("The inputs given to the model do not have a %s attribute." % SaveOriginalPosId.KEY)

        originids = inputs[SaveOriginalPosId.KEY]
        if originids.dim() == 2:
            originids = originids.flatten()
        if originids.max() >= self._test_area.pos.shape[0]:
            raise ValueError("Origin ids are larger than the number of points in the original point cloud.")

        # Set predictions
        outputs = model.get_output()
        self._test_area.votes[originids] += outputs
        self._test_area.prediction_count[originids] += 1
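
The finalisation step is not shown above; a plausible sketch (assumed, names illustrative) picks the argmax of the accumulated logits for every point that received at least one prediction:

    import torch

    num_points, num_classes = 6, 3
    votes = torch.zeros((num_points, num_classes))
    prediction_count = torch.zeros(num_points, dtype=torch.int)

    originids = torch.tensor([0, 2, 2, 5])    # origin ids of one sampled batch
    outputs = torch.rand(4, num_classes)      # logits for that batch
    votes.index_add_(0, originids, outputs)   # accumulates even over repeated ids
    prediction_count.index_add_(0, originids, torch.ones(4, dtype=torch.int))

    seen = prediction_count > 0
    full_res_preds = votes[seen].argmax(1)    # one label per predicted point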
Example #7

    def track(self, model: model_interface.TrackerInterface, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        super().track(model)

        outputs = model.get_output()
        targets = model.get_labels().flatten()

        self._acc.add(100 * self.compute_acc(outputs, targets))
Example #8
    def track(self, model: TrackerInterface, **kwargs):
        """ Track metrics for panoptic segmentation
        """
        BaseTracker.track(self, model)
        outputs: PanopticResults = model.get_output()
        labels: PanopticLabels = model.get_labels()

        # Track semantic
        super()._compute_metrics(outputs.semantic_logits, labels.y)
Example #9

    def track(self, model: model_interface.TrackerInterface, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        if not self._dataset.has_labels(self._stage):
            return

        super().track(model)

        outputs = model.get_output()
        targets = model.get_labels()
        self._compute_metrics(outputs, targets)
Example #10
    def track(self, model: model_interface.TrackerInterface, **kwargs):
        if self._finalised:
            raise RuntimeError(
                "Cannot track new values with a finalised tracker, you need to reset it first"
            )
        losses = self._convert(model.get_current_losses())
        self._append_losses(losses)
Example #11

    def track(self, model: TrackerInterface, data=None, track_boxes=False, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        if tracking boxes, you must provide a labeled "data" object with the following attributes:
            - id_scan: id of the scan to which the boxes belong
            - instance_box_corners - gt box corners
            - box_label_mask - mask for boxes (0 = no box)
            - sem_cls_label - semantic label for each box
        """
        super().track(model)

        outputs: VoteNetResults = model.get_output()

        total_num_proposal = outputs.objectness_label.shape[0] * outputs.objectness_label.shape[1]
        pos_ratio = torch.sum(outputs.objectness_label.float()).item() / float(total_num_proposal)
        self._pos_ratio.add(pos_ratio)
        self._neg_ratio.add(torch.sum(outputs.objectness_mask.float()).item() / float(total_num_proposal) - pos_ratio)

        obj_pred_val = torch.argmax(outputs.objectness_scores, 2)  # B,K
        self._obj_acc.add(
            torch.sum((obj_pred_val == outputs.objectness_label.long()).float() * outputs.objectness_mask).item()
            / (torch.sum(outputs.objectness_mask) + 1e-6).item()
        )

        if data is None or self._stage == "train" or not track_boxes:
            return

        self._add_box_pred(outputs, data, model.conv_type)
Example #12
    def track(self,
              model: TrackerInterface,
              data=None,
              iou_threshold=0.25,
              track_instances=True,
              min_cluster_points=10,
              **kwargs):
        """ Track metrics for panoptic segmentation
        """
        self._iou_threshold = iou_threshold
        BaseTracker.track(self, model)
        outputs: PanopticResults = model.get_output()
        labels: PanopticLabels = model.get_labels()

        # Track semantic
        super()._compute_metrics(outputs.semantic_logits, labels.y)

        if not data:
            return
        assert data.pos.dim() == 2, "Only supports packed batches"

        # Object accuracy
        clusters = PanopticTracker._extract_clusters(outputs, min_cluster_points)
        if not clusters:
            return

        predicted_labels = outputs.semantic_logits.max(1)[1]
        tp, fp, acc = self._compute_acc(clusters, predicted_labels, labels,
                                        data.batch, labels.num_instances,
                                        iou_threshold)
        self._pos.add(tp)
        self._neg.add(fp)
        self._acc_meter.add(acc)

        # Track instances for AP
        if track_instances:
            pred_clusters = self._pred_instances_per_scan(
                clusters, predicted_labels, outputs.cluster_scores, data.batch,
                self._scan_id_offset)
            gt_clusters = self._gt_instances_per_scan(labels.instance_labels,
                                                      labels.y, data.batch,
                                                      self._scan_id_offset)
            self._ap_meter.add(pred_clusters, gt_clusters)
            self._scan_id_offset += data.batch[-1].item() + 1
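
A minimal sketch of the matching rule behind the tp/fp counts at an IoU threshold, using boolean point masks per instance (the tracker's _compute_acc operates on clusters and may differ in details):

    import torch

    def instance_tp_fp(pred_masks, gt_masks, iou_threshold=0.25):
        matched, tp, fp = set(), 0, 0
        for pred in pred_masks:
            best_iou, best_gt = 0.0, None
            for i, gt in enumerate(gt_masks):
                if i in matched:
                    continue
                union = (pred | gt).sum().item()
                iou = (pred & gt).sum().item() / union if union else 0.0
                if iou > best_iou:
                    best_iou, best_gt = iou, i
            if best_iou >= iou_threshold:  # prediction hits an unmatched GT instance
                tp += 1
                matched.add(best_gt)
            else:
                fp += 1
        return tp, fp

    # Two GT instances over 5 points; both predictions overlap enough to count
    gt = [torch.tensor([1, 1, 0, 0, 0], dtype=torch.bool),
          torch.tensor([0, 0, 1, 1, 1], dtype=torch.bool)]
    pred = [torch.tensor([1, 1, 1, 0, 0], dtype=torch.bool),
            torch.tensor([0, 0, 0, 0, 1], dtype=torch.bool)]
    print(instance_tp_fp(pred, gt))  # (2, 0) at threshold 0.25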
Example #13
    def track(self, model: model_interface.TrackerInterface, **kwargs):
        """ Add model predictions (accuracy)
        """
        super().track(model)

        outputs = self._convert(model.get_output())
        N = len(outputs) // 2

        self._acc = compute_accuracy(outputs[:N], outputs[N:])
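
The two halves of the output are assumed to be corresponding descriptors of two fragments, so compute_accuracy plausibly measures the fraction of descriptors whose nearest neighbour in the other half is their true correspondent. A sketch under that assumption:

    import numpy as np

    def compute_accuracy_sketch(feat_a, feat_b):
        # Pairwise distances between the two descriptor sets (N x N)
        dist = np.linalg.norm(feat_a[:, None] - feat_b[None], axis=2)
        # Row i is correct when its nearest neighbour in feat_b is row i
        return float((dist.argmin(1) == np.arange(len(feat_a))).mean())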
Example #14
    def track(self, model: model_interface.TrackerInterface, full_res=False, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        super().track(model)

        # Set conv type
        self._conv_type = model.conv_type

        # Train mode or low res, nothing special to do
        if not full_res or self._stage == "train" or kwargs.get("data") is None:
            return

        self._vote(kwargs.get("data"), model.get_output())