Example #1
    def train_batch(self, batch: TorchData, epoch_idx: int,
                    batch_idx: int) -> Dict[str, torch.Tensor]:
        samples, targets = batch
        # DETR expects a NestedTensor: the padded image batch plus a mask
        # marking the padded pixels.
        samples = utils.NestedTensor(samples["tensors"], samples["mask"])
        outputs = self.model(samples)
        loss_dict = self.criterion(outputs, targets)
        weight_dict = self.criterion.weight_dict
        # Weighted sum of the component losses; keys missing from weight_dict
        # are logged but excluded from the training loss.
        losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict
                     if k in weight_dict)
        self.context.backward(losses)
        # Step the optimizer, applying the configured gradient-clipping
        # function before the weight update.
        self.context.step_optimizer(self.optimizer,
                                    clip_grads=self.clip_grads_fn)

        # Compute losses for logging
        loss_dict_scaled = {
            f"{k}_scaled": v * weight_dict[k]
            for k, v in loss_dict.items() if k in weight_dict
        }
        loss_dict["sum_unscaled"] = sum(loss_dict.values())
        loss_dict["sum_scaled"] = sum(loss_dict_scaled.values())
        loss_dict.update(loss_dict_scaled)

        loss_dict["loss"] = losses

        return loss_dict
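
For reference, `weight_dict` maps each loss key produced by the criterion to a scalar weight, and the training loss is the weighted sum over the keys the two dicts share. A minimal, runnable sketch of that pattern (the keys and weights below are illustrative, not this repository's actual configuration):

    import torch

    # Hypothetical per-component losses, shaped like the criterion's output.
    loss_dict = {
        "loss_ce": torch.tensor(0.9),    # classification loss
        "loss_bbox": torch.tensor(0.2),  # L1 box-regression loss
        "loss_giou": torch.tensor(0.4),  # generalized-IoU loss
        "cardinality_error": torch.tensor(1.0),  # logged only, never weighted
    }
    # Illustrative weights; a key absent from weight_dict never contributes
    # to the training loss but still shows up in the logged metrics.
    weight_dict = {"loss_ce": 1.0, "loss_bbox": 5.0, "loss_giou": 2.0}

    losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict
                 if k in weight_dict)
    print(losses)  # tensor(2.7000) == 0.9 * 1.0 + 0.2 * 5.0 + 0.4 * 2.0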
Example #2
    def evaluate_full_dataset(
            self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        # This is slow; a custom reducer is needed to support multi-GPU eval.
        iou_types = tuple(k for k in ("segm", "bbox")
                          if k in self.postprocessors.keys())
        coco_evaluator = CocoEvaluator(self.base_ds, iou_types)
        results = {}
        # Running totals for each loss key, averaged over batches at the end
        # (requires `from collections import defaultdict`).
        loss_dict_aggregated = defaultdict(int)
        with torch.no_grad():
            for i, batch in enumerate(data_loader):
                samples, targets = self.context.to_device(batch)
                samples = utils.NestedTensor(samples["tensors"],
                                             samples["mask"])

                outputs = self.model(samples)
                loss_dict = self.criterion(outputs, targets, eval=True)
                weight_dict = self.criterion.weight_dict

                # Compute losses for logging
                loss_dict_scaled = {
                    f"{k}_scaled": v * weight_dict[k]
                    for k, v in loss_dict.items() if k in weight_dict
                }
                loss_dict["sum_unscaled"] = sum(loss_dict.values())
                loss_dict["sum_scaled"] = sum(loss_dict_scaled.values())
                loss_dict.update(loss_dict_scaled)

                # Accumulate every loss term so it can be averaged at the end.
                for k in loss_dict:
                    loss_dict_aggregated[k] += loss_dict[k]

                # Rescale predictions back to each image's original size
                # before handing them to the COCO evaluator.
                orig_target_sizes = torch.stack(
                    [t["orig_size"] for t in targets], dim=0)
                res = self.postprocessors["bbox"](outputs, orig_target_sizes)
                results.update({
                    target["image_id"].item(): output
                    for target, output in zip(targets, res)
                })

        # Average the accumulated losses over the number of batches seen.
        for k in loss_dict_aggregated:
            loss_dict_aggregated[k] /= i + 1

        coco_evaluator.update(results)
        # Merge the per-image evaluation results into a single COCOeval object
        # per IoU type before accumulating and summarizing.
        for iou_type in coco_evaluator.iou_types:
            coco_eval = coco_evaluator.coco_eval[iou_type]
            coco_evaluator.eval_imgs[iou_type] = np.concatenate(
                coco_evaluator.eval_imgs[iou_type], 2)
            create_common_coco_eval(coco_eval, coco_evaluator.img_ids,
                                    coco_evaluator.eval_imgs[iou_type])
        coco_evaluator.accumulate()
        coco_evaluator.summarize()

        coco_stats = coco_evaluator.coco_eval["bbox"].stats.tolist()

        loss_dict_aggregated["mAP"] = coco_stats[0]
        loss_dict_aggregated["mAP_50"] = coco_stats[1]
        loss_dict_aggregated["mAP_75"] = coco_stats[2]
        loss_dict_aggregated["mAP_small"] = coco_stats[3]
        loss_dict_aggregated["mAP_medium"] = coco_stats[4]
        loss_dict_aggregated["mAP_large"] = coco_stats[5]
        return loss_dict_aggregated
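
Example #2's opening comment notes that this single-process path is slow and that multi-GPU evaluation needs a custom reducer; Example #3 below takes that route via `self.reducer`. As a rough sketch only, here is what such a reducer could look like against Determined's `det.pytorch.MetricReducer` interface; the class name, the merge logic, and the wiring are assumptions, not this repository's actual implementation:

    from typing import Any, List

    import determined.pytorch as det_pytorch


    class PredictionsReducer(det_pytorch.MetricReducer):
        """Hypothetical reducer that gathers (image_id, output) pairs."""

        def __init__(self) -> None:
            self.results: List[Any] = []

        def update(self, batch_results: List[Any]) -> None:
            # Called from evaluate_batch on each slot (GPU).
            self.results.extend(batch_results)

        def per_slot_reduce(self) -> List[Any]:
            # Hand this slot's accumulated predictions to the framework.
            return self.results

        def cross_slot_reduce(self, per_slot_metrics: List[List[Any]]) -> Any:
            # Merge the predictions from all slots; COCO metrics would be
            # computed here from the combined results.
            return [r for slot in per_slot_metrics for r in slot]

        def reset(self) -> None:
            # Clear state between evaluation runs.
            self.results = []

Such a reducer would typically be registered once in the trial's `__init__`, e.g. via `self.reducer = self.context.wrap_reducer(PredictionsReducer(), name="coco_eval", for_training=False)`, so that `evaluate_batch` can feed it; treat that wiring as an assumption as well.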
Example #3
    def evaluate_batch(self, batch: TorchData) -> Dict[str, Any]:
        samples, targets = batch
        samples = utils.NestedTensor(samples["tensors"], samples["mask"])

        outputs = self.model(samples)
        loss_dict = self.criterion(outputs, targets, eval=True)
        weight_dict = self.criterion.weight_dict

        # Compute losses for logging
        loss_dict_scaled = {
            f"{k}_scaled": v * weight_dict[k]
            for k, v in loss_dict.items() if k in weight_dict
        }
        loss_dict["sum_unscaled"] = sum(loss_dict.values())
        loss_dict["sum_scaled"] = sum(loss_dict_scaled.values())
        loss_dict.update(loss_dict_scaled)

        orig_target_sizes = torch.stack([t["orig_size"] for t in targets],
                                        dim=0)
        res = self.postprocessors["bbox"](outputs, orig_target_sizes)
        # Move outputs to CPU so they can be gathered across processes.
        res = [{k: v.cpu() for k, v in r.items()} for r in res]
        # Map the model's contiguous label indices back to COCO category ids.
        if self.cat_ids:
            for row in res:
                row["labels"] = torch.tensor(
                    [self.cat_ids[label.item()] for label in row["labels"]],
                    dtype=torch.int64)
        # Pair each prediction with its image id and pass this batch's results
        # to the custom reducer for cross-GPU aggregation.
        result = [(target["image_id"].item(), output)
                  for target, output in zip(targets, res)]
        self.reducer.update(result)
        return loss_dict
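
The `self.cat_ids` step above exists because COCO category ids are non-contiguous (they run from 1 to 90 with gaps), while the model predicts contiguous class indices, and the COCO evaluator needs the original ids. A small illustration with a made-up mapping (the ids below are hypothetical, not the dataset's actual table):

    import torch

    # Hypothetical mapping from contiguous model indices to COCO category ids.
    cat_ids = {0: 1, 1: 2, 2: 3, 3: 5}

    row = {"labels": torch.tensor([0, 3, 1])}
    row["labels"] = torch.tensor(
        [cat_ids[label.item()] for label in row["labels"]],
        dtype=torch.int64)
    print(row["labels"])  # tensor([1, 5, 2]) -- valid COCO category ids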