Example #1
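Example #1 merges per-GPU prediction dicts into a single dict on the main process; non-main ranks return None. The all_gather and is_main_process helpers are assumed to come from the surrounding project's distributed-communication utilities (imported at module level).
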
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
    # Collect every rank's prediction dict; only the main process merges them,
    # all other ranks return None.
    all_predictions = all_gather(predictions_per_gpu)
    if not is_main_process():
        return

    predictions = {}
    for p in all_predictions:
        predictions.update(p)

    return predictions
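
A minimal usage sketch follows, showing how this helper might be called after a rank-local inference loop. The run_distributed_inference name, the loader yielding (index, batch) pairs, and the loop structure are illustrative assumptions, not taken from the snippet above; all_gather and is_main_process are assumed available as above.

import torch

def run_distributed_inference(model, data_loader):
    # Hypothetical rank-local loop: each process builds its own
    # {sample_id: prediction} dict over its shard of the data.
    predictions_per_gpu = {}
    with torch.no_grad():
        for idx, batch in data_loader:  # assumed loader yielding (index, batch) pairs
            predictions_per_gpu[idx] = model(batch)

    # Merge across ranks; only the main process gets the full dict back.
    predictions = _accumulate_predictions_from_multiple_gpus(predictions_per_gpu)
    if predictions is None:  # non-main ranks returned early
        return None
    return predictions
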
Example #2
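Example #2 is an epoch-end hook: each rank runs inference on a strided slice of the dataset, the per-rank detections are gathered with all_gather, and rank 0 merges them and runs the dataset's evaluation. torch, torchie, scatter, collate_kitti, and all_gather are assumed to be imported at module level from the surrounding framework.
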
    def after_train_epoch(self, trainer):
        if not self.every_n_epochs(trainer, self.interval):
            return
        trainer.model.eval()
        cpu_device = torch.device("cpu")  # target device for detached outputs (was undefined below)
        detections = {}
        if trainer.rank == 0:
            prog_bar = torchie.ProgressBar(len(self.dataset))
        # Each rank handles a strided slice of the dataset.
        for idx in range(trainer.rank, len(self.dataset), trainer.world_size):
            data = self.dataset[idx]
            data_gpu = scatter(collate_kitti([data], samples_per_gpu=1),
                               [torch.cuda.current_device()])[0]

            # compute output
            with torch.no_grad():
                output = trainer.model(data_gpu, return_loss=False)

                token = output["metadata"]["token"]
                # Move every field except the metadata onto the CPU.
                for k, v in output.items():
                    if k != "metadata":
                        output[k] = v.to(cpu_device)
                detections.update({token: output})

            # Every rank processed one sample this step, so rank 0 advances
            # the progress bar by world_size.
            batch_size = trainer.world_size
            if trainer.rank == 0:
                for _ in range(batch_size):
                    prog_bar.update()

        # Gather every rank's detections; only rank 0 merges and evaluates.
        all_predictions = all_gather(detections)

        if trainer.rank != 0:
            return

        predictions = {}
        for p in all_predictions:
            predictions.update(p)

        result_dict, _ = self.dataset.evaluation(predictions, None)

        for k, v in result_dict["results"].items():
            print(f"Evaluation {k}: {v}")