Example #1
    def get_results(self):
        """
        Reruns the evaluation using the accumulated detections, returns VOC results with IOU and
        Accuracy metrics

        :return: dict with PASCAL VOC metrics
        """
        if self.cached_results:
            return self.results

        self.voc_evaluator = ConfusionMatrix(21)
        self.voc_evaluator.update(self.targets.astype(np.int64),
                                  self.outputs.astype(np.int64))

        acc_global, acc, iu = self.voc_evaluator.compute()

        self.results = {
            "Accuracy": acc_global.item(),
            "Mean IOU": iu.mean().item(),
        }

        self.speed_mem_metrics[
            'Max Memory Allocated (Total)'] = get_max_memory_allocated()

        return self.results
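The `ConfusionMatrix` class used above is not shown in the example. The following is a minimal NumPy sketch of how such a class might work, assuming the usual torchvision-style segmentation computation, where 21 is the number of PASCAL VOC classes (20 object classes plus background); it is not the exact implementation behind the example:

import numpy as np

class ConfusionMatrix:
    """Minimal sketch, assumed API; not the exact class from Example #1."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.mat = np.zeros((num_classes, num_classes), dtype=np.int64)

    def update(self, targets, outputs):
        # Accumulate a histogram of (ground-truth, predicted) label pairs,
        # keeping only targets in the valid class range (this drops e.g. a
        # 255 "void" label); predictions are assumed to already be in range.
        t, o = targets.flatten(), outputs.flatten()
        k = (t >= 0) & (t < self.num_classes)
        inds = self.num_classes * t[k] + o[k]
        self.mat += np.bincount(inds, minlength=self.num_classes ** 2) \
            .reshape(self.num_classes, self.num_classes)

    def compute(self):
        h = self.mat.astype(np.float64)
        diag = np.diag(h)
        acc_global = diag.sum() / h.sum()                    # overall pixel accuracy
        acc = diag / h.sum(axis=1)                           # per-class accuracy
        iu = diag / (h.sum(axis=1) + h.sum(axis=0) - diag)   # per-class IoU
        return acc_global, acc, iu

The three return values line up with the `acc_global, acc, iu` unpacked in the example, and both `acc_global.item()` and `iu.mean().item()` work on the NumPy results.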
Example #2
    def get_results(self):
        """
        Gets the results for the evaluator, delegating to `self.metrics`.

        :return: dict with the evaluator's metrics
        """
        if self.cached_results:
            return self.results
        self.results = self.metrics.get_results()
        self.speed_mem_metrics['Max Memory Allocated (Total)'] = get_max_memory_allocated()

        return self.results
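All five examples share the same caching guard: once results are available (for instance, restored from an earlier run), `cached_results` is set so that later `get_results()` calls return immediately. A minimal sketch of one way that flag might be managed; the class and the `cache_results` helper are hypothetical stand-ins, not from the source:

class CachingEvaluator:
    """Hypothetical sketch of the caching guard shared by these evaluators."""

    def __init__(self, metrics):
        self.metrics = metrics
        self.cached_results = False
        self.results = None

    def cache_results(self, results):
        # Called when precomputed results are available, so that
        # get_results() can skip re-running the evaluation.
        self.results = results
        self.cached_results = True

    def get_results(self):
        if self.cached_results:
            return self.results
        self.results = self.metrics.get_results()
        self.cached_results = True
        return self.results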
Example #3
    def get_results(self):
        """
        Gets the results for the evaluator.

        :return: dict with `EM` (exact match score) and `F1`.
        """

        if self.cached_results:
            return self.results
        self.results = self.metrics.get_results()
        self.speed_mem_metrics[
            'Max Memory Allocated (Total)'] = get_max_memory_allocated()

        return self.results
Example #4
    def get_results(self):
        """ 
        Calculates the perplexity and measure the performance of the model
        
        :return: dict with `Test perplexity`
        """
        if self.cached_results:
            return self.results
        perplexity = np.exp(self._neglogloss / self.dataset.testset_size)

        self.results = {'Test perplexity': perplexity}
        self.speed_mem_metrics[
            'Max Memory Allocated (Total)'] = get_max_memory_allocated()
        exec_speed = (time.time() - self.init_time)
        count = self.dataset.testset_size
        self.speed_mem_metrics['Tasks / Evaluation Time'] = count / exec_speed
        self.speed_mem_metrics['Tasks'] = count
        self.speed_mem_metrics['Evaluation Time'] = exec_speed
        return self.results
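The perplexity formula in Example #4 is the standard one: the exponential of the average per-token negative log-likelihood over the test set. A small worked example with made-up numbers (both input values below are assumptions, not from the source):

import numpy as np

neglogloss = 4500.0    # assumed: summed -log p(token) over the test set
testset_size = 1000    # assumed: number of test tokens
perplexity = np.exp(neglogloss / testset_size)
print(perplexity)      # exp(4.5), roughly 90.0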
Example #5
    def get_results(self):
        """
        Reruns the evaluation using the accumulated detections, returns COCO results with AP metrics

        :return: dict with COCO AP metrics
        """
        if self.cached_results:
            return self.results

        self.coco_evaluator = CocoEvaluator(self.coco, self.iou_types)
        self.coco_evaluator.update(self.detections)
        self.coco_evaluator.evaluate()
        self.coco_evaluator.accumulate()
        self.coco_evaluator.summarize()

        self.results = get_coco_metrics(self.coco_evaluator)
        self.speed_mem_metrics[
            'Max Memory Allocated (Total)'] = get_max_memory_allocated()

        return self.results
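Example #5 assumes `self.detections` has already been filled in before `get_results()` runs. Below is a hypothetical sketch of how such a detections dict might be accumulated with a torchvision-style detection model; `model`, `data_loader`, and the output format (dicts with `boxes`, `labels`, `scores`) are assumptions, not part of the source:

import torch

@torch.no_grad()
def collect_detections(model, data_loader, device="cuda"):
    # Hypothetical accumulation loop: run the model over the dataset and
    # key each prediction by its COCO image id, moved to CPU, so the
    # evaluator can match predictions against ground-truth annotations.
    model.eval()
    detections = {}
    for images, targets in data_loader:
        images = [img.to(device) for img in images]
        outputs = model(images)
        for target, output in zip(targets, outputs):
            image_id = target["image_id"].item()
            detections[image_id] = {k: v.cpu() for k, v in output.items()}
    return detections

Keying by image id matches what `CocoEvaluator.update(self.detections)` expects in the torchvision reference implementation, though the exact accumulation code in the source is not shown.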