Example #1
    def _eval(self):
        logdir = self._output_dir
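        # Replicated (single-process) trainer: run all predictors over the
        # per-GPU dataflows in threads and merge their outputs directly.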
        if cfg.TRAINER == 'replicated':
            all_results = multithread_predict_dataflow(self.dataflows, self.predictors)
        else:
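            # Horovod trainer: each rank dumps its predictions to its own
            # JSON file under the log directory.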
            filenames = [os.path.join(
                logdir, 'outputs{}-part{}.json'.format(self.global_step, rank)
            ) for rank in range(hvd.local_size())]

            if self._horovod_run_eval:
                local_results = predict_dataflow(self.dataflow, self.predictor)
                fname = filenames[hvd.local_rank()]
                with open(fname, 'w') as f:
                    json.dump(local_results, f)
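            # Synchronize: wait until every rank has finished writing its shard.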
            self.barrier.eval()
            if hvd.rank() > 0:
                return
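            # Rank 0 merges the per-rank files and deletes them afterwards.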
            all_results = []
            for fname in filenames:
                with open(fname, 'r') as f:
                    obj = json.load(f)
                all_results.extend(obj)
                os.unlink(fname)

        output_file = os.path.join(
            logdir, '{}-outputs{}.json'.format(self._eval_dataset, self.global_step))

        scores = DetectionDataset().eval_or_save_inference_results(
            all_results, self._eval_dataset, output_file)
        for k, v in scores.items():
            self.trainer.monitors.put_scalar(k, v)
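
Example #1 synchronizes workers with self.barrier.eval() before rank 0 merges the per-rank files. The snippet does not show how that op is built; a minimal sketch, assuming the common Horovod trick of treating an allreduce over a throwaway tensor as a barrier (an assumption, not necessarily the callback's actual setup):

import tensorflow as tf
import horovod.tensorflow as hvd

def make_barrier():
    # hvd.allreduce only completes once every rank has contributed, so
    # evaluating the returned op blocks all workers until the slowest
    # one has finished writing its JSON shard.
    return hvd.allreduce(tf.random_normal(shape=[1]))

# Assumed wiring: self.barrier = make_barrier() in _setup_graph,
# then self.barrier.eval() in _eval, as in the snippet above.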
Example #2
    def _eval(self):
        logdir = self._output_dir
        if cfg.TRAINER == 'replicated':
            all_results = multithread_predict_dataflow(self.dataflows,
                                                       self.predictors)
        else:
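            # Horovod trainer: each rank runs inference on its own shard
            # of the dataflow, batched or unbatched.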
            if self.batched:
                local_results = predict_dataflow_batch(self.dataflow,
                                                       self.predictor)
            else:
                local_results = predict_dataflow(self.dataflow, self.predictor)

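            # Gather every rank's predictions in memory; ranks other than 0
            # return early, since only the chief evaluates.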
            results = gather_result_from_all_processes(local_results)
            if hvd.rank() > 0:
                return
            all_results = []
            for item in results:
                if item is not None:
                    all_results.extend(item)

        output_file = os.path.join(
            logdir, '{}-outputs{}'.format(self._eval_dataset,
                                          self.global_step))

        scores = DetectionDataset().eval_or_save_inference_results(
            all_results, self._eval_dataset, output_file)
        for k, v in scores.items():
            self.trainer.monitors.put_scalar(k, v)
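
Example #2 swaps the file-based exchange of Example #1 for an in-memory gather. gather_result_from_all_processes is not shown; a minimal sketch of such a helper, assuming mpi4py is available alongside Horovod (the implementation below is a guess, not the original):

from mpi4py import MPI

def gather_result_from_all_processes(local_results, root=0):
    # comm.gather pickles each rank's object and delivers the full
    # per-rank list to the root; every other rank receives None.
    return MPI.COMM_WORLD.gather(local_results, root=root)

The per-item None check in the caller then skips any rank that produced no results.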
Example #3
    def background_coco(all_results):
        output_file = os.path.join(
            logdir, '{}-outputs{}'.format(self._eval_dataset,
                                          self.global_step))
        scores = DetectionDataset().eval_or_save_inference_results(
            all_results, self._eval_dataset, output_file)
        # Flag the training loop to stop once both the box and the mask
        # AP targets have been reached.
        cfg.TRAIN.SHOULD_STOP = (
            scores['mAP(bbox)/IoU=0.5:0.95'] >= cfg.TEST.BOX_TARGET
            and scores['mAP(segm)/IoU=0.5:0.95'] >= cfg.TEST.MASK_TARGET)
        for k, v in scores.items():
            self.trainer.monitors.put_scalar(k, v)
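
Example #3 looks like a nested helper meant to run the slow COCO evaluation off the training loop. A sketch of how it might be launched from _eval (the threading wrapper is an assumption; the snippet itself does not show it):

import threading

# Run evaluation in the background so the next training step is not
# blocked; background_coco sets cfg.TRAIN.SHOULD_STOP once both AP
# targets from cfg.TEST are met, which the training loop can poll.
eval_thread = threading.Thread(target=background_coco, args=(all_results,))
eval_thread.daemon = True
eval_thread.start()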