def _eval(self):
    logdir = args.logdir
    if cfg.TRAINER == 'replicated':
        all_results = multithread_eval_coco(self.dataflows, self.predictors)
    else:
        # Horovod: each local rank that runs eval writes its shard of
        # results to a part file; rank 0 merges them after the barrier.
        filenames = [os.path.join(
            logdir, 'outputs{}-part{}.json'.format(self.global_step, rank)
        ) for rank in range(hvd.local_size())]

        if self._horovod_run_eval:
            local_results = eval_coco(self.dataflow, self.predictor)
            fname = filenames[hvd.local_rank()]
            with open(fname, 'w') as f:
                json.dump(local_results, f)
        self.barrier.eval()
        if hvd.rank() > 0:
            return
        all_results = []
        for fname in filenames:
            with open(fname, 'r') as f:
                obj = json.load(f)
            all_results.extend(obj)
            os.unlink(fname)

    output_file = os.path.join(
        logdir, 'outputs{}.json'.format(self.global_step))
    with open(output_file, 'w') as f:
        json.dump(all_results, f)
    try:
        scores = print_coco_metrics(output_file)
        for k, v in scores.items():
            self.trainer.monitors.put_scalar(k, v)
    except Exception:
        logger.exception("Exception in COCO evaluation.")
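
# Hedged sketch (not part of the original file): the rank-0 gather pattern
# from the Horovod branch above, isolated so it can be exercised without
# Horovod. The name merge_part_files is an illustrative assumption, not a
# helper from this codebase.
import json
import os


def merge_part_files(filenames, output_file):
    """Concatenate per-rank JSON result lists into one file, deleting parts."""
    all_results = []
    for fname in filenames:
        with open(fname, 'r') as f:
            all_results.extend(json.load(f))
        os.unlink(fname)
    with open(output_file, 'w') as f:
        json.dump(all_results, f)
    return all_results
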
def offline_evaluate(pred_config, output_file):
    num_gpu = cfg.TRAIN.NUM_GPUS
    graph_funcs = MultiTowerOfflinePredictor(
        pred_config, list(range(num_gpu))).get_predictors()
    predictors = []
    dataflows = []
    for k in range(num_gpu):
        # Bind graph_funcs[k] through a default argument so each lambda
        # captures its own predictor rather than the last loop value.
        predictors.append(
            lambda img, pred=graph_funcs[k]: detect_one_image(img, pred))
        dataflows.append(get_eval_dataflow(shard=k, num_shards=num_gpu))
    if num_gpu > 1:
        all_results = multithread_eval_coco(dataflows, predictors)
    else:
        all_results = eval_coco(dataflows[0], predictors[0])
    with open(output_file, 'w') as f:
        json.dump(all_results, f)
    print_coco_metrics(output_file)
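
# Hedged usage sketch (not part of the original file): one way the
# single-dataset offline_evaluate() above is typically driven from a
# tensorpack PredictConfig. `MODEL` and its get_inference_tensor_names()
# helper are assumed to come from the surrounding detection codebase.
def _example_offline_eval(checkpoint, out='output.json'):
    from tensorpack import PredictConfig, get_model_loader
    pred_config = PredictConfig(
        model=MODEL,  # the detection ModelDesc (assumed to exist)
        session_init=get_model_loader(checkpoint),
        input_names=MODEL.get_inference_tensor_names()[0],
        output_names=MODEL.get_inference_tensor_names()[1])
    offline_evaluate(pred_config, out)
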
def offline_evaluate(pred_config, output_file):
    num_gpu = cfg.TRAIN.NUM_GPUS
    graph_funcs = MultiTowerOfflinePredictor(
        pred_config, list(range(num_gpu))).get_predictors()
    predictors = []
    for k in range(num_gpu):
        predictors.append(
            lambda img, pred=graph_funcs[k]: detect_one_image(img, pred))
    # Multi-dataset variant: evaluate every dataset in cfg.DATA.VAL in turn,
    # sharding each dataflow across the available GPUs and writing one
    # result file per dataset.
    for dataset in cfg.DATA.VAL:
        logger.info("Evaluating {} ...".format(dataset))
        dataflows = [
            get_eval_dataflow(dataset, shard=k, num_shards=num_gpu)
            for k in range(num_gpu)
        ]
        if num_gpu > 1:
            all_results = multithread_eval_coco(dataflows, predictors)
        else:
            all_results = eval_coco(dataflows[0], predictors[0])
        output = output_file + '-' + dataset
        with open(output, 'w') as f:
            json.dump(all_results, f)
        print_coco_metrics(dataset, output)
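
# Hedged sketch (illustrative, not this codebase's implementation): one
# plausible shape for the multithread_eval_coco() helper used above -- run
# eval_coco over each (dataflow, predictor) pair in its own thread and
# concatenate the per-shard results. That eval_coco accepts a shared tqdm
# bar, and that dataflows expose size(), are assumptions for illustration.
import itertools
from concurrent.futures import ThreadPoolExecutor

import tqdm


def multithread_eval_coco_sketch(dataflows, detect_funcs):
    assert len(dataflows) == len(detect_funcs)
    total = sum(df.size() for df in dataflows)
    with ThreadPoolExecutor(max_workers=len(dataflows)) as executor, \
            tqdm.tqdm(total=total) as pbar:
        # One worker per GPU shard; results come back in shard order.
        futures = [executor.submit(eval_coco, df, pred, pbar)
                   for df, pred in zip(dataflows, detect_funcs)]
        return list(itertools.chain.from_iterable(f.result() for f in futures))
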