def offline_evaluate(pred_func, output_file):
    # Run detection over the evaluation dataflow, dump results as JSON,
    # then print COCO evaluation scores.
    df = get_eval_dataflow()
    all_results = eval_coco(
        df, lambda img: detect_one_image(img, pred_func))
    with open(output_file, 'w') as f:
        json.dump(all_results, f)
    print_evaluation_scores(output_file)
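# Hypothetical usage sketch (not part of the original file): build a predictor
# from a checkpoint and run offline COCO evaluation. The input/output tensor
# names and `MODEL` are assumptions following tensorpack conventions.
def _example_offline_eval(checkpoint_path, output_file):
    pred_func = OfflinePredictor(PredictConfig(
        model=MODEL,
        session_init=get_model_loader(checkpoint_path),
        input_names=['image'],
        output_names=['output/boxes', 'output/scores', 'output/labels']))
    offline_evaluate(pred_func, output_file)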
def _eval(self):
    # Single-predictor evaluation callback: dump results under the logger
    # directory and push COCO scores to the training monitors.
    all_results = eval_coco(self.df, lambda img: detect_one_image(img, self.pred))
    output_file = os.path.join(
        logger.get_logger_dir(), 'outputs{}.json'.format(self.global_step))
    with open(output_file, 'w') as f:
        json.dump(all_results, f)
    scores = print_evaluation_scores(output_file)
    for k, v in scores.items():
        self.trainer.monitors.put_scalar(k, v)
def _eval(self):
    logdir = args.logdir
    if cfg.TRAINER == 'replicated':
        # Replicated trainer: evaluate all dataflow shards in parallel,
        # one predictor per worker thread.
        with ThreadPoolExecutor(max_workers=self.num_predictor,
                                thread_name_prefix='EvalWorker') as executor, \
                tqdm.tqdm(total=sum([df.size() for df in self.dataflows])) as pbar:
            futures = []
            for dataflow, pred in zip(self.dataflows, self.predictors):
                futures.append(executor.submit(eval_coco, dataflow, pred, pbar))
            all_results = list(itertools.chain(*[fut.result() for fut in futures]))
    else:
        # Horovod: each evaluating local rank dumps a partial result file.
        if self._horovod_run_eval:
            local_results = eval_coco(self.dataflow, self.predictor)
            output_partial = os.path.join(
                logdir, 'outputs{}-part{}.json'.format(self.global_step, hvd.local_rank()))
            with open(output_partial, 'w') as f:
                json.dump(local_results, f)
        # All ranks must reach the barrier, whether or not they ran eval;
        # afterwards only rank 0 merges the partial files.
        self.barrier.eval()
        if hvd.rank() > 0:
            return
        all_results = []
        for k in range(hvd.local_size()):
            output_partial = os.path.join(
                logdir, 'outputs{}-part{}.json'.format(self.global_step, k))
            with open(output_partial, 'r') as f:
                obj = json.load(f)
            all_results.extend(obj)
            os.unlink(output_partial)

    output_file = os.path.join(
        logdir, 'outputs{}.json'.format(self.global_step))
    with open(output_file, 'w') as f:
        json.dump(all_results, f)
    try:
        scores = print_evaluation_scores(output_file)
        for k, v in scores.items():
            self.trainer.monitors.put_scalar(k, v)
    except Exception:
        logger.exception("Exception in COCO evaluation.")
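# Sketch (an assumption about how self.barrier is constructed elsewhere):
# under Horovod, an allreduce over a dummy tensor serves as a synchronization
# point, so evaluating its op blocks each rank until every rank arrives.
def _build_horovod_barrier():
    import tensorflow as tf
    import horovod.tensorflow as hvd
    return hvd.allreduce(tf.random_normal(shape=[1]))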
if args.evaluate:
    batch_size = cfg.PREPROC.EVAL_BATCH_SIZE
if args.predict:
    batch_size = 1
MODEL = PredModel(batch_size, input_shape)
pred_config = PredictConfig(
    model=MODEL,
    session_init=get_model_loader(args.load),
    input_names=MODEL.get_inference_tensor_names()[0],
    output_names=MODEL.get_inference_tensor_names()[1])
if args.evaluate:
    assert args.evaluate.endswith('.json') or args.evaluate.endswith('.npz'), args.evaluate
    if args.evalfromjson:
        ret = print_evaluation_scores(args.evaluate)
    else:
        ret = do_evaluate(pred_config, args.evaluate, batch_size)
    print('mIoU = {:.3f}'.format(ret['miou']))
else:
    pred = OfflinePredictor(pred_config)
    if args.export_graph:
        from tensorflow.python.framework import graph_io
        export_path, export_name = os.path.split(args.export_graph)
        # Write a human-readable copy (e.g. 'model.pb' -> 'model.pbtxt'),
        # then the binary GraphDef itself.
        graph_io.write_graph(pred.sess.graph, export_path,
                             export_name + 'txt', as_text=True)
        graph_io.write_graph(pred.sess.graph, export_path, export_name,
                             as_text=False)
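# Hypothetical follow-up sketch (not in the original file): the binary
# GraphDef written above can be loaded back for inspection or inference.
def _load_exported_graph(path):
    import tensorflow as tf
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')
    return graph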