def offline_evaluate(pred_func, output_file):
    """Run detection over the eval dataflow, dump raw results as JSON and
    print the evaluation scores.

    Args:
        pred_func: prediction callable forwarded to ``detect_one_image``.
        output_file: path the raw detection results are written to (JSON).
    """
    dataflow = get_eval_dataflow()

    def predict(img):
        return detect_one_image(img, pred_func)

    results = eval_on_dataflow(dataflow, predict)
    with open(output_file, 'w') as fp:
        json.dump(results, fp)
    print_evaluation_scores(output_file)
def _eval(self):
    """Evaluate the current model on ``self.df``, dump raw results to a
    per-step JSON file in the logger directory, and print the scores."""
    def predict(img):
        return detect_one_image(img, self.pred)

    results = eval_on_dataflow(self.df, predict)
    result_path = os.path.join(
        logger.get_logger_dir(),
        'outputs{}.json'.format(self.global_step))
    with open(result_path, 'w') as fp:
        json.dump(results, fp)
    print_evaluation_scores(result_path)
def _eval(self):
    """Evaluate the current model on ``self.df``, dump raw results to a
    per-step JSON file, then publish every score to the trainer monitors."""
    def predict(img):
        return detect_one_image(img, self.pred)

    results = eval_on_dataflow(self.df, predict)
    result_path = os.path.join(
        logger.get_logger_dir(),
        'outputs{}.json'.format(self.global_step))
    with open(result_path, 'w') as fp:
        json.dump(results, fp)
    scores = print_evaluation_scores(result_path)
    for name, value in scores.items():
        self.trainer.monitors.put_scalar(name, value)
def offline_evaluate(pred_func, output_file):
    """Run detection over the test dataflow (masks disabled), dump raw
    results as JSON and print the evaluation scores.

    Args:
        pred_func: prediction callable forwarded to ``detect_one_image``.
        output_file: path the raw detection results are written to (JSON).
    """
    df = get_test_dataflow(add_mask=False)
    all_results = eval_on_dataflow(
        df, lambda img: detect_one_image(img, pred_func))
    with open(output_file, 'w') as f:
        # cls=MyEncoder: project-defined encoder — presumably handles result
        # types the default JSON encoder rejects (TODO confirm).
        json.dump(all_results, f, cls=MyEncoder)
    ret = print_evaluation_scores(output_file)
    print(ret)
def _eval(self):
    """Evaluate the current model on ``self.df`` and publish the locally
    computed score to the trainer monitors.

    The former JSON-dump + COCO-style scoring path was dead code (left
    behind as a no-op string literal) and has been removed; only the
    local score returned by ``eval_on_dataflow`` is reported.
    """
    _, local_score = eval_on_dataflow(
        self.df, lambda img: detect_one_image(img, self.pred))
    scores = {'local': local_score}
    for k, v in scores.items():
        self.trainer.monitors.put_scalar(k, v)
def offline_evaluate(model_path, output_file):
    """Build an offline predictor from a checkpoint, run detection over the
    eval dataflow, dump raw results as JSON and print the scores.

    Args:
        model_path: checkpoint to initialize the model session from.
        output_file: path the raw detection results are written to (JSON).
    """
    pred_config = PredictConfig(
        model=Model(),
        session_init=get_model_loader(model_path),
        input_names=['image'],
        output_names=[
            'fastrcnn_fg_probs',
            'fastrcnn_fg_boxes',
        ])
    predictor = OfflinePredictor(pred_config)

    dataflow = PrefetchDataZMQ(get_eval_dataflow(), 1)

    def predict(img):
        return detect_one_image(img, predictor)

    results = eval_on_dataflow(dataflow, predict)
    with open(output_file, 'w') as fp:
        json.dump(results, fp)
    print_evaluation_scores(output_file)
def offline_evaluate(pred_func, output_file):
    """Run detection over the test dataflow, save the raw results and print
    them.

    Args:
        pred_func: prediction callable forwarded to ``detect_one_image``.
        output_file: path the raw detection results are written to (JSON).
            This parameter was previously accepted but never used; results
            are now persisted, consistent with the other evaluate entry
            points in this file.
    """
    df = get_test_dataflow()
    all_results = eval_on_dataflow(
        df, lambda img: detect_one_image(img, pred_func))
    with open(output_file, 'w') as f:
        json.dump(all_results, f)
    print(all_results)
if args.visualize or args.evaluate or args.predict: # autotune is too slow for inference os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0' assert args.load print_config() if args.evaluate: pred = OfflinePredictor( PredictConfig(model=Model(), session_init=get_model_loader(args.load), input_names=['image'], output_names=get_model_output_names())) df = get_test_dataflow(add_mask=True) df.reset_state() all_results, local_score = eval_on_dataflow( df, lambda img: detect_one_image(img, pred)) print("F2 Score: ", local_score) elif args.predict: imgs = Detection.load_many( config.BASEDIR, config.TEST_DATASET, add_gt=False) # to load the class names into caches # filter with zero-ship imgs = [(img['image_data'], img['id']) for img in imgs] pred = OfflinePredictor( PredictConfig(model=Model(), session_init=get_model_loader(args.load), input_names=['image'], output_names=get_model_output_names())) predict_many(pred, imgs) else: