def main(argv=None):
    """Run the text detector over a set of images.

    Builds the detection graph, restores the trained model into a fresh
    session, and calls ``predict`` on every image matching the configured
    filename patterns, writing bounding-box information to a text file
    per image.
    """
    # Gather input images; exit early when nothing matches the patterns.
    filenames = data_tools.get_filenames(
        FLAGS.image_path,
        str.split(FLAGS.filename_pattern, ','),
        FLAGS.filename_extension)
    if not filenames:
        print("No matching images. Exiting...")
        return

    with tf.get_default_graph().as_default():
        # NHWC float input; batch and spatial dimensions left dynamic.
        input_images = tf.placeholder(tf.float32,
                                      shape=[None, None, None, 3],
                                      name='input_images')
        f_score, f_geometry = model.outputs(input_images, is_training=False)

        with tf.Session() as sess:
            restore_model(sess)
            tile_shape = (FLAGS.tile_size, FLAGS.tile_size)
            for image_file in filenames:
                predict(sess, image_file, FLAGS.pyramid_levels,
                        input_images, f_score, f_geometry, tile_shape)
def main(argv=None):
    """Evaluate detector predictions against ground truth.

    Loads paired prediction (text) and ground-truth (JSON) files,
    optionally filters predictions by score threshold, computes
    per-sample and overall statistics, prints them, and optionally
    saves the results as a JSON file alongside the predictions.
    """
    # Load file lists; every prediction must have a paired ground truth.
    prediction_files = data_tools.get_filenames(
        FLAGS.pred_path,
        str.split(FLAGS.filename_pattern, ','),
        'txt')
    ground_truth_files = data_tools.get_paired_filenames(
        prediction_files, FLAGS.gt_path, 'json')
    assert len(ground_truth_files) == len(prediction_files)

    # Load file contents and package for stats evaluation
    predictions = {}
    ground_truths = {}
    for pred_file, truth_file in zip(prediction_files, ground_truth_files):
        # Key results by the file's base name so predictions and ground
        # truths for the same sample line up.
        base = os.path.splitext(os.path.basename(pred_file))[0]

        [_, gt_polys, gt_labels] = data_tools.parse_boxes_from_json(truth_file)
        [_, polys, labels, scores] = data_tools.parse_boxes_from_text(pred_file)

        if FLAGS.score_thresh:  # Filter predictions if necessary
            polys, labels, scores = threshold_predictions(
                polys, labels, scores)

        predictions[base] = {'polygons': polys,
                             'labels': labels,
                             'scores': scores}
        ground_truths[base] = {'polygons': gt_polys,
                               'labels': gt_labels}

    # Calculate statistics on predictions for ground truths
    sample_stats, total_stats = stats.evaluate_predictions(
        ground_truths,
        predictions,
        match_labels=FLAGS.match_labels,
        iou_match_thresh=FLAGS.iou_thresh)

    # Display and optionally save the results.
    # FIX: the original used Python 2 print statements
    # ("print sample_stats"), a syntax error under Python 3; print()
    # calls with a single argument are valid in both Python 2 and 3.
    print(sample_stats)
    print(total_stats)

    if FLAGS.save_result:
        import json
        result_path = os.path.join(FLAGS.pred_path,
                                   FLAGS.save_result + '.json')
        with open(result_path, 'w') as fd:
            json.dump({'individual': sample_stats, 'overall': total_stats},
                      fd, indent=4)
def _get_filenames(image_path, gt_path, file_patterns, image_ext, gt_ext):
    """Build matching lists of image and ground-truth file paths.

    Parameters:
      image_path    : Directory containing the images
      gt_path       : Directory containing the ground truth files
      file_patterns : List of wildcard basename patterns for input files
      image_ext     : Extension of image files (appended to patterns)
      gt_ext        : Extension of ground truth files

    Returns:
      Two parallel lists: image file names and their corresponding
      ground-truth file names, each with path and extension.
    """
    images = data_tools.get_filenames(image_path, file_patterns, image_ext)
    truths = data_tools.get_paired_filenames(images, gt_path, gt_ext)
    return images, truths