Example #1
    # Something has gone bonkers if there are images in the results that
    # aren't in the request
    extra_images = result_images_set - requested_images_set
    assert len(extra_images) == 0

# ...for each folder

#%% Post-processing (no ground truth)

html_output_files = []

# i_folder = 0; folder_name_raw = folder_names[i_folder]
for i_folder, folder_name_raw in enumerate(folder_names):

    options = PostProcessingOptions()
    options.image_base_dir = image_base
    options.parallelize_rendering = True
    options.include_almost_detections = True
    options.num_images_to_sample = 5000
    options.confidence_threshold = 0.8
    options.almost_detection_confidence_threshold = options.confidence_threshold - 0.05
    options.ground_truth_json_file = None

    folder_name = path_utils.clean_filename(folder_name_raw)
    if len(folder_name) == 0:
        folder_token = ''
    else:
        folder_token = folder_name + '_'
    output_base = os.path.join(postprocessing_output_folder,folder_token + \
        job_set_name + '_{:.3f}'.format(options.confidence_threshold))
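
The extra_images sanity check at the top of Example #1 compares the set of images returned by the batch job against the set that was requested. A minimal sketch of how those two sets might be built, assuming the standard MegaDetector batch-output format (an 'images' list whose entries carry a 'file' field) and a plain-text request list; both file names below are placeholders, not taken from the original script:

import json

combined_api_output_file = 'combined_api_outputs/job_detections.json'  # placeholder
request_list_file = 'requested_images.txt'                             # placeholder

# Image paths present in the detector output
with open(combined_api_output_file, 'r') as f:
    results = json.load(f)
result_images_set = set(im['file'] for im in results['images'])

# Image paths that were submitted with the job
with open(request_list_file, 'r') as f:
    requested_images_set = set(line.strip() for line in f if line.strip())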
Example #2
    # Something has gone bonkers if there are images in the results that
    # aren't in the request
    extra_images = result_images_set - requested_images_set
    assert len(extra_images) == 0

# ...for each folder

#%% Post-processing (no ground truth)

html_output_files = []

# i_folder = 0; folder_name_raw = folder_names[i_folder]
for i_folder, folder_name_raw in enumerate(folder_names):

    options = PostProcessingOptions()
    options.image_base_dir = read_only_sas_url
    options.parallelize_rendering = True
    options.include_almost_detections = True
    options.num_images_to_sample = 5000
    options.confidence_threshold = 0.8
    options.almost_detection_confidence_threshold = options.confidence_threshold - 0.05
    options.ground_truth_json_file = None
    options.separate_detections_by_category = True

    folder_name = path_utils.clean_filename(folder_name_raw)
    if len(folder_name) == 0:
        folder_token = ''
    else:
        folder_token = folder_name + '_'
    output_base = os.path.join(postprocessing_output_folder, folder_token + \
        job_set_name + '_{:.3f}'.format(options.confidence_threshold))
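
Unlike Example #1, this variant renders images directly from blob storage: image_base_dir is set to a read-only SAS URL rather than a local folder. Purely as an illustration of the two usages (the URL below is a placeholder, not from the original job):

# Local folder (Example #1); placeholder path
options.image_base_dir = r'e:\my-images'

# Read-only Azure SAS URL (this example); placeholder URL
options.image_base_dir = 'https://storageaccount.blob.core.windows.net/container?sv=...&sr=c&sig=...'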
Example #3
    
# ...for each folder
    
    
#%% Post-processing (no ground truth)

html_output_files = []

for i_folder,folder_name in enumerate(folder_names):
        
    output_base = os.path.join(postprocessing_output_folder,folder_name)
    os.makedirs(output_base,exist_ok=True)
    print('Processing {} to {}'.format(folder_name,output_base))
    api_output_file = folder_name_to_combined_output_file[folder_name]

    options = PostProcessingOptions()
    options.image_base_dir = image_base
    options.parallelize_rendering = True
    options.include_almost_detections = True
    options.num_images_to_sample = 5000
    options.confidence_threshold = 0.8
    options.almost_detection_confidence_threshold = 0.75
    options.ground_truth_json_file = None
    
    options.api_output_file = api_output_file
    options.output_dir = output_base
    ppresults = process_batch_results(options)
    html_output_files.append(ppresults.output_html_file)
    
for fn in html_output_files:
    os.startfile(fn)
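
os.startfile only exists on Windows. A cross-platform way to open the generated HTML previews, mirroring the opener logic that appears in Example #8 below:

import os
import sys
import subprocess

def open_file_cross_platform(filename):
    # os.startfile is Windows-only; fall back to 'open' (macOS) or 'xdg-open' (Linux)
    if sys.platform.startswith('win'):
        os.startfile(filename)
    else:
        opener = 'open' if sys.platform == 'darwin' else 'xdg-open'
        subprocess.call([opener, filename])

for fn in html_output_files:
    open_file_cross_platform(fn)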
Example #4
parser.add_argument(
    '--binary_threshold',
    type=float,
    help='Switches to two-class mode using the threshold in [0,1] provided')
args = parser.parse_args()

# Validate parameters
assert os.path.isfile(args.frozen_graph)
assert os.path.isfile(args.image_list) and os.path.isfile(args.class_list)
if args.image_dir:
    assert os.path.isdir(args.image_dir)
os.makedirs(args.output_dir, exist_ok=True)
assert args.num_samples > 0

# Transfer parameters to post-processing format
options = PostProcessingOptions()
options.output_dir = args.output_dir

# Load frozen graph
print('Loading model...')
model_graph = tf.Graph()
with model_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(args.frozen_graph, 'rb') as fid:
        od_graph_def.ParseFromString(fid.read())
        tf.import_graph_def(od_graph_def, name='')
graph = model_graph

# Reading image list
with open(args.image_list, 'rt') as fi:
    image_list = fi.read().splitlines()
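
The snippet stops after loading the frozen graph and the image list. A hedged sketch of the inference step that would typically follow, assuming a TensorFlow 1.x Object Detection API frozen graph exposing the conventional tensor names ('image_tensor:0', 'detection_boxes:0', ...); this continuation is not part of the original file:

import numpy as np
from PIL import Image

with tf.Session(graph=graph) as sess:
    image_tensor = graph.get_tensor_by_name('image_tensor:0')
    output_tensors = [graph.get_tensor_by_name(t) for t in
                      ['detection_boxes:0', 'detection_scores:0', 'detection_classes:0']]

    for image_file in image_list:
        # Image paths are assumed to be relative to args.image_dir when it's provided
        image_path = os.path.join(args.image_dir, image_file) if args.image_dir else image_file
        image_np = np.expand_dims(np.asarray(Image.open(image_path).convert('RGB')), axis=0)
        boxes, scores, classes = sess.run(output_tensors,
                                          feed_dict={image_tensor: image_np})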
Example #5

#%% Precision-recall analysis

from api.batch_processing.postprocessing.postprocess_batch_results import PostProcessingOptions
from api.batch_processing.postprocessing.postprocess_batch_results import process_batch_results

api_output_file = r'g:\auckland-doc\auckland-doc_20200801\combined_api_outputs\auckland-doc_202008012020.08.01_reformatMaukahuka_Auckland_Island2_TestingSummer_Trial_2019_detections.filtered_rde_0.60_0.85_5_0.05.json'
postprocessing_output_folder = r'G:\auckland-doc\auckland-doc_20200801\postprocessing'
image_base = r'E:\auckland-test\2_Testing'
ground_truth_json_file = output_json_filename

output_base = os.path.join(postprocessing_output_folder,'pr_analysis')
os.makedirs(output_base,exist_ok=True)

options = PostProcessingOptions()
options.unlabeled_classes.append('openadjusted')
options.image_base_dir = image_base
options.parallelize_rendering = True
options.include_almost_detections = True
options.num_images_to_sample = 2500
options.confidence_threshold = 0.75
options.almost_detection_confidence_threshold = 0.7
options.ground_truth_json_file = ground_truth_json_file
options.allow_missing_images = True
options.ground_truth_filename_replacements = {} 
options.api_output_filename_replacements = {'2020.08.01_reformat\\Maukahuka_Auckland_Island\\2_Testing\\':''}
options.api_output_file = api_output_file
options.output_dir = output_base
ppresults = process_batch_results(options)
os.startfile(ppresults.output_html_file)
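
The api_output_filename_replacements option above is presumably applied as a plain substring substitution on the image paths in the detector output, so that they line up with the relative paths used in the ground-truth .json. Illustration only; the trailing path components are made up:

# How a path in the API output would be rewritten by the replacement dict above
api_path = '2020.08.01_reformat\\Maukahuka_Auckland_Island\\2_Testing\\cam01\\IMG_0001.JPG'
for old, new in options.api_output_filename_replacements.items():
    api_path = api_path.replace(old, new)
print(api_path)  # -> cam01\IMG_0001.JPG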
Example #6
        'animal': detection_thresholds[j],
        'person': detection_thresholds[j],
        'vehicle': detection_thresholds[j]
    }
    options.pairwise_options.append(pairwise_options)

results = compare_batch_results(options)

from path_utils import open_file  # from ai4eutils
open_file(results.html_output_file)

#%% Post-processing (no ground truth)

render_animals_only = False

options = PostProcessingOptions()
options.image_base_dir = input_path
options.parallelize_rendering = True
options.include_almost_detections = True
options.num_images_to_sample = 7500
options.parallelize_rendering_n_cores = n_rendering_threads
options.confidence_threshold = 0.8
options.almost_detection_confidence_threshold = options.confidence_threshold - 0.05
options.ground_truth_json_file = None
options.separate_detections_by_category = True
# options.sample_seed = 0

if render_animals_only:
    # Omit some pages from the output, useful when animals are rare
    options.rendering_bypass_sets = [
        'detections_person', 'detections_vehicle', 'detections_person_vehicle',
        'non_detections']
Example #7
    # aren't in the request
    extra_images = result_images_set - requested_images_set
    assert len(extra_images) == 0

# ...for each folder


#%% Post-processing (no ground truth)

render_animals_only = False
html_output_files = []

# i_folder = 0; folder_name_raw = folder_names[i_folder]
for i_folder, folder_name_raw in enumerate(folder_names):

    options = PostProcessingOptions()
    options.image_base_dir = read_only_sas_url
    options.parallelize_rendering = True
    options.include_almost_detections = True
    options.num_images_to_sample = 7500
    options.confidence_threshold = 0.8
    options.almost_detection_confidence_threshold = options.confidence_threshold - 0.05
    options.ground_truth_json_file = None
    options.separate_detections_by_category = True

    if render_animals_only:
        # Omit some pages from the output, useful when animals are rare
        options.rendering_bypass_sets = ['detections_person','detections_vehicle',
                                         'detections_person_vehicle','non_detections']
    
    folder_name = path_utils.clean_filename(folder_name_raw)
Example #8
        else:
            opener = "open" if sys.platform == "darwin" else "xdg-open"
            subprocess.call([opener, filename])

    from api.batch_processing.postprocessing.postprocess_batch_results import (
        PostProcessingOptions, process_batch_results)

    input_base = '/media/user/lila-01/lila/snapshot-safari/MTZ/MTZ_public'

    temporary_results_file = os.path.expanduser('~/tmp/filtered_results.json')
    with open(temporary_results_file, 'w') as f:
        json.dump(md_results, f, indent=2)

    output_base = os.path.expanduser('~/tmp/pr-image-level-preview')
    os.makedirs(output_base, exist_ok=True)

    options = PostProcessingOptions()
    options.image_base_dir = input_base
    options.parallelize_rendering = True
    options.include_almost_detections = True
    options.num_images_to_sample = 10000
    options.confidence_threshold = 0.75
    options.classification_confidence_threshold = 0.75
    options.almost_detection_confidence_threshold = options.confidence_threshold - 0.05
    options.ground_truth_json_file = ground_truth_file
    options.separate_detections_by_category = True
    options.api_output_file = temporary_results_file
    options.output_dir = output_base
    ppresults = process_batch_results(options)
    open_file(ppresults.output_html_file)
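
md_results is written to a file literally named filtered_results.json, so it has presumably been loaded and filtered from a full MegaDetector results file earlier in this script. A hypothetical version of that upstream step; the file name and the filter criterion are illustrative, not from the original:

import json
import os

full_results_file = os.path.expanduser('~/tmp/mtz_full_results.json')  # placeholder
with open(full_results_file, 'r') as f:
    md_results = json.load(f)

# Illustrative filter: keep only images with at least one detection at conf >= 0.2,
# guarding against images whose 'detections' field is missing or None
md_results['images'] = [im for im in md_results['images']
                        if any(d['conf'] >= 0.2 for d in (im.get('detections') or []))]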