def show_failed_samples(grading_data_dir_name,
                        subset_name,
                        num_of_samples=100,
                        run_num='run_1'):
    """Display prediction samples that failed grading, with summary stats.

    Prints the total image count for the subset, intersection-over-union
    grading statistics (false positives / false negatives), then shows up
    to ``num_of_samples`` failed image tuples.

    Args:
        grading_data_dir_name: directory name under ``../data`` holding the
            graded data.
        subset_name: name of the image subset to inspect.
        num_of_samples: maximum number of failed samples to display.
        run_num: run identifier (e.g. ``'run_1'``).
    """
    # Count all images in the subset.
    path = os.path.join('..', 'data', grading_data_dir_name)
    ims = np.array(plotting_tools.get_im_files(path, subset_name))
    print('All images: ', len(ims))

    # Fetch failed samples plus the grading counts for this run.
    im_files, n_preds, n_false_pos, n_false_neg = get_failed_im_file_sample(
        grading_data_dir_name,
        subset_name,
        run_num,
        n_file_names=num_of_samples)

    # Fixed typo in the original message: "evaulated" -> "evaluated".
    print(
        'number of validation samples intersection over the union evaluated on {}'
        .format(n_preds))
    print(
        'number false positives: {}(P={:.6}), number false negatives: {}(P={:.6})'
        .format(n_false_pos, n_false_pos / n_preds, n_false_neg,
                n_false_neg / n_preds))
    print('number failed: {}(P={:.6})'.format(
        n_false_pos + n_false_neg, (n_false_pos + n_false_neg) / n_preds))
    print()

    print('Sample images: ', len(im_files))
    # Show each failed sample, printing its index first.
    for i, im_file in enumerate(im_files[:num_of_samples]):
        print(i)
        im_tuple = plotting_tools.load_images(im_file)
        plotting_tools.show_images(im_tuple)
# --- Example 2 ---
# Run the model over the "patrol without target" evaluation subset;
# presumably writes prediction images to disk and returns the paired
# (validation, prediction) file collections — verify against model_tools.
# NOTE(review): assumes `model` and `run_num` are defined earlier — confirm.
val_no_targ, pred_no_targ = model_tools.write_predictions_grade_set(
    model, run_num, 'patrol_non_targ', 'sample_evaluation_data')

# Same evaluation for frames captured while following the target.
val_following, pred_following = model_tools.write_predictions_grade_set(
    model, run_num, 'following_images', 'sample_evaluation_data')

# Now lets look at your predictions, and compare them to the ground truth labels and original images.
# Run each of the following cells to visualize some sample images from the predictions in the validation set.

# In[15]:

# Visualize three sample predictions taken while following the target.
im_files = plotting_tools.get_im_file_sample('sample_evaluation_data',
                                             'following_images', run_num)
for idx in range(3):
    plotting_tools.show_images(plotting_tools.load_images(im_files[idx]))

# In[16]:

# Visualize three sample predictions taken while on patrol with no target.
im_files = plotting_tools.get_im_file_sample('sample_evaluation_data',
                                             'patrol_non_targ', run_num)
for idx in range(3):
    plotting_tools.show_images(plotting_tools.load_images(im_files[idx]))

# In[17]:

# images while at patrol with target
im_files = plotting_tools.get_im_file_sample('sample_evaluation_data',
# --- Example 3 ---
    random.shuffle(x)
    return x


# Collect the prediction files and shuffle them so the review loop below
# samples a random subset rather than the first files on disk.
im_files0 = get_files(pred_folder)

im_files = shuffle(im_files0)

# NOTE(review): leftover interactive breakpoint — remove for automated runs.
ipdb.set_trace()

# Score the whole run against the ground-truth folder.
# (Removed a stray no-op expression statement `validation_path` that did
# nothing but would raise NameError if the name were undefined.)
output_path = pred_folder
scoring_utils.score_run(gt_folder, output_path)

# Inspect up to 30 shuffled predictions alongside their source image and
# ground-truth mask (slicing avoids an IndexError when fewer files exist).
for pred_name in im_files[:30]:
    base_pred_name = os.path.basename(pred_name)
    im_name, mask_name = get_img_mask(base_pred_name, gt_folder)
    # Warn — but keep going — when the matching image or mask is missing.
    if not os.path.exists(im_name):
        print('{} does not exist'.format(im_name))

    if not os.path.exists(mask_name):
        print('{} does not exist'.format(mask_name))

    new_im_files = (im_name, mask_name, pred_name)
    im_tuple = plotting_tools.load_images(new_im_files)
    plotting_tools.show_images(im_tuple, fig_id=3)
    # Pause after each figure for interactive inspection.
    ipdb.set_trace()