import imageio
import numpy as np
from scipy import misc  # scipy.misc.imresize was removed in SciPy 1.3; pin scipy<1.3 or substitute skimage.transform.resize

from utils import plotting_tools  # project helper module; path may differ


def get_failed_im_file_sample(grading_dir_name,
                              subset_name,
                              out_folder_suffix='run_1',
                              n_file_names=10):
    '''Return a random sample of failed (image, mask, prediction) file
    triples, plus the prediction count and false positive/negative counts.'''

    im_files = np.array(
        plotting_tools.get_im_file_sample(grading_dir_name,
                                          subset_name,
                                          out_folder_suffix,
                                          n_file_names=None))
    gt_files = im_files[:, 1]
    pred_files = im_files[:, 2]

    n_preds = len(gt_files)
    n_false_neg = 0
    n_false_pos = 0

    failed_indexes = []
    for e, gt_file in enumerate(gt_files):
        # Ground truth is clipped to a binary mask; predictions are
        # thresholded at the grayscale midpoint.
        gt_mask = imageio.imread(gt_file).clip(0, 1)
        pred_mask = (imageio.imread(pred_files[e]) > 127).astype(int)

        # Resize the ground truth if its size doesn't match the prediction's.
        if gt_mask.shape[0] != pred_mask.shape[0]:
            gt_mask = misc.imresize(gt_mask, pred_mask.shape)

        # Channel 2 holds the target class; more than 3 pixels counts as present.
        gt_has_target = gt_mask[:, :, 2].sum() > 3
        pred_has_target = pred_mask[:, :, 2].sum() > 3
        if gt_has_target and not pred_has_target:
            n_false_neg += 1
            failed_indexes.append(e)
        elif pred_has_target and not gt_has_target:
            n_false_pos += 1
            failed_indexes.append(e)

    # Draw a random sample of the failed cases.
    random_sample_indexes = np.random.permutation(
        failed_indexes)[:n_file_names]
    if len(random_sample_indexes) > 0:
        ims_subset = im_files[random_sample_indexes, 0]
        masks_subset = im_files[random_sample_indexes, 1]
        preds_subset = im_files[random_sample_indexes, 2]
    else:
        ims_subset = np.array([], dtype=str)
        masks_subset = np.array([], dtype=str)
        preds_subset = np.array([], dtype=str)

    return list(zip(ims_subset, masks_subset,
                    preds_subset)), n_preds, n_false_pos, n_false_neg
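
# A minimal usage sketch, assuming the evaluation layout used in the cells
# below ('sample_evaluation_data' / 'following_images'); the variable names
# here are illustrative, not part of the project API.
failed, n_preds, n_fp, n_fn = get_failed_im_file_sample(
    'sample_evaluation_data', 'following_images', out_folder_suffix='run_1')
print('{} predictions: {} false positives, {} false negatives'.format(
    n_preds, n_fp, n_fn))
for files in failed[:3]:
    im_tuple = plotting_tools.load_images(files)
    plotting_tools.show_images(im_tuple)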
Example #2
val_with_targ, pred_with_targ = model_tools.write_predictions_grade_set(
    model, run_num, 'patrol_with_targ', 'sample_evaluation_data')

val_no_targ, pred_no_targ = model_tools.write_predictions_grade_set(
    model, run_num, 'patrol_non_targ', 'sample_evaluation_data')

val_following, pred_following = model_tools.write_predictions_grade_set(
    model, run_num, 'following_images', 'sample_evaluation_data')

# Now let's look at your predictions and compare them to the ground truth labels and original images.
# Run each of the following cells to visualize some sample images from the predictions on the validation set.

# In[15]:

# images while following the target
im_files = plotting_tools.get_im_file_sample('sample_evaluation_data',
                                             'following_images', run_num)
for i in range(3):
    im_tuple = plotting_tools.load_images(im_files[i])
    plotting_tools.show_images(im_tuple)

# In[16]:

# images while on patrol without the target
im_files = plotting_tools.get_im_file_sample('sample_evaluation_data',
                                             'patrol_non_targ', run_num)
for i in range(3):
    im_tuple = plotting_tools.load_images(im_files[i])
    plotting_tools.show_images(im_tuple)

# In[17]:
Example #3
# In[123]:

# If you need to load a model that you previously trained, you can uncomment the line below that calls the function.

# weight_file_name = 'model_weights'
# restored_model = model_tools.load_network(weight_file_name)

# In[124]:

# Generate predictions and save them in the runs directory.
run_number = 'run1'
validation_path, output_path = model_tools.write_predictions_grade_set(
    model, run_number, 'validation')

# In[127]:

# Take a look at the predictions.
# validation_path = 'validation'
im_files = plotting_tools.get_im_file_sample(run_number, validation_path)
for i in range(10):
    im_tuple = plotting_tools.load_images(im_files[i])
    plotting_tools.show_images(im_tuple)

# ## Evaluation<a id='evaluation'></a>
# Let's evaluate your model!

# In[128]:

scoring_utils.score_run(validation_path, output_path)
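
# For intuition, a minimal sketch of the kind of per-mask comparison a scorer
# performs: intersection-over-union on one class channel, using the same
# conventions as the code above (target in channel 2, predictions thresholded
# at 127). `mask_iou` is a hypothetical helper for illustration, not part of
# scoring_utils.
import numpy as np
import imageio


def mask_iou(gt_file, pred_file, channel=2, threshold=127):
    gt = imageio.imread(gt_file)[:, :, channel].clip(0, 1).astype(bool)
    pred = imageio.imread(pred_file)[:, :, channel] > threshold
    union = np.logical_or(gt, pred).sum()
    return np.logical_and(gt, pred).sum() / union if union else 1.0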