import csv
import os

import numpy as np

# skin_lesion_segmentation and rle_encode are assumed to be defined
# elsewhere in the project.


def test_prediction_csv(dir_images_name='database/test/images',
                        csv_name='test_prediction.csv'):
    dir_images = np.sort(os.listdir(dir_images_name))
    with open(csv_name, 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(["ImageId", "EncodedPixels"])
        for i, image_name in enumerate(dir_images):
            # - - - Call to the 'skin_lesion_segmentation' function, which
            # implements the proposed lesion segmentation method and
            # returns the predicted mask.
            predicted_mask = skin_lesion_segmentation(
                os.path.join(dir_images_name, image_name))
            # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

            # - - - RLE encoding and writing to the .csv file
            encoded_pixels = rle_encode(predicted_mask)
            writer.writerow([os.path.splitext(image_name)[0], encoded_pixels])
            print('Mask ' + str(i) + ' encoded.')
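
Every example on this page leans on an rle_encode helper that the snippets themselves don't define. A minimal sketch, assuming the common Kaggle convention of 1-indexed "start length" pairs over a column-major flattening (examples that transpose the mask before encoding are compensating for an encoder that flattens row-major instead):

import numpy as np

def rle_encode(mask):
    # Flatten in column-major (Fortran) order, as the Kaggle
    # segmentation competitions expect.
    pixels = np.asarray(mask).flatten(order='F')
    # Pad with zeros so every run has a well-defined start and end.
    pixels = np.concatenate([[0], pixels, [0]])
    # Positions where the value changes mark run boundaries (1-indexed).
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    # Convert every second boundary from an end position to a run length.
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)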
Example #2
import csv
import os
import time

import numpy as np
import pandas as pd
import scipy.ndimage as ndi
from tqdm import tqdm

# SUBMISSION_PATH, BATCH_SIZE, DEBUG, the project's rle module,
# infer_padding and examine_prediction are assumed to be defined elsewhere.

def create_submission(model,
                      datagen,
                      orig_imgsize=(1918, 1280),
                      resize=False,
                      sort=True):
    # Create the rle
    timestr = time.strftime("%Y%m%d-%H%M%S")
    submission_name = os.path.join(
        SUBMISSION_PATH,
        'carvana-sub-%s-%s.csv' % (model.name, timestr))
    print("Saving Submission file to: %s" % submission_name)
    # Open and memory efficiently write to the file
    with open(submission_name, 'w') as csvfile:
        # Initialize the header and writer
        fieldnames = ['img', 'rle_mask']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        # Loop through all the test samples and encode them
        for step in tqdm(range(datagen._generator.steps_per_epoch)):
            batch_img, batch_long_ids = next(datagen)
            batch_preds = model.predict_on_batch(batch_img)

            if resize:
                # Resize predictions back to the original image size;
                # order=0 (nearest neighbour) keeps the masks binary.
                zoom_factor = (1., orig_imgsize[1] / batch_preds.shape[1],
                               orig_imgsize[0] / batch_preds.shape[2])
                resized_batch_preds = ndi.zoom(batch_preds,
                                               zoom_factor,
                                               order=0,
                                               mode='nearest')
            else:
                # No resizing: strip the padding that was added before
                # inference to recover the original image size.
                padding = infer_padding(batch_preds.shape[1:3], orig_imgsize)
                if padding != (0, 0, 0, 0):
                    left, right, top, bottom = padding
                    resized_batch_preds = batch_preds[
                        :, top:batch_preds.shape[1] - bottom,
                        left:batch_preds.shape[2] - right]
                else:
                    resized_batch_preds = batch_preds
            # Threshold the predictions by rounding to 0/1
            resized_batch_preds = np.rint(resized_batch_preds)

            assert resized_batch_preds.shape[1:] == (
                orig_imgsize[1], orig_imgsize[0]
            ), "Resized images have wrong size of %s" % resized_batch_preds.shape

            for i in range(resized_batch_preds.shape[0]):
                sample_ind = step * BATCH_SIZE + i

                if DEBUG:
                    print("Index: ", sample_ind)
                    print("Expected Long ID: ",
                          datagen._generator.dataset.long_ids[sample_ind])
                    print("Actual Long ID: ", batch_long_ids[i])
                    print("Resized: ", resize)
                    examine_prediction(
                        datagen._generator.dataset.long_ids[sample_ind],
                        batch_img[i],
                        batch_preds[i],
                        resized_batch_preds[i],
                        orig_imgsize=orig_imgsize)

                expected_id = datagen._generator.dataset.long_ids[sample_ind]
                assert expected_id == batch_long_ids[i], \
                    "Long IDs don't match! %s and %s" % (expected_id,
                                                         batch_long_ids[i])
                # Write the row into the csv file
                writer.writerow({
                    'img': expected_id + '.jpg',
                    'rle_mask': rle.rle_to_string(
                        rle.rle_encode(resized_batch_preds[i]))
                })
    if sort:
        print("Sorting CSVfile")
        df = pd.read_csv(submission_name)
        df = df.sort_values(by=['img'])
        df.to_csv(submission_name, index=False)
    print("Submission created at: %s" % submission_name)
Example #3
import numpy as np
import pandas as pd
from tqdm import tqdm

# img_list, INVERT_PRED and rle_encode are assumed to be defined earlier
# in the script.

total = len(img_list)

out_pred_rows = []

predictions = pd.read_csv('csvs/768_resnet_noship_prob.csv')

THRESHOLD = 0.5

if not INVERT_PRED:
    has_ship_set = set(
        predictions.loc[predictions['Probability'] > THRESHOLD, 'ImageId'])
else:
    has_ship_set = set(
        predictions.loc[predictions['Probability'] <= THRESHOLD, 'ImageId'])

# RLE for an all-ones mask covering the whole 768x768 image: every image
# the classifier flags as containing a ship gets a full-frame mask.
full_submission = rle_encode(np.ones((768, 768, 1)).T)

for img_name in tqdm(img_list):
    if img_name in has_ship_set:
        out_pred_rows.append({
            'ImageId': img_name,
            'EncodedPixels': full_submission
        })
    else:
        out_pred_rows.append({'ImageId': img_name, 'EncodedPixels': None})

submission_df = pd.DataFrame(out_pred_rows)[['ImageId', 'EncodedPixels']]
submission_df.to_csv('csvs/05_thresh_768_resnet_noship.csv', index=False)
print(submission_df.head())
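
For sanity-checking rows like these, a matching rle_decode (hypothetical here, mirroring the column-major convention sketched under Example #1) can rebuild the mask:

import numpy as np

def rle_decode(rle_string, shape=(768, 768)):
    # Empty or missing EncodedPixels means an all-background mask.
    mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    if rle_string:
        nums = list(map(int, rle_string.split()))
        # 1-indexed start positions alternate with run lengths.
        for start, length in zip(nums[0::2], nums[1::2]):
            mask[start - 1:start - 1 + length] = 1
    return mask.reshape(shape, order='F')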
Example #4
            # (Fragment of a larger TTA inference loop; first_segs,
            # tta_batch_reverse, THRESHOLD, TTA_INTERSECT_RATIO, test_names
            # and SMALL_OBJ_SIZE are defined earlier in the script.)
            # Undo each test-time augmentation, stack the aligned masks,
            # and keep pixels whose summed votes clear the threshold.
            aligned_segs = np.stack(tta_batch_reverse(first_segs), 3)
            summed_seg = np.sum(aligned_segs, axis=3, keepdims=True)

            num_augs = aligned_segs.shape[3]
            comparison = THRESHOLD * TTA_INTERSECT_RATIO * num_augs

            test_pred = np.greater(summed_seg, comparison).astype('float32')

        for pred, c_img_name in zip(test_pred, test_names):

            # transpose rows/cols due to airbus data being col major order
            ships_pred = instance_segment_mask(pred, smallest_size=SMALL_OBJ_SIZE)
            if len(ships_pred) > 0:
                for ship in ships_pred:
                    c_rle = rle_encode(np.transpose(ship, axes=[1, 0, 2]))
                    out_pred_rows.append({'ImageId': c_img_name,
                                          'EncodedPixels': c_rle})
            else:
                out_pred_rows.append({'ImageId': c_img_name,
                                      'EncodedPixels': None})

        if i % 100 == 0:
            gc.collect()

else:
    out_pred_rows = [{'ImageId': c_img_name, 'EncodedPixels': None} for c_img_name in img_list]

submission_df = pd.DataFrame(out_pred_rows)[['ImageId', 'EncodedPixels']]
submission_df.to_csv('resnet34_768_ohem_best_loss_th_05_so_60_submission.csv', index=False)
print(submission_df.head())
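
instance_segment_mask is also undefined on this page. A hedged sketch, assuming it splits a binary mask into connected components and drops anything smaller than smallest_size pixels, so the loop above gets one RLE row per detected ship:

import numpy as np
from scipy import ndimage as ndi

def instance_segment_mask(mask, smallest_size=60):
    # Work on a 2-D view; the loop above passes (H, W, 1) predictions.
    mask_2d = mask[..., 0] if mask.ndim == 3 else mask
    labeled, num_features = ndi.label(mask_2d)
    instances = []
    for lab in range(1, num_features + 1):
        component = (labeled == lab)
        # Discard small connected components (likely noise).
        if component.sum() >= smallest_size:
            instances.append(component.astype(np.uint8)[..., None])
    return instances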
Example #5
import numpy as np

# predModel, pred_fn, data and the project's rle module are assumed to be
# defined earlier in the script.
print("--------Making Predictions-------\n")
predictions = predModel.predict(input_fn=pred_fn)
p = list(predictions)
p = np.reshape(p,(18000,101,101),'F')
p = np.round(p)
p_rle = []
"""
testP = p[:20]
for i in range(len(testP)):
    implt=plt.imshow(testP[i],cmap='gray')
    plt.show()
    print(data['ids'][i])
"""

for i in range(len(p)):
    p_rle.append(rle.rle_encode(p[i]))
    if i % 1000 == 0:
        print("Converting RLEs:", i, "of", len(p))

preds = {
    'id': data['ids'],
    'rle_mask': p_rle
}

'''
df = pd.DataFrame.from_dict(preds)
print(df.head())
'''

#preds = [x for x in predictions]
#print(preds[:10])
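
The DataFrame write in Example #5 is left commented out; finishing the submission is just the commented block plus a to_csv call (the output file name below is a placeholder):

import pandas as pd

# Assemble the submission table from the ids and RLE strings above.
df = pd.DataFrame.from_dict(preds)
df.to_csv('tgs_salt_submission.csv', index=False)
print(df.head())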