import os
import shutil

import cv2
import numpy as np
import segmentation_models as sm

# masks_to_submission is the course-provided helper; a sketch of it follows this example.


def generate_submission(model_file: str = "./last_best_model.h5",
                        test_set_dir: str = "./test_set_images/",
                        pr_out_path: str = "./out/",
                        backbone: str = "efficientnetb4",
                        sub_fn: str = "submission.csv"):
    """
    Load weights, generate predictions and submission.

    Parameters
    ----------
    model_file : string
        Path of the file from which to load the weights of the model.
    test_set_dir : string
        Path of the directory containing the images to use for prediction.
    pr_out_path : string
        Path of directory for predictions output.
    backbone : string
        The backbone used for the U-Net encoder.
    sub_fn : string
        Path of the generated submission file.

    Returns
    -------
    None
    """

    if not os.path.exists(pr_out_path):
        os.makedirs(pr_out_path)

    print("=== Loading weights ===")

    model = sm.Unet(backbone, encoder_weights="imagenet")
    model.load_weights(model_file)

    print("=== Weights loaded ===")
    print("=== Generating predictions ===")

    def clean_line():
        print(" " * (shutil.get_terminal_size().columns - 1), end='\r')

    for image_fn in os.listdir(test_set_dir):
        clean_line()
        print(f"Generating prediction: {image_fn}", end='\r')
        image = cv2.imread(os.path.join(test_set_dir, image_fn))
        image = np.array([image])
        pr_mask = model.predict(image).round()
        cv2.imwrite(os.path.join(pr_out_path, image_fn),
                    pr_mask.squeeze() * 255)

    clean_line()
    print("=== Predictions generated ===")

    img_fns = [os.path.join(pr_out_path, x) for x in os.listdir(pr_out_path)]
    masks_to_submission(sub_fn, *img_fns)

    print("=== Submission generated ===")
Example #2
def save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image):
    # Make folder for current run
    output_dir = os.path.join(runs_dir, str(time.time()))
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)

    # Run NN on test images and save them to HD
    print('Training Finished. Saving test images to: {}'.format(output_dir))
    image_outputs = gen_test_output(
        sess, logits, keep_prob, input_image, os.path.join(data_dir, 'data_road/test_set_images'), image_shape)
    for name, image in image_outputs:
        scipy.misc.imsave(os.path.join(output_dir, name), image)

    complete_label_files = glob(output_dir + '/test_*.png')
    submission_filename = os.path.join(output_dir, "fcn_16" + '_patch8' + '.csv')
    masks_to_submission(submission_filename, *complete_label_files)
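A note on this example: scipy.misc.imsave was removed in SciPy 1.2, so on a current stack the save is usually done with imageio instead (a drop-in sketch under that assumption, same loop):

import imageio

for name, image in image_outputs:
    # imageio.imwrite replaces the removed scipy.misc.imsave
    imageio.imwrite(os.path.join(output_dir, name), image)

Example #3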
def post_process(nimage=50, threshold_type="median", convolution_patch_size=8, kernel_size=5):
    #
    #   DESCRIPTION
    #       This function is called after all predictions are made and patches are merged
    #       for each rotated image. Images go through the following process:
    #           - the 8 rotations are merged back together, forming one prediction image
    #           - it is thresholded (median by default)
    #           - optionally, it is convolved with a cross kernel (currently commented out below)
    #       Each image is saved, and all images are then used to produce the final submission file.
    #
    #   INPUTS
    #       nimage: number of images to process
    #       threshold_type: one of "median", "mean" or "percentile"
    #       convolution_patch_size, kernel_size: options of the cross-kernel convolution
    #
    image_names = []
    folder = PREDICTION_FOLDER
    submission_file = SUBMISSION_FILE 
    for i in range(1,nimage+1):
        # Load the rotated images 
        image_names.append(folder+'/prediction_' + '%.3d' % i  + '.png')
        rots = [mpimg.imread(folder + '/prediction_' + '%.3d' % i + '_rota%d.png' % r)
                for r in range(8)]

        # Recombine the rotations into a single unrotated prediction
        unrot = unrotate(*rots)
        unrot = threshold_image_by(unrot, threshold_type)

        # Cross Kernel Convolution 
        # unrot = isolate(unrot,convolution_patch_size, kernel_size) 

        # Format prediction image and save 
        unrot = format_image(unrot)
        Image.fromarray(unrot).save(folder+'/prediction_' + '%.3d' % i  + '.png')

    # Once all prediction images have been reconstructed build the submission file     
    sub.masks_to_submission(submission_file, *image_names)
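unrotate and threshold_image_by are project-local helpers. Purely to illustrate the recombination idea (an assumed implementation, not the repository's code): merging eight test-time-augmented predictions usually means undoing each rotation/flip and averaging:

import numpy as np


def unrotate_sketch(rot0, rot1, rot2, rot3, rot4, rot5, rot6, rot7):
    # Assumed layout: rot0..rot3 are predictions for the input rotated by
    # 0/90/180/270 degrees, rot4..rot7 the same for the mirrored input.
    plain = [np.rot90(r, -k) for k, r in enumerate((rot0, rot1, rot2, rot3))]
    mirrored = [np.fliplr(np.rot90(r, -k))
                for k, r in enumerate((rot4, rot5, rot6, rot7))]
    return np.mean(plain + mirrored, axis=0)

Example #4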
def mk_submission(size, size_ker, submission_file):
    '''
    INPUTS:
        size: int, the resolution of the prediction (see isolate)
        size_ker: int, size of the convolution kernel (see isolate)
        submission_file: string, name of the submission file where to write the results.
    OUTPUTS:
        None
    '''
    image_name = []

    ## Folder containing the predictions
    folder = 'predictions'

    ## Iteration over each image of the test set
    for i in range(1,51):
        print(i)
        ## Name of the final prediction file to save (output)
        image_name.append(folder+'/prediction_' + '%.3d' % i  + '.png')
        ## Loading the eight rotated predictions
        rots = [mpimg.imread(folder + '/prediction_' + '%.3d' % i + '_rota%d.png' % r)
                for r in range(8)]

        ## Recombining the rotated images
        unrot = unrotate(*rots)

        ## Kernel convolution
        unrot = isolate(unrot, size, size_ker)

        unrot = format_image(unrot)
        
        Image.fromarray(unrot).save(folder+'/prediction_' + '%.3d' % i  + '.png')

    ## Saving the submission
    sub.masks_to_submission(submission_file, *image_name)
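isolate is again project-local; per the comments it performs a cross-kernel convolution. As a rough, assumed illustration of that idea (not the repository's code): smooth a binary road mask with a plus-shaped kernel, then re-binarize:

import numpy as np
from scipy.ndimage import convolve


def cross_kernel(size=5):
    # Plus-shaped kernel: ones along the central row and column, normalized.
    k = np.zeros((size, size))
    k[size // 2, :] = 1
    k[:, size // 2] = 1
    return k / k.sum()


def isolate_sketch(mask, size_ker=5):
    # Assumed semantics: smooth with the cross kernel, then re-binarize.
    smoothed = convolve(mask.astype(float), cross_kernel(size_ker))
    return (smoothed > 0.5).astype(mask.dtype)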
Example #5
def main(_):
    # Generate the workspace directory structure if needed
    if FLAGS.init_archi:
        archi.main()

    # Generate train_data and test_data for model1
    train_data, train_labels = build_train_data(FLAGS, archi.AUGMENTED_IMAGES,
                                                archi.AUGMENTED_GNDTRUTH)
    test_data = build_test_data(FLAGS, archi.TEST_IMAGES)

    # Generate model1 and train it
    option = dnet.Options(FLAGS.num_channels, FLAGS.num_epochs_M1,
                          FLAGS.batch_size, FLAGS.learning_rate,
                          FLAGS.keep_prob, archi.MODEL1)
    model = dnet.DeconvNet(train_data, train_labels, option)
    if FLAGS.restore_M1:
        model.restore()
    else:
        model.init()

    if not FLAGS.restore_M1 or FLAGS.cont_train_M1:
        model.train()

    # Predict on testing data and save images
    print("Predict on testing data with model1")
    predictions_test = model.predict(test_data)
    for i in range(FLAGS.num_test):
        Image.fromarray((predictions_test[i] * 255).astype("uint8")).save(
            os.path.join(archi.PREDICTIONS_TEST, 'test_{}.png'.format(i + 1)))
    predictions_test = build_test_data(FLAGS, archi.PREDICTIONS_TEST)

    # Predict on train data and save images if we need to train the second model
    predictions_train = None  # guard: otherwise undefined below when model2 is restored without retraining
    if not FLAGS.restore_M2 or FLAGS.cont_train_M2:
        print("Predict on training data with model1")
        predictions_train = model.predict(train_data)
        for i in range(FLAGS.num_train):
            Image.fromarray((predictions_train[i] * 255).astype("uint8")).save(
                os.path.join(archi.PTRAIN_IMAGES,
                             'satImage_{:03d}.png'.format(i + 1)))
        predictions_train, _ = build_train_data(FLAGS, archi.PTRAIN_IMAGES,
                                                archi.PTRAIN_GNDTRUTH)

    # Generate model2 and train it
    option = dnet.Options(FLAGS.num_channels, FLAGS.num_epochs_M2,
                          FLAGS.batch_size, FLAGS.learning_rate,
                          FLAGS.keep_prob, archi.MODEL2)
    model = dnet.DeconvNet(predictions_train, train_labels, option)
    if FLAGS.restore_M2:
        model.restore()
    else:
        model.init()

    if not FLAGS.restore_M2 or FLAGS.cont_train_M2:
        model.train()

    # Predict on predictions_test data and save images
    print("Predict on testing data with model2")
    post_processing = model.predict(predictions_test)
    for i in range(FLAGS.num_test):
        Image.fromarray((post_processing[i] * 255).astype("uint8")).save(
            os.path.join(archi.POST_PROCESSING, 'test_{}.png'.format(i + 1)))

    # Create submission
    submission_filename = 'submission.csv'
    image_filenames = []
    for i in range(FLAGS.num_test):
        image_filename = os.path.join(archi.POST_PROCESSING,
                                      'test_{}.png'.format(i + 1))
        image_filenames.append(image_filename)
    msub.masks_to_submission(submission_filename, *image_filenames)
Example #6
                                           edges)
    X_crf_test.append((X_test[i], np.asarray(edges),
                       np.asarray(edges_features).reshape(-1, 1)))
    sys.stdout.write("%d/%d" % (i + 1, len(test_imgs)))
    sys.stdout.flush()
sys.stdout.write('\n')

print('Predicting on test set')
Z_crf_test = my_ssvm.predict(X_crf_test)

test_dir_res = 'test_set_res'
print('Writing segmentations on test set')
#im = hp.sp_label_to_img(sp_labels_test[0],Z_crf_test[0])
#im_overlay = color.label2rgb(im,test_imgs[0],alpha=0.5)
#plt.imshow(im_overlay); plt.show()
assert os.path.isdir(
    test_dir_res), "Directory " + test_dir_res + " must exist!"

res_paths = list()
for i in range(len(Z_crf_test)):
    this_out_filename = os.path.splitext(os.path.basename(files_test[i]))[0]
    this_im = hp.sp_label_to_img(sp_labels_test[i], Z_crf_test[i])
    this_im_arr = Image.fromarray((this_im * 255).astype(np.uint8))
    this_path = os.path.join(test_dir_res, this_out_filename + '.png')
    this_im_arr.save(this_path)
    res_paths.append(this_path)

print('Writing submission file')
submit.masks_to_submission('submission.csv', *res_paths)
print('Done')
Example #7
def test_net(model_choice,
             resize,
             image_size,
             TTA,
             ensemble,
             test_set_output,
             test_with_labels,
             only_test_single,
             test_image_name,
             test_root,
             validate_root,
             num_test=50):
    '''
    Model test, which includes three different modes:
        1. If test_set_output = 1, we output the prediction masks of all test images to the
           directory ./output. A submission file is also produced, as required by the competition.
        2. If test_with_labels = 1, we test all the images in the dataset and print the F1 score
           and average loss.
        3. If only_test_single = 1, we only test a single image, i.e. pass it through the network.
           It also outputs the original image covered by the prediction mask, saved as test.png.

    @model_choice: 1 for LinkNet, 2 for D-LinkNet, 3 for D-LinkNet+.
    @resize: boolean flag for image resizing.
    @image_size: the size of the images fed to the network.
    @TTA: boolean flag for test-time augmentation.
    @ensemble: boolean flag to enable ensembling when testing.
    @test_set_output: boolean flag for testing all the images in the test dataset.
    @test_with_labels: boolean flag for testing on a validation dataset, with labels provided.
    @only_test_single: boolean flag for testing a single image.
    @test_image_name: the name of the image to be tested.
    @test_root: root directory for the test dataset.
    @validate_root: root directory for the validation dataset.
    @num_test: number of test images in the test dataset.
    '''

    net = utils.create_models(model_choice)
    linkNet = None
    DlinkNet = None

    weights_name = './parameters/weights' + str(model_choice)
    #    net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    if RUN_ON_GPU:
        net.load_state_dict(torch.load(weights_name))
    else:
        net.load_state_dict(
            torch.load(weights_name,
                       map_location=lambda storage, loc: storage))
    net.eval()

    if ensemble:
        linkNet = utils.create_models(0)
        DlinkNet = utils.create_models(1)
        if RUN_ON_GPU:
            linkNet.load_state_dict(torch.load('./parameters/weights0'))
            DlinkNet.load_state_dict(torch.load('./parameters/weights1'))
        else:
            linkNet.load_state_dict(
                torch.load('./parameters/weights0',
                           map_location=lambda storage, loc: storage))
            DlinkNet.load_state_dict(
                torch.load('./parameters/weights1',
                           map_location=lambda storage, loc: storage))
        linkNet.eval()
        DlinkNet.eval()

    if test_with_labels:
        loss, f1 = test.test_batch_with_labels(net,
                                               validate_root,
                                               resize=resize,
                                               batch_size=1,
                                               image_size=image_size,
                                               smooth=1.0,
                                               lam=1.0)
        print('F1 is evaluated as ', f1)
        print('Average batch loss is ', loss)

    if only_test_single:
        if ensemble:
            mask, image = test.test_single_with_ensemble(linkNet,
                                                         DlinkNet,
                                                         net,
                                                         test_image_name,
                                                         size=image_size,
                                                         resize=resize)
        elif TTA:
            mask, image = test.test_single_with_TTA(net,
                                                    test_image_name,
                                                    size=image_size,
                                                    resize=resize)
        else:
            mask, image = test.test_single_image(net,
                                                 test_image_name,
                                                 size=image_size,
                                                 resize=resize)
        io.imshow(image)
        io.imsave('test.png', image)

    if test_set_output:
        if not os.path.exists('./output'):
            os.makedirs('./output')

        for i in range(1, num_test + 1):
            t = 'test_' + str(i)
            name = os.path.join(test_root, t, t + '.png')
            if ensemble:
                mask, image = test.test_single_with_ensemble(linkNet,
                                                             DlinkNet,
                                                             net,
                                                             name,
                                                             size=image_size,
                                                             resize=resize)
            elif TTA:
                mask, image = test.test_single_with_TTA(net,
                                                        name,
                                                        size=image_size,
                                                        resize=resize)
            else:
                mask, image = test.test_single_image(net,
                                                     name,
                                                     size=image_size,
                                                     resize=resize)
            io.imsave('./output/' + 'test' + str(i) + '.png', mask)

        submission_filename = 'submission.csv'

        image_filenames = []
        for i in range(1, num_test + 1):
            image_filename = 'output/test' + str(i) + '.png'
            print(image_filename)
            image_filenames.append(image_filename)
        mask_to_submission.masks_to_submission(submission_filename,
                                               *image_filenames)
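A minimal call sketch for the test-set path (all argument values here are illustrative, not taken from the repository):

test_net(model_choice=2,          # D-LinkNet
         resize=False,
         image_size=384,          # assumed input size
         TTA=True,
         ensemble=False,
         test_set_output=1,
         test_with_labels=0,
         only_test_single=0,
         test_image_name='',
         test_root='./data/test_set_images/',
         validate_root='./data/validation/',
         num_test=50)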
Example #8
              metrics=['accuracy'])
''' Loading the pre-trained weights into the model '''
model.load_weights(weight_path)
print('Weights loaded, creating predictions...\n')
''' Creating predictions and overlay images on the testing set '''
filenames = []
if not os.path.isdir(prediction_test_dir):
    os.mkdir(prediction_test_dir)

for i in range(1, TESTING_SIZE + 1):
    if (i % np.floor(TESTING_SIZE / 10) == 0):
        print(str(int(np.floor(i / np.floor(TESTING_SIZE / 10)) * 10)),
              '% done')

    groundtruth_prediction, original_img = get_pred_img_pixelwise(
        test_data_filename, i, 'test', model, PIXEL_DEPTH, IMG_DIMENSION,
        prediction_test_dir)
    gt_filename = prediction_test_dir + "gt_pred_" + str(i) + ".png"
    filenames.append(gt_filename)
    groundtruth_prediction.save(gt_filename)

    overlay = make_img_overlay_pixel(original_img, groundtruth_prediction,
                                     PIXEL_DEPTH)
    overlay.save(prediction_test_dir + "overlay_" + str(i) + ".png")

masks_to_submission(submission_path, *filenames)
print('\nFinished creating predictions! Submission file saved to',
      submission_path)
print('Have a nice day :)\n')
print('Finished.\n\n')