from keras_segmentation.predict import evaluate


def evaluate_carla_segmentation_model(path_to_checkpoint, data_folder):
    # Evaluate a checkpointed model on the CARLA layout: RGB frames in
    # <data_folder>/rgb, per-pixel annotations in <data_folder>/segmentation.
    segm_dir = data_folder / "segmentation"
    img_dir = data_folder / "rgb"

    results = evaluate(checkpoints_path=path_to_checkpoint,
                       inp_images_dir=img_dir,
                       annotations_dir=segm_dir)
    return results


def evaluate_mapillary_segmentation_model(path_to_checkpoint, data_folder):
    # Same evaluation against the prepped Mapillary validation split.
    segm_dir = data_folder / "mapillary_depth_segm/segm_annotations_prepped_val"
    img_dir = data_folder / "mapillary_depth_segm/images_prepped_val"

    results = evaluate(checkpoints_path=path_to_checkpoint,
                       inp_images_dir=img_dir,
                       annotations_dir=segm_dir)
    return results
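
# A minimal usage sketch for the two helpers above, assuming data_folder is a
# pathlib.Path; the checkpoint prefix and dataset root below are hypothetical.
if __name__ == "__main__":
    from pathlib import Path

    carla_results = evaluate_carla_segmentation_model(
        path_to_checkpoint="./checkpoints/carla_model",  # hypothetical checkpoint prefix
        data_folder=Path("./carla_data"),                # hypothetical dataset root
    )
    print(carla_results["frequency_weighted_IU"])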
# Imports assumed by this snippet (adjust to your keras_segmentation layout).
import tempfile

import numpy as np

from keras_segmentation.models import all_models
from keras_segmentation.predict import predict, predict_multiple, evaluate


def test_model():
    model_name = "fcn_8"
    h = 224
    w = 256
    n_c = 100
    # Throwaway checkpoint prefix for this smoke test.
    check_path = tempfile.mktemp()

    m = all_models.model_from_name[model_name](n_c, input_height=h, input_width=w)

    # Train for a couple of steps just to exercise the training loop.
    # tr_im, tr_an, te_im and te_an are module-level dataset paths
    # (see the sketch after this function).
    m.train(train_images=tr_im,
            train_annotations=tr_an,
            steps_per_epoch=2,
            epochs=2,
            checkpoints_path=check_path)

    # Predict on a single in-memory image.
    m.predict_segmentation(np.zeros((h, w, 3))).shape

    # Predict on a directory of images and on a list of arrays.
    predict_multiple(
        inp_dir=te_im, checkpoints_path=check_path, out_dir="/tmp")
    predict_multiple(inps=[np.zeros((h, w, 3))] * 3,
                     checkpoints_path=check_path, out_dir="/tmp")

    # Evaluate through the model instance...
    ev = m.evaluate_segmentation(inp_images_dir=te_im, annotations_dir=te_an)
    assert ev['frequency_weighted_IU'] > 0.01
    print(ev)

    # ...and through the functional API, loading the model from the checkpoint.
    o = predict(inp=np.zeros((h, w, 3)), checkpoints_path=check_path)
    o.shape

    ev = evaluate(inp_images_dir=te_im, annotations_dir=te_an,
                  checkpoints_path=check_path)
    assert ev['frequency_weighted_IU'] > 0.01
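
# test_model() relies on module-level dataset paths tr_im, tr_an, te_im and
# te_an that are not shown in this snippet. A minimal sketch of such fixtures,
# using hypothetical paths to a small prepped image/annotation dataset:
tr_im = "./example_dataset/images_prepped_train"
tr_an = "./example_dataset/annotations_prepped_train"
te_im = "./example_dataset/images_prepped_test"
te_an = "./example_dataset/annotations_prepped_test"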
def main():

    # Set up the testing parameters: input paths, checkpoint location, etc.
    checkpoints_path = "./checkpoints_mobilenet_unet_2class/"
    test_images_path = "./dataset2/images_prepped_test/"
    test_annotations_path = "./dataset2/annotations_prepped_test_2class/"

    # The model checkpoint path must exist; otherwise exit with an error.
    if not os.path.exists(checkpoints_path):
        print("[ERROR] invalid checkpoints path {}".format(checkpoints_path))
        sys.exit(1)

    # Load model from checkpoint path.
    model = model_from_checkpoint_path(checkpoints_path)

    # Get Intersection over Union (IoU) results for the test data set.
    # The model was loaded above from the latest checkpoint in checkpoints_path;
    # the input images and masks (annotations) are read from their directories.
    test_results = evaluate(inp_images_dir=test_images_path,
                            annotations_dir=test_annotations_path,
                            model=model)

    print(test_results)
    print("Evaluation complete")
Example #5
def main():

    # Set up the prediction parameters: input paths, output paths, etc.
    checkpoints_path = "./checkpoints_mobilenet_unet_2class/"
    test_images_path = "./dataset2/images_prepped_test/"
    test_masks_path = "./dataset2/masks_prepped_test_2class/"
    predicted_masks_path = "./dataset2/masks_predicted/"
    predicted_overlay_path = "./dataset2/overlay_predicted/"

    # Set to 1 to enable printing of IoU scores for the test set.
    print_test_results = 0

    # The model checkpoint path must exist; otherwise exit with an error.
    if not os.path.exists(checkpoints_path):
        print("[ERROR] invalid checkpoints path {}".format(checkpoints_path))
        sys.exit(1)

    # Get Intersection over Union (IoU) results for the test data set.
    # The model is loaded from the latest checkpoint in checkpoints_path;
    # the input images and masks (annotations) are read from their directories.
    if print_test_results:
        test_results = evaluate(inp_images_dir=test_images_path,
                                annotations_dir=test_masks_path,
                                checkpoints_path=checkpoints_path)
        print(test_results)
        print("Test results complete")

    # Create output directories to store the predicted masks and overlays,
    # and remove any existing PNG files from previous runs.
    if not os.path.exists(predicted_masks_path):
        os.makedirs(predicted_masks_path)

    if not os.path.exists(predicted_overlay_path):
        os.makedirs(predicted_overlay_path)

    for f in glob.glob(predicted_masks_path + '/*.png'):
        os.remove(f)

    for f in glob.glob(predicted_overlay_path + '/*.png'):
        os.remove(f)

    # Predict results for the input images and store in output directory.
    # Store both the predicted masks and the overlaid images with masks.

    # Step 1. Load model from checkpoint path.
    model = model_from_checkpoint_path(checkpoints_path)

    # Step 2. Read the images one by one and feed each into the predict function.
    #    The predicted mask is written to the predicted-masks directory.
    #    Input images are assumed to be in PNG format.
    for infile in glob.glob(test_images_path + '/*.png'):
        filename = os.path.split(infile)[1]
        outfile = os.path.join(predicted_masks_path, filename)
        overlayfile = os.path.join(predicted_overlay_path, filename)

        print("Predicting mask for file {}, output in {}".format(
            infile, outfile))

        # Create predicted mask from input file, and store it in outfile.
        predict(inp=infile, out_fname=outfile, model=model)

        # Overlay the predicted mask file over input file for display.
        overlay_image_with_mask(image_file=infile,
                                mask_file=outfile,
                                overlay_file=overlayfile)

    print("Prediction complete")
Example #6
from keras_segmentation.predict import predict_multiple, evaluate, model_from_checkpoint_path
from keras_segmentation.data_utils.visualize_dataset import visualize_segmentation_dataset

# Checkpoint prefixes follow the pattern ./saveModel/<model_name>_1.
model_names = ["vgg_unet", "vgg_pspnet", "vgg_segnet", "fcn_32_vgg"]

for model_name in model_names:
    output = evaluate(
        checkpoints_path=f"./saveModel/{model_name}_1",
        inp_images_dir="./dataset1/test",
        annotations_dir="./dataset1/ano_test",
    )
    print(model_name, end=" | ")
    print(output)
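
# An alternative structure for the loop above (a sketch, not in the original
# script): keep the metrics per model and print a summary ranked by
# frequency_weighted_IU, one of the keys in the dict returned by evaluate().
results = {}
for model_name in model_names:
    results[model_name] = evaluate(
        checkpoints_path=f"./saveModel/{model_name}_1",
        inp_images_dir="./dataset1/test",
        annotations_dir="./dataset1/ano_test",
    )

ranked = sorted(results.items(),
                key=lambda item: item[1]["frequency_weighted_IU"],
                reverse=True)
for model_name, metrics in ranked:
    print(f"{model_name}: frequency_weighted_IU={metrics['frequency_weighted_IU']:.4f}")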