def get_segmentation_model(model_type, freeze=True):
    x = None
    segmentation_model = None
    if model_type == "three_class_trained":
        segmentation_model = model_from_checkpoint_path(
            three_class_checkpoints_path)

    elif model_type == "seven_class_trained":
        segmentation_model = model_from_checkpoint_path(
            seven_class_checkpoints_path)

        x = segmentation_model.get_layer("conv2d_6").output

    elif model_type == "pretrained":
        segmentation_model = pspnet_101_cityscapes()
        x = segmentation_model.get_layer("activation_107").output

    elif model_type == "seven_class_vanilla_psp":
        segmentation_model = model_from_checkpoint_path(
            seven_class_vanilla_psp_path)
        x = segmentation_model.get_layer("activation_10").output

    elif model_type == "seven_class_vanilla_psp_depth":
        from utils.pspnet import model_from_checkpoint_path as custom_model_from_checkpoint_path
        segmentation_model = custom_model_from_checkpoint_path(
            seven_class_vanilla_psp_depth_path)
        x = segmentation_model.get_layer("activation_10").output

    elif model_type == "seven_class_mobile":
        segmentation_model = model_from_checkpoint_path(
            seven_class_mobile_checkpoints_path)
        output_layer = get_layer_with_name(segmentation_model,
                                           "conv_dw_6_relu")

        x = output_layer.output

    elif model_type == "resnet50_pspnet_8_classes":
        segmentation_model = model_from_checkpoint_path(
            resnet50_pspnet_8_classes)
        output_layer = segmentation_model.get_layer(name="conv2d_6")
        x = output_layer.output

    if segmentation_model is None:
        raise ValueError("Unknown model_type: {}".format(model_type))
    if x is None:
        # No intermediate layer was selected for this model type;
        # fall back to the full model output so Model() below is valid.
        x = segmentation_model.output

    plot_model(segmentation_model, "segmentation_model.png", show_shapes=True)
    # Explicitly define the new model input and output by slicing out old model layers.
    model_new = Model(inputs=segmentation_model.layers[0].input, outputs=x)

    if freeze:
        for layer in model_new.layers:
            layer.trainable = False

    return model_new
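A minimal usage sketch (assuming the Keras imports used above; the Conv2D head and its parameters are illustrative, not part of the original code): the truncated, frozen model serves as a fixed feature extractor for new trainable layers.

from keras.layers import Conv2D
from keras.models import Model

# Frozen feature extractor ending at the intermediate layer selected above.
backbone = get_segmentation_model("pretrained", freeze=True)

# Illustrative trainable head on top of the extracted features.
head = Conv2D(8, (3, 3), padding="same", activation="relu")(backbone.output)
new_model = Model(inputs=backbone.input, outputs=head)
new_model.summary()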
Example 2

def evaluate(model=None,
             inp_images=None,
             annotations=None,
             checkpoints_path=None,
             epoch=None):
    # Completed implementation of the evaluate function in keras_segmentation.predict.
    # Input: arrays of paths or ndarrays.
    if model is None and checkpoints_path is not None:
        model = model_from_checkpoint_path(checkpoints_path, epoch)

    ious = []
    pred_time = []
    for inp, ann in zip(inp_images, annotations):
        t_start = time.time()
        pr = predict_fast(model, inp)
        t_end = time.time()
        pred_time.append(t_end - t_start)
        print('Prediction time: ', t_end - t_start)

        gt = get_segmentation_arr(ann,
                                  model.n_classes,
                                  model.output_width,
                                  model.output_height,
                                  no_reshape=True)
        gt = gt.argmax(-1)
        iou = metrics.get_iou(gt, pr, model.n_classes)
        ious.append(iou)

    print("Class wise IoU ", np.mean(ious, axis=0))
    print("Total  IoU ", np.mean(ious))
    print("Median prediction time:", np.median(pred_time))
Example 3
def evaluate(model=None,
             inp_images=None,
             annotations=None,
             checkpoints_path=None,
             epoch=None,
             visualize=False,
             output_folder=''):
    # Completed implementation of the evaluate function in keras_segmentation.predict.
    # Input: arrays of paths or ndarrays.
    if model is None and checkpoints_path is not None:
        model = model_from_checkpoint_path(checkpoints_path, epoch)

    ious = []
    pred_time = []
    for inp, ann in tqdm(zip(inp_images, annotations)):
        t_start = time.time()
        pr = predict_fast(model, inp)
        t_end = time.time()
        pred_time.append(t_end - t_start)
        #print('Prediction time: ', pred_time)

        gt = get_segmentation_arr(ann,
                                  model.n_classes,
                                  model.output_width,
                                  model.output_height,
                                  no_reshape=True)
        gt = gt.argmax(-1)
        iou = metrics.get_iou(gt, pr, model.n_classes)
        ious.append(iou)

        if visualize:
            fig = vis_pred_vs_gt_separate(inp, pr, gt)
            plt.title("Predicted mask and errors. "
                      "IOU (bg, crop, lane):" + str(iou))
            if not output_folder:
                # epoch may be an int; build the file-name tag once as a string.
                epoch_tag = '' if epoch is None else str(epoch)
                out_name = (checkpoints_path + epoch_tag + '_IOU_' +
                            os.path.basename(inp))
                fig.savefig(out_name)
                print('Saving to: ', out_name)
            else:
                fig.savefig(
                    os.path.join(
                        output_folder,
                        os.path.basename(checkpoints_path) + '_IOU_' +
                        os.path.basename(inp)))
    print(ious)
    ious = np.array(ious)
    print("Class wise IoU ", np.mean(ious, axis=0))
    print("Total  IoU ", np.mean(ious))
    print("Mean prediction time:", np.mean(pred_time))
Example 4
def segment_images_psp(path_to_checkpoint, folder_name):

    output_folder_name = folder_name + "_segm"
    checkpoint = model_from_checkpoint_path(path_to_checkpoint)
    #checkpoint = pspnet_101_cityscapes()
    output_folder = data_folder / output_folder_name
    verify_folder_exists(output_folder)

    for filename in os.listdir(str(data_folder / folder_name)):
        # Swap the file extension for .png (replace() on the whole path would
        # also hit any "jpg" elsewhere in the name).
        out_name = os.path.splitext(str(output_folder / filename))[0] + ".png"

        in_name = str(data_folder / folder_name / filename)

        checkpoint.predict_segmentation(out_fname=out_name, inp=in_name)
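`data_folder` and `verify_folder_exists` are module-level names not shown in this snippet; a sketch of plausible definitions and a call, under the assumption that `data_folder` is a `pathlib.Path`:

from pathlib import Path

data_folder = Path("dataset")  # assumed root folder holding the image folders

def verify_folder_exists(folder):
    # Hypothetical helper: create the output folder if it is missing.
    Path(folder).mkdir(parents=True, exist_ok=True)

# Reads dataset/field_images/*.jpg and writes masks to dataset/field_images_segm/
segment_images_psp("checkpoints/pspnet_field", "field_images")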
Example 5
def predict(images, predictions, pretrained, entire_image, checkpoint_dir):

    if pretrained:
        # Load the pretrained model trained on the Cityscapes dataset.
        model = pspnet_101_cityscapes()
    else:
        model = model_from_checkpoint_path(normpath(checkpoint_dir))

    files = [f for f in listdir(images) if isfile(join(images, f))]

    for file in files:
        name, ext = splitext(file)
        # join() makes the folder arguments independent of trailing separators.
        if entire_image:
            predict_standard(model,
                             inp=join(images, file),
                             out_fname=join(predictions, f'{name}.png'))
        else:
            predict_split(model,
                          inp=join(images, file),
                          out_fname=join(predictions, f'{name}.png'))
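`predict_standard` and `predict_split` are project-local helpers (full-frame versus tiled prediction, selected by the `entire_image` flag). A minimal usage sketch with placeholder paths:

predict(images="test_images",
        predictions="test_predictions",
        pretrained=True,
        entire_image=True,
        checkpoint_dir=None)  # unused when pretrained=True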
Example 6

def main():

    # Set up the testing parameters, input paths, etc.
    checkpoints_path = "./checkpoints_mobilenet_unet_2class/"
    test_images_path = "./dataset2/images_prepped_test/"
    test_annotations_path = "./dataset2/annotations_prepped_test_2class/"

    # Model checkpoint path should exist, else it is an error.
    if not os.path.exists(checkpoints_path):
        print("[ERROR] invalid checkpoints path {}".format(checkpoints_path))
        sys.exit(1)

    # Load model from checkpoint path.
    model = model_from_checkpoint_path(checkpoints_path)

    # Get Intersection over Union (IoU) results for the test data set.
    # The model is picked up from the latest model in the checkpoints_path.
    # The input images and masks (annotations) are picked up from their paths.
    test_results = evaluate(inp_images_dir=test_images_path,
                            annotations_dir=test_annotations_path,
                            model=model)

    print(test_results)
    print("Evaluation complete")
Example 7

    def __init__(self):

        #### Hyperparameters ####
        self.image = Image()
        self.output_image = Image()
        self.bridge = CvBridge()

        # ROS setup parameters
        self.image_topic_name = rosparam.get_param('camera_topic')
        self.model_config = rosparam.get_param('model_config')
        self.model_weights = rosparam.get_param('model_weights')
        self.model_prefix = rosparam.get_param('model_prefix')
        self.visualize = rosparam.get_param('visualize')

        self.output_video = True
        self.output_video_file_s = rosparam.get_param('output_video_file_s')
        self.output_video_file_l = rosparam.get_param('output_video_file_l')

        self.lane_fit = True
        self.evaluate = False
        self.output_file = None
        self.display = False
        self.seg_arr = []
        self.fit = []
        self.class_number = 2

        #self.loaded_weights = False
        self.img_receive = False
        self.epoch = None
        #model = predict.model_from_checkpoint_files( model_prefix, epoch)#model_config, model_weights)
        self.model = predict.model_from_checkpoint_path(
            self.model_prefix, self.epoch)

        #Listen to image messages and publish predictions with callback
        self.img_sub = rospy.Subscriber(self.image_topic_name, Image,
                                        self.imageCallback)

        #Set up publisher for prediction messages
        # queue_size value is an assumption; rospy warns when it is omitted.
        self.fit_pub = rospy.Publisher('centerline_local', PoseArray,
                                       queue_size=10)
        self.rate = rospy.Rate(1)  # 1 Hz

        # Saving outputs as Video file
        self.fourcc = cv2.VideoWriter_fourcc(
            *"MJPG")  # ('I','Y','U','V') #tried('M','J','P','G')
        self.wr = None
        (self.out_h, self.out_w) = (None, None)
        self.isColor = True
        self.fps = 6

        self.fourcc_1 = cv2.VideoWriter_fourcc(
            *"MJPG")  #('I','Y','U','V') #tried('M','J','P','G')
        self.wr_1 = None
        (self.out_h_1, self.out_w_1) = (None, None)
        self.isColor_1 = True
        self.fps_1 = 6

        ## GNSS ground truth
        self.book = pe.get_book(
            file_name=expanduser("~") +
            "/planner_ws/src/vision-based-navigation-agri-fields/auto_nav/config/ground_truth_coordinates.xls"
        )
        self.gt_lat_utm = []
        self.gt_long_utm = []
        self.lane_number = str(2)
        self.listener = tf.TransformListener()
        self.listener.waitForTransform("utm", "base_link", rospy.Time(),
                                       rospy.Duration(4.0))

        for row in self.book["Sheet" + self.lane_number]:
            self.gt_lat_utm.append(row[1])  # Latitude
            self.gt_long_utm.append(row[2])  # Longitude
        self.dist_0 = 0
Example 8
    def __init__(self):

        #### Hyperparameters ####
        self.image = Image()
        self.output_image = Image()
        self.bridge = CvBridge()

        # ROS setup parameters
        self.image_topic_name = rosparam.get_param(
            'auto_nav/segnet_lane_detection/camera_topic')
        self.model_config = rosparam.get_param(
            'auto_nav/segnet_lane_detection/model_config')
        self.model_weights = rosparam.get_param(
            'auto_nav/segnet_lane_detection/model_weights')
        self.model_prefix = rosparam.get_param(
            'auto_nav/segnet_lane_detection/model_prefix')
        self.visualize = rosparam.get_param(
            'auto_nav/segnet_lane_detection/visualize')

        self.output_video = True
        self.output_video_file_s = rosparam.get_param(
            'auto_nav/segnet_lane_detection/output_video_file_s')
        self.output_video_file_l = rosparam.get_param(
            'auto_nav/segnet_lane_detection/output_video_file_l')

        self.lane_fit = True
        self.evaluate = False
        self.output_file = None
        self.display = False
        self.seg_arr = []
        self.fit = []
        self.class_number = 2

        #self.loaded_weights = False
        self.img_receive = False
        self.epoch = None
        #model = predict.model_from_checkpoint_files( model_prefix, epoch)#model_config, model_weights)
        self.model = predict.model_from_checkpoint_path(
            self.model_prefix, self.epoch)  #, self.loaded_weights

        #Listen to image messages and publish predictions with callback
        self.img_sub = rospy.Subscriber(self.image_topic_name, Image,
                                        self.imageCallback)

        #Set up publisher for prediction messages
        # queue_size value is an assumption; rospy warns when it is omitted.
        self.fit_pub = rospy.Publisher('centerline_local', PoseArray,
                                       queue_size=10)
        self.rate = rospy.Rate(1)  # 1 Hz

        # Saving outputs as Video file
        self.fourcc = cv2.VideoWriter_fourcc(
            *"MJPG")  # ('I','Y','U','V') #tried('M','J','P','G')
        self.wr = None
        (self.out_h, self.out_w) = (None, None)
        self.isColor = True
        self.fps = 6

        self.fourcc_1 = cv2.VideoWriter_fourcc(
            *"MJPG")  #('I','Y','U','V') #tried('M','J','P','G')
        self.wr_1 = None
        (self.out_h_1, self.out_w_1) = (None, None)
        self.isColor_1 = True
        self.fps_1 = 6
Example 9

if __name__ == '__main__':

    try:
        # Initialize node
        rospy.init_node('online_da_predict')
        nav_obj = hilly_nav()

        model_prefix = rospy.get_param("/model_prefix")
        epoch = rospy.get_param("/epoch")
        input_folder = rospy.get_param("/input_folder")
        output_folder = rospy.get_param("/output_folder")

        #Load model
        model = predict.model_from_checkpoint_path(model_prefix, epoch)

        while not rospy.is_shutdown():

            # print('Output_folder',output_folder)
            # im_files = glob.glob(os.path.join(input_folder,'*.jpg'))
            # print(os.path.join(input_folder+'*.png'))
            # for im in sorted(im_files):
            #     if output_folder:
            #         base = os.path.basename(im)
            #         output_file = os.path.join(output_folder,os.path.splitext(base)[0])+"_pred.png"
            #         print(output_file)
            #     else:
            #         output_file = None
            if nav_obj.img_received:
                seg_arr = nav_obj.predict_on_image(model,
Example 10
    plot_3d = "N"
    frame_interval_for_3d = 1
    plot_main_lines = "N"
    plot_center = "N"
    plot_apex = "N"
    plot_all_lines = "N"
    plot_intersection = "N"
    plot_contour = "N"
    plot_ellipse = "N"
    show_original = "N"
    show_segmentation = "N"
    show_contour = "N"
    show_output = "N"

    # First we select the model to use; the best results were obtained with mobilenet, so run the program with "-m 0"
    model = model_from_checkpoint_path("Checkpoints\\mobilenet_alltools")

    angleFilter = [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0
    ]

    SetBoxWidget()
    tform = vtk.vtkTransform()

    base_path = os.path.join("C:\\Users\\Leonardo\\Desktop\\DatasetToTag",
                             folder)

    create_excel(folder)

    # Loop so that it advances to the next image only when the 3D model has been set.

Example 11
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 23 18:08:34 2020

@author: craig
"""

from keras_segmentation.models.unet import vgg_unet
from keras_segmentation.predict import model_from_checkpoint_path

model = vgg_unet(n_classes=51, input_height=416, input_width=608)

model = model_from_checkpoint_path("weights/vgg_unet_1")

model.train(
    train_images="dataset1/images_prepped_train/",
    train_annotations="dataset1/annotations_prepped_train/",
    val_images="dataset1/images_prepped_test/",
    val_annotations="dataset1/annotations_prepped_test/",
    verify_dataset=True,
    #    load_weights="weights/vgg_unet_1.4" ,
    optimizer_name='adadelta',
    do_augment=True,
    augmentation_name="aug_all",
    checkpoints_path="weights/vgg_unet_1",
    epochs=10)

# Display the model's architecture
model.summary()
Example 12
    frame_interval_for_3d = 1
    plot_main_lines = "Y"
    plot_center = "N"
    plot_apex = "N"
    plot_all_lines = "N"
    plot_intersection = "N"
    plot_contour = "N"
    plot_ellipse = "N"
    show_original = "N"
    show_segmentation = "N"
    show_contour = "N"
    show_output = "Y"

    # First we select the model to use; the best results were obtained with mobilenet, so run the program with "-m 0"
    if run_model == 0:
        model = model_from_checkpoint_path("Checkpoints\\mobilenet_alltools")
    elif run_model == 1:
        model = model_from_checkpoint_path("..\\Checkpoints\\NewTool\\new_vgg_unet_tool")
    elif run_model == 2:
        model = model_from_checkpoint_path("..\\Checkpoints\\NewTool\\new_resnet_unet_tool")
    elif run_model == 3:
        model = model_from_checkpoint_path("..\\Checkpoints\\NewTool\\new_unet_tool")

    if save_video == "Y":
        try:
            out_vid = cv2.VideoWriter('..\\OutputVideo\\Degree.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 3, (960, 540))
        except cv2.error as e:
            print("cv2.error:", e)
        except Exception as e:
            print("Exception:", e)
        else:
            pass  # (the else-branch body is cut off in the source snippet)

Example 13

    # (opening of this add_argument call reconstructed from the identical code in Example 20)
    parser.add_argument("--input_folder",
                        default='',
                        help="(Relative) path to input image file")
    parser.add_argument(
        "--output_folder",
        default='',
        help=
        "(Relative) path to output image file. If empty, image is not written."
    )
    parser.add_argument(
        "--display",
        default=False,
        help="Whether to display video on screen (can be slow)")

    args = parser.parse_args()

    #Load model
    model = predict.model_from_checkpoint_path(args.model_prefix, args.epoch)

    while not rospy.is_shutdown():

        print('Output_folder', args.output_folder)
        im_files = glob.glob(os.path.join(args.input_folder, '*.jpg'))
        print(os.path.join(args.input_folder, '*.jpg'))
        for im in sorted(im_files):
            if args.output_folder:
                base = os.path.basename(im)
                output_file = os.path.join(
                    args.output_folder,
                    os.path.splitext(base)[0]) + "_pred.png"
                print(output_file)
            else:
                output_file = None
Example 14
from keras_segmentation.predict import model_from_checkpoint_path

from tensorflow.compat.v1 import InteractiveSession
from tensorflow.compat.v1 import ConfigProto

config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)

model = model_from_checkpoint_path("/home/klz/checkpoints/vgg_unet_1")

out = model.predict_multiple(inp_dir="/home/klz/unlabeled_food_images/",
                             out_dir="/home/klz/unlabeled_food_annotations/",
                             colors=[(0, 0, 0), (255, 255, 255)])

# import matplotlib.pyplot as plt
# plt.imshow(out)

# print(model.evaluate_segmentation( inp_images_dir="/home/klz/food_test_images/",
# 									annotations_dir="/home/klz/food_test_annotations/" ) )
Example 15
    parser.add_argument('--prediction_in', help='Path to the test images', required=True, default=None)
    parser.add_argument('--prediction_out', help='Path to the output csv files folder', required=True, default=None)
    parser.add_argument('--prediction_tmp', help='Path to the temporary csv files folder', required=False, default=None)
    parser.add_argument('--continuous', action='store_true', help='Whether to continuously load test images and perform prediction', required=False, default=False)
    parser.add_argument('--delete_input', action='store_true', help='Whether to delete the input images rather than move them to --prediction_out directory', required=False, default=False)
    parser.add_argument('--clash_suffix', help='The file name suffix to use in case the input file is already a PNG and moving it to the output directory would overwrite the prediction PNG', required=False, default="-in")
    parser.add_argument('--memory_fraction', type=float, help='Memory fraction to use by tensorflow, i.e., limiting memory usage', required=False, default=0.5)
    parsed = parser.parse_args()

    try:
        # apply memory usage
        print("Using memory fraction: %f" % parsed.memory_fraction)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = parsed.memory_fraction
        set_session(tf.Session(config=config))

        # load model
        model_dir = os.path.join(parsed.checkpoints_path, '')
        print("Loading model from %s" % model_dir)
        model = model_from_checkpoint_path(model_dir)

        # predict
        while True:
            predict_on_images(model, parsed.prediction_in, parsed.prediction_out, parsed.prediction_tmp,
                              parsed.delete_input, clash_suffix=parsed.clash_suffix)
            if not parsed.continuous:
                break

    except Exception as e:
        print(traceback.format_exc())
Example 16
    def predict(self,
                model=None,
                inp=None,
                out_fname=None,
                checkpoints_path=None,
                clrs=None,
                out_prob_file=None):
        """Make prediction from an img and loaded model"""

        if model is None and (checkpoints_path is not None):
            model = model_from_checkpoint_path(checkpoints_path)

        assert inp is not None
        assert isinstance(inp, (np.ndarray, six.string_types)), \
            "Input should be the CV image or the input file name"

        if isinstance(inp, six.string_types):
            inp = cv2.imread(inp)

        assert len(inp.shape) == 3, "Image should be h,w,3"
        original_h = inp.shape[0]
        original_w = inp.shape[1]

        output_width = model.output_width
        output_height = model.output_height
        input_width = model.input_width
        input_height = model.input_height
        n_classes = model.n_classes

        img_ar = get_image_array(inp,
                                 input_width,
                                 input_height,
                                 ordering=IMAGE_ORDERING)
        pred = model.predict(np.array([img_ar]))[0]

        # Creating probabilities file
        if out_prob_file is not None:
            out_prob_file += "_prob_{}x{}.csv".format(output_width,
                                                      output_height)

            with open(out_prob_file, 'w+') as file:
                # Header
                header = "x y "
                for i in range(0, n_classes):
                    header += "C{} ".format(str(i))
                header += "class\n"
                file.write(header)

                # Pixel per pixel
                coord_x = 0
                coord_y = 0
                for pixel in pred:
                    line = "{} {}".format(coord_x, coord_y)
                    for class_prob in pixel:
                        line += " {}".format(str(class_prob))
                    line += " {}".format(str(np.argmax(pixel))) + "\n"

                    file.write(line)

                    coord_x += 1
                    if coord_x >= output_width:
                        coord_x = 0
                        coord_y += 1

        pred = pred.reshape(
            (output_height, output_width, n_classes)).argmax(axis=2)

        seg_img = np.zeros((output_height, output_width, 3))

        if clrs is None:
            colors = class_colors
        else:
            colors = clrs

        for c in range(n_classes):
            seg_img[:, :, 0] += ((pred[:, :] == c) * (colors[c][0])) \
                .astype('uint8')
            seg_img[:, :, 1] += ((pred[:, :] == c) * (colors[c][1])) \
                .astype('uint8')
            seg_img[:, :, 2] += ((pred[:, :] == c) * (colors[c][2])) \
                .astype('uint8')

        seg_img = cv2.resize(seg_img, (original_w, original_h))

        if out_fname is not None:
            cv2.imwrite(out_fname, seg_img)

        return pred
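The probability file written above is a whitespace-separated table with one row per output pixel (header `x y C0 ... class`, x varying fastest). A sketch of reading it back into an array; the helper name is hypothetical:

import numpy as np

def load_prob_csv(path, output_width, output_height, n_classes):
    data = np.loadtxt(path, skiprows=1)   # skip the "x y C0 ... class" header
    probs = data[:, 2:2 + n_classes]      # drop x, y and the trailing argmax column
    # Rows were written x-fastest (x resets at output_width), i.e. row-major in (y, x).
    return probs.reshape(output_height, output_width, n_classes)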
Example 17
parser.add_argument(
    "--inp_path",
    default=os.path.expanduser('~') +
    "/Third_Paper/Datasets/Frogn_Dataset/images_prepped_test/frogn_10000.png")
parser.add_argument("--pre_trained", default="True", type=bool)
parser.add_argument("--predict_multiple_images", default="False", type=bool)
args = parser.parse_args()

model = keras_segmentation.models.segnet.segnet(n_classes=4,
                                                input_height=320,
                                                input_width=640)
#pre_trained = True
#predict_multiple_images = False

if args.pre_trained:
    model = model_from_checkpoint_path(args.model_path)

else:
    model.train(train_images=args.train_images_path,
                train_annotations=args.train_annotations_path,
                checkpoints_path=args.model_path,
                epochs=5)

if args.predict_multiple_images:
    out = model.predict_multiple(inp_dir=args.inp_dir_path,
                                 checkpoints_path=args.model_path,
                                 out_dir=args.out_dir_path)

else:
    out = model.predict_segmentation(inp=args.inp_path,
                                     checkpoints_path=args.model_path)
    # (the remaining keyword arguments are cut off in the source snippet)

Example 18

def convertNumpyArrayToMat(img):
    # Reconstructed function header: this snippet starts mid-function in the
    # source, and the video loop below calls convertNumpyArrayToMat on the
    # predicted class mask. The seg_img initialization is an assumption.
    seg_img = np.zeros((img.shape[0], img.shape[1], 3))
    colors = class_colors

    for c in range(3):  # with 3 classes

        seg_img[:, :, 0] += ((img[:, :] == c) * (colors[c][0])).astype('uint8')
        seg_img[:, :, 1] += ((img[:, :] == c) * (colors[c][1])).astype('uint8')
        seg_img[:, :, 2] += ((img[:, :] == c) * (colors[c][2])).astype('uint8')

    seg_img = cv2.resize(seg_img, (640, 408))

    return seg_img
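The per-channel loop above can be collapsed into a single palette lookup; a sketch assuming `class_colors` is a list of RGB triples and `img` holds integer class ids:

import cv2
import numpy as np

palette = np.array(class_colors[:3], dtype=np.uint8)  # (3, 3): one color row per class
seg_img = palette[img]                                # (H, W) ids -> (H, W, 3) colors
seg_img = cv2.resize(seg_img, (640, 408))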


videoFilePath = "..\\..\\RealTime Video\\CV_2_cropped.mp4"

model = model_from_checkpoint_path("..\\..\\Checkpoints\\AllTools\\mobilenet_alltools")

cap = cv2.VideoCapture(videoFilePath)
i = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    dim = (960, 540)

    frame = cv2.resize(frame, dim, interpolation=cv2.INTER_LINEAR)

    if (i % 5) == 0:
        out = model.predict_segmentation(inp=frame)
        img = convertNumpyArrayToMat(out)
    i += 1  # advance the frame counter so only every 5th frame is predicted
Example 19

import tensorflow as tf
from keras_segmentation.predict import predict_multiple
from keras_segmentation.predict import model_from_checkpoint_path

gpu_devices = tf.config.experimental.list_physical_devices('GPU')
for device in gpu_devices:
    tf.config.experimental.set_memory_growth(device, True)

print(f"[INFO] Predicting on test set mask..")

pdr = predict_multiple(
	checkpoints_path=r"C:\Users\matte\PycharmProjects\ecographic_breast_nn\segnet\checkpoints\psp_unet",
	inp_dir=r"D:\FISICA MEDICA\radiomics_eco\Dataset_BUSI_with_GT\segnet\images_val",
	out_dir=r"C:\Users\matte\PycharmProjects\ecographic_breast_nn\segnet\outputs"
)

model = model_from_checkpoint_path(r"C:\Users\matte\PycharmProjects\ecographic_breast_nn\segnet\checkpoints\psp_unet")


print(f"[INFO] Evaluating model..")
print(model.evaluate_segmentation( inp_images_dir=r"D:\FISICA MEDICA\radiomics_eco\Dataset_BUSI_with_GT\segnet\images_val"  , annotations_dir=r"D:\FISICA MEDICA\radiomics_eco\Dataset_BUSI_with_GT\segnet\masks_val" ) )
Example 20

def main():
    parser = argparse.ArgumentParser(
        description=
        "Example: Run prediction on an image folder. Example usage: python lane_predict.py --model_prefix=models/resnet_3class --epoch=25 --input_folder=Frogn_Dataset/images_prepped_test --output_folder=."
    )
    parser.add_argument("--model_prefix",
                        default='',
                        help="Prefix of model filename")
    parser.add_argument("--epoch",
                        default=None,
                        help="Checkpoint epoch number")
    parser.add_argument("--input_folder",
                        default='',
                        help="(Relative) path to input image file")
    parser.add_argument(
        "--output_folder",
        default='',
        help=
        "(Relative) path to output image file. If empty, image is not written."
    )
    parser.add_argument(
        "--display",
        default=False,
        help="Whether to display video on screen (can be slow)")

    args = parser.parse_args()

    #Load model
    model = predict.model_from_checkpoint_path(args.model_prefix, args.epoch)

    print('Output_folder', args.output_folder)
    im_files = glob.glob(os.path.join(args.input_folder, '*.jpg'))
    print(os.path.join(args.input_folder, '*.jpg'))
    for im in im_files:
        if args.output_folder:
            base = os.path.basename(im)
            output_file = os.path.join(
                args.output_folder,
                os.path.splitext(base)[0]) + "_da_pred.png"  #
            print(output_file)
        else:
            output_file = None

        seg_arr, input_image, out_img, fit = predict_on_image(
            model,
            inp=im,
            lane_fit=False,
            evaluate=False,
            visualize="segmentation",
            output_file=output_file,
            display=True)
        vis_img = visualization(input_image,
                                seg_arr=seg_arr,
                                lane_fit=None,
                                evaluation=None,
                                n_classes=3,
                                visualize="segmentation",
                                display=False,
                                output_file=output_file)
        #
        # print(timeit.timeit(stmt = "for_loop(seq)",
        #                     setup="seq='Pylenin'",
        #                     number=10000))

        #print(timeit.timeit(predict_on_image(model,inp = im, lane_fit = False, evaluate = False, visualize = "segmentation", output_file = output_file, display=True)))
        print("--- %s seconds ---" % (time.time() - start_time))

    cv2.destroyAllWindows()
Example 21
import cv2
from keras_segmentation.predict import predict, model_from_checkpoint_path
import random

#https://divamgupta.com/image-segmentation/2019/06/06/deep-learning-semantic-segmentation-keras.html
model = model_from_checkpoint_path("checkpoints/resnet50_unet_1")

cap = cv2.VideoCapture("test.mp4")

if not cap.isOpened():
    print("Error opening video stream or file")

while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        mask, mask_image = predict(model=model, inp=frame)
        cv2.imshow('Frame', mask_image)

        # Press Q on the keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

    else:
        break

cap.release()

cv2.destroyAllWindows()
Example 22
def main():

    # Set up the prediction parameters, input paths, etc.
    checkpoints_path = "./checkpoints_mobilenet_unet_2class/"
    test_images_path = "./dataset2/images_prepped_test/"
    test_masks_path = "./dataset2/masks_prepped_test_2class/"
    predicted_masks_path = "./dataset2/masks_predicted/"
    predicted_overlay_path = "./dataset2/overlay_predicted/"

    # Enable printing of IoU scores for tests (disabled by default).
    print_test_results = False

    # Model checkpoint path should exist, else it is an error.
    if not os.path.exists(checkpoints_path):
        print("[ERROR] invalid checkpoints path {}".format(checkpoints_path))
        sys.exit(1)

    # Get Intersection over Union (IoU) results for the test data set.
    # The model is picked up from the latest checkpoint in checkpoints_path.
    # The input images and masks are picked up from their paths.
    if print_test_results:
        test_results = evaluate(inp_images_dir=test_images_path,
                                masks_dir=test_masks_path,
                                checkpoints_path=checkpoints_path)
        print(test_results)
        print("Test results complete")

    # Create output directory to store predicted masks and overlays.
    # Remove any existing png files.
    if not os.path.exists(predicted_masks_path):
        os.makedirs(predicted_masks_path)

    if not os.path.exists(predicted_overlay_path):
        os.makedirs(predicted_overlay_path)

    for f in glob.glob(predicted_masks_path + '/*.png'):
        os.remove(f)

    for f in glob.glob(predicted_overlay_path + '/*.png'):
        os.remove(f)

    # Predict results for the input images and store in output directory.
    # Store both the predicted masks and the overlaid images with masks.

    # Step 1. Load model from checkpoint path.
    model = model_from_checkpoint_path(checkpoints_path)

    # Step 2. Read images one by one and feed it in into predict function.
    #    Output predicted mask is written to the predicted masks directory.
    #    Assume that input images are in png format.
    for infile in glob.glob(test_images_path + '/*.png'):
        filename = os.path.split(infile)[1]
        outfile = os.path.join(predicted_masks_path, filename)
        overlayfile = os.path.join(predicted_overlay_path, filename)

        print("Predicting mask for file {}, output in {}".format(
            infile, outfile))

        # Create predicted mask from input file, and store it in outfile.
        predict(inp=infile, out_fname=outfile, model=model)

        # Overlay the predicted mask file over input file for display.
        overlay_image_with_mask(image_file=infile,
                                mask_file=outfile,
                                overlay_file=overlayfile)

    print("Prediction complete")
Example 23

output_name = 'prediction_overlay'
# model
checkpoints_path = os.path.join('../models', 'resnet50_segnet')
epoch = 20

figure_dpi = 300
figure_width_mm = 88.57
image_spacing_mm = figure_width_mm * 0.05
print(len(inp_names))
figure_height_mm = (
    (176 * len(inp_names) * 1.25) /
    (320 * 2 * 1.05)) * figure_width_mm  #Adjusting height to get a tight fit

#Load model first
model = model_from_checkpoint_path(checkpoints_path, epoch)

#Save one at a time as image

for ind, fname in enumerate(inp_names):
    #Load images
    inp = os.path.join(image_folder, fname)
    ann = os.path.join(annotations_folder, fname)
    #Run prediction
    pr = predict_fast(model, inp)
    gt = get_segmentation_arr(ann,
                              model.n_classes,
                              model.output_width,
                              model.output_height,
                              no_reshape=True)
    gt = gt.argmax(-1)