def test_model():
    model_name = "fcn_8"
    h = 224
    w = 256
    n_c = 100
    check_path = tempfile.mktemp()

    m = all_models.model_from_name[model_name](n_c,
                                               input_height=h,
                                               input_width=w)

    m.train(train_images=tr_im,
            train_annotations=tr_an,
            steps_per_epoch=2,
            epochs=2,
            checkpoints_path=check_path)

    m.train(train_images=tr_im,
            train_annotations=tr_an,
            steps_per_epoch=2,
            epochs=2,
            checkpoints_path=check_path,
            augmentation_name='aug_geometric',
            do_augment=True)

    m.predict_segmentation(np.zeros((h, w, 3))).shape

    predict_multiple(inp_dir=te_im,
                     checkpoints_path=check_path,
                     out_dir="/tmp")
    predict_multiple(inps=[np.zeros((h, w, 3))] * 3,
                     checkpoints_path=check_path,
                     out_dir="/tmp")

    ev = m.evaluate_segmentation(inp_images_dir=te_im, annotations_dir=te_an)
    assert ev['frequency_weighted_IU'] > 0.01
    print(ev)
    o = predict(inp=np.zeros((h, w, 3)), checkpoints_path=check_path)

    o = predict(inp=np.zeros((h, w, 3)),
                checkpoints_path=check_path,
                overlay_img=True,
                class_names=['nn'] * n_c,
                show_legends=True)
    print("pr")

    o.shape

    ev = evaluate(inp_images_dir=te_im,
                  annotations_dir=te_an,
                  checkpoints_path=check_path)
    assert ev['frequency_weighted_IU'] > 0.01
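
# A minimal sketch (not part of the original tests): checkpoints written by
# m.train() above can be reloaded later with model_from_checkpoint_path, the
# same helper used in several examples below.
def reload_checkpoint_sketch(check_path):
    from keras_segmentation.predict import model_from_checkpoint_path
    reloaded = model_from_checkpoint_path(check_path)
    # run a dummy prediction to confirm the restored model works
    return reloaded.predict_segmentation(np.zeros((224, 256, 3)))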
def test_model():
    model_name = "fcn_8"
    h = 224
    w = 256
    n_c = 100
    check_path = "/tmp/%d" % (random.randint(0, 199999))

    m = models.model_from_name[model_name](n_c, input_height=h, input_width=w)

    m.train(train_images=tr_im,
            train_annotations=tr_an,
            steps_per_epoch=2,
            epochs=2,
            checkpoints_path=check_path)

    m.predict_segmentation(np.zeros((h, w, 3))).shape

    predict_multiple(inp_dir=te_im,
                     checkpoints_path=check_path,
                     out_dir="/tmp")
    predict_multiple(inps=[np.zeros((h, w, 3))] * 3,
                     checkpoints_path=check_path,
                     out_dir="/tmp")

    o = predict(inp=np.zeros((h, w, 3)), checkpoints_path=check_path)
    o.shape
Example #3
def main():
    from keras_segmentation.predict import predict
    import cv2
    import imutils

    # Read original image from Input folder
    img_original = cv2.imread('./Images/Input/img_original.jpg')

    # Prediction method
    predict(
        # path to trained model
        checkpoints_path="./Model_CNN_Segmentation/model",
        # input image path from ./Images/Input
        inp="./Images/Input/img_original.jpg",
        # output image path in ./Images/CNN_Output
        out_fname="./Images/CNN_Output/img_segment_by_cnn.jpg")

    try:
        # Read the segmented image from the output folder and
        # check whether the classifier says there is a tumor or not
        # (get_rf_result() is defined elsewhere in this project)
        result, features = get_rf_result()
        # if not, use the blank mask
        if result == 0:
            img = cv2.imread('./Images/CNN_Output/blank_mask.jpg',
                             cv2.IMREAD_GRAYSCALE)
        else:
            img = cv2.imread('./Images/CNN_Output/img_segment_by_cnn.jpg',
                             cv2.IMREAD_GRAYSCALE)
        # given threshold to get tumor mask
        thresh = 200

        thresh_value, img_binary = cv2.threshold(img, thresh, 255,
                                                 cv2.THRESH_BINARY)

        # find contours of the suspicious regions
        cnts = cv2.findContours(img_binary.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        # loop over the contours to find the largest one and draw the suspicious area in green
        c1_max = max(cnts, key=cv2.contourArea)
        cv2.drawContours(img_original, [c1_max], -1, (0, 255, 0), 2)
        # writing tumor marked image into ./Images/CNN_Output/img_tumor_marked.jpg
        cv2.imwrite('./Images/CNN_Output/img_tumor_marked.jpg', img_original)
    except Exception:
        # fall back to writing the unmarked original image
        cv2.imwrite('./Images/CNN_Output/img_tumor_marked.jpg', img_original)
        print("An exception occurred")
Example #4
def main():
	from keras_segmentation.predict import predict
	import cv2
	import imutils
	import numpy as np

	# Read original image from Input folder
	img_original = cv2.imread('./Images/Input/img_original.jpg')

	# Prediction method
	predict(
		# path to trained model
		checkpoints_path="./Model_CNN_Skull_Remove/model",
		# input image path from images/original
		inp="./Images/Input/img_original.jpg",
		# output image path in ./Images/CNN_Skull_Remove_Output
		out_fname="./Images/CNN_Skull_Remove_Output/img_skull_removed_by_cnn_mask.jpg"
	)

	try:
		# threshold value
		thresh = 200
		# read image
		img = cv2.imread('./Images/CNN_Skull_Remove_Output/img_skull_removed_by_cnn_mask.jpg', cv2.IMREAD_GRAYSCALE)
		# binary threshold
		thresh_value, img_binary = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)
		cnts = cv2.findContours(img_binary.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
		cnts = imutils.grab_contours(cnts)
		# loop over the contours to find the largest one and draw the suspicious area in green
		c1_max = max(cnts, key=cv2.contourArea)
		# convert the contour to an int32 point array
		pts = np.array(c1_max, dtype=np.int32)
		# find the bounding rectangles
		rect = cv2.boundingRect(pts)
		x, y, w, h = rect
		cropped = img_original[y:y + h, x:x + w].copy()

		pts = pts - pts.min(axis=0)

		mask = np.zeros(cropped.shape[:2], np.uint8)
		cv2.drawContours(mask, [pts], -1, 255, -1, cv2.LINE_AA)
		# do bit-op
		out = cv2.bitwise_and(cropped, cropped, mask=mask)
		# write the skull-removed image to ./Images/CNN_Skull_Remove_Output/img_skull_removed_by_cnn.jpg
		cv2.imwrite('./Images/CNN_Skull_Remove_Output/img_skull_removed_by_cnn.jpg', out)
	except Exception:
		print("An exception occurred")
Example #5
def wormLabelAccuracy(imageName, image, actualDirectory, wormDict):

    imageorigsize = image.shape
    image = cv.resize(image, (704, 512), interpolation=cv.INTER_AREA)
    model = resnet50_unet(3, input_height=512, input_width=704)

    checkpointsPath = "../checkpoints/resnet_unet_3"
    # two-argument variant used in this project: loads checkpoint weights into
    # the model built above
    model_from_checkpoint_path(model, checkpointsPath)

    predictedim = predict.predict(model=model, inp=image)
    total = 0
    correct = 0
    cm = np.zeros((2, 2))

    for key in wormDict.keys():
        #print(key)
        if key[:3] == imageName:

            wormim = cv.imread(actualDirectory + key)

            wormim = cv.cvtColor(wormim, cv.COLOR_BGR2GRAY)

            wormim = cv.resize(wormim, (imageorigsize[1], imageorigsize[0]),
                               interpolation=cv.INTER_AREA)

            testim = np.zeros_like(predictedim)
            mask = wormim != 0
            testim[mask] = predictedim[mask]
            test1d = testim.flatten()
            counts = np.bincount(test1d)

            ifcorrect = False
            # pad counts so that counts[1] and counts[2] always exist
            if len(counts) == 1:
                counts = np.append(counts, 0)
            if len(counts) == 2:
                counts = np.append(counts, 0)
            if counts[1] > counts[2] and wormDict[key] == 1:
                correct += 1
                cm[0][0] += 1
                ifcorrect = True
            elif counts[1] > counts[2] and wormDict[key] == 2:
                cm[0][1] += 1
            elif counts[1] < counts[2] and wormDict[key] == 1:
                cm[1][0] += 1
            elif counts[1] < counts[2] and wormDict[key] == 2:
                cm[1][1] += 1
                correct += 1
                ifcorrect = True
            total += 1
            #print(key, ifcorrect)
    print(correct, total)
    print(cm)
    return correct, total, cm
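
# np.bincount only returns entries up to the largest label present, so the
# padding above is needed when class 1 or 2 never occurs. A minimal sketch of
# the same guard using bincount's minlength argument (illustration only):
def label_counts_sketch(labels_1d):
    import numpy as np
    # minlength=3 guarantees counts[0..2] exist even when the prediction
    # contains only background pixels (label 0)
    return np.bincount(labels_1d, minlength=3)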
Example #6
def saveVisualizeImages(img_dir, savedir, do_predict):
    # parameters renamed from "dir" and "predict" so the dir builtin and the
    # keras_segmentation predict module stay reachable inside the loop
    for file in glob(img_dir + "*"):
        image = cv.imread(file)
        if do_predict:
            checkpointsPath = "../checkpoints/resnet_unet_3"
            model = resnet50_unet(3, input_height=512, input_width=704)
            model_from_checkpoint_path(model, checkpointsPath)
            out = predict.predict(model=model, inp=image)
            im = visualizeImage(out)
        else:
            im = visualizeImage(image)
        cv.imwrite(
            "{save}/{name}".format(save=savedir,
                                   name=os.path.basename(file)), im)
Example #7
def find_classes(inp, out_fname):

    pr = predict(model=pspnet_50_ADE_20K(), inp=inp, out_fname=out_fname)

    CDict = seg2ann(seg_file=out_fname)

    k = 0
    classnum_list = []
    for key in sorted(CDict.keys(), reverse=True):
        k = k + 1
        if k > 10:
            break
        if key != 0:
            classnum_list.append(CDict[key][0])
            print(CDict[key])

    print(classnum_list)
    return classnum_list
Example #8
def predictImageType(image):
    image = cv.resize(image, (704, 512), interpolation=cv.INTER_AREA)
    model = resnet50_unet(3, input_height=512, input_width=704)

    checkpointsPath = "../checkpoints/resnet_unet_3"
    model_from_checkpoint_path(model, checkpointsPath)

    out = predict.predict(model=model, inp=image)

    out1d = out.flatten()
    # minlength=3 guards against an IndexError below when a class is absent
    counts = np.bincount(out1d, minlength=3)

    if counts[1] > counts[2]:
        print("alive")
        return "alive"
    else:
        print("dead")
        return "dead"
    def color_callback(self, color_img_ros):
        """
        Callback function for color image, do semantic segmantation and show the decoded image. For test purpose
        \param color_img_ros (sensor_msgs.Image) input ros color image message
        """

        print('callback')
        try:
            color_img = self.bridge.imgmsg_to_cv2(
                color_img_ros, "bgr8")  # Convert ros msg to numpy array
        except CvBridgeError as e:
            print(e)

        # Do semantic segmentation
        seg = predict(model=self.new_model, inp=color_img)
        seg = seg.astype(np.uint8)
        #print (seg.shape)
        #print (color_img.shape)

        # Alternative semantic segmentation path (kept commented out)
        '''
        class_probs = self.predict(color_img)
        confidence, label = class_probs.max(1)
        confidence, label = confidence.squeeze(0).numpy(), label.squeeze(0).numpy()
        label = resize(label, (self.img_height, self.img_width), order = 0, mode = 'reflect', preserve_range = True) # order = 0, nearest neighbour
        label = label.astype(np.int)

        # Add semantic class colors
        decoded = decode_segmap(label, self.n_classes, self.cmap)        # Show input image and decoded image
        confidence = resize(confidence, (self.img_height, self.img_width),  mode = 'reflect', preserve_range = True)
        '''

        cv2.imshow('Camera image', color_img)
        cv2.imshow('seg', seg)
        #cv2.imshow('confidence', confidence)
        #cv2.imshow('Semantic segmentation', decoded)
        cv2.waitKey(3)
Example #10
import cv2
from keras_segmentation.predict import predict, model_from_checkpoint_path

#https://divamgupta.com/image-segmentation/2019/06/06/deep-learning-semantic-segmentation-keras.html
model = model_from_checkpoint_path("checkpoints/resnet50_unet_1")

cap = cv2.VideoCapture("test.mp4")

if not cap.isOpened():
    print("Error opening video stream or file")

while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        # assumes a modified predict() that also returns the colorized mask image
        mask, mask_image = predict(model=model, inp=frame)
        cv2.imshow('Frame', mask_image)

        # Press Q on keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

    else:
        break

cap.release()

cv2.destroyAllWindows()
Example #11
def predict_segmentation(model, img):
    resized_img = cv2.resize(img, (473, 473))
    resized_out = predict(model, inp=resized_img)
    out = cv2.resize(resized_out, (512, 512), interpolation=cv2.INTER_NEAREST)
    return out.astype(np.uint8)
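
# A minimal usage sketch for the helper above, assuming the pretrained
# pspnet_50_ADE_20K model used in an earlier example (its native input size,
# 473x473, matches the resize inside predict_segmentation):
def predict_segmentation_usage_sketch(image_path):
    import cv2
    from keras_segmentation.pretrained import pspnet_50_ADE_20K
    model = pspnet_50_ADE_20K()
    img = cv2.imread(image_path)
    # returns a 512x512 uint8 label map, per the helper above
    return predict_segmentation(model, img)
Example #12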
#     optimizer_name='adadelta' , do_augment=True , augmentation_name="aug_all",
#     checkpoints_path = "weights/vgg_unet_1" , epochs=10
# )
# =============================================================================

# Display the model's architecture
model.summary()

# Save the entire model to a HDF5 file.
# The '.h5' extension indicates that the model should be saved to HDF5.
model.save('vgg_unet_1.h5')

from keras_segmentation.predict import predict

predict(checkpoints_path="weights/vgg_unet_1",
        inp="dataset1/images_prepped_test/3.png",
        out_fname="3_predict.png")

from keras_segmentation.predict import predict_multiple

predict_multiple(checkpoints_path="weights/vgg_unet_1",
                 inp_dir="dataset1/images_prepped_test/",
                 out_dir="weights/out/")
'''
#------------------------------------------------------------------------------
#
#        Verification of Input and Predicted Results
#
#------------------------------------------------------------------------------
        
'''
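Example #13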
    def color_depth_callback(self, color_img_ros, depth_img_ros):
        """
        Callback function to produce point cloud registered with semantic class color based on input color image and depth image
        \param color_img_ros (sensor_msgs.Image) the input color image (bgr8)
        \param depth_img_ros (sensor_msgs.Image) the input depth image (registered to the color image frame) (float32) values are in meters
        """
        tic = rospy.Time.now()
        diff = tic - self.last_time
        if diff.to_sec() < self.throttle_rate:
            return

        self.last_time = rospy.Time.now()
        # Convert ros Image message to numpy array
        try:
            color_img = self.bridge.imgmsg_to_cv2(color_img_ros, "bgr8")
            depth_img = self.bridge.imgmsg_to_cv2(depth_img_ros, "32FC1")
        except CvBridgeError as e:
            print(e)
        # Resize depth
        # note: use != (value comparison), not "is not", for the size check
        if (depth_img.shape[0] != self.img_height
                or depth_img.shape[1] != self.img_width):
            depth_img = resize(
                depth_img, (self.img_height, self.img_width),
                order=0,
                mode='reflect',
                preserve_range=True)  # order = 0, nearest neighbour
            depth_img = depth_img.astype(np.float32)
            # realsense camera gives depth measurements in mm
            if self.real_sense:
                depth_img = depth_img / 1000.0

        ################## Save all the Images for debugging #####################
        '''	
	filename = self.test_image_path_output_folder + "image" + str(self.imgCounter) + ".png" 
	print (filename)
	cv2.imwrite(filename, color_img) 
	self.imgCounter = self.imgCounter + 1 ; 
	'''
        ##########################################################################

        semantic_color = predict(model=self.new_model, inp=color_img)

        ################## Save all the Images for debugging #####################
        '''
	filename = self.test_image_path_output_folder + "semanticimage" + str(self.imgSemanticCounter) + ".png" 
	print (filename)
	cv2.imwrite(filename, semantic_color) 
	self.imgSemanticCounter = self.imgSemanticCounter + 1 ; 
	'''
        ##########################################################################
        #label_d = semantic_color.max(1)
        #label_d = resize(label_d, (self.img_height, self.img_width), order = 0, mode = 'reflect', preserve_range = True) # order = 0, nearest neighbour
        #label_d = label_d.astype(np.uint8)
        semantic_color = semantic_color.astype(np.uint8)
        #decoded = decode_segmap(label_d, self.n_classes, self.cmap)        # Show input image and decoded image
        # Publish semantic image
        if self.sem_img_pub.get_num_connections() > 0:
            try:
                semantic_color_msg = self.bridge.cv2_to_imgmsg(semantic_color,
                                                               encoding="bgr8")
                #rgb_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                self.sem_img_pub.publish(semantic_color_msg)
            except CvBridgeError as e:
                print(e)

        cloud_ros = self.cloud_generator.generate_cloud_semantic(
            color_img, depth_img, semantic_color, color_img_ros.header.stamp)
        #Publish point cloud
        #self.get_label(pred_labels)
        self.pcl_pub.publish(cloud_ros)
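Example #14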
    def __init__(self, gen_pcl=True):
        """
        Constructor
        \param gen_pcl (bool) whether generate point cloud, if set to true the node will subscribe to depth image
        """
        self.real_sense = rospy.get_param('/semantic_pcl/real_sense')
        self.imgCounter = 0
        self.imgSemanticCounter = 0
        #self.labels_pub  = rospy.Publisher("/semantic_pcl/labels", OverlayText, queue_size=1)
        self.labels_list = []
        #self.text = OverlayText()
        # Get image size
        self.img_width, self.img_height = rospy.get_param(
            '/camera/width'), rospy.get_param('/camera/height')
        self.throttle_rate = rospy.get_param('/semantic_pcl/throttle_rate')
        self.last_time = rospy.Time.now()
        # Set up the CNN used for semantics
        print('Setting up CNN model...')
        # Set device
        #self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Get dataset
        self.dataset = rospy.get_param('/semantic_pcl/dataset')
        # Setup model
        model_name = 'vgg_unet'
        model_path = rospy.get_param('/semantic_pcl/model_path')
        model_json_path = rospy.get_param('/semantic_pcl/model_json_path')
        test_image_path_input = rospy.get_param(
            '/model_params/test_image_path_input')
        test_image_path_output = rospy.get_param(
            '/model_params/test_image_path_output')
        model_input_height = rospy.get_param(
            '/model_params/model_input_height')
        model_input_width = rospy.get_param('/model_params/model_input_width')
        model_output_height = rospy.get_param(
            '/model_params/model_output_height')
        model_output_width = rospy.get_param(
            '/model_params/model_output_width')
        model_n_classes = rospy.get_param('/model_params/model_n_classes')
        model_checkpoints_path = rospy.get_param(
            '/model_params/model_checkpoints_path')
        self.test_image_path_output_folder = rospy.get_param(
            '/model_params/test_image_path_output_folder')

        if self.dataset == 'kucarsRisk':
            self.n_classes = 7  # Semantic class number
            # load the model + weights
            # Recreate the exact same model, including its weights and the optimizer
            #self.new_model = model_from_json(model_path,custom_objects=None)
            #clear_session()
            self.new_model = load_model(model_path)  #,custom_objects=None)
            self.new_model.input_width = model_input_width
            self.new_model.input_height = model_input_height
            self.new_model.output_width = model_output_width
            self.new_model.output_height = model_output_height
            self.new_model.n_classes = model_n_classes
            # Show the model architecture
            self.new_model.summary()
            print("Loaded model from disk")

            self.new_model.compile(loss='categorical_crossentropy',
                                   optimizer='adadelta',
                                   metrics=['accuracy'])
            ##### One Image Prediction ####

            img = cv2.imread(test_image_path_input)
            predict(model=self.new_model,
                    inp=test_image_path_input,
                    out_fname=test_image_path_output)
            out = cv2.imread(test_image_path_output)
            out_rgb = cv2.cvtColor(out, cv2.COLOR_BGR2RGB)
            #plt.imshow(out)
            #plt.show()

        self.cmap = color_map(
            N=self.n_classes,
            normalized=True)  # Color map for semantic classes
        # Declare array containers
        # Set up ROS
        print('Setting up ROS...')
        # CvBridge to transform ROS Image message to OpenCV image
        self.bridge = CvBridge()
        # Semantic image publisher
        self.sem_img_pub = rospy.Publisher(
            "/semantic_pcl/semantic_hazard_image", Image, queue_size=1)
        # Set up ros image subscriber
        # Set buff_size to average msg size to avoid accumulating delay
        if gen_pcl:
            # Point cloud frame id
            frame_id = rospy.get_param('/semantic_pcl/frame_id')
            # Camera intrinsic matrix
            fx = rospy.get_param('/camera/fx')
            fy = rospy.get_param('/camera/fy')
            cx = rospy.get_param('/camera/cx')
            cy = rospy.get_param('/camera/cy')
            intrinsic = np.matrix([[fx, 0, cx], [0, fy, cy], [0, 0, 1]],
                                  dtype=np.float32)
            self.pcl_pub = rospy.Publisher("/semantic_pcl/semantic_pcl",
                                           PointCloud2,
                                           queue_size=1)
            self.color_sub = message_filters.Subscriber(
                rospy.get_param('/semantic_pcl/color_image_topic'),
                Image,
                queue_size=1,
                buff_size=30 * 480 * 640)
            self.depth_sub = message_filters.Subscriber(
                rospy.get_param('/semantic_pcl/depth_image_topic'),
                Image,
                queue_size=1,
                buff_size=40 * 480 * 640
            )  # increase buffer size to avoid delay (despite queue_size = 1)
            self.ts = message_filters.ApproximateTimeSynchronizer(
                [self.color_sub, self.depth_sub], queue_size=1, slop=0.3
            )  # Take in one color image and one depth image with a limited time gap between message time stamps
            self.ts.registerCallback(self.color_depth_callback)
            #self.cloud_generator = ColorPclGenerator(intrinsic, self.img_width,self.img_height, frame_id , self.point_type)
            self.cloud_generator = ColorPclSemanticGenerator(
                intrinsic, self.img_width, self.img_height, frame_id)
        else:
            print("No Cloud generation")
            self.image_sub = rospy.Subscriber(
                rospy.get_param('/semantic_pcl/color_image_topic'),
                Image,
                self.color_callback,
                queue_size=1,
                buff_size=30 * 480 * 640)

        semantic_colored_labels_srv = rospy.Service(
            'get_semantic_colored_labels', GetSemanticColoredLabels,
            self.get_semantic_colored_labels)
        print('Ready.')
Example #15
    def evaluate(self,
                 model=None,
                 inp_images=None,
                 annotations=None,
                 inp_images_dir=None,
                 annotations_dir=None,
                 checkpoints_path=None):
        """Evaluate the loaded model for an imgs set and segs"""

        with self.graph.as_default():
            with self.session.as_default():
                self.signals.log.emit("Début de la session d'évaluation")
                if model is None:
                    if checkpoints_path is None:
                        self.signals.log.emit("Impossible de trouver le modèle"
                                              " à évaluer.")
                        self.signals.log.emit("")
                        self.signals.error.emit("Impossible de trouver le "
                                                "modèle à évaluer")
                    try:
                        checkpoint_nb = checkpoints_path.split('.')[-1]
                        index = -(int)(len(checkpoint_nb) + 1)
                        existing = checkpoints_path[0:index]
                        model = model_from_checkpoint_path_nb(
                            existing, checkpoint_nb)
                        self.signals.log.emit(
                            "Modèle chargé : {}".format(checkpoints_path))
                    except Exception as exc:
                        self.signals.finished.emit("Impossible de charger le "
                                                   "modèle existant !" +
                                                   traceback.format_exc())
                        return

                if inp_images is None:
                    paths = get_pairs_from_paths(inp_images_dir,
                                                 annotations_dir)
                    paths = list(zip(*paths))
                    inp_images = list(paths[0])
                    annotations = list(paths[1])

                tpm = np.zeros(model.n_classes)
                fpm = np.zeros(model.n_classes)
                fnm = np.zeros(model.n_classes)
                n_pixels = np.zeros(model.n_classes)

                file_processed = 0
                for inp, ann in tqdm(zip(inp_images, annotations)):
                    pred = predict(model, inp)

                    ground = get_segmentation_array(ann,
                                                    model.n_classes,
                                                    model.output_width,
                                                    model.output_height,
                                                    no_reshape=True)
                    ground = ground.argmax(-1)

                    pred = pred.flatten()
                    ground = ground.flatten()

                    matrix = confusion_matrix(ground, pred)
                    self.signals.log.emit("Image {}".format(str(inp)))
                    self.signals.log.emit(
                        "Confusion matrix:\n{}\n".format(str(matrix)))

                    for cl_i in range(model.n_classes):
                        tpm[cl_i] += np.sum((pred == cl_i) * (ground == cl_i))
                        fpm[cl_i] += np.sum((pred == cl_i) * (ground != cl_i))
                        fnm[cl_i] += np.sum((pred != cl_i) * (ground == cl_i))
                        n_pixels[cl_i] += np.sum(ground == cl_i)

                    file_processed += 1
                    progression = 100 * file_processed / len(inp_images)
                    self.signals.progressed.emit(progression)

                cl_wise_score = tpm / (tpm + fpm + fnm + 1e-12)
                n_pixels_norm = n_pixels / np.sum(n_pixels)
                frequency_weighted_iu = np.sum(cl_wise_score * n_pixels_norm)
                mean_iu = np.mean(cl_wise_score)
                self.signals.log.emit("frequency_weighted_IU {}".format(
                    str(frequency_weighted_iu)))
                self.signals.log.emit("mean_IU {}".format(str(mean_iu)))
                self.signals.log.emit("class_wise_IU {}".format(
                    str(cl_wise_score)))
                self.signals.log.emit("")
                self.signals.finished.emit("Evaluation terminée !")
Example #16
from keras_segmentation.predict import predict, predict_multiple, model_from_checkpoint_path, evaluate

model = model_from_checkpoint_path(
    "..\\..\\Checkpoints\\AllTools\\mobilenet_alltools")

# BACKGROUND, TOOL, CAT
print(
    model.evaluate_segmentation(
        inp_images_dir="C:\\Users\\Leonardo\\Desktop\\Catetere\\UpdatedDataset\\Test\\Original\\JPEGImages\\",
        annotations_dir="C:\\Users\\Leonardo\\Desktop\\Catetere\\UpdatedDataset\\Test\\Original\\Labels\\"
    ))

# out = model.predict_segmentation(inp="C:\\Users\\Leonardo\\Desktop\\Catetere\\UpdatedDataset\\Test\\Prova\\JPEGImages\\frame1140.jpg", out_fname="..\\a.png")
'''
predict(checkpoints_path="checkpoints\\vgg_unet_1",
        inp="C:\\Users\\d053175\\Desktop\\Prostate\\Dataset\\Test\\39151.png",
        out_fname="C:\\Users\\d053175\\Desktop\\output.png"
        )


predict_multiple(checkpoints_path="checkpoints\\vgg_unet_1",
		inp_dir="C:\\Users\\d053175\\Desktop\\Prostate\\Test\\",
		out_dir="C:\\Users\\d053175\\Desktop\\outputs\\"
)
'''
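Example #17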
plotDir = "log/plot"

segTrainOutDir = 'COCO/segTrain2017'
segValOutDir = 'COCO/segVal2017'

model = vgg_segnet(n_classes=91, input_height=416, input_width=608, encoder_level=3)
model.load_weights(ckPtDir + "/vgg_segnet.199")

model.summary()

# segTrain2017 Img Gen
for (path, _, files) in os.walk(trainImgs):
    for filename in files:
        predict(model,
                overlay_img=True,
                inp=os.path.join(path, filename),
                out_fname=segTrainOutDir + '/' + filename[:-3] + 'png')

# segVal2017 Img Gen
for (path, _, files) in os.walk(valImgs):
    for filename in files:
        predict(model,
                overlay_img=True,
                inp=os.path.join(path, filename),
                out_fname=segValOutDir + '/' + filename[:-3] + 'png')

# send the msg to discord channel
from discord_webhook import DiscordWebhook
url = 'https://discordapp.com/api/webhooks/710208007618822145/4yUFIEoTa7kZFOhyJpSkalNn2NysrM6p5PFVG5iBDkt1ikJxBPwV3_J4FDYi40THgxvl'
Example #18
    '/home/arudrin/Documents/nivfrm-ml/logs/mobilenet_unet_50e-0.015LR/checkpoints/weights-050-0.5345.hdf5'
)

source = '/home/arudrin/Documents/nivfrm-ml/results/source'  # Source Folder / INPUT
dstpath = '/home/arudrin/Documents/nivfrm-ml/results/destination'  # Destination Folder / OUTPUT

try:
    makedirs(dstpath)
except FileExistsError:
    print("Directory already exists, images will be written to the same folder")

files = list(filter(lambda f: isfile(join(source, f)), listdir(source)))
for image in files:
    inp = os.path.join(source, image)
    out = os.path.join(dstpath, image)
    predict(model, inp, out)

exit()
#    try:
#        img = cv2.imread(os.path.join(path,image))
#        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#        dstPath = join(dstpath,image)
#        cv2.imwrite(dstPath,gray)
#    except:
#        print ("{} is not converted".format(image))

#for fil in glob.glob("*.jpg"):
#    try:
#        image = cv2.imread(fil)
#        gray_image = cv2.cvtColor(os.path.join(path,image), cv2.COLOR_BGR2GRAY) # convert to greyscale
#        cv2.imwrite(os.path.join(dstpath,fil),gray_image)
Example #19
def main():

    # Setup the training parameters, input paths etc.,
    checkpoints_path = "./checkpoints_mobilenet_unet_2class/"
    test_images_path = "./dataset2/images_prepped_test/"
    test_masks_path = "./dataset2/masks_prepped_test_2class/"
    predicted_masks_path = "./dataset2/masks_predicted/"
    predicted_overlay_path = "./dataset2/overlay_predicted/"

    # Enable printing of IoU scores for tests.
    print_test_results = False

    # Model checkpoint path should exist, else it is an error.
    if not os.path.exists(checkpoints_path):
        print("[ERROR] invalid checkpoints path {}".format(checkpoints_path))
        sys.exit(1)

    # Get Intersection over Union (IoU) results for the test data set.
    # The model is picked up from the latest model in the checkpoints_path.
    # The input images and ground-truth masks are picked up from their paths.
    if print_test_results:
        test_results = evaluate(inp_images_dir=test_images_path,
                                masks_dir=test_masks_path,
                                checkpoints_path=checkpoints_path)
        print(test_results)
        print("Test results complete")

    # Create output directory to store predicted masks and overlays.
    # Remove any existing png files.
    if not os.path.exists(predicted_masks_path):
        os.makedirs(predicted_masks_path)

    if not os.path.exists(predicted_overlay_path):
        os.makedirs(predicted_overlay_path)

    for f in glob.glob(predicted_masks_path + '/*.png'):
        os.remove(f)

    for f in glob.glob(predicted_overlay_path + '/*.png'):
        os.remove(f)

    # Predict results for the input images and store in output directory.
    # Store both the predicted masks and the overlaid images with masks.

    # Step 1. Load model from checkpoint path.
    model = model_from_checkpoint_path(checkpoints_path)

    # Step 2. Read images one by one and feed each into the predict function.
    #    The predicted mask is written to the predicted masks directory.
    #    Assume that input images are in png format.
    for infile in glob.glob(test_images_path + '/*.png'):
        filename = os.path.split(infile)[1]
        outfile = os.path.join(predicted_masks_path, filename)
        overlayfile = os.path.join(predicted_overlay_path, filename)

        print("Predicting mask for file {}, output in {}".format(
            infile, outfile))

        # Create predicted mask from input file, and store it in outfile.
        predict(inp=infile, out_fname=outfile, model=model)

        # Overlay the predicted mask file over input file for display.
        overlay_image_with_mask(image_file=infile,
                                mask_file=outfile,
                                overlay_file=overlayfile)

    print("Prediction complete")
Example #20
_ = subprocess.check_output(['bash', '-c', set_cuda_cache])
_ = subprocess.check_output(['bash', '-c', allow_gpu_growth])

config = compat.ConfigProto()
config.gpu_options.allow_growth = True
session = compat.InteractiveSession(config=config)

batch_size = 1

# Loading the saved model with load_model() raises a ValueError asking for
# custom_objects, so the weights are loaded into a rebuilt model below instead.
#model = load_model('/home/arudrin/Documents/nivfrm-ml/scripts/mobilenet_50E-0.015LR_saved_model')

model = mobilenet_unet(n_classes=2,
                       input_height=240,
                       input_width=240,
                       batch_size=batch_size)
logs_path = "../logs/mobilenet_unet"

adam = optmzrs.Adam(amsgrad=True)
model.compile(loss=pixel_wise_loss, optimizer=adam, metrics=[iou, dice])

model.load_weights(
    '/home/arudrin/Documents/nivfrm-ml/logs/mobilenet_unet_50e-0.015LR/checkpoints/weights-050-0.5345.hdf5'
)

# val = inference result path, inp = input image path
val = '/home/arudrin/Documents/nivfrm-ml/val/where.jpg'
inp = '/home/arudrin/Documents/nivfrm-ml/scripts/data_in_scriptfolder/raw-240x240/proper/1.jpg'
# modified model.predict from /keras_segmentation/predict.py
predict(model, inp, val)