Example #1
def process(path_to_image, out_file):
    """ Apply detection and classification ML model to the input image IN and stores the result to 
    output file OUT.
    """
    from keras_yolo3.yolo import YOLO
    from PIL import Image

    # Create an instance of YOLO. Despite the name, this implementation already
    # contains the custom classification network, so it returns an image that is
    # already annotated with the classification results.
    yolo_model = YOLO()
    # Some warnings will be displayed while instantiating YOLO; they are not
    # important for now and everything should work fine.

    # Load image
    image = Image.open(path_to_image)

    # Apply network to image
    image_result, result_list = yolo_model.detect_image(image)

    # Save the annotated image to the requested output file
    print("***")
    print("Saving image to: '{}'".format(out_file))
    try:
        image_result.save(out_file)
        print("Success! Image saved at: {}".format(out_file))
    except Exception as e:
        print(
            "An error occurred while trying to save image to '{}': {}".format(
                out_file, str(e)))
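A minimal usage sketch (the file names here are hypothetical):

process("input.jpg", "result_image.jpg")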
Example #2
import os

import numpy as np
import tensorflow as tf
from PIL import Image
from keras_yolo3.yolo import YOLO  # import path as used in Example #1


def detect_object(imageFolder='imagesToDetect/', postfix=""):
    """
    Calls the YOLO face detector on a folder and saves results to the
    boxedImages folder.

    Args:
      imageFolder: string
          folder where the images to detect are stored

      postfix: string
          appended to output filenames

    Returns:
      detections: a list of bounding boxes in the format
          [filename, [(xmin, ymin, xmax, ymax, class_id, confidence)]]
    """
    gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
    session = tf.compat.v1.InteractiveSession(config=tf.compat.v1.ConfigProto(
        gpu_options=gpu_options))
    save_img = True  #always save the images for now

    #keeping the output folder static for time being
    save_img_path = 'boxedImages/'

    #get detection output folder from params
    detect_path = imageFolder

    #make the yolo model
    yolo = YOLO()

    # list for min
    detections = []

    #iterate over all images in detect folder, try to open image and detect
    for img_path in os.listdir(detect_path):
        try:
            image = Image.open(os.path.join(detect_path, img_path))
            if image.mode != "RGB":
                image = image.convert("RGB")
            image_array = np.array(image)
        # raised if the file can't be opened; return None in that case
        except Exception as e:
            print("File Open Error ({})! Try again!".format(e))
            return None

        # make Prediction using yolo network
        prediction, new_image = yolo.detect_image(image)

        # add faces detected to be returned
        detections.append([img_path, prediction])

        # save image in output folder, inserting the postfix between the
        # file stem and its extension
        img_out = postfix.join(os.path.splitext(os.path.basename(img_path)))
        if save_img:
            new_image.save(os.path.join(save_img_path, img_out))

    session.close()
    return detections
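A hypothetical invocation, assuming the default folders exist and hold images:

boxes = detect_object(imageFolder='imagesToDetect/', postfix='_boxed')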
Example #3
class Yolo():
    def __init__(self):
        # Yolo setup
        self.yolo = YOLO(
            **{
                "model_path": CONFIG["model_path"],
                "anchors_path": CONFIG["anchors_path"],
                "classes_path": CONFIG["classes_path"],
                "score": CONFIG["score"],
                "gpu_num": CONFIG["gpu_num"],
                "model_image_size": (416, 416),
            })

        # Make a dataframe for the prediction outputs
        self.out_df = pd.DataFrame(columns=OUT_COLS)

    def predict(self, img_path, out_dir):
        with open(CONFIG["classes_path"], "r") as class_file:
            input_labels = [line.rstrip("\n") for line in class_file.readlines()]

        self.out_df = pd.DataFrame(columns=OUT_COLS)

        total = len(os.listdir(img_path))
        for i, filename in enumerate(os.listdir(img_path)):
            print("\rDetecting image ", i, " of ", total, end="", flush=True)
            image = open_image(os.path.join(img_path, filename))
            prediction, predict_image = self.yolo.detect_image(image)
            y_size, x_size, _ = np.array(predict_image).shape

            for single_prediction in prediction:
                self.out_df = self.out_df.append(
                    pd.DataFrame(
                        [[
                            filename,
                            img_path.rstrip("\n"),
                        ] + single_prediction + [x_size, y_size]],
                        columns=OUT_COLS,
                    ))

        # out_dir is expected to be a CSV file path, despite its name
        self.out_df.to_csv(out_dir, index=False)
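Note that DataFrame.append was removed in pandas 2.0, so predict() only runs on older pandas as written. On recent versions the same accumulation can be done by collecting rows and concatenating once, e.g. this sketch reusing the loop's names:

rows = [[filename, img_path.rstrip("\n")] + single_prediction + [x_size, y_size]
        for single_prediction in prediction]
self.out_df = pd.concat(
    [self.out_df, pd.DataFrame(rows, columns=OUT_COLS)], ignore_index=True)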
Example #4
def classify_baby_yolo(baby_url):

    img_data = requests.get(baby_url).content
    image = Image.open(io.BytesIO(img_data))
    print('image loaded!', image.size)

    from keras_yolo3.yolo import YOLO
    yolo_obj = YOLO()
    print('YOLO loaded!')
    image_post_yolo, result = yolo_obj.detect_image(image)
    print('YOLO applied!', result)
    print(result[0])
    #result[0].show()
    '''imgs = []
    for obj in result[1]:
        if obj['predicted_class'] == 'person' and obj['score']>0.33:
            border = tuple(obj['box']) # left, up, right, bottom
            img_cropped = image.crop(border)
            imgs.append(img_cropped)
    print('Babies found: ',len(imgs))'''
    import keras
    keras.backend.clear_session()
    return result
Example #5
def listener():
    global image
    yolo = YOLO(**vars(FLAGS))
    rospy.init_node('yolo', anonymous=True)
    pub = rospy.Publisher('yolo_res', String, queue_size=10)
    rospy.Subscriber("/camera/rgb/image_raw", msg.Image, callback)
    print("Waiting ...")
    while True:
        if image is not None:
            res, rimage = yolo.detect_image(image)
            rimage.show()
            image = None
            message = "{ "
            for obj in res:
                top, left, bottom, right, label, score = obj
                message = message + "{top : " + str(top) + ", left : " + str(
                    left) + ", bottom : " + str(bottom)
                message = message + ", right : " + str(
                    right) + ", label : " + str(label) + ", score : " + str(
                        score) + "},"
            message = message[:-1]
            message = message + "}"
            rospy.loginfo(message)
            pub.publish(message)
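The hand-built message above is only JSON-like (its keys and the label are unquoted, so it will not parse as JSON). If well-formed JSON is wanted, a sketch using the standard library, assuming res holds the same 6-tuples:

import json

objects = [{"top": float(top), "left": float(left),
            "bottom": float(bottom), "right": float(right),
            "label": str(label), "score": float(score)}
           for top, left, bottom, right, label, score in res]
pub.publish(json.dumps(objects))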
Example #6
class YoloModel:
    def __init__(self):
        min_confidence = 0.25
        is_tiny = False

        anchors_path = os.path.join(src_path, "keras_yolo3", "model_data", "yolo_anchors.txt")
        if is_tiny and anchors_path:
            anchors_path = os.path.join(
                os.path.dirname(anchors_path), "yolo-tiny_anchors.txt"
            )
        anchors = get_anchors(anchors_path)
        # define YOLO detector
        self.yolo = YOLO(
            **{
                "model_path": model_weights,
                "anchors_path": anchors_path,
                "classes_path": model_classes,
                "score": min_confidence,
                "gpu_num": 0,
                "model_image_size": (416, 416),
            }
        )

        # labels to draw on images
        with open(model_classes, "r") as class_file:
            self.input_labels = [line.rstrip("\n") for line in class_file.readlines()]
    
    def __del__(self):
        # Close the current yolo session
        self.yolo.close_session()

    
    def detect(self, img, show_stats=True):
        start = timer()
        prediction, detected_img = self.yolo.detect_image(img, show_stats=show_stats)
        detected_img = np.asarray(detected_img)
        y_size, x_size, _ = detected_img.shape

        # Make a dataframe for the prediction outputs
        out_cols = [
            "xmin",
            "ymin",
            "xmax",
            "ymax",
            "label",
            "confidence",
            "x_size",
            "y_size",
        ]
        out_df = pd.DataFrame(columns=out_cols)

        # (DataFrame.append was removed in pandas 2.0; use pd.concat there)
        for single_prediction in prediction:
            out_df = out_df.append(
                pd.DataFrame(
                    [single_prediction + [x_size, y_size]],
                    columns=out_cols,
                )
            )
        end = timer()
        if show_stats:
            print(f"Yolo v3 detection took {end-start:.2f} s")
        return out_df, detected_img
Example #7
        raise IOError("Couldn't open webcam or video") 
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))) 

    # Output stream to save the video output
    if config["save_video"]:
        video_name = config["output_name"] + ".mp4"
        video_fps = config["output_fps"]
        video_fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(video_name, video_fourcc, video_fps, video_size)

    first_centered = True
    while True:
        return_value, frame = vid.read()
        if not return_value:
            break
        image = Image.fromarray(frame)
        image, object_center = yolo.detect_image(image)
        image = np.asarray(image)
        # Add target zone to image
        target_window = get_target_window(*video_size)
        image = draw_target_window(image, target_window, *video_size)
        # If object found
        if object_center is not None:
            move_x, move_y = track_object(*object_center, target_window)
            shoot = "0"
            # If object centered
            if move_x == "0" and move_y == "0":
                shoot_delay = 3
                # Restart countdown if object was lost or decentered
                if first_centered:
                    start_time = time.time()
                    first_centered = False
Example #8
def run():
    print("Loading model...")

    # initialize yolo model
    model_path = root_path + '/YOLO/Data/Model_Weights/trained_weights_final.h5'
    anchors_path = root_path + '/YOLO/2_Training/src/keras_yolo3/model_data/yolo_anchors.txt'
    classes_path = root_path + '/YOLO/Data/Model_Weights/data_classes.txt'

    # define full float32 policy because of YOLO
    with tf.compat.v1.variable_scope('float32_scope') as scope:
        policy = tf.keras.mixed_precision.experimental.Policy('float32')
        tf.keras.mixed_precision.experimental.set_policy(policy)

        yolo = YOLO(
            **{
                "model_path": model_path,
                "anchors_path": anchors_path,
                "classes_path": classes_path,
                "score": 0.25,
                "gpu_num": 1,
                "model_image_size": (256, 256),
            })

    # initialize verificator model
    start_time = time.time()
    verificator = CatVerificator([256, 256, 3],
                                 threshold=1.4,
                                 data_dir=dir_path + '/data',
                                 load_data=True)
    print("Loaded Cat Verficator in {:.2f}sec.".format(time.time() -
                                                       start_time))

    # open camera feed
    video_capture = cv2.VideoCapture(0)
    cv2.namedWindow("Window")

    # set time
    tag_time = time.time()

    while True:
        ret, frame = video_capture.read()
        if not ret:
            break

        frame = Image.fromarray(frame)
        annotated_image = frame  # default in case no detection is drawn this frame

        # detect faces only about once per second; note this isclose check only
        # fires when a loop iteration lands inside the 0.9-1.1 s window
        if np.isclose(time.time() - tag_time, 1, rtol=0.1):
            # detect
            predictions, _ = yolo.detect_image(frame, show_stats=False)

            # check whether more than one cat was detected
            if len(predictions) == 1:
                # crop the detected face; keep only the coordinates
                x_min, y_min, x_max, y_max = predictions[0][:-2]
                cropped_face = utilities.crop_bounding_box(
                    np.asarray(frame), x_min, x_max, y_min, y_max)

                # run verificator
                same_cat, distance = verificator.is_own_cat(cropped_face)

                if same_cat:
                    # draw green bbox
                    annotated_image = utils.draw_annotated_box(
                        frame, [predictions], ['Own_Cat'], [(85, 255, 85)])
                else:
                    # draw red bbox
                    annotated_image = utils.draw_annotated_box(
                        frame, [predictions], ['Own_Cat'], [(0, 0, 255)])

            elif len(predictions) > 1:
                # draw yellow bboxes
                annotated_image = utils.draw_annotated_box(
                    frame, [predictions], ['Too_Many_Cats'], [(85, 255, 255)])

            tag_time = time.time()  # set time
        else:
            annotated_image = frame

        # show image
        cv2.imshow("Window", np.asarray(annotated_image))
        # This breaks on 'q' key
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()
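The np.isclose gate above only fires when an iteration happens to land inside the 0.9-1.1 s window; a simpler and more robust once-per-second gate would be this sketch:

if time.time() - tag_time >= 1.0:
    # ... run detection as above ...
    tag_time = time.time()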
Example #9
class Receiver(object):  # In Python 2 a class should be written like this; it gives benefits such as @classmethods, @staticmethods and more. In Python 3 this is no longer needed, as it is the default.
    def __init__(self, args):
        self.colorImage = None
        self.img = None
        self.pilImg = None
        self.yolo = YOLO(**vars(args))
        self.publishers = []
        self.callback_list = []



        self.detection_info_msg = detection_info()
        self.pub_detection = rospy.Publisher("/detection_info_publisher", detection_info, queue_size=10)
        self.pub_color_image = rospy.Publisher("/detection_color", Image, queue_size=10)
        self.pub_color_info = rospy.Publisher("/detection_color_info", CameraInfo, queue_size=10)
        self.pub_depth_image = rospy.Publisher("/detection_depth", Image, queue_size=10)
        self.pub_depth_info = rospy.Publisher("/detection_depth_info", CameraInfo, queue_size=10)
        #self.publishers[0] = rospy.Publisher("/detection_info_publisher", detection_info)
        #self.publishers[1] = rospy.Publisher("/detection_color", Image)
        #self.publishers[2] = rospy.Publisher("/detection_color_info", CameraInfo)
        #self.publishers[3] = rospy.Publisher("/detection_depth", Image)
        #self.publishers[4] = rospy.Publisher("/detection_depth_info", CameraInfo)


        self.bridge = CvBridge()
        #self.image_sub = rospy.Subscriber("/kinect2/qhd/image_color_rect", Image, self.callback)
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.video = cv2.VideoWriter("/home/erlendb/Programmering/PCL/kinect_ws/test.avi", self.fourcc, 30, (960, 540))

        self.running = None

        #FPS variables
        self.accum_time = 0
        self.curr_fps = 0
        self.prev_time = timer()
        self.fps = "FPS: ??"


    def start(self):
        print("Start()")

        #Subscribers, using TimeSynchronization
        image_color_sub = message_filters.Subscriber('/kinect2/qhd/image_color_rect', Image)
        image_info_sub = message_filters.Subscriber('/kinect2/qhd/camera_info', CameraInfo)
        image_depth_sub = message_filters.Subscriber('/kinect2/qhd/image_depth_rect', Image)
        image_depth_info_sub = message_filters.Subscriber('/kinect2/qhd/camera_info', CameraInfo)

        ts = message_filters.TimeSynchronizer([image_color_sub, image_info_sub, image_depth_sub, image_depth_info_sub], 10)
        ts.registerCallback(self.callback)

        self.running = True

        self.startTime = time.time()
        rospy.spin()



    def callback(self, color_image, color_info, depth_image, depth_info):
        #print("Neine")
        self.colorImage = color_image

        try:
            self.img = self.bridge.imgmsg_to_cv2(self.colorImage, "bgr8")
            self.pilImg = PILImage.fromarray(self.img)
        except CvBridgeError as e:
            print(e)



        self.detection()
        #print(self.detection_info_msg)

        #self.viewer(self.img)
        self.detection_info_msg.header.stamp = color_image.header.stamp
        self.pub_detection.publish(self.detection_info_msg)
        self.pub_color_image.publish(color_image)
        self.pub_color_info.publish(color_info)
        self.pub_depth_image.publish(depth_image)
        self.pub_depth_info.publish(depth_info)



    def close(self):
        self.yolo.close_session()
        self.video.release()
        cv2.destroyAllWindows()
        rospy.signal_shutdown('Quitting')

    def viewer(self, img):


        if self.running:
            if not self.video.isOpened():
                print("error with writer")
            else:
                self.video.write(img)

        key = cv2.waitKey(1)
        if key == 113:
            self.close()


        cv2.imshow("get_image", img)

    def detection(self):



        image = self.pilImg

        # boxes holds the values top, left, bottom, right

        (image, boxes, scores, classes) = self.yolo.detect_image(image)
        print(boxes)
        result = np.asarray(image)  # convert the image so it can be used with OpenCV

        #Assign to message
        if len(classes) > 0:
            self.detection_info_msg.class_type = classes[0]
            self.detection_info_msg.score = scores[0]
            self.detection_info_msg.y1 = boxes[0][0]
            self.detection_info_msg.x1 = boxes[0][1]
            self.detection_info_msg.y2 = boxes[0][2]
            self.detection_info_msg.x2 = boxes[0][3]
            print("element 0: %d element 1: %d element 2: %d element 3: %d" % (boxes[0][0], boxes[0][1], boxes[0][2] ,boxes[0][3]) )
        else:
            self.detection_info_msg.class_type = -1

        #print(self.detection_info_msg.class_type)

        """"
        curr_time = timer()
        exec_time = curr_time - self.prev_time

        self.prev_time = curr_time
        self.accum_time = self.accum_time + exec_time
        self.curr_fps = self.curr_fps + 1
        if self.accum_time > 1:
            self.accum_time = self.accum_time -1
            self.fps = "FPS: " + str(self.curr_fps)
            self.curr_fps = 0
      """
        #cv2.putText(result, text=self.fps, org=(3,15), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.50, color=(255,255,255), thickness=2)
        #cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        key = cv2.waitKey(1)
        if key == 113:
            self.close()
Example #10
print("Found {} input labels: {} ...".format(len(input_labels), input_labels))

game_has_ended = False
game_has_just_started = True
some_move = None
is_confirming_move = False  # the confirming-move state is while the player is physically making the move
game_logic = None
# there_are_unknown_cards = False

while not game_has_ended:
    # print(list_of_piles)
    ret, img = video_stream.read()
    final_image = img
    if not is_confirming_move:
        im_pil = Image.fromarray(img)
        predictions, image = yolo.detect_image(im_pil, False)
        # sort the predictions based on ymin
        predictions.sort(key=lambda x: x[1], reverse=False)

        desired_min_confidence_level = 0.25
        list_of_certain_detections = []

        # remove all predictions below the confidence threshold
        for prediction in predictions:
            if prediction[5] >= desired_min_confidence_level:
                list_of_certain_detections.append(prediction)

        # removing duplicates
        list_of_detected_names = []
        list_of_detections_without_duplicates = []
        for prediction in list_of_certain_detections:
Example #11
overlap_threshold = 0.2

sumIoU = 0.0  # this becomes the numerator in the average
counter = 0  # this becomes the denominator in the average

# Loop through test images
for file in img_files:
    print(f"\nLoading image: {file}")
    img = Image.open(os.path.join(png_dir, file)).convert("RGB")

    # set target for this image
    targets = all_targets[file]

    # Run YOLO detector
    new_img = copy.deepcopy(img)
    YOLO_predictions, new_image = yolo.detect_image(new_img)

    matched_gt = []
    matched_p = []
    # loop through groundtruth boxes
    for targetID, target in enumerate(targets):
        gt_bb = target['bbox']

        # loop through predicted boxes
        maxIoU = 0
        maxID = -1
        for ix, p_bb in enumerate(YOLO_predictions):
            if ix in matched_p:
                continue

            # calculate IoU
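The snippet cuts off at the IoU computation; a standard sketch for axis-aligned boxes, assuming the (xmin, ymin, xmax, ymax) format used elsewhere on this page:

def compute_iou(box_a, box_b):
    # intersection rectangle
    ix_min = max(box_a[0], box_b[0])
    iy_min = max(box_a[1], box_b[1])
    ix_max = min(box_a[2], box_b[2])
    iy_max = min(box_a[3], box_b[3])
    intersection = max(0.0, ix_max - ix_min) * max(0.0, iy_max - iy_min)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - intersection
    return intersection / union if union > 0 else 0.0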
Example #12
    # labels to draw on images
    with open(FLAGS.classes_path, 'r') as class_file:
        input_labels = [line.rstrip('\n') for line in class_file.readlines()]
    print('Found {} input labels: {} ...'.format(len(input_labels),
                                                 input_labels))

    openwin = True
    while openwin:
        frame = depth_stream.read_frame()
        frame_data = frame.get_buffer_as_triplet()
        img = np.frombuffer(frame_data, dtype=np.uint16)
        img.shape = (1, 480, 640)
        # reorder the axes to (480, 640, 1)
        img = np.swapaxes(img, 0, 2)
        img = np.swapaxes(img, 0, 1)
        m = img.copy()
        n = cv2.cvtColor(m, cv2.COLOR_GRAY2RGB)
        n = n.astype(np.uint8)
        print(n.shape)
        prediction, new_image = yolo.detect_image(n)
        cv2.imshow('image', new_image)
        cv2.setMouseCallback('image', get_pos)

        if cv2.waitKey(34) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
    openni2.unload()
    # Close the current yolo session
    yolo.close_session()
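Incidentally, the two swapaxes calls produce the same (480, 640, 1) layout as a single reshape, so the buffer handling could be simplified to this sketch:

img = np.frombuffer(frame_data, dtype=np.uint16).reshape(480, 640, 1)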
Example #13
class TLClassifier(object):
    def __init__(self):
        #TODO load classifier
        model_data_path = os.path.dirname(os.path.abspath(__file__))
        self.detector = YOLO(anchors_path=model_data_path +
                             '/keras_yolo3/model_data/tiny_yolo_anchors.txt',
                             model_path=model_data_path +
                             '/model_data/tiny_yolo.h5',
                             class_name='traffic light',
                             height=240,
                             width=120)
        model_name = model_data_path + '/model_data/lenet_traffic_light.h5'
        f = h5py.File(model_name, mode='r')
        model_version = f.attrs.get('keras_version')
        keras_version = str(keras.__version__).encode('utf8')

        if model_version != keras_version:
            print('You are using Keras version ', keras_version,
                  ', but the model was built using ', model_version)

        self.classifier = load_model(
            model_name, custom_objects={'Normalization': Normalization()})
        global graph
        graph = tf.get_default_graph()

    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        #TODO implement light color prediction
        _, images = self.detector.detect_image(image)
        with graph.as_default():
            if len(images) > 0:
                print(images[0].size)
                result = self.classifier.predict(np.asarray(
                    images[0])[None, :, :, :],
                                                 batch_size=1)
                ret = np.argmax(result)
                print(ret)
                return ret
        return TrafficLight.UNKNOWN

    def load_dataset(self, path):
        from sklearn.model_selection import train_test_split
        samples = []
        for root, _, files in os.walk(os.path.expanduser(path)):
            label = os.path.basename(root).lower()
            for file in files:
                sample = []
                sample.append(os.path.join(root, file))
                sample.append(label)
                samples.append(sample)
        self.train_samples, self.validation_samples = train_test_split(
            samples, test_size=0.2)
        train = len(self.train_samples)
        validation = len(self.validation_samples)
        total = train + validation
        print('Train set size: {} ({}%)'.format(train,
                                                round(train * 100 / total)))
        print('Validation set size: {} ({}%)'.format(
            validation, round(validation * 100 / total)))
        print('Total size: {} (100%)'.format(total))

    def generator(self, samples, batch_size=16):
        label_dict = {
            "none": TrafficLight.UNKNOWN,
            "green": TrafficLight.GREEN,
            "red": TrafficLight.RED,
            "yellow": TrafficLight.YELLOW
        }
        from random import shuffle
        import cv2
        import sklearn
        import numpy as np
        from keras.utils import to_categorical
        num_samples = len(samples)
        while True:
            shuffle(samples)
            for offset in range(0, num_samples, batch_size):
                batch_samples = samples[offset:offset + batch_size]
                images = []
                labels = []
                for batch_sample in batch_samples:
                    image = cv2.imread(batch_sample[0])
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    label = batch_sample[1]
                    images.append(image)
                    labels.append(
                        to_categorical(label_dict[label], num_classes=4))

                X_train = np.array(images)
                y_train = np.array(labels)
                yield sklearn.utils.shuffle(X_train, y_train)

    def train(self,
              filename=os.path.dirname(os.path.abspath(__file__)) +
              '/model_data/lenet_' + str(dt.now()) + '.h5',
              batch_size=16,
              epochs=15):
        from keras.models import Sequential
        from keras.layers import Flatten, Dense, Conv2D, MaxPooling2D, \
        Cropping2D, Dropout
        from keras.callbacks import ModelCheckpoint
        from keras.utils import plot_model
        from math import ceil
        model = Sequential()
        model.add(Normalization(input_shape=(240, 120, 3)))
        model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
        model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
        model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Flatten())
        model.add(Dense(100))
        model.add(Dropout(0.5))
        model.add(Dense(50))
        model.add(Dropout(0.5))
        model.add(Dense(4))
        model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
        history = model.fit_generator(
            self.generator(self.train_samples, batch_size=batch_size),
            steps_per_epoch=ceil(len(self.train_samples) / batch_size),
            validation_data=self.generator(self.validation_samples,
                                           batch_size=batch_size),
            validation_steps=ceil(len(self.validation_samples) / batch_size),
            epochs=epochs,
            verbose=1,
            callbacks=[
                ModelCheckpoint(filename, verbose=1, save_best_only=True)
            ])
        print(history.history.keys())
        print('Loss:')
        print(history.history['loss'])
        print('Validation Loss:')
        print(history.history['val_loss'])
        print('Accuracy:')
        print(history.history['acc'])
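Note that Keras 2.3 renamed the 'acc' history key to 'accuracy', so the last print fails on newer versions; a version-tolerant lookup sketch:

acc_key = 'accuracy' if 'accuracy' in history.history else 'acc'
print('Accuracy:')
print(history.history[acc_key])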
Example #14
def detection(target_path):
    ssl._create_default_https_context = ssl._create_unverified_context
    # needed to bypass HTTPS certificate verification

    # delete previously generated files
    if os.path.isfile("./result/좌표.txt"):
        os.remove("./result/좌표.txt")

    if os.path.isfile("./result/번호판.txt"):
        os.remove("./result/번호판.txt")

    HOME_DIR = 'keras_yolo3/'

    plate_yolo = YOLO(model_path=os.path.join(HOME_DIR, 'snapshots/000/plate_model.h5'),
                      classes_path=os.path.join(HOME_DIR, 'model_data/plate_class.txt'),
                      anchors_path=os.path.join(HOME_DIR, 'model_data/yolo_anchors.txt'))

    print('target_path: ' + target_path)
    img = Image.open(target_path)
    print('Image opened!')
    plt.figure(figsize=(12, 12))
    # plt.imshow(img)

    print('Starting image detection')
    detected_img = plate_yolo.detect_image(img)
    print('Image detection done')

    # plt.figure(figsize=(12, 12))


    plt.imshow(detected_img)
    plt.axis('off')
    # save the image with the recognized license plate
    plt.savefig(target_path)

    # load the coordinates
    print('Loading coordinates')
    with open('keras_yolo3/result/좌표.txt', 'r') as file:
        point_files = file.readlines()
    print('Coordinates loaded')

    i = point_files[0]
    split_points = i.split(' ')
    
    left = split_points[0]
    top = split_points[1]
    right = split_points[2]
    bottom = split_points[3]


    i_left = int(left)
    i_top = int(top)
    i_right = int(right) + 1
    i_bottom = int(bottom) + 1
    # print(i_bottom)
    # for line in point_files:  # split the file into an array
    #      split_points = line.split(' ')
    #      split_points[-1] = split_points[-1][:-1]

    #     print(split_points[0])

    # crop the plate region ~ ><
    
    croppedImage = detected_img.crop((i_left, i_top, i_right, i_bottom))
    # croppedImage.show()
    plt.imshow(croppedImage)
    plt.axis('off')
    
    # set the plate path
    plate_path = target_path.replace('.jpg', 'plate.jpg')
    plt.savefig(plate_path)
    # plt.show()
    plt.close()
    print('YOLO done')
    result = {
        'detect' : "http://localhost:8000/" + target_path,
        'plate' : "http://localhost:8000/" + plate_path,
        'status' : True
    }
    return result