import argparse
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body


sess = K.get_session()

class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
image_shape = (720., 1280.)    
yolo_model = load_model("model_data/yolo.h5")
# Commented-out sanity check of yolo_eval on random tensors:
# with tf.Session() as test_b:
#     yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1),
#                     tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
#                     tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
#                     tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1))
#     scores, boxes, classes = yolo_eval(yolo_outputs)
#     print("scores[2] = " + str(scores[2].eval()))
#     print("boxes[2] = " + str(boxes[2].eval()))
#     print("classes[2] = " + str(classes[2].eval()))
#     print("scores.shape = " + str(scores.eval().shape))
#     print("boxes.shape = " + str(boxes.eval().shape))
#     print("classes.shape = " + str(classes.eval().shape))

# Test YOLO pretrained model on images
# Create a session to start your graph
sess = K.get_session()

# Load the class names and anchors
class_names = read_classes('model_data/coco_classes.txt')
anchors = read_anchors('model_data/yolo_anchors.txt')
image_shape = (720., 1280.)

# Load a pretrained model whose output has shape (m, 19, 19, 425)
yolo_model = load_model('model_data/yolo.h5')
# Show a summary of the model
yolo_model.summary()

# Convert the model output into usable bounding box tensors: (m,19,19,5,85) --> (m,19,19,5,1), (m,19,19,5,4), (m,19,19,5,80)
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

# Filter boxes
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
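
# A minimal sketch of the yolo_eval used above (it is not defined in this snippet).
# It follows the usual yad2k/course pattern: convert box predictions to corners,
# threshold the class scores, rescale to the original image, then run non-max
# suppression. It assumes yolo_head returns (box_confidence, box_xy, box_wh,
# box_class_probs), and that yolo_filter_boxes / yolo_non_max_suppression are
# defined as in the later snippets.
def yolo_eval_sketch(yolo_outputs, image_shape=(720., 1280.), max_boxes=10,
                     score_threshold=.6, iou_threshold=.5):
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
    # Convert (x, y, w, h) predictions to corner coordinates
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    # Drop boxes whose best class score is below score_threshold
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes,
                                               box_class_probs, score_threshold)
    # Scale boxes back to the original image shape
    boxes = scale_boxes(boxes, image_shape)
    # Keep at most max_boxes boxes after non-max suppression
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes,
                                                      max_boxes, iou_threshold)
    return scores, boxes, classes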

Example #3
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body
import keras
import sys
import os

class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
image_shape = (720., 1280.)

yolo_model = load_model("yolo.h5")

yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))


def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    """Filters YOLO boxes by thresholding on object and class confidence."""

    box_scores = box_confidence * box_class_probs

    box_classes = keras.backend.argmax(box_scores, axis=-1)
    box_class_scores = keras.backend.max(box_scores, axis=-1)
    # Keep only the boxes whose best class score clears the threshold
    filtering_mask = box_class_scores >= threshold
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)
    return scores, boxes, classes
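
# A quick graph-mode check of yolo_filter_boxes on random tensors (a sketch,
# mirroring the random-tensor tests used elsewhere in these snippets; the shapes
# are the usual 19x19 grid with 5 anchors):
with tf.Session() as test_a:
    box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed=1)
    boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed=1)
    box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed=1)
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes,
                                               box_class_probs, threshold=0.5)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))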
Example #4
        print("在" + str(image_file) + "中找到了" + str(len(out_boxes)) + "个锚框。")

    # Pick the colors used to draw the bounding boxes
    colors = yolo_utils.generate_colors(class_names)

    # Draw the bounding boxes on the image
    yolo_utils.draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Save the image with the bounding boxes drawn
    image.save(os.path.join("out", image_file), quality=100)

    # Display the image with the bounding boxes drawn
    if is_plot:
        output_image = scipy.misc.imread(os.path.join("out", image_file))
        plt.imshow(output_image)

    return out_scores, out_boxes, out_classes

if __name__ == '__main__':
    with tf.Session() as sess:
        class_names = yolo_utils.read_classes(r"E:\深度学习\第四课第三周编程作业\Car detection for Autonomous Driving\model_data/coco_classes.txt")
        anchors = yolo_utils.read_anchors(r"E:\深度学习\第四课第三周编程作业\Car detection for Autonomous Driving\model_data/yolo_anchors.txt")
        image_shape = (720., 1280.)

        yolo_model = load_model(r"E:\深度学习\第四课第三周编程作业\Car detection for Autonomous Driving\model_data/yolo.h5")
        yolo_model.summary()
        yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
        scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
        out_scores, out_boxes, out_classes = predict(sess, r"0083.jpg")
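
        # A sketch of running the same prediction over a whole frame sequence; the
        # zero-padded frame names and the range are assumptions, not part of the
        # original script.
        for frame_id in range(1, 121):
            frame_name = str(frame_id).zfill(4) + ".jpg"
            out_scores, out_boxes, out_classes = predict(sess, frame_name)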

Example #5
    
    # Scale boxes back to original image shape.
    boxes = scale_boxes(boxes, image_shape)

    # Apply non-max suppression with a threshold of iou_threshold
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5)
    
    
    return scores, boxes, classes
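
# A minimal sketch of the yolo_non_max_suppression helper called above (it is not
# defined in this fragment); it simply wraps TensorFlow's built-in NMS op and
# assumes the usual `tensorflow as tf` / `keras.backend as K` imports.
def yolo_non_max_suppression_sketch(scores, boxes, classes, max_boxes=10,
                                    iou_threshold=0.5):
    # Indices of the boxes kept by non-max suppression
    nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes,
                                               iou_threshold=iou_threshold)
    # Gather only the kept scores, boxes and classes
    scores = K.gather(scores, nms_indices)
    boxes = K.gather(boxes, nms_indices)
    classes = K.gather(classes, nms_indices)
    return scores, boxes, classes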


#creating a session to start our graph
sess = K.get_session()

#The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images.
class_names = read_classes("coco_classes.txt")
anchors = read_anchors("yolo_anchors.txt")
image_shape = (720., 1280.)    

#Loading a pretrained model- yolo.h5
yolo_model = load_model("yolo.h5")

#summary of the layers our model yolo.h5 contains.
yolo_model.summary()

#Converting output of the model to usable bounding box tensors
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

#Filtering boxes
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
        # predict
        res = pridect(images, dec_image, yolo_model)
        #frame = cv.resize(frame, (96),(96))
        display = np.array(res)
        cv.imshow("Yolo", display)
        # cv.imshow("Yolo", frame)
        out.write(display)
        # Wait 30 ms while showing the frame; exit if "Esc" is pressed
        c = cv.waitKey(30) & 0xff
        if c == 27:
            capture.release()
            break
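
# A sketch of the setup the loop above assumes (none of it appears in this
# fragment): an OpenCV capture for the input video and a VideoWriter for the
# annotated output. The file names, frame rate and codec are assumptions.
import cv2 as cv

capture = cv.VideoCapture("video/input.mp4")
frame_width = int(capture.get(cv.CAP_PROP_FRAME_WIDTH))
frame_height = int(capture.get(cv.CAP_PROP_FRAME_HEIGHT))
fourcc = cv.VideoWriter_fourcc(*"mp4v")
out = cv.VideoWriter("video/output.mp4", fourcc, 25.0, (frame_width, frame_height))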


# yolo = YOLO()
class_names = yolo_utils.read_classes(current_path +
                                      "/model_data/voc_classes.txt")
anchors = yolo_utils.read_anchors(current_path +
                                  "/model_data/yolo_anchors.txt")
num_classes = len(class_names)
num_anchors = len(anchors)
input_shape = (416, 416)
image_input = Input(shape=(None, None, 3))
h, w = input_shape
model = yolo_body(image_input, num_anchors // 3, num_classes)
model.load_weights("logs/ep055-loss18.931-val_loss20.760.h5",
                   by_name=True,
                   skip_mismatch=True)
model.summary()
model.compile(optimizer='Adam',
              loss={
                  # the model's own yolo_loss output is used directly as the loss
                  'yolo_loss': lambda y_true, y_pred: y_pred
              })
# In this part, you are going to use a **pretrained model** and test it on the car detection dataset. As usual, you start by **creating a session to start your graph**. Run the following cell.

# In[10]:

sess = K.get_session() #creating a session to start your graph


# ### 3.1 - Defining classes, anchors and image shape.

# Recall that we are trying to detect **80 classes**, and are using **5 anchor boxes**. We have gathered the information about the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt". Let's load these quantities into the model by running the next cell. 
# 
# The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images. 

# In[11]:

class_names = read_classes("model_data/coco_classes.txt")  # person, bicycle, car, motorbike, aeroplane...
anchors = read_anchors("model_data/yolo_anchors.txt")  # values: 0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828
image_shape = (720., 1280.)


# ### 3.2 - Loading a pretrained model (load the pretrained model directly; there is no need to retrain the CNN)
# 
# Training a YOLO model **takes a very long time** and requires a fairly large dataset of **labelled bounding boxes for a large range of target classes.** You are going to load an existing pretrained Keras YOLO model stored in "yolo.h5". (These weights come from the **official YOLO website**, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the **"YOLOv2" model,** but we will more simply refer to it as "YOLO" in this notebook.) Run the cell below to load the model from this file.

# In[12]:

yolo_model = load_model("model_data/yolo.h5")


# This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains.
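
# A small sanity check (a sketch, not part of the original notebook): the final
# layer's 425 channels should equal 5 anchors * (5 box values + 80 class scores).
yolo_model.summary()
assert yolo_model.output_shape[-1] == len(anchors) * (5 + len(class_names))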
import argparse
import numpy as np
import tensorflow as tf
import time
import os
import glob
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

# might need to set random seed the same in debug
# np.random.seed(101) 

# Get Data #############################################################################################################
PATH = path
DATA_PATH = data_path
classes_paths = PATH + '/model/kitti_classes.txt'
classes_data = read_classes(classes_paths)
anchors_paths = PATH + '/model/kitti_anchors.txt'
anchors = read_anchors(anchors_paths)
print ("Number of classes: ", len(classes_data))

annotation_path_train = PATH + '/model/kitti_train.txt'
annotation_path_valid = PATH + '/model/kitti_train.txt'
annotation_path_test = PATH + '/model/kitti_train.txt'

label_train = []
label_valid = []
with open(annotation_path_train) as f:
    label_train = f.readlines()
with open(annotation_path_valid) as f:
    label_valid = f.readlines()
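
# A small follow-on sketch (an assumption, not in the original script): shuffle the
# training annotations and report how many labelled images each split contains.
np.random.shuffle(label_train)
print("Training samples:", len(label_train))
print("Validation samples:", len(label_valid))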
Example #9
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body
import yolo_filter_boxes
import iou
import yolo_non_max_suppression
from yolo_eval import yolo_eval  # import the function itself so the yolo_eval(...) call below works (assumes yolo_eval.py defines it)

sess = K.get_session()
yolo_model = load_model("C:/Users/Vikarn Bhakri/Downloads/yolo.h5")
class_names = read_classes(
    "C:/Users/Vikarn Bhakri/Desktop/model_data/coco_classes.txt")
anchors = read_anchors(
    "C:/Users/Vikarn Bhakri/Desktop/model_data/yolo_anchors.txt")
image_shape = (720., 1280.)
yolo_model.summary()
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

image, image_data = preprocess_image("C:/Users/Vikarn Bhakri/Desktop/images/" +
                                     "test.jpg",
                                     model_image_size=(608, 608))
out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                              feed_dict={
                                                  yolo_model.input: image_data,
                                                  K.learning_phase(): 0
                                              })
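
# A follow-up sketch mirroring the drawing/saving steps used in the other snippets
# (the output path below is an assumption):
print("Found {} boxes for test.jpg".format(len(out_boxes)))
colors = generate_colors(class_names)
draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
image.save("C:/Users/Vikarn Bhakri/Desktop/images/test_out.jpg",  # assumed path
           quality=90)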
Example #10
def getFrame(sec):
    vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
    hasFrames, image = vidcap.read()
    if hasFrames:
        path = "/Users/prachis/pet_projects/YOLOv2_keras/images/"
        cv2.imwrite(os.path.join(path, "image" + str(count) + ".jpg"), image)
        # cv2.imwrite("image"+str(count)+".jpg", image)     # save frame as JPG file

        input_image_name = "image" + str(count) + ".jpg"

        # Obtaining the dimensions of the input image
        input_image = Image.open(
            "/Users/prachis/pet_projects/YOLOv2_keras/images/" +
            input_image_name)
        width, height = input_image.size
        width = np.array(width, dtype=float)
        height = np.array(height, dtype=float)

        # Assign the shape of the input image to the image_shape variable
        image_shape = (height, width)

        # Load the classes and the anchor boxes that are provided in the model_data folder
        class_names = read_classes("model_data/coco_classes.txt")
        anchors = read_anchors("model_data/yolo_anchors.txt")

        # Load the pretrained model. Please refer the README file to get info on how to obtain the yolo.h5 file
        yolo_model = load_model("model_data/yolo.h5")

        # Print the summary of the model
        # yolo_model.summary()

        # Convert final layer features to bounding box parameters
        yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

        # The yolo_eval function selects the best boxes using filtering and non-max suppression.
        # To see how this works in more detail, refer to keras_yolo.py in yad2k/models
        scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

        # Initiate a session
        sess = K.get_session()

        # Preprocess the input image before feeding into the convolutional network
        image, image_data = preprocess_image(
            "/Users/prachis/pet_projects/YOLOv2_keras/images/" +
            input_image_name,
            model_image_size=(608, 608))

        # Run the session
        out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                      feed_dict={
                                                          yolo_model.input:
                                                          image_data,
                                                          K.learning_phase(): 0
                                                      })

        # Print the results
        print('Found {} boxes for {}'.format(len(out_boxes), input_image_name))
        # Produce the colors for the bounding boxes
        colors = generate_colors(class_names)
        # Draw the bounding boxes
        draw_boxes(image, out_scores, out_boxes, out_classes, class_names,
                   colors)
        # Apply the predicted bounding boxes to the image and save it
        image.save(os.path.join(
            "/Users/prachis/pet_projects/YOLOv2_keras/out/", input_image_name),
                   quality=90)
        output_image = imageio.imread(
            os.path.join("/Users/prachis/pet_projects/YOLOv2_keras/out/",
                         input_image_name))

    return hasFrames
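
# A sketch of the driver this function expects: vidcap and count are the globals
# getFrame reads, and a frame is grabbed every `frame_rate` seconds. The video
# path and the 0.5 s interval are assumptions.
import cv2

vidcap = cv2.VideoCapture("/Users/prachis/pet_projects/YOLOv2_keras/videos/input.mp4")  # assumed path
sec = 0.0
frame_rate = 0.5
count = 1
success = getFrame(sec)
while success:
    count += 1
    sec = round(sec + frame_rate, 2)
    success = getFrame(sec)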
Example #11
    return scores, boxes, classes


scores, boxes, classes = yolo_eval(yolo_outputs)

with tf.Session() as test_b:
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    
    
    
################################################################

sess = K.get_session()
class_names = read_classes("sambid/model_data/coco_classes.txt")
anchors = read_anchors("sambid/model_data/yolo_anchors.txt")

yolo_model = load_model("sambid/model_data/yolo.h5")

yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
image_shape = (720., 1280.)
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)


def predict(sess, image_file):
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data, K.learning_phase(): 0})

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Use one of the functions you've implemented to perform Non-max suppression with a threshold of iou_threshold (≈1 line)
    scores, boxes, classes = yolo_non_max_suppression(
        scores,
        boxes,
        classes,
        max_boxes=max_boxes,
        iou_threshold=iou_threshold)

    return scores, boxes, classes


pather = "/home/robot-tumas/Desktop/projects/Class/find-people/"

# Load the pretrained model
sess = K.get_session()
class_names = read_classes(pather + "classes.txt")
image_shape = (720., 1280.)
yolo_model = load_model(pather + "compModel.pb")
#yolo_model.summary()
yolo_outputs = yolo_head(tf.convert_to_tensor(yolo_model.output), anchors,
                         len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
scores, boxes, classes = yolo_eval(yolo_outputs, input_image_shape)
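
# Because the image shape is a placeholder here, the original image's size must be
# fed in at run time. A sketch of that step (preprocess_image and the default
# 720x1280 size are carried over from the snippets above; the function name itself
# is an assumption):
def predict_with_shape(sess, image_file, original_shape=(720., 1280.)):
    image, image_data = preprocess_image(image_file, model_image_size=(608, 608))
    return sess.run([scores, boxes, classes],
                    feed_dict={yolo_model.input: image_data,
                               input_image_shape: list(original_shape),
                               K.learning_phase(): 0})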
import imageio


def predict(sess, image_file):

    # Preprocess your image
    image, image_data = preprocess_image(image_file,
                                         model_image_size=(608, 608))
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes,
                                               box_class_probs,
                                               score_threshold)

    boxes = scale_boxes(boxes, image_shape)

    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes,
                                                      max_boxes, iou_threshold)

    return scores, boxes, classes


#3 - Test YOLO pretrained model on images
sess = K.get_session()

#3.1 - Defining classes, anchors and image shape.
class_names = read_classes('C:/Users/kobe24/yad2k/model_data/coco_classes.txt')
anchors = read_anchors('C:/Users/kobe24/yad2k/model_data/yolo_anchors.txt')
image_shape = (300., 450.)

#3.2 - Loading a pretrained model
yolo_model = load_model('C:/Users/kobe24/yad2k/model_data/yolo.h5')
#yolo_model.summary()

#3.3 - Convert output of the model to usable bounding box tensors
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

#3.4 - Filtering boxes
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)


#3.5 - Run the graph on an image
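
# A sketch of the run step announced above, following the same pattern as the other
# snippets; the image path is an assumption, and the yolo_utils helpers
# (preprocess_image, generate_colors, draw_boxes) are assumed to be imported.
image, image_data = preprocess_image('C:/Users/kobe24/yad2k/images/test.jpg',  # assumed path
                                     model_image_size=(608, 608))
out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                              feed_dict={yolo_model.input: image_data,
                                                         K.learning_phase(): 0})
print('Found {} boxes'.format(len(out_boxes)))
colors = generate_colors(class_names)
draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)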
Example #14
    # Save the image with the bounding boxes drawn
    # image.save(os.path.join("out", image_file), quality=100)

    cv2.imshow('Image', np.array(image))

    # Display the image with the bounding boxes drawn
    if is_plot:
        output_image = scipy.misc.imread(os.path.join("out", image_file))
        plt.imshow(output_image)

    return out_scores, out_boxes, out_classes


'''Define the classes, anchor boxes and image dimensions'''
class_names = yolo_utils.read_classes(
    "model_data/coco_classes.txt")  # read the class label file
anchors = yolo_utils.read_anchors(
    "model_data/yolo_anchors.txt")  # read the anchor sizes (w, h); five anchors in total
image_shape = (720., 1280.)
yolo_model = load_model("model_data/yolov2.h5")  # load the trained model (load_model comes from keras.models)
yolo_outputs = yolo_head(yolo_model.output, anchors,
                         len(class_names))  # convert the model output into bounding boxes
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)  # filter the anchor boxes
# Convert the video file into frames for processing
vc = cv2.VideoCapture("video/test_video1.mp4")
totalFrameNumber = vc.get(7)  # get the total number of video frames (7 == cv2.CAP_PROP_FRAME_COUNT)
c = 1
step = 3  # frame interval between processed frames
if vc.isOpened():
    rval, frame = vc.read()
else: