Example #1
    def detect(self, original_image):
        self.output_height, self.output_width = original_image.shape[0:2]
        start_time = time.time()
        image = cv2.resize(original_image,
                           (self.input_width, self.input_height))
        #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = self.detect_objects(image)
        elapsed_ms = (time.time() - start_time) * 1000

        fps = 1000 / elapsed_ms
        print("Estimated frames per second: {0:.2f}  Inference time: {1:.2f} ms".
              format(fps, elapsed_ms))

        def _to_original_scale(boxes):
            minmax_boxes = to_minmax(boxes)
            minmax_boxes[:, 0] *= self.output_width
            minmax_boxes[:, 2] *= self.output_width
            minmax_boxes[:, 1] *= self.output_height
            minmax_boxes[:, 3] *= self.output_height
            return minmax_boxes.astype(int)  # np.int was removed from NumPy; use the builtin

        boxes, probs = self.run(results)
        print(boxes)
        if len(boxes) > 0:
            boxes = _to_original_scale(boxes)
            original_image = draw_boxes(original_image, boxes, probs,
                                        self.labels)
        return cv2.imencode('.jpg', original_image)[1].tobytes()
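The helper to_minmax is not shown in the snippet. From the way its output is scaled (columns 0 and 2 multiplied by the image width, columns 1 and 3 by the height), it presumably converts normalized centroid boxes (cx, cy, w, h) to corner format (x_min, y_min, x_max, y_max). A minimal sketch under that assumption; the column layout is inferred, not taken from the original source:

import numpy as np

def to_minmax(centroid_boxes):
    """Convert normalized (cx, cy, w, h) boxes to (x_min, y_min, x_max, y_max).

    Assumes `centroid_boxes` is a float array of shape (N, 4).
    """
    centroid_boxes = np.asarray(centroid_boxes, dtype=float)
    cx, cy, w, h = centroid_boxes.T
    minmax = np.empty_like(centroid_boxes)
    minmax[:, 0] = cx - w / 2  # x_min
    minmax[:, 1] = cy - h / 2  # y_min
    minmax[:, 2] = cx + w / 2  # x_max
    minmax[:, 3] = cy + h / 2  # y_max
    return minmax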
Example #2
    def parse(self, original_image, tensor):
        #start_time = time.time()
        #elapsed_ms = (time.time() - start_time) * 1000
        #fps  = 1 / elapsed_ms*1000
        #print("Estimated frames per second : {0:.2f} Inference time: {1:.2f}".format(fps, elapsed_ms))
        boxes, probs = self.run(tensor)

        def _to_original_scale(boxes):
            minmax_boxes = to_minmax(boxes)
            minmax_boxes[:, 0] *= 224
            minmax_boxes[:, 2] *= 224
            minmax_boxes[:, 1] *= 224
            minmax_boxes[:, 3] *= 224
            return minmax_boxes.astype(int)  # np.int was removed from NumPy; use the builtin

        if len(boxes) > 0:
            boxes = _to_original_scale(boxes)
            #print(boxes)
            original_image = draw_boxes(original_image, boxes, probs,
                                        self.labels)
        return original_image
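Unlike Examples #1 and #3, this variant scales boxes by a hard-coded 224x224 rather than the size of original_image, so the drawn boxes only line up when the image is itself 224x224. A hedged, parameterized rewrite of the inner helper (names reused from the snippet; to_minmax assumed as sketched above):

def _to_original_scale(boxes, width=224, height=224):
    # Same logic as the snippet, but the target resolution is explicit
    # instead of hard-coded, so callers can pass the real image size.
    minmax_boxes = to_minmax(boxes)
    minmax_boxes[:, [0, 2]] *= width    # x_min, x_max
    minmax_boxes[:, [1, 3]] *= height   # y_min, y_max
    return minmax_boxes.astype(int)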
Example #3
    def detect(self, original_image):
        self.output_height, self.output_width = original_image.shape[0:2]
        start_time = time.time()
        results = self.detect_objects(original_image)
        elapsed_ms = (time.time() - start_time) * 1000
        fps = 1000 / elapsed_ms
        print("Estimated frames per second: {0:.2f}  Inference time: {1:.2f} ms".
              format(fps, elapsed_ms))

        def _to_original_scale(boxes):
            minmax_boxes = to_minmax(boxes)
            minmax_boxes[:, 0] *= self.output_width
            minmax_boxes[:, 2] *= self.output_width
            minmax_boxes[:, 1] *= self.output_height
            minmax_boxes[:, 3] *= self.output_height
            return minmax_boxes.astype(int)  # np.int was removed from NumPy; use the builtin

        boxes, probs = self.run(results)
        print(boxes)
        if len(boxes) > 0:
            boxes = _to_original_scale(boxes)
            original_image = draw_boxes(original_image, boxes, probs,
                                        self.labels)
        return original_image
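draw_boxes is also external to the snippet. A minimal OpenCV sketch that matches the call signature used here; the box layout and label handling are assumptions, not the original helper:

import cv2

def draw_boxes(image, boxes, probs, labels):
    """Draw (x_min, y_min, x_max, y_max) boxes with their best class label.

    Assumes `boxes` is an int array of shape (N, 4), `probs` an
    (N, n_classes) score array, and `labels` a list of class names.
    """
    for box, prob in zip(boxes, probs):
        x1, y1, x2, y2 = [int(v) for v in box]
        cls = int(prob.argmax())
        caption = "{} {:.2f}".format(labels[cls], float(prob.max()))
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(image, caption, (x1, max(y1 - 5, 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    return image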
Example #4
def face_detect():
    option = 'image'

    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.4, 0.5
    anchors = [
        55, 69, 75, 234, 133, 240, 136, 129, 142, 363, 203, 290, 228, 184, 285,
        359, 341, 260
    ]

    def load_graph(frozen_graph_filename):
        # We load the protobuf file from the disk and parse it to retrieve the
        # unserialized graph_def
        with open(frozen_graph_filename, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        # Then we import the graph_def into a new default Graph using the
        # built-in tf.import_graph_def helper
        with tf.Graph().as_default() as graph:
            tf.import_graph_def(graph_def, name="prefix")
        return graph

    # We use our "load_graph" function
    graph = load_graph("yolo.pb")

    # We can verify that we can access the list of operations in the graph
    # =============================================================================
    #     for node in graph.as_graph_def().node:
    #         print(node.name)
    # =============================================================================

    # =============================================================================
    #     for op in graph.get_operations():
    #         opname=op.name
    #         print(opname)     # <--- printing the operations snapshot below
    # =============================================================================
    # prefix/Placeholder/inputs_placeholder
    # ...
    # prefix/Accuracy/predictions
    # We access the input and output nodes
    x = graph.get_tensor_by_name('prefix/input_1:0')
    y0 = graph.get_tensor_by_name('prefix/k2tfout_0:0')
    y1 = graph.get_tensor_by_name('prefix/k2tfout_1:0')
    y2 = graph.get_tensor_by_name('prefix/k2tfout_2:0')

    # We launch a Session
    with tf.Session(graph=graph) as sess:
        if option == 'image':
            img = cv2.imread('1.jpg')
            img = cv2.resize(img, (640, 480))
            img_h, img_w, _ = img.shape
            # compute the predicted output for test_x
            batch_input = preprocess_input(img, net_h, net_w)  #416x416x3

            inputs = np.zeros((1, net_h, net_w, 3), dtype='float32')
            inputs[0] = batch_input
            net_output = sess.run([y0, y1, y2],
                                  feed_dict={x: inputs})  # output=1x13x13x18

            batch_boxes = get_yolo_boxes(net_output, img_h, img_w, net_h,
                                         net_w, anchors, obj_thresh,
                                         nms_thresh)
            _, _, facecen = draw_boxes(img, batch_boxes[0], ['face'],
                                       obj_thresh)
            print(facecen)
            cv2.imshow('image with bboxes', img)
            cv2.imwrite('jietu_out.jpg', img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        elif option == 'webcam':
            # Despite the option name, this branch reads from a video file.
            video_reader = cv2.VideoCapture('last.mp4')
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # 'mp4v' matches the .mp4 container
            out = cv2.VideoWriter('last_out.mp4', fourcc, 20.0, (640, 480))
            while video_reader.isOpened():
                ret_val, img = video_reader.read()
                #print(ret_val)
                if ret_val:
                    img = cv2.resize(img, (640, 480))
                    img_h, img_w, _ = img.shape
                    #print ("image size is:",img_h,",",img_w)

                    # compute the predicted output for test_x
                    batch_input = preprocess_input(img, net_h,
                                                   net_w)  #416x416x3

                    inputs = np.zeros((1, net_h, net_w, 3), dtype='float32')
                    inputs[0] = batch_input
                    net_output = sess.run([y0, y1, y2],
                                          feed_dict={x: inputs})  # output=1x13x13x18

                    batch_boxes = get_yolo_boxes(net_output, img_h, img_w,
                                                 net_h, net_w, anchors,
                                                 obj_thresh, nms_thresh)
                    _, _, facecen = draw_boxes(img, batch_boxes[0], ['face'],
                                               obj_thresh)
                    print(facecen)
                    out.write(img)
                    cv2.imshow('image with bboxes', img)
                    #yield facecen
                    if cv2.waitKey(1) == 27:
                        break  # esc to quit
                else:
                    break
            video_reader.release()
            out.release()
            cv2.destroyAllWindows()
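This example uses the TensorFlow 1.x graph API (tf.GraphDef, tf.Session), which is gone from the top-level namespace in TensorFlow 2. The same frozen-graph loading still works through the compat layer; a minimal sketch under TF2, reusing the file name and node names from the example:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # run in TF1-style graph mode

def load_graph(frozen_graph_filename):
    # Parse the serialized GraphDef and import it into a fresh graph,
    # exactly as in the example but via the tf.compat.v1 namespace.
    with open(frozen_graph_filename, "rb") as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.compat.v1.import_graph_def(graph_def, name="prefix")
    return graph

graph = load_graph("yolo.pb")
with tf.compat.v1.Session(graph=graph) as sess:
    x = graph.get_tensor_by_name('prefix/input_1:0')
    # ... run inference exactly as in the example above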
Example #5
import numpy as np

PROJECT_ROOT = os.path.dirname(__file__)  # directory containing this script
print(PROJECT_ROOT)

IMAGE_FOLDER = os.path.join(PROJECT_ROOT, "data", "test", "*.png")
img_fnames = glob.glob(IMAGE_FOLDER)

imgs = []  # loaded test images
for fname in img_fnames:  # read each image and convert BGR -> RGB
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    imgs.append(img)

yolo_v3 = Yolonet(n_classes=len(LABELS))
yolo_v3.load_weights(save_fname + ".h5")  # load the trained weights

i = 0
for img in imgs:  # feed each image to the model in turn
    boxes, labels, probs = yolo_v3.detect(img, COCO_ANCHORS, imgsize)
    print(boxes, labels, probs)
    image = draw_boxes(img,
                       boxes,
                       labels,
                       probs,
                       class_labels=LABELS,
                       desired_size=400)
    image = np.asarray(image, dtype=np.uint8)
    plt.imsave("{}.jpg".format(i), image)
    i += 1
    plt.imshow(image)
    plt.show()
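This snippet leans on imports and names defined elsewhere in the script (LABELS, COCO_ANCHORS, imgsize, save_fname, Yolonet, draw_boxes). A plausible preamble so it runs standalone; the module paths and model-specific values below are assumptions, not taken from the original source:

import os
import glob

import cv2
import matplotlib.pyplot as plt

# Assumed to come from the surrounding project (a YOLOv3-style package);
# the names match the calls above but the module path is a guess.
from yolo.net import Yolonet
from yolo.utils.box import draw_boxes

LABELS = ["face"]      # assumption: class list used at training time
COCO_ANCHORS = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45,
                59, 119, 116, 90, 156, 198, 373, 326]  # standard YOLOv3 anchors
imgsize = 416          # assumption: YOLOv3 input resolution
save_fname = "weights" # assumption: checkpoint prefix used during training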