Example #1
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your TensorFlow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.

    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes

    Note: "None" actually represents the number of predicted boxes; it varies between 0 and max_boxes.
    """

    # Preprocess your image
    image, image_data = preprocess_image("images/" + image_file, model_image_size=(608, 608))

    # Run the session on the (scores, boxes, classes) tensors, feeding in the preprocessed image
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={yolo_model.input: image_data,
                                                             K.learning_phase(): 0})

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook
    # (scipy.misc.imread was removed in SciPy >= 1.2; imageio.imread is a drop-in replacement)
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)

    return out_scores, out_boxes, out_classes
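
For context: the predict variants below rely on module-level globals (sess, class_names, yolo_model, and the scores/boxes/classes tensors). Here is a minimal sketch of that one-time setup, mirroring the graph construction that appears inline in Example #14 further down; the helper module paths are an assumption based on the Coursera "Car detection" assignment layout:

from keras import backend as K
from keras.models import load_model
# Assumed course helpers (module layout is an assumption):
from yolo_utils import read_classes, read_anchors, preprocess_image, generate_colors, draw_boxes
from yad2k.models.keras_yolo import yolo_head  # yolo_eval is defined in the assignment notebook

sess = K.get_session()
class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
image_shape = (720., 1280.)                    # original camera frame size
yolo_model = load_model("model_data/yolo.h5")
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

out_scores, out_boxes, out_classes = predict(sess, "test.jpg")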
Example #2
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your TensorFlow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.
    
    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes
    
    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. 
    """

    # Preprocess your image
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))

    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ..., K.learning_phase(): 0}
    ### START CODE HERE ### (≈ 1 line)
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data, 
                                                                                       K.learning_phase(): 0})

    ### END CODE HERE ###

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)
    
    return out_scores, out_boxes, out_classes
Example #3
def predict(sess, image_file, is_show_info=True, is_plot=True):
    """
    Runs the computation graph stored in "sess" to predict bounding boxes for "image_file"; prints the prediction info and plots the result.

    Arguments:
        sess - TensorFlow/Keras session containing the YOLO computation graph.
        image_file - name of an image stored in the "images" folder.
    Returns:
        out_scores - tensor of shape (None,), confidence scores of the predicted anchor boxes.
        out_boxes - tensor of shape (None, 4), position info of the predicted anchor boxes.
        out_classes - tensor of shape (None,), predicted class indices of the anchor boxes.
    """
    # Preprocess the image

    image, image_data = yolo_utils.preprocess_image(r"E:\深度学习\第四课第三周编程作业\Car detection for Autonomous Driving\images\\" + image_file, model_image_size = (608, 608))

    # Run the session, choosing the correct placeholders in the feed_dict.
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict = {yolo_model.input:image_data, K.learning_phase(): 0})

    # Print prediction info
    if is_show_info:
        print("Found " + str(len(out_boxes)) + " anchor boxes in " + str(image_file) + ".")

    # Pick colors for the bounding boxes to draw
    colors = yolo_utils.generate_colors(class_names)

    # Draw the bounding boxes on the image
    yolo_utils.draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Save the image with the bounding boxes drawn
    image.save(os.path.join("out", image_file), quality=100)

    # Plot the image with the bounding boxes drawn
    if is_plot:
        output_image = scipy.misc.imread(os.path.join("out", image_file))
        plt.imshow(output_image)

    return out_scores, out_boxes, out_classes
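
A hypothetical use of the is_show_info/is_plot flags this variant adds: batch-predicting a numbered frame sequence quietly (the zero-padded file-name pattern is an assumption):

for i in range(1, 121):
    frame_name = str(i).zfill(4) + ".jpg"  # "0001.jpg" ... "0120.jpg"
    predict(sess, frame_name, is_show_info=False, is_plot=False)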
Example #4
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your TensorFlow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.
    
    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes
    
    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. 
    """

    image, image_data = preprocess_image("images/" + image_file,
                                         model_image_size=(608, 608))

    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    colors = generate_colors(class_names)

    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    image.save(os.path.join("out", image_file), quality=90)

    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)

    return out_scores, out_boxes, out_classes
Example #5
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your TensorFlow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.
    
    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes
    
    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. 
    """

    # Preprocess your image
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))

    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ..., K.learning_phase(): 0}
    ### START CODE HERE ### (≈ 1 line)
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict = {yolo_model.input:image_data, K.learning_phase():0})
    ### END CODE HERE ###

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)
    
    return out_scores, out_boxes, out_classes
Example #6
def predict(sess, image_file):
    # Runs the graph stored in "sess" to predict boxes for "image_file". Prints the predictions and saves the annotated image.

    # Preprocess your image
    image, image_data = preprocess_image("Images/" + image_file,
                                         model_image_size=(608, 608))

    out_scores, out_boxes, out_classes = sess.run((scores, boxes, classes),
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("output", image_file), quality=90)

    return out_scores, out_boxes, out_classes
Example #7
def yolo_pipeline(vid_frame):
    # Preprocess video frame
    image, image_data = preprocess_vid(vid_frame, model_image_size=(608, 608))

    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ..., K.learning_phase(): 0}
    ### START CODE HERE ### (≈ 1 line)
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })
    ### END CODE HERE ###

    # Print predictions info
    #print('Found {} boxes'.format(len(out_boxes)))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    return np.asarray(image)
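
yolo_pipeline returns the annotated frame as a numpy array, which is the kind of per-frame filter moviepy expects. A sketch of wiring it to a video with the moviepy 1.x API (file names are assumptions; sess, scores, boxes, classes must already be set up as above):

from moviepy.editor import VideoFileClip

clip = VideoFileClip("project_video.mp4")
annotated = clip.fl_image(yolo_pipeline)  # applies yolo_pipeline to every RGB frame
annotated.write_videofile("project_video_out.mp4", audio=False)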
Example #8
def predict(sess, image_file):
    '''
    The function will run the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.
    '''

    # processing the image data
    image, image_data = preprocess_image("images/" + image_file,
                                         model_image_size=(608, 608))

    # Running the session with the correct tensors and choosing the correct placeholders in the feed_dict.
    # We will need to use feed_dict={yolo_model.input: ..., K.learning_phase(): 0}
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    # Generating colors for drawing bounding boxes.
    colors = generate_colors(class_names)

    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # saving the image
    image.save(os.path.join("out", image_file), quality=90)

    # displaying the image (read back the file that was just saved, not a hard-coded name)
    img = cv2.imread(os.path.join("out", image_file), 1)
    cv2.imshow('output', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    return out_scores, out_boxes, out_classes
Example #9
    def run(self, imgList, fpgaOutput_list, fpgaOutputShape_list, shapeArr):
        if self.numProcessed == 0:
            self.zmqPub = None
            if self.args['zmqpub']:
                self.zmqPub = mp_classify.ZmqResultPublisher(
                    self.args['deviceID'])
            self.goldenMap = None

        self.numProcessed += len(imgList)
        bboxlist_for_images = self.yolo_postproc(fpgaOutput_list,
                                                 self.args,
                                                 shapeArr,
                                                 biases=self.biases)

        if (not self.args['profile']):
            for i in range(min(self.args['batch_sz'], len(shapeArr))):
                print("Detected {} boxes in {}".format(
                    len(bboxlist_for_images[i]), imgList[i]))

        if (self.args['results_dir']):
            boxes = bboxlist_for_images
            for i in range(min(self.args['batch_sz'], len(shapeArr))):
                filename = os.path.splitext(os.path.basename(imgList[i]))[0]
                out_file_txt = os.path.join(self.args['results_dir'],
                                            filename + '.txt')
                print("Saving {} boxes to {}".format(len(boxes[i]),
                                                     out_file_txt))
                sys.stdout.flush()
                saveDetectionDarknetStyle(out_file_txt, boxes[i], shapeArr[i])

                if (self.args['visualize']):
                    out_file_png = os.path.join(self.args['results_dir'],
                                                filename + '.png')
                    print("Saving result to {}".format(out_file_png))
                    sys.stdout.flush()
                    draw_boxes(imgList[i], boxes[i], self.labels, self.colors,
                               out_file_png)
Example #10
def predict(sess, image_file):

    image, image_data = preprocess_image(image_file,
                                         model_image_size=(608, 608))

    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    colors = generate_colors(class_names)

    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    image.save(os.path.join("out", image_file), quality=90)

    output_image = plt.imread(os.path.join("out", image_file))
    imshow(output_image)

    return out_scores, out_boxes, out_classes
Example #11
def predict(sess, image_file):
    # Preprocess your image
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))

    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ..., K.learning_phase(): 0}
    ### START CODE HERE ### (≈ 1 line)
    out_scores, out_boxes, out_classes = sess.run(fetches=[scores,boxes,classes],
       feed_dict={yolo_model.input: image_data,
                  K.learning_phase():0
    })
    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook
    output_image = imageio.imread(os.path.join("out", image_file))
    imshow(output_image)
    
    return out_scores, out_boxes, out_classes
Example #12
def predict(sess, image_file, is_show_info=True, is_plot=True):
    """
    Runs the computation graph stored in "sess" to predict bounding boxes for "image_file"; prints the predicted image and info.
    :param sess: TensorFlow/Keras session containing the YOLO computation graph
    :param image_file: name of an image stored in the "images" folder
    :param is_show_info: whether to print the number of boxes found
    :param is_plot: whether to plot the annotated image
    :return:
            out_scores: tensor, (None,), confidence scores of the predicted anchor boxes
            out_boxes: tensor, (None, 4), position info of the anchor boxes
            out_classes: tensor, (None,), predicted class indices of the anchor boxes
    """
    image, image_data = preprocess_image(image_file, model_image_size=(608, 608))  # Preprocess the image
    out_scores, out_boxes, out_classes = sess.run([scores,boxes,classes],feed_dict={yolo_model.input:image_data, K.learning_phase():0})
    if is_show_info:
        print("在" + str(image_file)+"中找到"+str(len(out_boxes))+"个锚框。")
    colors = generate_colors(class_names)
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    image.save(os.path.join('C:\\Users\\korey\\Desktop\\car',image_file), quality=90)
    if is_plot:
        out_image = plt.imread(os.path.join('C:\\Users\\korey\\Desktop\\car',image_file))
        plt.imshow(out_image)
        plt.show()
    return out_scores, out_boxes, out_classes
Example #13
def predict(sess, image_file):
    """
    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes
    """
    image, image_data = preprocess_image("image/" + image_file,
                                         model_image_size=(608, 608))
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    colors = generate_colors(class_names)
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    image.save(os.path.join("out", image_file), quality=90)
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    plt.imshow(output_image)
    plt.show()
    return out_scores, out_boxes, out_classes
Example #14
def predict(sess, image):
    """
    Runs the graph stored in "sess" to predict boxes for the input image. Prints timing info and returns the annotated result.

    Arguments:
    sess -- your TensorFlow/Keras session containing the YOLO graph
    image -- the input image (frame) to run detection on.

    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes

    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
    """

    class_names = read_classes("model_data/coco_classes.txt")
    anchors = read_anchors("model_data/yolo_anchors.txt")
    image_shape = (720., 1280.)
    yolo_model = load_model("model_data/yolo.h5")
    # yolo_model.summary()
    t = time.time()
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    print("t1:", time.time()-t)

    t = time.time()
    scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
    print("t2:", time.time() - t)

    # Preprocess your image (preprocess_image returns an (image, image_data) pair,
    # consistent with the other examples; unpacking both avoids feeding a tuple)
    image, image_data = preprocess_image(image, model_image_size=(608, 608))

    t = time.time()
    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={yolo_model.input: image_data,
                                                             K.learning_phase(): 0})
    print("t3:", time.time() - t)

    # Print predictions info
    #print('Found {} boxes'.format(len(out_boxes)))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image
    result = draw_boxes(image, out_boxes, out_classes, colors)

    return result
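
Note that this variant reloads yolo.h5 and rebuilds yolo_head/yolo_eval on every call, which is exactly what the t1/t2 timers measure; only t3 (the sess.run) is inherently per-frame. A sketch of hoisting the one-time graph construction out of the hot path, using the same assumed helpers as above:

# One-time setup: build the graph once.
class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
yolo_model = load_model("model_data/yolo.h5")
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
scores, boxes, classes = yolo_eval(yolo_outputs, (720., 1280.))

def predict_fast(sess, image):
    # Per-frame work is now just preprocessing plus one sess.run.
    image, image_data = preprocess_image(image, model_image_size=(608, 608))
    return sess.run([scores, boxes, classes],
                    feed_dict={yolo_model.input: image_data,
                               K.learning_phase(): 0})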
Example #15
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your TensorFlow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.

    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes

    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
    """
    # np.savetxt("output.txt", dept_array, fmt="%i")
    # print(dept_array)

    if isinstance(image_file, str):
        image, image_data = preprocess_image("images/" + image_file,
                                             model_image_size=(416, 416))
    else:
        image = PIL.Image.fromarray(image_file)
        image, image_data = preprocess_image(image,
                                             model_image_size=(416, 416))

    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })
    # print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    labels = draw_boxes(image, out_scores, out_boxes, out_classes, class_names,
                        colors)

    return out_scores, out_boxes, out_classes, image, labels
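
A hypothetical caller for the array branch above: OpenCV delivers BGR frames, while PIL.Image.fromarray and draw_boxes work in RGB, so convert before calling predict:

import cv2

cap = cv2.VideoCapture(0)  # default webcam
ok, frame_bgr = cap.read()
if ok:
    frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    out_scores, out_boxes, out_classes, image, labels = predict(sess, frame_rgb)
cap.release()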
Example #16
    def predict(sess, image):
        # Preprocess your image
        image, image_data = preprocess_image(image,
                                             model_image_size=(416, 416))

        # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
        # You'll need to use feed_dict={yolo_model.input: ..., K.learning_phase(): 0}
        out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                      feed_dict={
                                                          yolo_model.input:
                                                          image_data,
                                                          K.learning_phase(): 0
                                                      })

        # Print predictions info
        print('Found {} boxes'.format(len(out_boxes)))
        # Generate colors for drawing bounding boxes.
        colors = generate_colors(class_names)
        # Draw bounding boxes on the image file
        out_image = draw_boxes(image, out_scores, out_boxes, out_classes,
                               class_names, colors)

        return out_image, out_scores, out_boxes, out_classes
Example #17
def predict(sess, image_file):
    """
    Predict boxes for "image_file"

    Arguments:
    sess -- Keras session
    image_file -- name of a test image stored in the "images" folder.

    Returns:
    out_scores -- scores of the predicted boxes
    out_boxes -- coordinates of the predicted boxes
    out_classes -- class index of the predicted boxes
    """

    image, image_data = preprocess_image("images/" + image_file,
                                         model_image_size=(416, 416))

    # Note: calling yolo_eval inside sess.run rebuilds the eval ops on every call;
    # hoisting scores/boxes/classes out of predict avoids the repeated graph construction.
    out_scores, out_boxes, out_classes = sess.run(yolo_eval(yolo_outputs),
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    # Predictions info
    # print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    output_stats = draw_boxes(image, out_scores, out_boxes, out_classes,
                              class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    output_image = scipy.misc.imread(os.path.join("out", image_file))

    return out_scores, out_boxes, out_classes, output_image, output_stats
Example #18
    def bbox_stage(config, q_bbox, maxNumIters=-1):
        results = []

        numIters = 0
        while True:
            numIters += 1
            if maxNumIters > 0 and numIters > maxNumIters:
                break

            payload = q_bbox.get()
            if payload is None:
                break
            (job, fpgaOutput) = payload

            images = job['images']
            display = job['display']
            coco = job['coco']

            if ((config['yolo_model'] == 'standard_yolo_v3')
                    or (config['yolo_model'] == 'tiny_yolo_v3')
                    or (config['yolo_model'] == 'spp_yolo_v3')):
                anchorCnt = config['anchorCnt']
                classes = config['classes']

                if (config['yolo_model'] == 'tiny_yolo_v3'):
                    classes = 80
                    #config['classes'] = 3
                #print "classes fpgaOutput len", classes, len(fpgaOutput)
                out_yolo_layers = process_all_yolo_layers(
                    fpgaOutput, classes, anchorCnt, config['net_w'],
                    config['net_h'])

                num_proposals_layer = [0]
                total_proposals = 0
                for layr_idx in range(len(out_yolo_layers)):
                    yolo_layer_shape = out_yolo_layers[layr_idx].shape
                    #print "layr_idx , yolo_layer_shape", layr_idx , yolo_layer_shape
                    out_yolo_layers[layr_idx] = out_yolo_layers[
                        layr_idx].reshape(
                            yolo_layer_shape[0], anchorCnt, (5 + classes),
                            yolo_layer_shape[2] * yolo_layer_shape[3])
                    out_yolo_layers[layr_idx] = out_yolo_layers[
                        layr_idx].transpose(0, 3, 1, 2)
                    out_yolo_layers[layr_idx] = out_yolo_layers[
                        layr_idx].reshape(
                            yolo_layer_shape[0], yolo_layer_shape[2] *
                            yolo_layer_shape[3] * anchorCnt, (5 + classes))
                    #print "layr_idx, final in layer sape, outlayer shape", layr_idx, yolo_layer_shape, out_yolo_layers[layr_idx].shape
                    total_proposals += yolo_layer_shape[2] * yolo_layer_shape[
                        3] * anchorCnt
                    num_proposals_layer.append(total_proposals)

                boxes_array = np.empty(
                    [config['batch_sz'], total_proposals, (5 + classes)])

                for layr_idx in range(len(out_yolo_layers)):
                    proposal_st = num_proposals_layer[layr_idx]
                    proposal_ed = num_proposals_layer[layr_idx + 1]
                    #print "proposal_st proposal_ed", proposal_st, proposal_ed
                    boxes_array[:,
                                proposal_st:proposal_ed, :] = out_yolo_layers[
                                    layr_idx][...]

                for i in range(config['batch_sz']):
                    boxes_array[i, :, :] = correct_region_boxes(
                        boxes_array[i, :, :], 0, 1, 2, 3,
                        float(job['shapes'][i][1]), float(job['shapes'][i][0]),
                        float(config['net_w']), float(config['net_h']))
                    detected_boxes = apply_nms(boxes_array[i, :, :], classes,
                                               config['scorethresh'],
                                               config['iouthresh'])

                    bboxes = []
                    for det_idx in range(len(detected_boxes)):
                        #print  detected_boxes[det_idx][0], detected_boxes[det_idx][1], detected_boxes[det_idx][2], detected_boxes[det_idx][3], config['names'][detected_boxes[det_idx][4]], detected_boxes[det_idx][5]

                        bboxes.append({
                            'classid': detected_boxes[det_idx][4],
                            'prob': detected_boxes[det_idx][5],
                            'll': {
                                'x':
                                int((detected_boxes[det_idx][0] -
                                     0.5 * detected_boxes[det_idx][2]) *
                                    job['shapes'][i][1]),
                                'y':
                                int((detected_boxes[det_idx][1] +
                                     0.5 * detected_boxes[det_idx][3]) *
                                    job['shapes'][i][0])
                            },
                            'ur': {
                                'x':
                                int((detected_boxes[det_idx][0] +
                                     0.5 * detected_boxes[det_idx][2]) *
                                    job['shapes'][i][1]),
                                'y':
                                int((detected_boxes[det_idx][1] -
                                     0.5 * detected_boxes[det_idx][3]) *
                                    job['shapes'][i][0])
                            }
                        })

                        log.info("Obj %d: %s" %
                                 (det_idx,
                                  config['names'][bboxes[det_idx]['classid']]))
                        log.info("\t score = %f" % (bboxes[det_idx]['prob']))
                        log.info("\t (xlo,ylo) = (%d,%d)" %
                                 (bboxes[det_idx]['ll']['x'],
                                  bboxes[det_idx]['ll']['y']))
                        log.info("\t (xhi,yhi) = (%d,%d)" %
                                 (bboxes[det_idx]['ur']['x'],
                                  bboxes[det_idx]['ur']['y']))

                    if display:
                        draw_boxes(images[i], bboxes, config['names'],
                                   config['colors'])

                    filename = images[i]
                    out_file_txt = ((filename.split("/")[-1]).split(".")[0])
                    out_file_txt = config[
                        'out_labels_path'] + "/" + out_file_txt + ".txt"
                    out_line_list = []

                    for j in range(len(bboxes)):
                        #x,y,w,h = darknet_style_xywh(job['shapes'][i][1], job['shapes'][i][0], bboxes[j]["ll"]["x"],bboxes[j]["ll"]["y"],bboxes[j]['ur']['x'],bboxes[j]['ur']['y'])
                        x = detected_boxes[j][0]
                        y = detected_boxes[j][1]
                        w = detected_boxes[j][2]
                        h = detected_boxes[j][3]

                        line_string = str(bboxes[j]["classid"])
                        line_string = line_string + " " + str(
                            round(bboxes[j]['prob'], 3))
                        line_string = line_string + " " + str(x)
                        line_string = line_string + " " + str(y)
                        line_string = line_string + " " + str(w)
                        line_string = line_string + " " + str(h)
                        out_line_list.append(line_string + "\n")

                    log.info("writing this into prediction file at %s" %
                             (out_file_txt))
                    with open(out_file_txt, "w") as the_file:
                        for lines in out_line_list:
                            the_file.write(lines)

                continue

            fpgaOutput = fpgaOutput.flatten()
            for i in range(config['batch_sz']):
                log.info("Results for image %d: %s" % (i, images[i]))
                startidx = i * config['outsize']
                softmaxout = fpgaOutput[startidx:startidx + config['outsize']]

                # first activate first two channels of each bbox subgroup (n)
                for b in range(config['bboxplanes']):
                    for r in range(
                            config['batchstride'] * b,
                            config['batchstride'] * b + 2 * config['groups']):
                        softmaxout[r] = sigmoid(softmaxout[r])
                    for r in range(
                            config['batchstride'] * b +
                            config['groups'] * config['coords'],
                            config['batchstride'] * b +
                            config['groups'] * config['coords'] +
                            config['groups']):
                        softmaxout[r] = sigmoid(softmaxout[r])

                # Now softmax on all classification arrays in image
                for b in range(config['bboxplanes']):
                    for g in range(config['groups']):
                        softmax(
                            config['beginoffset'] + b * config['batchstride'] +
                            g * config['groupstride'], softmaxout, softmaxout,
                            config['classes'], config['groups'])

                # NMS
                bboxes = nms.do_baseline_nms(
                    softmaxout, job['shapes'][i][1], job['shapes'][i][0],
                    config['net_w'], config['net_h'], config['out_w'],
                    config['out_h'], config['bboxplanes'], config['classes'],
                    config['scorethresh'], config['iouthresh'])

                # REPORT BOXES
                log.info("Found %d boxes" % (len(bboxes)))
                filename = images[i]
                out_file_txt = ((filename.split("/")[-1]).split(".")[0])
                out_file_txt = config[
                    'out_labels_path'] + "/" + out_file_txt + ".txt"

                out_line_list = []

                for j in range(len(bboxes)):
                    log.info("Obj %d: %s" %
                             (j, config['names'][bboxes[j]['classid']]))
                    log.info("\t score = %f" % (bboxes[j]['prob']))
                    log.info("\t (xlo,ylo) = (%d,%d)" %
                             (bboxes[j]['ll']['x'], bboxes[j]['ll']['y']))
                    log.info("\t (xhi,yhi) = (%d,%d)" %
                             (bboxes[j]['ur']['x'], bboxes[j]['ur']['y']))
                    filename = images[i]
                    if coco:
                        image_id = int(((filename.split("/")[-1]
                                         ).split("_")[-1]).split(".")[0])
                    else:
                        image_id = filename.split("/")[-1]
                    x, y, w, h = cornersToxywh(bboxes[j]["ll"]["x"],
                                               bboxes[j]["ll"]["y"],
                                               bboxes[j]['ur']['x'],
                                               bboxes[j]['ur']['y'])
                    result = {
                        "image_id": image_id,
                        "category_id": config['cats'][bboxes[j]["classid"]],
                        "bbox": [x, y, w, h],
                        "score": round(bboxes[j]['prob'], 3)
                    }
                    results.append(result)
                    x, y, w, h = darknet_style_xywh(job['shapes'][i][1],
                                                    job['shapes'][i][0],
                                                    bboxes[j]["ll"]["x"],
                                                    bboxes[j]["ll"]["y"],
                                                    bboxes[j]['ur']['x'],
                                                    bboxes[j]['ur']['y'])
                    line_string = str(bboxes[j]["classid"])
                    line_string = line_string + " " + str(
                        round(bboxes[j]['prob'], 3))
                    line_string = line_string + " " + str(x)
                    line_string = line_string + " " + str(y)
                    line_string = line_string + " " + str(w)
                    line_string = line_string + " " + str(h)
                    out_line_list.append(line_string + "\n")

                # DRAW BOXES w/ LABELS
                if display:
                    draw_boxes(images[i], bboxes, config['names'],
                               config['colors'])

                log.info("writing this into prediction file at %s" %
                         (out_file_txt))
                with open(out_file_txt, "w") as the_file:

                    for lines in out_line_list:

                        the_file.write(lines)

        log.info("Saving results as results.json")
        with open("results.json", "w") as fp:
            fp.write(json.dumps(results, sort_keys=True, indent=4))
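
The label files written above use darknet-style lines, "classid prob x y w h", with coordinates normalized to the original image size. The darknet_style_xywh helper itself is not shown; a sketch consistent with how it is called here (ll/ur are in image coordinates where y grows downward, hence the abs):

def darknet_style_xywh_sketch(img_w, img_h, xlo, ylo, xhi, yhi):
    # Corner coordinates -> normalized (center_x, center_y, width, height).
    x = ((xlo + xhi) / 2.0) / img_w
    y = ((ylo + yhi) / 2.0) / img_h
    w = abs(xhi - xlo) / float(img_w)
    h = abs(yhi - ylo) / float(img_h)
    return x, y, w, h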
Example #19
    def bbox_stage(config, q_bbox):
        results = []

        while True:
            payload = q_bbox.get()
            if payload is None:
                break
            (job, fpgaOutput) = payload

            images = job['images']
            display = job['display']
            coco = job['coco']

            for i in range(config['batch_sz']):
                log.info("Results for image %d: %s" % (i, images[i]))
                startidx = i * config['outsize']
                softmaxout = fpgaOutput[startidx:startidx + config['outsize']]

                # first activate first two channels of each bbox subgroup (n)
                for b in range(config['bboxplanes']):
                    for r in range(
                            config['batchstride'] * b,
                            config['batchstride'] * b + 2 * config['groups']):
                        softmaxout[r] = sigmoid(softmaxout[r])
                    for r in range(
                            config['batchstride'] * b +
                            config['groups'] * config['coords'],
                            config['batchstride'] * b +
                            config['groups'] * config['coords'] +
                            config['groups']):
                        softmaxout[r] = sigmoid(softmaxout[r])

                # Now softmax on all classification arrays in image
                for b in range(config['bboxplanes']):
                    for g in range(config['groups']):
                        softmax(
                            config['beginoffset'] + b * config['batchstride'] +
                            g * config['groupstride'], softmaxout, softmaxout,
                            config['classes'], config['groups'])

                # NMS
                bboxes = nms.do_baseline_nms(
                    softmaxout, job['shapes'][i][1], job['shapes'][i][0],
                    config['net_w'], config['net_h'], config['out_w'],
                    config['out_h'], config['bboxplanes'], config['classes'],
                    config['scorethresh'], config['iouthresh'])

                # REPORT BOXES
                log.info("Found %d boxes" % (len(bboxes)))
                for j in range(len(bboxes)):
                    log.info("Obj %d: %s" %
                             (j, config['names'][bboxes[j]['classid']]))
                    log.info("\t score = %f" % (bboxes[j]['prob']))
                    log.info("\t (xlo,ylo) = (%d,%d)" %
                             (bboxes[j]['ll']['x'], bboxes[j]['ll']['y']))
                    log.info("\t (xhi,yhi) = (%d,%d)" %
                             (bboxes[j]['ur']['x'], bboxes[j]['ur']['y']))
                    filename = images[i]
                    if coco:
                        image_id = int(((filename.split("/")[-1]
                                         ).split("_")[-1]).split(".")[0])
                    else:
                        image_id = filename.split("/")[-1]
                    x, y, w, h = cornersToxywh(bboxes[j]["ll"]["x"],
                                               bboxes[j]["ll"]["y"],
                                               bboxes[j]['ur']['x'],
                                               bboxes[j]['ur']['y'])
                    result = {
                        "image_id": image_id,
                        "category_id": config['cats'][bboxes[j]["classid"]],
                        "bbox": [x, y, w, h],
                        "score": round(bboxes[j]['prob'], 3)
                    }
                    results.append(result)

                # DRAW BOXES w/ LABELS
                if display:
                    draw_boxes(images[i], bboxes, config['names'],
                               config['colors'])

        log.info("Saving results as results.json")
        with open("results.json", "w") as fp:
            fp.write(json.dumps(results, sort_keys=True, indent=4))
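
sigmoid and softmax are referenced by these post-processing stages but never defined in the snippets. A minimal numpy sigmoid consistent with its elementwise use here; the strided, in-place softmax variant is specific to this pipeline's memory layout and is not reconstructed:

import numpy as np

def sigmoid(x):
    # Logistic function, applied elementwise to the box/objectness channels.
    return 1.0 / (1.0 + np.exp(-x))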
Example #20
def main():
    parser = xdnn_io.default_parser_args()
    parser = yolo_parser_args(parser)
    args = parser.parse_args()
    args = xdnn_io.make_dict_args(args)

    # Setup the environment
    img_paths = xdnn_io.getFilePaths(args['images'])
    if (args['golden'] or args['visualize']):
        assert args['labels'], "Provide --labels to compute mAP."
        assert args[
            'results_dir'], "For accuracy measurements, provide --results_dir to save the detections."
        labels = xdnn_io.get_labels(args['labels'])
        colors = generate_colors(len(labels))

    if args['yolo_version'] == 'v2': yolo_postproc = yolo.yolov2_postproc
    elif args['yolo_version'] == 'v3': yolo_postproc = yolo.yolov3_postproc

    runner = Runner(args['vitis_rundir'])

    # Setup the blobs
    inTensors = runner.get_input_tensors()
    outTensors = runner.get_output_tensors()
    batch_sz = args['batch_sz']
    if batch_sz == -1:
        batch_sz = inTensors[0].dims[0]

    fpgaBlobs = []
    for io in [inTensors, outTensors]:
        blobs = []
        for t in io:
            shape = (batch_sz, ) + tuple([t.dims[i]
                                          for i in range(t.ndims)][1:])
            blobs.append(np.empty((shape), dtype=np.float32, order='C'))
        fpgaBlobs.append(blobs)
    fpgaInput = fpgaBlobs[0][0]

    # Setup the YOLO config
    net_h, net_w = fpgaInput.shape[-2:]
    args['net_h'] = net_h
    args['net_w'] = net_w
    biases = bias_selector(args)

    # Setup profiling env
    prep_time = 0
    exec_time = 0
    post_time = 0

    # Start the execution
    for i in range(0, len(img_paths), batch_sz):
        pl = []
        img_shapes = []

        # Prep images
        t1 = timeit.default_timer()
        for j, p in enumerate(img_paths[i:i + batch_sz]):
            fpgaInput[j, ...], img_shape = xdnn_io.loadYoloImageBlobFromFile(
                p, net_h, net_w)
            pl.append(p)
            img_shapes.append(img_shape)
        t2 = timeit.default_timer()

        # Execute
        jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])
        runner.wait(jid)

        # Post Proc
        t3 = timeit.default_timer()
        boxes = yolo_postproc(fpgaBlobs[1], args, img_shapes, biases=biases)
        t4 = timeit.default_timer()

        prep_time += (t2 - t1)
        exec_time += (t3 - t2)
        post_time += (t4 - t3)

        for i in range(min(batch_sz, len(img_shapes))):
            print("Detected {} boxes in {}".format(len(boxes[i]), pl[i]))

        # Save the result
        if (args['results_dir']):
            for i in range(min(batch_sz, len(img_shapes))):
                filename = os.path.splitext(os.path.basename(pl[i]))[0]
                out_file_txt = os.path.join(args['results_dir'],
                                            filename + '.txt')
                print("Saving {} boxes to {}".format(len(boxes[i]),
                                                     out_file_txt))
                sys.stdout.flush()
                saveDetectionDarknetStyle(out_file_txt, boxes[i],
                                          img_shapes[i])
                if (args['visualize']):
                    out_file_png = os.path.join(args['results_dir'],
                                                filename + '.png')
                    print("Saving result to {}".format(out_file_png))
                    sys.stdout.flush()
                    draw_boxes(pl[i], boxes[i], labels, colors, out_file_png)

    # Profiling results
    if (args['profile']):
        print("\nAverage Latency in ms:")
        print("  Image Prep: {0:3f}".format(prep_time * 1000.0 /
                                            len(img_paths)))
        print("  Exec: {0:3f}".format(exec_time * 1000.0 / len(img_paths)))
        print("  Post Proc: {0:3f}".format(post_time * 1000.0 /
                                           len(img_paths)))
        sys.stdout.flush()

    # mAP calculation
    if (args['golden']):
        print()
        print("Computing mAP score  : ")
        print("Class names are  : {} ".format(labels))
        mAP = calc_detector_mAP(args['results_dir'], args['golden'],
                                len(labels), labels, args['prob_threshold'],
                                args['mapiouthresh'], args['points'])
        sys.stdout.flush()
Example #21
def yolo_gpu_inference(backend_path, image_dir, deploy_model, weights,
                       out_labels, IOU_threshold, scorethresh, mean_value,
                       pxscale, transpose, channel_swap, yolo_model,
                       num_classes, args):

    # Setup the environment
    images = xdnn_io.getFilePaths(args['images'])
    if (args['golden'] or args['visualize']):
        assert args['labels'], "Provide --labels to compute mAP."
        assert args[
            'results_dir'], "For accuracy measurements, provide --results_dir to save the detections."
        labels = xdnn_io.get_labels(args['labels'])
        colors = generate_colors(len(labels))

    # Select postproc and biases
    if args['yolo_version'] == 'v2': yolo_postproc = yolo.yolov2_postproc
    elif args['yolo_version'] == 'v3': yolo_postproc = yolo.yolov3_postproc
    biases = bias_selector(args)

    import caffe
    caffe.set_mode_cpu()
    print(args)
    if (args['gpu'] is not None):
        caffe.set_mode_gpu()
        caffe.set_device(args['gpu'])

    net = caffe.Net(deploy_model, weights, caffe.TEST)

    net_h, net_w = net.blobs['data'].data.shape[-2:]
    args['net_h'] = net_h
    args['net_w'] = net_w

    for i, img in enumerate(images):
        if ((i + 1) % 100 == 0): print(i + 1, "images processed")
        raw_img, img_shape = xdnn_io.loadYoloImageBlobFromFile(
            img, net_h, net_w)

        net.blobs['data'].data[...] = raw_img
        out = net.forward()

        caffeOutput = sorted(out.values(), key=lambda item: item.shape[-1])
        boxes = yolo_postproc(caffeOutput, args, [img_shape], biases=biases)

        print("{}. Detected {} boxes in {}".format(i, len(boxes[0]), img))

        # Save the result
        boxes = boxes[0]
        if (args['results_dir']):
            filename = os.path.splitext(os.path.basename(img))[0]
            out_file_txt = os.path.join(args['results_dir'], filename + '.txt')
            print("Saving {} boxes to {}".format(len(boxes), out_file_txt))
            sys.stdout.flush()
            saveDetectionDarknetStyle(out_file_txt, boxes, img_shape)
            if (args['visualize']):
                out_file_png = os.path.join(args['results_dir'],
                                            filename + '.png')
                print("Saving result to {}".format(out_file_png))
                sys.stdout.flush()
                draw_boxes(img, boxes, labels, colors, out_file_png)
        # draw_boxes(images[i],bboxes,class_names,colors=[(0,0,0)]*num_classes)

    return len(images)
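
One line above is worth a note: Caffe's out.values() has no guaranteed ordering, and keying the sort on item.shape[-1] orders the output blobs deterministically by grid size. An illustrative toy (the blob names and the 19/38/76 grids of a 608x608 YOLOv3 are assumptions):

import numpy as np

outs = {"a": np.zeros((1, 255, 76, 76)),
        "b": np.zeros((1, 255, 19, 19)),
        "c": np.zeros((1, 255, 38, 38))}
ordered = sorted(outs.values(), key=lambda item: item.shape[-1])
print([o.shape[-1] for o in ordered])  # [19, 38, 76], coarse to fine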
Example #22
    def loop(self, payload):
        if self.numProcessed == 0:
            self.startTime = timeit.default_timer()

        (meta, buf) = payload
        # print("POST get payload : {} ".format(meta))
        imgList = []
        imgShape = []

        buf2 = np.frombuffer(buf, dtype=np.float32)
        # print("buf2 details : ", buf2.shape, buf2.dtype)
        # print("buf indices : ", self.buf_indices)
        # print("fpgaOutput shapes : ", self.fpgaOutputShapes)

        bufs = []
        for i in range(len(self.buf_indices[:-1])):
            tmp = buf2[self.buf_indices[i]:self.buf_indices[i + 1]].reshape(
                self.fpgaOutputShapes[i]).copy()
            #print("tmp : ", tmp.shape, tmp.dtype, tmp.flags)
            bufs.append(tmp)

        for ri, r in enumerate(meta['requests']):
            imgList.append(r['path'])
            imgShape.append(r['image_shape'])

        if not self._args["benchmarkmode"]:
            # buf is a list containing multiple blobs
            for b in range(self._args['batch_sz']):
                for idx, bname in enumerate(
                        meta['outputs']):  #(layer25-conv, layer27-conv)
                    # print("Adding to layer : ", bname, self.net.blobs[bname].data.shape, bufs[idx][b].shape)
                    self.net.blobs[bname].data[...] = bufs[idx][b, ...]
                _ = self.net.forward(start='layer28-reorg', end='layer31-conv')
                self.netOut[b, ...] = self.net.blobs['layer31-conv'].data[...]

        #  fpgaOutput = np.copy(np.frombuffer(buf, dtype=np.float32)\
        #    .reshape(self.fpgaOutputShape))
        # print("Going to Post run")
            image_detections = self._run(
                imgList, imgShape, self.netOut
            )  # N images with K detections per image, each detection is a dict... list of list of dict
            #for i in range(len(image_detections)):
            #  print("{} boxes detected in image : {}".format(len(image_detections[i]), imgList[i]))

            boxes = image_detections
            if (self._args['results_dir']):
                for i in range(len(imgShape)):
                    filename = os.path.splitext(os.path.basename(
                        imgList[i]))[0]
                    out_file_txt = os.path.join(self._args['results_dir'],
                                                filename + '.txt')
                    # print("Saving {} boxes to {}".format(len(boxes[i]), out_file_txt)); sys.stdout.flush()
                    saveDetectionDarknetStyle(out_file_txt, boxes[i],
                                              imgShape[i])
                    if (self._args['visualize']):
                        out_file_png = os.path.join(self._args['results_dir'],
                                                    filename + '.png')
                        # print("Saving result to {}".format(out_file_png)); sys.stdout.flush()
                        draw_boxes(imgList[i], boxes[i], self.labels,
                                   self.colors, out_file_png)

            #[[{"classid": 21, "ll": {"y": 663, "x": 333}, "ur": {"y": 238, "x": 991}, "prob": 0.6764760613441467, "label": "bear"}]]

            for ri, r in enumerate(meta['requests']):
                detections = image_detections[
                    ri]  # Examine result for first image
                boxes = []
                # detection will be a dict
                for detection in detections:
                    x1 = detection["ll"]["x"]
                    #y1 = detection["ll"]["y"] # ONEHACK to conform to the way facedetect does corners
                    y1 = detection["ur"]["y"]
                    x2 = detection["ur"]["x"]
                    #y2 = detection["ur"]["y"]
                    y2 = detection["ll"]["y"]
                    label = detection["classid"]
                    boxes.append([x1, y1, x2, y2, label])

                meta['requests'][ri]['boxes'] = boxes
                meta['requests'][ri]['callback'] = self._callback

        self.numProcessed += len(meta['requests'])

        # TODO shouldn't meta always have requests?
        if 'requests' in meta:
            for r in meta['requests']:
                self.put(r)
        del buf
        del payload
Example #23
def predict(sess, image_file):
    image, image_data = preprocess_image(image_file, model_image_size = (608, 608))
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict = {yolo_model.input:image_data, K.learning_phase():0})
    colors = generate_colors(class_names)
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    return np.asarray(image)
Example #24
def main():
    global image_name
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    model.load_weights("model.h5")
    print("loaded heta_map model from disk")

    sess = K.get_session()

    class_names = read_classes("model_data/pascal_classes.txt")
    anchors = read_anchors("model_data/yolo_anchors.txt")

    yolo_model = load_model("model_data/yolo.h5")

    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

    time.sleep(1.0)
    fps = FPS().start()

    #cap = cv2.VideoCapture(1)
    imgResp = urllib.request.urlopen(url)
    imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
    frame = cv2.imdecode(imgNp, -1)
    #ret,frame = cap.read()

    image_shape = (float(frame.shape[0]), float(frame.shape[1]))

    scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
    count = np.zeros(10, dtype=int)
    cnt = -1
    heat_cnt = 0
    while True:
        if heat_cnt == 1080:
            image_name += 1
            time.sleep(3.0)

            img = cv2.resize(frame, (128, 128), interpolation=cv2.INTER_AREA)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = np.expand_dims(img, axis=0)
            img = np.expand_dims(img, axis=3)

            Y_pred = model.predict(img)

            plt.imshow(Y_pred[0, :, :, 0], cmap='hot')
            savefig("out.png")
            ##Below is the heatmap that is to be updated on firebase##
            blob = bucket.blob('main' + str(image_name) + '.png')
            file_to_upload = open('out.png', 'rb')
            blob.upload_from_file(file_to_upload)
            file_to_upload.close()

            oldrange = np.amax(Y_pred[0, :, :, 0]) - np.amin(Y_pred[0, :, :,
                                                                    0])
            if oldrange == 0:
                oldrange = 1
            newrange = 0.035
            Y_pred[0, :, :,
                   0] = ((Y_pred[0, :, :, 0] - np.amin(Y_pred[0, :, :, 0])) *
                         newrange) / oldrange
            ##Below count is the count of people in the room##

            person_count = np.sum(Y_pred[0, :, :, 0])
            db.reference('/Heatmap').update(
                {'numberOfPeople': str(person_count)})
            print(person_count)
            heat_cnt = 0

        else:
            if cnt == 9:
                counts = np.bincount(count)
                print('Number of persons are ' + str(np.argmax(counts)))
                if (np.argmax(counts)):
                    r = req.post('http://192.168.1.2:443/lightNumber',
                                 data='{"ac":true}',
                                 verify=False)
                    print(r.text)
                else:
                    r = req.post('http://192.168.1.2:443/lightNumber',
                                 data='{"ac":false}',
                                 verify=False)
                    print(r.text)
                cnt = 0
            else:
                cnt += 1

            #ret, frame = cap.read()
            imgResp = urllib.request.urlopen(url)
            imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
            frame = cv2.imdecode(imgNp, -1)

            image, image_data = preprocess_image(frame,
                                                 model_image_size=(416, 416))

            out_scores, out_boxes, out_classes = sess.run(
                [scores, boxes, classes],
                feed_dict={
                    yolo_model.input: image_data,
                    K.learning_phase(): 0
                })

            colors = generate_colors(class_names)

            count[cnt] = draw_boxes(image, out_scores, out_boxes, out_classes,
                                    class_names, colors)
            cv2.imshow('frame', np.array(image)[:, :, ::-1])

            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
            fps.update()
            heat_cnt += 1

    fps.stop()
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()
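
The heatmap branch above min-max rescales the predicted density map into a fixed range before summing it into a person count. A standalone sketch of that step, with the same zero-range guard for a constant map (the function and variable names here are illustrative, not from the original source):

import numpy as np

def rescale_density(density, new_range=0.035):
    """Min-max rescale a 2-D density map into [0, new_range].

    Guards against a zero range (a constant map), as the loop above does.
    """
    old_range = density.max() - density.min()
    if old_range == 0:
        old_range = 1
    return (density - density.min()) * new_range / old_range

demo = np.random.rand(128, 128).astype(np.float32)
print(rescale_density(demo).sum())  # the loop above uses this sum as the person count
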
Example #25
    def run(rundir, chanIdx, q, args):
        xspub = xstream.Publisher()
        xssub = xstream.Subscribe(chanIdx2Str(chanIdx))
        runner = Runner(rundir)
        inTensors = runner.get_input_tensors()
        outTensors = runner.get_output_tensors()

        q.put(1)  # ready for work

        fpgaBlobs = None
        labels = xdnn_io.get_labels(args['labels'])
        if args['yolo_version'] == 'v2':
            yolo_postproc = yolo.yolov2_postproc
        elif args['yolo_version'] == 'v3':
            yolo_postproc = yolo.yolov3_postproc
        else:
            assert args['yolo_version'] in ('v2', 'v3'), \
                "--yolo_version should be <v2|v3>"

        biases = bias_selector(args)
        if args['visualize']:
            colors = generate_colors(len(labels))

        while True:
            try:
                payload = xssub.get()
                if not payload:
                    break
                (meta, buf) = payload

                if fpgaBlobs is None:
                    # allocate buffers
                    fpgaBlobs = []
                    batchsz = meta['shape'][0]  # inTensors[0].dims[0]

                    for io in [inTensors, outTensors]:
                        blobs = []
                        for t in io:
                            shape = (batchsz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])
                            blobs.append(np.empty(shape, dtype=np.float32, order='C'))
                        fpgaBlobs.append(blobs)

                    fcOutput = np.empty((batchsz, args['outsz']), dtype=np.float32, order='C')

                fpgaInput = fpgaBlobs[0][0]
                assert tuple(meta['shape']) == fpgaInput.shape
                data = np.frombuffer(buf, dtype=np.float32).reshape(fpgaInput.shape)
                np.copyto(fpgaInput, data)

                jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])
                runner.wait(jid)

                boxes = yolo_postproc(fpgaBlobs[1],
                                      args,
                                      meta['image_shapes'],
                                      biases=biases)

                if not args['profile']:
                    for i in range(min(batchsz, len(meta['image_shapes']))):
                        print("Detected {} boxes in {}".format(
                            len(boxes[i]), meta['images'][i]), flush=True)

                # Save the result
                if (args['results_dir']):
                    for i in range(min(batchsz, len(meta['image_shapes']))):
                        fname = meta['images'][i]
                        filename = os.path.splitext(os.path.basename(fname))[0]
                        out_file_txt = os.path.join(args['results_dir'],
                                                    filename + '.txt')
                        print("Saving {} boxes to {}".format(
                            len(boxes[i]), out_file_txt))
                        sys.stdout.flush()
                        saveDetectionDarknetStyle(out_file_txt, boxes[i],
                                                  meta['image_shapes'][i])

                        if (args['visualize']):
                            out_file_png = os.path.join(
                                args['results_dir'], filename + '.png')
                            print("Saving result to {}".format(out_file_png))
                            sys.stdout.flush()
                            draw_boxes(fname, boxes[i], labels, colors,
                                       out_file_png)

                if meta['id'] % 1000 == 0:
                    print("Recvd query %d" % meta['id'])
                    sys.stdout.flush()

                del data
                del buf
                del payload

                xspub.send(meta['from'], "success")

            except Exception as e:
                logging.error("Worker exception " + str(e))
Example #26
def predict(sess, input_path="images", out_path="out"):
    """
	Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the preditions.

	Arguments:
	sess -- your tensorflow/Keras session containing the YOLO graph
	image_file -- name of an image stored in the "images" folder.

	Returns:
	out_scores -- tensor of shape (None, ), scores of the predicted boxes
	out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
	out_classes -- tensor of shape (None, ), class index of the predicted boxes

	Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. 
	"""

    class_names = read_classes("model_data/coco_classes.txt")
    anchors = read_anchors("model_data/yolo_anchors.txt")
    image_shape = (720., 1280.)

    yolo_model = load_model("model_data/yolo.h5")

    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

    file_list = sorted(os.listdir(input_path))

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    # Preprocess each image and run it through the graph
    for i, image_file in enumerate(file_list):
        image, image_data = preprocess_image(os.path.join(input_path, image_file),
                                             model_image_size=(608, 608))

        # Run the session on the score/box/class tensors. Note that device
        # placement is decided when the graph is built, so wrapping sess.run
        # in tf.device here does not move the computation.
        with tf.device('/device:GPU:0'):
            out_scores, out_boxes, out_classes = sess.run(
                [scores, boxes, classes],
                feed_dict={
                    yolo_model.input: image_data,
                    K.learning_phase(): 0
                })

        # Print predictions info
        print('')
        print('=======[ {}-th process ]======='.format(i))
        print('Found {} boxes for {}'.format(len(out_boxes), image_file))
        print('')

        # Generate colors for drawing bounding boxes.
        colors = generate_colors(class_names)

        # Draw bounding boxes on the image file
        draw_boxes(image, out_scores, out_boxes, out_classes, class_names,
                   colors)

        # Save the predicted bounding box on the image
        image.save(os.path.join(out_path, image_file), quality=90)
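
A minimal driver for this batch variant, reusing the default folder layout (a sketch, assuming the same imports and helpers as the snippets above):

sess = K.get_session()
predict(sess, input_path="images", out_path="out")
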
Example #27
# NOTE: this snippet was truncated mid-loop when it was collected; the capture
# setup and loop head below are reconstructed assumptions, inferred from the
# cap.release() call and the commented-out frame display at the end.
cap = cv2.VideoCapture(0)  # assumed capture source

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # Assumed preprocessing: wrap the frame as a PIL image and resize it to the
    # model input size used throughout these examples.
    img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    resized_image = img.resize((608, 608), Image.BICUBIC)

    image_data = np.array(resized_image, dtype='float32')
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
    b = BytesIO()
    img.save(b, format="jpeg")
    image = Image.open(b)
    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={
            yolo_model.input: image_data,
            input_image_shape: [image.size[1], image.size[0]],
            K.learning_phase(): 0
        })

    colors = generate_colors(class_names)
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    image = np.array(image)
    cv2.imshow('RGB image', image[:, :, ::-1])  # convert RGB back to BGR for OpenCV
    # cv2.imshow('RGB image', frame)

    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()

# import freenect
# import cv2
# import numpy as np
# from io import BytesIO
Example #28
def getFrame(sec):
    vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
    hasFrames, image = vidcap.read()
    if hasFrames:
        path = "/Users/prachis/pet_projects/YOLOv2_keras/images/"
        cv2.imwrite(os.path.join(path, "image" + str(count) + ".jpg"), image)
        # cv2.imwrite("image"+str(count)+".jpg", image)     # save frame as JPG file

        input_image_name = "image" + str(count) + ".jpg"

        # Obtaining the dimensions of the input image
        input_image = Image.open(
            "/Users/prachis/pet_projects/YOLOv2_keras/images/" +
            input_image_name)
        width, height = input_image.size
        width = np.array(width, dtype=float)
        height = np.array(height, dtype=float)

        # Assign the shape of the input image to the image_shape variable
        image_shape = (height, width)

        # Load the classes and the anchor boxes provided in the model_data folder
        class_names = read_classes("model_data/coco_classes.txt")
        anchors = read_anchors("model_data/yolo_anchors.txt")

        # Load the pretrained model. Please refer to the README for how to obtain the yolo.h5 file
        yolo_model = load_model("model_data/yolo.h5")

        # Print the summary of the model
        # yolo_model.summary()

        # Convert final layer features to bounding box parameters
        yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

        # yolo_eval selects the best boxes using score filtering and non-max suppression.
        # To see how this works in detail, refer to keras_yolo.py in yad2k/models.
        # (Note the unpacking order: yolo_eval returns scores, boxes, classes, matching
        # the sess.run call below.)
        scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

        # Initiate a session
        sess = K.get_session()

        # Preprocess the input image before feeding into the convolutional network
        image, image_data = preprocess_image(
            "/Users/prachis/pet_projects/YOLOv2_keras/images/" +
            input_image_name,
            model_image_size=(608, 608))

        # Run the session
        out_scores, out_boxes, out_classes = sess.run(
            [scores, boxes, classes],
            feed_dict={
                yolo_model.input: image_data,
                K.learning_phase(): 0
            })

        # Print the results
        print('Found {} boxes for {}'.format(len(out_boxes), input_image_name))
        # Generate the colors for the bounding boxes
        colors = generate_colors(class_names)
        # Draw the bounding boxes
        draw_boxes(image, out_scores, out_boxes, out_classes, class_names,
                   colors)
        # Apply the predicted bounding boxes to the image and save it
        image.save(os.path.join(
            "/Users/prachis/pet_projects/YOLOv2_keras/out/", input_image_name),
                   quality=90)
        output_image = imageio.imread(
            os.path.join("/Users/prachis/pet_projects/YOLOv2_keras/out/",
                         input_image_name))

    return hasFrames
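
getFrame seeks the capture to a millisecond offset, writes the grabbed frame to disk if one was read, and returns whether the read succeeded, so it is meant to be driven by a loop that steps through the video at a fixed interval. A minimal sketch of such a driver (the video path and sampling rate are illustrative; `vidcap` and `count` are the globals the function expects):

vidcap = cv2.VideoCapture("videos/input.mp4")  # assumed input path
sec = 0.0
count = 1
frameRate = 0.5  # sample one frame every half second
while getFrame(sec):
    count += 1
    sec = round(sec + frameRate, 2)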