def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.

    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes

    """

    # Preprocess your image
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))

    # Run the session, feeding the preprocessed image into the YOLO model
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={yolo_model.input: image_data,
                                                             K.learning_phase(): 0})

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)

    return out_scores, out_boxes, out_classes
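
All of these predict() variants assume that the YOLO graph and its evaluation tensors (yolo_model, class_names, anchors, scores, boxes, classes, sess) already exist in the enclosing scope. A minimal setup sketch follows; the file paths and the import location of the yad2k helpers (yolo_head, yolo_eval, as used in the predict_image example further down) are assumptions, not something shown in the examples themselves.

# Sketch of the setup the predict() helpers above rely on.
# Paths, the anchors file format and the yad2k import path are assumptions.
import numpy as np
from keras import backend as K
from keras.models import load_model
from yad2k.models.keras_yolo import yolo_head, yolo_eval  # assumed import path

with open("model_data/coco_classes.txt") as f:                     # assumed path
    class_names = [c.strip() for c in f.readlines()]
with open("model_data/yolo_anchors.txt") as f:                     # assumed path
    anchors = np.array([float(x) for x in f.read().split(",")]).reshape(-1, 2)

image_shape = (720., 1280.)                                        # assumed (height, width) of the originals

yolo_model = load_model("model_data/yolo.h5")                      # assumed pre-trained model file
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

sess = K.get_session()
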
Example #2
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.
    
    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.
    
    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes
    
    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. 
    """

    # Preprocess your image
    image, image_data = preprocess_image("images/" + image_file,
                                         model_image_size=(608, 608))

    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
    ### START CODE HERE ### (≈ 1 line)
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })
    ### END CODE HERE ###

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)

    return out_scores, out_boxes, out_classes
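
A typical call, assuming the setup sketched above and a test image under images/ (the filename is only illustrative):

# "test.jpg" is an illustrative filename; any image in images/ works.
out_scores, out_boxes, out_classes = predict(sess, "test.jpg")
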
Example #3
def predict_cv2(sess, image_file, delta_time):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.
    delta_time -- elapsed prediction time in seconds, drawn onto the output frame.

    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes

    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
    """
    # Preprocess your image
    image, image_data = preprocess_image("images/" + image_file,
                                         model_image_size=(608, 608))
    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    out_scores, out_boxes, out_classes = sess.run(
        fetches=[scores, boxes, classes],
        feed_dict={
            yolo_model.input: image_data,
            K.learning_phase(): 0
        })
    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)

    #scale_boxes(boxes, image.size)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook
    #output_image = scipy.misc.imread(os.path.join("out", image_file))
    output_image = cv2.imread(os.path.join("out", image_file))
    delta_time = int(delta_time * 1000)
    cv2.putText(output_image, str(delta_time), (200, 200),
                cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
    cv2.imshow("predictions", output_image)

    return out_scores, out_boxes, out_classes
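
predict_cv2 expects the caller to time each prediction and pass the latency back in so it can be overlaid on the next frame; a sketch of such a driver loop, assuming the frames live in the images folder:

import os
import time

import cv2

# Time each call and feed the previous latency into the next one;
# the folder name and the 'q' exit key are assumptions.
prev_delta = 0.0
for frame_name in sorted(os.listdir("images")):
    start = time.time()
    predict_cv2(sess, frame_name, prev_delta)
    prev_delta = time.time() - start
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
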
Example #4
def predict(sess, image_file):

    # Preprocess the image; image_data gains a batch dimension, becoming (1, 608, 608, 3), which is fed to the CNN
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))

    # Feed the data and run the session
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input:image_data, K.learning_phase():0})


    # Print prediction info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    colors = generate_colors(class_names)
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    image.save(os.path.join("out", image_file), quality=90)
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)
    plt.show()
    
    return out_scores, out_boxes, out_classes
Example #5
  def loop(self):
    fpgaOutputShapes = []
    for idx in range(len( self.output_shapes)):
        fpgaOutputShape_l = self.output_shapes[idx]
        fpgaOutputShape_l[0] = self.args['batch_sz']
        fpgaOutputShapes.append(fpgaOutputShape_l)

    if   self.args['yolo_version'] == 'v2': self.yolo_postproc = yolo.yolov2_postproc
    elif self.args['yolo_version'] == 'v3': self.yolo_postproc = yolo.yolov3_postproc

    self.biases = bias_selector(self.args)
    self.labels = xdnn_io.get_labels(self.args['labels'])
    self.colors = generate_colors(len(self.labels))

    while True:
      read_slot = self._shared_output_arrs.openReadId()
      if read_slot is None:
          break

      read_slot_arrs = self._shared_output_arrs.accessNumpyBuffer(read_slot)
      imgList = []
      shape_list = []
      #image_id = self._qFrom.get()
      num_images = (read_slot_arrs[-1].shape)[0]
      for image_num in range(num_images):
          image_id = read_slot_arrs[-1][image_num][0]

          if image_id == -1:
              break
          imgList.append(self.img_paths[int(image_id)])
          shape_list.append(read_slot_arrs[-1][image_num][1:4])

      if self.args["benchmarkmode"]:
        self.numProcessed += len(imgList)
        #self.streamQ.put(sId)
        self._shared_output_arrs.closeReadId(read_slot)
        continue

      self.run(imgList,read_slot_arrs[0:-1], fpgaOutputShapes, shape_list)
      self._shared_output_arrs.closeReadId(read_slot)

    self.finish()
def predict(sess, image_file):

    image, image_data = preprocess_image(image_file,
                                         model_image_size=(608, 608))

    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], {
        yolo_model.input: image_data,
        K.learning_phase(): 0
    })

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    colors = generate_colors(class_names)
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    image.save(os.path.join(r"C:\Users\User\Desktop\Project2", image_file),
               quality=90)
    output_image = scipy.misc.imread(
        os.path.join(r"C:\Users\User\Desktop\Project2", image_file))
    imshow(output_image)

    return out_scores, out_boxes, out_classes
def predict(sess, image_file):


  image, image_data = preprocess_image('images/' + image_file, model_image_size = (608, 608))

  out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input:image_data, K.learning_phase():0})
  ### END CODE HERE ###

  print('Found {} boxes for {}'.format(len(out_boxes), image_file))

  colors = generate_colors(class_names)

  draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

  image.save(os.path.join('out', image_file), quality = 90)

  output_image = scipy.misc.imread(os.path.join('out', image_file))
  imshow(output_image)

  return out_scores, out_boxes, out_classes
Example #8
def predict(sess, image_file):
   
    # Preprocess your image
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))

    # Running the session.
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data, K.learning_phase():0})
    
    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
 
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)
    
    return out_scores, out_boxes, out_classes
Example #9
    def _run(self, imgList, imgShape, fpgaOutput):
        if self.numProcessed == 0:
            self.startTime = timeit.default_timer()
            self.labels = xdnn_io.get_labels(self._args['labels'])
            self.colors = generate_colors(len(self.labels))
            self.zmqPub = None
            if self._args['zmqpub']:
                self.zmqPub = ZmqResultPublisher(self._args['deviceID'])
            self.goldenMap = None
            if self._args['golden']:
                #self.goldenMap = xdnn_io.getGoldenMap(self._args['golden'])
                self.top5Count = 0
                self.top1Count = 0

        bboxes = yolo.yolov2_postproc([fpgaOutput],
                                      self._args,
                                      imgShape,
                                      biases=self.biases)

        #if self._args['servermode']:
        return bboxes
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.
    
    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.
    
    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes
    
    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. 
    """

    # Preprocess your image
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))

    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
    ### START CODE HERE ### (≈ 1 line)
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data, 
                                                                                       K.learning_phase(): 0})

    ### END CODE HERE ###

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)
    
    return out_scores, out_boxes, out_classes
Example #11
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder, or an in-memory image array (e.g. a video frame).

    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes

    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
    """
    # np.savetxt("output.txt", dept_array, fmt="%i")
    # print(dept_array)

    if image_file.__class__ == str:
        image, image_data = preprocess_image("images/" + image_file,
                                             model_image_size=(416, 416))
    else:
        image = PIL.Image.fromarray(image_file)
        image, image_data = preprocess_image(image,
                                             model_image_size=(416, 416))

    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })
    # print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    labels = draw_boxes(image, out_scores, out_boxes, out_classes, class_names,
                        colors)

    return out_scores, out_boxes, out_classes, image, labels
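
Because this variant accepts either a filename or an in-memory array, it can be fed directly from a video capture; a sketch (the camera index and the BGR-to-RGB conversion are assumptions about the caller's data):

import cv2

# From a filename (resolved to the images/ folder inside predict itself):
out_scores, out_boxes, out_classes, image, labels = predict(sess, "test.jpg")

# From a raw frame: PIL.Image.fromarray expects RGB, so convert OpenCV's BGR first.
cap = cv2.VideoCapture(0)          # assumed camera index
ret, frame = cap.read()
if ret:
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    out_scores, out_boxes, out_classes, image, labels = predict(sess, rgb)
cap.release()
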
Example #12
def predict(sess, image_file):

    # Preprocess your image
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))

    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input:image_data, K.learning_phase():0})

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)
    
    return out_scores, out_boxes, out_classes
Example #13
def predict(sess, image_file):

    image, image_data = preprocess_image("in2/" + image_file,
                                         model_image_size=(416, 416))

    # Run the yolo model

    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    colors = generate_colors(class_names)

    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    image.save(os.path.join("out8", image_file), quality=90)

    output_image = scipy.misc.imread(os.path.join("out8", image_file))
    return out_scores, out_boxes, out_classes
def predict(sess, image_file):

    # Preprocess your image
    image, image_data = preprocess_image("images/" + image_file,
                                         model_image_size=(608, 608))
    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    out_scores, out_boxes, out_classes = sess.run(
        fetches=[scores, boxes, classes],
        feed_dict={
            yolo_model.input: image_data,
            K.backend.learning_phase(): 0
        })
    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)

    return out_scores, out_boxes, out_classes
Example #15
def predict(sess, image_file):

    # Preprocess your image
    image, image_data = preprocess_image(image_file,
                                         model_image_size=(608, 608))

    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={
            yolo_model.input: image_data,
            input_image_shape: [image.size[1], image.size[0]],
            K.learning_phase(): 0
        })

    # Print predictions info
    Filter = []
    counter = 0  # counter of non humans
    for i in range(len(out_classes)):
        if out_classes[i] != 0:
            Filter.append(False)
            counter = counter + 1
        else:
            Filter.append(True)
    numberValid = len(out_classes) - counter
    for i in range(len(out_classes)):
        out_classes[i] = Filter[i] * out_classes[i]
    out_classes = out_classes[0:numberValid]
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)
    imageio.imwrite("output.jpg", output_image)
    #print('output classes: ',out_classes.eval())
    return out_scores, out_boxes, out_classes
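
The person-only filter above can be expressed more compactly with a boolean mask, which also keeps the scores and boxes aligned with the surviving classes; a sketch, assuming class index 0 is "person" as in the loop above:

# out_scores / out_boxes / out_classes come back from sess.run as NumPy arrays,
# so a boolean mask keeps all three aligned. Class index 0 is assumed to be "person".
keep = out_classes == 0
out_scores = out_scores[keep]
out_boxes = out_boxes[keep]
out_classes = out_classes[keep]
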
Example #16
def predict(sess, image_file):
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data, K.learning_phase(): 0})

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)

    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)

    # Display the results in the notebook
    output_image = scipy.misc.imread(os.path.join("out", image_file))

    plt.figure(figsize=(12,12))
    imshow(output_image)

    return out_scores, out_boxes, out_classes
Example #17
def predict_image(yolo_model, class_names, anchors, image_shape, in_dir, out_dir, image_name, score_th=0.6):
    print('_'*40) 
    print('Processing ', image_name)
    
    # Step 1 - Preprocess input image
    # Yolo requires 608x608 size
    image, image_data = preprocess_image(in_dir+image_name, model_image_size = (608, 608))

    # Step 2 - Define Tensorflow computation graph
    # The output of yolo_model is a (m, 19, 19, 5, 85) tensor that needs to 
    # pass through non-trivial processing and conversion. 
    # yolo_head function from yad2k does that for you.
    yolo_output_good = yolo_head(yolo_model.output, anchors, len(class_names))

    # Convert the output of YOLO encoding (a lot of boxes) 
    # to the predicted boxes along with their scores, box coordinates and classes
    scores, boxes, classes = yolo_eval(yolo_output_good, image_shape, max_boxes=10, score_threshold=score_th, iou_threshold=.5)

    # Step 3 - Run TF session
    sess = K.get_session()
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], 
                                                  feed_dict={yolo_model.input: image_data, K.learning_phase(): 0})
    # output:
    # out_scores -- tensor of shape (None, ), scores of the predicted boxes
    # out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    # out_classes -- tensor of shape (None, ), class index of the predicted boxes 
    print('Found {} boxes for {}'.format(len(out_boxes), image_name))

    # Step 4 - Generate the output image
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)

    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
 
    # Save the predicted bounding box on the image
    image.save(out_dir+image_name, quality=100)
    print('Saved new', image_name)
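
A sketch of driving predict_image over a folder of inputs, assuming a pre-trained yolo.h5 and the class_names/anchors loaded as in the setup sketch near the top (all paths are assumptions):

import os

from keras.models import load_model

yolo_model = load_model("model_data/yolo.h5")   # assumed model file
image_shape = (720., 1280.)                     # assumed (height, width) of the originals

# class_names and anchors as loaded in the earlier setup sketch.
for name in sorted(os.listdir("in/")):          # assumed input folder
    predict_image(yolo_model, class_names, anchors, image_shape,
                  in_dir="in/", out_dir="out/", image_name=name, score_th=0.6)
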
Example #18
def predict(sess, image_file, is_show_info=True, is_plot=True):
    """
    Runs the computation graph stored in "sess" to predict bounding boxes for "image_file", and prints/plots the predictions.

    Arguments:
        sess - TensorFlow/Keras session containing the YOLO computation graph.
        image_file - name of an image stored in the "images" folder.
    Returns:
        out_scores - tensor of shape (None, ), predicted scores of the anchor boxes.
        out_boxes - tensor of shape (None, 4), positions of the anchor boxes.
        out_classes - tensor of shape (None, ), predicted class indices of the anchor boxes.
    """
    # Preprocess the image

    image, image_data = yolo_utils.preprocess_image(r"E:\深度学习\第四课第三周编程作业\Car detection for Autonomous Driving\images\\" + image_file, model_image_size = (608, 608))

    # Run the session, choosing the correct placeholders in the feed_dict.
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict = {yolo_model.input:image_data, K.learning_phase(): 0})

    # Print prediction info
    if is_show_info:
        print("在" + str(image_file) + "中找到了" + str(len(out_boxes)) + "个锚框。")

    # Pick colors for drawing the bounding boxes
    colors = yolo_utils.generate_colors(class_names)

    # Draw the bounding boxes on the image
    yolo_utils.draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Save the image with the bounding boxes drawn
    image.save(os.path.join("out", image_file), quality=100)

    # Display the image with the bounding boxes drawn
    if is_plot:
        output_image = scipy.misc.imread(os.path.join("out", image_file))
        plt.imshow(output_image)

    return out_scores, out_boxes, out_classes
Example #19
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.
    
    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.
    
    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes
    
    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. 
    """

   
    image, image_data = preprocess_image("images/" + image_file, model_image_size=(608, 608))

    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={yolo_model.input: image_data,
                                                             K.learning_phase(): 0})

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    colors = generate_colors(class_names)

    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    image.save(os.path.join("out", image_file), quality=90)

    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)

    return out_scores, out_boxes, out_classes
Example #20
def predict(sess, image_file):
    # Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    # Preprocess your image
    image, image_data = preprocess_image("Images/" + image_file,
                                         model_image_size=(608, 608))

    out_scores, out_boxes, out_classes = sess.run((scores, boxes, classes),
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("output", image_file), quality=90)

    return out_scores, out_boxes, out_classes
Example #21
    def predict(sess, image):
        # Preprocess your image
        image, image_data = preprocess_image(image,
                                             model_image_size=(416, 416))

        # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
        # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
        out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                      feed_dict={
                                                          yolo_model.input:
                                                          image_data,
                                                          K.learning_phase(): 0
                                                      })

        # Print predictions info
        print('Found {} boxes'.format(len(out_boxes)))
        # Generate colors for drawing bounding boxes.
        colors = generate_colors(class_names)
        # Draw bounding boxes on the image file
        out_image = draw_boxes(image, out_scores, out_boxes, out_classes,
                               class_names, colors)

        return out_image, out_scores, out_boxes, out_classes
def yolo_pipeline(vid_frame):
    # Preprocess video frame
    image, image_data = preprocess_vid(vid_frame, model_image_size=(608, 608))

    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
    ### START CODE HERE ### (≈ 1 line)
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })
    ### END CODE HERE ###

    # Print predictions info
    #print('Found {} boxes'.format(len(out_boxes)))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    return np.asarray(image)
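
yolo_pipeline takes one RGB frame and returns the annotated frame as a NumPy array, which is exactly the callback shape moviepy expects; a sketch of applying it to a clip (the file names are assumptions):

from moviepy.editor import VideoFileClip

# Run the frame-level pipeline over every frame of a clip and write the result.
clip = VideoFileClip("project_video.mp4")            # assumed input file
annotated = clip.fl_image(yolo_pipeline)             # yolo_pipeline: RGB frame -> RGB frame
annotated.write_videofile("project_video_out.mp4", audio=False)
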
Example #23
def predict(sess, image_file):
    '''
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.
    '''

    # processing the image data
    image, image_data = preprocess_image("images/" + image_file,
                                         model_image_size=(608, 608))

    # Running the session with the correct tensors and choosing the correct placeholders in the feed_dict.
    # We will need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    # Generating colors for drawing bounding boxes.
    colors = generate_colors(class_names)

    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # saving the image
    image.save(os.path.join("out", image_file), quality=90)

    # displaying the image
    img = cv2.imread(os.path.join("out", image_file), 1)
    cv2.imshow('output', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    return out_scores, out_boxes, out_classes
Example #24
def predict(sess, image_file):
    """
    Predict boxes for "image_file"

    Arguments:
    sess -- Keras session
    image_file -- name of a test image stored in the "images" folder.

    Returns:
    out_scores -- scores of the predicted boxes
    out_boxes -- coordinates of the predicted boxes
    out_classes -- class index of the predicted boxes
    """

    image, image_data = preprocess_image("images/" + image_file,
                                         model_image_size=(416, 416))

    out_scores, out_boxes, out_classes = sess.run(yolo_eval(yolo_outputs),
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    # Predictions info
    # print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    output_stats = draw_boxes(image, out_scores, out_boxes, out_classes,
                              class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    output_image = scipy.misc.imread(os.path.join("out", image_file))

    return out_scores, out_boxes, out_classes, output_image, output_stats
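
Note that this variant rebuilds the yolo_eval ops inside every sess.run call, so repeated predictions keep adding nodes to the graph; a sketch of building the tensors once, assuming the same yolo_outputs tensor (only the session-run part is shown):

# Build the evaluation tensors once; repeated predict() calls then just reuse them.
scores, boxes, classes = yolo_eval(yolo_outputs)

def predict(sess, image_file):
    image, image_data = preprocess_image("images/" + image_file,
                                         model_image_size=(416, 416))
    return sess.run([scores, boxes, classes],
                    feed_dict={yolo_model.input: image_data,
                               K.learning_phase(): 0})
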
def predict(sess, image_file, is_show_info=True, is_plot=True):
    """
    Runs the computation graph stored in sess to predict bounding boxes for image_file, and prints/plots the predictions.
    :param sess: TensorFlow/Keras session containing the YOLO computation graph
    :param image_file: name of an image stored in the images folder
    :param is_show_info: whether to print the number of detected boxes
    :param is_plot: whether to display the annotated image
    :return:
            out_scores: tensor, (None, ), predicted scores of the anchor boxes
            out_boxes: tensor, (None, 4), positions of the anchor boxes
            out_classes: tensor, (None, ), predicted class indices of the anchor boxes
    """
    image, image_data = preprocess_image(image_file, model_image_size=(608, 608))  # Preprocess the image
    out_scores, out_boxes, out_classes = sess.run([scores,boxes,classes],feed_dict={yolo_model.input:image_data, K.learning_phase():0})
    if is_show_info:
        print("在" + str(image_file)+"中找到"+str(len(out_boxes))+"个锚框。")
    colors = generate_colors(class_names)
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    image.save(os.path.join('C:\\Users\\korey\\Desktop\\car',image_file), quality=90)
    if is_plot:
        out_image = plt.imread(os.path.join('C:\\Users\\korey\\Desktop\\car',image_file))
        plt.imshow(out_image)
        plt.show()
    return out_scores, out_boxes, out_classes
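
With printing and plotting disabled, the same function can be looped over a whole folder of frames; a minimal sketch (the folder name is an assumption):

import os

frame_dir = "car_frames"                              # assumed folder of input images
for name in sorted(os.listdir(frame_dir)):
    predict(sess, os.path.join(frame_dir, name),
            is_show_info=False, is_plot=False)
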
Example #26
def predict_cv2_fast(sess, image):
    """
    Runs the graph stored in "sess" to predict boxes for the given image. Prints and plots the predictions.

    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image -- an OpenCV image (e.g. a frame from cv2.VideoCapture or cv2.imread)

    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes

    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
    """
    # Preprocess your image
    image, image_data = preprocess_image_cv2(image,
                                             model_image_size=(608, 608))
    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    start_time = time.time()
    out_scores, out_boxes, out_classes = sess.run(
        fetches=[scores, boxes, classes],
        feed_dict={
            yolo_model.input: image_data,
            K.learning_phase(): 0
        })
    end_time = time.time()
    delta_time = end_time - start_time
    colors = generate_colors(class_names)
    draw_boxes_cv2(image, out_scores, out_boxes, out_classes, class_names,
                   colors)
    delta_time = int(delta_time * 1000)
    cv2.putText(image, str(delta_time), (200, 200), cv2.FONT_HERSHEY_SIMPLEX,
                2, (255, 255, 255), 2)
    cv2.imshow("predictions", image)
    return out_scores, out_boxes, out_classes
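
predict_cv2_fast takes a raw OpenCV frame, so it drops straight into a capture loop like the fragment further down; a sketch (the camera index and exit key are assumptions):

import cv2

# Feed webcam frames directly into predict_cv2_fast; press q to quit.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    predict_cv2_fast(sess, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
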
Example #27
def predictFromImg(sess, image, scores, boxes, classes, yolo_model,
                   class_names):

    # preprocess_image
    model_image_size = (608, 608)
    resized_image = image.resize(tuple(reversed(model_image_size)),
                                 Image.BICUBIC)
    image_data = np.array(resized_image, dtype='float32')
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    colors = generate_colors(class_names)

    draw_boxes_2(image, out_scores, out_boxes, out_classes, class_names,
                 colors)

    return out_scores, out_boxes, out_classes
Example #28
def predict(sess, image_file):
    """
    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes
    """
    image, image_data = preprocess_image("image/" + image_file,
                                         model_image_size=(608, 608))
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                  feed_dict={
                                                      yolo_model.input:
                                                      image_data,
                                                      K.learning_phase(): 0
                                                  })

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    colors = generate_colors(class_names)
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    image.save(os.path.join("out", image_file), quality=90)
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    plt.imshow(output_image)
    plt.show()
    return out_scores, out_boxes, out_classes
Example #29
    def __init__(self,
                 batch_sz=10,
                 in_shape=[3, 608, 608],
                 quantizecfg="yolo_deploy_608.json",
                 xclbin=None,
                 netcfg="yolo.cmds",
                 datadir="yolov2.caffemodel_data",
                 labels="coco.names",
                 xlnxlib="libxfdnn.so",
                 firstfpgalayer="conv0",
                 classes=80,
                 verbose=False):

        if verbose:
            log.basicConfig(format="%(levelname)s: %(message)s",
                            level=log.DEBUG)
            log.info("Running with verbose output")
        else:
            log.basicConfig(format="%(levelname)s: %(message)s")

        if xclbin is None:
            log.error(
                "XYOLO initialized without reference to xclbin, please set this before calling detect!!"
            )
            sys.exit(1)

        self.xdnn_handle = None

        log.info("Reading labels...")
        with open(labels) as f:
            names = f.readlines()
        self.names = [x.strip() for x in names]

        # Arguments exposed to user
        self.in_shape = in_shape
        self.quantizecfg = quantizecfg
        self.xclbin = xclbin
        self.netcfg = netcfg
        self.datadir = datadir
        self.labels = labels
        self.xlnxlib = xlnxlib
        self.batch_sz = batch_sz
        self.firstfpgalayer = firstfpgalayer  # User may be using their own prototxt w/ unique names
        self.classes = classes  # User may be using their own prototxt with different region layer

        # Arguments not exposed to user
        ## COCO categories are not sequential
        self.cats = [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
            21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
            41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
            59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79,
            80, 81, 82, 84, 85, 86, 87, 88, 89, 90
        ]
        self.images = None
        self.scaleA = 10000
        self.scaleB = 30
        self.PE = -1
        self.transform = "yolo"  # XDNN_IO will scale/letterbox the image for YOLO network
        self.img_mean = "0,0,0"
        self.net_w = self.in_shape[1]
        self.net_h = self.in_shape[2]
        self.out_w = self.out_h = (self.net_w / 32)
        self.bboxplanes = 5
        #self.classes = 80
        self.scorethresh = 0.24
        self.iouthresh = 0.3
        self.groups = self.out_w * self.out_h
        self.coords = 4
        self.groupstride = 1
        self.batchstride = (self.groups) * (self.classes + self.coords + 1)
        self.beginoffset = (self.coords + 1) * (self.out_w * self.out_h)
        self.outsize = (self.out_w * self.out_h *
                        (self.bboxplanes + self.classes)) * self.bboxplanes
        self.colors = generate_colors(
            self.classes)  # Generate color pallette for drawing boxes

        config = vars(self)

        self.q_fpga = Queue(maxsize=1)
        self.q_bbox = Queue(maxsize=1)
        self.proc_fpga = Process(target=self.fpga_stage,
                                 args=(config, self.q_fpga, self.q_bbox))
        self.proc_bbox = Process(target=self.bbox_stage,
                                 args=(config, self.q_bbox))
        self.proc_fpga.start()
        self.proc_bbox.start()

        log.info("Running network input %dx%d and output %dx%d" %
                 (self.net_w, self.net_h, self.out_w, self.out_h))
    resized_image = img.resize((608, 608), Image.BICUBIC)
    image_data = np.array(resized_image, dtype='float32')
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
    b = BytesIO()
    img.save(b, format="jpeg")
    image = Image.open(b)
    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={
            yolo_model.input: image_data,
            input_image_shape: [image.size[1], image.size[0]],
            K.learning_phase(): 0
        })

    colors = generate_colors(class_names)
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    image = np.array(image)
    cv2.imshow('RGB image', image)
    # cv2.imshow('RGB image',frame)

    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()

# import freenect
# import cv2
# import numpy as np
Example #31
    def run(rundir, chanIdx, q, args):
        xspub = xstream.Publisher()
        xssub = xstream.Subscribe(chanIdx2Str(chanIdx))
        runner = Runner(rundir)
        inTensors = runner.get_input_tensors()
        outTensors = runner.get_output_tensors()

        q.put(1)  # ready for work

        fpgaBlobs = None
        labels = xdnn_io.get_labels(args['labels'])
        if args['yolo_version'] == 'v2': yolo_postproc = yolo.yolov2_postproc
        elif args['yolo_version'] == 'v3': yolo_postproc = yolo.yolov3_postproc
        else:
            assert args['yolo_version'] in (
                'v2', 'v3'), "--yolo_version should be <v2|v3>"

        biases = bias_selector(args)
        if (args['visualize']): colors = generate_colors(len(labels))

        while True:
            try:
                payload = xssub.get()
                if not payload:
                    break
                (meta, buf) = payload

                if fpgaBlobs == None:
                    # allocate buffers
                    fpgaBlobs = []
                    batchsz = meta['shape'][0]  # inTensors[0].dims[0]

                    for io in [inTensors, outTensors]:
                        blobs = []
                        for t in io:
                            shape = (batchsz, ) + tuple(
                                [t.dims[i] for i in range(t.ndims)][1:])
                            blobs.append(
                                np.empty((shape), dtype=np.float32, order='C'))
                        fpgaBlobs.append(blobs)

                    fcOutput = np.empty((
                        batchsz,
                        args['outsz'],
                    ),
                                        dtype=np.float32,
                                        order='C')

                fpgaInput = fpgaBlobs[0][0]
                assert (tuple(meta['shape']) == fpgaInput.shape)
                data = np.frombuffer(buf,
                                     dtype=np.float32).reshape(fpgaInput.shape)
                np.copyto(fpgaInput, data)

                jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])
                runner.wait(jid)

                boxes = yolo_postproc(fpgaBlobs[1],
                                      args,
                                      meta['image_shapes'],
                                      biases=biases)

                if (not args['profile']):
                    for i in range(min(batchsz, len(meta['image_shapes']))):
                        print("Detected {} boxes in {}".format(
                            len(boxes[i]), meta['images'][i]),
                              flush=True)

                # Save the result
                if (args['results_dir']):
                    for i in range(min(batchsz, len(meta['image_shapes']))):
                        fname = meta['images'][i]
                        filename = os.path.splitext(os.path.basename(fname))[0]
                        out_file_txt = os.path.join(args['results_dir'],
                                                    filename + '.txt')
                        print("Saving {} boxes to {}".format(
                            len(boxes[i]), out_file_txt))
                        sys.stdout.flush()
                        saveDetectionDarknetStyle(out_file_txt, boxes[i],
                                                  meta['image_shapes'][i])

                        if (args['visualize']):
                            out_file_png = os.path.join(
                                args['results_dir'], filename + '.png')
                            print("Saving result to {}".format(out_file_png))
                            sys.stdout.flush()
                            draw_boxes(fname, boxes[i], labels, colors,
                                       out_file_png)

                if meta['id'] % 1000 == 0:
                    print("Recvd query %d" % meta['id'])
                    sys.stdout.flush()

                del data
                del buf
                del payload

                xspub.send(meta['from'], "success")

            except Exception as e:
                logging.error("Worker exception " + str(e))
Example #32
def run(args=None):
    if not args:
        parser = xdnn_io.default_parser_args()
        parser = yolo_parser_args(parser)
        parser.add_argument('--startxstream',
                            default=True,
                            action='store_true',
                            help='automatically start obj store server')
        parser.add_argument('--servermode',
                            default=False,
                            action='store_true',
                            help='accept images from another process')
        parser.add_argument("--deploymodel",
                            type=str,
                            default='',
                            help='Original prototxt')
        parser.add_argument("--caffemodel",
                            type=str,
                            default='',
                            help='Original caffemodel')

        args = parser.parse_args()
        args = xdnn_io.make_dict_args(args)
        args['preprocseq'] = [('resize', (224, 224)),
                              ('meansub', [104.007, 116.669, 122.679]),
                              ('chtranspose', (2, 0, 1))]

    if (args['golden'] or args['visualize']):
        assert args['labels'], "Provide --labels to compute mAP."
        assert args[
            'results_dir'], "For accuracy measurements, provide --results_dir to save the detections."
        labels = xdnn_io.get_labels(args['labels'])
        colors = generate_colors(len(labels))

    args['startxstream'] = True
    args['servermode'] = False

    timerQ = Queue()
    args['timerQ'] = timerQ

    compJson = xdnn.CompilerJsonParser(args['netcfg'])
    firstInputShape = next(itervalues(compJson.getInputs()))
    args['net_h'] = firstInputShape[2]
    args['net_w'] = firstInputShape[3]

    # start object store
    # (make sure to 'pip install pyarrow')
    xserver = None
    if args['startxstream']:
        xserver = xstream.Server()

    graph = grapher.Graph("yolo_v2")
    graph.node("prep", yolov2_pre.Node, args)
    graph.node("fpga", yolov2_fpga.Node, args)
    graph.node("post", yolov2_post.Node, args)

    graph.edge("START", None, "prep")
    graph.edge("prep", "prep", "fpga")
    graph.edge("fpga", "fpga", "post")
    graph.edge("DONE", "post", "fpga")
    graph.edge("DONE", "post", None)

    if not args['servermode']:
        graph.serve(background=True)
        img_paths = xdnn_io.getFilePaths(args['images'])

        reqProc = mp.Process(target=request_process,
                             args=(
                                 args,
                                 img_paths,
                                 graph._in[0],
                                 graph._out[0],
                             ))

        t = timeit.default_timer()
        reqProc.start()
        reqProc.join()
        graph.stop(kill=False)
        t2 = args['timerQ'].get()
        full_time = t2 - t

        args['timerQ'].close()

        print("Total time : {}s for {} images".format(full_time,
                                                      len(img_paths)))
        print("Average FPS : {} imgs/sec".format(len(img_paths) / full_time))
    else:
        print("Serving %s -> %s" % (graph._in[0], graph._out[0]))
        graph.serve()

    # mAP calculation
    if (args['golden']):
        print(flush=True)
        print("Computing mAP score  : ", flush=True)
        print("Class names are  : {} ".format(labels), flush=True)
        mAP = calc_detector_mAP(args['results_dir'], args['golden'], len(labels), labels,\
                args['prob_threshold'], args['mapiouthresh'], args['points'])
        sys.stdout.flush()