Example #1
def _main_(args):
    config_path  = args.conf
    weights_path = args.weights
    image_path   = args.input

    with open(config_path) as config_buffer:    
        config = json.load(config_buffer)

    ###############################
    #   Make the model 
    ###############################

    yolo = YOLO(backend             = config['model']['backend'],
                input_size          = config['model']['input_size'], 
                labels              = config['model']['labels'], 
                max_box_per_image   = config['model']['max_box_per_image'],
                anchors             = config['model']['anchors'])

    ###############################
    #   Load trained weights
    ###############################    

    yolo.load_weights(weights_path)

    ###############################
    #   Predict bounding boxes 
    ###############################

    if image_path[-4:] == '.mp4':
        video_out = image_path[:-4] + '_detected' + image_path[-4:]
        video_reader = cv2.VideoCapture(image_path)

        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

        video_writer = cv2.VideoWriter(video_out,
                               cv2.VideoWriter_fourcc(*'MPEG'), 
                               50.0, 
                               (frame_w, frame_h))

        for i in tqdm(range(nb_frames)):
            _, image = video_reader.read()
            
            boxes = yolo.predict(image)
            image = draw_boxes(image, boxes, config['model']['labels'])

            video_writer.write(np.uint8(image))

        video_reader.release()
        video_writer.release()  
    else:
        image = cv2.imread(image_path)
        boxes = yolo.predict(image)
        image = draw_boxes(image, boxes, config['model']['labels'])

        print(len(boxes), 'boxes are found')

        cv2.imwrite(image_path[:-4] + '_detected' + image_path[-4:], image)
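
For reference, a minimal driver for this entry point might look like the following sketch; the flag names are assumptions read off the args.conf / args.weights / args.input accesses above, not confirmed by the source.

if __name__ == '__main__':
    import argparse
    argparser = argparse.ArgumentParser(description='detect objects in an image or .mp4 video')
    argparser.add_argument('-c', '--conf', help='path to the configuration file')
    argparser.add_argument('-w', '--weights', help='path to the trained weights')
    argparser.add_argument('-i', '--input', help='path to the input image or video')
    _main_(argparser.parse_args())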
Example #2
def _main_(args):
    config_path = "config.json"
    weights_path = "trained_wts.h5"
    image_path = "m.jpg"

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    ###############################
    #   Make the model
    ###############################

    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load trained weights
    ###############################

    yolo.load_weights(weights_path)

    ###############################
    #   Predict bounding boxes
    ###############################

    if image_path[-4:] == '.mp4':
        video_out = image_path[:-4] + '_detected' + image_path[-4:]
        video_reader = cv2.VideoCapture(image_path)

        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

        video_writer = cv2.VideoWriter(video_out,
                                       cv2.VideoWriter_fourcc(*'MPEG'), 50.0,
                                       (frame_w, frame_h))

        for i in tqdm(range(nb_frames)):
            _, image = video_reader.read()

            boxes = yolo.predict(image)
            image = draw_boxes(image, boxes, config['model']['labels'])

            video_writer.write(np.uint8(image))

        video_reader.release()
        video_writer.release()
    else:
        image = cv2.imread(image_path)
        boxes = yolo.predict(image)
        image = draw_boxes(image, boxes, config['model']['labels'])

        print(len(boxes), 'boxes are found')

        cv2.imwrite(image_path[:-4] + '_detected' + image_path[-4:], image)
Example #3
def draw_and_show(detected_boxes, pil_im):
    filtered_boxes = non_max_suppression(
        detected_boxes,
        confidence_threshold=FLAGS.conf_threshold,
        iou_threshold=FLAGS.iou_threshold)
    draw_boxes(filtered_boxes, pil_im, classes, (FLAGS.size, FLAGS.size), True)
    img = np.array(pil_im)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    cv2.imshow('CSI Camera', img)
Example #4
def _main_():
    config_path  = "./config.json"
    weights_path = "./weights.h5"

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    ###############################
    #   Make the model
    ###############################

    # assumed from the other examples in this listing: build the model from config
    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load trained weights
    ###############################

    yolo.load_weights(weights_path)

    ###############################
    #   Predict bounding boxes
    ###############################

    # assumed: frames come from the default camera, which the loop below reads
    cap = cv2.VideoCapture(0)

    last_recorded_time = time.time()

    while True:
        curr_time = time.time()

        # Capture frame-by-frame
        ret, frame = cap.read()

        # checks if 2 or more seconds have passed since last [placeholder]
        if curr_time - last_recorded_time >= 2.0:
            cv2.imshow('', frame)
            boxes = yolo.predict(frame)

            # [placeholder for api call]
            if len(boxes) > 0:
                print(boxes[0].get_score())

            frame2 = draw_boxes(frame, boxes, config['model']['labels'])

            # Display the resulting frame
            cv2.imshow('', frame2)

            # Stores last time frame was drawn
            last_recorded_time = curr_time
        else:
            cv2.imshow('', frame)
            boxes = yolo.predict(frame)
            frame2 = draw_boxes(frame, boxes, config['model']['labels'])
            cv2.imshow('', frame2)

        # press q to quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #5
 def detect_staff(self, staff_dir: str) -> None:
     staff_imgs = load_imgs(staff_dir)
     detector = Detector(self,
                         staff_imgs,
                         self._threshold_staff,
                         is_staff=True)
     self._staff_boxes = detector.detect()
     self._staff_boxes.sort(key=lambda box: box.y)
     draw_boxes('staff_boxes_img.png', self._img_rgb, self._staff_boxes)
Example #6
def main(argv=None):

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)

    config = tf.ConfigProto(
        gpu_options=gpu_options,
        log_device_placement=False,
    )

    img = Image.open(FLAGS.input_img)
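    # letter_box_image (assumption): resize keeping aspect ratio, padding to size x size with gray value 128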
    img_resized = letter_box_image(img, FLAGS.size, FLAGS.size, 128)
    img_resized = img_resized.astype(np.float32)
    classes = load_coco_names(FLAGS.class_names)

    if FLAGS.frozen_model:

        t0 = time.time()
        frozenGraph = load_graph(FLAGS.frozen_model)
        print("Loaded graph in {:.2f}s".format(time.time() - t0))

        boxes, inputs = get_boxes_and_inputs_pb(frozenGraph)

        with tf.Session(graph=frozenGraph, config=config) as sess:
            t0 = time.time()
            detected_boxes = sess.run(boxes, feed_dict={inputs: [img_resized]})

    else:
        if FLAGS.tiny:
            model = yolo_v3_tiny.yolo_v3_tiny
        elif FLAGS.spp:
            model = yolo_v3.yolo_v3_spp
        else:
            model = yolo_v3.yolo_v3

        boxes, inputs = get_boxes_and_inputs(model, len(classes), FLAGS.size,
                                             FLAGS.data_format)

        saver = tf.train.Saver(var_list=tf.global_variables(scope='detector'))

        with tf.Session(config=config) as sess:
            t0 = time.time()
            saver.restore(sess, FLAGS.ckpt_file)
            print('Model restored in {:.2f}s'.format(time.time() - t0))

            t0 = time.time()
            detected_boxes = sess.run(boxes, feed_dict={inputs: [img_resized]})

    filtered_boxes = non_max_suppression(
        detected_boxes,
        confidence_threshold=FLAGS.conf_threshold,
        iou_threshold=FLAGS.iou_threshold)
    print("Predictions found in {:.2f}s".format(time.time() - t0))

    draw_boxes(filtered_boxes, img, classes, (FLAGS.size, FLAGS.size), True)

    img.save(FLAGS.output_img)
Example #7
def main(args):
    config_path = args.conf
    weights_path = args.weights
    image_path = args.input

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)
    # build model
    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    # load pretrained model
    print(weights_path)
    yolo.load_weights(weights_path)

    # predict bounding box
    if image_path[-4:] == '.mp4':
        input_file = os.path.basename(args.input)
        video_out = args.output + config['model']['architecture'].replace(
            " ", "") + "_" + input_file[:-4] + ".mp4"

        video_reader = cv2.VideoCapture(image_path)

        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

        video_writer = cv2.VideoWriter(video_out,
                                       cv2.VideoWriter_fourcc(*'MPEG'), 30.0,
                                       (frame_w, frame_h))

        for i in tqdm(range(nb_frames)):
            _, image = video_reader.read()

            # boxes is list of box. normalize to 0~1 with input shape
            # box.x: xmin, box.y: ymin, box.w: box width, box.h: box height
            boxes = yolo.predict(image)
            image = draw_boxes(image, boxes, config['model']['labels'])

            video_writer.write(np.uint8(image))

        video_reader.release()
        video_writer.release()
    else:
        image = cv2.imread(image_path)
        boxes = yolo.predict(image)
        image = draw_boxes(image, boxes, config['model']['labels'])

        print(len(boxes), 'boxes are found')

        input_file = os.path.basename(args.input)
        cv2.imwrite(args.output +
                    config['model']['architecture'].replace(" ", "") + "_" +
                    input_file[:-4] + ".png", image)
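
Following the in-code comment above about normalized box fields, a hypothetical helper (not part of the source) to recover pixel corners could read:

def to_pixel_coords(box, frame_w, frame_h):
    # assumption: box.x / box.y hold the normalized top-left corner and
    # box.w / box.h the normalized width and height, as the comment states
    xmin = int(box.x * frame_w)
    ymin = int(box.y * frame_h)
    xmax = int((box.x + box.w) * frame_w)
    ymax = int((box.y + box.h) * frame_h)
    return xmin, ymin, xmax, ymax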
Example #8
def main(argv=None):

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)

    config = tf.ConfigProto(
        gpu_options=gpu_options,
        log_device_placement=False,
    )

    classes = load_coco_names(FLAGS.class_names)

    t0 = time.time()
    frozenGraph = load_graph(FLAGS.frozen_model)
    print("Loaded graph in {:.2f}s".format(time.time() - t0))

    boxes, inputs = get_boxes_and_inputs_pb(frozenGraph)

    with tf.Session(graph=frozenGraph, config=config) as sess:
        t0 = time.time()
        print(FLAGS.input_img)
        cap = cv2.VideoCapture(FLAGS.input_img)
        # cap = cv2.VideoCapture(0)
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        videoWriter = cv2.VideoWriter(
            "output.mp4", cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), fps,
            (int(width), int(height)))
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                frame = cv2.flip(frame, 0)
                img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                img_resized = letter_box_image(img, FLAGS.size, FLAGS.size,
                                               128)
                img_resized = img_resized.astype(np.float32)
                detected_boxes = sess.run(boxes,
                                          feed_dict={inputs: [img_resized]})
                filtered_boxes = non_max_suppression(
                    detected_boxes,
                    confidence_threshold=FLAGS.conf_threshold,
                    iou_threshold=FLAGS.iou_threshold)
                print("Predictions found in {:.2f}s".format(time.time() - t0))

                draw_boxes(filtered_boxes, img, classes,
                           (FLAGS.size, FLAGS.size), True)

                fimg = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
                cv2.imshow("show", fimg)
                videoWriter.write(fimg)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            else:
                break
        cap.release()
        videoWriter.release()
Example #9
 def draw_layer_boxes(self, image):
     layer1_input_boxes_img = draw_boxes(image, self.layer1_input_boxes)
     layer1_output_img = draw_boxes(image, self.layer1_output_boxes)
     layer2_input_boxes_img = draw_boxes(image, self.layer2_input_boxes)
     layer2_output_img = draw_boxes(image, self.layer2_output_boxes)
     vehicles_img = self.draw_vehicles(image)
     return layer1_input_boxes_img, layer1_output_img, \
            layer2_input_boxes_img, layer2_output_img, \
            self.layer1_heatmap, self.layer2_heatmap, \
            vehicles_img
Example #10
def draw_boxes_on_points(fname, data):
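    # KITTI velodyne scans are flat float32 arrays of (x, y, z, reflectance) points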
    pc = np.fromfile(data, np.float32).reshape(-1, 4)
    bboxes = []
    read_annotation_kitti(bboxes, fname)
    fig = mlab.figure(figure=None,
                      bgcolor=(0, 0, 0),
                      fgcolor=None,
                      engine=None,
                      size=(1600, 1000))
    draw_points(fig, pc)
    draw_boxes(fig, bboxes)
    mlab.show()
Example #11
 def predict(self, img, display=True, output_path='out.jpg'):
     img = Image.open(img)
     img_array = np.expand_dims(
         np.array(img.resize(size=(416, 416)), dtype=np.float32), 0) / 255.0
     if self.sess:
         K.set_session(self.sess)
     res = self.model.predict(img_array)
     print("Performing suppression")
     filtered_boxes = non_max_suppression(res, 0.25, 0.4)
     print(filtered_boxes)
     draw_boxes(filtered_boxes, img, self.classes, (416.0, 416.0))
     if display:
         img.show()
     img.save(output_path)
Example #12
def showBoxDetection(boxAp, boxAr, idx, type):
    # print(boxAp[idx]['gtboxs'])

    figure, ax = plt.subplots(1, 2, figsize=(12, 6))
    imgId = boxAp[idx]['image_id'][0][0] + 1

    imgName = 'render' + utils.createName(str(imgId)) + '.png'
    print(imgName)
    img_path = os.path.join(lunar_loc, "images/render2", imgName)
    img = Image.open(img_path)

    ax[0].imshow(img)
    ax[1].imshow(img)

    utils.draw_boxes(boxAp[idx]['gtBoxes'], ax[0], 'g')
    utils.draw_boxes(boxAp[idx]['predBoxes'], ax[1], 'g')
    ax[0].set_title('Ground truth', fontsize=12)
    ax[1].set_title('Predicted', fontsize=12)
    if type == 'double':
        ap = (boxAp[idx]['AP{.50:.95:.05}'] +
              boxAp[idx - 1]['AP{.50:.95:.05}']) / 2
        ar = (boxAr[idx]['AP{.50:.95:.05}'] +
              boxAr[idx - 1]['AP{.50:.95:.05}']) / 2
        utils.draw_boxes(boxAp[idx - 1]['gtBoxes'], ax[0], 'b')
        utils.draw_boxes(boxAp[idx - 1]['predBoxes'], ax[1], 'b')
    else:
        ap = boxAp[idx]['AP{.50:.95:.05}']
        ar = boxAr[idx]['AP{.50:.95:.05}']

    figure.suptitle('Rock detection\n' + 'AP{.50:.95:.05}:' + str(ap) +
                    '\nAR{.50:.95:.05}: ' + str(ar),
                    fontsize=18)
    plt.tight_layout(w_pad=3, h_pad=3)
    figure.savefig('example_outputs/box_' + 'Rock_detection_' +
                   'AP{.50:.95:.05}:' + str(ap) + '.jpg')
Example #13
def run(work_path):
    # System initialization; the argument must match the verification value set when the skill was created
    hilens.init("driving")

    # Initialize the built-in camera and the HDMI display.
    # In HiLens Studio, a VideoCapture with no argument reads test/camera0.mp4 by default;
    # on a HiLens Kit, no argument means the local camera is read.
    camera = hilens.VideoCapture()
    display = hilens.Display(hilens.HDMI)

    # Initialize the model
    model_path = os.path.join(work_path, 'model/yolo3.om')
    driving_model = hilens.Model(model_path)

    frame_index = 0
    json_bbox_list = []
    json_data = {'info': 'det_result'}

    while True:
        frame_index += 1
        try:
            time_start = time.time()

            # 1. Device input #####
            input_yuv = camera.read()  # read one frame (YUV NV21 format)

            # 2. Data preprocessing #####
            img_bgr = cv2.cvtColor(input_yuv,
                                   cv2.COLOR_YUV2BGR_NV21)  # convert to BGR
            img_preprocess, img_w, img_h = preprocess(img_bgr)  # scale to the model input size

            # 3. Model inference #####
            output = driving_model.infer([img_preprocess.flatten()])

            # 4. Collect the detection results #####
            bboxes = get_result(output, img_w, img_h)

            # 5-1. [for competition submission] write the results to a JSON file #####
            if len(bboxes) > 0:
                json_bbox = convert_to_json(bboxes, frame_index)
                json_bbox_list.append(json_bbox)

            # 5-2. [for debugging] show the results in the simulator #####
            img_bgr = draw_boxes(img_bgr, bboxes)  # draw the boxes on the image
            output_yuv = hilens.cvt_color(img_bgr, hilens.BGR2YUV_NV21)
            display.show(output_yuv)  # show on the display
            time_frame = 1000 * (time.time() - time_start)
            hilens.info('----- time_frame = %.2fms -----' % time_frame)

        except RuntimeError:
            print('last frame')
            break

    # Save the detection results
    hilens.info('write json result to file')
    result_filename = './result.json'
    json_data['result'] = json_bbox_list
    save_json_to_file(json_data, result_filename)

    hilens.terminate()
Example #14
def run(input_paths):
    print('-' * 30)
    print('Train classifier:')
    model, scaler = utils.train_model()
    print('-' * 30)

    print('Read input')
    imgs = utils.read_input(input_paths)

    print('Sliding window search')
    # heatmaps = utils.get_sliding_window_preds(imgs, model, scaler, window_scale=2)
    heatmaps = utils.get_sliding_window_preds(imgs, model, scaler)
    heatmaps += utils.get_sliding_window_preds(imgs,
                                               model,
                                               scaler,
                                               window_scale=1.5)
    heatmaps += utils.get_sliding_window_preds(imgs,
                                               model,
                                               scaler,
                                               window_scale=0.75,
                                               y_start=400,
                                               y_stop=500)

    print('Clean multi-detections and false positives')
    heatmaps_clean = utils.rolling_threshold(heatmaps)
    # heatmap_overlays = utils.heatmap_overlay(imgs, heatmaps_clean)
    # utils.display_images(heatmap_overlays)
    # return heatmap_overlays
    car_segmentation, num_cars = utils.segment_cars(heatmaps_clean)

    print('Find and draw bounding boxes')
    imgs_superimposed = utils.draw_boxes(imgs, car_segmentation, num_cars)
    # utils.display_images(imgs_superimposed)

    return imgs_superimposed
Example #15
def _main_(args):
    config_path  = args.conf
    weights_path = args.weights
    image_path   = args.input

    with open(config_path) as config_buffer:    
        config = json.load(config_buffer)

    #model 

    yolo = YOLO(backend             = config['model']['backend'],
                input_size          = config['model']['input_size'], 
                labels              = config['model']['labels'], 
                max_box_per_image   = config['model']['max_box_per_image'],
                anchors             = config['model']['anchors'])

    # Load weights
    yolo.load_weights(weights_path)

    # Predict
    image = cv2.imread(image_path)
    boxes = yolo.predict(image)
    image = draw_boxes(image, boxes, config['model']['labels'])
    print(len(boxes), 'boxes found')
    cv2.imwrite(image_path[:-4] + '_detected' + image_path[-4:], image)
Example #16
def predict():

    file = request.files['fileupload']
    filename = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
    file.save(filename)

    image_path = filename

    ###############################
    #   Predict bounding boxes
    ###############################

    image = cv2.imread(image_path)
    boxes = yolo.predict(image)
    image = draw_boxes(image, boxes, config['model']['labels'])

    print(len(boxes), 'boxes are found')
    log_str = str(len(boxes)) + ' boxes are found\n'

    #cv2.imwrite(image_path[:-4] + '_detected' + image_path[-4:], image)
    for box in boxes:
        box_label = config['model']['labels'][box.get_label()]
        box_score = box.get_score()
        log_str += box_label + ',' + str(box_score) + '\n'

    os.remove(filename)

    return log_str
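
This view relies on a surrounding Flask application for request and app.config['UPLOAD_FOLDER']; a minimal sketch of that wiring, with assumed names and paths, would be:

from flask import Flask, request

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads'  # assumption: any writable directory

# register the predict() view shown above
app.add_url_rule('/predict', view_func=predict, methods=['POST'])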
Example #17
def _main(args):
    os.chdir(os.path.join(os.getcwd(), 'yad2k-em3d'))
    # Parse input arguments
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    data_path = os.path.expanduser(args.data_path)
    test_results = os.path.expanduser(args.test_results)
    # Extract anchors, classes, images, and boxes from input files
    anchors = utils.get_anchors(anchors_path)
    classes = utils.get_classes(classes_path)
    images, boxes = utils.get_data(data_path)
    test_results = scipy.io.loadmat(test_results)
    t = test_results['output']
    cv2.imshow("TESTING", images[20])
    cv2.waitKey(0)
    ipdb.set_trace()
    classes = [
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        20, 21, 22, 23, 24, 25
    ]
    class_names = [
        'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
    ]
    drawn = utils.draw_boxes(images[0],
                             t[:1],
                             classes,
                             class_names,
                             scores=t[2])
    cv2.imshow('drawn', drawn)
    cv2.waitKey(0)
Example #18
 def send_detection(self):
     while True:
         if self.image is not None:
             break
         time.sleep(0.5)
     # set socket
     ui_image_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     ui_image_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
     encode_param=[int(cv2.IMWRITE_JPEG_QUALITY),75]
     # start sending image data
     while True:
         try:
             boxes = self.drawing_tools['boxes']
             if boxes is None:
                 image  = self.image
             else:
                 image, scores, labels = self.drawing_tools['image'], self.drawing_tools['scores'],\
                                         self.drawing_tools['labels']
                 _, index = get_target(boxes)
                 image = np.array(utils.draw_boxes(Image.fromarray(image),boxes,scores,labels,self.classes,
                                                    [self.IMAGE_H, self.IMAGE_W],target_index=index))
         except:
             image = self.image
         image = cv2.imencode('.jpeg', image, encode_param)[1]
         image = image.tobytes()
         ui_image_socket.sendto(image, self.ui_image_addr)
Example #19
 def match_ocr(self, ocr: dict, img, thresh=0.95):
     tmp = img.copy()
     # if self.verbose:
     #     xmin, ymin, xmax, ymax = self.coord
     #     empty = np.zeros([ymax - ymin, xmax - xmin, 3], dtype=np.uint8)
     #     cv2.namedWindow('Show OCR Match')
     # from dict to OCRBlock objects
     self.ocr_blocks: List[OCRBlock] = self._load_ocr_dict(ocr)
     # self.ocr_blocks_copy = copy.deepcopy(self.ocr_blocks)  # a deep copy of ocr block, using for draw match img
     for table_cell in self.table_cells[::-1]:  # traverse in reversed order
         # tmp = draw_boxes(tmp, [table_cell.coord])
         for ocr_block in self.ocr_blocks[::-1]:
             # if self.verbose:
             if table_cell.shape.intersection(
                     ocr_block.shape).area / ocr_block.shape.area >= thresh:
                 tmp = draw_boxes(tmp, [ocr_block.coord])
                 # matched!
                 table_cell.ocr_content.append(ocr_block.ocr_content)
                 table_cell.matched = True
                 self.ocr_blocks.remove(ocr_block)
                 break
     # ocr blocks left behind will be the title
     self.title = ''.join(
         [ocr_block.ocr_content for ocr_block in self.ocr_blocks])
     return tmp
Example #20
def detection(path):
    image = Image.open(path)
    img_resized = utils.letter_box_image(image, input_size, input_size, 128)
    img_resized = img_resized.astype(np.float32)
    boxes, inputs = utils.get_boxes_and_inputs_pb(frozenGraph)
    t0 = time.time()
    detected_boxes = sess.run(boxes, feed_dict={inputs: [img_resized]})
    filtered_boxes = utils.non_max_suppression(detected_boxes,
                                               confidence_threshold=conf_threshold,
                                               iou_threshold=iou_threshold)
    print("Predictions found in {:.2f}s".format(time.time() - t0))
    if filtered_boxes:
        # if len(filtered_boxes[0][:]) == 1:
        img, region, score, box = utils.draw_boxes(filtered_boxes, image, classes, (input_size, input_size), True)
        # box = np.array(box)
        # print(box)
        if score > 0.90:
            person_image_height = box[0][3] - box[0][1]
            # region.save(out_image)
            print(person_image_height)
            # Estimate the person's height:
            # a reference object is used (here a chair with a real height of 96 cm,
            # which spans 230 pixels in the image at a fixed distance) to convert
            # the person's pixel height into centimeters. The constants must be
            # re-tuned for each specific setup; the error is large, so the result
            # is for fun only, and accurate output needs properly measured inputs.
            person_height = (person_image_height * 96) / 230
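            # e.g. a detection 420 px tall gives 420 * 96 / 230 ≈ 175.3 cm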
            print("person_height: %.2fcm \n" % (person_height))
Example #21
 def pipeline(self, image):
     self.detect_car(image)
     layer1_input_boxes_img, layer1_output_img, \
     layer2_input_boxes_img, layer2_output_img, \
     layer1_heatmap, layer2_heatmap, vehicles_img \
         = self.filter.draw_layer_boxes(image)
     slide_boxes_img = image
     for i, slide_boxes in enumerate(self.slide_boxes_list):
         color_idx = i % len(colors)
         slide_boxes_img = draw_boxes(slide_boxes_img,
                                      slide_boxes,
                                      color=colors[color_idx],
                                      thick=1,
                                      colorful=True)
     layer1_heatmap = ((layer1_heatmap / (np.max(layer1_heatmap) + 1)) *
                       255).astype(np.uint8)
     layer2_heatmap = ((layer2_heatmap / (np.max(layer2_heatmap) + 1)) *
                       255).astype(np.uint8)
     layer1_heatmap = cv2.applyColorMap(layer1_heatmap, cv2.COLORMAP_HOT)
     layer2_heatmap = cv2.applyColorMap(layer2_heatmap, cv2.COLORMAP_HOT)
     parameter_img = show_text(np.zeros_like(image), show_parameters)
     return layer1_input_boxes_img, layer1_output_img, \
            layer2_input_boxes_img, layer2_output_img, \
            layer1_heatmap, layer2_heatmap, \
            slide_boxes_img, vehicles_img, parameter_img
Example #22
def process(image, svc, X_scaler):
    # Test the result on one single image
    image = mpimg.imread(image)
    draw_image = np.copy(image)

    windows = utils.slide_window(image, x_start_stop=x_start_stop,
                                 y_start_stop=y_start_stop,
                                 xy_window=(96, 96), xy_overlap=(0.75, 0.75))

    hot_windows = utils.search_windows(image, windows, svc, X_scaler,
                                       color_space=color_space,
                                       spatial_size=spatial_size, hist_bins=hist_bins,
                                       orient=orient, pix_per_cell=pix_per_cell,
                                       cell_per_block=cell_per_block,
                                       hog_channel=hog_channel, spatial_feat=spatial_feat,
                                       hist_feat=hist_feat, hog_feat=hog_feat)

    window_img = utils.draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6)

    # Find where the most boxes overlap by building a heatmap
    heat = np.zeros_like(window_img[:, :, 0]).astype(np.float64)
    heat = utils.add_heat(heat, hot_windows)
    heat = utils.apply_threshold(heat, 1)
    heatmap = np.clip(heat, 0, 255)
    labels = label(heatmap)
    draw_img = utils.draw_labeled_bboxes(image, labels)

    return draw_img
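
The utils heatmap helpers used above are not shown in this listing; typical implementations of the two steps, sketched here as an assumption, look like this:

import numpy as np

def add_heat(heatmap, bbox_list):
    # vote: add 1 inside every hot window ((x1, y1), (x2, y2))
    for ((x1, y1), (x2, y2)) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap

def apply_threshold(heatmap, threshold):
    # zero out pixels that too few windows voted for
    heatmap[heatmap <= threshold] = 0
    return heatmap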
Example #23
 def __call__(self, img, show_plots=False):
     hits = self.get_hits(img)
     heat = make_heatmap(img.shape[0:2], hits)
     if self._last_heatmap is None:
         self._last_heatmap = heat
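     # exponential moving average: alpha trades responsiveness for temporal smoothing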
     filtered_heat = (1-self._alpha) * self._last_heatmap + self._alpha * heat
     self._last_heatmap = filtered_heat
     binary = filtered_heat >= self._threshold
     labels = label_image(binary)
     boxes = []
     for i in range(labels[1]):
         y_points, x_points = np.where(labels[0] == i+1)
         box = ((np.min(x_points), np.min(y_points)),
                (np.max(x_points), np.max(y_points)))
         width = box[1][0] - box[0][0]
         height = box[1][1] - box[0][1]
         if width >= 32 and height >= 32:
             boxes.append(box)
     if show_plots:
         f, ((a0, a1), (a2, a3)) = plt.subplots(2, 2)
         a0.set_title('Raw Hits')
         a0.imshow(draw_boxes(rgb(img, self._cspace), hits))
         a1.set_title('Heatmap')
         a1.imshow(heat.astype(np.float32)/np.max(heat), cmap='gray')
         a2.set_title('Thresholded Heatmap')
         a2.imshow(binary, cmap='gray')
         a3.set_title('Label Image')
         a3.imshow(labels[0], cmap='gray')
         plt.show()
     return boxes
Example #24
def _detection_thread():
    global detected_image, loaded
    print('Creating model...', end=' ')
    yolo = YOLO(architecture='Tiny Yolo',
                    input_size=416,
                    labels=['cube'],
                    max_box_per_image=3,
                    anchors=[0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828])
    yolo.load_weights('save_tiny.h5')
    print('Done!')
    loaded = True
    while True:
        webcam_lock.acquire()
        img = webcam_image
        webcam_lock.release()
        if img is not None:
            start = time.time()
            boxes = yolo.predict(img, nms_threshold=0.5, bgr=False)
            drawn_img = draw_boxes(img, boxes, ['cube'])
            end = time.time()
            fps = 1.0/(end-start)
            past_fps.append(fps)
            while len(past_fps) > 10:
                del past_fps[0]
            avg_fps = sum(past_fps)/len(past_fps)
            print('\rFPS: {:.2f}'.format(avg_fps), end='')
            detected_lock.acquire()
            detected_image = drawn_img
            detected_lock.release()
        if should_stop:
            break
Example #25
def parseImage(img):
    '''
    Parses an individual image, runs prediction, and
    draws boxes on it
    '''
    print(img.shape)
    dims = (img.shape[1], img.shape[0])

    img_float = cv2.resize(img, (224, 224)).astype(np.float32)
    img_float -= 128
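    # resizing to 224x224 matches the network input; subtracting 128 roughly zero-centers the pixels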

    img_in = np.expand_dims(img_float, axis=0)

    pred = model.predict(img_in)

    bboxes = utils.get_boxes(pred[0], dims=dims, cutoff=0.2)
    bboxes = utils.nonmax_suppression(bboxes, iou_cutoff=0.05)
    draw = utils.draw_boxes(img,
                            bboxes,
                            color=(0, 0, 255),
                            thick=3,
                            draw_dot=True,
                            radius=3)
    draw = draw.astype(np.uint8)
    return draw
Example #26
def process_frame(image):
    global trained_clf
    global scaler

    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    draw_image = image.copy()

    windows = slide_window(image,
                           x_start_stop=[None, None],
                           y_start_stop=[360, 700],
                           xy_window=(64, 64),
                           xy_overlap=(0.85, 0.85))

    logging.info("Searching hot windows using classifier")
    hot_windows = search_windows(image, windows, trained_clf, scaler)

    logging.info("Drawing the hot image")
    window_img = draw_boxes(draw_image,
                            hot_windows,
                            color=(0, 0, 255),
                            thick=6)

    #plt.imshow(window_img)
    #plt.show()

    return car_positions(image, hot_windows)
Example #27
def run_images():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir_path', default="./test_images", type=str)
    parser.add_argument('--out_path', default='./out_images', type=str)
    parser.add_argument('--model_file',
                        default="./model/yolov2-tiny-voc.h5",
                        type=str)
    args = parser.parse_args()

    paths = utils.get_image_path(args.dir_path)
    images = []
    print('reading image from %s' % args.dir_path)
    for path in paths:
        image = cv2.imread(path)
        resized = cv2.resize(image, (416, 416))
        images.append(resized)

    image_processed = []
    for image in images:
        image_processed.append(utils.preprocess_image(image))

    print('loading model from %s' % args.model_file)
    model = load_model(args.model_file)
    predictions = model.predict(np.array(image_processed))

    if not os.path.exists(args.out_path):
        os.mkdir(args.out_path)
    print('writing image to %s' % args.out_path)
    for i in range(predictions.shape[0]):
        boxes = utils.process_predictions(predictions[i],
                                          probs_threshold=0.3,
                                          iou_threshold=0.2)
        out_image = utils.draw_boxes(images[i], boxes)
        cv2.imwrite('%s/out%s.jpg' % (args.out_path, i), out_image)
Example #28
    def detect(self, image, windows=None, window_img_filename=None):
        if windows is None:
            windows_32 = slide_window(image,
                                      x_start_stop=None,
                                      y_start_stop=[398, 434],
                                      xy_window=(32, 32),
                                      xy_overlap=(0.5, 0.5))

            windows_64 = slide_window(image,
                                      x_start_stop=None,
                                      y_start_stop=[390, 450],
                                      xy_window=(64, 64),
                                      xy_overlap=(0.5, 0.5))

            windows_96 = slide_window(image,
                                      x_start_stop=None,
                                      y_start_stop=[390, 540],
                                      xy_window=(96, 96),
                                      xy_overlap=(0.5, 0.5))

            windows_128 = slide_window(image,
                                       x_start_stop=None,
                                       y_start_stop=[350, image.shape[0]],
                                       xy_window=(128, 128),
                                       xy_overlap=(0.25, 0.25))

            windows = windows_32 + windows_64 + windows_96 + windows_128
            self.windows = windows  # store the window coords for next time

        if window_img_filename:
            window_img = draw_boxes(image, windows, color=(0, 0, 255), thick=6)
            plt.figure()
            plt.imshow(window_img)
            plt.title('Anchor Windows')
            plt.savefig(window_img_filename)

        on_windows = search_windows(
            image,
            windows,
            self.clf,
            self.scaler,
            feature_extractor_params=self.feature_extractor_params)

        # window_img = draw_boxes(image, on_windows, color=(0, 0, 255), thick=6)
        # plt.figure()
        # plt.imshow(window_img)

        heatmap = build_heatmap(image.shape[:2], on_windows)
        # plt.figure()
        # plt.imshow(heatmap)
        #
        # plt.figure()
        # plt.imshow(heatmap > 3)

        heatmap[heatmap < self.heatmap_threshold] = 0

        labels = label(heatmap)
        imb_bbox = draw_labeled_bboxes(np.copy(image), labels)

        return imb_bbox
Example #29
    def predict_on_h5(self, h5_path, idx, path_to_save, sequence_length=30, stride=1,
                      obj_threshold=0.3, nms_threshold=0.1):
        f = h5py.File(h5_path, 'r')
        x_batches = f["x_batches"]
        b_batches = f["b_batches"]
        y_batches = f["y_batches"]

        id_in_h5 = idx % x_batches.shape[0]
        x_batch = x_batches[id_in_h5, ...] # read from disk
        b_batch = b_batches[id_in_h5, ...]
        y_batch = y_batches[id_in_h5, ...]

        x_batch = x_batch[::-1, ...][:sequence_length:stride][::-1, ...]
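        # the slicing above keeps the last sequence_length frames (stride-subsampled) in original order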
        image_id = -1
        image = x_batch[image_id, ...].copy() 
        boxes, filtered_boxes = self.predict(image, obj_threshold=obj_threshold,
                             nms_threshold=nms_threshold, is_filter_bboxes=False,
                             shovel_type="Cable")
        boxes += filtered_boxes
        image = draw_boxes(image, boxes, self.labels, score_threshold=obj_threshold)

        h5_name = h5_path.split('/')[-1]
        filepath = os.path.join(path_to_save, "pred_" + h5_name + str(idx) + ".jpg")
        cv2.imwrite(filepath, image) 

        f.close()
Example #30
def predict(sess, y, x, image, image_path):
    image_h, image_w, _ = image.shape
    image = cv2.resize(image, (640, 640))
    image = image / 255.
    # image = self.feature_extractor.normalize(image)

    input_image = image[:, :, ::-1]
    input_image = np.expand_dims(input_image, 0)
    input_image = np.expand_dims(input_image, 0)

    # netout = self.model.predict(input_image)[0]
    start_time = time()
    netout = sess.run(y, {x: input_image})[0]

    anchors = [
        0.31, 0.81, 0.40, 1.16, 0.41, 0.91, 0.42, 0.68, 0.50, 1.42, 0.52, 1.09,
        0.63, 1.72, 0.96, 1.72, 1.14, 2.69, 10.64, 2.53
    ]
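    # ten (width, height) anchor pairs, flattened, for this two-class detector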
    labels = ["tooth", "toothline"]
    nb_class = len(labels)
    boxes = decode_netout(netout, anchors, nb_class, obj_threshold,
                          nms_threshold)
    print("Time spent: ", time() - start_time)

    image = draw_boxes(image, boxes, labels, obj_threshold)
    # plt.imshow(image)
    # plt.show()
    path_to_save = pb_filepath.split('/')[:-1]
    path_to_save = '/'.join(path_to_save)
    path_to_save = os.path.join(path_to_save, image_path.split('/')[-1])
    print(path_to_save, image.shape)
    image = (image * 255).astype(np.uint8)
    cv2.imwrite(path_to_save, image)

    return boxes
Example #31
    def process(self, image):
        draw_image = np.zeros_like(image)
        y_start_stop = [(350, 500), (400, 550), (350, 650)]
        scale = [1, 2, 3]
        cells_per_step = [1, 1, 1]

        hot_windows =\
            self.detector.search_windows(image,
                                         y_start_stop=y_start_stop,
                                         scale=scale,
                                         cells_per_step=cells_per_step)
        self.windows.append(hot_windows)

        alive_box = hot_windows
        if self.draw_heatmap:
            labels, heatmap = self.draw_heatmap_labels(draw_image.shape[:2])
            window_img, alive_box =\
                draw_labeled_bboxes(draw_image, labels,
                                    heatmap, self.cthreshold)
        else:
            window_img = draw_boxes(draw_image,
                                    hot_windows,
                                    color=(0, 0, 255),
                                    thick=6)
        if self.standalone:
            return window_img
        else:
            return alive_box