Example #1
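Both versions of process_images below come from a larger module and rely on shared state defined elsewhere in that file. A minimal sketch of what the surrounding module might provide; apart from the standard-library and OpenCV imports, every name and value here is an assumption for illustration, not part of the original:

import time
from multiprocessing import Barrier, Queue
from queue import Full                     # raised by Queue.put_nowait()
from threading import BrokenBarrierError   # raised by Barrier.wait()

import cv2  # OpenCV, used to draw detection boxes

# Hypothetical module-level state shared by the worker processes.
RAW_IMAGES_QUEUE = Queue(maxsize=20)        # images awaiting detection
PROCESSED_BOXES_QUEUE = Queue(maxsize=20)   # output of the first variant
PROCESSED_IMAGES_QUEUE = Queue(maxsize=20)  # output of the second variant
TENSORFLOW_PROCESS_BARRIER = Barrier(2)     # one party per worker process
COCO_CLASS_DICT = {1: 'person'}             # class-id -> label map (truncated)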
def process_images(model_path, detection_class, detection_threshold,
                   max_processing_delay):
    """Starts Tensorflow and detects objects in the incoming images.

    Args:
        model_path (str): Filepath to the Tensorflow model to use.
        detection_class (int): Detection classe to detect
        detection_threshold (float): Detection threshold to apply to all Tensorflow detections.
        max_processing_delay (float): Allowed delay before processing an incoming image.
    """

    odapi = DetectorAPI(path_to_ckpt=model_path)
    num_processed_skips = 0

    # Bail out if the shared start-up barrier was never created.
    if TENSORFLOW_PROCESS_BARRIER is None:
        return

    try:
        # Wait until every TensorFlow worker has loaded its model before
        # any of them starts pulling images off the queue.
        TENSORFLOW_PROCESS_BARRIER.wait()
    except BrokenBarrierError as exc:
        print(f'Error waiting for TensorFlow processes to initialize: {exc}')
        return False

    while True:
        entry = RAW_IMAGES_QUEUE.get()
        for _, capture in entry.items():
            if time.time() - capture['raw_image_time'] > max_processing_delay:
                num_processed_skips += 1
                continue  # Skip image due to delay

            image = capture['cv_image']
            boxes, scores, classes, _ = odapi.process_frame(image)
            confident_boxes = []
            confident_object_classes = []
            confident_scores = []
            if len(boxes) == 0:
                continue  # No detections in this image
            # Walk detections from highest to lowest confidence score.
            for box, score, box_class in sorted(zip(boxes, scores, classes),
                                                key=lambda x: x[1],
                                                reverse=True):
                if score > detection_threshold and box_class == detection_class:
                    confident_boxes.append(box)
                    confident_object_classes.append(COCO_CLASS_DICT[box_class])
                    confident_scores.append(score)
                    # Boxes are (ymin, xmin, ymax, xmax); cv2.rectangle
                    # takes the (x, y) corner points.
                    image = cv2.rectangle(image, (box[1], box[0]),
                                          (box[3], box[2]), (255, 0, 0), 2)

            capture['processed_image_time'] = time.time()
            capture['boxes'] = confident_boxes
            capture['classes'] = confident_object_classes
            capture['scores'] = confident_scores
            capture['cv_image'] = image
        try:
            # Drop the result rather than block if the consumer is backed up.
            PROCESSED_BOXES_QUEUE.put_nowait(entry)
        except Full as exc:
            print(f'PROCESSED_BOXES_QUEUE is full: {exc}')
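This variant is meant to run as one of several worker processes sharing the queues and barrier above. A minimal launch sketch, assuming a fork start method so the module-level state is inherited by the children; start_workers, the worker count, and the argument values are illustrative, not from the original:

from multiprocessing import Process

def start_workers(model_path, num_workers=2):
    # One barrier party per worker: every process finishes loading its
    # TensorFlow model before any of them starts consuming images.
    workers = [
        Process(target=process_images,
                args=(model_path, 1, 0.7, 2.0),  # class 1, 0.7 threshold, 2 s max delay
                daemon=True)
        for _ in range(num_workers)
    ]
    for worker in workers:
        worker.start()
    return workers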
Example #2

def process_images(index, model_path, detection_classes, detection_threshold,
                   max_processing_delay):
    """Starts Tensorflow and detects objects in the incoming images.

    Args:
        index: Process index used for displaying number of skips only.
        model_path: Filepath to the Tensorflow model to use.
        detection_classes: List of detection classes to detect. Empty list means all classes
            in the model are detected.
        detection_threshold: Detection threshold to apply to all Tensorflow detections.
        max_processing_delay: Allowed delay before processing an incoming image.
    """

    odapi = DetectorAPI(path_to_ckpt=model_path)
    num_processed_skips = 0

    while True:
        entry = RAW_IMAGES_QUEUE.get()
        inline_print(0, RAW_IMAGES_QUEUE.qsize())  # report raw-image backlog
        raw_time = entry['raw_image_time']
        if time.time() - raw_time > max_processing_delay:
            num_processed_skips += 1
            inline_print(20 + index, str(num_processed_skips))
            continue  # Skip image due to delay

        image = entry['image']
        boxes, scores, classes, _ = odapi.process_frame(image)

        # Empty detection classes means detect everything
        for box, score, box_class in zip(boxes, scores, classes):
            if not detection_classes or box_class in detection_classes:
                if score > detection_threshold:
                    image = cv2.rectangle(image, (box[1], box[0]),
                                          (box[3], box[2]), (255, 0, 0), 2)
        out_entry = {
            'source': entry['source'],
            'raw_image_time': entry['raw_image_time'],
            'capture_image_time': entry['capture_image_time'],
            'processed_image_time': time.time(),
            'image': image
        }
        PROCESSED_IMAGES_QUEUE.put(out_entry)
        inline_print(3, PROCESSED_IMAGES_QUEUE.qsize())
        # Display end-to-end latency from capture to finished processing.
        inline_print(
            9,
            str(out_entry['processed_image_time'] -
                out_entry['capture_image_time']))
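inline_print is defined elsewhere in the module. A plausible minimal sketch using ANSI cursor addressing, so each counter overwrites a fixed terminal row instead of scrolling; this is an assumption shown only to make the excerpt self-contained, and the real helper may differ:

import sys

def inline_print(row, value):
    # Hypothetical helper: jump to a fixed terminal row (ANSI rows are
    # 1-indexed, hence row + 1), clear it, and write the value in place.
    sys.stdout.write(f'\033[{row + 1};1H\033[K{value}')
    sys.stdout.flush()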