Example no. 1
0
def test_start_stop_file_source_image_size():
    """Expect the sampled frame to match the source video's 1280x720 size."""
    test_dir = os.path.dirname(os.path.abspath(__file__))
    source_file = os.path.join(test_dir, 'test2-cam-person1.mkv')
    source_uri = pathlib.Path(os.path.abspath(source_file)).as_uri()
    avsource = AVSourceElement(uri=source_uri, type='video')
    frame_arrived = threading.Event()
    received_image = None

    def on_sample(image=None, inference_result=None, **kwargs):
        # capture the first delivered frame and signal the main thread
        nonlocal received_image
        received_image = image
        frame_arrived.set()

    sink = _OutPipeElement(sample_callback=on_sample)
    avsource.connect_to_next_element(sink)
    worker = threading.Thread(name="Test AVSourceElement",
                              target=avsource.start,
                              daemon=True)
    worker.start()
    frame_arrived.wait(timeout=5)
    # a frame should have arrived with the original video dimensions
    assert received_image
    assert received_image.size[0] == 1280
    assert received_image.size[1] == 720
    # the source keeps running until it is explicitly stopped
    assert worker.is_alive()
    avsource.stop()
    worker.join(timeout=20)
    assert not worker.is_alive()
Example no. 2
0
def test_start_stop_file_source_person_detect():
    """Expect to detect a person in the video sample."""
    _dir = os.path.dirname(os.path.abspath(__file__))
    video_file = os.path.join(_dir, 'test2-cam-person1.mkv')
    abs_path = os.path.abspath(video_file)
    video_uri = pathlib.Path(abs_path).as_uri()
    avsource = AVSourceElement(uri=video_uri, type='video')
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        # record the latest frame and its detections for the main thread
        nonlocal sample_image
        nonlocal detections
        sample_image = image
        detections = inference_result
        print('detections: {det}'.format(det=detections))
        # inference_result may be None for frames without detections;
        # guard before len() to avoid a TypeError in the callback thread
        # that would silently prevent detection_received from ever firing
        print('len(detections): {len}'.format(
            len=len(detections) if detections else 0))
        if detections:
            label = detections[0]['label']
            confidence = detections[0]['confidence']
            if label == 'person' and confidence > 0.9:
                # skip video image samples until we reach a person detection
                # with high level of confidence
                detection_received.set()

    object_detector = ObjectDetector(**object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(name="Test AVSourceElement",
                         target=avsource.start,
                         daemon=True)
    t.start()
    detection_received.wait(timeout=10)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1

    label = detections[0]['label']
    confidence = detections[0]['confidence']
    (x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin']
    (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax']

    # a single confident person detection with a sane bounding box
    assert label == 'person'
    assert confidence > 0.9
    assert x0 > 0 and x0 < x1
    assert y0 > 0 and y0 < y1
    avsource.stop()
    t.join(timeout=10)
    assert not t.is_alive()
Example no. 3
0
def test_still_image_input_detect_person_exit_stop_signal():
    """Process a single jpg image. Detect a person. Exit via stop signal."""
    _dir = os.path.dirname(os.path.abspath(__file__))
    video_file = os.path.join(_dir, "../ai/person.jpg")
    abs_path = os.path.abspath(video_file)
    video_uri = pathlib.Path(abs_path).as_uri()
    avsource = AVSourceElement(uri=video_uri, type="image")
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        # record the latest frame and its detections for the main thread
        nonlocal sample_image
        nonlocal detections
        sample_image = image
        detections = inference_result
        print(f"detections: {detections}")
        # inference_result may be None for frames without detections;
        # guard before len() to avoid a TypeError in the callback thread
        print(f"len(detections): {len(detections) if detections else 0}")
        if detections:
            label = detections[0]["label"]
            confidence = detections[0]["confidence"]
            if label == "person" and confidence > 0.9:
                # skip video image samples until we reach a person detection
                # with high level of confidence
                detection_received.set()

    object_detector = ObjectDetector(**object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(name="Test AVSourceElement",
                         target=avsource.start,
                         daemon=True)
    t.start()
    detection_received.wait(timeout=10)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    label = detections[0]["label"]
    confidence = detections[0]["confidence"]
    (x0, y0) = detections[0]["box"]["xmin"], detections[0]["box"]["ymin"]
    (x1, y1) = detections[0]["box"]["xmax"], detections[0]["box"]["ymax"]
    # a single confident person detection with a sane bounding box
    assert label == "person"
    assert confidence > 0.9
    assert x0 > 0 and x0 < x1
    assert y0 > 0 and y0 < y1
    avsource.stop()
    t.join(timeout=10)
    assert not t.is_alive()
Example no. 4
0
def test_still_image_input_detect_person_exit_stop_signal():
    """Process a single jpg image. Detect a person. Exit via stop signal."""
    # renamed from `dir` to avoid shadowing the builtin
    _dir = os.path.dirname(os.path.abspath(__file__))
    video_file = os.path.join(
        _dir,
        '../ai/person.jpg'
        )
    abs_path = os.path.abspath(video_file)
    video_uri = pathlib.Path(abs_path).as_uri()
    avsource = AVSourceElement(uri=video_uri, type='image')
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None):
        # record the latest frame and its detections for the main thread
        nonlocal sample_image
        nonlocal detections
        sample_image = image
        detections = inference_result
        print('detections: {det}'.format(det=detections))
        # inference_result may be None for frames without detections;
        # guard before len() to avoid a TypeError in the callback thread
        print('len(detections): {len}'.format(
            len=len(detections) if detections else 0))
        if detections and len(detections) > 0:
            category, confidence, _ = detections[0]
            if category == 'person' and confidence > 0.9:
                # skip video image samples until we reach a person detection
                # with high level of confidence
                detection_received.set()
    object_detector = ObjectDetector(element_config=object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(
        name="Test AVSourceElement",
        target=avsource.start, daemon=True
        )
    t.start()
    detection_received.wait(timeout=5)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    # a single confident person detection with a sane bounding box
    category, confidence, (x0, y0, x1, y1) = detections[0]
    assert category == 'person'
    assert confidence > 0.9
    assert x0 > 0 and x0 < x1
    assert y0 > 0 and y0 < y1
    avsource.stop()
    t.join(timeout=10)
    assert not t.is_alive()
Example no. 5
0
def test_picamera_input():
    """Expect to detect a person from a mocked picamera video source."""
    # mock picamera module
    picam.picamera_override = picamera_override

    avsource = AVSourceElement(uri="picamera", type='video')
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        # record the latest frame and its detections for the main thread
        nonlocal sample_image
        nonlocal detections
        sample_image = image
        detections = inference_result
        print('detections: {det}'.format(det=detections))
        # inference_result may be None for frames without detections;
        # guard before len() to avoid a TypeError in the callback thread
        print('len(detections): {len}'.format(
            len=len(detections) if detections else 0))
        if detections:
            label = detections[0]['label']
            confidence = detections[0]['confidence']
            if label == 'person' and confidence > 0.9:
                # skip video image samples until we reach a person detection
                # with high level of confidence
                detection_received.set()

    object_detector = ObjectDetector(**object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(name="Test AVSourceElement",
                         target=avsource.start,
                         daemon=True)
    t.start()
    detection_received.wait(timeout=10)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    label = detections[0]['label']
    confidence = detections[0]['confidence']
    (x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin']
    (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax']
    # a single confident person detection with a sane bounding box
    assert label == 'person'
    assert confidence > 0.9
    assert x0 > 0 and x0 < x1
    assert y0 > 0 and y0 < y1
    avsource.stop()
    t.join(timeout=10)
    assert not t.is_alive()
Example no. 6
0
def test_http_still_image_input_detect_person_exit_stop_signal():
    """Process a single jpg image fetched over HTTP. Detect a person. Exit via stop signal."""
    source_uri = "https://raw.githubusercontent.com/ambianic/ambianic-edge/master/tests/pipeline/ai/person.jpg"
    avsource = AVSourceElement(uri=source_uri, type="image", live=True)
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        # record the latest frame and its detections for the main thread
        nonlocal sample_image
        nonlocal detections
        sample_image = image
        detections = inference_result
        print(f"detections: {detections}")
        # inference_result may be None for frames without detections;
        # guard before len() to avoid a TypeError in the callback thread
        print(f"len(detections): {len(detections) if detections else 0}")
        if detections:
            label = detections[0]["label"]
            confidence = detections[0]["confidence"]
            if label == "person" and confidence > 0.9:
                # skip video image samples until we reach a person detection
                # with high level of confidence
                detection_received.set()

    object_detector = ObjectDetector(**object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(name="Test AVSourceElement",
                         target=avsource.start,
                         daemon=True)
    t.start()
    detection_received.wait(timeout=10)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    label = detections[0]["label"]
    confidence = detections[0]["confidence"]
    (x0, y0) = detections[0]["box"]["xmin"], detections[0]["box"]["ymin"]
    (x1, y1) = detections[0]["box"]["xmax"], detections[0]["box"]["ymax"]
    # a single confident person detection with a sane bounding box
    assert label == "person"
    assert confidence > 0.9
    assert x0 > 0 and x0 < x1
    assert y0 > 0 and y0 < y1
    avsource.stop()
    t.join(timeout=10)
    assert not t.is_alive()
Example no. 7
0
def test_stop_on_video_EOS():
    """Processing should stop when AVSource reaches end of input stream."""
    # renamed from `dir` to avoid shadowing the builtin
    _dir = os.path.dirname(os.path.abspath(__file__))
    video_file = os.path.join(
        _dir,
        'test2-cam-person1.mkv'
        )
    abs_path = os.path.abspath(video_file)
    video_uri = pathlib.Path(abs_path).as_uri()
    avsource = AVSourceElement(uri=video_uri, type='video')
    sample_received = threading.Event()
    sample_image = None

    def sample_callback(image=None, inference_result=None):
        # capture the first delivered frame and signal the main thread
        nonlocal sample_image
        sample_image = image
        sample_received.set()
    output = _OutPipeElement(sample_callback=sample_callback)
    avsource.connect_to_next_element(output)
    t = threading.Thread(
        name="Test AVSourceElement",
        target=avsource.start, daemon=True
        )
    t.start()
    sample_received.wait(timeout=5)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert t.is_alive()
    avsource._gst_process_eos_reached.wait(timeout=30)
    if not avsource._gst_process_eos_reached.is_set():
        # Intermitently gstreamer does not feed an EOS message
        # on the event bus when it reaches end of the video file.
        # This is a known issue under investigation.
        # Let's send a stop signal to the pipeline.
        avsource.stop()
    t.join(timeout=10)
    assert not t.is_alive()