def test2_one_person_high_confidence_face_low_confidence_two_stage_pipe():
    """Expect a person detection in stage one but no face in stage two."""
    object_config = _object_detect_config()
    face_config = _face_detect_config()
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    # Stage one: object detection -> out
    object_detector = ObjectDetector(**object_config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name='person-face2.jpg')
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 1
    detection = result[0]
    box = detection['box']
    assert detection['label'] == 'person'
    assert detection['confidence'] > 0.9
    assert 0 < box['xmin'] < box['xmax']
    assert 0 < box['ymin'] < box['ymax']
    # Stage two: rearrange pipe elements as object -> face -> out
    face_detector = FaceDetector(**face_config)
    object_detector.connect_to_next_element(face_detector)
    face_detector.connect_to_next_element(output)
    object_detector.receive_next_sample(image=img)
    # Face confidence is too low, so nothing should reach the output.
    assert not result
def test_thermal_one_person_miss_face_two_stage_pipe():
    """Expect a person detection on a thermal image but no face."""
    object_config = _object_detect_config()
    face_config = _face_detect_config()
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    # Stage one: object detection -> out
    object_detector = ObjectDetector(**object_config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name="person_thermal_bw.jpg")
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 1
    detection = result[0]
    box = detection["box"]
    assert detection["label"] == "person"
    assert detection["confidence"] > 0.8
    assert 0 < box["xmin"] < box["xmax"]
    assert 0 < box["ymin"] < box["ymax"]
    # Stage two: rearrange pipe elements as object -> face -> out
    face_detector = FaceDetector(**face_config)
    object_detector.connect_to_next_element(face_detector)
    face_detector.connect_to_next_element(output)
    object_detector.receive_next_sample(image=img)
    # No face should be detected in the thermal image.
    assert not result
def test_one_person_high_confidence_face_low_confidence_two_stage_pipe():
    """Expect a person detection in stage one but no face in stage two."""
    object_config = _object_detect_config()
    face_config = _face_detect_config()
    result = None

    def sample_callback(image=None, inference_result=None):
        nonlocal result
        result = inference_result

    # Stage one: object detection -> out
    object_detector = ObjectDetector(element_config=object_config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name='person.jpg')
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 1
    category, confidence, (x0, y0, x1, y1) = result[0]
    assert category == 'person'
    assert confidence > 0.9
    assert 0 < x0 < x1
    assert 0 < y0 < y1
    # Stage two: rearrange pipe elements as object -> face -> out
    face_detector = FaceDetector(element_config=face_config)
    object_detector.connect_to_next_element(face_detector)
    face_detector.connect_to_next_element(output)
    object_detector.receive_next_sample(image=img)
    # Face confidence is too low, so nothing should reach the output.
    assert not result
def test_gst_process_terminate():
    """Gst process terminate when it doesn't respond to stop signal."""
    _dir = os.path.dirname(os.path.abspath(__file__))
    video_file = os.path.join(_dir, '../ai/person.jpg')
    abs_path = os.path.abspath(video_file)
    video_uri = pathlib.Path(abs_path).as_uri()
    avsource = _TestAVSourceElement4(uri=video_uri, type='image')
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal sample_image
        nonlocal detection_received
        sample_image = image
        nonlocal detections
        detections = inference_result
        print('detections: {det}'.format(det=detections))
        print('len(detections): {len}'.format(len=len(detections)))
        if detections:
            label = detections[0]['label']
            confidence = detections[0]['confidence']
            if label == 'person' and confidence > 0.9:
                # Skip video image samples until we reach a person
                # detection with a high level of confidence.
                detection_received.set()

    object_detector = ObjectDetector(**object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(name="Test AVSourceElement",
                         target=avsource.start, daemon=True)
    t.start()
    detection_received.wait(timeout=5)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    detection = detections[0]
    box = detection['box']
    assert detection['label'] == 'person'
    assert detection['confidence'] > 0.9
    assert 0 < box['xmin'] < box['xmax']
    assert 0 < box['ymin'] < box['ymax']
    avsource.stop()
    t.join(timeout=30)
    assert not t.is_alive()
    # The source should have been force-terminated, yet cleanly.
    assert avsource._terminate_requested
    assert avsource._clean_terminate
def test_still_image_input_detect_person_exit_eos():
    """Process a single jpg image. Detect a person. Exit via EOS."""
    _dir = os.path.dirname(os.path.abspath(__file__))
    video_file = os.path.join(_dir, "../ai/person.jpg")
    abs_path = os.path.abspath(video_file)
    video_uri = pathlib.Path(abs_path).as_uri()
    avsource = AVSourceElement(uri=video_uri, type="image")
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal sample_image
        nonlocal detection_received
        sample_image = image
        nonlocal detections
        detections = inference_result
        print(f"detections: {detections}")
        print(f"len(detections): {len(detections)}")
        if detections:
            label = detections[0]["label"]
            confidence = detections[0]["confidence"]
            if label == "person" and confidence > 0.9:
                # Skip video image samples until we reach a person
                # detection with a high level of confidence.
                detection_received.set()

    object_detector = ObjectDetector(**object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(name="Test AVSourceElement",
                         target=avsource.start, daemon=True)
    t.start()
    detection_received.wait(timeout=10)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    detection = detections[0]
    box = detection["box"]
    assert detection["label"] == "person"
    assert detection["confidence"] > 0.9
    assert 0 < box["xmin"] < box["xmax"]
    assert 0 < box["ymin"] < box["ymax"]
    t.join(timeout=10)
    # The pipeline should have exited on its own via end-of-stream.
    assert avsource._gst_process_eos_reached.is_set()
    assert not t.is_alive()
def test_exception_on_new_sample():
    """Exception from _on_new_sample() should not break the pipe loop.

    Feeds a still image through a source element that raises from
    _on_new_sample() and verifies the pipeline still produces a person
    detection and reaches EOS cleanly.
    """
    # NOTE: renamed `dir` -> `_dir`; `dir` shadowed the builtin and was
    # inconsistent with the sibling tests in this file.
    _dir = os.path.dirname(os.path.abspath(__file__))
    video_file = os.path.join(_dir, '../ai/person.jpg')
    abs_path = os.path.abspath(video_file)
    video_uri = pathlib.Path(abs_path).as_uri()
    avsource = _TestAVSourceElement2(uri=video_uri, type='image')
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None):
        nonlocal sample_image
        nonlocal detection_received
        sample_image = image
        nonlocal detections
        detections = inference_result
        print('detections: {det}'.format(det=detections))
        print('len(detections): {len}'.format(len=len(detections)))
        # `detections and len(detections) > 0` was redundant: a non-empty
        # list is already truthy.
        if detections:
            category, confidence, _ = detections[0]
            if category == 'person' and confidence > 0.9:
                # skip video image samples until we reach a person detection
                # with high level of confidence
                detection_received.set()

    object_detector = ObjectDetector(element_config=object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(
        name="Test AVSourceElement",
        target=avsource.start, daemon=True
    )
    t.start()
    detection_received.wait(timeout=5)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    category, confidence, (x0, y0, x1, y1) = detections[0]
    assert category == 'person'
    assert confidence > 0.9
    assert x0 > 0 and x0 < x1
    assert y0 > 0 and y0 < y1
    avsource._gst_process_eos_reached.wait(timeout=5)
    assert avsource._gst_process_eos_reached.is_set()
    t.join(timeout=3)
    assert not t.is_alive()
def test_picamera_input():
    """Detect a person from a (mocked) picamera video source."""
    # Substitute the mocked picamera module for the real one.
    picam.picamera_override = picamera_override
    avsource = AVSourceElement(uri="picamera", type='video')
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal sample_image
        nonlocal detection_received
        sample_image = image
        nonlocal detections
        detections = inference_result
        print('detections: {det}'.format(det=detections))
        print('len(detections): {len}'.format(len=len(detections)))
        if detections:
            label = detections[0]['label']
            confidence = detections[0]['confidence']
            if label == 'person' and confidence > 0.9:
                # Skip video image samples until we reach a person
                # detection with a high level of confidence.
                detection_received.set()

    object_detector = ObjectDetector(**object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(name="Test AVSourceElement",
                         target=avsource.start, daemon=True)
    t.start()
    detection_received.wait(timeout=10)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    detection = detections[0]
    box = detection['box']
    assert detection['label'] == 'person'
    assert detection['confidence'] > 0.9
    assert 0 < box['xmin'] < box['xmax']
    assert 0 < box['ymin'] < box['ymax']
    avsource.stop()
    t.join(timeout=10)
    assert not t.is_alive()
def test_http_still_image_input_detect_person_exit():
    """Process a single jpg image. Detect a person and exit pipeline."""
    source_uri = 'https://raw.githubusercontent.com/ambianic/ambianic-edge/master/tests/pipeline/ai/person.jpg'
    avsource = _TestAVSourceElement(uri=source_uri, type='image', live=False)
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal sample_image
        nonlocal detection_received
        sample_image = image
        nonlocal detections
        detections = inference_result
        print('detections: {det}'.format(det=detections))
        print('len(detections): {len}'.format(len=len(detections)))
        if detections:
            label = detections[0]['label']
            confidence = detections[0]['confidence']
            if label == 'person' and confidence > 0.9:
                # Skip video image samples until we reach a person
                # detection with a high level of confidence.
                detection_received.set()

    object_detector = ObjectDetector(**object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(
        name="Test AVSourceElement",
        target=avsource.start, daemon=True
    )
    t.start()
    detection_received.wait(timeout=10)
    # The image must have been fetched over HTTP, not gstreamer.
    assert avsource._run_http_fetch_called
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    detection = detections[0]
    box = detection['box']
    assert detection['label'] == 'person'
    assert detection['confidence'] > 0.9
    assert 0 < box['xmin'] < box['xmax']
    assert 0 < box['ymin'] < box['ymax']
    t.join(timeout=10)
    assert not t.is_alive()
def test_http_still_image_input_detect_person_exit_stop_signal():
    """Proces a single jpg image. Detect a person. Exit via stop signal."""
    source_uri = "https://raw.githubusercontent.com/ambianic/ambianic-edge/master/tests/pipeline/ai/person.jpg"
    avsource = AVSourceElement(uri=source_uri, type="image", live=True)
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal sample_image
        nonlocal detection_received
        sample_image = image
        nonlocal detections
        detections = inference_result
        print(f"detections: {detections}")
        print(f"len(detections): {len(detections)}")
        if detections:
            label = detections[0]["label"]
            confidence = detections[0]["confidence"]
            if label == "person" and confidence > 0.9:
                # Skip video image samples until we reach a person
                # detection with a high level of confidence.
                detection_received.set()

    object_detector = ObjectDetector(**object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(name="Test AVSourceElement",
                         target=avsource.start, daemon=True)
    t.start()
    detection_received.wait(timeout=10)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    detection = detections[0]
    box = detection["box"]
    assert detection["label"] == "person"
    assert detection["confidence"] > 0.9
    assert 0 < box["xmin"] < box["xmax"]
    assert 0 < box["ymin"] < box["ymax"]
    # Live source: must be stopped explicitly rather than via EOS.
    avsource.stop()
    t.join(timeout=10)
    assert not t.is_alive()
def test_bad_sample_good_sample():
    """One bad sample should not prevent good samples from being processed."""
    config = _object_detect_config()
    result = 'nothing passed to me'

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(**config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    # A bad sample (no image) should be dropped without reaching the output.
    object_detector.receive_next_sample(image=None)
    assert result == 'nothing passed to me'
    # A subsequent good sample should still flow through normally.
    img = _get_image(file_name='person.jpg')
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 1
    category, confidence, (x0, y0, x1, y1) = result[0]
    assert category == 'person'
    assert confidence > 0.9
    assert 0 < x0 < x1
    assert 0 < y0 < y1
def test_still_image_input_detect_person_exit_stop_signal():
    """Proces a single jpg image. Detect a person. Exit via stop signal."""
    _dir = os.path.dirname(os.path.abspath(__file__))
    video_file = os.path.join(_dir, '../ai/person.jpg')
    abs_path = os.path.abspath(video_file)
    video_uri = pathlib.Path(abs_path).as_uri()
    avsource = AVSourceElement(uri=video_uri, type='image')
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal sample_image
        nonlocal detection_received
        sample_image = image
        nonlocal detections
        detections = inference_result
        print('detections: {det}'.format(det=detections))
        print('len(detections): {len}'.format(len=len(detections)))
        if detections:
            label, confidence, _ = detections[0]
            if label == 'person' and confidence > 0.9:
                # Skip video image samples until we reach a person
                # detection with a high level of confidence.
                detection_received.set()

    object_detector = ObjectDetector(**object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(name="Test AVSourceElement",
                         target=avsource.start, daemon=True)
    t.start()
    detection_received.wait(timeout=10)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    label, confidence, (x0, y0, x1, y1) = detections[0]
    assert label == 'person'
    assert confidence > 0.9
    assert 0 < x0 < x1
    assert 0 < y0 < y1
    avsource.stop()
    t.join(timeout=10)
    assert not t.is_alive()
def test_exception_on_new_sample():
    """Exception from _on_new_sample() should not break the pipe loop."""
    source_uri = 'https://raw.githubusercontent.com/ambianic/ambianic-edge/master/tests/pipeline/ai/person.jpg'
    avsource = _TestAVSourceElement2(uri=source_uri, type='image', live=False)
    object_config = _object_detect_config()
    detection_received = threading.Event()
    sample_image = None
    detections = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal sample_image
        nonlocal detection_received
        sample_image = image
        nonlocal detections
        detections = inference_result
        print('detections: {det}'.format(det=detections))
        print('len(detections): {len}'.format(len=len(detections)))
        if detections:
            label, confidence, _ = detections[0]
            if label == 'person' and confidence > 0.9:
                # Skip video image samples until we reach a person
                # detection with a high level of confidence.
                detection_received.set()

    object_detector = ObjectDetector(**object_config)
    avsource.connect_to_next_element(object_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    t = threading.Thread(name="Test AVSourceElement",
                         target=avsource.start, daemon=True)
    t.start()
    detection_received.wait(timeout=10)
    assert sample_image
    assert sample_image.size[0] == 1280
    assert sample_image.size[1] == 720
    assert detections
    assert len(detections) == 1
    label, confidence, (x0, y0, x1, y1) = detections[0]
    assert label == 'person'
    assert confidence > 0.9
    assert 0 < x0 < x1
    assert 0 < y0 < y1
    t.join(timeout=3)
    assert not t.is_alive()
def test_two_person_high_confidence_one_face_high_confidence_two_stage_pipe():
    """Expect to detect two persons but only one face."""
    object_config = _object_detect_config()
    face_config = _face_detect_config()
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    # Stage one: object detection -> out
    object_detector = ObjectDetector(**object_config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name='person2-face1.jpg')
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 2
    # Both stage-one detections should be high-confidence persons.
    for detection in result:
        label, confidence, (x0, y0, x1, y1) = detection
        assert label == 'person'
        assert confidence > 0.9
        assert 0 < x0 < x1
        assert 0 < y0 < y1
    # Stage two: rearrange pipe elements as object -> face -> out
    face_detector = FaceDetector(**face_config)
    object_detector.connect_to_next_element(face_detector)
    face_detector.connect_to_next_element(output)
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 1
    label, confidence, (x0, y0, x1, y1) = result[0]
    assert label == 'person'
    assert confidence > 0.6
    assert 0 < x0 < x1
    assert 0 < y0 < y1
def test_bad_sample_good_sample():
    """One bad sample should not prevent good samples from being processed."""
    config = _fall_detect_config()
    result = 'nothing passed to me'

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(**config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    # A bad sample (no image) should be dropped without reaching the output.
    object_detector.receive_next_sample(image=None)
    assert result == 'nothing passed to me'
    # Good samples: two consecutive frames showing a fall.
    fall_detector = FallDetector(**config)
    fall_detector.connect_to_next_element(output)
    img_1 = _get_image(file_name='fall_img_1.png')
    img_2 = _get_image(file_name='fall_img_2.png')
    fall_detector.receive_next_sample(image=img_1)
    fall_detector.min_time_between_frames = 0.01
    time.sleep(fall_detector.min_time_between_frames)
    fall_detector.receive_next_sample(image=img_2)
    assert result
    assert len(result) == 1
    category, confidence, box, angle = result[0]
    assert box  # a bounding box must accompany the fall detection
    assert category == 'FALL'
    assert confidence > 0.7
    assert angle > 60
def test_no_labels_filter():
    """Expect to detect all labeled objects - one person and one couch."""
    config = _object_detect_config()
    config['confidence_threshold'] = 0.6
    # label_filter is intentionally left unset, equivalent to None.
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(**config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name='person-couch.jpg')
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 2
    category, confidence, (x0, y0, x1, y1) = result[0]
    assert category == 'person'
    assert confidence > 0.7
    assert 0 < x0 < x1
    assert 0 < y0 < y1
    category, confidence, (x0, y0, x1, y1) = result[1]
    assert category == 'couch'
    assert confidence > 0.6
    assert 0 < x0 < x1
    assert 0 < y0 < y1
def test_one_person_two_stage_pipe_high_face_confidence():
    """Detect a person in 1st stage and a face in 2nd stage."""
    object_config = _object_detect_config()
    face_config = _face_detect_config()
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    # Wire the full two-stage pipe up front: object -> face -> out
    object_detector = ObjectDetector(**object_config)
    face_detector = FaceDetector(**face_config)
    object_detector.connect_to_next_element(face_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    face_detector.connect_to_next_element(output)
    img = _get_image(file_name='person-face.jpg')
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 1
    detection = result[0]
    box = detection['box']
    assert detection['label'] == 'person'
    assert detection['confidence'] > 0.9
    assert 0 < box['xmin'] < box['xmax']
    assert 0 < box['ymin'] < box['ymax']
def test_one_person_no_face():
    """Expect to detect one person."""
    config = _object_detect_config()
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(**config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name="person-no-face.jpg")
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 1
    detection = result[0]
    box = detection["box"]
    assert detection["label"] == "person"
    assert detection["confidence"] > 0.9
    assert 0 < box["xmin"] < box["xmax"]
    assert 0 < box["ymin"] < box["ymax"]
def test_one_label_filter():
    """Expect to detect one person and no other objects."""
    config = _object_detect_config()
    confidence_threshold = 0.7
    config["confidence_threshold"] = confidence_threshold
    config["label_filter"] = ["person"]
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(**config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name="person-couch.jpg")
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 1
    # The couch must have been filtered out by the label filter.
    detection = result[0]
    box = detection["box"]
    assert detection["label"] == "person"
    assert detection["confidence"] > confidence_threshold
    assert 0 < box["xmin"] < box["xmax"]
    assert 0 < box["ymin"] < box["ymax"]
def test_one_person():
    """Expect to detect one person."""
    config = _object_detect_config()
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(**config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name='person.jpg')
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 1
    detection = result[0]
    box = detection['box']
    assert detection['label'] == 'person'
    assert detection['confidence'] > 0.9
    assert 0 < box['xmin'] < box['xmax']
    assert 0 < box['ymin'] < box['ymax']
def test_model_inputs():
    """Verify against known model inputs."""
    config = _object_detect_config()
    object_detector = ObjectDetector(config)
    tfe = object_detector._tfengine
    # Input tensor is expected to be NHWC: 1 sample, 300x300, 3 channels.
    shape = tfe.input_details[0]['shape']
    assert shape[0] == 1    # batch size
    assert shape[1] == 300  # height
    assert shape[2] == 300  # width
    assert shape[3] == 3    # color channels
def test_model_outputs():
    """Verify against known model outputs."""
    config = _object_detect_config()
    object_detector = ObjectDetector(config)
    tfe = object_detector._tfengine
    # Scores: 1 batch of up to 20 detections.
    assert tfe.output_details[0]['shape'][0] == 1
    assert tfe.output_details[0]['shape'][1] == 20
    # Boxes: 1 batch of up to 20 detections.
    assert tfe.output_details[1]['shape'][0] == 1
    assert tfe.output_details[1]['shape'][1] == 20
    # Labels: 1 batch of up to 20 detections.
    assert tfe.output_details[2]['shape'][0] == 1
    assert tfe.output_details[2]['shape'][1] == 20
    # Number-of-detections scalar.
    assert tfe.output_details[3]['shape'][0] == 1
def test_no_sample():
    """Expect element to pass empty sample to next element."""
    config = _object_detect_config()
    result = 'Something'

    def sample_callback(image=None, inference_result=None):
        nonlocal result
        # True only when an entirely empty sample was forwarded.
        result = image is None and inference_result is None

    object_detector = ObjectDetector(element_config=config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    object_detector.receive_next_sample()
    assert result is True
def test_background_image():
    """Expect to not detect anything interesting in a background image."""
    config = _object_detect_config()
    result = None

    def sample_callback(image=None, inference_result=None):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(element_config=config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name='background.jpg')
    object_detector.receive_next_sample(image=img)
    assert not result
def test_one_label_not_in_picture():
    """Expect no detections: no object with the filtered label is present."""
    config = _object_detect_config()
    config['confidence_threshold'] = 0.6
    config['label_filter'] = ['car']
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(**config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name='person-couch.jpg')
    object_detector.receive_next_sample(image=img)
    # The picture has a person and a couch, but no car.
    assert not result
def test_bad_label_filter():
    """Expect no detections: the label is not in the training label set."""
    config = _object_detect_config()
    config['confidence_threshold'] = 0.6
    config['label_filter'] = ['SomeR@ndomJunk']
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(**config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name='person-couch.jpg')
    object_detector.receive_next_sample(image=img)
    assert not result
def test_bad_sample_good_sample():
    """One bad sample should not prevent good samples from being processed."""
    config = _fall_detect_config()
    result = "nothing passed to me"

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(**config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    # A bad sample (no image) should be dropped without reaching the output.
    object_detector.receive_next_sample(image=None)
    assert result == "nothing passed to me"
    # Good samples should still be processed normally afterwards.
    fall_detector = FallDetector(**config)
    fall_detector.connect_to_next_element(output)
    # First frame: a person in a standing position.
    img_1 = _get_image(file_name="fall_img_1.png")
    # Second frame: the person has fallen.
    img_2 = _get_image(file_name="fall_img_2.png")
    fall_detector.receive_next_sample(image=img_1)
    fall_detector.min_time_between_frames = 0.01
    time.sleep(fall_detector.min_time_between_frames)
    fall_detector.receive_next_sample(image=img_2)
    assert result
    assert len(result) == 1
    detection = result[0]
    assert detection["keypoint_corr"]
    assert detection["label"] == "FALL"
    assert detection["confidence"] > 0.7
    assert detection["leaning_angle"] > 60
def test_one_person_no_face_two_stage():
    """Expect the two-stage pipe to yield nothing: person but no face."""
    object_config = _object_detect_config()
    face_config = _face_detect_config()
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    # Full pipe: object -> face -> out
    object_detector = ObjectDetector(**object_config)
    face_detector = FaceDetector(**face_config)
    object_detector.connect_to_next_element(face_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    face_detector.connect_to_next_element(output)
    img = _get_image(file_name='person-no-face.jpg')
    object_detector.receive_next_sample(image=img)
    # No face in the image, so the second stage should emit nothing.
    assert not result
def test_one_person_two_stage_pipe_low_person_confidence():
    """Fail to detect person in 1st stage hence no face in 2nd stage."""
    object_config = _object_detect_config()
    face_config = _face_detect_config()
    result = None

    def sample_callback(image=None, inference_result=None):
        nonlocal result
        result = inference_result

    # Full pipe: object -> face -> out
    object_detector = ObjectDetector(element_config=object_config)
    face_detector = FaceDetector(element_config=face_config)
    object_detector.connect_to_next_element(face_detector)
    output = _OutPipeElement(sample_callback=sample_callback)
    face_detector.connect_to_next_element(output)
    img = _get_image(file_name='person-face.jpg')
    object_detector.receive_next_sample(image=img)
    assert not result
def test_one_person_no_face():
    """Expect to detect one person."""
    config = _object_detect_config()
    result = None

    def sample_callback(image=None, inference_result=None):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(element_config=config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name='person-no-face.jpg')
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 1
    category, confidence, (x0, y0, x1, y1) = result[0]
    assert category == 'person'
    assert confidence > 0.9
    assert 0 < x0 < x1
    assert 0 < y0 < y1
def test_two_labels_filter():
    """Expect to detect one person and one couch."""
    config = _object_detect_config()
    config['confidence_threshold'] = 0.6
    config['label_filter'] = ['person', 'couch']
    result = None

    def sample_callback(image=None, inference_result=None, **kwargs):
        nonlocal result
        result = inference_result

    object_detector = ObjectDetector(**config)
    output = _OutPipeElement(sample_callback=sample_callback)
    object_detector.connect_to_next_element(output)
    img = _get_image(file_name='person-couch.jpg')
    object_detector.receive_next_sample(image=img)
    assert result
    assert len(result) == 2
    # First detection: the person.
    detection = result[0]
    box = detection['box']
    assert detection['label'] == 'person'
    assert detection['confidence'] > 0.7
    assert 0 < box['xmin'] < box['xmax']
    assert 0 < box['ymin'] < box['ymax']
    # Second detection: the couch.
    detection = result[1]
    box = detection['box']
    assert detection['label'] == 'couch'
    assert detection['confidence'] > 0.6
    assert 0 < box['xmin'] < box['xmax']
    assert 0 < box['ymin'] < box['ymax']