Example #1
import time

import edgeiq


def main(cam=0, video_file=None, debug=False):
    if video_file is not None:
        video_stream_class = edgeiq.FileVideoStream
        kwargs = {'path': video_file, 'play_realtime': True}
    else:
        video_stream_class = edgeiq.WebcamVideoStream
        kwargs = {'cam': cam}

        # Patch a more() method onto WebcamVideoStream so the shared
        # read loop below works for both stream types
        edgeiq.WebcamVideoStream.more = lambda x: True

    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")

    if edgeiq.is_jetson():
        pose_estimator.load(engine=edgeiq.Engine.TENSOR_RT)
    else:
        pose_estimator.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    jj = JumpingJacksTracker()

    try:
        with video_stream_class(**kwargs) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while video_stream.more():
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                # Generate text to display on streamer
                text = ["Model: {}".format(pose_estimator.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                if len(results.poses) > 0:
                    jj.update(results.poses[0])
                else:
                    text.append('No poses found')
                text.append("Jumping jacks: {}".format(jj.count))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        if debug:
            jj.save_history()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #2
import time

import edgeiq


def main():
    # load the configuration data from config.json
    config = load_json(CONFIG_FILE)
    scale = config.get(SCALE)

    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")

    pose_estimator.load(engine=edgeiq.Engine.DNN,
                        accelerator=edgeiq.Accelerator.CPU)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            posture = CheckPosture(scale)

            # loop detection
            while True:
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                # Generate text to display on streamer
                text = ["Model: {}".format(pose_estimator.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                for ind, pose in enumerate(results.poses):
                    text.append("Person {}".format(ind))
                    text.append('-' * 10)
                    text.append("Key Points:")

                    # update the instance key_points to check the posture
                    posture.set_key_points(pose.key_points)

                    correct_posture = posture.correct_posture()
                    if not correct_posture:
                        text.append(posture.build_message())

                        # make a sound to alert the user to improper posture
                        print("\a")

                streamer.send_data(results.draw_poses(frame), text)

                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #3
    def __init__(self, scale=1, key_points=None):
        # Avoid a shared mutable default argument
        self.key_points = key_points if key_points is not None else {}
        self.scale = scale
        self.message = ""
        self.interval = 3  # the time between the raised hand and the potential alert
        self.timestamp = None  # marks when the raised hand was triggered
        self.listening = False
        self.signals = []
        self.people_count = 0
        self.previous_people_count = 0
        self._server_url = "http://localhost:5001/"  # configure as needed
        self._start_time = time.time()

        self.pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")

        self.pose_estimator.load(engine=edgeiq.Engine.DNN)

        print("Loaded model:\n{}\n".format(self.pose_estimator.model_id))
        print("Engine: {}".format(self.pose_estimator.engine))
        print("Accelerator: {}\n".format(self.pose_estimator.accelerator))
Example #4
import time

import edgeiq


def main():
    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    posture = CheckPosture(scale=0.8)

    bad_posture = False
    alpha = 0

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                # Generate text to display on streamer
                text = [
                    "Model: {}".format(pose_estimator.model_id),
                    "Inference time: {:1.3f} s".format(results.duration)
                ]
                for ind, pose in enumerate(results.poses):
                    text.append("Person {}".format(ind))
                    text.append('-' * 10)
                    # update the instance key_points to check the posture
                    posture.set_key_points(pose.key_points)

                    # play a reminder if you are not sitting up straight
                    correct_posture = posture.correct_posture()
                    if not correct_posture:
                        text.append(posture.build_message())
                        # make a sound to alert the user to improper posture
                        # print("\a")
                        if bad_posture:
                            alpha += 0.001
                            alpha = max(0, min(alpha, 0.5))
                        else:
                            bad_posture = True
                            alpha = 0
                        overlay(alpha)
                    else:
                        overlay(0)
                        bad_posture = False

                streamer.send_data(results.draw_poses(frame), text)

                fps.update()

                if streamer.check_exit():
                    break

                # time.sleep(2.0)
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #5
import time

import edgeiq


def main():
    # The current frame index (incremented below but otherwise unused)
    frame_idx = 0
    # The number of frames to skip before running the detector
    # (declared but never used in this example)
    detect_period = 30

    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    tracker = edgeiq.CorrelationTracker(max_objects=5)

    rightShoulder_y = 0
    leftShoulder_y = 0

    rightWrist_y = 0
    leftWrist_y = 0

    rightHip_y = 0
    leftHip_y = 0

    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(engine=edgeiq.Engine.DNN_OPENVINO,
                        accelerator=edgeiq.Accelerator.MYRIAD)

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            time.sleep(2.0)
            fps.start()

            startTime = time.time()
            futureTime = startTime + 20

            while True:
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)

                text = ["Model: {}".format(pose_estimator.model_id)]

                # Record shoulder, wrist, and hip y-coordinates from the
                # latest pose (see the assumed key point layout after
                # this example)
                for pose in results.poses:
                    rightShoulder_y = pose.key_points[2][1]
                    leftShoulder_y = pose.key_points[5][1]
                    rightWrist_y = pose.key_points[4][1]
                    leftWrist_y = pose.key_points[7][1]
                    rightHip_y = pose.key_points[8][1]
                    leftHip_y = pose.key_points[11][1]

                if (rightWrist_y < rightShoulder_y) or (leftWrist_y <
                                                        leftShoulder_y):
                    text.append("Mood: Happy")
                elif (rightWrist_y < rightHip_y) and (leftWrist_y < leftHip_y):
                    text.append("Mood: Angry")
                else:
                    # All remaining wrist/hip configurations read as idle
                    text.append("Mood: Idle")

                detectionResults = obj_detect.detect_objects(
                    frame, confidence_level=.5)

                if tracker.count:
                    tracker.stop_all()

                predictions = detectionResults.predictions
                boxList = []
                boxNameList = []

                for prediction in predictions:
                    tracker.start(frame, prediction)

                    # Keep at most one tracked box per label of interest
                    if prediction.label in ('person', 'chair'):
                        if prediction.label not in boxNameList:
                            boxList.append(prediction.box)
                            boxNameList.append(prediction.label)

                if len(boxList) >= 2:
                    distance = boxList[0].compute_distance(boxList[1])
                    if abs(distance) < 115:
                        text.append("At chair")
                        if time.time() >= futureTime:
                            text.append("Get Out")
                    else:
                        text.append("Not in chair")
                        futureTime = time.time() + 20

                frame = edgeiq.markup_image(frame,
                                            predictions,
                                            show_labels=True,
                                            show_confidences=False,
                                            colors=obj_detect.colors)

                # Send a single annotated frame (object boxes plus poses)
                streamer.send_data(results.draw_poses(frame), text)

                frame_idx += 1

                fps.update()

                if streamer.check_exit():
                    break
    finally:
        tracker.stop_all()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #6
import time

import edgeiq


def record_data(out_q, break_q):
    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            # Check if the Intel Compute Stick is connected and set up properly...
            try:
                pose_estimator.estimate(video_stream.read())
            except Exception:
                # ...if not, fall back to the default DNN engine/accelerator
                pose_estimator.load(engine=edgeiq.Engine.DNN)
                print(
                    "\nCouldn't initialize NCS. Did you connect the compute stick?"
                )
                print("Engine: {}".format(pose_estimator.engine))
                print("Accelerator: {}\n".format(pose_estimator.accelerator))

            fps.start()

            # loop pose detection
            while True:

                frame = video_stream.read()
                results = pose_estimator.estimate(frame)

                # Generate text to display on streamer
                text = ["Model: {}".format(pose_estimator.model_id)]
                text.append(
                    "Inference time: {:1.3f} s".format(results.duration) +
                    "\nFPS: {:.2f}".format(fps.compute_fps()))

                pose_out = dict()
                for ind, pose in enumerate(results.poses):
                    # filter out low quality results by checking confidence score
                    if pose.score > 20:
                        pose_out["Person {}".format(ind)] = pose
                # put result in out queue -> used by the print_data function/thread
                out_q.put(pose_out)

                streamer.send_data(results.draw_poses(frame), text)
                fps.update()

                if streamer.check_exit():
                    break
                # break_q is used to check for keyboard interrupt in the main thread
                if break_q.qsize() > 0:
                    break_q.put(1)
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Streamer stopped")
        break_q.put(1)
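
Example #6 is the producer half of a two-thread pipeline: pose dicts flow out through out_q, and break_q carries the shutdown signal in both directions. A minimal sketch of the missing consumer and wiring follows; the print_data body and the polling intervals are assumptions.

import queue
import threading
import time


def print_data(out_q, break_q):
    # Consumer: drain pose results until shutdown is signaled
    while break_q.qsize() == 0:
        try:
            pose_out = out_q.get(timeout=1)
        except queue.Empty:
            continue
        for name, pose in pose_out.items():
            print("{}: score {:.1f}".format(name, pose.score))


def main():
    out_q = queue.Queue()
    break_q = queue.Queue()
    workers = [
        threading.Thread(target=record_data, args=(out_q, break_q)),
        threading.Thread(target=print_data, args=(out_q, break_q)),
    ]
    for worker in workers:
        worker.start()
    try:
        # The main thread only waits for Ctrl-C, then relays it via break_q
        while break_q.qsize() == 0:
            time.sleep(0.1)
    except KeyboardInterrupt:
        break_q.put(1)
    for worker in workers:
        worker.join()
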
Example #7
import edgeiq


def generatePoseEstimationModel():
    pose_estimation = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimation.load(engine=edgeiq.Engine.DNN)
    return pose_estimation
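
Example #7 is just a factory: it builds and loads the estimator in one call. A hypothetical usage, assuming frame is a BGR image array read from some stream:

pose_estimation = generatePoseEstimationModel()
results = pose_estimation.estimate(frame)
print("Found {} pose(s)".format(len(results.poses)))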