Example #1
def ingest_image(image, exercise_over=False):
    global frames
    global exercise
    global joints_total

    # if frame is a "real" image -> process
    if not exercise_over:
        # frame to process
        new_frame = image

        # process frame
        joints_person, processed_frame = process_image(new_frame,
                                                       show_joints=False)
        joints_total.append(joints_person)
        frames.append(processed_frame)

        if flag_debug_csv:
            # draw the detected joints on a copy of the frame and dump it to disk
            frame_x = copy(new_frame)
            for i, point in enumerate(joints_total[-1]):
                if all(point):  # skip joints that were not detected (None, None)
                    cv2.circle(frame_x, tuple(point[0:2]), 4, colors[i],
                               thickness=-1)
            cv2.imwrite(
                "images/" + str(number_frames) + "_" +
                datetime.datetime.now().strftime("%d_%m_%Y_%H_%M_%S") +
                ".jpg", frame_x)

        # process angles only once at least 3 frames are buffered (needed for outlier removal & interpolation)
        if len(frames) >= 3:
            preprocessed_x = preprocess_angles(np.array(frames[-3:]),
                                               indexes=exercise.angles_index,
                                               mids=exercise.medians,
                                               cap_values=cap_values)
            joints = joints_total[-2]
            # debugging only: set flag_debug_csv = False in production
            # file_debug is defined in ingest_video()
            if flag_debug_csv:
                with open(file_debug, "a+") as f:
                    for element in preprocessed_x[1, exercise.angles_index]:
                        f.write(str(element) + ",")
                    f.write("\n")

            try:
                # exceptions are caught by the calling method
                exercise.process_frame(preprocessed_x[-2],
                                       exercise_over,
                                       joints=joints)
            finally:
                # remove unnecessary elements from angles and joints data structures
                frames = copy(list(preprocessed_x[-2:]))
                joints_total = copy(joints_total[-2:])

    # stop signal -> process only the remaining buffered frames
    else:
        # exceptions are caught by the calling method
        exercise.process_frame(None, exercise_over, joints=None)
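
A minimal driver sketch for the function above (hypothetical: it assumes an opened cv2.VideoCapture named capture and the module globals initialized elsewhere):

success, image = capture.read()
while success:
    ingest_image(image)                     # buffer and process this frame
    success, image = capture.read()
ingest_image(None, exercise_over=True)      # stop signal: flush the last frames
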
Example #2
def ingest_image_local(image):
    """

    @param image:
    @return:
    """
    global frames
    global exercise
    global number_frames
    global joints_total

    # update the client
    print(colored("Frame processing: " + str(number_frames), 'yellow'))
    number_frames += 1

    new_frame = image

    # TODO: handle this separately as each frame arrives, without the rest of the script? Check execution times.
    joints_person, processed_frame = process_image(new_frame,
                                                   show_joints=False)
    joints_total.append(joints_person)
    frames.append(processed_frame)

    if len(frames) >= 3:
        preprocessed_x = preprocess_angles(np.array(frames[-3:]),
                                           indexes=exercise.angles_index,
                                           mids=exercise.medians)
        print(preprocessed_x[1, exercise.angles_index])

        # debugging only: set flag_debug_csv = False in production
        if flag_debug_csv:
            with open(file_debug, "a+") as f:
                for element in preprocessed_x[1, exercise.angles_index]:
                    f.write(str(element) + ",")
                f.write("\n")

        joints = joints_total[-2]

        try:
            exercise.process_frame(preprocessed_x[-2], joints=joints)
        except GoodRepetitionException:
            print(colored("Reps OK", 'green'))
            # send message to the client for the correct repetition
        except CompleteExerciseException:
            print(colored("Esercizio completato: ripetizioni finite!",
                          'green'))
        except NoneRepetitionException:
            print(colored("Esercizio in timeout senza ripetizioni", 'red'))
        except BadRepetitionException as bre:
            message = str(bre)
            print(colored("Reps BAD: " + message, 'red'))
        except TimeoutError:
            print(colored("Timeout", 'red'))
        finally:
            frames = copy(frames[-2:])
            joints_total = copy(joints_total[-2:])
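
The repetition outcomes above are signalled through exceptions. A purely illustrative sketch of an exercise class raising them (the helpers _rep_completed, _form_ok, and _timed_out are hypothetical; the real logic lives in the exercise classes):

class SketchExercise:
    def process_frame(self, angles, joints=None):
        # hypothetical checks; only the raised exceptions mirror the protocol above
        if self._timed_out():
            raise NoneRepetitionException()
        if self._rep_completed(angles):
            if self._form_ok(joints):
                raise GoodRepetitionException()
            raise BadRepetitionException("back not straight")  # message relayed to the client
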
Example #3
    def ingest_video(self, video):
        """
        this function ingest video arrived from client
        """

        global orientation

        # useful to reset system
        global frames
        global exercise
        global joints_total
        global number_frames

        # reps counters and delimiter
        global reps_ok, reps_wrong, reps_total
        global flag_break

        # useful for debugging
        global file_debug_dir, file_debug, file_debug_rep

        # prepare the system for a new video by erasing the global vars here,
        # not at the end of ingest_video(): if the client disconnects, the end
        # of the function is never reached and the globals would never be reset
        self.__clean_global_vars__()
        # print(colored("> Erasing global variables", 'red'))
        # [client.write_message({'type': 'console', 'text': "Erasing global variables on server"}) for client in
        #  self.connections]
        # #
        # orientation = None
        # frames = []
        # joints_total = []
        # number_frames = 1
        # #
        # reps_ok = 0
        # reps_wrong = 0
        # reps_total = 0
        # # flag break
        # flag_break = False

        # debug file paths used to save the CSV logs
        file_debug = file_debug_dir + str(
            datetime.datetime.now().strftime("%d_%m_%Y_%H_%M_%S")) + ".csv"
        file_debug_rep = file_debug_dir + str(datetime.datetime.now().strftime(
            "%d_%m_%Y_%H_%M_%S")) + "_reps.csv"

        # save the uploaded file to disk and update the client
        [
            client.write_message({
                'type': 'video_processing_updates_title',
                'update': 'Saving file to server...'
            }) for client in self.connections
        ]
        file_name = video_processing_dir + time.strftime(
            "%d_%m_%Y_%H_%M_%S") + "." + video['video_extension']
        file = open(file_name, "wb")
        file.write(base64.b64decode(video['data'].split(",")[1]))
        file.close()

        # opening file
        capture = cv2.VideoCapture(file_name)
        if not capture.isOpened():
            # update client
            [
                client.write_message({
                    'type': 'error',
                    'text': 'Video file not valid'
                }) for client in self.connections
            ]
            # file not useful anymore -> removing
            capture.release()
            os.remove(file_name)
            raise Exception("Could not open video device")

        # get width/height from video
        width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # portrait or landscape
        if width > height:
            # landscape -> fixed width, calculating height
            w_video = int(width_resize_video)
            h_video = int(w_video * height / width)
            print(
                colored(
                    "Landscape. w_video = " + str(w_video) + "; h_video = " +
                    str(h_video), 'yellow'))
        else:
            # portrait -> fixed height, calculating width
            h_video = int(height_resize_video)
            w_video = int(h_video * width / height)
            print(
                colored(
                    "Portrait. w_video = " + str(w_video) + "; h_video = " +
                    str(h_video), 'yellow'))

        # detect the initial position on the first frame
        [
            client.write_message({
                'type': 'video_processing_updates_title',
                'update': 'Detecting initial position...'
            }) for client in self.connections
        ]
        # read first frame
        success, image = capture.read()
        # process first frame to check initial position
        try:
            # get images resized
            image = cv2.resize(image, (w_video, h_video))
            joints, _ = process_image(image,
                                      accept_missing=False,
                                      no_features=True)
            orientation = get_orientation(joints[13], joints[10])
            print(
                colored("> Person detected correctly: " + str(orientation),
                        'green'))
            [
                client.write_message({
                    'type':
                    'initial_position_detected',
                    'text':
                    "Person detected correctly in video.",
                    "orientation":
                    "south-east" if orientation == "s_e" else "south-west"
                }) for client in self.connections
            ]

            # initialize exercise object once the position has been detected
            exercise = EXERCISE[exercise_name](config=None,
                                               side=orientation,
                                               fps=fps)
        except FeetException:
            print(colored("> Can't detect one or both foot.", 'red'))
            [
                client.write_message({
                    'type': 'initial_position_error',
                    'error': "Can't detect one or both foot."
                }) for client in self.connections
            ]
        except NotFoundPersonException:
            print(colored("> Can't detect enough body joints.", 'red'))
            [
                client.write_message({
                    'type':
                    'initial_position_error',
                    'error':
                    "Can't detect enough body joints."
                }) for client in self.connections
            ]
        except Exception:
            print(colored("> Unexpected error.", 'red'))
            [
                client.write_message({
                    'type':
                    'error',
                    'text':
                    "Unexpected error occured during video parsing."
                }) for client in self.connections
            ]

        # start processing video
        [
            client.write_message({
                'type': 'video_processing_updates_title',
                'update': 'Processing video...'
            }) for client in self.connections
        ]
        # loop over frames, keeping the client updated with a frame counter
        while success:
            print(
                colored("> Processing frames: " + str(number_frames),
                        'yellow'))
            [
                client.write_message({
                    'type': 'update_number_frame',
                    'number': str(number_frames)
                }) for client in self.connections
            ]
            [
                client.write_message({
                    'type':
                    'console',
                    'text':
                    'Processing frame: ' + str(number_frames)
                }) for client in self.connections
            ]

            # get images resized
            image = cv2.resize(image, (w_video, h_video))

            # ingest images
            flag_break = flag_break or self.__try_catch_images__(image)

            # stop processing for overall timeout or total number of repetitions reached
            if flag_break:
                break

            number_frames += 1
            # seek to the next frame according to the configured fps
            capture.set(cv2.CAP_PROP_POS_MSEC, (number_frames * 1000 / fps))
            success, image = capture.read()

        if not flag_break:
            # processing last repetition
            _ = self.__try_catch_images__(image, last_one=True)

        # update client -> video terminated
        [
            client.write_message({
                'type': 'video_processing_terminated',
                'reps_total': reps_total,
                'reps_ok': reps_ok,
                'reps_wrong': reps_wrong
            }) for client in self.connections
        ]
        [
            client.write_message({
                'type':
                'console',
                'text':
                "Reps Total: " + str(reps_total) + "; Reps OK: " +
                str(reps_ok) + "; Reps WRONG: " + str(reps_wrong) + ";"
            }) for client in self.connections
        ]
        print(
            colored(
                "Reps Total: " + str(reps_total) + "; Reps OK: " +
                str(reps_ok) + "; Reps WRONG: " + str(reps_wrong) + ";",
                'green'))

        # file not useful anymore -> removing
        print(colored("> Removing video file from server", 'red'))
        [
            client.write_message({
                'type': 'console',
                'text': "Removing video file on server"
            }) for client in self.connections
        ]
        capture.release()
        os.remove(file_name)
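
A standalone sketch of the fps-based sampling used in the loop above (the file name and fps value are hypothetical): instead of decoding every frame, the capture is seeked to the timestamp of the next frame to process.

import cv2

capture = cv2.VideoCapture("input.mp4")   # hypothetical input file
fps = 8                                   # target processing rate
number_frames = 1
success, image = capture.read()
while success:
    # ... process `image` here ...
    number_frames += 1
    capture.set(cv2.CAP_PROP_POS_MSEC, number_frames * 1000 / fps)
    success, image = capture.read()
capture.release()
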
Example #4
    def __detect_initial_position__(self, image_data_url):
        """
        method that ingests a single frame from WEBCAM to detect the initial position
        manages exceptions related to missing joints of the body and feet position
        @param image_data_url: data url of the image to be decoded
        @return:
        """
        global orientation
        global exercise
        global flag_check_initial_position

        # TODO: consider adding a timeout after X failed attempts
        # decode the base64 data URL into a BGR image
        frame = cv2.cvtColor(
            np.array(
                Image.open(
                    BytesIO(base64.b64decode(image_data_url.split(",")[1])))),
            cv2.COLOR_RGB2BGR)

        if flag_check_initial_position:
            # process image with pose estimation
            try:
                joints, _ = process_image(frame,
                                          accept_missing=False,
                                          no_features=True)
                orientation = get_orientation(joints[13], joints[10])

                print(colored("> Person detected correctly.", 'green'))
                [
                    client.write_message({
                        'type':
                        'initial_position_detected',
                        'text':
                        "Person detected correctly.",
                        "orientation":
                        "south-east" if orientation == "s_e" else "south-west"
                    }) for client in self.connections
                ]

                print(
                    colored(
                        "> Setting flag_check_initial_position to False. Skipping other possible frames sent by FE.",
                        'green'))
                flag_check_initial_position = False

                # initialize exercise object once the position has been detected
                exercise = EXERCISE[exercise_name](config=None,
                                                   side=orientation,
                                                   fps=fps)

            except FeetException:
                print(colored("> Can't detect one or both foot.", 'red'))
                [
                    client.write_message({
                        'type':
                        'initial_position_error',
                        'error':
                        "Can't detect one or both foot."
                    }) for client in self.connections
                ]
            except NotFoundPersonException:
                print(colored("> Can't detect enough body joints.", 'red'))
                [
                    client.write_message({
                        'type':
                        'initial_position_error',
                        'error':
                        "Can't detect enough body joints. Try moving slowly."
                    }) for client in self.connections
                ]
            except Exception:
                # any other error: silently drop this frame and wait for the next one
                pass
        else:
            print(
                colored("> Skipping frame to detect initial position.",
                        'green'))
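
For testing the decoder above, a hypothetical round-trip helper that encodes a BGR frame as the same kind of base64 data URL the browser webcam sends:

import base64
import cv2

def frame_to_data_url(frame):
    # JPEG-encode the BGR frame, then wrap it in a data URL
    ok, buf = cv2.imencode(".jpg", frame)
    assert ok, "encoding failed"
    return "data:image/jpeg;base64," + base64.b64encode(buf.tobytes()).decode("ascii")
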
Example #5
    ssl_ctx.load_cert_chain(os.path.join("certificates", "server.crt"),
                            os.path.join("certificates", "server.key"))

    return tornado.web.Application([(r"/alke", WebSocketHandler)],
                                   debug=True,
                                   websocket_ping_interval=0,
                                   websocket_max_message_size=1000000000,
                                   ssl_options=ssl_ctx)


if __name__ == "__main__":
    # set up session logger
    set_logger(level='debug')
    # set up web application
    app = make_app()

    app.listen(5000,
               ssl_options={
                   "certfile": os.path.join("certificates", "server.crt"),
                   "keyfile": os.path.join("certificates", "server.key"),
               })

    # instantiate the TF model up front to avoid slow loading at first inference
    instantiate_model()

    # run inference on a warm-up image, otherwise the first real frame is processed slowly
    img_first = cv2.imread("first_frame_instance_model.jpg")
    process_image(img_first)

    tornado.ioloop.IOLoop.current().start()
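
A minimal client sketch for connecting to the /alke endpoint over wss (assumptions: the server runs locally on port 5000 with the self-signed certificate above, so certificate validation is disabled for testing only, and the message payload is hypothetical):

import tornado.httpclient
import tornado.ioloop
import tornado.websocket

async def main():
    # validate_cert=False because the server uses a self-signed certificate
    request = tornado.httpclient.HTTPRequest("wss://localhost:5000/alke",
                                             validate_cert=False)
    conn = await tornado.websocket.websocket_connect(request)
    await conn.write_message('{"type": "ping"}')  # hypothetical payload

tornado.ioloop.IOLoop.current().run_sync(main)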