Example #1
    def scan_image_without_normalize(self, img_path):
        # Opening image in OpenCV
        imageToProcess = cv2.imread(img_path)

        # Get data points (datum)
        datum = op.Datum()
        datum.cvInputData = imageToProcess
        self.opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Get output image
        output_image = datum.cvOutputData

        arr = []
        try:
            pop_all(arr)
            x_high = 0
            x_low = 9999
            y_high = 0
            y_low = 9999

            # Get highest and lowest keypoints
            for count, x in enumerate(datum.poseKeypoints[0]):
                # Skip (0, 0) entries: OpenPose reports undetected keypoints as x=0, y=0.
                # This block tracks the lowest and highest detected coordinates.
                if x[0] != 0 and x[1] != 0:
                    if x_high < x[0]:
                        x_high = x[0]
                    if x_low > x[0]:
                        x_low = x[0]
                    if y_high < x[1]:
                        y_high = x[1]
                    if y_low > x[1]:
                        y_low = x[1]

                # Add pose keypoints to a dictionary
                KP = {'x': x[0], 'y': x[1]}
                # Append dictionary to array
                arr.append(KP)

            # Find the highest and lowest position of x and y
            # (Used to draw rectangle)
            if y_high - y_low > x_high - x_low:
                height = y_high - y_low
                width = x_high - x_low
            else:
                height = x_high - x_low
                width = y_high - y_low

            # Pad the box boundaries before drawing the rectangle
            y_high = int(y_high + height / 40)
            y_low = int(y_low - height / 12)
            x_high = int(x_high + width / 5)
            x_low = int(x_low - width / 5)

            return arr, x_low, y_low, output_image
        except Exception:
            # No keypoints were detected; fall through and return None
            pass
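These snippets call a pop_all helper that is never defined in the excerpts. A minimal sketch of what it presumably does, assuming it simply empties a list in place:

def pop_all(lst):
    # Assumed helper: clear the list in place so other references to it see the change
    del lst[:]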
Example #2
def chunk_it(seq, num):
    # Split seq into num roughly equal consecutive chunks.
    # (Assumed signature and chunk size, inferred from the chunk_it(file_list, 3) call below.)
    avg = len(seq) / float(num)
    out = []
    last = 0.0

    while last < len(seq):
        out.append(seq[int(last):int(last + avg)])
        last += avg

    return out


if __name__ == "__main__":
    from multiprocessing import Process

    # k-fold count and path to the training logs
    k = 10
    log_path = '/home/kevin/projects/exercise_pose_evaluation_machine/k_fold_results/training_logs/'

    # Get all files from folder
    file_list = os.listdir(log_path)

    THREADS = []

    for files in chunk_it(file_list, 3):
        thread = Process(target=extract_log_from_files,
                         args=(k, log_path, files))
        thread.start()
        THREADS.append(thread)
    for t in THREADS:
        t.join()
    pop_all(THREADS)
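For reference, and assuming the reconstructed signature above, chunk_it splits a sequence into num roughly equal consecutive slices:

# Quick behavior check (output assumes the len(seq) / num chunk size above)
print(chunk_it(list(range(7)), 3))
# [[0, 1], [2, 3], [4, 5, 6]]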
Example #3
    def scan_video_without_normalize(self, video_path, keypoints_to_extract):
        # Opening OpenCV stream
        stream = cv2.VideoCapture(video_path)

        # Define list of pose, x low, and y low
        list_of_pose = []
        list_of_x_low = []
        list_of_y_low = []
        while True:
            # Read the next frame. VideoCapture.read() returns (False, None)
            # at the end of the video instead of raising, so check the flag.
            ret, imageToProcess = stream.read()
            if not ret:
                # Break at the end of the stream
                break

            datum = op.Datum()
            datum.cvInputData = imageToProcess

            # Find keypoints
            self.opWrapper.emplaceAndPop(op.VectorDatum([datum]))

            # Get output image processed by Openpose
            output_image = datum.cvOutputData

            # Define the keypoints list and the bounding-box list
            arr = []
            boxes = []

            try:
                # Loop over each detected person's set of keypoints
                for keypoint in datum.poseKeypoints:
                    pop_all(arr)
                    x_high = 0
                    x_low = 9999
                    y_high = 0
                    y_low = 9999

                    # Get highest and lowest keypoints
                    for count, x in enumerate(keypoint):
                        # Check which keypoints to extract
                        if count in keypoints_to_extract:
                            # Skip (0, 0) entries: OpenPose reports undetected keypoints as x=0, y=0.
                            # This block tracks the lowest and highest detected coordinates.
                            if x[0] != 0 and x[1] != 0:
                                if x_high < x[0]:
                                    x_high = x[0]
                                if x_low > x[0]:
                                    x_low = x[0]
                                if y_high < x[1]:
                                    y_high = x[1]
                                if y_low > x[1]:
                                    y_low = x[1]

                            # Add pose keypoints to a dictionary
                            KP = {'x': x[0], 'y': x[1]}
                            # Append dictionary to array
                            arr.append(KP)

                    # Find the highest and lowest position of x and y
                    # (Used to draw rectangle)
                    if y_high - y_low > x_high - x_low:
                        height = y_high - y_low
                        width = x_high - x_low
                    else:
                        height = x_high - x_low
                        width = y_high - y_low

                    # Pad the box boundaries before drawing the rectangle
                    y_high = int(y_high + height / 40)
                    y_low = int(y_low - height / 12)
                    x_high = int(x_high + width / 5)
                    x_low = int(x_low - width / 5)

                    # Append list of pose, x low, and y low
                    list_of_pose.append(arr)
                    list_of_x_low.append(x_low)
                    list_of_y_low.append(y_low)
            except Exception:
                # No keypoints were detected in this frame; skip it
                pass

        return list_of_pose, list_of_x_low, list_of_y_low
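A hypothetical call site for this method; the extractor class and its construction are not shown in these excerpts, so the names below are illustrative only:

# Keypoint indices 1-14 (neck through ankles), as listed in Example #4 below
KEYPOINTS_TO_EXTRACT = list(range(1, 15))

list_of_pose, list_of_x_low, list_of_y_low = extractor.scan_video_without_normalize(
    "exercise_clip.mp4", KEYPOINTS_TO_EXTRACT)
print("Collected %d per-person pose frames" % len(list_of_pose))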
Example #4
    def get_keypoints_and_id_from_img_without_normalize(self, img):
        # KP ordering of body parts
        NECK = 1
        R_SHOULDER = 2
        R_ELBOW = 3
        R_WRIST = 4
        L_SHOULDER = 5
        L_ELBOW = 6
        L_WRIST = 7
        MID_HIP = 8
        R_HIP = 9
        R_KNEE = 10
        R_ANKLE = 11
        L_HIP = 12
        L_KNEE = 13
        L_ANKLE = 14

        # Define bodyparts to get the selected keypoints
        BODY_PARTS = [
            NECK, R_SHOULDER, R_ELBOW, R_WRIST, L_SHOULDER, L_ELBOW, L_WRIST,
            MID_HIP, R_HIP, R_KNEE, R_ANKLE, L_HIP, L_KNEE, L_ANKLE
        ]

        # Set tracker
        max_cosine_distance = 0.2
        nn_budget = 100
        metric = nn_matching.NearestNeighborDistanceMetric(
            "cosine", max_cosine_distance, nn_budget)
        tracker = Tracker(metric)

        # Get data points (datum)
        datum = op.Datum()
        datum.cvInputData = img
        self.opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Initialize lists
        arr = []
        boxes = []
        list_of_pose_temp = []
        list_of_pose_and_id = []
        try:
            # Get highest and lowest keypoints
            for kp_idx, keypoint in enumerate(datum.poseKeypoints):
                pop_all(arr)
                x_high = 0
                x_low = 9999
                y_high = 0
                y_low = 9999

                for count, x in enumerate(keypoint):
                    # Skip (0, 0) entries: OpenPose reports undetected keypoints as x=0, y=0.
                    # This block tracks the lowest and highest detected coordinates.
                    if x[0] != 0 and x[1] != 0:
                        if x_high < x[0]:
                            x_high = x[0]
                        if x_low > x[0]:
                            x_low = x[0]
                        if y_high < x[1]:
                            y_high = x[1]
                        if y_low > x[1]:
                            y_low = x[1]

                    # Add pose keypoints to a dictionary
                    if count in BODY_PARTS:
                        KP = {'x': x[0], 'y': x[1]}

                        # Append dictionary to array
                        arr.append(KP)

                # Find the highest and lowest position of x and y
                # (Used to draw rectangle)
                if y_high - y_low > x_high - x_low:
                    height = y_high - y_low
                    width = x_high - x_low
                else:
                    height = x_high - x_low
                    width = y_high - y_low

                # Pad the box boundaries before drawing the rectangle
                y_high = int(y_high + height / 40)
                y_low = int(y_low - height / 12)
                x_high = int(x_high + width / 5)
                x_low = int(x_low - width / 5)

                # Collect this person's keypoints (left unnormalized here)
                list_of_pose_temp.append(arr)

                # Make the box
                boxes.append([x_low, y_low, width, height])

                # Encode the features inside the designated box
                features = self.encoder(datum.cvOutputData, boxes)

                # For a non-empty item add to the detection array
                def nonempty(xywh):
                    return xywh[2] != 0 and xywh[3] != 0

                detections = [
                    Detection(bbox, 1.0, feature)
                    for bbox, feature in zip(boxes, features) if nonempty(bbox)
                ]

                # Run non-maxima suppression.
                np_boxes = np.array([d.tlwh for d in detections])
                scores = np.array([d.confidence for d in detections])
                indices = preprocessing.non_max_suppression(
                    np_boxes, self.nms_max_overlap, scores)
                detections = [detections[i] for i in indices]

                # Update tracker.
                tracker.predict()
                tracker.update(detections)

                # Make pose and person ID list
                if kp_idx == len(datum.poseKeypoints) - 1:
                    for track_idx, track in enumerate(tracker.tracks):
                        bbox = track.to_tlwh()
                        list_of_pose_and_id.append({
                            "Keypoints": list_of_pose_temp[track_idx],
                            "ID": track.track_id
                        })

            return list_of_pose_and_id
        except Exception:
            # Detection or tracking failed; fall through and return None
            pass
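A hypothetical way to consume the returned list, where each entry is a dict with "Keypoints" and "ID"; the extractor instance below is illustrative only:

list_of_pose_and_id = extractor.get_keypoints_and_id_from_img_without_normalize(img)
for entry in list_of_pose_and_id or []:  # the method returns None on failure
    print("Person %d: %d keypoints" % (entry["ID"], len(entry["Keypoints"])))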
                            if exercise_type == "plank":
                                if prediction == 1:
                                    correct_reps += 1
                            else:
                                # If starting position is found and start is True then mark end
                                if prediction == 1 and start and len(list_of_frames) > 12:
                                    end = True
                                
                                # If starting position is found and end is False then mark start
                                if (len(list_of_frames) == 1 or not end) and prediction == 1 and len(list_of_frames) <= 1:
                                    start = True

                                    # If frames were already collected,
                                    # clear them and restart the collection
                                    if len(list_of_frames) >= 1:
                                        pop_all(list_of_frames)

                                # validate_keypoints(keypoints)
                                # Add frames
                                if start:
                                    list_of_frames.append(keypoints)

                                # If both start and end were found,
                                # send the data to the LSTM model and Plotter
                                if start and end:
                                    # Send data
                                    pred_result = predict_sequence(list_of_frames, exercise_type)
                                    if pred_result == "1":
                                        correct_reps += 1
                                    list_of_lstm_predictions.append(pred_result)
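The start and end flags above implement a small rep-counting state machine. A simplified, self-contained sketch of the same idea, independent of the OpenPose and LSTM pipeline (the 12-frame threshold mirrors the check above):

def count_reps(start_pose_flags, min_frames=12):
    # start_pose_flags: one boolean per frame, True when the pose classifier
    # reports the exercise's starting pose (prediction == 1 above).
    reps = 0
    start = end = False
    frames = []
    for is_start_pose in start_pose_flags:
        if is_start_pose and start and len(frames) > min_frames:
            end = True            # starting pose seen again: the rep is complete
        if is_start_pose and not end:
            start = True          # first starting pose: begin collecting frames
            frames.clear()
        if start:
            frames.append(is_start_pose)
        if start and end:
            reps += 1             # one full repetition observed
            frames.clear()
            start, end = True, False
    return reps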
    def video_stream():
        start = False
        end = False
        # Probe the camera; if it is not available, show an error and close the window
        if not cap.read()[0]:
            cap.release()
            master.destroy()
            messagebox.showerror("Error", "Error when processing: Camera is not available!")
            exit()

        # Read the next frame
        _, frame = cap.read()
        cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)

        # Process frames here
        # Send to external function
        # Get keypoint and ID data
        list_of_keypoints = pm.kp_extractor.get_keypoints_and_id_from_img(frame)

        try: 
            if list_of_keypoints is None:
                raise Exception("List of keypoints cannot be None")
            x = list_of_keypoints[0]
            if x['ID'] == pm.target_id:
                print("masuk sini gak???")
                # Transform keypoints list to array
                keypoints = np.array(x['Keypoints']).flatten()

                # Get prediction
                prediction = pm.init_pose_detector.predict(np.array([keypoints]))

                # If starting position is found and start is True then mark end
                if prediction == pm.exercise_type and start:
                    end = True
                
                # If starting position is found and end is False then mark start
                if prediction == pm.exercise_type and not end:
                    start = True

                    # If frames were already collected,
                    # clear them and restart the collection
                    if len(all_exercise_reps) >= 1:
                        pop_all(all_exercise_reps)

                # Add frames
                all_exercise_reps.append(pm.kp_extractor.get_keypoints_and_id_from_img_without_normalize(frame))

                # If both start and end were found,
                # send the data to the LSTM model and Plotter
                if start and end:
                    # Send data
                    x_low, y_low, _, _ = pm.kp_extractor.get_min_max_frames(all_exercise_reps)
                    scaler = make_min_max_scaler(all_exercise_reps, x_low, y_low)
                    normalized_reps = normalize_keypoints_from_external_scaler(all_exercise_reps, scaler)
                    reshaped_normalized_reps = [np.array(frames).flatten() for frames in normalized_reps]

                    exercise_evaluator.predict(get_exact_frames(reshaped_normalized_reps))
                    # Pop all frames in list
                    pop_all(all_exercise_reps)

                    # Reset the flags for the next repetition (start stays True, end is cleared)
                    start = True
                    end = False

                    # Add frames
                    all_exercise_reps.append(keypoints)
        except Exception as e:
            print(e)

        # Convert the Image object into a TkPhoto object
        im = Image.fromarray(cv2image)
        imgtk = ImageTk.PhotoImage(
            im.resize(
                (800, 600),
                Image.LANCZOS  # Image.ANTIALIAS was removed in Pillow 10
            )
        )

        # Show image in panel
        panel.imgtk = imgtk
        panel.configure(image=imgtk)
        panel.after(10, video_stream)
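video_stream() expects cap, master, panel, and the other helper objects (pm, the evaluators, the frame lists) to exist in the enclosing scope, and it reschedules itself every ~10 ms via panel.after. A hypothetical minimal scaffolding around it; the real script's setup is not shown in these excerpts:

import cv2
import tkinter as tk
from tkinter import messagebox
from PIL import Image, ImageTk

master = tk.Tk()
panel = tk.Label(master)        # widget that video_stream() keeps updating
panel.pack()
cap = cv2.VideoCapture(0)       # webcam feed read inside video_stream()

video_stream()                  # start the self-rescheduling update loop
master.mainloop()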