""" This code demonstrates simple learning and feedback process for wrong push-up posture. For the intermediate presentations use only. """ from json_parser import JsonParser from video_processor import VideoProcessor from feedback import FeedbackSystem from pathlib import Path import subprocess import os, re openpose_demo_path = "D:\\OneDrive\\OneDrive - postech.ac.kr\\2019 Fall\\창의설계4\\openpose-1.5.1-binaries-win64-only_cpu-python-flir-3d\\openpose-1.5.1-binaries-win64-only_cpu-python-flir-3d\\openpose\\bin\\OpenPoseDemo.exe" camera_offset = 1 video_name = "flare1" json_dir = "D:\\OneDrive\\OneDrive - postech.ac.kr\\2019 Fall\\창의설계4\\code\\json\\" + video_name model_dir = "D:\\OneDrive\\OneDrive - postech.ac.kr\\2019 Fall\\창의설계4\\openpose-1.5.1-binaries-win64-only_cpu-python-flir-3d\\openpose-1.5.1-binaries-win64-only_cpu-python-flir-3d\\openpose\\models" tys = ["elbow", "arm", "shoulder"] for ty in tys: fds = FeedbackSystem() fds.load("demo_front_" + ty + "_model", "front") # 2. Run Openpose Webcam Mode # 3. Give feedback j = JsonParser() count = len(os.listdir(json_dir)) video = j.parse(video_name, count, json_dir, "front", None) result = fds.feedback_kmeans(video, ty) print(result)
json_dir = "../json/output" model_dir = "models" for f in os.listdir(json_dir): os.remove(os.path.join(json_dir, f)) # 2. Run Openpose Webcam Mode handler = subprocess.Popen([ openpose_demo_path, "--disable_blending=false", "--camera=" + str(camera_offset), "--net_resolution=128x128", "--write_json=" + json_dir, "--model_folder=" + model_dir, "--number_people_max=1" ], shell=False) print("Start 3 push-up") tys = ["elbow", "arm", "shoulder"] for ty in tys: fds = FeedbackSystem() fds.load("demo_front_" + ty + "_model", "front") # 3. Give feedback #try: j = JsonParser() video = j.parse(None, 60, json_dir, "front", None) result = fds.feedback_kmeans(video, ty) print(result) handler.terminate() #except: # print("Exception Occured") # handler.terminate()
from json_parser import JsonParser
from feedback import FeedbackSystem
from pathlib import Path
import os

# 1. Learn FeedbackSystem models from pre-labelled push-up data.
fds = FeedbackSystem()
j = JsonParser()

# Label format: [partial range or not, elbow flare or not, wide or not]
videos_with_label = [("r0e0ns1", [0, 0, 0]), ("r0e0ns2", [0, 0, 0]), ("r0e0ns3", [0, 0, 0]),
                     ("r0e0ws1", [0, 0, 1]), ("r0e0ws2", [0, 0, 1]), ("r0e0ws3", [0, 0, 1]),
                     ("r0e1ns1", [0, 1, 0]), ("r0e1ns2", [0, 1, 0]),
                     ("r0e1ws1", [0, 1, 1]), ("r0e1ws2", [0, 1, 1]), ("r0e1ws3", [0, 1, 1]),
                     ("r1e0ns1", [1, 0, 0]), ("r1e0ns2", [1, 0, 0]), ("r1e0ns3", [1, 0, 0]), ("r1e0ns4", [1, 0, 0]),
                     ("r1e0ws1", [1, 0, 1]), ("r1e0ws2", [1, 0, 1]), ("r1e0ws3", [1, 0, 1]),
                     ("r1e1ns1", [1, 1, 0]), ("r1e1ns2", [1, 1, 0]), ("r1e1ns3", [1, 1, 0]),
                     ("r1e1ws1", [1, 1, 1]), ("r1e1ws2", [1, 1, 1]), ("r1e1ws3", [1, 1, 1])]

tys = ["elbow", "shoulder", "arm"]
for ty in tys:
    for video_with_label in videos_with_label:
        path = Path("../json/" + video_with_label[0])
        print(str(path))
        count = len(os.listdir(path))   # one JSON file per frame
        video = j.parse(video_with_label[0], count, path, "front", video_with_label[1])
        fds.learn(video, ty, threshold=0.5)
    fds.save("demo_front_" + ty + "_model", "front")
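# The labels above appear to be encoded in the clip names themselves
# ("r{range}e{elbow}{n|w}s{index}"). A minimal sketch that derives the label from
# the clip name with a regex instead of maintaining the list by hand; it assumes
# every training clip follows that naming convention.
import re

def label_from_name(name):
    """Return [partial_range, elbow_flare, wide] parsed from e.g. 'r1e0ws2'."""
    m = re.fullmatch(r"r([01])e([01])([nw])s\d+", name)
    if m is None:
        raise ValueError("unexpected clip name: " + name)
    return [int(m.group(1)), int(m.group(2)), 1 if m.group(3) == "w" else 0]

# Example: label_from_name("r1e0ws2") == [1, 0, 1]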
from json_parser import JsonParser
from video_processor import VideoProcessor
from feedback import FeedbackSystem

j = JsonParser()
video = j.parse("flare3", 200, "json/learn", "front", [0, 0])

# Compute the per-frame left-elbow angle (0.4 is presumably a keypoint-confidence
# threshold) and summarise the series with FeedbackSystem.min_max.
vp = VideoProcessor(video)
angles = vp.compute_left_elbow_angle(0.4)

fs = FeedbackSystem()
out = fs.min_max(angles)
print(out)
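# FeedbackSystem.min_max is project code, so its exact behaviour is not shown here.
# A hypothetical stand-in, assuming it min-max normalises the angle series to [0, 1]
# so that different videos become comparable regardless of absolute joint angles.
def min_max_normalise(angles):
    valid = [a for a in angles if a is not None]   # assumes missing frames may be None
    lo, hi = min(valid), max(valid)
    if hi == lo:
        return [0.0 for _ in valid]
    return [(a - lo) / (hi - lo) for a in valid]

# Example: min_max_normalise([90, 120, 150, 180]) -> [0.0, 0.333..., 0.666..., 1.0]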
""" This code demonstrates simple learning and feedback process for wrong push-up posture. For the intermediate presentations use only. """ from json_parser import JsonParser from feedback import FeedbackSystem from pathlib import Path # 1. learning FeedbackSystem with pre-labelled push-up data fds = FeedbackSystem() j = JsonParser() #front_videos_with_label = [("correct1", 1), ("correct2", 1), ("correct3", 0), ("flare1", 1), ("flare2", 0), ("flare3", 0)] videos_with_label = [("incorrect_squat", 1), ("correct_squat", 0)] for video_with_label in videos_with_label: path = Path("../json/" + video_with_label[0]) print(str(path)) video = j.parse(video_with_label[0], 200, path, "squat", video_with_label[1]) fds.learn(video, threshold=0.5) fds.save("demo_squat_model", "squat")
""" This code demonstrates simple learning and feedback process for wrong push-up posture. For the intermediate presentations use only. """ from json_parser import JsonParser from video_processor import VideoProcessor from feedback import FeedbackSystem from pathlib import Path import subprocess import os, re files = ["output"] for filename in files: json_dir = "../json/" + filename j = JsonParser() count = len(os.listdir(json_dir)) print(count) video = j.parse(None, count, json_dir, "pushup", None) vp = VideoProcessor(video) vp.compute_left_elbow_angle(0.5) vp.dump_csv()
def start_feedback(self):
    #time.sleep(5)

    # collect data
    print("feedback start")
    print("GET READY")
    time.sleep(3)
    print("START")

    #for i in reversed(range(self.sub2_layout.count())):
    #    self.sub2_layout.itemAt(i).widget().setParent(None)
    #go_img = QLabel("GO")
    #go_img.setPixmap(QPixmap("../pictures/go.JPG").scaledToWidth(320))
    #go_img.setAlignment(Qt.AlignCenter)
    #self.sub2_layout.addWidget(go_img)

    # Frames already on disk are skipped via start_point; only frames captured from now on are parsed.
    start_point = len(os.listdir(json_dir))
    j = JsonParser(start_point=start_point)

    # Incremental try: test progressively larger frame windows until every model gives a usable result.
    frame_no_list = [i * 10 for i in range(4, 10)]
    err = 0
    tys = ["elbow", "arm", "shoulder"]
    result_dict = {}
    for frame_no in frame_no_list:
        print(str(frame_no) + " frame test")
        video = j.parse(None, frame_no, json_dir, "front", None)
        result_dict = {}
        err = 0
        for ty in tys:
            print("doing " + ty)
            fds = FeedbackSystem()
            fds.load("demo_front_" + ty + "_model", "front")
            result, div_zero = fds.feedback_kmeans(video, ty, threshold=0.3)
            if div_zero:
                err = 1
            else:
                result_dict[ty] = result
        if err == 0:
            break

    if err == 1:
        self.stop_op_screen("Posture is not detected. Please adjust webcam position")
        return

    fdm = FeedbackMsg(result_dict)
    msg = fdm.get_feedback_msg()
    #self.op_handler.terminate()

    # Now print out the feedback message.
    #self.stop_op_screen("Result")
    need_cor = msg[0]
    cor_msg = msg[1:]
    #top_layout = QVBoxLayout()
    #bottom_layout = QVBoxLayout()
    """
    for m in cor_msg:
        op_tmp = QLabel(m)
        op_tmp.setAlignment(Qt.AlignCenter)
        self.op_layout.addWidget(op_tmp)
    """
    for i in reversed(range(self.sub2_layout.count())):
        self.sub2_layout.itemAt(i).widget().setParent(None)

    if need_cor:
        bad_img = QLabel()
        bad_img.setPixmap(QPixmap("../pictures/bad.JPG").scaledToWidth(260))
        bad_img.setAlignment(Qt.AlignCenter)
        self.sub2_layout.addWidget(bad_img)
    else:
        nice_img = QLabel()
        nice_img.setPixmap(QPixmap("../pictures/nice.JPG").scaledToWidth(260))
        nice_img.setAlignment(Qt.AlignCenter)
        self.sub2_layout.addWidget(nice_img)

    feedback_msg = ""
    for m in cor_msg:
        feedback_msg += m + "\n"
    op_tmp = QLabel(feedback_msg)
    op_tmp.setAlignment(Qt.AlignCenter)
    op_tmp.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
    self.sub2_layout.addWidget(op_tmp)
    """