# --- load the model ---
# Alternative checkpoint path derived from the CLI args:
#   model_path = os.path.join("./models", args.model + "_%d_adam_best.t7" % args.inp_dim)
model_path = "./models/mobilenetv2_224_adam_best.t7"
# n_locations=6: the network regresses six keypoint coordinates
net = CoordRegressionNetwork(n_locations=6, backbone=args.model).to("cpu")
e = ResEstimator(model_path, net, args.inp_dim)

# --- live-camera demo (disabled) ---
# cam = cv2.VideoCapture(args.camera)
# ret_val, image = cam.read()
# image = crop_camera(image)
# while True:
#     # read a frame from the camera and preprocess it
#     ret_val, image = cam.read()
#     image = crop_camera(image)
#     # forward the image
#     humans = e.inference(image)
#     image = ResEstimator.draw_humans(image, humans, imgcopy=False)
#     cv2.imshow('MobilePose Demo', image)
#     if cv2.waitKey(1) == 27:  # ESC
#         break
# cv2.destroyAllWindows()

# --- single-person RGB image test ---
image = cv2.imread("./12.jpg")
if image is None:
    # cv2.imread signals failure by returning None rather than raising;
    # fail loudly here instead of crashing inside e.inference()
    raise FileNotFoundError("could not read input image ./12.jpg")
humans = e.inference(image)
image = ResEstimator.draw_humans(image, humans, imgcopy=False)
cv2.imwrite("./results/out.png", image)
# Prime the capture with a first frame before entering the loop.
# NOTE(review): `cam` is not defined in this chunk — the VideoCapture
# initialization appears to be commented out earlier in the file; confirm
# it is created before this point, otherwise this raises NameError.
ret_val, image = cam.read()
image = crop_camera(image)

# Offset of the small pointy-finger overlay image from the frame's border.
x_offset = y_offset = 100
# Minimum confidence required before a detection is submitted.
value_threshold = 0.6

while True:
    # read a frame from the camera and preprocess it
    ret_val, image = cam.read()
    image = crop_camera(image)
    # forward the frame through the pose estimator
    humans = e.inference(image)
    # this draw_humans variant also returns a left/right label (`lr`)
    # and a confidence value (`amount`) alongside the annotated frame
    image, lr, amount = ResEstimator.draw_humans(image, humans, imgcopy=False)
    print("lr, amount", lr, amount)
    if (lr == "L"):
        # load the left-hand indicator image and shrink it to 100x100
        img_l = cv2.imread(
            r'/home/jesse/Koodiprojektit/Junction2019/mobilepose-pythorch/MobilePose-pytorch/left_2.png', 1)
        img_l_rs = cv2.resize(img_l, (100, 100), interpolation=cv2.INTER_AREA)
        if (amount > value_threshold):
            # confident enough — report the left-hand detection
            asd = submitData("L")
        #image[y_offset:y_offset+img.shape[0], x_offset:x_offset+img.shape[1]] = img
    elif (lr == "R"):
        #print("Right")
        # load the right-hand indicator image
        # NOTE(review): statement continues past the end of this chunk
        img_r = cv2.imread(
            r'/home/jesse/Koodiprojektit/Junction2019/mobilepose-pythorch/MobilePose-pytorch/right_2.png',