def run(self):
    """Interactively pick an initial bounding box via HOG person detection,
    then run the per-frame CNN estimation loop until capture fails or any
    key is pressed.

    Stage 1: show HOG detections until the user clicks to confirm the box.
    Stage 2: crop each frame to the box, run the network, and plot 2-D/3-D
    skeletons, printing FPS per iteration.
    """
    # initial BB by HOG detection
    self.BB_init()
    success, frame = self.cameraCapture.read()
    # NOTE(review): for a 3-D BGR frame, `.T` reverses ALL axes to (C, W, H);
    # confirm self.T is only set for sources where this transpose is intended.
    frame = frame.T if self.T else frame
    while success and cv2.waitKey(1) == -1:
        found, w = self.hog.detectMultiScale(frame)
        if len(found) > 0:
            # keep only the detection with the highest weight
            self._draw_BB_rect(frame, found[np.argmax(w)])
        # scale the preview window to a fixed 400-pixel height
        scale = 400 / self.H
        frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)
        cv2.imshow(self._box_init_window_name, frame)
        if self._clicked:
            # user confirmed the box with a click: leave the init loop
            self._clicked = False
            cv2.destroyWindow(self._box_init_window_name)
            break
        success, frame = self.cameraCapture.read()
        frame = frame.T if self.T else frame
    x, y, w, h = self.rect
    plt.show()
    # main loop
    success, frame = self.cameraCapture.read()
    frame = frame.T if self.T else frame
    while success and cv2.waitKey(1) == -1:
        t = time.time()
        # crop bounding box from the raw frame
        frame_cropped = frame[y:y + h, x:x + w, :]
        # crop --> one square input img for CNN
        self.frame_square = utils.img_scale_squareify(
            frame_cropped, self.box_size)
        # one square input img --> a batch of square input imgs
        self._create_input_batch()
        # square input img batch --CNN net--> 2d and 3d skeleton joint coordinates
        self._run_net()
        # filter to smooth the joint coordinate results
        self._joint_coord_filter()
        ## plot ##
        # 2d plotting
        self.frame_square = utils.draw_limbs_2d(self.frame_square,
                                                self.joints_2d,
                                                self.joint_parents)
        cv2.imshow('2D Prediction', self.frame_square)
        # 3d plotting
        self._imshow_3d()
        print('FPS: {:>2.2f}'.format(1 / (time.time() - t)))
        success, frame = self.cameraCapture.read()
        frame = frame.T if self.T else frame
    self._exit()
def main(q_start3d, q_joints):
    """Capture frames, run VNect estimation per frame, stream 3-D joints to
    `q_joints`, and adaptively re-fit the bounding box to the tracked person.

    :param q_start3d: queue; a single True is put once setup is done,
                      signalling the 3-D plotting process to start
    :param q_joints: queue receiving one joints_3d array per processed frame

    Relies on module-level state set up by ``init()``: `camera_capture`,
    `estimator`, `joint_parents`, `T`, `W_img`, `H_img` — not visible here.
    """
    init()
    x, y, w, h = hog_box()
    q_start3d.put(True)
    success, frame = camera_capture.read()
    # loop until capture fails or any key is pressed in the cv2 window
    while success and cv2.waitKey(1) == -1:
        if T:
            # mirror
            # frame = np.transpose(frame, axes=[1, 0, 2])
            # no mirror
            frame = np.rot90(frame, 3)
        # crop bounding box from the raw frame
        frame_cropped = frame[y:y + h, x:x + w, :]
        # vnect estimation
        joints_2d, joints_3d = estimator(frame_cropped)
        q_joints.put(joints_3d)
        # 2d plotting: shift crop-local (row, col) joint coordinates back
        # into full-frame coordinates before drawing
        joints_2d[:, 0] += y
        joints_2d[:, 1] += x
        frame_draw = utils.draw_limbs_2d(frame.copy(), joints_2d,
                                         joint_parents, [x, y, w, h])
        frame_draw = utils.img_scale(frame_draw, 1024 / max(W_img, H_img))
        cv2.imshow('2D Prediction', frame_draw)
        # update bounding box: joint extent padded 80% horizontally and
        # 20% vertically, clamped to the image borders
        y_min = (np.min(joints_2d[:, 0]))
        y_max = (np.max(joints_2d[:, 0]))
        x_min = (np.min(joints_2d[:, 1]))
        x_max = (np.max(joints_2d[:, 1]))
        buffer_x = 0.8 * (x_max - x_min + 1)
        buffer_y = 0.2 * (y_max - y_min + 1)
        x, y = (max(int(x_min - buffer_x / 2), 0),
                max(int(y_min - buffer_y / 2), 0))
        w, h = (int(min(x_max - x_min + buffer_x, W_img - x)),
                int(min(y_max - y_min + buffer_y, H_img - y)))
        # update frame
        success, frame = camera_capture.read()
    # angles_file.close()
    try:
        camera_capture.release()
        cv2.destroyAllWindows()
    except Exception as e:
        print(e)
        raise
def main(q_start3d, q_joints):
    """Capture frames, run VNect estimation per frame, stream 3-D joints to
    `q_joints`, and save the interpolated joint sequence to `savepath`.

    :param q_start3d: queue; a single True is put once setup is done,
                      signalling the 3-D plotting process to start
    :param q_joints: queue receiving one joints_3d array per processed frame

    Relies on module-level state set up by ``init()``: `camera_capture`,
    `estimator`, `joint_parents`, `box_size`, `T`, `savepath` — not
    visible in this chunk.
    """
    init()
    x, y, w, h = hog_box()
    q_start3d.put(True)
    success, frame = camera_capture.read()
    # accumulate every per-frame 3-D result for post-hoc interpolation
    q_joints_list = []
    # (optional video-recording setup, kept for reference)
    # width = int(camera_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    # height = int(camera_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # writer = cv2.VideoWriter("output.avi", fourcc, 30.0, (width, height))
    # loop until capture fails or any key is pressed in the cv2 window
    while success and cv2.waitKey(1) == -1:
        # crop bounding box from the raw frame
        frame = np.transpose(frame, axes=[1, 0, 2]).copy() if T else frame
        frame_cropped = frame[y:y + h, x:x + w, :]
        # vnect estimating process
        joints_2d, joints_3d = estimator(frame_cropped)
        q_joints.put(joints_3d)
        q_joints_list.append(joints_3d)
        # 2d plotting
        frame_square = utils.img_scale_squarify(frame_cropped, box_size)
        frame_square = utils.draw_limbs_2d(frame_square, joints_2d,
                                           joint_parents)
        cv2.imshow('2D Prediction', frame_square)
        # writer.write(frame_square)
        # write angle data into txt
        # angles_file.write('%f %f %f %f %f %f %f %f\n' % tuple([a for a in angles]))
        success, frame = camera_capture.read()
    # smooth/fill the recorded sequence and persist it
    q_joints_numpy = interpolation(q_joints_list)
    np.save(savepath, q_joints_numpy)
    # angles_file.close()
    try:
        camera_capture.release()
        # writer.release()
        cv2.destroyAllWindows()
    except Exception as e:
        print(e)
        raise
# Per-frame tracking loop: estimate the skeleton inside the current
# bounding box, draw it on the full frame, then re-fit the box to the
# detected joints for the next iteration.  Depends on names bound
# earlier in this file (camera_capture, T, estimator, joint_parents,
# W_img, H_img, x/y/w/h) — not visible in this chunk.
success, frame = camera_capture.read()
# loop until capture fails or any key is pressed in the cv2 window
while success and cv2.waitKey(1) == -1:
    if T:
        # mirror
        # frame = np.transpose(frame, axes=[1, 0, 2])
        # no mirror
        frame = np.rot90(frame, 3)
    # crop bounding box from the raw frame
    frame_cropped = frame[y: y + h, x: x + w, :]
    # vnect estimation
    joints_2d, joints_3d = estimator(frame_cropped)
    # 2d plotting: shift crop-local (row, col) joint coordinates back
    # into full-frame coordinates before drawing
    joints_2d[:, 0] += y
    joints_2d[:, 1] += x
    frame_draw = utils.draw_limbs_2d(frame.copy(), joints_2d,
                                     joint_parents, [x, y, w, h])
    frame_draw = utils.img_scale(frame_draw, 1024 / max(W_img, H_img))
    cv2.imshow('2D Prediction', frame_draw)
    # update bounding box: joint extent padded 80% horizontally and
    # 20% vertically, clamped to the image borders
    y_min = (np.min(joints_2d[:, 0]))
    y_max = (np.max(joints_2d[:, 0]))
    x_min = (np.min(joints_2d[:, 1]))
    x_max = (np.max(joints_2d[:, 1]))
    buffer_x = 0.8 * (x_max - x_min + 1)
    buffer_y = 0.2 * (y_max - y_min + 1)
    x, y = (max(int(x_min - buffer_x / 2), 0),
            max(int(y_min - buffer_y / 2), 0))
    w, h = (int(min(x_max - x_min + buffer_x, W_img - x)),
            int(min(y_max - y_min + buffer_y, H_img - y)))
    # update frame
    success, frame = camera_capture.read()
# Single-image demo: pick a person bounding box with HOGBox, run VNect on
# the crop, and show 2-D and 3-D skeleton overlays.
# NOTE(review): `cv2` and `np`-free here, but `cv2` is used below and not
# imported in this chunk — presumably imported earlier in the file; confirm.
from src import utils
from src.hog_box import HOGBox
from src.estimator import VNectEstimator

# model / skeleton configuration
box_size = 368      # side length (px) of the square CNN input
hm_factor = 8       # heatmap-to-input downsampling factor
joints_num = 21     # number of skeleton joints
scales = [1.0, 0.7]
# parent joint index of each of the 21 joints (skeleton topology)
joint_parents = [1, 15, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 14, 14,
                 1, 4, 7, 10, 13]

estimator = VNectEstimator()
img = cv2.imread('./pic/test_pic.jpg')
hog = HOGBox()
# auto-confirm the HOG box without waiting for a user click
hog.clicked = True
choose, rect = hog(img.copy())
x, y, w, h = rect
img_cropped = img[y: y + h, x: x + w, :]
joints_2d, joints_3d = estimator(img_cropped)
# 3d plotting
utils.draw_limbs_3d(joints_3d, joint_parents)
# 2d plotting: shift crop-local (row, col) joints back into full-image
# coordinates before drawing
joints_2d[:, 0] += y
joints_2d[:, 1] += x
img_draw = utils.draw_limbs_2d(img.copy(), joints_2d, joint_parents,
                               [x, y, w, h])
cv2.imshow('2D Prediction', img_draw)
cv2.waitKey()
cv2.destroyAllWindows()
################# ### Main Loop ### ################# ## trigger any keyboard events to stop the loop ## # start 3d skeletal animation plotting utils.plot_3d_init(joint_parents, joints_iter_gen) t = time.time() success, frame = camera_capture.read() while success and cv2.waitKey(1) == -1: # crop bounding box from the raw frame frame = np.transpose(frame, axes=[1, 0, 2]).copy() if T else frame frame_cropped = frame[y:y + h, x:x + w, :] # vnect estimating process joints_2d, joints_3d = estimator(frame_cropped) # 2d plotting frame_square = utils.img_scale_squareify(frame_cropped, box_size) frame_square = utils.draw_limbs_2d(frame_square, joints_2d, joint_parents) cv2.imshow('2D Prediction', frame_square) # write angle data into txt # angles_file.write('%f %f %f %f %f %f %f %f\n' % tuple([a for a in angles])) success, frame = camera_capture.read() # angles_file.close() my_exit(camera_capture)
hm_factor) for i in range(21): if i == 0: himg = hm[0, :, :, i] ximg = xm[0, :, :, i] yimg = ym[0, :, :, i] zimg = zm[0, :, :, i] else: tmp = hm[0, :, :, i] himg = np.hstack([himg, tmp]) tmp = xm[0, :, :, i] ximg = np.hstack([ximg, tmp]) tmp = ym[0, :, :, i] yimg = np.hstack([yimg, tmp]) tmp = zm[0, :, :, i] zimg = np.hstack([zimg, tmp]) all_hm = np.vstack([himg, ximg, yimg, zimg]) cv2.imshow('all heatmaps', all_hm * 128) img_res2d = utils.draw_limbs_2d(img_square[0, ...], joints_2d, limb_parents) cv2.imshow('2D results', img_res2d) cv2.waitKey() cv2.destroyAllWindows() print(hm[0, :, :, 0]) # np.savetxt('original', hm[0, :, :, 0])