def on_video(self, video: np.ndarray) -> np.ndarray:
    """Render one recording frame.

    Pulses the video brightness with the music beat, advances the goal
    pose on each beat boundary, composites the goal square and the
    pose-transformed grabbed frame onto the video, and appends the
    interpolated pose plus an IMU sample to ``self.out_record``.

    Returns the composited frame as ``uint8``.
    """
    current_time = time.time()
    # Elapsed wall-clock time since recording started.
    # NOTE(review): __init__ sets self.t0 = None — presumably it is set to
    # time.time() elsewhere before the first frame arrives; verify.
    wall_time = current_time - self.t0
    spb = 60.0 / self.bpm  # seconds per beat
    # Fractional position inside the current beat, in [0, 1).
    wall_beat_distance = (wall_time % spb) / spb
    # Beat envelope scaled to [0, 0.5]; blends the frame toward white (255).
    beat = self.heartbeat(wall_time) / 2.0
    a = (1 - beat) * video + beat * 255
    # A drop in the beat fraction means we wrapped into a new beat:
    # alternate between the zero pose and a freshly generated goal pose.
    if wall_beat_distance < self.prev_beat_distance:
        zero_goal = AIRecordingState.GoalPose()
        self.prev_goal = self.goal_pose
        if self.prev_goal == zero_goal:
            self.make_new_goal()
        else:
            self.goal_pose = zero_goal
    # tanh-shaped easing of the beat fraction, mapped back into [0, 1],
    # so the pose interpolation accelerates mid-beat and settles at the ends.
    beat_distance = (np.tanh(wall_beat_distance * np.pi * 2.0 - np.pi) / 2.0) + .5
    # Pose interpolated between the previous goal and the current goal.
    g = self.prev_goal * (1 - beat_distance) + self.goal_pose * beat_distance
    # Grabbed frame warped by the interpolated pose.
    xfromed_grab = transform_about_center(
        self.grabbed_frame,
        scale_multiplier=(g.z, g.z),
        translation=(g.x, g.y),
        rotation_degrees=g.roll,
        skew=(g.yaw, g.pitch))
    # Goal square warped by the (non-interpolated) target pose.
    xformed_goal = transform_about_center(
        self.goal_square,
        scale_multiplier=(self.goal_pose.z, self.goal_pose.z),
        translation=(self.goal_pose.x, self.goal_pose.y),
        rotation_degrees=self.goal_pose.roll,
        skew=(self.goal_pose.yaw, self.goal_pose.pitch))
    # Goal square first, then the grabbed frame on top.
    a = overlay_transparent(a, xformed_goal)
    a = overlay_transparent(a, xfromed_grab)
    self.prev_beat_distance = wall_beat_distance
    # Sample IMU orientation and acceleration and record them with the
    # interpolated pose for this frame.
    # NOTE(review): the four 0.1 arguments to get_orientation are opaque
    # here — confirm their meaning against the IMU protocol implementation.
    q = self.imu.protocol.imu.get_orientation(.1, .1, .1, .1)
    acc = self.imu.protocol.imu.get_acc(q)
    self.out_record.append((g, wall_time, q, acc))
    return a.astype(np.uint8)
def __init__(self, cam, grabbed_frame, imu, out_name, music, bpm):
    """Set up the recording state.

    Builds a 4-channel version of the grabbed frame, a green goal square
    overlay, the output video writer, and the pose/IMU record.

    Args:
        cam: camera handle, forwarded to the base class.
        grabbed_frame: BGR frame captured for overlay/warping.
        imu: IMU handle, forwarded to the base class.
        out_name: basename for the output .avi and record files.
        music: music track, forwarded to the base class.
        bpm: beats per minute of the music.
    """
    super().__init__(cam, imu, out_name, music)
    self.heartbeat = get_heartbeat_callback(bpm)
    # Goal pacing runs at half the music tempo — the heartbeat callback
    # keeps the full bpm, while beat/goal timing in on_video uses this
    # halved value.
    self.bpm = bpm / 2.0
    self.grabbed_frame = grabbed_frame
    # Recording start time. NOTE(review): left as None here and used in
    # arithmetic by on_video — presumably assigned elsewhere before the
    # first frame; verify.
    self.t0 = None
    self.prev_beat_distance = 0
    self.goal_pose = AIRecordingState.GoalPose()
    self.prev_goal = AIRecordingState.GoalPose()
    # Append the HSV value channel as a 4th plane so the grabbed frame
    # carries per-pixel alpha-like data for overlay_transparent.
    hsv = cv2.cvtColor(self.grabbed_frame, cv2.COLOR_BGR2HSV)
    self.grabbed_frame = np.append(self.grabbed_frame, hsv[:, :, 2:], -1)
    # 100x200 BGRA square: full alpha (channel 3) and full green (channel 1).
    self.goal_square = np.zeros((100, 200, 4))
    self.goal_square[:, :, 3] = 255
    self.goal_square[:, :, 1] = 255
    # Center the square on a black canvas matching the grabbed frame size.
    self.goal_square = overlay_transparent(
        np.zeros(
            (self.grabbed_frame.shape[0], self.grabbed_frame.shape[1], 3)),
        self.goal_square,
    )
    # Append a half-intensity value channel so the square renders
    # semi-transparently when composited.
    hsv_square = cv2.cvtColor(self.goal_square.astype(np.uint8), cv2.COLOR_BGR2HSV)
    self.goal_square = np.append(self.goal_square, hsv_square[:, :, 2:] * 0.5, -1)
    # MJPG writer at 20 fps, sized to the grabbed frame (width, height).
    self.out_video = cv2.VideoWriter(
        f"{out_name}.avi",
        cv2.VideoWriter_fourcc(*"MJPG"),
        20.0,
        (grabbed_frame.shape[1], grabbed_frame.shape[0]),
        True,
    )
    self.out_record = AIRecordingState.Record(self.grabbed_frame)
    self.out_name = out_name
def on_video(self, video):
    """Composite the menu overlay onto the incoming frame and return it."""
    return overlay_transparent(video, self.menu_np)