class AnnotatedMotionVector(MotionVector):
    """Motion vector enriched with keyframe events, constraints and the
    originating graph walk, plus export helpers (file, JSON, Unity format)."""

    def __init__(self, skeleton=None, algorithm_config=None, rotation_type=ROTATION_TYPE_QUATERNION):
        super(AnnotatedMotionVector, self).__init__(skeleton, algorithm_config, rotation_type)
        self.keyframe_event_list = None
        self.mg_input = None
        self.graph_walk = None
        self.grounding_constraints = None
        self.ground_contacts = None
        self.ik_constraints = collections.OrderedDict()

    def export(self, output_filename, add_time_stamp=False, export_details=False):
        """Save the resulting animation frames, the annotation and actions to files.

        Also exports the input file again to the output directory, where it is
        used as input for the constraints visualization by the animation server.
        """
        MotionVector.export(self, self.skeleton, output_filename, add_time_stamp)
        self.export_annotation(output_filename)

    def export_annotation(self, output_filename):
        # Write the original input description and the keyframe events, if present.
        if self.mg_input is not None:
            write_to_json_file(output_filename + ".json", self.mg_input.mg_input_file)
        if self.keyframe_event_list is not None:
            self.keyframe_event_list.export_to_file(output_filename)

    def load_from_file(self, file_name):
        # Replace the current skeleton with the one described by the BVH file.
        reader = BVHReader(file_name)
        self.skeleton = SkeletonBuilder().load_from_bvh(reader)

    def generate_bvh_string(self):
        """Return the motion serialized as a BVH-format string."""
        frames = np.array(self.frames)
        # Pad reduced frames with the parameters of fixed joints before writing.
        if len(frames) > 0 and len(frames[0]) < self.skeleton.reference_frame_length:
            frames = self.skeleton.add_fixed_joint_parameters_to_motion(frames)
        writer = BVHWriter(None, self.skeleton, frames, self.skeleton.frame_time, True)
        return writer.generate_bvh_string()

    def to_unity_format(self, scale=1.0):
        """Convert the frames into a custom json format for use in a Unity client."""
        animated_joints = [name for name, node in list(self.skeleton.nodes.items())
                           if "EndSite" not in name and len(node.children) > 0]  # self.animated_joints
        for node in list(self.skeleton.nodes.values()):
            node.quaternion_index = node.index
        unity_frames = [self._convert_frame_to_unity_format(frame, animated_joints, scale)
                        for frame in self.frames]
        result = dict()
        result["frames"] = unity_frames
        result["frameTime"] = self.frame_time
        result["jointSequence"] = animated_joints
        if self.graph_walk is not None:
            result["events"] = self._extract_event_list_from_keyframes()
        return result

    def _convert_frame_to_unity_format(self, frame, animated_joints, scale=1.0):
        """Convert one frame into a custom json format, mapping the
        transformations to the left-handed coordinate system of Unity.

        src: http://answers.unity3d.com/questions/503407/need-to-convert-to-right-handed-coordinates.html
        """
        unity_frame = {"rotations": [], "rootTranslation": None}
        for name in self.skeleton.nodes.keys():
            if name not in animated_joints:
                continue
            node = self.skeleton.nodes[name]
            if name == self.skeleton.root:
                translation = frame[:3] * scale
                unity_frame["rootTranslation"] = {"x": -translation[0],
                                                  "y": translation[1],
                                                  "z": translation[2]}
            if name in self.skeleton.animated_joints:
                # Rotation comes from the frame parameters.
                # TODO fix: the animated_joints is ordered differently than the nodes list for the latest model
                start = self.skeleton.animated_joints.index(name) * 4 + 3
                q = frame[start:start + 4]
                unity_frame["rotations"].append({"x": -q[1], "y": q[2],
                                                 "z": q[3], "w": -q[0]})
            else:
                # Fixed joint: use the static rotation stored on the node.
                q = node.rotation
                unity_frame["rotations"].append({"x": -float(q[1]), "y": float(q[2]),
                                                 "z": float(q[3]), "w": -float(q[0])})
        return unity_frame

    def _extract_event_list_from_keyframes(self):
        """Collect keyframe events from the graph walk, shifting each event's
        keyframe index by the frames accumulated in previous steps."""
        events = list()
        offset = 0
        for step in self.graph_walk.steps:
            time_function = None
            if self.graph_walk.use_time_parameters:
                graph_node = self.graph_walk.motion_state_graph.nodes[step.node_key]
                time_function = graph_node.back_project_time_function(step.parameters)
            for constraint in step.motion_primitive_constraints.constraints:
                if (constraint.constraint_type == SPATIAL_CONSTRAINT_TYPE_KEYFRAME_POSITION
                        and constraint.event_name is not None):
                    keyframe = constraint.extract_keyframe_index(time_function, offset)
                    events.append({"eventName": constraint.event_name,
                                   "eventTarget": constraint.event_target,
                                   "keyframe": keyframe})
            offset += step.end_frame - step.start_frame + 1
        print("extracted", events)
        return events
class MotionPrimitiveController(LegacySkeletonAnimationController):
    """Scene controller that samples a statistical motion primitive model and
    visualizes the resulting skeleton motions.

    Each generated sample is stored as an AnnotatedMotionVector in
    ``self.samples`` with a matching SkeletonVisualization in
    ``self._visualizations`` (drawn as lines, offset along x for side-by-side
    comparison).
    """

    def __init__(self, scene_object, name, data, color=(0, 0, 1)):
        """Load the skeleton and motion primitive model from *data*.

        :param scene_object: owning scene object (passed to the base controller)
        :param name: display name of the controller
        :param data: dict with a "skeleton" entry and the motion primitive model
        :param color: RGB tuple used to draw the sampled skeletons
        """
        LegacySkeletonAnimationController.__init__(self, scene_object)
        self.motion_primitive = MotionStateGraphNode(None)
        self.skeleton = SkeletonBuilder().load_from_json_data(data["skeleton"])
        self.frameTime = self.skeleton.frame_time
        self._visualizations = []
        self.samples = []
        self.algorithm_config = DEFAULT_ALGORITHM_CONFIG
        self.color = color
        self.motion_primitive._initialize_from_json(convert_to_mgrd_skeleton(self.skeleton), data)
        print("loaded motion primitive")
        print("spatial", self.motion_primitive.get_n_spatial_components())
        print("time", self.motion_primitive.get_n_time_components())
        self.motion_primitive.cluster_tree = None
        self.training_data = None
        #print("n gmm", len(self.motion_primitive.get_gaussian_mixture_model().weights))
        self.name = name
        self._regenerate = True
        self.type = CONTROLLER_TYPE_MP
        set_log_mode(LOG_MODE_DEBUG)
        self.start_pose = {"position": [0, 0, 0], "orientation": [0, 0, 0]}
        self.mock_graph = MockGraph(self.skeleton)
        self.label_color_map = dict()

    def init_visualization(self):
        # Start with a single random sample so something is visible immediately.
        self.generate_random_samples(1)

    def load_cluster_tree_from_json_file(self, filepath):
        tree_data = load_json_file(filepath)
        self.load_cluster_tree_from_json(tree_data)

    def load_cluster_tree_from_json(self, tree_data):
        print("load cluster tree")
        self.motion_primitive.cluster_tree = FeatureClusterTree.load_from_json(tree_data)
        print("finished loading cluster tree")

    def clear(self):
        # Drop all generated samples and their visualizations.
        self.samples = []
        self._visualizations = []

    def generate_random_samples(self, n_samples, sample_offset=0):
        """Replace current samples with *n_samples* random samples, spread
        along the x axis by *sample_offset* and centered around x=0."""
        self.clear()
        if n_samples > 1:
            x_offset = -n_samples / 2 * sample_offset
        else:
            x_offset = 0
        for idx in range(n_samples):
            self.generate_random_sample(x_offset)
            x_offset += sample_offset
        self.updated_frame()

    def generate_random_samples_from_tree(self, n_samples, sample_offset=0):
        """Like generate_random_samples but draws from the cluster tree data.

        No-op when no cluster tree has been loaded.
        """
        if self.motion_primitive.cluster_tree is None:
            return
        self.clear()
        if n_samples > 1:
            x_offset = -n_samples / 2 * sample_offset
        else:
            x_offset = 0
        for idx in range(n_samples):
            self.generate_random_sample_from_tree(x_offset)
            x_offset += sample_offset
        self.updated_frame()

    def generate_random_constraints(self, joint_name, frame_idx, n_samples):
        """Return *n_samples* random joint positions usable as constraints."""
        positions = []
        for idx in range(n_samples):
            positions.append(self.generate_random_constraint(joint_name, frame_idx))
        return positions

    def generate_random_sample(self, x_offset=0):
        spline = self.motion_primitive.sample(use_time=False)
        frames = spline.get_motion_vector()
        self.create_sample_visualization(frames, x_offset)

    def generate_random_sample_from_tree(self, x_offset=0):
        # Pick a random leaf sample from the cluster tree and back-project it.
        if self.motion_primitive.cluster_tree is not None:
            n_points = len(self.motion_primitive.cluster_tree.data)
            sample_idx = np.random.randint(0, n_points)
            print("visualize", sample_idx, "/", n_points)
            sample = self.motion_primitive.cluster_tree.data[sample_idx]
            frames = self.motion_primitive.back_project(sample, False).get_motion_vector()
            self.create_sample_visualization(frames, x_offset)

    def generate_random_sample_from_data(self, x_offset=0):
        # Pick a random training sample (if loaded) and back-project it.
        self.clear()
        if self.training_data is not None:
            n_samples = len(self.training_data)
            sample_idx = np.random.randint(0, n_samples)
            print("visualize", sample_idx, "/", n_samples)
            sample = self.training_data[sample_idx]
            frames = self.motion_primitive.back_project(sample, False).get_motion_vector()
            self.create_sample_visualization(frames, x_offset)

    def create_sample_visualization(self, frames, x_offset=0):
        """Wrap *frames* in an AnnotatedMotionVector plus a line-mode
        SkeletonVisualization; shifts the root x channel by *x_offset*."""
        motion = AnnotatedMotionVector(skeleton=self.skeleton)
        frames[:, 0] += x_offset  # NOTE: mutates the caller's frames in place
        print(frames.shape)
        motion.append_frames(frames)
        v = SkeletonVisualization(self.scene_object, self.color)
        v.set_skeleton(self.skeleton)
        v.draw_mode = SKELETON_DRAW_MODE_LINES
        self._visualizations.append(v)
        self.samples.append(motion)

    def generate_constrained_sample(self, joint_name, frame_idx, position):
        """Generate and visualize a sample that places *joint_name* at
        *position* on canonical keyframe *frame_idx* (-1 = last frame)."""
        action_constraints = ElementaryActionConstraints()
        action_constraints.motion_state_graph = self.mock_graph
        self.algorithm_config["local_optimization_settings"]["max_iterations"] = 50000
        self.algorithm_config["local_optimization_settings"]["method"] = "L-BFGS-B"
        mp_generator = MotionPrimitiveGenerator(action_constraints, self.algorithm_config)
        #mp_generator.numerical_minimizer = OptimizerBuilder(self.algorithm_config).build_path_following_minimizer()
        mp_generator.numerical_minimizer = OptimizerBuilder(self.algorithm_config).build_path_following_with_likelihood_minimizer()
        mp_constraints = MotionPrimitiveConstraints()
        n_frames = self.getNumberOfFrames()
        if frame_idx == -1:
            frame_idx = n_frames - 1
        mp_constraints.skeleton = self.skeleton
        c_desc = {"joint": joint_name, "canonical_keyframe": frame_idx, "position": position,
                  "n_canonical_frames": n_frames, "semanticAnnotation": {"keyframeLabel": "none"}}
        print("set constraint", c_desc)
        c = GlobalTransformConstraint(self.skeleton, c_desc, 1.0, 1.0)
        mp_constraints.constraints.append(c)
        mp_constraints.use_local_optimization = self.algorithm_config["local_optimization_mode"] in ["all", "keyframes"]
        vector = mp_generator.generate_constrained_sample(self.motion_primitive, mp_constraints)
        spline = self.motion_primitive.back_project(vector, use_time_parameters=False)
        frames = spline.get_motion_vector()
        self.create_sample_visualization(frames)

    def generate_random_constraint(self, joint_name, frame_idx):
        """Sample the model once and return the global position of
        *joint_name* at *frame_idx* of that sample."""
        spline = self.motion_primitive.sample(use_time=False)
        frames = spline.get_motion_vector()
        position = self.skeleton.nodes[joint_name].get_global_position(frames[frame_idx])
        return position

    def updated_frame(self):
        # Force one update step regardless of the play state.
        prevPlayAnimation = self.playAnimation
        self.playAnimation = True
        self.update(0)
        self.playAnimation = prevPlayAnimation

    def getNumberOfFrames(self):
        if self.isLoadedCorrectly():
            return len(self.samples[0].frames)
        else:
            return 0

    def isLoadedCorrectly(self):
        return len(self.samples) > 0

    def export_to_file(self, filename, sample_idx=0):
        """Write sample *sample_idx* to *filename* as BVH."""
        if sample_idx < len(self.samples):
            frame_time = self.frameTime
            # FIX: previously always exported self.samples[0], ignoring sample_idx.
            frames = self.skeleton.add_fixed_joint_parameters_to_motion(self.samples[sample_idx].frames)
            bvh_writer = BVHWriter(None, self.skeleton, frames, frame_time, True)
            bvh_writer.write(filename)

    def get_skeleton_copy(self):
        """Return a deep copy of the skeleton with quaternion frame indices
        renumbered consecutively over the non-root joints."""
        skeleton = deepcopy(self.skeleton)
        count = 0
        for node_key in skeleton.get_joint_names():
            if node_key != skeleton.root:
                skeleton.nodes[node_key].quaternion_frame_index = count
                count += 1
        return skeleton

    def get_motion_vector_copy(self, start_frame, end_frame, sample_idx=0):
        """Return a MotionVector copy of sample *sample_idx* restricted to
        [start_frame, end_frame), with fixed-joint parameters added.

        Returns None when *sample_idx* is out of range.
        """
        if sample_idx < len(self.samples):
            mv_copy = MotionVector()
            # FIX: previously always copied self.samples[0], ignoring sample_idx.
            mv_copy.frames = deepcopy(self.samples[sample_idx].frames[start_frame:end_frame])
            mv_copy.frames = self.skeleton.add_fixed_joint_parameters_to_motion(mv_copy.frames)
            mv_copy.n_frames = len(mv_copy.frames)
            mv_copy.frame_time = self.frameTime
            return mv_copy

    def draw(self, modelMatrix, viewMatrix, projectionMatrix, lightSources):
        if self.isLoadedCorrectly() and 0 <= self.currentFrameNumber < self.getNumberOfFrames():
            for v in self._visualizations:
                v.draw(modelMatrix, viewMatrix, projectionMatrix, lightSources)

    def updateTransformation(self):
        if self.isLoadedCorrectly() and 0 <= self.currentFrameNumber < self.getNumberOfFrames():
            # update global transformation matrices of joints
            for idx, motion in enumerate(self.samples):
                current_frame = motion.frames[self.currentFrameNumber]
                self._visualizations[idx].updateTransformation(current_frame, self.scene_object.scale_matrix)

    def setColor(self, color):
        print("set color", color)
        self.color = color
        for v in self._visualizations:
            v.set_color(color)

    def create_blend_controller(self):
        skeleton = self.skeleton
        motions = self.samples
        name = "Blend Controller" + self.name
        self.scene_object.scene.object_builder.create_blend_controller(name, skeleton, motions)

    def update_markers(self):
        return

    def set_config(self, algorithm_config):
        self.algorithm_config = algorithm_config

    def getFrameTime(self):
        return self.frameTime

    def get_semantic_annotation(self):
        """Return [(label, frame_index_list), ...] partitioning the canonical
        time line at the model's keyframes, or None when there are no keyframes.

        Also assigns a random color per label in self.label_color_map.
        """
        n_keys = len(self.motion_primitive.keyframes)
        if n_keys <= 0:
            return None
        else:
            sorted_keyframes = collections.OrderedDict(
                sorted(self.motion_primitive.keyframes.items(), key=lambda t: t[1]))
            start = 0
            end = int(self.motion_primitive.get_n_canonical_frames())
            semantic_annotation = collections.OrderedDict()
            for k, v in sorted_keyframes.items():
                semantic_annotation[k] = list(range(start, v))
                self.label_color_map[k] = get_random_color()
                start = v
            # Trailing segment from the last keyframe to the end of the clip.
            k = "contact" + str(n_keys)
            semantic_annotation[k] = list(range(start, end))
            self.label_color_map[k] = get_random_color()
            return list(semantic_annotation.items())

    def get_label_color_map(self):
        return self.label_color_map

    def set_frame_time(self, frame_time):
        self.frameTime = frame_time

    def get_frame_time(self):
        return self.frameTime