Example #1
0
def process_split(
    all_fnames, output_path, rep, src_len, tgt_len, create_windows=None,
    num_cpus=40,
):
    """
    Process data into numpy arrays and pickle the result.

    Args:
        all_fnames: List of filenames that should be processed.
        output_path: Where to store the pickled (src_seqs, tgt_seqs) tuple.
        rep: If the output data should be rotation matrices ("rotmat"),
            quaternions ("quat") or axis angle ("aa").
        src_len: Length of the source sequences; forwarded to
            ``process_file`` via ``lengths``.
        tgt_len: Length of the target sequences; forwarded to
            ``process_file`` via ``lengths``.
        create_windows: Tuple (size, stride) of windows that should be
            extracted from each sequence or None otherwise.
        num_cpus: Number of worker processes used for parallel processing
            (defaults to 40, the previously hard-coded value).

    Returns:
        None. The (src_seqs, tgt_seqs) tuple is written to ``output_path``.
    """
    assert rep in ["aa", "rotmat", "quat"]
    convert_fn = utils.convert_fn_from_R(rep)

    # Fan the per-file work out to worker processes; each worker returns a
    # (source_sequences, target_sequences) pair for its file.
    data = fairmotion_utils.run_parallel(
        process_file,
        all_fnames,
        num_cpus=num_cpus,
        create_windows=create_windows,
        convert_fn=convert_fn,
        lengths=(src_len, tgt_len),
    )
    src_seqs, tgt_seqs = [], []
    for worker_data in data:
        s, t = worker_data
        src_seqs.extend(s)
        tgt_seqs.extend(t)
    logging.info(f"Processed {len(src_seqs)} sequences")
    # Use a context manager so the file handle is closed (and flushed) even
    # if pickling raises; the original `open()` call leaked the handle.
    with open(output_path, "wb") as f:
        pickle.dump((src_seqs, tgt_seqs), f)
Example #2
0
    def construct(
        self,
        w_joints=None,
        w_joint_pos=0.4,
        w_joint_vel=0.1,
        w_root_pos=0.4,
        w_root_vel=0.6,
        w_ee_pos=0.4,
        w_ee_vel=0.6,
        w_trajectory=0.5,
        diff_threshold=1.0,
        num_comparison=3,
        num_workers=1,
    ):
        """Build the motion graph from ``self.motions``.

        First creates one node per motion window (in parallel), then
        compares nodes pairwise and adds an edge wherever the weighted pose
        difference falls under ``diff_threshold``. The ``w_*`` arguments are
        the weights of the individual difference terms and are stored on the
        instance for later inspection; ``num_workers`` controls the
        parallelism of both phases.
        """
        assert len(self.motions) > 0, "No motions to construct graph"

        def log_if_verbose(message):
            # Progress messages are only emitted when verbose mode is on.
            if self.verbose:
                logging.info(message)

        log_if_verbose("Starting construction")

        # Every motion must be expressed on a single common skeleton;
        # adopt the skeleton of the first motion.
        self.skel = self.motions[0].skel
        for motion in self.motions:
            motion.set_skeleton(self.skel)

        # Phase 1: build node descriptors (motion index + frame span).
        log_if_verbose("Creating nodes")
        node_specs = flatten(
            utils.run_parallel(
                create_nodes,
                list(range(len(self.motions))),
                motions=self.motions,
                num_cpus=num_workers,
                base_length=self.base_length,
                stride_length=self.stride_length,
                compare_length=self.compare_length,
                fps=self.fps,
            )
        )
        log_if_verbose(f"Merging {len(node_specs)} nodes...")
        for motion_idx, frame_start, frame_end in tqdm.tqdm(node_specs):
            self.graph.add_node(
                self.graph.number_of_nodes(),
                motion_idx=motion_idx,
                frame_start=frame_start,
                frame_end=frame_end,
            )

        # Phase 2: compare nodes and collect (weight, i, j) edge candidates.
        log_if_verbose("Creating edges...")
        weighted_edges = flatten(
            utils.run_parallel(
                compare_and_connect_edge,
                list(range(self.graph.number_of_nodes())),
                nodes=self.graph.nodes,
                motions=self.motions,
                num_cpus=num_workers,
                frames_compare=self.frames_compare,
                w_joints=w_joints,
                w_joint_pos=w_joint_pos,
                w_joint_vel=w_joint_vel,
                w_root_pos=w_root_pos,
                w_root_vel=w_root_vel,
                w_ee_pos=w_ee_pos,
                w_ee_vel=w_ee_vel,
                w_trajectory=w_trajectory,
                diff_threshold=diff_threshold,
                num_comparison=num_comparison,
                verbose=self.verbose,
            )
        )

        # Record the weights used for this construction on the instance.
        self.w_joints = w_joints
        self.w_joint_pos = w_joint_pos
        self.w_joint_vel = w_joint_vel
        self.w_root_pos = w_root_pos
        self.w_root_vel = w_root_vel
        self.w_ee_pos = w_ee_pos
        self.w_ee_vel = w_ee_vel
        self.w_trajectory = w_trajectory

        log_if_verbose(f"Merging {len(weighted_edges)} edges...")
        for weight, node_i, node_j in tqdm.tqdm(weighted_edges):
            self.graph.add_edge(node_i, node_j, weights=weight)

        log_if_verbose("MotionGraph was constructed")
        log_if_verbose(f"NumNodes: {self.graph.number_of_nodes()}")
        log_if_verbose(f"NumEdges: {self.graph.number_of_edges()}")
Example #3
0
def load_parallel(files, cpus=20, **kwargs):
    """Load ``files`` in parallel with ``cpus`` workers.

    Any extra keyword arguments are forwarded to each ``load`` call.
    Returns whatever ``utils.run_parallel`` collects from the workers.
    """
    results = utils.run_parallel(load, files, num_cpus=cpus, **kwargs)
    return results