def get_extrinsic_parameters(camera: bpy.types.Object,
                             dtype: type = np.float32,
                             frame: int = None,
                             use_cv_coord: bool = False) -> np.ndarray:
    """Return the (3, 4) world-to-camera extrinsic matrix [R | t] of `camera`.

    Args:
        camera: Blender camera object whose pose is read.
        dtype: numpy dtype of the returned matrix.
        frame: frame at which to evaluate the camera pose. Defaults to the
            current scene frame *at call time*. (BUGFIX: the original default
            `bpy.context.scene.frame_current` was evaluated once at import
            time, silently freezing the frame for all later calls.)
        use_cv_coord: if True, return the matrix in the computer-vision
            camera convention (x right, y down, +z look-at) instead of
            Blender's camera convention.

    Returns:
        (3, 4) ndarray mapping world coordinates into camera coordinates.
    """
    if frame is None:
        frame = bpy.context.scene.frame_current

    if not use_cv_coord:
        return world_matrix2np(camera, dtype=dtype, frame=frame)[0:3, 0:4]

    # Reference: https://blender.stackexchange.com/questions/38009/3x4-camera-matrix-from-blender-camera
    # There are 3 coordinate systems involved:
    #  1. The World coordinates: "world"
    #     - right-handed
    #  2. The Blender camera coordinates: "bcam"
    #     - x is horizontal
    #     - y is up
    #     - right-handed: negative z look-at direction
    #  3. The desired computer vision camera coordinates: "cv"
    #     - x is horizontal
    #     - y is down (to align to the actual pixel coordinates
    #       used in digital images)
    #     - right-handed: positive z look-at direction

    # BUGFIX: the original ignored `frame` on this branch and read the pose
    # at whatever frame the scene happened to be on. Evaluate at the
    # requested frame, matching the sibling helpers in this module.
    bpy.context.scene.frame_set(frame)

    # bcam stands for blender camera
    R_bcam2cv = np.array([[1.0, 0.0, 0.0],
                          [0.0, -1.0, 0.0],
                          [0.0, 0.0, -1.0]], dtype=dtype)

    location, rotation = camera.matrix_world.decompose()[0:2]

    # world -> bcam: inverse of the camera rotation (transpose, since it is
    # orthonormal), then the matching translation.
    R_world2bcam = rotation.to_matrix().transposed()
    T_world2bcam = -1 * R_world2bcam @ location

    # Flip the y/z axes to reach the cv convention.
    # NOTE(review): R_bcam2cv is a numpy array while R_world2bcam and
    # T_world2bcam are mathutils types; `vec2np` is assumed to normalize the
    # mixed-type products — confirm against its implementation.
    R_world2cv = vec2np(R_bcam2cv @ R_world2bcam)
    T_world2cv = vec2np(R_bcam2cv @ T_world2bcam).reshape(3, 1)

    Rt = np.hstack((R_world2cv, T_world2cv))
    return Rt
def mesh2np(mesh: bpy.types.Mesh,
            world_matrix: np.ndarray = None,
            geo_type: str = "position",
            dtype: type = np.float32,
            is_local: bool = False,
            frame: int = None,
            as_homogeneous: bool = False) -> np.ndarray:
    """Extract vertex positions or normals of a mesh as a numpy array.

    Args:
        mesh: source mesh datablock.
        world_matrix: optional (4, 4) ndarray used to transform positions
            into world space; if None, local coordinates are returned.
        geo_type: "position" or "normal".
        dtype: numpy dtype of the returned array.
        is_local: if True, skip the world transform even when
            `world_matrix` is given.
        frame: frame to evaluate at; defaults to the current scene frame at
            call time. (BUGFIX: the original default was bound once at
            import time.)
        as_homogeneous: if True, return (N, 4) homogeneous coordinates,
            otherwise (N, 3).

    Returns:
        (N, 3) or (N, 4) ndarray of vertex positions or normals.

    Raises:
        Exception: if `geo_type` is neither "position" nor "normal".
    """
    if frame is None:
        frame = bpy.context.scene.frame_current
    bpy.context.scene.frame_set(frame)

    if geo_type not in ["position", "normal"]:
        raise Exception("The type should be position or normal.")

    # Select position or normal per vertex.
    local_verts = np.array([
        vec2np(v.co if geo_type == "position" else v.normal)
        for v in mesh.vertices
    ], dtype=dtype)

    # Append the homogeneous coordinate when requested or when a world
    # transform will be applied.
    # BUGFIX: the original `np.ones(...)` omitted dtype, so hstack silently
    # upcast the whole array to float64, defeating the `dtype` parameter.
    if as_homogeneous or world_matrix is not None:
        ones = np.ones((len(local_verts), 1), dtype=dtype)
        local_verts = np.hstack((local_verts, ones))

    # Normals are direction vectors; translating them by a world matrix
    # would be wrong, so they stay local (as in the original).
    if is_local or geo_type == "normal" or world_matrix is None:
        return local_verts if as_homogeneous else local_verts[:, 0:3]

    # World-space positions: one vectorized matmul instead of a per-vertex
    # Python loop (row-vector form of `world_matrix @ v` for every v).
    global_verts = (local_verts @ np.asarray(world_matrix).T).astype(dtype)
    return global_verts if as_homogeneous else global_verts[:, 0:3]
def bone2np(bone: bpy.types.Bone,
            dtype: type = np.float32,
            mode: str = "rest",
            frame: int = None) -> np.ndarray:
    """Convert a rest-pose bone attribute to numpy.

    Args:
        bone: armature bone (edit/rest data, not the pose bone).
        dtype: numpy dtype of the result.
        mode: one of
            "head"   - local head position from the object origin,
            "tail"   - local tail position from the object origin,
            "length" - bone length (NOTE: returns a plain float, not an
                       ndarray, despite the annotated return type),
            "offset" - (4, 4) offset matrix from the parent bone,
            "rest"   - (4, 4) rest-pose matrix (ignores pose rotation).
        frame: frame to evaluate at; defaults to the current scene frame at
            call time. (BUGFIX: the original default was bound once at
            import time, so an unqualified call could reset the scene to a
            stale frame.)

    Raises:
        NotImplementedError: for any other `mode`.
    """
    if frame is None:
        frame = bpy.context.scene.frame_current
    bpy.context.scene.frame_set(frame)

    if mode == "head":
        # local head position from the origin of the object
        return vec2np(bone.head_local, dtype=dtype)
    elif mode == "tail":
        # local tail position from the origin of the object
        return vec2np(bone.tail_local, dtype=dtype)
    elif mode == "length":
        # bone length (scalar)
        return bone.length
    elif mode == "offset":
        # offset matrix relative to the parent: the bone's own rotation with
        # its translation re-expressed in the parent's frame (shifted along
        # the parent's length when a parent exists).
        offset = bone.matrix.to_4x4()
        offset.translation = bone.head
        if bone.parent is not None:
            offset.translation.y += bone.parent.length
        return mat2np(offset, dtype=dtype)
    elif mode == "rest":
        # absolute translation matrix not considering bones' rotation at rest pose
        return mat2np(bone.matrix_local, dtype=dtype)
    else:
        raise NotImplementedError(f"mode {mode} isn't supported.")
def posebone2np(posebone: bpy.types.PoseBone,
                dtype: type = np.float32,
                mode: str = "dynamic",
                frame: int = None) -> np.ndarray:
    """Convert a pose-mode bone attribute to numpy.

    Args:
        posebone: pose bone to read.
        dtype: numpy dtype of the result.
        mode: one of
            "head"    - posed head position,
            "tail"    - posed tail position,
            "length"  - bone length (plain float),
            "offset"  - rest offset matrix from the parent (delegates to
                        `bone2np`),
            "dynamic" - (4, 4) posed transform composed down the parent
                        chain (equivalent to `mat2np(posebone.matrix)`).
        frame: frame to evaluate at; defaults to the current scene frame at
            call time. (BUGFIX: the original default was bound once at
            import time. Worse, nested calls below omitted `frame`, so each
            nested stale default called `frame_set` and reset the scene
            mid-computation; `frame` is now threaded through every call.)

    Raises:
        NotImplementedError: for any other `mode`.
    """
    if frame is None:
        frame = bpy.context.scene.frame_current
    bpy.context.scene.frame_set(frame)

    if mode == "head":
        return vec2np(posebone.head, dtype=dtype)
    elif mode == "tail":
        # local tail position from the origin of the object
        return vec2np(posebone.tail, dtype=dtype)
    elif mode == "length":
        # bone length (scalar)
        return posebone.length
    elif mode == "offset":
        # offset matrix from the parent (rest data)
        return bone2np(posebone.bone, dtype=dtype, mode=mode, frame=frame)
    elif mode == "dynamic":
        # Local animated rotation of this bone as a 4x4 matrix.
        dynamic_pose = rotation2np(posebone, dtype=dtype, to_matrix=True,
                                   frame=frame)
        basis = bone2np(posebone.bone, dtype=dtype, mode="rest", frame=frame)
        if posebone.parent is None:
            # NOTE(review): this makes the final result
            # basis @ dynamic_pose @ dynamic_pose for the root — the local
            # rotation is applied twice. Preserved as-is; confirm whether
            # the first factor was meant to be just `basis`.
            rot_scale = basis @ dynamic_pose
        else:
            # Recurse up the chain: parent's dynamic transform times this
            # bone's rest offset from the parent.
            rot_scale = posebone2np(posebone.parent, dtype=dtype, mode=mode,
                                    frame=frame) @ posebone2np(
                                        posebone, dtype=dtype, mode="offset",
                                        frame=frame)
        dynamic_pose = rot_scale @ dynamic_pose
        if posebone.parent is None:  # root node
            # Root translation comes from the animated location channel,
            # expressed in the rest basis.
            dynamic_pose[0:3, 3] = (basis @ location2np(
                posebone, dtype=dtype, to_matrix=True, frame=frame))[0:3, 3]
        return dynamic_pose  # equal to mat2np(posebone.matrix, dtype=dtype)
    else:
        raise NotImplementedError(f"mode {mode} isn't supported.")
def any2np(obj: object, dtype: type = np.float32, **kwargs) -> np.ndarray:
    """Dispatch `obj` to the matching *2np converter by its runtime type.

    Accepts mathutils Vector/Matrix, bpy Object/Mesh, an object name (str),
    a bmesh vertex sequence, or a bpy property collection. (The original
    annotated `obj` as bpy.types.Object, which contradicted the dispatch.)

    Args:
        obj: value to convert.
        dtype: numpy dtype forwarded to the concrete converter.
        **kwargs: extra options forwarded to obj2np / objname2np.

    Raises:
        NotImplementedError: if no converter matches `obj`'s type.
    """
    # BUGFIX/idiom: `isinstance` instead of `type(x) == T` — robust to
    # subclasses (common with bpy's registered types) and PEP 8 compliant.
    # The original's first two branches were also disjoint `if`s; behavior
    # is identical as each branch returns, but one chain reads clearer.
    if isinstance(obj, Vector):
        return vec2np(obj, dtype=dtype)
    elif isinstance(obj, Matrix):
        return mat2np(obj, dtype=dtype)
    elif isinstance(obj, bpy.types.Object):
        return obj2np(obj, dtype=dtype, **kwargs)
    elif isinstance(obj, str):
        return objname2np(obj, dtype=dtype, **kwargs)
    elif isinstance(obj, bpy.types.Mesh):
        return mesh2np(obj, dtype=dtype)
    elif isinstance(obj, bmesh.types.BMVertSeq):
        return vertices2np(obj, dtype)
    elif isinstance(obj, bpy.types.bpy_prop_collection):
        return collection2np(obj, dtype)
    else:
        raise NotImplementedError(f"{type(obj)} is not supported with any2np.")
def obj2np(obj: bpy.types.Object,
           dtype: type = np.float32,
           apply_modifier: bool = False,
           frame: int = None,
           geo_type: str = "position",
           is_local: bool = False,
           as_homogeneous: bool = False,
           mode: str = "dynamic") -> np.ndarray:
    """Convert a Blender object's geometry (or armature pose) to numpy.

    Args:
        obj: object whose `.data` is a Mesh or an Armature.
        dtype: numpy dtype of the result.
        apply_modifier: if True (mesh only), evaluate the modifier stack
            via the depsgraph and return the evaluated vertex positions.
            NOTE: as in the original, this path returns *local*-space
            positions and ignores geo_type/is_local/as_homogeneous.
        frame: frame to evaluate at; defaults to the current scene frame at
            call time. (BUGFIX: the original default was bound once at
            import time.)
        geo_type, is_local, as_homogeneous: forwarded to `mesh2np`.
        mode: forwarded to `armature2np` for armature objects.

    Raises:
        NotImplementedError: if `obj.data` is neither a Mesh nor an
            Armature.
    """
    if frame is None:
        frame = bpy.context.scene.frame_current
    bpy.context.scene.frame_set(frame)

    # BUGFIX/idiom: `isinstance` instead of `type(x) ==` for bpy types.
    if isinstance(obj.data, bpy.types.Mesh):
        if apply_modifier:
            depsgraph = bpy.context.evaluated_depsgraph_get()
            eval_obj = obj.evaluated_get(depsgraph)
            mesh = eval_obj.to_mesh()
            try:
                # Materialize before freeing the temporary mesh below.
                return np.array([vec2np(v.co) for v in mesh.vertices],
                                dtype=dtype)
            finally:
                # BUGFIX: the Blender API requires freeing meshes created by
                # Object.to_mesh(); the original leaked it on every call.
                eval_obj.to_mesh_clear()
        # BUGFIX: pass `frame` through; the original omitted it, letting
        # world_matrix2np's stale import-time default reset the scene frame.
        world_matrix = world_matrix2np(obj, dtype=dtype, frame=frame)  # (4, 4)
        return mesh2np(obj.data,
                       world_matrix=world_matrix,
                       geo_type=geo_type,
                       dtype=dtype,
                       is_local=is_local,
                       frame=frame,
                       as_homogeneous=as_homogeneous)
    elif isinstance(obj.data, bpy.types.Armature):
        return armature2np(obj.data, dtype=dtype, mode=mode, frame=frame)
    else:
        raise NotImplementedError(
            f"{type(obj.data)} is not supported with obj2np")