def wireframe(obj: bpy.types.Object, evaluated: bool) -> typing.List[typing.Tuple[float, float, float]]:
    """Get coords of verts for edges of an object.

    Args:
        obj: An object that can be converted to mesh.
        evaluated: Whether to apply modifiers.

    Returns:
        points: A list of coordinates, two entries per edge (one per end vertex).
    """
    if evaluated:
        depsgraph = bpy.context.view_layer.depsgraph
        obj = obj.evaluated_get(depsgraph)

    mesh = obj.to_mesh()
    bm = bmesh.new()
    bm.from_mesh(mesh)
    # Move vertices into world space before reading coordinates.
    bm.transform(obj.matrix_world)

    points = [vert.co.xyz for edge in bm.edges for vert in edge.verts]

    # Free the temporary bmesh and the mesh created by to_mesh().
    bm.free()
    obj.to_mesh_clear()
    return points
def generate_evaluated_mesh(self, mesh_object: bpy.types.Object,
                            reference_frame: typing.Optional[mathutils.Matrix] = None):
    """Generate the export mesh for ``mesh_object``.

    Optionally applies modifiers (per the 'apply_modifiers' setting), then
    transforms the mesh into ``reference_frame`` (if given), applies the
    exporter's conversion matrix and unit scaling, and prepares triangles
    and split normals.

    Args:
        mesh_object: Source object converted via ``to_mesh``.
        reference_frame: Optional frame to place the mesh relative to,
            instead of the mesh origin.

    Side effects:
        Sets ``self.object`` and ``self.mesh``.
    """
    if self.i3d.get_setting('apply_modifiers'):
        # The evaluated object carries modifier results from the depsgraph.
        self.object = mesh_object.evaluated_get(self.i3d.depsgraph)
        self.logger.debug("is exported with modifiers applied")
    else:
        self.object = mesh_object
        self.logger.debug("is exported without modifiers applied")

    self.mesh = self.object.to_mesh(preserve_all_data_layers=False, depsgraph=self.i3d.depsgraph)

    # If a reference is given transform the generated mesh by that frame to
    # place it somewhere else than center of the mesh origo
    if reference_frame is not None:
        self.mesh.transform(reference_frame.inverted() @ self.object.matrix_world)

    conversion_matrix = self.i3d.conversion_matrix
    if self.i3d.get_setting('apply_unit_scale'):
        self.logger.debug("applying unit scaling")
        conversion_matrix = \
            mathutils.Matrix.Scale(bpy.context.scene.unit_settings.scale_length, 4) @ conversion_matrix

    self.mesh.transform(conversion_matrix)
    if conversion_matrix.is_negative:
        # A mirrored (negative-determinant) conversion flips winding order;
        # flip normals so they still point outward.
        self.mesh.flip_normals()
        self.logger.debug("conversion matrix is negative, flipping normals")

    # Calculates triangles from mesh polygons
    self.mesh.calc_loop_triangles()
    # Recalculates normals after the scaling has messed with them
    self.mesh.calc_normals_split()
def _get_camera_space_bounding_box(context: bpy.types.Context,
                                   camera_obj: bpy.types.Object,
                                   target_obj: bpy.types.Object) -> Bounds2D:
    """Project the target mesh's vertices into camera space and bound them.

    Args:
        context: Context providing the evaluated depsgraph.
        camera_obj: Camera whose rotation defines the camera space.
        target_obj: Mesh object to bound.

    Returns:
        Bounds2D of the transformed vertex positions.

    Raises:
        Exception: If ``target_obj`` is not a mesh.
    """
    # TODO support more than just meshes (esp. metaballs)
    if target_obj.type != 'MESH':
        raise Exception(f"Target object {target_obj} is not a mesh")

    # Get latest version of target object with modifiers such as armature applied
    depsgraph = context.evaluated_depsgraph_get()
    target_obj = target_obj.evaluated_get(depsgraph)

    m_obj_to_world = target_obj.matrix_world
    # NOTE(review): only the camera's rotation is inverted here — location and
    # scale are ignored, and rotation_euler assumes an Euler rotation mode.
    # Presumably translation-invariant bounds are intended; confirm.
    m_world_to_cam = camera_obj.rotation_euler.to_matrix().inverted()

    mesh = target_obj.to_mesh()
    cam_verts = [m_world_to_cam @ (m_obj_to_world @ v.co) for v in mesh.vertices]
    # Free the temporary mesh created by to_mesh() (previously leaked).
    target_obj.to_mesh_clear()

    return Bounds2D.from_points(cam_verts)
def obj2np(obj: bpy.types.Object, dtype: type = np.float32, apply_modifier: bool = False,
           frame: typing.Optional[int] = None, geo_type: str = "position",
           is_local: bool = False, as_homogeneous: bool = False,
           mode: str = "dynamic") -> np.ndarray:
    """Convert a Blender object's geometry or armature data to a numpy array.

    Args:
        obj: Object whose data is a Mesh or an Armature.
        dtype: Output numpy dtype.
        apply_modifier: If True, evaluate modifiers via the depsgraph first.
        frame: Frame to sample; defaults to the current scene frame at call
            time. (Previously the default was captured once at import time,
            which silently froze it to whatever frame was active then.)
        geo_type: "position" or other geometry type forwarded to mesh2np.
        is_local: Whether to return local-space data (forwarded to mesh2np).
        as_homogeneous: Whether to append a homogeneous coordinate.
        mode: Armature sampling mode forwarded to armature2np.

    Returns:
        Positions/normals for meshes, or pose data for armatures.

    Raises:
        NotImplementedError: For unsupported data types.
    """
    if frame is None:
        frame = bpy.context.scene.frame_current
    bpy.context.scene.frame_set(frame)

    if isinstance(obj.data, bpy.types.Mesh):
        if apply_modifier:
            depsgraph = bpy.context.evaluated_depsgraph_get()
            obj = obj.evaluated_get(depsgraph)
            mesh = obj.to_mesh()
            positions = np.array([vec2np(v.co) for v in mesh.vertices], dtype=dtype)
            # Free the temporary evaluated mesh (previously leaked).
            obj.to_mesh_clear()
            return positions
        world_matrix = world_matrix2np(obj, dtype=dtype)  # (4, 4)
        return mesh2np(obj.data, world_matrix=world_matrix,
                       geo_type=geo_type, dtype=dtype, is_local=is_local,
                       frame=frame, as_homogeneous=as_homogeneous)
    elif isinstance(obj.data, bpy.types.Armature):
        return armature2np(obj.data, dtype=dtype, mode=mode, frame=frame)
    else:
        raise NotImplementedError(
            f"{type(obj.data)} is not supported with obj2np")
def from_blender(self, lookup: Lookup, armature: bpy.types.Object, object: bpy.types.Object):
    """Collect triangles from a Blender object into ``self.triangles``.

    Copies ``object`` into a temporary 'SourceOps' collection, triangulates it
    via a modifier, evaluates it through the depsgraph, converts every polygon
    into a Triangle, then removes all temporary data blocks.

    Args:
        lookup: Forwarded to ``Triangle.from_blender``.
        armature: Forwarded to ``Triangle.from_blender``.
        object: Source object; only MESH/CURVE/SURFACE/FONT types are handled.
    """
    # Only types convertible to a mesh are supported; anything else is skipped.
    if object.type not in {'MESH', 'CURVE', 'SURFACE', 'FONT'}:
        return

    # Work on a copy linked to a temporary collection so the original object
    # and scene are left untouched.
    collection = bpy.data.collections.new('SourceOps')
    bpy.context.scene.collection.children.link(collection)
    object = object.copy()
    collection.objects.link(object)

    # Triangulate quads/ngons on the copy; keep_custom_normals preserves
    # custom split normals through the triangulation.
    mod: bpy.types.TriangulateModifier = object.modifiers.new('Triangulate', 'TRIANGULATE')
    mod.min_vertices = 4
    mod.quad_method = 'FIXED'
    mod.ngon_method = 'CLIP'
    mod.keep_custom_normals = True

    # Disable armature modifiers in the viewport — presumably so the mesh is
    # captured undeformed (rest pose); confirm against the exporter's intent.
    for mod in getattr(object, 'modifiers', []):
        if mod.type == 'ARMATURE':
            mod.show_viewport = False

    # Re-evaluate the view layer so the modifier changes take effect, then
    # bake the evaluated result into a new standalone mesh.
    bpy.context.view_layer.update()
    depsgraph: bpy.types.Depsgraph = bpy.context.evaluated_depsgraph_get()
    evaluated: bpy.types.Object = object.evaluated_get(depsgraph)
    mesh = bpy.data.meshes.new_from_object(evaluated, preserve_all_data_layers=True, depsgraph=depsgraph)

    if not self.settings.ignore_transforms:
        mesh.transform(object.matrix_world)

    mesh.calc_normals_split()

    for poly in mesh.polygons:
        triangle = Triangle(self.settings)
        triangle.from_blender(lookup, armature, object, mesh, poly)
        self.triangles.append(triangle)

    # Clean up every temporary data block created above.
    mesh.free_normals_split()
    bpy.data.meshes.remove(mesh)
    bpy.data.objects.remove(object)
    bpy.data.collections.remove(collection)
def prepare_mesh(context: bpy.types.Context, object: bpy.types.Object):
    """Return the evaluated mesh of *object*, triangulated and in world space.

    Triangulates the evaluated mesh data in place via a temporary bmesh, then
    computes split normals, tangents and loop triangles before applying the
    object's world matrix.
    """
    depsgraph = context.evaluated_depsgraph_get()
    mesh = object.evaluated_get(depsgraph).data

    # TODO: Creating a new mesh just to triangulate sucks for perf I'd expect
    bm = bmesh.new()
    bm.from_mesh(mesh)
    bmesh.ops.triangulate(bm, faces=bm.faces[:], quad_method='BEAUTY', ngon_method='BEAUTY')
    # Write the triangulated result back onto the evaluated mesh and discard
    # the temporary bmesh.
    bm.to_mesh(mesh)
    bm.free()

    # Tangents require triangulated geometry with split normals.
    # (TODO calc_normals_split may not be necessary anymore)
    mesh.calc_normals_split()
    mesh.calc_tangents()
    mesh.calc_loop_triangles()
    mesh.transform(object.matrix_world)
    return mesh
def build_mesh(obj: bpy.types.Object) -> GlMesh:
    """Build a GlMesh for *obj*, with vertex data split per material.

    Positions, normals, UVs and tangents are gathered by four jobs submitted
    to ``glsl_draw_obj.sub_executor`` and stored on the returned GlMesh.

    Raises:
        Exception: If the module-level ``glsl_draw_obj`` is not initialised.
    """
    if glsl_draw_obj is None:
        raise Exception("glsl draw obj is None")
    scene_mesh = GlMesh()
    # Evaluate through the depsgraph so modifiers are included.
    ob_eval = obj.evaluated_get(bpy.context.view_layer.depsgraph)
    tmp_mesh = ob_eval.to_mesh()
    # NOTE(review): calc_tangents requires a UV layer; uv_layers[0] below will
    # also raise on meshes without UVs — confirm callers guarantee one.
    tmp_mesh.calc_tangents()
    tmp_mesh.calc_loop_triangles()
    st = tmp_mesh.uv_layers[0].data
    scene_mesh.mat_list = [
        glsl_draw_obj.materials[ms.material.name] for ms in obj.material_slots
    ]
    # Triangle count per material slot index.
    count_list = collections.Counter(
        [tri.material_index for tri in tmp_mesh.loop_triangles]
    )
    # For each material: consecutive (0,1,2), (3,4,5), ... triples indexing the
    # flat per-material vertex arrays produced by the jobs below.
    scene_mesh.index_per_mat = {
        scene_mesh.mat_list[i]: [
            (n * 3, n * 3 + 1, n * 3 + 2) for n in range(v)
        ]
        for i, v in count_list.items()
    }

    def job_pos() -> Dict[Any, Any]:
        # Vertex positions of every triangle, grouped by material.
        if scene_mesh.index_per_mat is None:
            raise Exception("scene mesh index per mat is None")
        return {
            k: [
                tmp_mesh.vertices[vid].co
                for tri in tmp_mesh.loop_triangles
                for vid in tri.vertices
                if tri.material_index == i
            ]
            for i, k in enumerate(scene_mesh.index_per_mat.keys())
        }

    def job_normal() -> Dict[Any, Any]:
        # Prefer custom split normals when the mesh has them; otherwise fall
        # back to per-vertex normals.
        if tmp_mesh.has_custom_normals:
            return {
                k: [
                    tri.split_normals[x]
                    for tri in tmp_mesh.loop_triangles
                    if tri.material_index == i
                    for x in range(3)
                ]
                for i, k in enumerate(scene_mesh.index_per_mat.keys())
            }
        return {
            k: [
                tmp_mesh.vertices[vid].normal
                for tri in tmp_mesh.loop_triangles
                for vid in tri.vertices
                if tri.material_index == i
            ]
            for i, k in enumerate(scene_mesh.index_per_mat.keys())
        }

    def job_uv() -> Dict[Any, Any]:
        # UVs from the first UV layer, one entry per triangle loop.
        return {
            k: [
                st[lo].uv
                for tri in tmp_mesh.loop_triangles
                for lo in tri.loops
                if tri.material_index == i
            ]
            for i, k in enumerate(scene_mesh.index_per_mat.keys())
        }

    def job_tangent() -> Dict[Any, Any]:
        # Tangents computed by calc_tangents() above, one per triangle loop.
        return {
            k: [
                tmp_mesh.loops[lo].tangent
                for tri in tmp_mesh.loop_triangles
                for lo in tri.loops
                if tri.material_index == i
            ]
            for i, k in enumerate(scene_mesh.index_per_mat.keys())
        }

    # Submit all four gather jobs, then block on their results.
    pos_future = glsl_draw_obj.sub_executor.submit(job_pos)
    normals_future = glsl_draw_obj.sub_executor.submit(job_normal)
    uvs_future = glsl_draw_obj.sub_executor.submit(job_uv)
    tangents_future = glsl_draw_obj.sub_executor.submit(job_tangent)
    scene_mesh.pos = pos_future.result()
    scene_mesh.normals = normals_future.result()
    scene_mesh.uvs = uvs_future.result()
    scene_mesh.tangents = tangents_future.result()
    return scene_mesh
def camera_view_bounds_2d(
    scene: bpy.types.Scene, cam_ob: bpy.types.Object, me_ob: bpy.types.Object
) -> Tuple[float, float, float, float]:
    """
    Shamelessly copy pasted from https://blender.stackexchange.com/a/158236/105631
    Idk how it works, but it works

    Returns camera space bounding box of mesh object.
    Negative 'z' value means the point is behind the camera.
    Takes shift-x/y, lens angle and sensor size into account
    as well as perspective/ortho projections.

    :arg scene: Scene to use for frame size.
    :type scene: :class:`bpy.types.Scene`
    :arg obj: Camera object.
    :type obj: :class:`bpy.types.Object`
    :arg me: Untransformed Mesh.
    :type me: :class:`bpy.types.Mesh`
    :return: (x, y, width, height) in relative [0, 1] coordinates,
        rounded to 4 decimals.
    :rtype: :class:tuple
    """
    # World -> camera transform (normalized to strip scale/shear).
    mat = cam_ob.matrix_world.normalized().inverted()
    depsgraph = bpy.context.evaluated_depsgraph_get()
    # me_ob.evaluated_get(depsgraph) crashed on Linux build in Blender 2.83.9, but works in 2.83.13.
    # Solution was to copy me_ob. first. But that resulted in memory leak.
    mesh_eval = me_ob.evaluated_get(depsgraph)
    me = mesh_eval.to_mesh()
    # Move vertices to world space, then into camera space.
    me.transform(me_ob.matrix_world)
    me.transform(mat)

    camera: bpy.types.Camera = cam_ob.data
    # First three view-frame corners, negated (camera looks down -Z).
    frame = [-v for v in camera.view_frame(scene=scene)[:3]]
    camera_persp: bool = camera.type != "ORTHO"  # True of PERSP

    lx = []
    ly = []

    min_x: float
    max_x: float
    min_y: float
    max_y: float

    # NOTE(review): min(lx)/max(lx) below raise ValueError on a mesh with no
    # vertices — confirm callers never pass an empty mesh.
    for v in me.vertices:
        co_local: Sequence[float] = v.co
        z: float = -co_local.z

        if camera_persp:
            if z == 0.0:
                # NOTE(review): a vertex exactly on the camera plane appends
                # 0.5/0.5 AND still falls through to append x/y computed from
                # the frame of the previous vertex — kept as in the original.
                lx.append(0.5)
                ly.append(0.5)
            # Does it make any sense to drop these?
            # if z <= 0.0:
            #     continue
            else:
                # Rescale the frame to this vertex's depth; each iteration's
                # result depends only on the current z (self-correcting).
                frame = [(v / (v.z / z)) for v in frame]

        min_x, max_x = frame[1].x, frame[2].x
        min_y, max_y = frame[0].y, frame[1].y

        # Normalize the vertex position into the [0, 1] frame rectangle.
        x = (co_local.x - min_x) / (max_x - min_x)
        y = (co_local.y - min_y) / (max_y - min_y)
        lx.append(x)
        ly.append(y)

    # Clamp the bounding extremes to the visible [0, 1] range.
    min_x = np.clip(min(lx), 0.0, 1.0)
    max_x = np.clip(max(lx), 0.0, 1.0)
    min_y = np.clip(min(ly), 0.0, 1.0)
    max_y = np.clip(max(ly), 0.0, 1.0)

    # Free the temporary mesh created by to_mesh().
    mesh_eval.to_mesh_clear()

    # r: 'bpy.types.RenderSettings' = scene.render
    # fac: float = r.resolution_percentage * 0.01
    # dim_x: float = r.resolution_x * fac
    # dim_y: float = r.resolution_y * fac

    # Sanity check
    # if round((max_x - min_x) * dim_x) == 0 or round((max_y - min_y) * dim_y) == 0:
    #     return (0, 0, 0, 0)

    # Relative values
    return (
        round(min_x, 4),  # X
        round(1 - max_y, 4),  # Y (flipped: image origin is top-left)
        round((max_x - min_x), 4),  # Width
        round((max_y - min_y), 4),  # Height
    )

    # Absolute values
    # NOTE(review): unreachable — kept from the original snippet; dim_x/dim_y
    # are only defined in the commented-out block above.
    return (
        int(round(min_x * dim_x)),  # X
        int(round(dim_y - max_y * dim_y)),  # Y
        int(round((max_x - min_x) * dim_x)),  # Width
        int(round((max_y - min_y) * dim_y)),  # Height
    )