Example #1
0
def add_box_to_scene(context,
                     location=None,
                     rotation=None,
                     size=None,
                     name='Box'):
    """
    Add a box mesh to a given context.

    Parameters
    ----------
    context : bpy.context
        Blender context to add the box to
    location : numpy.ndarray = (0, 0, 0)
        World location of the box
    rotation : numpy.ndarray = (0, 0, 0)
        World rotation of the box in Euler angles
    size : numpy.ndarray = (1, 1, 1)
        Length, height, and depth of the box, respectively
    name : String
        Name of the box
    """
    # Resolve defaults here instead of in the signature: numpy arrays are
    # mutable, and a mutable default would be shared between all calls.
    if location is None:
        location = np.zeros(3)
    if rotation is None:
        rotation = np.zeros(3)
    if size is None:
        size = np.ones(3)

    # Build a unit cube in a fresh bmesh and bake it into a new mesh
    bob = bm.new()
    verts, faces = get_unit_cube()
    add_geom_to_bmesh(bob, verts, faces)
    mesh = bpy.data.meshes.new(name)
    bob.to_mesh(mesh)
    mesh.update()

    # Add the mesh as an object into the scene
    ob = object_utils.object_data_add(context, mesh)
    # Location, rotation and scale are applied in one world-matrix update
    mat = make_transf_mat(location, rotation, size)
    ob.matrix_world = Matrix(mat)
Example #2
0
def add_box_to_obj(ob,
                   location=None,
                   rotation=None,
                   size=None,
                   select=True,
                   deselect=True):
    """
    Add a box mesh to a given Blender object.

    Parameters
    ----------
    ob : bpy.object
        Blender object to add the box to
    location : numpy.ndarray = (0, 0, 0)
        World location of the box
    rotation : numpy.ndarray = (0, 0, 0)
        World rotation of the box in Euler angles
    size : numpy.ndarray = (1, 1, 1)
        Length, height, and depth of the box, respectively
    select : Bool = True
        Should the newly added vertices be selected?
    deselect : Bool = True
        Should already existing vertices be deselected?
    """
    # Resolve defaults here instead of in the signature: numpy arrays are
    # mutable, and a mutable default would be shared between all calls.
    if location is None:
        location = np.zeros(3)
    if rotation is None:
        rotation = np.zeros(3)
    if size is None:
        size = np.ones(3)

    bob = bm.from_edit_mesh(ob.data)

    # If box should be selected, deselect everything else
    if deselect:
        for v in bob.verts:
            v.select = False
        bob.select_flush(False)

    verts, faces = get_unit_cube()

    # First apply given box transform, then transform it from world to
    # local space
    mat = np.array(ob.matrix_world.inverted()) @ \
        make_transf_mat(location, rotation, size)
    verts = transf_vecs(mat, verts)

    add_geom_to_bmesh(bob, verts, faces, select)
    bm.update_edit_mesh(ob.data)
Example #3
0
    def _execute_inner(self, obs):
        """
        Mark vertices of the given objects as selected if they are
        visible from at least one of several render positions sampled
        on a (hemi-)sphere around the objects.

        Parameters
        ----------
        obs : sequence of bpy.types.Object
            Objects in edit mode whose combined mesh is depth-rendered;
            their vertices' 'select' flags are overwritten with the
            computed visibility.
        """
        dim = self.dim
        dimhalf = dim * .5
        offbuf = gpu.types.GPUOffScreen(dim, dim)
        sample = sample_sphere if self.dom == 'SPHERE' \
            else sample_hemisphere

        # Construct depthpass shader; the fragment color is irrelevant,
        # only the depth buffer is read back
        shader = gpu.types.GPUShader(
            vertexcode='''
            uniform mat4 mvp;
            in vec3 pos;
            void main() {
                gl_Position = mvp * vec4(pos, 1);
            }''',
            fragcode='''
            out vec4 col;
            void main() {
                col = vec4(0, 0, 1, 1);
            }'''
        )
        shader.bind()

        # Create batch from all objects in edit mode
        verts, indcs, geoinfo = combine_meshes(obs)
        batch = batch_for_shader(
            shader, 'TRIS',
            {"pos": verts},
            indices=indcs,
        )
        batch.program_set(shader)

        # Find the center and bounds of all objects to calculate the
        # encompassing radius of the (hemi-)sphere on which render
        # positions will be sampled
        bounds, centr = get_bounds_and_center(verts)
        rad = np.linalg.norm(bounds[:2]) * .5 + 1
        del indcs, bounds

        # Spawn debug sphere with calculated radius
        if self._debug_spawn_sphere:
            bpy.ops.mesh.primitive_uv_sphere_add(
                radius=rad,
                location=centr,
                )

        # Render the objects from several views and mark seen vertices.
        # NOTE: 'np.bool' was a deprecated alias for the builtin 'bool'
        # and was removed in NumPy 1.24; use 'bool' directly.
        visibl = np.zeros(len(verts), dtype=bool)
        for _ in range(self.samplecnt):
            # Generate random points on the chosen domain from which
            # to render the objects
            # Choose rotation so the 'camera' looks to the center
            samplepos, (theta, phi) = sample(rad)
            view_mat_inv = make_transf_mat(
                transl=samplepos + centr,
                rot=(phi, 0, theta + np.pi * .5),
                )

            # Spawn debug camera at sampled position
            if self._debug_spawn_cams:
                bpy.ops.object.camera_add()
                bpy.context.object.matrix_world = Matrix(view_mat_inv)

            # Build the Model View Projection matrix from chosen
            # render position and radius
            # The model matrix has already been applied to the vertices
            # before creating the batch
            mvp = make_proj_mat(
                fov=90,
                clip_start=rad * .25,
                clip_end=rad * 1.5,
                dimx=dim,
                dimy=dim,
                ) @ np.linalg.inv(view_mat_inv)
            shader.uniform_float("mvp", Matrix(mvp))
            del view_mat_inv, samplepos, theta, phi

            with offbuf.bind():
                # Render the selected objects into the offscreen buffer
                bgl.glDepthMask(bgl.GL_TRUE)
                bgl.glClear(bgl.GL_DEPTH_BUFFER_BIT)
                bgl.glEnable(bgl.GL_DEPTH_TEST)
                batch.draw()

                # Write texture back to CPU
                pxbuf = bgl.Buffer(bgl.GL_FLOAT, dim * dim)
                bgl.glReadBuffer(bgl.GL_BACK)
                bgl.glReadPixels(0, 0, dim, dim, bgl.GL_DEPTH_COMPONENT,
                                 bgl.GL_FLOAT, pxbuf)

            # Map depth values from [0, 1] to [-1, 1]
            pxbuf = np.asanyarray(pxbuf) * 2 - 1
            pxbuf.shape = (dim, dim)

            # Transform verts of active object to clip space
            tverts = mvp @ append_one(verts).T
            # Perspective divide to transform to NDCs [-1, 1]
            tverts /= tverts[3]

            # Find pixel coordinates of each vertex' projected position
            # by remapping x and y coordinates from NDCs to [0, dim]
            # Add .5 to make sure the flooring from conversion to int
            # is actually rounding
            uvs = tverts[:2] * dimhalf + (dimhalf + .5)
            uvs = uvs.astype(np.int32)

            # Map all vertices outside the view frustum to (0, 0)
            # so they don't sample the pixel array out of bounds
            invalid = np.any((uvs < 0) | (dim <= uvs), axis=0)
            uvs.T[invalid] = (0, 0)

            # For each vertex, get the depth at its projected pixel
            # and its distance to the render position
            imgdpth = pxbuf[(uvs[1], uvs[0])]
            camdist = tverts[2]
            # Set the distance of invalid vertices past [-1, 1] so they
            # won't be selected
            camdist[invalid] = 2

            # A vertex is visible if it's inside the view frustum
            # (valid) and not occluded by any face.
            # A vertex is occluded when its depth sampled from the
            # image is smaller than its distance to the camera.
            # A small error margin is added to prevent self-occlusion.
            # The result is logically or-ed with the result from other
            # render positions.
            visibl |= camdist <= (imgdpth + .001)

            # Create debug image of the rendered view
            if self._debug_create_img:
                # Grayscale to RGBA and [-1, 1] to [0, 1]
                pxbuf = np.repeat(pxbuf, 4) * .5 + .5
                pxbuf.shape = (dim, dim, 4)
                # Alpha channel is 1
                pxbuf[:, :, 3] = 1
                # Mark projected vertex positions in red
                pxbuf[(uvs[1], uvs[0])] = (1, 0, 0, 1)

                imgname = "Debug"
                if imgname not in bpy.data.images:
                    bpy.data.images.new(imgname, dim, dim)
                image = bpy.data.images[imgname]
                image.scale(dim, dim)
                image.pixels = pxbuf.ravel()

        # Split visible flag list back in original objects
        # (geoinfo appears to hold each object's end index into the
        # combined vertex array — confirm against combine_meshes)
        offbuf.free()
        start = 0
        for o, (end, _) in zip(obs, geoinfo):
            o.data.vertices.foreach_set('select', visibl[start:end])
            start = end