Example #1
    def _find_parts(self, context):
        for o in context.objects_in_mode:
            data = o.data
            parts = TreeDict(acc=concat)
            coords = get_vecs(data.vertices)
            # choose comparison method
            method = np.linalg.norm if self._method == 0 else np.prod

            for indcs in get_parts(from_edit_mesh(data).verts):
                bounds, _ = get_bounds_and_center(coords[indcs])
                # calculate comparison value from bounding box,
                # round to create fewer bins in the dict for better
                # performance
                key = round(method(bounds), self._resolution)
                parts[key] = indcs

            self._data.append((data.vertices, parts))
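
TreeDict and concat aren't shown here. Judging from how "parts[key] = indcs" is
used above, assigning to an existing key accumulates index lists instead of
overwriting them. A minimal sketch with those assumed semantics, using a plain
dict and ignoring whatever tree ordering the original provides (the class name
AccumDict is made up):

    from operator import concat  # concat(a, b) == a + b for sequences


    class AccumDict(dict):
        """Dict whose __setitem__ merges with any value already stored
        at the key instead of replacing it."""

        def __init__(self, acc):
            super().__init__()
            self._acc = acc

        def __setitem__(self, key, value):
            if key in self:
                value = self._acc(self[key], value)
            super().__setitem__(key, value)


    parts = AccumDict(acc=concat)
    parts[1.0] = [0, 1, 2]
    parts[1.0] = [7, 8]
    assert parts[1.0] == [0, 1, 2, 7, 8]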
Example #2
    def __init__(self, data, res_heuristic=_res_heur):
        """
        Build a spatial hash of the given points for fast
        "closest vertex" lookups.

        :param data: (N, 3) array of point coordinates
        :param res_heuristic: Heuristic determining the hash resolution
            from the data, its bounds, and its center
        """
        self.data = data
        bounds, center = get_bounds_and_center(data)

        # Determine the voxel edge length
        self.celllen = np.average(
            np.divide(
                res_heuristic(data, bounds, center),
                bounds,
                # Prevent division by zero
                out=np.zeros_like(bounds, dtype=float),
                where=bounds != 0,
            ))

        # Quantize data and bring in a form that can be broadcast by
        # NumPy. For N vertices, this transforms the array from
        # (N, 3) to (N, 1, 3) dimensions.
        qdata = quantize(data, self.celllen)[:, np.newaxis]

        # Generate voxel kernel from all 3**3 = 27 combinations of
        # -1, 0, and 1.
        o = np.array((-1, 0, 1)) * self.celllen
        kern = np.array(np.meshgrid(o, o, o)).T.reshape(-1, 3)

        # Generate array that not only contains every quantized point,
        # but also the points offset by the voxel cell size along each
        # axis in +/- direction. For each original data entry, that
        # yields 3**3 = 27 entries.
        kdata = (kern + qdata).reshape(-1, 3)

        # Build hash. Every data point gets linked to the cell it's
        # in as well as all 26 neighboring cells. This overlap is what
        # guarantees fast "closest vertex" lookup at the cost of higher
        # memory consumption.
        self.hash = defaultdict(list)
        for i, c in enumerate(kdata):
            # Store index in data array for each of the 27 entries.
            self.hash[tuple(c)].append(i // 27)
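
A lookup method isn't shown in this snippet. A minimal sketch of how a
closest-point query could use the overlapping hash, assuming quantize also
accepts a single (3,) coordinate (the method name "closest" and its exact
behavior are assumptions, not the original API):

    def closest(self, point):
        """Return the index into 'data' of the nearest point registered
        in the query point's cell, or None if that cell is empty."""
        p = np.asarray(point, dtype=float)
        # Snap the query point onto the same grid the hash was built on.
        cell = tuple(quantize(p, self.celllen))
        # Every data point was registered in its own cell and all 26
        # neighbors, so a single lookup yields all nearby candidates.
        candidates = self.hash.get(cell, [])
        if not candidates:
            return None
        dists = np.linalg.norm(self.data[candidates] - p, axis=1)
        return candidates[int(np.argmin(dists))]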
Example #3
    def _save_barplot(self, ob, xvals, yvals):
        """
        Helper for saving a plot of an object's shape distribution
        to disk.

        :param ob: The object to draw the plot for
        :param xvals: Values on the plot's x-axis
        :param yvals: Values on the plot's y-axis
        """
        bounds, _ = get_bounds_and_center(ob.bound_box)
        maxdist = np.linalg.norm(bounds)
        save_barplot(
            xvals=xvals[:-1],
            yvals=yvals,
            barwidth=maxdist / self.bincnt,
            # xmax=maxdist,
            title=(
                f"Samples: {self._samplcnt}, "
                f"Bins: {self.bincnt}, "
                f"Diaglength: {maxdist}"
            ),
            filename=ob.name,
        )
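
save_barplot is not part of this snippet. A plausible matplotlib-based sketch
of such a helper, matching the keyword arguments used above (everything here,
including the output path, is an assumption rather than the original helper):

    import matplotlib
    matplotlib.use('Agg')  # render without a display
    import matplotlib.pyplot as plt


    def save_barplot(xvals, yvals, barwidth, title, filename):
        """Hypothetical sketch: draw a bar plot and save it to disk."""
        fig, ax = plt.subplots()
        ax.bar(xvals, yvals, width=barwidth, align='edge')
        ax.set_title(title)
        fig.savefig(f"{filename}.png")
        plt.close(fig)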
Example #4
    def _find_parts(self, obs):
        for o in obs:
            # get parts, each a vertex index list
            bob = bmesh.new()
            bob.from_mesh(o.data)
            parts = get_parts(bob.verts)
            bob.free()  # release the temporary bmesh explicitly

            # choose comparison method
            method = np.linalg.norm if self._method == 0 else np.prod

            # create dict of parts and their comparison value
            partdict = TreeDict(acc=concat)
            coords = get_vecs(o.data.vertices)
            for indcs in parts:
                bounds, _ = get_bounds_and_center(coords[indcs])
                # calculate comparison value from bounding box,
                # round to create fewer bins in the dict for better
                # performance
                key = round(method(bounds), self._resolution)
                partdict[key] = indcs

            self._data.append((o.data.vertices, partdict))
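
The _method switch above chooses how a part's bounding box is reduced to a
single comparison key: np.linalg.norm compares parts by the length of the
bounding-box diagonal, while np.prod compares them by bounding-box volume.
For example:

    import numpy as np

    bounds = np.array([2.0, 1.0, 0.5])  # bounding-box extents of a part
    np.linalg.norm(bounds)              # ~2.29, diagonal length
    np.prod(bounds)                     # 1.0, volume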
Example #5
    def execute(self, context):
        sel = context.selected_objects
        ob = context.object

        # Determine spawn position of new parent object
        if self.loc == 'CENTER':
            obs = [o.location for o in sel]
            paloc = Vector(get_bounds_and_center(obs)[1])
        elif self.loc == 'ACTIVE' and ob:
            paloc = ob.location
        else:
            paloc = context.scene.cursor.location

        # Parent name is the longest common prefix of all selected
        # objects' names
        paname = os.path.commonprefix([o.name for o in sel])
        paname = paname.rstrip(self.to_strip) if paname else "Empty"

        # Create new Empty parent object
        pa = bpy.data.objects.new(paname, None)
        pa.location = paloc

        # Idol is the object determining the new Empty's parent and
        # collection
        idol = ob if ob else sel[0]
        pa.parent = idol.parent
        if idol.parent:
            pa.matrix_parent_inverse = idol.parent.matrix_world.inverted()
        idol.users_collection[0].objects.link(pa)

        context.view_layer.update()
        # Set the parent of all selected objects to 'pa'
        for o in sel:
            set_parent(o, pa, self.set_inv)

        return {'FINISHED'}
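
set_parent is not defined in this snippet. A common Blender pattern for
parenting while optionally keeping the child's world transform looks like the
sketch below; whether it matches the original helper (and the meaning of its
third argument) is an assumption:

    def set_parent(child, parent, set_inverse):
        """Hypothetical sketch: parent 'child' to 'parent'."""
        child.parent = parent
        if set_inverse:
            # Compensate the parent's transform so the child keeps its
            # current world-space position.
            child.matrix_parent_inverse = parent.matrix_world.inverted()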
Example #6
    def execute(self, context):
        # TARGET
        target = context.object
        tdata = target.data
        tverts = tdata.vertices
        teuler = target.matrix_world.to_euler()

        if tdata.is_editmode:
            # ensure newest changes from edit mode are visible to data
            target.update_from_editmode()

            # get selected vertices in target
            sel_flags_target = sbio.get_scalars(tdata.vertices)
            tcoords = sbio.get_vecs(tverts)[sel_flags_target]
        else:
            trot = np.array(teuler)

            # If we align to axes and the target is rotated, we can't
            # use Blender's bounding box. Instead, we have to find the
            # global bounds from all global vertex positions.
            # This is because for a rotated object, the global bounds of
            # its local bounding box aren't always equal to the global
            # bounds of all its vertices.
            # If we don't align to axes, we aren't interested in the
            # global target bounds anyway.
            tcoords = sbio.get_vecs(tverts) \
                if self.axes_align \
                and trot.dot(trot) > 0.001 \
                else np.array(target.bound_box)

        if len(tcoords) < 2:
            self.report({'ERROR_INVALID_INPUT'}, "Select at least 2 vertices")
            return {'CANCELLED'}

        tworldmat = np.array(target.matrix_world)

        if self.axes_align:
            # If we align sources to world axes, we are interested in
            # the target bounds in world coordinates.
            tcoords = sbt.transf_pts(tworldmat, tcoords)
            # If we align sources to axes, we ignore target's rotation.
            trotmat = np.identity(3)

        tbounds, tcenter = sbio.get_bounds_and_center(tcoords)

        if not self.axes_align:
            # Even though we want the target bounds in object space
            # when align to axes is false, we are still interested in
            # the world scale and center.
            tbounds *= np.array(target.matrix_world.to_scale())
            tcenter = sbt.transf_point(tworldmat, tcenter)
            # target rotation to later apply to all sources
            trotmat = np.array(teuler.to_matrix())

        # SOURCE
        error_happened = False
        for source in context.selected_objects:
            if source is target:
                continue

            if source.type != 'MESH':
                continue

            sdata = source.data
            sverts = sdata.vertices

            if sdata.is_editmode:
                # get selected vertices in source
                source.update_from_editmode()
                sverts_all = sbio.get_vecs(sdata.vertices)
                sselflags = sbio.get_scalars(sdata.vertices)
                sverts_sel = sverts_all[sselflags]

                if len(sverts_sel) < 2:
                    error_happened = True
                    continue
            else:
                sverts_sel = np.array(source.bound_box)

            sbounds, scenter = sbio.get_bounds_and_center(sverts_sel)
            sbounds_recpr = np.reciprocal(
                sbounds,
                # prevent division by 0
                out=np.ones_like(sbounds),
                where=sbounds != 0,
            )

            # assemble transformation matrix later applied to source
            transf_mat = \
                sbmm.to_transl_mat(tcenter) @ \
                sbmm.append_row_and_col(
                    trotmat @
                    sbmm.to_scale_mat(tbounds) @
                    sbmm.euler_to_rot_mat(np.array(self.rot_offset)) @
                    sbmm.to_scale_mat(sbounds_recpr)
                ) @ \
                sbmm.to_transl_mat(-scenter)

            if sdata.is_editmode:
                # somehow the mesh doesn't update if we stay in edit
                # mode
                bpy.ops.object.mode_set(mode='OBJECT')
                # transform transformation matrix from world to object
                # space
                transf_mat = np.array(source.matrix_world.inverted()) \
                    @ transf_mat
                # update every selected vertex with transformed
                # coordinates
                sverts_all[sselflags] = \
                    sbt.transf_pts(transf_mat, sverts_sel)
                # overwrite complete vertex list (also non-selected)
                sbio.set_vals(sverts, sverts_all)
                bpy.ops.object.mode_set(mode='EDIT')
            else:
                source.matrix_world = Matrix(transf_mat)

        if error_happened:
            self.report(
                {'ERROR_INVALID_INPUT'},
                "Select at least 2 vertices per selected source object")
        return {'FINISHED'}
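
The matrix assembly above composes, read right to left: move the source center
to the origin, scale by the reciprocal of the source bounds, apply the rotation
offset, scale up to the target bounds, rotate like the target, and translate to
the target center. The sbmm helpers aren't shown here; minimal NumPy sketches
of what they plausibly do (the implementations are assumptions, and
euler_to_rot_mat is omitted):

    import numpy as np


    def to_transl_mat(v):
        """4x4 homogeneous translation matrix."""
        m = np.identity(4)
        m[:3, 3] = v
        return m


    def to_scale_mat(s):
        """3x3 scale matrix from per-axis factors."""
        return np.diag(s)


    def append_row_and_col(m3):
        """Embed a 3x3 matrix into a 4x4 homogeneous matrix."""
        m = np.identity(4)
        m[:3, :3] = m3
        return m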
Example #7
    def _core(self, context, ob, verts, to_del=()):
        if len(verts) < 2:
            self.report({'ERROR_INVALID_INPUT'}, "Select at least 2 vertices")
            return

        mat_wrld = np.array(ob.matrix_world)
        in_editmode = context.mode == 'EDIT_MESH'

        if self.align_to_axes:
            # If we align sources to world axes, we are interested in
            # the bounds in world coordinates.
            verts = sbt.transf_vecs(mat_wrld, verts)
            # If we align sources to axes, we ignore ob's rotation.
            rotation = Euler()

        bounds, center = sbio.get_bounds_and_center(verts)

        if not self.align_to_axes:
            # Even though we want the ob bounds in object space when
            # align to axes is false, we are still interested in the
            # world scale and center.
            bounds *= np.array(ob.matrix_world.to_scale())
            center = sbt.transf_point(mat_wrld, center)
            rotation = ob.matrix_world.to_euler()

        if self.delete_original and in_editmode:
            mode = context.tool_settings.mesh_select_mode
            if mode[0]:
                del_type = 'VERTS'
            elif mode[1]:
                del_type = 'EDGES'
            else:
                del_type = 'FACES'
            for o in to_del:
                sbmm.remove_selection(o.data, type=del_type)

        if self.replace_by == 'CYLINDER_Z':
            bpy.ops.mesh.primitive_cylinder_add(
                {'active_object': ob},
                vertices=self.resolution,
                radius=self.metric(bounds[:2]) * 0.5,
                depth=bounds[2],
                end_fill_type='TRIFAN',
                location=center,
                rotation=rotation)
        elif self.replace_by == 'CYLINDER_Y':
            rotation.rotate(Euler((1.57, 0.0, 0.0)))
            bpy.ops.mesh.primitive_cylinder_add(
                {'active_object': ob},
                vertices=self.resolution,
                radius=self.metric(bounds[::2]) * 0.5,
                depth=bounds[1],
                end_fill_type='TRIFAN',
                location=center,
                rotation=rotation)
        elif self.replace_by == 'CYLINDER_X':
            rotation.rotate(Euler((0.0, 1.57, 0.0)))
            bpy.ops.mesh.primitive_cylinder_add(
                {'active_object': ob},
                vertices=self.resolution,
                radius=self.metric(bounds[1:]) * 0.5,
                depth=bounds[0],
                end_fill_type='TRIFAN',
                location=center,
                rotation=rotation)
        elif self.replace_by == 'CUBOID':
            if in_editmode:
                sbmm.add_box_to_obj(ob=ob,
                                    location=center,
                                    rotation=rotation,
                                    size=bounds)
            else:
                sbmm.add_box_to_scene(context, center, rotation, bounds)
        elif self.replace_by == 'SPHERE':
            bpy.ops.mesh.primitive_uv_sphere_add({'active_object': ob},
                                                 segments=self.resolution * 2,
                                                 ring_count=self.resolution,
                                                 radius=self.metric(bounds) *
                                                 0.5,
                                                 location=center,
                                                 rotation=rotation)

        if not in_editmode:
            # apply the original's material if it has one
            try:
                mat = ob.data.materials[0]
            except IndexError:
                pass
            else:
                context.object.data.materials.append(mat)

            if self.delete_original:
                for o in to_del:
                    bpy.data.objects.remove(o)
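
self.metric is not defined in this snippet; by analogy with the norm/prod
switch in Examples #1 and #4 it is presumably a reduction over the sliced
bounds. The slicing picks the two extents perpendicular to the cylinder axis,
e.g. for CYLINDER_Z:

    import numpy as np

    bounds = np.array([2.0, 4.0, 1.0])  # x, y, z extents
    np.linalg.norm(bounds[:2]) * 0.5    # ~2.24, circumscribes the XY bounds
    np.mean(bounds[:2]) * 0.5           # 1.5, averages the XY extents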
Example #8
    def _execute_inner(self, obs):
        dim = self.dim
        dimhalf = dim * .5
        offbuf = gpu.types.GPUOffScreen(dim, dim)
        sample = sample_sphere if self.dom == 'SPHERE' \
            else sample_hemisphere

        # Construct depthpass shader
        shader = gpu.types.GPUShader(
            vertexcode='''
            uniform mat4 mvp;
            in vec3 pos;
            void main() {
                gl_Position = mvp * vec4(pos, 1);
            }''',
            fragcode='''
            out vec4 col;
            void main() {
                col = vec4(0, 0, 1, 1);
            }'''
        )
        shader.bind()

        # Create batch from all objects in edit mode
        verts, indcs, geoinfo = combine_meshes(obs)
        batch = batch_for_shader(
            shader, 'TRIS',
            {"pos": verts},
            indices=indcs,
        )
        batch.program_set(shader)

        # Find the center and bounds of all objects to calculate the
        # encompassing radius of the (hemi-)sphere on which render
        # positions will be sampled
        bounds, centr = get_bounds_and_center(verts)
        rad = np.linalg.norm(bounds[:2]) * .5 + 1
        del indcs, bounds

        # Spawn debug sphere with calculated radius
        if self._debug_spawn_sphere:
            bpy.ops.mesh.primitive_uv_sphere_add(
                radius=rad,
                location=centr,
                )

        # Render the objects from several views and mark seen vertices
        visibl = np.zeros(len(verts), dtype=bool)
        for _ in range(self.samplecnt):
            # Generate a random point on the chosen domain from which
            # to render the objects.
            # Choose the rotation so the 'camera' looks at the center.
            samplepos, (theta, phi) = sample(rad)
            view_mat_inv = make_transf_mat(
                transl=samplepos + centr,
                rot=(phi, 0, theta + np.pi * .5),
                )

            # Spawn debug camera at sampled position
            if self._debug_spawn_cams:
                bpy.ops.object.camera_add()
                bpy.context.object.matrix_world = Matrix(view_mat_inv)

            # Build the Model View Projection matrix from the chosen
            # render position and radius.
            # The model matrix has already been applied to the vertices
            # before creating the batch.
            mvp = make_proj_mat(
                fov=90,
                clip_start=rad * .25,
                clip_end=rad * 1.5,
                dimx=dim,
                dimy=dim,
                ) @ np.linalg.inv(view_mat_inv)
            shader.uniform_float("mvp", Matrix(mvp))
            del view_mat_inv, samplepos, theta, phi

            with offbuf.bind():
                # Render the selected objects into the offscreen buffer
                bgl.glDepthMask(bgl.GL_TRUE)
                bgl.glClear(bgl.GL_DEPTH_BUFFER_BIT)
                bgl.glEnable(bgl.GL_DEPTH_TEST)
                batch.draw()

                # Write texture back to CPU
                pxbuf = bgl.Buffer(bgl.GL_FLOAT, dim * dim)
                bgl.glReadBuffer(bgl.GL_BACK)
                bgl.glReadPixels(0, 0, dim, dim, bgl.GL_DEPTH_COMPONENT,
                                 bgl.GL_FLOAT, pxbuf)

            # Map depth values from [0, 1] to [-1, 1]
            pxbuf = np.asanyarray(pxbuf) * 2 - 1
            pxbuf.shape = (dim, dim)

            # Transform verts of active object to clip space
            tverts = mvp @ append_one(verts).T
            # Perspective divide to transform to NDCs [-1, 1]
            tverts /= tverts[3]

            # Find pixel coordinates of each vertex's projected position
            # by remapping x and y coordinates from NDCs to [0, dim]
            # Add .5 to make sure the flooring from conversion to int
            # is actually rounding
            uvs = tverts[:2] * dimhalf + (dimhalf + .5)
            uvs = uvs.astype(np.int32)

            # Map all vertices outside the view frustum to (0, 0)
            # so they don't sample the pixel array out of bounds
            invalid = np.any((uvs < 0) | (dim <= uvs), axis=0)
            uvs.T[invalid] = (0, 0)

            # For each vertex, get the depth at its projected pixel
            # and its distance to the render position
            imgdpth = pxbuf[(uvs[1], uvs[0])]
            camdist = tverts[2]
            # Set the distance of invalid vertices past [-1, 1] so they
            # won't be selected
            camdist[invalid] = 2

            # A vertex is visible if it's inside the view frustum
            # (valid) and not occluded by any face.
            # A vertex is occluded when its depth sampled from the
            # image is smaller than its distance to the camera.
            # A small error margin is added to prevent self-occlusion.
            # The result is logically or-ed with the result from other
            # render positions.
            visibl |= camdist <= (imgdpth + .001)

            # Create debug image of the rendered view
            if self._debug_create_img:
                # Grayscale to RGBA and [-1, 1] to [0, 1]
                pxbuf = np.repeat(pxbuf, 4) * .5 + .5
                pxbuf.shape = (dim, dim, 4)
                # Alpha channel is 1
                pxbuf[:, :, 3] = 1
                # Mark projected vertex positions in red
                pxbuf[(uvs[1], uvs[0])] = (1, 0, 0, 1)

                imgname = "Debug"
                if imgname not in bpy.data.images:
                    bpy.data.images.new(imgname, dim, dim)
                image = bpy.data.images[imgname]
                image.scale(dim, dim)
                image.pixels = pxbuf.ravel()

        # Split the visibility flags back into the original objects
        offbuf.free()
        start = 0
        for o, (end, _) in zip(obs, geoinfo):
            o.data.vertices.foreach_set('select', visibl[start:end])
            start = end
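
sample_sphere and sample_hemisphere are not part of this snippet. A standard
way to sample a uniformly distributed point on a sphere of radius rad,
returning the position together with the angles the caller uses to orient the
'camera', could look like this (an assumption, not the original
implementation):

    import numpy as np


    def sample_sphere(rad):
        """Uniform random point on a sphere of radius 'rad'.

        Returns the Cartesian position and its (azimuth, polar) angles.
        """
        theta = np.random.uniform(0, 2 * np.pi)    # azimuth
        phi = np.arccos(np.random.uniform(-1, 1))  # polar, area-uniform
        pos = rad * np.array((
            np.sin(phi) * np.cos(theta),
            np.sin(phi) * np.sin(theta),
            np.cos(phi),
        ))
        return pos, (theta, phi)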
Example #9
    def _find_concave_patches(self, context):
        """
        A patch is a set of connected faces. For each patch containing
        at least two faces facing each other, return a list of vertex
        indices making up such a patch, together with its center point
        and diameter. A patch's border is along edges between faces
        facing away from each other (think ridges).
        """
        self._meshes.clear()
        for o in context.objects_in_mode_unique_data:
            o.update_from_editmode()
            data = o.data
            polys = data.polygons
            bob = bm.from_edit_mesh(data)
            bfaces = bob.faces
            bfaces.ensure_lookup_table()
            # bmesh has to recalculate face centers, so get them
            # directly from the mesh data instead
            centrs = get_vecs(polys, attr='center')
            # Bool array over face indices: True means the face is
            # selected and not yet visited. Unselected faces are
            # already False and will simply be skipped.
            flags = get_scalars(polys)
            # Will contain a list of tuples. The first entry is the
            # list of face indices of the patch, the second the maximum
            # angle between two neighboring faces in the patch.
            # The list is only needed so we don't delete vertices while
            # iterating the mesh.
            patches = []

            for i, new in enumerate(flags):
                if not new:
                    continue

                flags[i] = False
                # Faces to visit
                stack = [bfaces[i]]
                # Face indices of the patch
                findcs = []
                # Maximum dot product between two neighboring faces in
                # the patch.
                maxdot = 0

                while stack:
                    f = stack.pop()
                    n = f.normal
                    c = centrs[f.index]
                    findcs.append(f.index)

                    # Push all faces connected to f on stack...
                    for l in f.loops:
                        f2 = l.link_loop_radial_next.face
                        i2 = f2.index
                        # The dot product between f's normal and the
                        # vector between both face's centers is a
                        # simple way to measure if they are parallel
                        # (=0), concave (>0), or convex (<0).
                        angl = n.dot(normalize(centrs[i2] - c))
                        # but only if not already checked and f and f2
                        # are not convex (don't face away from each
                        # other)
                        if flags[i2] and angl > -1e-3:
                            maxdot = max(maxdot, angl)
                            flags[i2] = False
                            stack.append(f2)

                if len(findcs) > 2:
                    # pihalf: transform dot product result to rad angle
                    patches.append((findcs, maxdot * pihalf))

            del flags
            # second representation of patches, this time as a tuple of
            # face indices, max angle, and diameter
            patches2 = []

            for findcs, maxangl in patches:
                bounds, _ = get_bounds_and_center(centrs[findcs])
                patches2.append((findcs, maxangl, np.linalg.norm(bounds)))
            self._meshes.append((data, patches2))
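
The concavity test above needs only a dot product: for neighboring faces, the
sign of n.dot(normalize(c2 - c)) tells whether the shared edge forms an inner
(concave) or outer (convex) corner. A small numeric check of that claim, with
made-up face centers and normals:

    import numpy as np


    def normalize(v):
        return v / np.linalg.norm(v)


    n = np.array([0.0, 0.0, 1.0])  # normal of a floor face
    c = np.array([0.5, 0.0, 0.0])  # its center

    # Wall rising above the floor (inner corner): the neighbor's center
    # lies on the side the normal points to, so the dot product is > 0.
    c_concave = np.array([1.0, 0.0, 0.5])
    assert n.dot(normalize(c_concave - c)) > 0

    # Face dropping below the floor (outer corner): the dot product is
    # < 0, so the patch search stops at this edge.
    c_convex = np.array([1.0, 0.0, -0.5])
    assert n.dot(normalize(c_convex - c)) < 0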