    def event(self, event):
        super().event(event)
        modifiers = QApplication.keyboardModifiers()
        ctrl_is_active = modifiers & Qt.ControlModifier

        if event.type == Event.MousePressEvent and MouseEvent.LeftButton in event.buttons and self._controller.getToolsEnabled():
            if ctrl_is_active:
                self._controller.setActiveTool("TranslateTool")
                return

            if self._skip_press:
                # The selection was previously cleared, do not add/remove a support mesh but
                # use this click for selection and reactivating this tool only.
                self._skip_press = False
                return

            if self._selection_pass is None:
                # The selection renderpass is used to identify objects in the current view
                self._selection_pass = Application.getInstance().getRenderer().getRenderPass("selection")
            picked_node = self._controller.getScene().findObject(self._selection_pass.getIdAtPosition(event.x, event.y))
            if not picked_node:
                # There is no slicable object at the picked location
                return

            node_stack = picked_node.callDecoration("getStack")
            if node_stack:
                if node_stack.getProperty("support_mesh", "value"):
                    self._removeSupportMesh(picked_node)
                    return

                elif node_stack.getProperty(
                        "anti_overhang_mesh",
                        "value") or node_stack.getProperty(
                            "infill_mesh", "value") or node_stack.getProperty(
                                "cutting_mesh", "value"):
                    # Only "normal" meshes can have support_mesh added to them
                    return

            # Create a pass for picking a world-space location from the mouse location
            active_camera = self._controller.getScene().getActiveCamera()
            picking_pass = PickingPass(active_camera.getViewportWidth(), active_camera.getViewportHeight())
            picking_pass.render()

            picked_position = picking_pass.getPickedPosition(event.x, event.y)

            # Add the support_mesh cube at the picked location
            self._createSupportMesh(picked_node, picked_position)
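
The example above calls _createSupportMesh() and _removeSupportMesh(), which are defined elsewhere in the plugin. The following is a minimal sketch of what the creation helper could look like, modeled on Cura's bundled SupportEraser plugin; the node name, the cube size and the _createCube() helper are assumptions, only the "support_mesh" per-object setting comes from the example itself.

from UM.Math.Vector import Vector
from UM.Operations.AddSceneNodeOperation import AddSceneNodeOperation
from UM.Operations.GroupedOperation import GroupedOperation
from UM.Settings.SettingInstance import SettingInstance
from cura.CuraApplication import CuraApplication
from cura.Operations.SetParentOperation import SetParentOperation
from cura.Scene.BuildPlateDecorator import BuildPlateDecorator
from cura.Scene.CuraSceneNode import CuraSceneNode
from cura.Scene.SliceableObjectDecorator import SliceableObjectDecorator

    # Method on the same tool class as event() above (sketch, not the plugin's actual code).
    def _createSupportMesh(self, parent: CuraSceneNode, position: Vector) -> None:
        node = CuraSceneNode()
        node.setName("CustomSupport")
        node.setSelectable(True)
        node.setMeshData(self._createCube(10).build())  # assumed helper returning a MeshBuilder for a small cube
        node.addDecorator(BuildPlateDecorator(CuraApplication.getInstance().getMultiBuildPlateModel().activeBuildPlate))
        node.addDecorator(SliceableObjectDecorator())

        # Mark the new node as a support mesh through its per-object setting stack.
        stack = node.callDecoration("getStack")
        settings = stack.getTop()
        instance = SettingInstance(stack.getSettingDefinition("support_mesh"), settings)
        instance.setProperty("value", True)
        instance.resetState()
        settings.addInstance(instance)

        # Add the node to the scene with undo support, then re-parent it to the picked node.
        operation = GroupedOperation()
        operation.addOperation(AddSceneNodeOperation(node, self._controller.getScene().getRoot()))
        operation.addOperation(SetParentOperation(node, parent))
        operation.push()
        node.setPosition(position, CuraSceneNode.TransformSpace.World)
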
Example #2
    def event(self, event):
        super().event(event)
        modifiers = QApplication.keyboardModifiers()
        ctrl_is_active = modifiers & Qt.ControlModifier

        if event.type == Event.MousePressEvent and MouseEvent.LeftButton in event.buttons and self._controller.getToolsEnabled():
            if ctrl_is_active:
                self._controller.setActiveTool("TranslateTool")
                return

            if self._skip_press:
                # The selection was previously cleared, do not add/remove an anti-support mesh but
                # use this click for selection and reactivating this tool only.
                self._skip_press = False
                return

            if self._selection_pass is None:
                # The selection renderpass is used to identify objects in the current view
                self._selection_pass = Application.getInstance().getRenderer().getRenderPass("selection")
            picked_node = self._controller.getScene().findObject(self._selection_pass.getIdAtPosition(event.x, event.y))
            if not picked_node:
                # There is no slicable object at the picked location
                return

            node_stack = picked_node.callDecoration("getStack")
            if node_stack:
                if node_stack.getProperty("anti_overhang_mesh", "value"):
                    self._removeEraserMesh(picked_node)
                    return

                elif node_stack.getProperty("support_mesh", "value") or node_stack.getProperty("infill_mesh", "value") or node_stack.getProperty("cutting_mesh", "value"):
                    # Only "normal" meshes can have anti_overhang_meshes added to them
                    return

            # Create a pass for picking a world-space location from the mouse location
            active_camera = self._controller.getScene().getActiveCamera()
            picking_pass = PickingPass(active_camera.getViewportWidth(), active_camera.getViewportHeight())
            picking_pass.render()

            picked_position = picking_pass.getPickedPosition(event.x, event.y)

            # Add the anti_overhang_mesh cube at the picked location
            self._createEraserMesh(picked_node, picked_position)
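
The removal path, _removeEraserMesh(), is likewise not shown in Example #2. A minimal sketch under the same assumptions, modeled on the bundled SupportEraser plugin and using Uranium's RemoveSceneNodeOperation and Selection:

from UM.Operations.RemoveSceneNodeOperation import RemoveSceneNodeOperation
from UM.Scene.Selection import Selection
from cura.CuraApplication import CuraApplication
from cura.Scene.CuraSceneNode import CuraSceneNode

    # Method on the same tool class as event() above (sketch, not the plugin's actual code).
    def _removeEraserMesh(self, node: CuraSceneNode) -> None:
        parent = node.getParent()
        if parent == self._controller.getScene().getRoot():
            parent = None  # the eraser was parented directly to the scene root

        op = RemoveSceneNodeOperation(node)  # undoable removal of the eraser node
        op.push()

        if parent and not Selection.isSelected(parent):
            Selection.add(parent)  # keep the model the eraser was attached to selected

        CuraApplication.getInstance().getController().getScene().sceneChanged.emit(node)
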
Example #3
    def _constructSupport(self, buffer: QImage) -> None:
        depth_pass = PickingPass(buffer.width(), buffer.height())  #Instead of using the picking pass to pick for us, we need to bulk-pick pixels, so do this in Numpy.
        depth_pass.render()
        depth_image = depth_pass.getOutput()
        camera = CuraApplication.getInstance().getController().getScene().getActiveCamera()

        #to_support = qimage2ndarray.raw_view(buffer)
        #to_support= _qimageview(_qt.QImage(buffer))
        to_support = self._raw_view(buffer)

        #depth = qimage2ndarray.recarray_view(depth_image)
        depth = self._recarray_view(depth_image)

        depth.a = 0  #Discard alpha channel.
        depth = depth.view(dtype=_np.int32).astype(_np.float32) / 1000  #Conflate the R, G and B channels to one 24-bit (cast to 32) float. Divide by 1000 to get mm.
        support_positions_2d = _np.array(_np.where(_np.bitwise_and(to_support == 255, depth < 16777)))  #All the 2D coordinates on the screen where we want support. The 16777 is for points that don't land on a model.
        support_depths = _np.take(depth, support_positions_2d[0, :] * depth.shape[1] + support_positions_2d[1, :])  #The depth at those pixels.
        support_positions_2d = support_positions_2d.transpose()  #We want rows with pixels, not columns with pixels.
        if len(support_positions_2d) == 0:
            Logger.log("i", "Support was not drawn on the surface of any objects. Not creating support.")
            return
        support_positions_2d[:, [0, 1]] = support_positions_2d[:, [1, 0]]  #Swap columns to get OpenGL's coordinate system.
        camera_viewport = _np.array([camera.getViewportWidth(), camera.getViewportHeight()])
        support_positions_2d = support_positions_2d * 2.0 / camera_viewport - 1.0  #Scale to view coordinates (range -1 to 1).
        inverted_projection = _np.linalg.inv(camera.getProjectionMatrix().getData())
        transformation = camera.getWorldTransformation().getData()
        transformation[:, 1] = -transformation[:, 1]  #Invert Z to get OpenGL's coordinate system.

        #For each pixel, get the near and far plane.
        near = _np.ndarray((support_positions_2d.shape[0], 4))
        near.fill(1)
        near[0:support_positions_2d.shape[0], 0:support_positions_2d.shape[1]] = support_positions_2d
        near[:, 2].fill(-1)
        near = _np.dot(inverted_projection, near.transpose())
        near = _np.dot(transformation, near)
        near = near[0:3] / near[3]
        far = _np.ndarray((support_positions_2d.shape[0], 4))
        far.fill(1)
        far[0:support_positions_2d.shape[0], 0:support_positions_2d.shape[1]] = support_positions_2d
        far = _np.dot(inverted_projection, far.transpose())
        far = _np.dot(transformation, far)
        far = far[0:3] / far[3]

        #Direction is from near plane pixel to far plane pixel, normalised.
        direction = near - far
        direction /= _np.linalg.norm(direction, axis=0)

        #Final position is in the direction of the pixel, moving with <depth> mm away from the camera position.
        support_positions_3d = (support_depths - 1) * direction  #We want the support to appear just before the surface, not behind the surface, so - 1.
        support_positions_3d = support_positions_3d.transpose()
        camera_position_data = camera.getPosition().getData()
        support_positions_3d = support_positions_3d + camera_position_data

        #Create the vertices for the 3D mesh.
        #This mesh consists of a diamond-shape for each position that we traced.
        n = support_positions_3d.shape[0]
        Logger.log("i", "Adding support in {num_pixels} locations.".format(num_pixels=n))
        vertices = support_positions_3d.copy().astype(_np.float32)
        vertices = _np.resize(vertices, (n * 6, support_positions_3d.shape[1]))  #Resize will repeat all coordinates 6 times.
        #For each position, create a diamond shape around the position with 6 vertices.
        vertices[n * 0:n * 1, 0] -= support_depths * 0.001 * self.globule_size  #First corner (-x, +y).
        vertices[n * 0:n * 1, 2] += support_depths * 0.001 * self.globule_size
        vertices[n * 1:n * 2, 0] += support_depths * 0.001 * self.globule_size  #Second corner (+x, +y).
        vertices[n * 1:n * 2, 2] += support_depths * 0.001 * self.globule_size
        vertices[n * 2:n * 3, 0] -= support_depths * 0.001 * self.globule_size  #Third corner (-x, -y).
        vertices[n * 2:n * 3, 2] -= support_depths * 0.001 * self.globule_size
        vertices[n * 3:n * 4, 0] += support_depths * 0.001 * self.globule_size  #Fourth corner (+x, -y).
        vertices[n * 3:n * 4, 2] -= support_depths * 0.001 * self.globule_size
        vertices[n * 4:n * 5, 1] += support_depths * 0.001 * self.globule_size  #Top side.
        vertices[n * 5:n * 6, 1] -= support_depths * 0.001 * self.globule_size  #Bottom side.

        #Create the faces of the diamond.
        indices = _np.arange(n, dtype=_np.int32)
        indices = _np.kron(indices, _np.ones((3, 1))).astype(_np.int32).transpose()
        indices = _np.resize(indices, (n * 8, 3))  #Creates 8 triangles using 3 times the same vertex, for each position: [[0, 0, 0], [1, 1, 1], ... , [0, 0, 0], [1, 1, 1], ... ]

        #indices[n * 0: n * 1, 0] += n * 0 #First corner.
        indices[n * 0:n * 1, 1] += n * 1  #Second corner.
        indices[n * 0:n * 1, 2] += n * 4  #Top side.

        indices[n * 1:n * 2, 0] += n * 1  #Second corner.
        indices[n * 1:n * 2, 1] += n * 3  #Fourth corner.
        indices[n * 1:n * 2, 2] += n * 4  #Top side.

        indices[n * 2:n * 3, 0] += n * 3  #Fourth corner.
        indices[n * 2:n * 3, 1] += n * 2  #Third corner.
        indices[n * 2:n * 3, 2] += n * 4  #Top side.

        indices[n * 3:n * 4, 0] += n * 2  #Third corner.
        #indices[n * 3: n * 4, 1] += n * 0 #First corner.
        indices[n * 3:n * 4, 2] += n * 4  #Top side.

        indices[n * 4:n * 5, 0] += n * 1  #Second corner.
        #indices[n * 4: n * 5, 1] += n * 0 #First corner.
        indices[n * 4:n * 5, 2] += n * 5  #Bottom side.

        indices[n * 5:n * 6, 0] += n * 3  #Fourth corner.
        indices[n * 5:n * 6, 1] += n * 1  #Second corner.
        indices[n * 5:n * 6, 2] += n * 5  #Bottom side.

        indices[n * 6:n * 7, 0] += n * 2  #Third corner.
        indices[n * 6:n * 7, 1] += n * 3  #Fourth corner.
        indices[n * 6:n * 7, 2] += n * 5  #Bottom side.

        #indices[n * 7: n * 8, 0] += n * 0 #First corner.
        indices[n * 7:n * 8, 1] += n * 2  #Third corner.
        indices[n * 7:n * 8, 2] += n * 5  #Bottom side.

        builder = MeshBuilder()
        builder.addVertices(vertices)
        builder.addIndices(indices)

        #Create the scene node.
        scene = CuraApplication.getInstance().getController().getScene()
        new_node = CuraSceneNode(parent=scene.getRoot(), name="BrushSupport")
        new_node.setSelectable(False)
        new_node.setMeshData(builder.build())
        new_node.addDecorator(BuildPlateDecorator(CuraApplication.getInstance().getMultiBuildPlateModel().activeBuildPlate))
        new_node.addDecorator(SliceableObjectDecorator())
        operation = GroupedOperation()

        #Figure out which mesh this piece of support belongs to.
        #TODO: You can draw support in one stroke over multiple meshes. The support would belong to an arbitrary one of these.
        selection_pass = CuraApplication.getInstance().getRenderer().getRenderPass("selection")
        parent_id = selection_pass.getIdAtPosition(support_positions_2d[0][0], support_positions_2d[0][1])  #Find the selection under the first support pixel.
        parent_node = scene.getRoot()
        if not parent_id:
            Logger.log("d", "Can't link custom support to any scene node.")
        else:
            for node in BreadthFirstIterator(scene.getRoot()):
                if id(node) == parent_id:
                    parent_node = node
                    break

        #Add the appropriate per-object settings.
        stack = new_node.callDecoration("getStack")  #Created by SettingOverrideDecorator that is automatically added to CuraSceneNode.
        settings = stack.getTop()
        support_mesh_instance = SettingInstance(
            stack.getSettingDefinition("support_mesh"), settings)
        support_mesh_instance.setProperty("value", True)
        support_mesh_instance.resetState()
        settings.addInstance(support_mesh_instance)
        drop_down_instance = SettingInstance(
            stack.getSettingDefinition("support_mesh_drop_down"), settings)
        drop_down_instance.setProperty("value", True)
        drop_down_instance.resetState()
        settings.addInstance(drop_down_instance)

        #Add the scene node to the scene (and allow for undo).
        operation.addOperation(AddSceneNodeOperation(new_node, scene.getRoot()))  #Set the parent to root initially, then change the parent, so that we don't have to alter the transformation.
        operation.addOperation(SetParentOperation(new_node, parent_node))
        operation.push()

        scene.sceneChanged.emit(new_node)
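
All three examples share the same PickingPass pattern: construct the pass with the viewport (or buffer) size, render it, then read world-space data back. For a single click the bulk decoding done in Example #3 is unnecessary; the pass can report one picked position directly, as the first two examples do. A condensed sketch of that common pattern, where controller, event.x and event.y are assumed to come from the surrounding tool code:

from cura.PickingPass import PickingPass

# Build a depth/picking pass that matches the active camera's viewport.
camera = controller.getScene().getActiveCamera()
picking_pass = PickingPass(camera.getViewportWidth(), camera.getViewportHeight())
picking_pass.render()

# Read back the world-space position under the cursor (normalized viewport coordinates).
picked_position = picking_pass.getPickedPosition(event.x, event.y)
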