def rotateImage90(self, direction):
    """Rotate the image 90° clockwise or counterclockwise."""
    if not self.image.isNull():
        if direction == "cw":
            transform90 = QTransform().rotate(90)
        elif direction == "ccw":
            transform90 = QTransform().rotate(-90)
        pixmap = QPixmap(self.image)

        # TODO: Try flipping the height/width when flipping the image
        rotated = pixmap.transformed(transform90, mode=Qt.SmoothTransformation)
        # The rotated image swaps width and height, so resize the label accordingly.
        self.resize(self.image.height(), self.image.width())

        self.image = QImage(rotated)
        self.setPixmap(rotated.scaled(self.size(),
                                      Qt.KeepAspectRatioByExpanding,
                                      Qt.SmoothTransformation))
        self.repaint()  # Repaint the child widget.
    else:
        # No image to rotate
        pass
def bind(self, texture_unit):
    """Bind the texture to a certain texture unit.

    :param texture_unit: The texture unit to bind to.
    """
    if not self._qt_texture.isCreated():
        if self._file_name is not None:
            self._image = QImage(self._file_name).mirrored()
        elif self._image is None:  # No file name or image set.
            self._image = QImage(1, 1, QImage.Format.Format_ARGB32)
            self._image.fill(0)
        self._qt_texture.setData(self._image)
        self._qt_texture.setMinMagFilters(QOpenGLTexture.Filter.Linear,
                                          QOpenGLTexture.Filter.Linear)
    self._qt_texture.bind(texture_unit)
def insertFromMimeData(self, source):
    cursor = self.textCursor()
    document = self.document()

    if source.hasUrls():
        for u in source.urls():
            # splitext() is assumed to return the lower-cased extension only
            # (see the helpers sketched below).
            file_ext = splitext(str(u.toLocalFile()))
            if u.isLocalFile() and file_ext in IMAGE_EXTENSIONS:
                image = QImage(u.toLocalFile())
                document.addResource(QTextDocument.ImageResource, u, image)
                cursor.insertImage(u.toLocalFile())
            else:
                # A non-local or non-image URL: stop and let the default handler deal with it.
                break
        else:
            # Every URL was inserted as an image; nothing left to do.
            return
    elif source.hasImage():
        image = source.imageData()
        uuid = hexuuid()
        document.addResource(QTextDocument.ImageResource, uuid, image)
        cursor.insertImage(uuid)
        return

    return super(TextEdit, self).insertFromMimeData(source)
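# The method above relies on a few module-level helpers that are not part of this snippet.
# A minimal sketch of what they might look like (the extension list and the bodies below are
# assumptions, not the original definitions):
import os
import uuid

IMAGE_EXTENSIONS = [".jpg", ".png", ".bmp"]

def hexuuid():
    """Return a random hex string to use as a QTextDocument resource name."""
    return uuid.uuid4().hex

def splitext(p):
    """Return only the lower-cased extension of a path, e.g. ".png"."""
    return os.path.splitext(p)[1].lower()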
def convertToRGB(self):
    """Convert the image to RGB format."""
    if not self.image.isNull():
        converted_img = self.image.convertToFormat(QImage.Format_RGB32)
        self.image = QImage(converted_img)
        self.setPixmap(QPixmap.fromImage(converted_img))
        self.repaint()
def convertToGray(self):
    """Convert the image to grayscale."""
    if not self.image.isNull():
        converted_img = self.image.convertToFormat(QImage.Format_Grayscale16)
        self.image = QImage(converted_img)
        self.setPixmap(QPixmap.fromImage(converted_img))
        self.repaint()
def cropImage(self):
    """Crop a portion of the image."""
    if not self.image.isNull():
        # Crop a fixed region for now; see the selection-based sketch below.
        rect = QRect(10, 20, 400, 200)
        original_image = self.image
        cropped = original_image.copy(rect)
        self.image = QImage(cropped)
        self.setPixmap(QPixmap.fromImage(cropped))
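# cropImage() above crops a hard-coded QRect. A hedged sketch of how the rectangle could
# instead come from the QRubberBand created in the label's __init__() further down (the
# method name is hypothetical, and any scaling between the label and the image is ignored):
def cropToSelection(self):
    """Crop the image to the current rubber-band selection (illustrative sketch only)."""
    if not self.image.isNull() and self.rubber_band.isVisible():
        rect = self.rubber_band.geometry()  # Selection in label coordinates.
        cropped = self.image.copy(rect)
        self.image = QImage(cropped)
        self.setPixmap(QPixmap.fromImage(cropped))
        self.rubber_band.hide()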
def __init__(self):
    super(MainWindow, self).__init__()
    self.title = "Image Viewer"
    self.setWindowTitle(self.title)

    self.lastY = 0
    self.image = QImage(2080, 1, QImage.Format.Format_Grayscale8)
    self.label = QLabel(self)
    self.resize(2080, 500)
def getOutput(self) -> QImage:
    """Get the pixel data produced by this render pass.

    This returns an object that contains the pixel data for this render pass.

    :note: The object type returned currently depends on the specific
        implementation of the UM.View.GL.FrameBufferObject class.
    """
    if self._fbo is None:
        Logger.log("w", "FrameBufferObject has been released. Can't get frame output.")
        return QImage()
    return self._fbo.getContents()
def opencv_to_qt(img) -> QImage:
    """Convert an OpenCV image (BGR/BGRA numpy array) to a QImage in RGB/RGBA format."""
    qformat = QImage.Format.Format_Indexed8
    if len(img.shape) == 3:
        if img.shape[2] == 4:  # BGRA
            qformat = QImage.Format.Format_RGBA8888
        else:  # BGR
            qformat = QImage.Format.Format_RGB888

    # Make sure the buffer is contiguous uint8 data, as QImage expects.
    img = numpy.require(img, numpy.uint8, "C")
    out_image = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
    # Swap the BGR channel order used by OpenCV to the RGB order used by Qt.
    out_image = out_image.rgbSwapped()

    return out_image
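# Minimal usage sketch for opencv_to_qt(), assuming OpenCV is installed as cv2, a
# QApplication is already running, and the Qt classes used elsewhere in these snippets
# are imported; the function name and file path are hypothetical.
def _show_opencv_image_demo(path="photo.png"):
    import cv2  # Assumed dependency; not imported elsewhere in these snippets.
    frame = cv2.imread(path)  # BGR (or BGRA) numpy array.
    label = QLabel()
    label.setPixmap(QPixmap.fromImage(opencv_to_qt(frame)))
    label.show()
    return label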
def __init__(self, parent, image=None):
    super().__init__(parent)
    self.parent = parent
    self.image = QImage()
    self.original_image = self.image

    self.rubber_band = QRubberBand(QRubberBand.Shape.Rectangle, self)

    # setBackgroundRole() would create a background for the image:
    #self.setBackgroundRole(QPalette.Base)
    self.setSizePolicy(QSizePolicy.Policy.Ignored, QSizePolicy.Policy.Ignored)
    self.setScaledContents(True)

    # Load the (initially empty) image into the label.
    self.setPixmap(QPixmap.fromImage(self.image))
    self.setAlignment(Qt.AlignmentFlag.AlignCenter)
def test_sanity(tmp_path):
    # Segfault test
    app = QApplication([])
    ex = Example()
    assert app  # Silence warning
    assert ex  # Silence warning

    for mode in ("1", "RGB", "RGBA", "L", "P"):
        # to QPixmap
        im = hopper(mode)
        data = ImageQt.toqpixmap(im)

        assert isinstance(data, QPixmap)
        assert not data.isNull()

        # Test saving the file
        tempfile = str(tmp_path / f"temp_{mode}.png")
        data.save(tempfile)

        # Render the image
        qimage = ImageQt.ImageQt(im)
        data = QPixmap.fromImage(qimage)
        qt_format = QImage.Format if ImageQt.qt_version == "6" else QImage
        qimage = QImage(128, 128, qt_format.Format_ARGB32)
        painter = QPainter(qimage)
        image_label = QLabel()
        image_label.setPixmap(data)
        image_label.render(painter, QPoint(0, 0), QRegion(0, 0, 128, 128))
        painter.end()
        rendered_tempfile = str(tmp_path / f"temp_rendered_{mode}.png")
        qimage.save(rendered_tempfile)
        assert_image_equal_tofile(im.convert("RGBA"), rendered_tempfile)

        # from QPixmap
        roundtrip(hopper(mode))

    app.quit()
    app = None
def openImage(self):
    """Load a new image into the label."""
    image_file, _ = QFileDialog.getOpenFileName(
        self, "Open Image", "",
        "PNG Files (*.png);;JPG Files (*.jpeg *.jpg);;"
        "Bitmap Files (*.bmp);;GIF Files (*.gif)")

    if image_file:
        # Reset values when opening an image
        self.parent.zoom_factor = 1
        self.parent.print_act.setEnabled(True)
        self.parent.updateActions()

        # Reset all sliders
        self.parent.brightness_slider.setValue(0)

        # Get image format
        image_format = self.image.format()
        self.image = QImage(image_file)
        self.original_image = self.image.copy()

        self.setPixmap(QPixmap.fromImage(self.image))
        self.resize(self.pixmap().size())
    elif image_file == "":
        # User selected Cancel
        pass
    else:
        QMessageBox.information(self, "Error", "Unable to open image.", QMessageBox.Ok)
def flipImage(self, axis):
    """Mirror the image across the horizontal or vertical axis."""
    if not self.image.isNull():
        if axis == "horizontal":
            flip_h = QTransform().scale(-1, 1)
            pixmap = QPixmap(self.image)
            flipped = pixmap.transformed(flip_h)
        elif axis == "vertical":
            flip_v = QTransform().scale(1, -1)
            pixmap = QPixmap(self.image)
            flipped = pixmap.transformed(flip_v)
        self.image = QImage(flipped)
        self.setPixmap(flipped)
        self.repaint()
    else:
        # No image to flip
        pass
def resizeImage(self):
    """Resize the image to half its current size."""
    # TODO: Resize the image by a user-specified size.
    if not self.image.isNull():
        resize = QTransform().scale(0.5, 0.5)
        pixmap = QPixmap(self.image)
        resized_image = pixmap.transformed(resize, mode=Qt.SmoothTransformation)
        self.image = QImage(resized_image)
        self.setPixmap(resized_image)
        self.setScaledContents(True)
        self.repaint()  # Repaint the child widget.
    else:
        # No image to resize
        pass
def take_snapshot(camera_position, camera_lookat, is_layer_view) -> typing.Optional[QImage]:
    """
    Take a snapshot of the current scene.

    :param camera_position: The position of the camera to take the snapshot with.
    :param camera_lookat: The position of the focal point of the camera.
    :param is_layer_view: Whether we're looking at layer view or the model itself.
    :return: A screenshot of the current scene.
    """
    application = cura.CuraApplication.CuraApplication.getInstance()
    plugin_registry = application.getPluginRegistry()

    # Set the camera to the desired position. We'll use the actual camera for the snapshot just
    # because it looks cool while it's busy.
    camera = application.getController().getScene().getActiveCamera()
    # Note that these are OpenGL coordinates, swapping Y and Z.
    camera.setPosition(UM.Math.Vector.Vector(camera_position[0], camera_position[2], camera_position[1]))
    if not camera_lookat:
        bounding_box = UM.Math.AxisAlignedBox.AxisAlignedBox()
        for node in UM.Scene.Iterator.DepthFirstIterator.DepthFirstIterator(
                cura.CuraApplication.CuraApplication.getInstance().getController().getScene().getRoot()):
            if node.isSelectable():
                bounding_box = bounding_box + node.getBoundingBox()
        camera_lookat = bounding_box.center
    else:
        camera_lookat = UM.Math.Vector.Vector(camera_lookat[0], camera_lookat[2], camera_lookat[1])
    camera.lookAt(camera_lookat)

    if abs(camera.getPosition().x - camera_lookat.x) < 0.01 and abs(camera.getPosition().z - camera_lookat.z) < 0.01:
        # Looking straight up or straight down.
        # Make sure the yaw of the camera is consistent regardless of previous position.
        if camera.getPosition().y > camera_lookat.y:
            camera.setOrientation(UM.Math.Quaternion.Quaternion(-2, 0, 0, 2))
        else:
            camera.setOrientation(UM.Math.Quaternion.Quaternion(2, 0, 0, 2))
    time.sleep(2)  # Some time to update the scene nodes. Don't know if this is necessary but it feels safer.

    # Use a transparent background.
    gl_bindings = UM.View.GL.OpenGL.OpenGL.getInstance().getBindingsObject()
    gl_bindings.glClearColor(0.0, 0.0, 0.0, 0.0)
    gl_bindings.glClear(gl_bindings.GL_COLOR_BUFFER_BIT | gl_bindings.GL_DEPTH_BUFFER_BIT)

    try:  # In Qt6 it's an enum, in Qt5 it's a field of QImage.
        colour_format = QImage.Format.Format_ARGB32  # For Qt6.
    except AttributeError:
        colour_format = QImage.Format_ARGB32  # For Qt5.

    if is_layer_view:
        # Remove any nozzle node. It can get in the way of what we want to see and influence
        # cropping of the image badly.
        simulation_view_plugin = plugin_registry.getPluginObject("SimulationView")
        for node in UM.Scene.Iterator.DepthFirstIterator.DepthFirstIterator(
                application.getController().getScene().getRoot()):
            # This node is a NozzleNode (the actual class is not exposed to us outside the plug-in).
            if hasattr(node, "_createNozzleMesh"):
                node.getParent().removeChild(node)

        render_pass = simulation_view_plugin.getSimulationPass()
        render_pass.render()
        time.sleep(1.2)
        screenshot = render_pass.getOutput()
        print("---- screenshot size:", screenshot.width(), "x", screenshot.height())
        # render_width and render_height are assumed to be module-level constants holding the
        # intended snapshot resolution (they are not defined in this snippet).
        if screenshot.width() != render_width or screenshot.height() != render_height:
            print("---- render output not correct size! Resizing window to compensate.")
            main_window = application.getMainWindow()
            delta_width = render_width - screenshot.width()
            delta_height = render_height - screenshot.height()
            main_window.setWidth(main_window.width() + delta_width)
            main_window.setHeight(main_window.height() + delta_height)
            return None  # Failed to render. Try again after waiting outside of Qt thread.

        # Remove the alpha channel from this picture. We don't want the semi-transparent support
        # since we don't draw the object outline here.
        # Sadly, QImage.convertToFormat has only 2 formats with boolean alpha and they both
        # premultiply. So we'll go the hard way: through Numpy.
        pixel_bits = screenshot.bits().asarray(screenshot.sizeInBytes())
        pixels = numpy.frombuffer(pixel_bits, dtype=numpy.uint8).reshape(
            [screenshot.height(), screenshot.width(), 4])
        opaque = numpy.nonzero(pixels[:, :, 0])
        pixels[opaque[0], opaque[1], 3] = 255
        # Make a copy because the pixel data will go out of scope for Numpy, so that would be invalid memory.
        return QImage(pixels.data, pixels.shape[1], pixels.shape[0], colour_format).copy()
    else:
        # Render the objects themselves! Going to be quite complex here since the render is
        # highly specialised in what it shows and what it doesn't.
        view = plugin_registry.getPluginObject("SolidView")
        view._checkSetup()
        renderer = view.getRenderer()

        support_angle = application.getGlobalContainerStack().getProperty("support_angle", "value")
        view._enabled_shader.setUniformValue("u_overhangAngle",
                                             math.cos(math.radians(90 - support_angle)))  # Correct overhang angle.
        view._enabled_shader.setUniformValue("u_lowestPrintableHeight", -1.0)  # Don't show initial layer height.
        object_batch = renderer.createRenderBatch(shader=view._enabled_shader)
        renderer.addRenderBatch(object_batch)
        for node in UM.Scene.Iterator.DepthFirstIterator.DepthFirstIterator(
                application.getController().getScene().getRoot()):
            if not node.getMeshData() or not node.isSelectable():
                continue
            uniforms = {}

            # Get the object's colour.
            extruder_index = int(node.callDecoration("getActiveExtruderPosition"))
            material_color = application.getExtrudersModel().getItem(extruder_index)["color"]
            uniforms["diffuse_color"] = [
                int(material_color[1:3], 16) / 255,
                int(material_color[3:5], 16) / 255,
                int(material_color[5:7], 16) / 255,
                1.0
            ]

            # Render with special shaders for special types of meshes, or otherwise in the normal batch.
            if node.callDecoration("isNonPrintingMesh") and (
                    node.callDecoration("isInfillMesh") or node.callDecoration("isCuttingMesh")):
                renderer.queueNode(node, shader=view._non_printing_shader, uniforms=uniforms, transparent=True)
            elif node.callDecoration("isSupportMesh"):
                uniforms["diffuse_color_2"] = [
                    uniforms["diffuse_color"][0] * 0.6,
                    uniforms["diffuse_color"][1] * 0.6,
                    uniforms["diffuse_color"][2] * 0.6,
                    1.0
                ]
                renderer.queueNode(node, shader=view._support_mesh_shader, uniforms=uniforms)
            else:
                object_batch.addItem(node.getWorldTransformation(copy=False), node.getMeshData(),
                                     uniforms=uniforms,
                                     normal_transformation=node.getCachedNormalMatrix())

        default_pass = renderer.getRenderPass("default")
        default_pass.render()
        time.sleep(1.2)
        normal_shading = default_pass.getOutput()

        xray_pass = renderer.getRenderPass("xray")
        renderer.addRenderPass(xray_pass)
        xray_pass.render()
        time.sleep(1.2)
        xray_shading = xray_pass.getOutput()

        # Manually composite these shadings, because the composite shader also adds a background colour.
        normal_data = normal_shading.bits().asarray(normal_shading.sizeInBytes())
        composite_pixels = numpy.frombuffer(normal_data, dtype=numpy.uint8).reshape(
            [normal_shading.height(), normal_shading.width(), 4])  # Start from the normal image.
        colours = numpy.true_divide(composite_pixels[:, :, 0:3], 255)  # Scaled to [0, 1].
        alpha = numpy.true_divide(composite_pixels[:, :, 3], 255)

        xray_data = xray_shading.bits().asarray(xray_shading.sizeInBytes())
        xray_pixels = numpy.frombuffer(xray_data, dtype=numpy.uint8).reshape(
            [xray_shading.height(), xray_shading.width(), 4])
        # The X-ray shader creates increments of 5 for some reason. If there is an odd number of
        # increments (not divisible by 10), the pixel must be highlighted.
        xray_pixels = numpy.mod(xray_pixels[:, :, 0:3], 10) // 5

        hue_shift = (alpha - 0.333) * 6.2831853
        cos_shift = numpy.repeat(numpy.expand_dims(numpy.cos(-hue_shift), axis=2), 3, axis=2)
        sin_shift = numpy.repeat(numpy.expand_dims(numpy.sin(-hue_shift), axis=2), 3, axis=2)
        # 1/sqrt(3): the diagonal unit vector around which we rotate the colour channels.
        k = numpy.array([0.57735, 0.57735, 0.57735])
        cross_colour = numpy.cross(colours, k) * -1
        dot_colour = numpy.repeat(numpy.expand_dims(numpy.dot(colours, k), axis=2), 3, axis=2)
        # Rodrigues' rotation formula!
        rotated_hue = colours * cos_shift + cross_colour * sin_shift + (cos_shift * -1 + 1.0) * dot_colour * k
        rotated_hue = rotated_hue * 255

        composite_pixels[:, :, 0:3] -= composite_pixels[:, :, 0:3] * xray_pixels  # Don't use the normal colour for x-rayed pixels.
        composite_pixels[:, :, 0:3] += (rotated_hue * xray_pixels).astype("uint8")  # Use the rotated colour instead.
        composite_pixels[:, :, 3][alpha > 0.1] = 255

        # Make a copy because the pixel data will go out of scope for Numpy, so that would be invalid memory.
        return QImage(composite_pixels.data, composite_pixels.shape[1], composite_pixels.shape[0],
                      colour_format).copy()
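# The compositing above rotates the hue of x-rayed pixels by applying Rodrigues' rotation
# formula to the RGB channels around the grey diagonal (1, 1, 1)/sqrt(3). A standalone sketch
# of that rotation for a single colour, assuming numpy is imported as in the function above
# (the function name and signature are not part of the original script):
def rotate_hue(rgb, angle):
    """Rotate an RGB colour (floats in [0, 1]) around the grey diagonal by `angle` radians."""
    k = numpy.array([0.57735, 0.57735, 0.57735])  # Unit vector along the grey diagonal.
    cos_a = numpy.cos(angle)
    sin_a = numpy.sin(angle)
    # Rodrigues' rotation formula: v cos(a) + (k x v) sin(a) + k (k . v)(1 - cos(a)).
    return rgb * cos_a + numpy.cross(k, rgb) * sin_a + k * numpy.dot(k, rgb) * (1.0 - cos_a)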
def get_masked_image(path, size=64, overlay_text=""):
    """
    Returns a pixmap from an image file masked with a smooth circle.
    The returned pixmap will have a size of *size* × *size* pixels.

    :param str path: Path to image file.
    :param int size: Target size. Will be the diameter of the masked image.
    :param str overlay_text: Overlay text. This will be shown in white sans-serif on top of the image.
    :return: Masked image with overlay text.
    :rtype: QPixmap
    """
    with open(path, "rb") as f:
        imgdata = f.read()

    imgtype = path.split(".")[-1]

    # Load the image and convert to 32-bit ARGB (adds an alpha channel).
    # Note that convertToFormat() returns a new image rather than converting in place.
    image = QImage.fromData(imgdata, imgtype)
    image = image.convertToFormat(QImage.Format.Format_ARGB32)

    # Crop the image to a square:
    imgsize = min(image.width(), image.height())
    width = (image.width() - imgsize) / 2
    height = (image.height() - imgsize) / 2
    rect = QRect(round(width), round(height), imgsize, imgsize)
    image = image.copy(rect)

    # Create the output image with the same dimensions and an alpha channel
    # and make it completely transparent:
    out_img = QImage(imgsize, imgsize, QImage.Format.Format_ARGB32)
    out_img.fill(Qt.GlobalColor.transparent)

    # Create a texture brush and paint a circle with the original image onto
    # the output image:
    brush = QBrush(image)  # Create a texture brush
    painter = QPainter(out_img)  # Paint the output image
    painter.setBrush(brush)  # Use the image texture brush
    painter.setPen(Qt.PenStyle.NoPen)  # Don't draw an outline
    painter.setRenderHint(QPainter.RenderHint.Antialiasing, True)  # Use antialiasing
    painter.drawEllipse(0, 0, imgsize, imgsize)  # Actually draw the circle

    if overlay_text:
        # Draw the overlay text. setPointSize() expects an integer point size.
        font = QtGui.QFont("Arial Rounded MT Bold")
        font.setPointSize(round(imgsize * 0.4))
        painter.setFont(font)
        painter.setPen(Qt.GlobalColor.white)
        painter.drawText(QRect(0, 0, imgsize, imgsize), Qt.AlignmentFlag.AlignCenter, overlay_text)

    painter.end()  # We are done (segfault if you forget this)

    # Convert the image to a pixmap and rescale it. Take the pixel ratio into
    # account to get a sharp image on retina displays:
    pr = QtWidgets.QApplication.instance().devicePixelRatio()
    pm = QPixmap.fromImage(out_img)
    pm.setDevicePixelRatio(pr)
    size = int(pr * size)
    pm = pm.scaled(
        size, size,
        Qt.AspectRatioMode.KeepAspectRatio,
        Qt.TransformationMode.SmoothTransformation,
    )

    return pm
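# Minimal usage sketch for get_masked_image(), assuming a QApplication is already running;
# the file path and overlay text are hypothetical:
#
#     label = QtWidgets.QLabel()
#     label.setPixmap(get_masked_image("avatar.jpg", size=64, overlay_text="AB"))
#     label.show()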
def __init__(self):
    super().__init__()
    self.initializeUI()
    self.image = QImage()