def handlePrint(self, data):
    """Render the current document and send it to a printer.

    ``data`` is a ``(document, copies)`` pair.  The widget is re-rendered
    under ``renderLock`` and then printed in a platform specific way:
    on Linux the widget is painted into a 1-bit off-screen image and
    spooled via ``lpr``; on Windows it is grabbed and drawn onto the
    default printer's device context through the win32 APIs.
    """
    with renderLock:
        self.document, copies = data
        self.update()
        if platform == "linux":
            # Paint into an off-screen monochrome image instead of the screen.
            bmp = QImage(self.size[0], self.size[1], QImage.Format_Mono)
            self.paintEvent(None, bmp)
            buffer = QBuffer()
            buffer.open(QBuffer.ReadWrite)
            bmp.save(buffer, "BMP")
            # Round-trip through PIL to re-encode BMP -> PNG for lpr.
            img = Image.open(io.BytesIO(buffer.data()))
            # NOTE(review): fixed world-readable /tmp path — potential
            # symlink/race issue; consider tempfile.NamedTemporaryFile.
            img.save("/tmp/image.png")
            for i in range(copies):
                os.system("lpr /tmp/image.png")
        elif platform == "win32":
            pix = self.grab()
            bmp = QImage(pix)
            buffer = QBuffer()
            buffer.open(QBuffer.ReadWrite)
            bmp.save(buffer, "BMP")
            img = Image.open(io.BytesIO(buffer.data()))
            printerName = win32print.GetDefaultPrinter()
            deviceContext = win32ui.CreateDC()
            deviceContext.CreatePrinterDC(printerName)
            deviceContext.StartDoc("Inventory Label")
            # One printed page per requested copy.
            for i in range(copies):
                deviceContext.StartPage()
                dib = ImageWin.Dib(img)
                dib.draw(deviceContext.GetHandleOutput(),
                         (0, 0, self.size[0], self.size[1]))
                deviceContext.EndPage()
            deviceContext.EndDoc()
            deviceContext.DeleteDC()
def fromqimage(im):
    """Convert a QImage to a PIL image via an in-memory PPM stream."""
    qbuf = QBuffer()
    qbuf.open(QIODevice.ReadWrite)
    im.save(qbuf, 'ppm')
    stream = BytesIO()
    raw = qbuf.data()
    try:
        stream.write(raw)
    except TypeError:
        # workaround for Python 2
        stream.write(str(raw))
    qbuf.close()
    stream.seek(0)
    return PIL.Image.open(stream)
def save(self):
    """
    Public method to save the zoom values.

    Each icon is rendered as a 32x32 PNG and stored as text in a JSON
    map keyed by URL.  Nothing is saved in private-browsing mode or when
    no database path is configured.  I/O errors are deliberately ignored
    (best-effort persistence).
    """
    if not self.__loaded:
        return

    from WebBrowser.WebBrowserWindow import WebBrowserWindow
    if not WebBrowserWindow.isPrivate() and bool(self.__iconDatabasePath):
        db = {}
        for url, icon in self.__iconsDB.items():
            ba = QByteArray()
            buffer = QBuffer(ba)
            buffer.open(QIODevice.WriteOnly)
            icon.pixmap(32).toImage().save(buffer, "PNG")
            db[url] = bytes(buffer.data()).decode(self.__encoding)

        filename = os.path.join(self.__iconDatabasePath, self.__iconsFileName)
        try:
            # 'with' guarantees the handle is closed even if json.dump
            # raises (the original leaked the file object in that case).
            with open(filename, "w") as f:
                json.dump(db, f)
        except (IOError, OSError):
            # ignore silently
            pass
def pixmapToPIL(self):
    """Return the widget's current pixmap converted to a PIL Image."""
    pixmap = self.image.pixmap()
    buf = QBuffer()
    buf.open(QBuffer.ReadWrite)
    pixmap.save(buf, "PNG")
    return Image.open(io.BytesIO(buf.data()))
def store_file(self, id, file):
    """Load an image from ``file``, cache it as <images>/<id>.png and
    return a QIcon for it.

    Returns the already-cached icon when ``file`` is the cache file
    itself, or None when the file cannot be loaded as a pixmap (the
    stale cache file is removed in that case).
    """
    id = id.replace('/', '_')
    directory = ApplicationData.get('images')
    filename = os.path.join(directory, id + '.png')
    if filename == os.path.normpath(file):
        # Asked to store our own cache file: nothing to do.
        return self.iconmap.get(id, None)
    makedirs(directory)
    pixmap = QPixmap()
    if file is not None and pixmap.load(file):
        if pixmap.size().width() > self.max_size or pixmap.size().height() > self.max_size:
            pixmap = pixmap.scaled(self.max_size, self.max_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
        buffer = QBuffer()
        pixmap.save(buffer, 'png')
        # bytes(), not str(): on Python 3 str(QByteArray) yields a
        # "b'...'" repr, and writing str to a 'wb' file raises TypeError.
        data = bytes(buffer.data())
        with open(filename, 'wb') as f:
            f.write(data)
        icon = QIcon(pixmap)
        icon.filename = filename
        icon.content = data
        icon.content_type = 'image/png'
    else:
        unlink(filename)
        icon = None
    self.iconmap[id] = icon
    return icon
def store_data(self, id, data):
    """Store raw image ``data`` for ``id``, caching a (possibly rescaled)
    PNG copy on disk, and return a QIcon for it.

    Returns None (and removes the cache file) when the data cannot be
    loaded as a pixmap.
    """
    id = id.replace('/', '_')
    directory = ApplicationData.get('images')
    filename = os.path.join(directory, id + '.png')
    makedirs(directory)
    pixmap = QPixmap()
    if data is not None and pixmap.loadFromData(data):
        image_size = pixmap.size()
        if image_size.width() > self.max_size or image_size.height() > self.max_size:
            pixmap = pixmap.scaled(self.max_size, self.max_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
        # Re-encode only when the input was not PNG or it was rescaled.
        if imghdr.what(None, data) != 'png' or pixmap.size() != image_size:
            buffer = QBuffer()
            pixmap.save(buffer, 'png')
            # bytes(), not str(): str(QByteArray) produces a "b'...'"
            # repr on Python 3, which is not valid PNG data and cannot
            # be written to a binary file.
            data = bytes(buffer.data())
        with open(filename, 'wb') as f:
            f.write(data)
        icon = QIcon(pixmap)
        icon.filename = filename
        icon.content = data
        icon.content_type = 'image/png'
    else:
        unlink(filename)
        icon = None
    self.iconmap[id] = icon
    return icon
def pmap_to_pil_img(pmap):
    """Convert a QPixmap to a PIL Image via an in-memory PNG."""
    qimg = QImage(pmap)
    buf = QBuffer()
    buf.open(QBuffer.ReadWrite)
    qimg.save(buf, "PNG")
    stream = io.BytesIO(buf.data())
    return Image.open(stream)
def encode_image(self, size: QSize = None, *, fmt="PNG") -> bytes:
    """
    Render to a bitmap image and convert it to a data stream.

    :param size: optional target size passed to draw_image
    :param fmt: image encoder format name (default "PNG")
    :return: the encoded image as bytes
    """
    im = self.draw_image(size)
    buf = QBuffer()
    buf.open(QIODevice.WriteOnly)
    im.save(buf, fmt)
    # Convert the QByteArray to real bytes so the declared return
    # type annotation actually holds for callers.
    return bytes(buf.data())
class Audio:
    """Qt audio recorder/player configuration with raw-PCM buffers and
    WAV export."""

    def __init__(self, chunksize=512, rate=44100, channel=2, sample_size=8,
                 codec="audio/pcm", threshold=500, save_dir=None):
        # Basic capture parameters.
        self.chunksize = chunksize
        self.rate = rate
        self.sample_size = sample_size
        self.channel = channel
        # Bytes per sample used when writing the WAV file.
        self.sampleWidth = 2
        #
        self.format = QAudioFormat()
        self.format.setChannelCount(self.channel)
        self.format.setSampleRate(self.rate)
        self.format.setSampleSize(self.sample_size)
        self.format.setCodec(codec)
        self.format.setByteOrder(QAudioFormat.LittleEndian)  # 1
        # 2: this setting determines the recording quality; a wrong
        # sample type produces a strong buzzing noise.
        self.format.setSampleType(QAudioFormat.UnSignedInt)
        #
        self.block = b""  # bytes type
        # QIODevice() cannot be instantiated directly (it is an abstract
        # C++ class with no Python concrete subclass here), so plain
        # QBuffers are used instead of the QAudioBuffer class.
        self.record_buffer = QBuffer()
        self.play_buffer = QBuffer()
        self.pos = 0
        self.duration = 0
        #
        self.threshold = threshold
        self.save_dir = save_dir
        self.save_path = "./sound/test.wav"

    def saveWave(self):
        """Write the recorded raw PCM data to ``save_path`` as a WAV file."""
        with wave.open(self.save_path, 'wb') as wf:
            wf.setnchannels(self.channel)
            wf.setsampwidth(self.sampleWidth)
            wf.setframerate(self.rate)
            wf.writeframes(self.record_buffer.data())
def QPixmap_to_PIL(self):
    """Return the canvas pixmap encoded as PNG in a BytesIO stream.

    Note: despite the name, this returns an ``io.BytesIO`` (ready for
    ``PIL.Image.open``), not a PIL image — matching the original behavior.
    """
    img = self.canvas.pixmap().toImage()
    buffer = QBuffer()
    buffer.open(QBuffer.ReadWrite)
    img.save(buffer, "PNG")
    # Renamed from `bytes`: the original shadowed the builtin.
    stream = io.BytesIO(buffer.data())
    return stream
def eps(self, filename, rect=None, resolution=72.0, paperColor=None):
    """Create a EPS (Encapsulated Postscript) file for the selected rect
    or the whole page.

    This needs the popplerqt5 module.

    The filename may be a string or a QIODevice object.  The rectangle is
    relative to our top-left position.  Normally vector graphics are
    rendered, but in cases where that is not possible, the resolution will
    be used to determine the DPI for the generated rendering.

    Returns the result of the PostScript conversion for the first page,
    or False when the intermediate PDF rendering failed.
    """
    # First render the selection to an in-memory PDF.
    buf = QBuffer()
    buf.open(QBuffer.WriteOnly)
    success = self.pdf(buf, rect, resolution, paperColor)
    buf.close()
    if success:
        from . import poppler
        for pdf in poppler.PopplerPage.load(buf.data()):
            ps = pdf.document.psConverter()
            # Poppler page numbers are 1-based.
            ps.setPageList([pdf.pageNumber + 1])
            if isinstance(filename, str):
                ps.setOutputFileName(filename)
            else:
                ps.setOutputDevice(filename)
            try:
                # PrintToEPS is not available in every popplerqt5 build;
                # fall back to plain printing options when missing.
                ps.setPSOptions(ps.PSOption(ps.Printing | ps.StrictMargins))
                ps.setPSOptions(
                    ps.PSOption(ps.Printing | ps.StrictMargins | ps.PrintToEPS))
            except AttributeError:
                pass
            ps.setVDPI(resolution)
            ps.setHDPI(resolution)
            # Convert (and return) the first page only.
            return ps.convert()
    return False
def write(self, stream, nodes, mode = MeshWriter.OutputMode.BinaryMode):
    """Write the scene's g-code (plus thumbnail, if available) into a UFP
    (Ultimaker Format Package) archive on ``stream``.

    Returns True on success, False when generating the g-code failed.
    """
    archive = VirtualFile()
    archive.openStream(stream, "application/x-ufp", OpenMode.WriteOnly)

    # Store the g-code from the scene.
    archive.addContentType(extension = "gcode", mime_type = "text/x-gcode")
    gcode_textio = StringIO()  # We have to convert the g-code into bytes.
    gcode_writer = PluginRegistry.getInstance().getPluginObject("GCodeWriter")
    if not gcode_writer.write(gcode_textio, None):
        # The g-code writer failed; the original ignored this result and
        # silently packaged an empty/partial g-code stream.
        Logger.log("e", "Failed to write g-code for the UFP package.")
        return False
    gcode = archive.getStream("/3D/model.gcode")
    gcode.write(gcode_textio.getvalue().encode("UTF-8"))
    archive.addRelation(virtual_path = "/3D/model.gcode", relation_type = "http://schemas.ultimaker.org/package/2018/relationships/gcode")

    # Store the thumbnail.
    if self._snapshot:
        archive.addContentType(extension = "png", mime_type = "image/png")
        thumbnail = archive.getStream("/Metadata/thumbnail.png")
        thumbnail_buffer = QBuffer()
        thumbnail_buffer.open(QBuffer.ReadWrite)
        self._snapshot.save(thumbnail_buffer, "PNG")
        thumbnail.write(thumbnail_buffer.data())
        archive.addRelation(virtual_path = "/Metadata/thumbnail.png", relation_type = "http://schemas.openxmlformats.org/package/2006/relationships/metadata/thumbnail", origin = "/3D/model.gcode")
    else:
        Logger.log("d", "Thumbnail not created, cannot save it")

    archive.close()
    return True
def store_file(self, id, file):
    """Load an image from ``file``, cache it as <images>/<id>.png and
    return a QIcon for it.

    Returns the already-cached icon when ``file`` is the cache file
    itself, or None when the file cannot be loaded as a pixmap.
    """
    id = id.replace('/', '_')
    directory = ApplicationData.get('images')
    filename = os.path.join(directory, id + '.png')
    if filename == os.path.normpath(file):
        # Asked to store our own cache file: nothing to do.
        return self.iconmap.get(id, None)
    makedirs(directory)
    pixmap = QPixmap()
    if file is not None and pixmap.load(file):
        if pixmap.size().width() > self.max_size or pixmap.size().height(
        ) > self.max_size:
            pixmap = pixmap.scaled(self.max_size, self.max_size,
                                   Qt.KeepAspectRatio,
                                   Qt.SmoothTransformation)
        buffer = QBuffer()
        pixmap.save(buffer, 'png')
        # bytes(), not str(...).encode(): on Python 3, str(QByteArray)
        # yields a "b'...'" repr, so encoding it wrote repr text instead
        # of the actual PNG bytes.
        data = bytes(buffer.data())
        with open(filename, 'wb') as f:
            f.write(data)
        icon = QIcon(pixmap)
        icon.filename = filename
        icon.content = data
        icon.content_type = 'image/png'
    else:
        unlink(filename)
        icon = None
    self.iconmap[id] = icon
    return icon
def store_data(self, id, data):
    """Store raw image ``data`` for ``id``, caching a (possibly rescaled)
    PNG copy on disk, and return a QIcon for it.

    Returns None (and removes the cache file) when the data cannot be
    loaded as a pixmap.
    """
    id = id.replace('/', '_')
    directory = ApplicationData.get('images')
    filename = os.path.join(directory, id + '.png')
    makedirs(directory)
    pixmap = QPixmap()
    if data is not None and pixmap.loadFromData(data):
        image_size = pixmap.size()
        if image_size.width() > self.max_size or image_size.height(
        ) > self.max_size:
            pixmap = pixmap.scaled(self.max_size, self.max_size,
                                   Qt.KeepAspectRatio,
                                   Qt.SmoothTransformation)
        # Re-encode only when the input was not PNG or it was rescaled.
        if imghdr.what(None, data) != 'png' or pixmap.size() != image_size:
            buffer = QBuffer()
            pixmap.save(buffer, 'png')
            # bytes(), not str(...).encode(): on Python 3,
            # str(QByteArray) yields a "b'...'" repr, so the file ended
            # up containing repr text rather than PNG data.
            data = bytes(buffer.data())
        with open(filename, 'wb') as f:
            f.write(data)
        icon = QIcon(pixmap)
        icon.filename = filename
        icon.content = data
        icon.content_type = 'image/png'
    else:
        unlink(filename)
        icon = None
    self.iconmap[id] = icon
    return icon
def _encodeSnapshot(self, snapshot):
    """Encode a snapshot image as a base64 JPEG string.

    Picks PyQt5 or PyQt6 depending on the Cura major version.  Returns
    the base64 text, or None when encoding fails (logged as a warning).
    """
    Major = 0
    Minor = 0
    try:
        Major = int(CuraVersion.split(".")[0])
        Minor = int(CuraVersion.split(".")[1])
    except (ValueError, IndexError, AttributeError):
        # Unparsable version string: keep the 0.0 defaults (PyQt5 path).
        # The original bare `except:` also swallowed SystemExit et al.
        pass
    if Major < 5:
        from PyQt5.QtCore import QByteArray, QIODevice, QBuffer
    else:
        from PyQt6.QtCore import QByteArray, QIODevice, QBuffer
    Logger.log("d", "Encoding thumbnail image...")
    try:
        thumbnail_buffer = QBuffer()
        # PyQt6 scoped the open-mode flags into an enum.
        if Major < 5:
            thumbnail_buffer.open(QBuffer.ReadWrite)
        else:
            thumbnail_buffer.open(QBuffer.OpenModeFlag.ReadWrite)
        thumbnail_image = snapshot
        thumbnail_image.save(thumbnail_buffer, "JPG")
        base64_bytes = base64.b64encode(thumbnail_buffer.data())
        base64_message = base64_bytes.decode('ascii')
        thumbnail_buffer.close()
        return base64_message
    except Exception:
        Logger.logException("w", "Failed to encode snapshot image")
def get_plant_name(self, image):
    """Encode the pixmap as JPEG and query the Baidu API for the plant name."""
    raw = QByteArray()
    buf = QBuffer(raw)
    buf.open(QIODevice.WriteOnly)
    image.toImage().save(buf, "jpg")
    return BaiduAPi.get_plant_name(buf.data())
def tooltip(image, pos):
    """Show ``image`` as a base64-embedded <img> tooltip at ``pos``."""
    buf = QBuffer()
    buf.open(QIODevice.WriteOnly)
    image.save(buf, "PNG", quality=100)
    encoded = bytes(buf.data().toBase64()).decode()
    markup = "<p><img src='data:image/png;base64,{}'></p>".format(encoded)
    imageTooltiper.editor.doTooltip(pos, markup)
def createDocument(self):
    """Render the auto-cropped page region to PDF and wrap it as a
    PopplerDocument."""
    from . import poppler
    crop = self.autoCroppedRect()
    pdf_buf = QBuffer()
    pdf_buf.open(QBuffer.WriteOnly)
    success = self.page().pdf(pdf_buf, crop, self.resolution, self.paperColor)
    pdf_buf.close()
    return poppler.PopplerDocument(pdf_buf.data(), self.renderer())
def qimage_to_bytes(qimg: QImage) -> bytes:
    """Serialize ``qimg`` as JPEG and return the raw encoded bytes."""
    buf = QBuffer()
    buf.open(QBuffer.ReadWrite)
    try:
        qimg.save(buf, 'jpg')
        encoded = bytes(buf.data())
    finally:
        buf.close()
    return encoded
def export(self):
    """Render the auto-cropped page region to EPS; return the data on
    success, None otherwise."""
    crop = self.autoCroppedRect()
    eps_buf = QBuffer()
    eps_buf.open(QBuffer.WriteOnly)
    ok = self.page().eps(eps_buf, crop, self.resolution, self.paperColor)
    eps_buf.close()
    if not ok:
        return None
    return eps_buf.data()
def render_png(self, svg_data: str, *, compression=40) -> bytes:
    """
    Rasterize an SVG image to PNG format and return the raw bytes.
    """
    rendered = self._render(svg_data)
    out = QBuffer()
    png_writer = QImageWriter(out, b'PNG')
    png_writer.setCompression(compression)
    png_writer.write(rendered)
    return out.data().data()
def pixmapToBytesIO(pixmap: QPixmap) -> BytesIO:
    """Encode ``pixmap`` as PNG and return it as a rewound BytesIO stream."""
    qbuf = QBuffer()
    qbuf.open(QBuffer.ReadWrite)
    pixmap.toImage().save(qbuf, "PNG")
    stream = BytesIO()
    stream.write(qbuf.data())
    stream.seek(0)
    return stream
def pngbinary2Qlabel(databinary):
    """Build a QLabel displaying the given PNG image bytes.

    :param databinary: raw PNG-encoded image data
    :return: a QLabel with the decoded pixmap set
    """
    pixmap = QtGui.QPixmap()
    # loadFromData accepts the raw bytes directly; the original QBuffer
    # write/read round-trip only copied the data without changing it.
    pixmap.loadFromData(databinary, 'PNG')
    label = QtWidgets.QLabel()
    label.setPixmap(pixmap)
    return label
def getBrowserScreenshot(self):
    """Grab the browser widget as a PNG and return it as a PIL Image."""
    size = self.browser.size()
    shot = QPixmap(size)
    self.browser.render(shot)
    qbuf = QBuffer()
    qbuf.open(QBuffer.ReadWrite)
    shot.save(qbuf, "PNG")
    return Image.open(io.BytesIO(qbuf.data()))
def set_avatar():
    """Let the user pick a .jpg file, scale it to a 256x256 avatar,
    upload it to the server and display it."""
    image_path = QFileDialog.getOpenFileName(window, 'Choose file', '', 'Images (*.jpg)')[0]
    avatar = QImage(image_path).scaled(256, 256, Qt.KeepAspectRatio)
    buf = QBuffer()
    buf.open(QIODevice.ReadWrite)
    avatar.save(buf, 'JPG')
    client.add_my_avatar(buf.data())
    draw_avatar(avatar)
def set_colors(data: bin, fg: QColor, bg: QColor, trans: QColor, swap_fg_bg=False) -> bin:  # pylint: disable=too-many-locals
    """
    Burns foreground and background colors into a raster image, and returns
    the results as a PNG binary.

    Near-black pixels become ``fg``, near-white pixels become ``bg``, and
    pixels close to ``trans`` are made fully transparent.

    :raises UnreadablePictureException: when ``data`` cannot be decoded
    """
    # NOTE: swap_fg_bg is accepted for interface compatibility but unused.
    image = QImage()
    image.loadFromData(data)
    if image.isNull():
        raise UnreadablePictureException(
            'Could not read embedded picture data')
    image = image.convertToFormat(QImage.Format_ARGB32)
    ucharptr = image.bits()
    # byteCount() is already the TOTAL image size in bytes; the original
    # multiplied by height() again, over-sizing the sip buffer view and
    # exposing memory past the pixel data.
    ucharptr.setsize(image.byteCount())

    fg_rgba = qRgba(fg.red(), fg.green(), fg.blue(), fg.alpha()) if fg and fg.isValid() else None
    bg_rgba = qRgba(bg.red(), bg.green(), bg.blue(), bg.alpha()) if bg and bg.isValid() else None

    # Per-channel distance within which a pixel counts as a match.
    COLOR_TOLERANCE = 40
    fg_comp = 0    # foreground matches near-black
    bg_comp = 255  # background matches near-white

    for y in range(image.height()):
        start = y * image.width() * 4
        for x in range(image.width()):
            x_start = x * 4 + start
            rgba = struct.unpack('I', ucharptr[x_start:x_start + 4])[0]
            if trans and abs(qRed(rgba) - trans.red(
            )) < COLOR_TOLERANCE and abs(qGreen(rgba) - trans.green(
            )) < COLOR_TOLERANCE and abs(qBlue(rgba) - trans.blue()) < COLOR_TOLERANCE:
                # Transparent-color match: clear the pixel entirely.
                ucharptr[x_start:x_start + 4] = struct.pack(
                    'I', qRgba(0, 0, 0, 0))
            elif fg_rgba is not None and abs(
                    qRed(rgba) - fg_comp) < COLOR_TOLERANCE and abs(
                    qGreen(rgba) - fg_comp) < COLOR_TOLERANCE and abs(
                    qBlue(rgba) - fg_comp) < COLOR_TOLERANCE:
                ucharptr[x_start:x_start + 4] = struct.pack('I', fg_rgba)
            elif bg_rgba is not None and abs(
                    qRed(rgba) - bg_comp) < COLOR_TOLERANCE and abs(
                    qGreen(rgba) - bg_comp) < COLOR_TOLERANCE and abs(
                    qBlue(rgba) - bg_comp) < COLOR_TOLERANCE:
                ucharptr[x_start:x_start + 4] = struct.pack('I', bg_rgba)

    # convert to PNG
    png_data = QBuffer()
    image.save(png_data, "png")
    return png_data.data()
def QImagetoPIL(self, qimage):
    """Convert a QImage to a PIL Image via an in-memory PNG round-trip."""
    qbuf = QBuffer()
    qbuf.open(QIODevice.ReadWrite)
    qimage.save(qbuf, "PNG")
    stream = io.BytesIO()
    stream.write(qbuf.data())
    qbuf.close()
    stream.seek(0)
    return Image.open(stream)
def fromqimage(im):
    """Convert a QImage to a PIL Image.

    PNG is used when the image carries an alpha channel (PPM cannot
    represent transparency); PPM is used otherwise as it is friendlier
    to Image.open.
    """
    qbuf = QBuffer()
    qbuf.open(QIODevice.ReadWrite)
    fmt = 'png' if im.hasAlphaChannel() else 'ppm'
    im.save(qbuf, fmt)
    stream = BytesIO()
    try:
        stream.write(qbuf.data())
    except TypeError:
        # workaround for Python 2
        stream.write(str(qbuf.data()))
    qbuf.close()
    stream.seek(0)
    return Image.open(stream)
def tooltip(image, pos):
    """
    Display a tooltip with an image at the given position.
    """
    buf = QBuffer()
    buf.open(QIODevice.WriteOnly)
    image.save(buf, "PNG", quality=100)
    b64 = bytes(buf.data().toBase64()).decode()
    html = "<p><img src='data:image/png;base64,{}'></p>".format(b64)
    ImageTooltip.editor.doTooltip(pos, html)
def load_from_file(self):  # producer slot
    """Let the user pick an image file, display it, and publish its
    serialized form to the consumer via shared memory.

    Optionally (SHARED_STRUCT == 1) also updates a small control struct
    in a second shared-memory segment: bumps a counter, stores the file
    name, and honors the consumer's stop flag by exiting.
    """
    self.ui.label.setText("Select an image file")
    file_name, t = QFileDialog.getOpenFileName(self, None, None,
                                               "Images (*.png *.jpg)")
    if not file_name:
        return
    image = QImage()
    if not image.load(file_name):
        self.ui.label.setText(
            "Selected file is not an image, please select another.")
        return
    self.ui.label.setPixmap(QPixmap.fromImage(image))
    # Get the image data: stream the QImage into an in-memory buffer so
    # its exact serialized size is known before allocating shared memory.
    buf = QBuffer()
    buf.open(QBuffer.ReadWrite)
    out = QDataStream(buf)
    out << image
    try:
        from prodcon_ipc.producer_ipc import ScopedProducer
        # ScopedProducer handles segment creation/locking for buf.size() bytes.
        with ScopedProducer(self.producer_ipc, buf.size()) as sp:
            # Copy image data from buf into shared memory area:
            sp.data()[:sp.size()] = buf.data().data()[:sp.size()]
    except Exception as err:
        self.ui.label.setText(str(err))
    if SHARED_STRUCT == 1:
        # Read from shared memory, increase value and write it back:
        if self.shmem_config.isAttached() or self.shmem_config.attach():
            if self.shmem_config.lock():
                counter, stop_flag, _ = struct.unpack(
                    STRUCT_FORMAT, self.shmem_config.constData())
                # File name is truncated to 30 chars to fit the struct slot.
                data = struct.pack(STRUCT_FORMAT, counter + 1, stop_flag,
                                   str(os.path.basename(file_name)[:30]))
                size = min(struct.calcsize(STRUCT_FORMAT),
                           self.shmem_config.size())
                self.shmem_config.data()[:size] = data[:size]
                self.shmem_config.unlock()
                if stop_flag:  # stop producing?
                    logzero.logger.info(
                        "Consumer requested to stop the production.")
                    sys.exit(0)
            else:
                logzero.logger.error("unable to lock " +
                                     self.shmem_config.key())
            #self.shmem_config.detach()
        else:
            logzero.logger.error("unable to attach " +
                                 self.shmem_config.key())
def fillImageFaster(self, begin, paint, current, image):
    """Flood-fill ``image`` at point ``begin`` with color ``paint`` and
    repaint the widget.

    The QImage is round-tripped through PNG into PIL so PIL's C-level
    floodfill can be used (much faster than filling QImage pixels from
    Python).  ``current`` is accepted for interface compatibility but
    not used here.
    """
    buffer = QBuffer()
    buffer.open(QBuffer.ReadWrite)
    image.save(buffer, "PNG")
    pil_im = Image.open(io.BytesIO(buffer.data()))
    ImageDraw.floodfill(pil_im, begin,
                        (paint.red(), paint.green(), paint.blue()))
    # Convert the filled PIL image back to a QImage; any alpha channel
    # is dropped by the RGB conversion.
    self.image().image = QtGui.QImage(
        pil_im.convert("RGB").tobytes("raw", "RGB"),
        pil_im.size[0], pil_im.size[1], QtGui.QImage.Format_RGB888)
    self.update()
def scale(image, width, height):
    """Return a copy of ``image`` scaled to fit width x height (aspect
    ratio preserved, same mime type).  The original is returned untouched
    when its data cannot be decoded."""
    decoded = QImage.fromData(image.data, format_for(image.mime))
    if decoded.isNull():
        return image
    resized = decoded.scaled(width, height, Qt.KeepAspectRatio, Qt.SmoothTransformation)
    buf = QBuffer()
    buf.open(QIODevice.WriteOnly)
    resized.save(buf, format_for(image.mime))
    buf.close()
    return Image(mime=image.mime, data=buf.data(), desc=image.desc, type_=image.type)
def pixmap_to_bytes(self, image, image_format='jpg'):
    """Convert a pixmap to raw encoded data.

    :param image: the pixmap to encode
    :param image_format: encoder format name (str)
    :return: the encoded image data (QByteArray)
    """
    backing = QByteArray()
    buf = QBuffer(backing)
    buf.open(QIODevice.WriteOnly)
    image.save(buf, image_format)
    return buf.data()
def mask_model(self):
    """Capture a frame, run the mask-detection Keras model on it and
    update the title label with the result."""
    self.capture.capture()
    try:
        if self.scaledImage is None:
            return
        buffer = QBuffer()
        buffer.open(QBuffer.ReadWrite)
        self.scaledImage.save(buffer, "jpg")
        image = Image.open(io.BytesIO(buffer.data()))
    except TypeError:
        # The captured frame was not yet a valid image.
        return
    # The model expects a batch of one 224x224 RGB image.
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    # Resize/crop from the center, exactly as Teachable Machine does.
    size = (224, 224)
    image = ImageOps.fit(image, size, Image.ANTIALIAS)
    image_array = np.asarray(image)
    # Normalize pixel values into [-1, 1].
    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
    data[0] = normalized_image_array
    # Run the inference.
    prediction = self.model.predict(data)
    print(prediction)
    if prediction[0][0] > 0.9 and not self.shield:
        # Bug fix: the original assigned a local `shield` variable here,
        # so the instance flag was never actually set.
        self.shield = True
        self.title_label.setText("통과입니다.")
        self.title_label.setStyleSheet("color : #0000FF")
    else:
        self.title_label.setText("마스크를 착용해주세요.")
        self.title_label.setStyleSheet("color : #00FF00")
def loadFromFile(self):
    """
    This slot function is called when the "Load Image From File..."
    button is pressed on the first Dialog process.

    First, it tests whether the process is already connected to a shared
    memory segment and, if so, detaches from that segment.  This ensures
    that we always start the example from the beginning if we run it
    multiple times with the same two Dialog processes.  After detaching
    from an existing shared memory segment, the user is prompted to
    select an image file.  The selected file is loaded into a QImage.

    The QImage is displayed in the Dialog and streamed into a QBuffer
    with a QDataStream.

    Next, it gets a new shared memory segment from the system big enough
    to hold the image data in the QBuffer, and it locks the segment to
    prevent the second Dialog process from accessing it.  Then it copies
    the image from the QBuffer into the shared memory segment.  Finally,
    it unlocks the shared memory segment so the second Dialog process can
    access it.

    After this function runs, the user is expected to press the
    "Load Image from Shared Memory" button on the second Dialog process.
    """
    if self.sharedMemory.isAttached():
        self.detach()
    self.ui.label.setText("Select an image file")
    fileName, _ = QFileDialog.getOpenFileName(self, None, None,
                                              "Images (*.png *.xpm *.jpg)")
    image = QImage()
    if not image.load(fileName):
        self.ui.label.setText(
            "Selected file is not an image, please select another.")
        return
    self.ui.label.setPixmap(QPixmap.fromImage(image))
    # Load into shared memory: stream the image into a buffer first so
    # its serialized size is known before the segment is created.
    buf = QBuffer()
    buf.open(QBuffer.ReadWrite)
    out = QDataStream(buf)
    out << image
    size = buf.size()
    if not self.sharedMemory.create(size):
        self.ui.label.setText("Unable to create shared memory segment.")
        return
    # Clamp to the actually-created segment size.
    size = min(self.sharedMemory.size(), size)
    self.sharedMemory.lock()
    # Copy image data from buf into shared memory area.
    self.sharedMemory.data()[:] = buf.data().data()
    self.sharedMemory.unlock()
def fromqimage(im):
    """
    :param im: A PIL Image object, or a file name (given either as
               Python string or a PyQt string object)
    """
    qbuf = QBuffer()
    qbuf.open(QIODevice.ReadWrite)
    # PNG preserves the alpha channel; PPM is more friendly with
    # Image.open for opaque images.
    fmt = 'png' if im.hasAlphaChannel() else 'ppm'
    im.save(qbuf, fmt)
    stream = BytesIO()
    try:
        stream.write(qbuf.data())
    except TypeError:
        # workaround for Python 2
        stream.write(str(qbuf.data()))
    qbuf.close()
    stream.seek(0)
    return Image.open(stream)
def write(self, stream, nodes, mode = MeshWriter.OutputMode.BinaryMode):
    """Write the scene into a UFP (Ultimaker Format Package) archive on
    ``stream``: the sliced g-code, a thumbnail snapshot (if one can be
    created), and the XML profiles of all materials in use.

    Returns True on success, False when the g-code cannot be generated
    or a material profile cannot be resolved/serialized.
    """
    archive = VirtualFile()
    archive.openStream(stream, "application/x-ufp", OpenMode.WriteOnly)

    #Store the g-code from the scene.
    archive.addContentType(extension = "gcode", mime_type = "text/x-gcode")
    gcode_textio = StringIO() #We have to convert the g-code into bytes.
    gcode_writer = cast(MeshWriter, PluginRegistry.getInstance().getPluginObject("GCodeWriter"))
    success = gcode_writer.write(gcode_textio, None)
    if not success: #Writing the g-code failed. Then I can also not write the gzipped g-code.
        self.setInformation(gcode_writer.getInformation())
        return False
    gcode = archive.getStream("/3D/model.gcode")
    gcode.write(gcode_textio.getvalue().encode("UTF-8"))
    archive.addRelation(virtual_path = "/3D/model.gcode", relation_type = "http://schemas.ultimaker.org/package/2018/relationships/gcode")

    self._createSnapshot()

    #Store the thumbnail.
    if self._snapshot:
        archive.addContentType(extension = "png", mime_type = "image/png")
        thumbnail = archive.getStream("/Metadata/thumbnail.png")
        thumbnail_buffer = QBuffer()
        thumbnail_buffer.open(QBuffer.ReadWrite)
        thumbnail_image = self._snapshot
        thumbnail_image.save(thumbnail_buffer, "PNG")
        thumbnail.write(thumbnail_buffer.data())
        archive.addRelation(virtual_path = "/Metadata/thumbnail.png", relation_type = "http://schemas.openxmlformats.org/package/2006/relationships/metadata/thumbnail", origin = "/3D/model.gcode")
    else:
        Logger.log("d", "Thumbnail not created, cannot save it")

    # Store the material.
    application = Application.getInstance()
    machine_manager = application.getMachineManager()
    material_manager = application.getMaterialManager()
    global_stack = machine_manager.activeMachine

    material_extension = "xml.fdm_material"
    material_mime_type = "application/x-ultimaker-material-profile"
    try:
        archive.addContentType(extension = material_extension, mime_type = material_mime_type)
    except:
        # The content type may already exist from a previous write.
        Logger.log("w", "The material extension: %s was already added", material_extension)

    added_materials = []
    for extruder_stack in global_stack.extruders.values():
        material = extruder_stack.material
        try:
            material_file_name = material.getMetaData()["base_file"] + ".xml.fdm_material"
        except KeyError:
            Logger.log("w", "Unable to get base_file for the material %s", material.getId())
            continue
        material_file_name = "/Materials/" + material_file_name

        # The same material should not be added again.
        if material_file_name in added_materials:
            continue

        material_root_id = material.getMetaDataEntry("base_file")
        material_group = material_manager.getMaterialGroup(material_root_id)
        if material_group is None:
            Logger.log("e", "Cannot find material container with root id [%s]", material_root_id)
            return False

        material_container = material_group.root_material_node.getContainer()
        try:
            serialized_material = material_container.serialize()
        except NotImplementedError:
            Logger.log("e", "Unable serialize material container with root id: %s", material_root_id)
            return False

        material_file = archive.getStream(material_file_name)
        material_file.write(serialized_material.encode("UTF-8"))
        archive.addRelation(virtual_path = material_file_name,
                            relation_type = "http://schemas.ultimaker.org/package/2018/relationships/material",
                            origin = "/3D/model.gcode")

        added_materials.append(material_file_name)

    archive.close()
    return True
def to_jpeg(self, quality=None):
    """Encode the held image as JPEG and return the raw bytes.

    ``quality`` defaults to ``defaults.JPEG_QUALITY`` when not given.
    """
    effective_quality = defaults.JPEG_QUALITY if quality is None else quality
    buf = QBuffer()
    self.img.save(buf, 'jpeg', effective_quality)
    return bytes(buf.data())
def to_png(self, complevel=defaults.PNG_COMPRESSION_LEVEL):
    """Encode the held image as PNG and return the raw bytes.

    The 0-9 compression level is mapped onto Qt's 0-100 quality scale
    (higher complevel -> lower quality value -> smaller output).
    """
    qt_quality = 90 - complevel * 10
    buf = QBuffer()
    self.img.save(buf, 'png', qt_quality)
    return bytes(buf.data())
def __writeTileset(self, w, tileset, firstGid):
    """Write a <tileset> element (TMX format) to the XML writer ``w``.

    External tilesets only get a source reference; embedded ones get
    full geometry, properties, terrain types and per-tile data
    (including base64-embedded tile images for image-less tilesets).
    """
    w.writeStartElement("tileset")
    if (firstGid > 0):
        w.writeAttribute("firstgid", str(firstGid))
    fileName = tileset.fileName()
    if fileName != '':
        source = fileName
        if (not self.mUseAbsolutePaths):
            source = self.mMapDir.relativeFilePath(source)
        w.writeAttribute("source", source)
        # Tileset is external, so no need to write any of the stuff below
        w.writeEndElement()
        return
    w.writeAttribute("name", tileset.name())
    w.writeAttribute("tilewidth", str(tileset.tileWidth()))
    w.writeAttribute("tileheight", str(tileset.tileHeight()))
    tileSpacing = tileset.tileSpacing()
    margin = tileset.margin()
    if (tileSpacing != 0):
        w.writeAttribute("spacing", str(tileSpacing))
    if (margin != 0):
        w.writeAttribute("margin", str(margin))
    w.writeAttribute("tilecount", str(tileset.tileCount()))
    offset = tileset.tileOffset()
    if (not offset.isNull()):
        w.writeStartElement("tileoffset")
        w.writeAttribute("x", str(offset.x()))
        w.writeAttribute("y", str(offset.y()))
        w.writeEndElement()
    # Write the tileset properties
    self.__writeProperties(w, tileset.properties())
    # Write the image element
    imageSource = tileset.imageSource()
    if imageSource != '':
        w.writeStartElement("image")
        source = imageSource
        if (not self.mUseAbsolutePaths):
            source = self.mMapDir.relativeFilePath(source)
        w.writeAttribute("source", source)
        transColor = tileset.transparentColor()
        if (transColor.isValid()):
            # Bug fix: [1:] strips the leading '#' from "#rrggbb"; the
            # original [1] wrote only the single first hex digit.
            w.writeAttribute("trans", transColor.name()[1:])
        if (tileset.imageWidth() > 0):
            w.writeAttribute("width", str(tileset.imageWidth()))
        if (tileset.imageHeight() > 0):
            w.writeAttribute("height", str(tileset.imageHeight()))
        w.writeEndElement()
    # Write the terrain types
    if (tileset.terrainCount() > 0):
        w.writeStartElement("terraintypes")
        for i in range(tileset.terrainCount()):
            t = tileset.terrain(i)
            w.writeStartElement("terrain")
            w.writeAttribute("name", t.name())
            w.writeAttribute("tile", str(t.imageTileId()))
            self.__writeProperties(w, t.properties())
            w.writeEndElement()
        w.writeEndElement()
    # Write the properties for those tiles that have them
    for i in range(tileset.tileCount()):
        tile = tileset.tileAt(i)
        properties = tile.properties()
        terrain = tile.terrain()
        probability = tile.probability()
        objectGroup = tile.objectGroup()
        # 0xFFFFFFFF means "no terrain assigned".
        if (not properties.isEmpty() or terrain != 0xFFFFFFFF
                or probability != 1.0 or imageSource == ''
                or objectGroup or tile.isAnimated()):
            w.writeStartElement("tile")
            w.writeAttribute("id", str(i))
            if (terrain != 0xFFFFFFFF):
                w.writeAttribute("terrain", makeTerrainAttribute(tile))
            if (probability != 1.0):
                w.writeAttribute("probability", str(probability))
            if (not properties.isEmpty()):
                self.__writeProperties(w, properties)
            if imageSource == '':
                w.writeStartElement("image")
                tileSize = tile.size()
                if (not tileSize.isNull()):
                    w.writeAttribute("width", str(tileSize.width()))
                    w.writeAttribute("height", str(tileSize.height()))
                if (tile.imageSource() == ''):
                    # Tile has an in-memory image only: embed it as
                    # base64-encoded PNG data.
                    w.writeAttribute("format", "png")
                    w.writeStartElement("data")
                    w.writeAttribute("encoding", "base64")
                    buffer = QBuffer()
                    tile.image().save(buffer, "png")
                    w.writeCharacters(buffer.data().toBase64())
                    w.writeEndElement()  # </data>
                else:
                    source = tile.imageSource()
                    if (not self.mUseAbsolutePaths):
                        source = self.mMapDir.relativeFilePath(source)
                    w.writeAttribute("source", source)
                w.writeEndElement()  # </image>
            if (objectGroup):
                self.__writeObjectGroup(w, objectGroup)
            if (tile.isAnimated()):
                frames = tile.frames()
                w.writeStartElement("animation")
                for frame in frames:
                    w.writeStartElement("frame")
                    w.writeAttribute("tileid", str(frame.tileId))
                    w.writeAttribute("duration", str(frame.duration))
                    w.writeEndElement()  # </frame>
                w.writeEndElement()  # </animation>
            w.writeEndElement()  # </tile>
    w.writeEndElement()  # </tileset>