def feed(self, document):
    """Render one frame described by *document* and stream it as raw RGB24.

    document: mapping with keys "Map", "Bots", "Metrics" and optionally
    "Photons"; each drawable exposes a .draw(QPainter) method.
    The composed frame is written to self.pipe.stdin (ffmpeg-style raw video).
    """
    self.window.fill(QColor.fromRgba(0xFFFFFFFF))
    # Fade the overlay once for every DARKEN_CYCLE boundary crossed since the last frame.
    n_darken = count_multiples(cycle(self.frame_counter), cycle(self.frame_counter + 1), DARKEN_CYCLE)
    for _ in range(n_darken):
        self.darken_overlay(QPainter(self.overlay))
    for m in document["Map"]:
        m.draw(QPainter(self.map))
    # "Photons" is an optional key in the document schema.
    if "Photons" in document and document["Photons"] is not None:
        for photon in document["Photons"]:
            photon.draw(QPainter(self.overlay))
    for bot in document["Bots"]:
        bot.draw(QPainter(self.map))
    self.draw_label(QPainter(self.window), document["Metrics"])
    self.compose_layers()
    self.frame_counter += 1
    out_image = QImage(self.window).convertToFormat(QImage.Format_RGB888)
    width = out_image.width()
    height = out_image.height()
    ptr = out_image.constBits()
    s = ptr.asstring(width * height * 3)
    # BUG FIX: np.fromstring is deprecated (removed in numpy >= 1.24);
    # np.frombuffer is the supported equivalent for binary data.
    arr = np.frombuffer(s, dtype=np.uint8).reshape((height, width, 3))
    self.pipe.stdin.write(arr.tobytes())
def create_font_array(self):
    """Upload the pre-rendered glyph PNGs (font/<code>.png) into a GL 2D texture array.

    Sets self.tex_id (the texture handle) and self.char_ar (glyph height/width ratio).
    """
    # Load the first image to get font size
    img = QImage(os.path.join(settings.gfx_path, 'font/32.png'))
    imgsize = (img.width(), img.height())
    self.char_ar = float(imgsize[1]) / imgsize[0]  # character aspect ratio (h/w)
    # Set-up the texture array: one layer per glyph, allocated empty here and
    # filled per-layer below.
    self.tex_id = gl.glGenTextures(1)
    gl.glBindTexture(gl.GL_TEXTURE_2D_ARRAY, self.tex_id)
    gl.glTexImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, gl.GL_RGBA8, imgsize[0], imgsize[1], 127 - 30, 0, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, None)
    gl.glTexParameteri(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
    gl.glTexParameterf(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_BORDER)
    gl.glTexParameterf(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_BORDER)
    # Glyph layers cover character codes 30-126 (97 layers), i.e. two slots
    # before the printable ASCII range 32-126 (space, letters, numbers,
    # brackets, punctuation marks).
    # NOTE(review): this assumes font/30.png and font/31.png exist on disk —
    # confirm, since a sibling implementation elsewhere in this file starts at 32.
    for i in range(30, 127):
        img = QImage(os.path.join(settings.gfx_path, 'font/%d.png' % i)).convertToFormat(QImage.Format_ARGB32)
        ptr = c_void_p(int(img.constBits()))  # raw pixel pointer; img must stay alive for the upload
        gl.glTexSubImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, 0, 0, i - 30, imgsize[0], imgsize[1], 1, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, ptr)
def load_lcd_font():
    """Build a GL 2D texture array from every mcp_font/*.png glyph; return its id."""
    glyph_files = sorted(glob('mcp_font/*.png'))
    first = QImage(glyph_files[0])
    width, height = first.width(), first.height()
    # Allocate the array texture: one layer per glyph file, filled below.
    texture = gl.glGenTextures(1)
    gl.glBindTexture(gl.GL_TEXTURE_2D_ARRAY, texture)
    gl.glTexImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, gl.GL_RGBA8, width, height, len(glyph_files), 0, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, None)
    # Linear filtering, clamp-to-border wrapping on both axes.
    for pname, value in ((gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR),
                         (gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)):
        gl.glTexParameteri(gl.GL_TEXTURE_2D_ARRAY, pname, value)
    for pname, value in ((gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_BORDER),
                         (gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_BORDER)):
        gl.glTexParameterf(gl.GL_TEXTURE_2D_ARRAY, pname, value)
    # Upload each glyph into its own layer, normalised to ARGB32 first.
    for layer, fname in enumerate(glyph_files):
        glyph = QImage(fname).convertToFormat(QImage.Format_ARGB32)
        pixels = c_void_p(int(glyph.constBits()))
        gl.glTexSubImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, 0, 0, layer, width, height, 1, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, pixels)
    return texture
def imageAsArray(image: QImage) -> np.ndarray:
    """Convert *image* to a horizontally mirrored (H, W, 3) uint8 ndarray."""
    image = image.convertToFormat(QImage.Format_RGB32)
    width = image.width()
    height = image.height()
    ptr = image.constBits()
    ptr.setsize(height * width * 4)  # sip voidptr needs an explicit size before frombuffer
    # NOTE(review): Format_RGB32 stores pixels as 0xffRRGGBB, so on little-endian
    # hosts the in-memory byte order is B, G, R, 0xff — the original "GBRff"
    # label looks off; confirm.
    arr = np.frombuffer(ptr, np.uint8).reshape((height, width, 4))  # GBRff
    arr = np.delete(arr, -1, axis=2)  # drop the 4th (fill/alpha) byte  # GBR
    # NOTE(review): np.fliplr mirrors the image along the width axis (left-right
    # flip); if a channel reversal (BGR->RGB) was intended, arr[..., ::-1] would
    # be required instead — confirm against callers.
    return np.fliplr(arr)
def load_texture(fname):
    """Load image *fname* into a new mipmapped GL_TEXTURE_2D; return the texture id."""
    # BUG FIX: QImage(fname) keeps whatever pixel format the file has
    # (RGB888, indexed, ...), but the pixels are uploaded as GL_BGRA.
    # Convert to ARGB32 first — as the other texture loaders in this file do —
    # so the in-memory byte layout matches the upload format.
    img = QImage(fname).convertToFormat(QImage.Format_ARGB32)
    ptr = c_void_p(int(img.constBits()))  # img must stay alive until after glTexImage2D
    tex_id = gl.glGenTextures(1)
    gl.glBindTexture(gl.GL_TEXTURE_2D, tex_id)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, img.width(), img.height(), 0, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, ptr)
    gl.glGenerateMipmap(gl.GL_TEXTURE_2D)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR_MIPMAP_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
    return tex_id
def load_texture(fname):
    """Load image *fname* into a new mipmapped GL_TEXTURE_2D; return the texture id."""
    # BUG FIX: the file's native format (RGB888, indexed, ...) was previously
    # uploaded as-is while declaring GL_BGRA; normalise to ARGB32 first so the
    # byte layout matches the declared upload format (consistent with the other
    # loaders in this file).
    img = QImage(fname).convertToFormat(QImage.Format_ARGB32)
    ptr = c_void_p(int(img.constBits()))  # keep img alive until the upload completes
    tex_id = gl.glGenTextures(1)
    gl.glBindTexture(gl.GL_TEXTURE_2D, tex_id)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, img.width(), img.height(), 0, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, ptr)
    gl.glGenerateMipmap(gl.GL_TEXTURE_2D)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR_MIPMAP_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
    return tex_id
def initHeightMap(self, fileName):
    """Read an indexed-color height image and return its red channel as a flat bytearray.

    Each pixel byte is a palette index; the height value kept is the red
    component of the palette entry.  The result has self.layerDataSize**2 bytes.
    """
    heightImage = QImage(fileName)
    bits = heightImage.constBits().asarray(heightImage.byteCount())
    colorTable = heightImage.colorTable()
    side = self.layerDataSize
    layerData = bytearray(side ** 2)
    # Single flat pass over all pixels (row-major), same order as the image bytes.
    for index in range(side ** 2):
        layerData[index] = qRed(colorTable[bits[index]])
    return layerData
def run(self):
    """Worker-thread loop: convert queued QVideoFrames to RGB ndarrays and emit them.

    Consumes self.frame (produced elsewhere under self._mutex), emits each
    converted frame via self.ndarray_available, and sleeps on self._condition
    when no new frame is pending.  Exits when self._abort is set.
    """
    self._abort = False
    while True:
        # Take the pending frame under the lock and clear the slot so the
        # producer can publish the next one.
        with QMutexLocker(self._mutex):
            if self._abort:
                break
            frame = self.frame
            self.frame = None
        pixel_format = frame.pixelFormat()
        image_format = QVideoFrame.imageFormatFromPixelFormat(pixel_format)
        if image_format == QImage.Format_Invalid:
            qDebug("WARNING: Could not convert video frame to image!")
            # NOTE(review): `return` ends the whole worker thread on a single
            # bad frame (rather than skipping it) — confirm this is intended.
            return
        if not frame.map(QAbstractVideoBuffer.ReadOnly):
            qDebug("WARNING: Could not map video frame!")
            return
        width = frame.width()
        height = frame.height()
        bytes_per_line = frame.bytesPerLine()
        # Wrap the mapped frame memory without copying, then convert to a
        # predictable 32-bit format before unmapping.
        image = QImage(frame.bits(), width, height, bytes_per_line, image_format)
        image = image.convertToFormat(QImage.Format_RGB32)
        frame.unmap()
        # fix upside-down data for windows
        if platform.system() == "Windows":
            image = image.mirrored(vertical=True)
        # now convert QImage to ndarray
        pointer = image.constBits()
        pointer.setsize(image.byteCount())
        array = np.array(pointer).reshape(image.height(), image.width(), 4)
        # get rid of the transparency channel and organize the colors as rgb
        # NB: it would be safer to figure out the image format first, and where
        # the transparency channel is stored...
        array = array[:, :, 0:3:][:, :, ::-1]
        self.ndarray_available.emit(array)
        # see if new data is available, go to sleep if not
        with QMutexLocker(self._mutex):
            if self.frame is None:
                self._condition.wait(self._mutex)
def load_lcd_font():
    """Create a GL texture array holding all LCD font glyphs from mcp_font/; return its id."""
    files = sorted(glob('mcp_font/*.png'))
    probe = QImage(files[0])  # all glyphs are assumed to share this size
    size = (probe.width(), probe.height())
    handle = gl.glGenTextures(1)
    gl.glBindTexture(gl.GL_TEXTURE_2D_ARRAY, handle)
    # Reserve one layer per glyph file; data is streamed in per layer below.
    gl.glTexImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, gl.GL_RGBA8, size[0], size[1],
                    len(files), 0, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, None)
    gl.glTexParameteri(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
    gl.glTexParameterf(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_BORDER)
    gl.glTexParameterf(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_BORDER)
    for slot, path in enumerate(files):
        image = QImage(path).convertToFormat(QImage.Format_ARGB32)
        data = c_void_p(int(image.constBits()))
        gl.glTexSubImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, 0, 0, slot, size[0], size[1],
                           1, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, data)
    return handle
def create_font_array(self, path):
    """Build the glyph texture array for printable ASCII 32-126 from PNGs under *path*.

    Sets self.tex_id (GL texture handle) and self.char_ar (glyph height/width ratio).
    """
    # Probe the first glyph to learn the common bitmap size.
    probe = QImage(path + '32.png')
    size = (probe.width(), probe.height())
    self.char_ar = float(size[1]) / size[0]
    # Allocate an empty array texture with one layer per character code.
    self.tex_id = gl.glGenTextures(1)
    gl.glBindTexture(gl.GL_TEXTURE_2D_ARRAY, self.tex_id)
    gl.glTexImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, gl.GL_RGBA8, size[0], size[1],
                    127 - 32, 0, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, None)
    gl.glTexParameteri(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
    gl.glTexParameterf(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_BORDER)
    gl.glTexParameterf(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_BORDER)
    # One upload per character: space, letters, digits, brackets, punctuation.
    for code in range(32, 127):
        glyph = QImage(path + '%d.png' % code).convertToFormat(QImage.Format_ARGB32)
        raw = c_void_p(int(glyph.constBits()))
        gl.glTexSubImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, 0, 0, code - 32, size[0], size[1],
                           1, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, raw)
def create_font_array(self, char_height=62, pixel_margin=1, font_family='Courier', font_weight=50):
    """Rasterize ASCII 32-126 with QPainter into a GL 2D texture array.

    char_height: requested glyph pixel size; pixel_margin: padding around each
    glyph; font_family/font_weight: QFont parameters.  Sets self.tex_id and
    self.char_ar (cell height/width ratio).
    """
    # Load font and get the dimensions of one character (assuming monospaced font)
    f = QFont(font_family)
    f.setPixelSize(char_height)
    f.setWeight(font_weight)
    fm = QFontMetrics(f, QImage())
    # NOTE(review): the char_height parameter is reused below as the measured
    # glyph-height accumulator; from here on it no longer holds the requested
    # pixel size (which has already been applied to the font above).
    char_width = char_height = 0
    char_y = 999
    for i in range(32, 127):
        bb = fm.boundingRect(chr(i))
        char_width = max(char_width, bb.width())
        char_height = max(char_height, bb.height())
        char_y = min(char_y, bb.y())  # topmost ink position across all glyphs
    imgsize = (char_width + 2 * pixel_margin, char_height + 2 * pixel_margin)
    self.char_ar = float(imgsize[1]) / imgsize[0]  # cell aspect ratio (h/w)
    # init the image and the painter that will draw the characters to each image
    img = QImage(imgsize[0], imgsize[1], QImage.Format_ARGB32)
    ptr = c_void_p(int(img.constBits()))  # raw pixel pointer, re-uploaded after each draw
    painter = QPainter(img)
    painter.setFont(f)
    painter.setPen(QColor(255, 255, 255, 255))
    # Set-up the texture array: one layer per printable ASCII character.
    self.tex_id = gl.glGenTextures(1)
    gl.glBindTexture(gl.GL_TEXTURE_2D_ARRAY, self.tex_id)
    gl.glTexImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, gl.GL_RGBA8, imgsize[0], imgsize[1], 127 - 32, 0, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, None)
    gl.glTexParameteri(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
    gl.glTexParameterf(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_BORDER)
    gl.glTexParameterf(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_BORDER)
    # We're using the ASCII range 32-126; space, uppercase, lower case, numbers, brackets, punctuation marks
    for i in range(32, 127):
        img.fill(0)  # clear the cell before drawing the next glyph
        painter.drawText(pixel_margin, pixel_margin - char_y, chr(i))
        gl.glTexSubImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, 0, 0, i - 32, imgsize[0], imgsize[1], 1, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, ptr)
    # We're done, close the painter, and return the texture ID, char width and char height
    painter.end()
def create_font_array(self, char_height=62, pixel_margin=1, font_family='Courier', font_weight=50):
    """Rasterize printable ASCII (32-126) into a GL 2D texture array, one layer per glyph.

    Sets self.tex_id (texture handle) and self.char_ar (cell height/width ratio).
    """
    # Configure the font at the requested pixel size and weight.
    font = QFont(font_family)
    font.setPixelSize(char_height)
    font.setWeight(font_weight)
    metrics = QFontMetrics(font, QImage())
    # Measure the widest/tallest glyph (monospaced assumption) and the topmost
    # ink position, so every character fits the same cell.
    glyph_w = glyph_h = 0
    top_y = 999
    for code in range(32, 127):
        bounds = metrics.boundingRect(chr(code))
        glyph_w = max(glyph_w, bounds.width())
        glyph_h = max(glyph_h, bounds.height())
        top_y = min(top_y, bounds.y())
    cell = (glyph_w + 2 * pixel_margin, glyph_h + 2 * pixel_margin)
    self.char_ar = float(cell[1]) / cell[0]
    # One reusable scratch image; its pixel buffer is uploaded after each draw.
    scratch = QImage(cell[0], cell[1], QImage.Format_ARGB32)
    pixels = c_void_p(int(scratch.constBits()))
    painter = QPainter(scratch)
    painter.setFont(font)
    painter.setPen(QColor(255, 255, 255, 255))
    # Allocate the array texture with one layer per character.
    self.tex_id = gl.glGenTextures(1)
    gl.glBindTexture(gl.GL_TEXTURE_2D_ARRAY, self.tex_id)
    gl.glTexImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, gl.GL_RGBA8, cell[0], cell[1],
                    127 - 32, 0, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, None)
    gl.glTexParameteri(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
    gl.glTexParameterf(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_BORDER)
    gl.glTexParameterf(gl.GL_TEXTURE_2D_ARRAY, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_BORDER)
    # Draw each character into the cleared cell and upload it to its layer.
    for code in range(32, 127):
        scratch.fill(0)
        painter.drawText(pixel_margin, pixel_margin - top_y, chr(code))
        gl.glTexSubImage3D(gl.GL_TEXTURE_2D_ARRAY, 0, 0, 0, code - 32, cell[0], cell[1],
                           1, gl.GL_BGRA, gl.GL_UNSIGNED_BYTE, pixels)
    painter.end()
def b_layer_create(self):
    """Render the generated SVG into a new "Panels" paint layer of the active Krita document."""
    # Write the SVG to a temp file so QImage can load it.
    svg_path = tempfile.gettempdir() + "/rcpg10.svg"
    body = self.rcpg_object.get_svg_string()
    with open(svg_path, 'w', encoding='utf-8') as f:
        f.write(body)
    app = Krita.instance()
    doc = app.activeDocument()
    if doc is not None:
        root = doc.rootNode()
        layer = doc.createNode("Panels", "paintLayer")
        img = QImage(svg_path)
        img = img.scaled(doc.width(), doc.height(), aspectRatioMode=Qt.KeepAspectRatio, transformMode=Qt.SmoothTransformation)
        if not img.isNull():
            # BUG FIX: QImage.convertToFormat() returns a NEW image; the
            # original discarded the result, so setPixelData could receive
            # pixels in a byte layout other than RGBA8888.
            img = img.convertToFormat(QImage.Format_RGBA8888)
            ptr = img.constBits()
            ptr.setsize(img.byteCount())
            layer.setPixelData(bytes(ptr.asarray()), 0, 0, img.width(), img.height())
            root.addChildNode(layer, None)
            doc.refreshProjection()
def make_white(self, layer):
    """Export a white-silhouette copy of *layer* (RGB forced to white, alpha kept), then restore it.

    Writes the snapshot to self.path/self.file<N>.png where N is the running
    counter self.i, and leaves the layer's original pixels untouched afterwards.
    """
    # Snapshot the layer's raw RGBA bytes so we can restore them at the end.
    pixelBytes = layer.pixelData(0, 0, self.currentDoc.width(), self.currentDoc.height())
    imageData = QImage(pixelBytes, self.currentDoc.width(), self.currentDoc.height(), QImage.Format_RGBA8888)
    # Force every pixel to opaque-white RGB while preserving its alpha channel.
    for x in range(0, self.currentDoc.width()):
        for y in range(0, self.currentDoc.height()):
            pixel = imageData.pixelColor(x, y)
            imageData.setPixelColor(x, y, QColor(255, 255, 255, pixel.alpha()))
    ptr = imageData.constBits()
    ptr.setsize(imageData.byteCount())
    # Push the whitened pixels into the layer just long enough to save it.
    layer.setPixelData(bytes(ptr.asarray()), 0, 0, self.currentDoc.width(), self.currentDoc.height())
    self.currentDoc.refreshProjection()
    self.i += 1  # running export counter used in the output filename
    self.save(layer, self.path + '/' + self.file + str(self.i) + '.png')
    # Restore the original pixel data.
    layer.setPixelData(pixelBytes, 0, 0, self.currentDoc.width(), self.currentDoc.height())
    self.currentDoc.refreshProjection()
class Window(QMainWindow):
    """ The main window of the application: a grayscale drawing canvas whose
    contents are preprocessed and handed to a model via shared_data. """

    def __init__(self, input_shape, shared_data):
        super().__init__()
        self._input_shape = input_shape
        self._shared_data = shared_data
        self.initUI()
        self._drawing = False
        self._last_point = QPoint()

    def initUI(self):
        """Create the fixed-size canvas window, menu bar and status bar."""
        icon = "icons/app.png"
        self.setWindowTitle("Model Test UI")
        self.setGeometry(WINDOW_TOP, WINDOW_LEFT, WINDOW_WIDTH, WINDOW_HEIGHT)
        self.setFixedSize(self.size())
        self.setWindowIcon(QIcon(get_absolute_path(icon)))
        self._image = QImage(self.size(), QImage.Format_Grayscale8)
        self._image.fill(Qt.white)
        main_menu = self.menuBar()
        edit_menu = main_menu.addMenu("Edit")
        clear_action = QAction("Clear", self)
        clear_action.setShortcut("Return")
        clear_action.triggered.connect(self.clear)
        edit_menu.addAction(clear_action)
        self.statusBar().setSizeGripEnabled(False)
        self.statusBar().showMessage("prediction:")

    def mousePressEvent(self, event):
        if event.button() == Qt.LeftButton:
            self._drawing = True
            self._last_point = event.pos()

    def mouseMoveEvent(self, event):
        if (event.buttons() & Qt.LeftButton) & self._drawing:
            painter = QPainter(self._image)
            painter.setPen(
                QPen(BRUSH_COLOR, BRUSH_SIZE, Qt.SolidLine, Qt.RoundCap,
                     Qt.RoundJoin))
            painter.drawLine(self._last_point, event.pos())
            self._last_point = event.pos()
            self.update()

    def mouseReleaseEvent(self, event):
        if event.button() == Qt.LeftButton:
            self._drawing = False
            self.evaluateInput()

    def paintEvent(self, _):
        canvas_painter = QPainter(self)
        canvas_painter.drawImage(self.rect(), self._image, self._image.rect())

    def evaluateInput(self):
        """Convert the canvas to a (height, width) uint8 array and submit it."""
        width = self._image.width()
        height = self._image.height()
        # BUG FIX: QImage scanlines are padded to 32-bit boundaries and numpy
        # arrays are (rows, cols) = (height, width). The original used
        # setsize(width * height) and reshape((width, height)), which is wrong
        # whenever the canvas is non-square or its width is not a multiple of 4.
        bytes_per_line = self._image.bytesPerLine()
        ptr = self._image.constBits()
        ptr.setsize(bytes_per_line * height)
        image_data = np.frombuffer(ptr, dtype=np.uint8).reshape(
            (height, bytes_per_line))[:, :width]
        data = preprocess_image_data(image_data,
                                     self._input_shape,
                                     invert=True,
                                     center=True,
                                     fit=True)
        self._shared_data.provide(data)

    def showResult(self, text: str):
        self.statusBar().showMessage(f"prediction: {text}")

    def clear(self):
        """Reset the canvas to white and clear the prediction message."""
        self.statusBar().showMessage("prediction:")
        self._image.fill(Qt.white)
        self.update()
def readBmp(self, file, len=None, off=0, silent=False, rotate=True): """ Reads DOC-standard bat recordings in 8x row-compressed BMP format. For similarity with readWav, accepts len and off args, in seconds. rotate: if True, rotates to match setImage and other spectrograms (rows=time) otherwise preserves normal orientation (cols=time) """ # !! Important to set these, as they are used in other functions self.sampleRate = 176000 self.incr = 512 img = QImage(file, "BMP") h = img.height() w = img.width() colc = img.colorCount() if h == 0 or w == 0: print("ERROR: image was not loaded") return (1) # Check color format and convert to grayscale if not silent and (not img.allGray() or colc > 256): print( "Warning: image provided not in 8-bit grayscale, information will be lost" ) img.convertTo(QImage.Format_Grayscale8) # Convert to numpy # (remember that pyqtgraph images are column-major) ptr = img.constBits() ptr.setsize(h * w * 1) img2 = np.array(ptr).reshape(h, w) # Determine if original image was rotated, based on expected num of freq bins and freq 0 being empty # We also used to check if np.median(img2[-1,:])==0, # but some files happen to have the bottom freq bin around 90, so we cannot rely on that. if h == 64: # standard DoC format pass elif w == 64: # seems like DoC format, rotated at -90* img2 = np.rot90(img2, 1, (1, 0)) w, h = h, w else: print("ERROR: image does not appear to be in DoC format!") print("Format details:") print(img2) print(h, w) print(min(img2[-1, :]), max(img2[-1, :])) print(np.sum(img2[-1, :] > 0)) print(np.median(img2[-1, :])) return (1) # Could skip that for visual mode - maybe useful for establishing contrast? 
img2[-1, :] = 254 # lowest freq bin is 0, flip that img2 = 255 - img2 # reverse value having the black as the most intense img2 = img2 / np.max(img2) # normalization img2 = img2[:, 1:] # Cutting first time bin because it only contains the scale and cutting last columns img2 = np.repeat( img2, 8, axis=0) # repeat freq bins 7 times to fit invertspectrogram self.data = [] self.fileLength = (w - 2) * self.incr + self.window_width # in samples # Alternatively: # self.fileLength = self.convertSpectoAmpl(h-1)*self.sampleRate # NOTE: conversions will use self.sampleRate and self.incr, so ensure those are already set! # trim to specified offset and length: if off > 0 or len is not None: # Convert offset from seconds to pixels off = int(self.convertAmpltoSpec(off)) if len is None: img2 = img2[:, off:] else: # Convert length from seconds to pixels: len = int(self.convertAmpltoSpec(len)) img2 = img2[:, off:(off + len)] if rotate: # rotate for display, b/c required spectrogram dimensions are: # t increasing over rows, f increasing over cols # This will be enough if the original image was spectrogram-shape. img2 = np.rot90(img2, 1, (1, 0)) self.sg = img2 if QtMM: self.audioFormat.setChannelCount(0) self.audioFormat.setSampleSize(0) self.audioFormat.setSampleRate(self.sampleRate) #else: #self.audioFormat['channelCount'] = 0 #self.audioFormat['sampleSize'] = 0 #self.audioFormat['sampleRate'] = self.sampleRate self.minFreq = 0 self.maxFreq = self.sampleRate // 2 self.minFreqShow = max(self.minFreq, self.minFreqShow) self.maxFreqShow = min(self.maxFreq, self.maxFreqShow) if not silent: print("Detected BMP format: %d x %d px, %d colours" % (w, h, colc)) return (0)
def run_api():
    """Blocking stdin command loop driving Poppler PDF rendering.

    Commands (space-separated, first token selects the action):
      open <file>   - load a PDF document
      page <n>      - select a page of the current PDF
      pages         - print the number of pages
      color <t>     - set color theme ('light', 'dark', 'sepia')
      textlist      - print the page's text boxes as JSON
      render        - render the page and dump pixel-buffer debug info
      toc           - fetch the table of contents
    """
    pdf = 0
    page = 0
    image = QImage()
    byte_data = QByteArray()
    color_theme = 'light'  # allowed color themes: 'light', 'dark', 'sepia'
    while True:
        s = input()
        tokens = s.split(' ')
        if tokens[0] == 'open':
            # open a pdf file
            pdf = Poppler.Document.load(tokens[1])
            pdf.setRenderHint(Poppler.Document.TextAntialiasing)
        if tokens[0] == 'page':
            # open a page of the current pdf
            # NOTE(review): Poppler's page() expects an int index while
            # tokens[1] is a str — confirm against the binding in use.
            page = pdf.page(tokens[1])
        if tokens[0] == 'pages':
            # returns number of pages
            print(pdf.numPages())
        if tokens[0] == 'color':
            # set color theme
            color_theme = tokens[1]
        if tokens[0] == 'textlist':
            # outputs textlist as a json file
            textlist = page.textList()
            textlist_json = []
            for text in textlist:
                bb = text.boundingBox()
                d = {'text': text.text(), 'x': bb.x(), 'y': bb.y(),
                     'width': bb.width(), 'height': bb.height()}
                textlist_json.append(d)
            print(json.dumps(textlist_json))
        if tokens[0] == 'render':
            # output pixel data to stdout
            image = page.renderToImage(3 * 72, 3 * 72, -1, -1, -1, -1)
            print(image.format())
            if color_theme == 'dark':
                # Remap each channel from the light palette onto the dark one.
                for x in range(image.width()):
                    for y in range(image.height()):
                        rgb = image.pixelColor(x, y)
                        rgb.setRed(int(linear_interpolation(234, 68, rgb.red() / 255)))
                        rgb.setGreen(int(linear_interpolation(234, 68, rgb.green() / 255)))
                        rgb.setBlue(int(linear_interpolation(234, 68, rgb.blue() / 255)))
                        image.setPixelColor(x, y, rgb)
            # Serialise the rendered page as PNG into an in-memory buffer.
            ba = QByteArray()
            buffer = QBuffer(ba)
            buffer.open(QIODevice.WriteOnly)
            image.save(buffer, "PNG")  # writes image into ba in PNG format
            ba2 = buffer.data()
            # BUG FIX: removed a dangling, syntactically invalid fragment
            # ("for i in ba.") and the surrounding commented-out debug code.
            bits = image.constBits()
            bits.setsize(image.width() * image.height() * 4)
            print(str(bits))
            print(dir(bits))
            print(bits.asarray())
            print(str(bits.asarray()))
            imgPtr = c_void_p(bits.__int__())
            print(dir(imgPtr))
        if tokens[0] == 'toc':
            toc = pdf.toc()
def qImage2Numpy(qimg: QImage, channels=3) -> np.ndarray:
    """Copy *qimg*'s pixel buffer into a (height, width, channels) uint8 array.

    NOTE: assumes the image's byteCount equals height*width*channels — i.e. a
    packed format with no scanline padding.
    """
    buf = qimg.constBits()
    buf.setsize(qimg.byteCount())  # sip voidptr must be sized before reading
    height, width = qimg.height(), qimg.width()
    return np.array(buf).reshape(height, width, channels)
class Canvas(QMainWindow):
    """A 400x400 black canvas for drawing a digit with the mouse; the "Ok"
    button snapshots the drawing into self.image_np and saves a PNG."""

    def __init__(self):
        super().__init__()
        # NOTE: these shadow QWidget.width()/height(); kept as-is because
        # other methods read them as plain attributes.
        self.width = 400
        self.height = 400
        self.setWindowTitle("Draw Digit")
        self.setGeometry(100, 100, self.width, self.height)
        self.setWindowIcon(QIcon("write_pencil-512.png"))
        self.image = QImage(self.size(), QImage.Format_RGB32)
        self.image.fill(Qt.black)
        self.lastPoint = QPoint()
        self.drawing = False
        # image array (filled by enterFunc)
        self.image_np = np.zeros([self.width, self.height])
        button1 = QPushButton("Ok", self)
        button1.move(2, 2)
        button1.clicked.connect(self.enterFunc)

    def paintEvent(self, event):
        canvasPainter = QPainter(self)
        canvasPainter.drawImage(self.rect(), self.image, self.image.rect())

    def enterFunc(self):
        """Snapshot the canvas to a normalized grayscale array and save it."""
        ptr = self.image.constBits()
        ptr.setsize(self.image.byteCount())
        # Canvas is square (400x400), so (width, height) ordering is harmless
        # here; kept for compatibility with the stored image_np layout.
        self.image_np = np.array(ptr).reshape(self.width, self.height, 4)
        self.image_np = self.image_np[:, :, 0]  # one channel suffices (grayscale drawing)
        self.image_np = self.image_np / 255.0
        if np.sum(self.image_np) == 0:
            print("please write a digit")
        else:
            plt.figure(figsize=(1, 1), dpi=200)
            plt.imshow(self.image_np, cmap="gray")
            plt.axis("off")
            plt.grid(False)
            plt.savefig("./input_Image.png")
            self.close()

    def mousePressEvent(self, event):
        if event.button() == Qt.LeftButton:
            self.lastPoint = event.pos()
            self.drawing = True
            print(self.lastPoint)

    def mouseMoveEvent(self, event):
        if (event.buttons() == Qt.LeftButton) & self.drawing:
            painter = QPainter(self.image)
            painter.setPen(
                QPen(Qt.white, 15, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
            painter.drawLine(self.lastPoint, event.pos())
            self.lastPoint = event.pos()
            self.update()

    def mouseReleaseEvent(self, event):
        # BUG FIX: the original compared the bound method `event.button`
        # (never equal to Qt.LeftButton) instead of calling it, so
        # self.drawing was never reset on release.
        if event.button() == Qt.LeftButton:
            self.drawing = False
def _convert_image(self, qimage: QImage) -> Optional[QDBusArgument]:
    """Convert a QImage to the structure DBus expects.

    https://specifications.freedesktop.org/notification-spec/latest/ar01s05.html#icons-and-images-formats

    Returns None when the image has scanline padding and the daemon
    (per self._quirks) cannot handle padded images.
    """
    bits_per_color = 8
    has_alpha = qimage.hasAlphaChannel()
    if has_alpha:
        image_format = QImage.Format_RGBA8888
        channel_count = 4
    else:
        image_format = QImage.Format_RGB888
        channel_count = 3
    qimage.convertTo(image_format)  # in-place format conversion
    bytes_per_line = qimage.bytesPerLine()
    width = qimage.width()
    height = qimage.height()
    # Fields must be added in exactly the order the spec defines.
    image_data = QDBusArgument()
    image_data.beginStructure()
    image_data.add(width)
    image_data.add(height)
    image_data.add(bytes_per_line)
    image_data.add(has_alpha)
    image_data.add(bits_per_color)
    image_data.add(channel_count)
    try:
        size = qimage.sizeInBytes()
    except TypeError:
        # WORKAROUND for
        # https://www.riverbankcomputing.com/pipermail/pyqt/2020-May/042919.html
        # byteCount() is obsolete, but sizeInBytes() is only available with
        # SIP >= 5.3.0.
        size = qimage.byteCount()
    # Despite the spec not mandating this, many notification daemons mandate that
    # the last scanline does not have any padding bytes.
    #
    # Or in the words of dunst:
    #
    #     The image is serialised rowwise pixel by pixel. The rows are aligned by a
    #     spacer full of garbage. The overall data length of data + garbage is
    #     called the rowstride.
    #
    #     Mind the missing spacer at the last row.
    #
    #     len:  |<--------------rowstride---------------->|
    #     len:  |<-width*pixelstride->|
    #     row 1:  | data for row 1    | spacer of garbage |
    #     row 2:  | data for row 2    | spacer of garbage |
    #             | .                 | spacer of garbage |
    #             | .                 | spacer of garbage |
    #             | .                                     |
    #     row n-1:| data for row n-1  | spacer of garbage |
    #     row n:  | data for row n    |
    #
    # Source:
    # https://github.com/dunst-project/dunst/blob/v1.6.1/src/icon.c#L292-L309
    padding = bytes_per_line - width * channel_count
    # Qt scanlines are 32-bit aligned, so padding can only be 0-3 bytes.
    assert 0 <= padding <= 3, (padding, bytes_per_line, width, channel_count)
    size -= padding  # drop the padding of the final scanline (see above)
    if padding and self._quirks.no_padded_images:
        return None
    bits = qimage.constBits().asstring(size)
    image_data.add(QByteArray(bits))
    image_data.endStructure()
    return image_data
def qimg2np(im: QImage) -> np.ndarray:
    """Expose *im*'s pixel buffer as a read-only (H, W, 4) uint8 ndarray view.

    NOTE: assumes a 4-byte-per-pixel format with no scanline padding.
    """
    height = im.height()
    width = im.width()
    buf = im.constBits()
    buf.setsize(height * width * 4)  # sip voidptr must be sized before frombuffer
    return np.frombuffer(buf, 'uint8').reshape((height, width, 4))