def encode_image(image: QImage) -> str:
    """Serialize *image* as PNG and return the result as a base64 string."""
    raw = QByteArray()
    out = QBuffer(raw)
    out.open(QIODevice.WriteOnly)
    # noinspection PyTypeChecker
    image.save(out, "PNG")  # writes pixmap into bytes in PNG format
    return raw.toBase64().data().decode()
def load_pixmap(name: str, size: QSize = None) -> QPixmap:
    """Load a pixmap from the Qt resource system, optionally scaled to *size*.

    :param name: file name inside the shibumi_images resource folder
    :param size: target size, or None to keep the original dimensions
    :raises ValueError: if the resource cannot be loaded as an image
    """
    file_path = ':/shibumi_images/' + name
    loaded = QImage(str(file_path))
    if loaded.isNull():
        raise ValueError(f'Unable to load image {file_path}.')
    result = QPixmap(loaded)
    if size is None:
        return result
    return ShibumiDisplay.scale_pixmap(result, size.width(), size.height())
def convert_cv_qt(self, cv_img):
    """Convert from an opencv image to QPixmap"""
    # OpenCV delivers BGR; Qt's RGB888 format expects RGB byte order.
    rgb = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
    height, width, channels = rgb.shape
    qt_image = QImage(rgb.data, width, height, channels * width,
                      QImage.Format_RGB888)
    scaled = qt_image.scaled(self.cam.width(), self.cam.height(),
                             Qt.KeepAspectRatio)
    return QPixmap.fromImage(scaled)
def paint_with_opacity(pixmap: QPixmap, opacity: float):
    """Return a 36x36 QPixmap with *pixmap* centred and drawn at *opacity*.

    :param pixmap: source pixmap, expected to fit inside 36x36
    :param opacity: painter opacity in [0.0, 1.0]
    """
    transparent_image = QImage(QSize(36, 36),
                               QImage.Format_ARGB32_Premultiplied)
    transparent_image.fill(Qt.transparent)
    painter = QPainter(transparent_image)
    painter.setOpacity(opacity)
    # BUG FIX: 18 - pixmap.width() / 2 is a float in Python 3, and the
    # drawPixmap(int, int, QPixmap) overload rejects floats under modern
    # Qt bindings.  Centre with integer arithmetic instead.
    painter.drawPixmap((36 - pixmap.width()) // 2,
                       (36 - pixmap.height()) // 2,
                       pixmap)
    painter.end()
    return QPixmap.fromImage(transparent_image)
def save_image_in_tempfolder(image: QtGui.QImage, postfix: str = "",
                             log_level=logging.DEBUG):
    """For debugging it can be useful to store the cropped image."""
    if logger.getEffectiveLevel() != log_level:
        return
    target_dir = Path(tempfile.gettempdir()) / "normcap"
    target_dir.mkdir(exist_ok=True)
    timestamp = datetime.datetime.now()
    target = target_dir / f"{timestamp:%Y-%m-%d_%H-%M-%S_%f}{postfix}.png"
    image.save(str(target))
    logger.debug("Store debug image in: %s", target)
def encode_image(image: QImage) -> str:
    """Encode *image* as PNG and return its bytes as a base64 string."""
    image_bytes = QByteArray()
    buffer = QBuffer(image_bytes)
    buffer.open(QIODevice.WriteOnly)  # type: ignore
    # writes pixmap into bytes in PNG format
    image.save(buffer, "PNG")  # type: ignore
    encoded = standard_b64encode(buffer.data().data())
    return encoded.decode('UTF-8')
def update_frame_image(self, frame_index: int):
    """Refresh the displayed frame image.

    :param frame_index: index of the frame to display
    :return: None
    """
    encoded = self._frame_base64_dict[frame_index]
    frame_image = QImage()
    frame_image.loadFromData(QByteArray.fromBase64(QByteArray(encoded)))
    self.current_frame_item.setPixmap(QPixmap.fromImage(frame_image))
    self.current_frame_index = frame_index
def get_bit(image: QImage, index) -> bool:
    """Return one bit read from *image*: the lowest bit of the red, green,
    or blue channel of the pixel holding bit *index*.

    Bits are packed three per pixel in row-major order: pixel index // 3
    holds the bit, and index % 3 selects the channel (0=red, 1=green,
    2=blue).

    :param image: image the bits are read from
    :param index: global bit position
    """
    pixel_index = index // 3
    x = pixel_index % image.width()
    y = pixel_index // image.width()
    channel = index % 3
    # QImage.pixel() returns 0xAARRGGBB: red at bit 16, green at 8, blue at 0.
    # index % 3 is always 0, 1, or 2, so the old unreachable `else: raise`
    # branch is gone; a tuple lookup would raise IndexError anyway.
    shift = (16, 8, 0)[channel]
    # BUG FIX: the function is annotated -> bool but previously returned the
    # raw int 0/1 from the mask; cast so the annotation holds.
    return bool((image.pixel(x, y) >> shift) & 1)
def convert_frame(self, frame):
    """Wrap a numpy frame (colour image or single-channel mask) in a QPixmap."""
    if len(frame.shape) == 3:
        height, width, channels = frame.shape
        image = QImage(frame, width, height, channels * width,
                       QImage.Format_BGR888)
    else:
        # This is a mask
        height, width = frame.shape
        image = QImage(frame, width, height, QImage.Format_Grayscale8)
    return QPixmap(image)
def display_pic(self):
    """Grab a frame from the capture device and show it mirrored on the label."""
    ret, face = self.capture.read()
    self.frame = face
    mirrored = cv2.flip(cv2.cvtColor(face, cv2.COLOR_RGB2BGR), 1)
    image = QImage(mirrored, mirrored.shape[1], mirrored.shape[0],
                   mirrored.strides[0], QImage.Format_RGB888)
    self.label.setPixmap(QPixmap.fromImage(image))
def updatePaintNode(self, node, data):
    """Create or refresh the scene-graph node showing the matplotlib canvas."""
    figure = self._figure
    # Bail out until the figure has a canvas with a live renderer.
    if not figure or not figure.canvas or not figure.canvas.renderer:
        return
    renderer = figure.canvas.renderer
    image = QImage(figure.canvas.buffer_rgba(),
                   renderer.width, renderer.height,
                   QImage.Format_RGBA8888)
    texture = self.window().createTextureFromImage(image)
    if not self._node:
        self._node = QSGSimpleTextureNode()
        self._node.setFiltering(QSGTexture.Linear)
    self._node.setTexture(texture)
    self._node.setRect(0, 0, self.width(), self.height())
    return self._node
def _initialise(self):
    """ Draws the login page. """
    self.setGeometry(self.left, self.top, self.width, self.height)
    grid = QGridLayout()

    # Paint the scaled login background into the widget's palette.
    background = QImage(IMAGES_FILE_PATH + "login.jpg")
    scaled_background = background.scaled(
        QSize(self.width, self.height))  # resize Image to widgets size
    palette = QPalette()
    palette.setBrush(QPalette.Window, QBrush(scaled_background))
    self.setPalette(palette)

    # Username row
    username_label = QLabel('<font size="3"> Käyttäjätunnus </font>')
    self.lineEdit_username = QLineEdit()
    self.lineEdit_username.setPlaceholderText('Anna käyttäjätunnus')
    grid.addWidget(username_label, 0, 0)
    grid.addWidget(self.lineEdit_username, 0, 1)

    # Password row
    password_label = QLabel('<font size="3"> Salasana </font>')
    self.lineEdit_password = QLineEdit()
    self.lineEdit_password.setPlaceholderText('Anna salasana')
    grid.addWidget(password_label, 1, 0)
    grid.addWidget(self.lineEdit_password, 1, 1)

    # Login button
    login_button = QPushButton('Kirjaudu')
    login_button.clicked.connect(self._check_password)
    grid.addWidget(login_button, 2, 0, 1, 2)
    grid.setRowMinimumHeight(2, 10)

    # Create-user button
    create_button = QPushButton('Luo uusi käyttäjä')
    create_button.clicked.connect(self._handle_show_create_user_view)
    grid.addWidget(create_button, 3, 0, 1, 2)
    grid.setRowMinimumHeight(2, 10)

    # Draw close button
    close_button = QPushButton('Lopeta')
    close_button.clicked.connect(self._handle_end)
    grid.addWidget(close_button, 4, 0, 1, 2)
    grid.setRowMinimumHeight(1, 10)

    grid.setContentsMargins(600, 400, 600, 400)
    self.setLayout(grid)
class EntropyWidget(QWidget):
    """Widget that shows a per-block entropy strip of a file's raw bytes.

    A background EntropyThread fills a 1-pixel-high QImage with one column
    per block; paintEvent stretches that image over the widget, and a left
    click navigates the view to the corresponding file offset.
    """

    def __init__(self, parent, view, data):
        super(EntropyWidget, self).__init__(parent)
        self.view = view
        self.data = data
        self.raw_data = data.file.raw
        # NOTE(review): true division leaves block_size a float; presumably
        # integer division was intended — confirm against EntropyThread.
        self.block_size = (len(self.raw_data) / 4096) + 1
        if self.block_size < 1024:
            self.block_size = 1024
        # One image column per block of raw data.
        self.width = int(len(self.raw_data) / self.block_size)
        self.image = QImage(self.width, 1, QImage.Format_ARGB32)
        self.image.fill(QColor(0, 0, 0, 0))  # start fully transparent
        # Worker thread writes entropy colours into self.image.
        self.thread = EntropyThread(self.raw_data, self.image,
                                    self.block_size)
        self.started = False
        # Poll the worker every 100 ms (see timerEvent below).
        self.timer = QTimer()
        self.timer.timeout.connect(self.timerEvent)
        self.timer.setInterval(100)
        self.timer.setSingleShot(False)
        self.timer.start()
        self.setMinimumHeight(UIContext.getScaledWindowSize(32, 32).height())

    def paintEvent(self, event):
        # Stretch the 1-pixel-high entropy image over the whole widget rect.
        p = QPainter(self)
        p.drawImage(self.rect(), self.image)
        p.drawRect(self.rect())

    def sizeHint(self):
        return QSize(640, 32)

    def timerEvent(self):
        # Lazily start the worker, then repaint whenever it reports progress.
        if not self.started:
            self.thread.start()
            self.started = True
        if self.thread.updated:
            self.thread.updated = False
            self.update()

    def mousePressEvent(self, event):
        # Map the clicked x position back to a file offset and navigate there.
        if event.button() != Qt.LeftButton:
            return
        frac = float(event.x()) / self.rect().width()
        offset = int(frac * self.width * self.block_size)
        self.view.navigateToFileOffset(offset)
def __init__(self):
    # Sidebar icons are 28x28 points. Should be at least 56x56 pixels for
    # HiDPI display compatibility. They will be automatically made theme
    # aware, so you need only provide a grayscale image, where white is
    # the color of the shape.
    icon = QImage(56, 56, QImage.Format_RGB32)
    icon.fill(0)

    # Render an "H" as the example icon
    painter = QPainter()
    painter.begin(icon)
    painter.setFont(QFont("Open Sans", 56))
    painter.setPen(QColor(255, 255, 255, 255))
    painter.drawText(QRectF(0, 0, 56, 56), Qt.AlignCenter, "H")
    painter.end()

    SidebarWidgetType.__init__(self, icon, "Hello")
def convert(self) -> Texture2DDescription:
    """Load the image at ``self.filePath`` and build a Texture2DDescription
    with ``self.channels`` channels.

    Four-channel targets are read as RGBA8888; everything else is read as
    RGB888 and then narrowed to 1 (red only) or 2 (red+green) bytes per
    pixel as required.
    """
    self.validate()
    from PySide6.QtGui import QImage
    img = QImage(self.filePath)
    n = _cChannels[self.channels]
    if n == 4:
        img = img.convertToFormat(QImage.Format_RGBA8888)
    else:
        img = img.convertToFormat(QImage.Format_RGB888)
    # noinspection PyTypeChecker
    bits: memoryview = img.constBits()
    # noinspection PyTypeChecker
    byts: bytes = bits.tobytes()
    w, h = img.width(), img.height()
    if n == 1:
        # Keep only the red byte of every RGB triple.
        assert len(byts) == w * h * 3
        byts = byts[::3]
        assert len(byts) == w * h
    elif n == 2:
        # Keep the first two bytes (red, green) of every RGB triple.
        # BUG FIX: the old code iterated range(len(byts) // 3), producing
        # overlapping 2-byte windows from the start of the buffer (byts[0:2],
        # byts[1:3], ...) instead of one 2-byte sample per 3-byte pixel.
        # The length assert passed, but the pixel data was corrupt.
        assert len(byts) == w * h * 3
        byts = b''.join(byts[i:i + 2] for i in range(0, len(byts), 3))
        assert len(byts) == w * h * 2
    desc = Texture2DDescription(w, h, byts, self.channels, self.dataFormat,
                                self.tilingX, self.mipMaps,
                                self.linearFiltering)
    desc.tilingY = self.tilingY
    desc._label = self._label
    return desc
def view_cam(self):
    """Read one camera frame, draw the face rectangle, and show it on the label."""
    ret, self.image = self.cap.read()
    self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
    height, width, channels = self.image.shape
    bytes_per_line = channels * width
    self.rectangle_face()
    q_img = QImage(self.image.data, width, height, bytes_per_line,
                   QImage.Format_RGB888)
    self.main_window.ui.label.setPixmap(QPixmap.fromImage(q_img))
class Image(qrcode.image.base.BaseImage):
    """QImage-backed drawing surface for the qrcode library."""

    def __init__(self, border, width, box_size):
        self.border = border
        self.width = width
        self.box_size = box_size
        size = (width + border * 2) * box_size
        self._image = QImage(size, size, QImage.Format_RGB16)
        self._image.fill(Qt.white)

    def pixmap(self):
        """Return the rendered QR code as a QPixmap."""
        return QPixmap.fromImage(self._image)

    def drawrect(self, row, col):
        """Fill one QR module (black square) at the given grid position."""
        painter = QPainter(self._image)
        x = (col + self.border) * self.box_size
        y = (row + self.border) * self.box_size
        painter.fillRect(x, y, self.box_size, self.box_size, Qt.black)

    def save(self, stream, kind=None):
        """Stream saving is unsupported; callers use pixmap() instead."""
        pass
def run(self):
    """Gets the next frame of video and signals the GUI that it's ready.

    Reads frames from ``self.filename``, converts each to a scaled QImage,
    and publishes it via ``self.client.media``.
    """
    cur_frame = 0
    cap = VideoCapture(self.filename)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # BUG FIX: previously a failed read just looped forever,
            # busy-spinning once the video ended.  Stop instead.
            break
        cap.set(CAP_PROP_POS_FRAMES, cur_frame)
        cur_frame += 1
        rgb = cvtColor(frame, COLOR_BGR2RGB)
        height, width, chars = rgb.shape
        bytes_per_line = chars * width
        qimg = QImage(rgb.data, width, height, bytes_per_line,  # type: ignore
                      QImage.Format_RGB888)
        image = qimg.scaled(
            *self.client.media_size,
            Qt.KeepAspectRatio,
            Qt.FastTransformation,
        )
        self.client.media = image
def array2d_to_pixmap(array: np.ndarray, normalize=False,
                      colormap: int = cv.COLORMAP_VIRIDIS) -> QPixmap:
    """
    Convert a 2D array (monochrome image) to a QPixmap

    :param numpy.ndarray array: The array to convert.
    :param bool normalize: If `True` then apply colormap.
    :param colormap: Colomap used if normalize is `True`
    :return: QPixmap containing the array.
    """
    assert array.ndim == 2
    if normalize:
        colored = apply_colormap(array, colormap)
        height, width, color_bytes = colored.shape
        image = QImage(colored.data, width, height, color_bytes * width,
                       QImage.Format_BGR888)
        return QPixmap.fromImage(image)
    height, width = array.shape
    image = QImage(array.data, width, height, width,
                   QImage.Format_Grayscale8)
    return QPixmap.fromImage(image)
def array3d_to_pixmap(array: np.ndarray) -> QPixmap:
    """
    Convert a 3D array (color image) to a QPixmap.

    :param array: The array to convert.
    :return: QPixmap containing the array.
    """
    assert array.ndim == 3
    height, width, color_bytes = array.shape
    bytes_per_line = color_bytes * width
    image = QImage(array.data, width, height, bytes_per_line,
                   QImage.Format_BGR888)
    return QPixmap.fromImage(image)
def __init__(self):
    super(MainWindow, self).__init__()

    # Camera and still-image capture pipeline.
    self.cameraInfo = QCameraInfo.defaultCamera()
    self.camera = QCamera(self.cameraInfo)
    self.camera.setCaptureMode(QCamera.CaptureStillImage)
    self.imageCapture = QCameraImageCapture(self.camera)
    self.imageCapture.imageCaptured.connect(self.imageCaptured)
    self.imageCapture.imageSaved.connect(self.imageSaved)
    self.currentPreview = QImage()

    tool_bar = QToolBar()
    self.addToolBar(tool_bar)

    # File menu: take picture + exit.
    file_menu = self.menuBar().addMenu("&File")
    shutter_icon = QIcon(
        os.path.join(os.path.dirname(__file__), "shutter.svg"))
    self.takePictureAction = QAction(shutter_icon, "&Take Picture", self,
                                     shortcut="Ctrl+T",
                                     triggered=self.takePicture)
    self.takePictureAction.setToolTip("Take Picture")
    file_menu.addAction(self.takePictureAction)
    tool_bar.addAction(self.takePictureAction)

    exit_action = QAction(QIcon.fromTheme("application-exit"), "E&xit",
                          self, shortcut="Ctrl+Q", triggered=self.close)
    file_menu.addAction(exit_action)

    # About menu.
    about_menu = self.menuBar().addMenu("&About")
    about_qt_action = QAction("About &Qt", self, triggered=qApp.aboutQt)
    about_menu.addAction(about_qt_action)

    # Central tab widget hosting the camera viewfinder.
    self.tabWidget = QTabWidget()
    self.setCentralWidget(self.tabWidget)
    self.cameraViewfinder = QCameraViewfinder()
    self.camera.setViewfinder(self.cameraViewfinder)
    self.tabWidget.addTab(self.cameraViewfinder, "Viewfinder")

    # Start the camera if available; otherwise disable capture.
    if self.camera.status() != QCamera.UnavailableStatus:
        name = self.cameraInfo.description()
        self.setWindowTitle("PySide6 Camera Example (" + name + ")")
        self.statusBar().showMessage("Starting: '" + name + "'", 5000)
        self.camera.start()
    else:
        self.setWindowTitle("PySide6 Camera Example")
        self.takePictureAction.setEnabled(False)
        self.statusBar().showMessage("Camera unavailable", 5000)
def assert_equal(self):
    """Fail if the actual and expected pixmaps differ.

    Paints a per-pixel diff image, shows it via display_diff (live turtle
    mode), and on mismatch saves actual/expected/diff PNGs to the work
    directory and prints a base64 encoding of the differing region so it
    can be recovered from a CI log.
    """
    __tracebackhide__ = True
    self.end()
    self.different_pixels = 0
    actual_image: QImage = self.actual.device().toImage()
    expected_image: QImage = self.expected.device().toImage()
    diff_pixmap = QPixmap(actual_image.width(), actual_image.height())
    diff = QPainter(diff_pixmap)
    try:
        white = QColor('white')
        diff.fillRect(0, 0, actual_image.width(), actual_image.height(),
                      white)
        # Compare every pixel; diff_colour is also expected to update
        # different_pixels and the diff_min/max bounding box as a side
        # effect (it is read below).
        for x in range(actual_image.width()):
            for y in range(actual_image.height()):
                actual_colour = actual_image.pixelColor(x, y)
                expected_colour = expected_image.pixelColor(x, y)
                diff.setPen(
                    self.diff_colour(actual_colour, expected_colour, x, y))
                diff.drawPoint(x, y)
    finally:
        diff.end()
    diff_image: QImage = diff.device().toImage()

    display_diff(actual_image, diff_image, expected_image,
                 self.different_pixels)

    if self.different_pixels == 0:
        return
    # Persist all three images so a failing run can be inspected later.
    actual_image.save(str(self.work_dir / (self.name + '_actual.png')))
    expected_image.save(str(self.work_dir / (self.name + '_expected.png')))
    diff_path = self.work_dir / (self.name + '_diff.png')
    is_saved = diff_image.save(str(diff_path))
    # Crop the diff down to the bounding box of differing pixels.
    diff_width = self.diff_max_x - self.diff_min_x + 1
    diff_height = self.diff_max_y - self.diff_min_y + 1
    diff_section = QImage(diff_width, diff_height, QImage.Format_RGB32)
    diff_section_painter = QPainter(diff_section)
    try:
        diff_section_painter.drawPixmap(0, 0, diff_width, diff_height,
                                        QPixmap.fromImage(diff_image),
                                        self.diff_min_x, self.diff_min_y,
                                        diff_width, diff_height)
    finally:
        diff_section_painter.end()
    # To see an image dumped in the Travis CI log, copy the text from the
    # log, and paste it in test_pixmap_differ.test_decode_image.
    print(f'Encoded image of differing section '
          f'({self.diff_min_x}, {self.diff_min_y}) - '
          f'({self.diff_max_x}, {self.diff_max_y}):')
    print(encode_image(diff_section))
    message = f'Found {self.different_pixels} different pixels, '
    message += f'see' if is_saved else 'could not write'
    message += f' {diff_path.relative_to(Path(__file__).parent.parent)}.'
    assert self.different_pixels == 0, message
def display_diff(actual_image: QImage, diff_image: QImage,
                 expected_image: QImage, diff_count: int):
    """Show the actual, diff, and expected images stacked on a turtle canvas.

    No-op unless running in live turtle mode, i.e. unless a
    ``display_image`` attribute has been attached to turtle.Turtle.
    """
    # Display image when in live turtle mode.
    display_image = getattr(turtle.Turtle, 'display_image', None)
    if display_image is None:
        return
    t = turtle.Turtle()
    # noinspection PyUnresolvedReferences
    screen = t.screen  # type: ignore
    w = screen.cv.cget('width')
    h = screen.cv.cget('height')
    # Turtle (0,0) is the canvas centre; (ox, oy) converts to canvas coords.
    ox, oy = w / 2, h / 2
    # Divide the leftover vertical space into three label rows, with a
    # minimum readable height of 20.
    text_space = (h - actual_image.height() - diff_image.height() -
                  expected_image.height())
    text_height = max(20, text_space // 3)
    font = ('Arial', text_height // 2, 'Normal')
    t.penup()
    t.goto(-ox, oy)
    t.right(90)  # face down so forward() moves down the canvas
    t.forward(text_height)
    t.write(f'Actual', font=font)
    display_image(ox + t.xcor(), oy - t.ycor(),
                  image=encode_image(actual_image))
    t.forward(actual_image.height())
    t.forward(text_height)
    t.write(f'Diff ({diff_count} pixels)', font=font)
    display_image(ox + t.xcor(), oy - t.ycor(),
                  image=encode_image(diff_image))
    t.forward(diff_image.height())
    t.forward(text_height)
    t.write('Expected', font=font)
    display_image(ox + t.xcor(), oy - t.ycor(),
                  image=encode_image(expected_image))
    t.forward(expected_image.height())
def _receive_messages(self) -> None:
    """Receive messages from the server."""
    # Background receive loop: unpack typed packages from the socket and
    # dispatch on the 3-letter message tag.
    while True:
        try:
            self.recv_lock.acquire()
            msg_type, msg = open_package(
                self.session.srv_key, self.private_key, self.socket
            )
            self.recv_lock.release()
            if len(msg) == 0:
                break  # empty payload: stop receiving
            if msg_type == "MSG":
                # Plain chat text appended to the chat widget.
                self.window.inter.chat.appendPlainText(msg.decode())
            elif msg_type == "SES":
                self._set_session_vars(msg.decode())
                continue
            elif msg_type == "FOL":
                self._folders_and_files(msg.decode())
                continue
            elif msg_type == "IMG":
                # NOTE(review): cvtColor is called on a PIL Image rather
                # than a numpy array — confirm this works with the cv2
                # binding in use.
                img = Image.open(BytesIO(msg))
                rgb = cvtColor(img, COLOR_BGR2RGB)
                height, width, chars = rgb.shape
                bytes_per_line = chars * width
                qimg = QImage(rgb.data, width, height,
                              bytes_per_line,  # type: ignore
                              QImage.Format_RGB888)
                image = qimg.scaled(
                    *self.media_size,
                    Qt.KeepAspectRatio,
                    Qt.FastTransformation,
                )
                self.media = image
                continue
            elif msg_type == "ERR":
                self.status = msg
        except OSError:
            # NOTE(review): this releases the lock on error, but the
            # success path above has already released it — if the OSError
            # is raised after open_package returns, this double-releases.
            # Confirm intent; a try/finally around the acquire would be
            # safer.
            self.recv_lock.release()
        finally:
            t.sleep(0.1)  # throttle the receive loop
def __init__(self, parent, view, data):
    """Set up the entropy strip: a 1-pixel-high image filled in by a
    background EntropyThread and polled by a 100 ms repaint timer."""
    super(EntropyWidget, self).__init__(parent)
    self.view = view
    self.data = data
    self.raw_data = data.file.raw
    # NOTE(review): true division leaves block_size a float; presumably
    # integer division was intended — confirm against EntropyThread.
    self.block_size = (len(self.raw_data) / 4096) + 1
    if self.block_size < 1024:
        self.block_size = 1024
    # One image column per block of raw data.
    self.width = int(len(self.raw_data) / self.block_size)
    self.image = QImage(self.width, 1, QImage.Format_ARGB32)
    self.image.fill(QColor(0, 0, 0, 0))  # start fully transparent
    # Worker thread writes entropy colours into self.image.
    self.thread = EntropyThread(self.raw_data, self.image, self.block_size)
    self.started = False
    # Poll for worker progress every 100 ms.
    self.timer = QTimer()
    self.timer.timeout.connect(self.timerEvent)
    self.timer.setInterval(100)
    self.timer.setSingleShot(False)
    self.timer.start()
    self.setMinimumHeight(UIContext.getScaledWindowSize(32, 32).height())
def update_cluster_preview(self, image: Union[np.ndarray, str]) -> None:
    """
    Load an image from a string or an array and update the cluster preview.

    :param image: Can be both a numpy array and a string.
    """
    if isinstance(image, np.ndarray):
        pixmap = array2d_to_pixmap(image, normalize=True,
                                   colormap=cv.COLORMAP_JET)
        self.__update_cluster_preview(pixmap)
        return
    if isinstance(image, str):
        self.__update_cluster_preview(QPixmap.fromImage(QImage(image)))
        return
    raise ValueError("Invalid image type: {}".format(type(image)))
def decodeQR(self, qr_image: QImage):
    """Crop the central square of *qr_image*, decode any QR code found in
    it, and emit the decoded text through the decodedQR signal."""
    cropped = qr_image.copy(
        self.calculate_center_square(qr_image).toRect())
    # TODO: the same code is present in slips.py -> move to one place
    buffer = QBuffer()
    buffer.open(QBuffer.ReadWrite)
    cropped.save(buffer, "BMP")
    try:
        pillow_image = Image.open(io.BytesIO(buffer.data()))
    except UnidentifiedImageError:
        print("Image format isn't supported")
        return
    barcodes = pyzbar.decode(pillow_image,
                             symbols=[pyzbar.ZBarSymbol.QRCODE])
    if not barcodes:
        return
    self.decodedQR.emit(barcodes[0].data.decode('utf-8'))
def assert_equal(self):
    """Fail if the actual and expected pixmaps differ.

    Paints a per-pixel diff image, shows it via display_diff, and on
    mismatch saves actual/expected/diff PNGs plus a cropped section of the
    differing region to the work directory.
    """
    __tracebackhide__ = True
    self.end()
    self.different_pixels = 0
    actual_image: QImage = self.actual.device().toImage()
    expected_image: QImage = self.expected.device().toImage()
    diff_pixmap = QPixmap(actual_image.width(), actual_image.height())
    diff = QPainter(diff_pixmap)
    try:
        white = QColor('white')
        diff.fillRect(0, 0, actual_image.width(), actual_image.height(),
                      white)
        # Compare every pixel; diff_colour is also expected to update
        # different_pixels and the diff_min/max bounding box as a side
        # effect (it is read below).
        for x in range(actual_image.width()):
            for y in range(actual_image.height()):
                actual_colour = actual_image.pixelColor(x, y)
                expected_colour = expected_image.pixelColor(x, y)
                diff.setPen(
                    self.diff_colour(actual_colour, expected_colour, x, y))
                diff.drawPoint(x, y)
    finally:
        diff.end()
    diff_image: QImage = diff.device().toImage()

    display_diff(actual_image, diff_image, expected_image,
                 self.different_pixels)

    if self.different_pixels == 0:
        return
    # Persist all three images so a failing run can be inspected later.
    actual_image.save(str(self.work_dir / (self.name + '_actual.png')))
    expected_image.save(str(self.work_dir / (self.name + '_expected.png')))
    diff_path = self.work_dir / (self.name + '_diff.png')
    is_saved = diff_image.save(str(diff_path))
    # Crop the diff down to the bounding box of differing pixels.
    diff_width = self.diff_max_x - self.diff_min_x + 1
    diff_height = self.diff_max_y - self.diff_min_y + 1
    diff_section = QImage(diff_width, diff_height, QImage.Format_RGB32)
    diff_section_painter = QPainter(diff_section)
    try:
        diff_section_painter.drawPixmap(0, 0, diff_width, diff_height,
                                        QPixmap.fromImage(diff_image),
                                        self.diff_min_x, self.diff_min_y,
                                        diff_width, diff_height)
    finally:
        diff_section_painter.end()
    message = f'Found {self.different_pixels} different pixels.'
    assert self.different_pixels == 0, message
def export_arr(self, frame_index: int):
    """Render the scene at *frame_index* and return it as a numpy array
    with the alpha channel dropped."""
    self.scene.update_frame(frame_index)
    canvas = QImage(self.video_data.width, self.video_data.height,
                    QImage.Format_ARGB32)
    painter = QPainter()
    painter.begin(canvas)
    self.scene.render(painter)
    painter.end()
    # Derive the row width from bytesPerLine so any padding is accounted for.
    row_width = canvas.bytesPerLine() * 8 // canvas.depth()
    buffer = canvas.bits()
    pixels = np.array(buffer, dtype=np.uint8)
    pixels = pixels.reshape((canvas.height(), row_width, 4))
    return pixels[..., :3]
def build_gui(watcher):
    """Create the application, context menu, and system tray icon.

    Returns (app, menu, systray); the caller must hold on to all three.
    """
    app = QApplication(title="title here")
    menu = QMenu()
    rebuild_menu(menu, app, watcher)
    with pkg_resources.path(assets, ICON_PATH) as icon_res_path:
        image = QImage(str(icon_res_path))
        print(icon_res_path, image)
        icon = QIcon(QPixmap.from_image(image))
    systray = QSystemTrayIcon(icon)
    systray.set_context_menu(menu)
    systray.show()
    # systray must be returned or it will be garbage collected
    return app, menu, systray