def create_thumbnail(name='./.thumbnail', model='tfm', geometry=False, chemistry=False, background=None):
    """Render a 128x128 thumbnail PNG with badge icons along the bottom edge.

    Parameters
    ----------
    name : str
        Output file path; saved in PNG format regardless of extension.
    model : str
        Basename of the model badge SVG (resolved via ``get_image_path``).
    geometry : bool
        If True, draw the geometry badge.
    chemistry : bool
        If True, draw the chemistry badge.
    background : str or None
        Optional path to a background image; a white canvas is used when
        it is missing or does not exist.
    """
    if background is not None and os.path.exists(background):
        img = QImage(background)
        # Two-step scale (300 then 128) — presumably to smooth the downscale.
        base = img.scaled(300, 300).scaled(128, 128, Qt.IgnoreAspectRatio)
    else:
        base = QImage(QSize(128, 128), QImage.Format_ARGB32)
        base.fill(Qt.white)
    painter = QPainter(base)
    # Badge icons are drawn 24 px from the bottom, side by side.
    # BUG FIX: the original rebound the 'model' parameter to the loaded
    # QImage, shadowing it; use a distinct local name instead.
    model_icon = QImage(get_image_path(model + '.svg'))
    painter.drawImage(0, 128 - 24, model_icon)
    if geometry:
        geometry_icon = QImage(get_image_path('geometry.svg'))
        painter.drawImage(24, 128 - 24, geometry_icon)
    if chemistry:
        # BUG FIX (clarity): the original reused the 'geo' variable here.
        chemistry_icon = QImage(get_image_path('chemistry.svg'))
        painter.drawImage(24 * 2, 128 - 24, chemistry_icon)
    base.save(name, "PNG")
    # End painting explicitly rather than relying on 'del painter' to
    # trigger the destructor.
    painter.end()
def get_icon(iconpath, size=None):
    """Load the image at *iconpath* as a QIcon.

    Parameters
    ----------
    iconpath : str
        Path to the image file.
    size : tuple or None
        Optional (width, height); when given, the image is scaled to it
        before conversion.

    Returns
    -------
    QIcon
    """
    image = QImage(iconpath)
    if size is not None:
        image = image.scaled(QtCore.QSize(*size))
    return QIcon(QPixmap.fromImage(image))
def display_video_stream(self):
    """Grab one frame from the capture device and display it in the label.

    Reads a frame from ``self.capture``, converts BGR -> RGB, scales it to
    ``self.video_size``, pushes it into ``self.image_label``, and notifies
    ``self.node`` with the raw RGB frame.
    """
    # BUG FIX: the original discarded the success flag from read(); a
    # failed grab (camera unplugged, end of stream) would pass None into
    # cvtColor and crash. Skip the tick instead.
    ok, frame = self.capture.read()
    if not ok or frame is None:
        return
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # frame = cv2.flip(frame, 1)
    image = QImage(frame, frame.shape[1], frame.shape[0],
                   frame.strides[0], QImage.Format_RGB888)
    scaled_image = image.scaled(self.video_size)
    self.image_label.setPixmap(QPixmap.fromImage(scaled_image))
    self.node.video_picture_updated(frame)
def cmap2pixmap(cmap, steps=50):
    """Convert a Ginga colormap into a QPixmap.

    Samples *steps* colors evenly from ``cmap.clst``, builds a 1-pixel-tall
    indexed image whose color table is those samples, and returns it scaled
    to a 128x32 pixmap.
    """
    sample_points = numpy.linspace(0, 1, steps)
    last = len(cmap.clst) - 1
    sampled = [cmap.clst[int(p * last)] for p in sample_points]
    # Each clst entry is an (r, g, b) triple in [0, 1]; expand to 8-bit RGBA.
    color_table = [
        QColor(int(r * 255), int(g * 255), int(b * 255), 255).rgba()
        for r, g, b in sampled
    ]
    strip = QImage(steps, 1, QImage.Format_Indexed8)
    strip.setColorTable(color_table)
    # Pixel i simply indexes color-table entry i.
    for column in range(steps):
        strip.setPixel(column, 0, column)
    return QPixmap.fromImage(strip.scaled(128, 32))
def run(self):
    """Worker loop: open the uEye camera and stream scaled frames.

    Emits ``errored``/``stopped`` and returns early if OpenCV or the camera
    is unavailable. Otherwise loops: checks the stop flag under
    ``self._update_mutex``, fetches a frame, converts it to a QImage scaled
    to (``self._width``, ``self._height``) keeping aspect ratio, and emits
    it via ``pixmapReady``. Exits cleanly when stopped or when the camera
    stops producing frames.
    """
    try:
        import cv2
    except ImportError as e:
        self.errored.emit(str(e))
        self.stopped.emit()
        return
    try:
        ueye = UEye(self._cam_id, self._image_width, self._image_height)
    except CameraException as e:
        self.errored.emit(str(e))
        self.stopped.emit()
        return
    # BUG FIX: the original called self._ueye.start_video_capture(), but
    # the camera object is the local 'ueye' — self._ueye is never assigned
    # in this method, so that call would raise AttributeError.
    ueye.start_video_capture()
    while True:
        try:
            self._update_mutex.lock()
            if self._stopped:
                ueye.close()
                del ueye
                self.stopped.emit()
                return
        finally:
            self._update_mutex.unlock()
        frame = ueye.get_video_frame()
        if frame is None:
            # Camera stopped delivering frames; shut down cleanly.
            ueye.close()
            del ueye
            self.stopped.emit()
            return
        rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_qt_format = QImage(
            rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888
        )
        try:
            # Guard reads of _width/_height, which the GUI thread may update.
            self._update_mutex.lock()
            p = convert_to_qt_format.scaled(
                self._width, self._height, Qt.KeepAspectRatio
            )
        finally:
            self._update_mutex.unlock()
        self.pixmapReady.emit(p)
def show_image(self, img):
    """Display a BGR OpenCV image in this label, preserving aspect ratio.

    Resets the label to 200x200, widens it by the image's width/height
    ratio, scales the image to the label, and notifies ``self.node`` of
    the shape change. Silently returns when *img* cannot be converted.
    """
    self.resize(200, 200)
    try:
        rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    except cv2.error:
        # Not a convertible image (wrong shape/dtype); leave label as-is.
        return
    h, w, ch = rgb_image.shape
    bytes_per_line = ch * w
    qt_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888)
    proportion = qt_image.width() / qt_image.height()
    # BUG FIX: resize() requires ints; width() * proportion is a float and
    # raises TypeError in PyQt.
    self.resize(int(self.width() * proportion), self.height())
    qt_image = qt_image.scaled(self.width(), self.height())
    # BUG FIX: QPixmap has no QImage constructor overload; convert with
    # QPixmap.fromImage().
    self.setPixmap(QPixmap.fromImage(qt_image))
    self.node.update_shape()