def test_image_to_bitmap(self):
    """image_to_bitmap should wrap a QImage in a QPixmap."""
    source = QImage(32, 64, QImage.Format.Format_RGB32)
    source.fill(QColor(0x44, 0x88, 0xcc))
    result = image_to_bitmap(source)
    self.assertIsInstance(result, QPixmap)
def convert_bitmap(image, width=0, height=0):
    """Convert *image* to a QPixmap, optionally scaling the result.

    Parameters
    ----------
    image : ImageResource, PIL.Image.Image, or ndarray
        The source image.  Arrays with fewer than 2 dimensions yield an
        empty pixmap.
    width, height : int
        Target size in pixels; 0 leaves that dimension unconstrained.
        When only one is given the other follows the aspect ratio.

    Returns
    -------
    QPixmap
    """
    if isinstance(image, ImageResource):
        pix = traitsui_convert_bitmap(image)
    elif isinstance(image, PILImage.Image):
        try:
            data = image.tostring('raw', 'RGBA')
        except (NotImplementedError, AttributeError):
            # Pillow first deprecated Image.tostring (raising
            # NotImplementedError) and later removed it entirely
            # (AttributeError); tobytes is the modern spelling.
            data = image.tobytes('raw', 'RGBA')
        im = QImage(data, image.size[0], image.size[1],
                    QImage.Format_ARGB32)
        # Qt stores ARGB32 as BGRA in memory; swap R and B.
        pix = QPixmap.fromImage(QImage.rgbSwapped(im))
    else:
        # Assume a numpy array.
        s = image.shape
        if len(s) >= 2:
            pix = QPixmap.fromImage(array2qimage(image))
        else:
            pix = QPixmap()

    if pix:
        # Exactly one scaling path applies; scaled(w, h) already yields
        # the requested height, so re-scaling afterwards is redundant.
        if width > 0 and height > 0:
            pix = pix.scaled(width, height)
        elif width > 0:
            pix = pix.scaledToWidth(width)
        elif height > 0:
            pix = pix.scaledToHeight(height)
    return pix
def test_image_to_array_rgba(self):
    """An ARGB32 QImage converts to an (H, W, 4) uint8 RGBA array."""
    source = QImage(32, 64, QImage.Format.Format_ARGB32)
    source.fill(QColor(0x44, 0x88, 0xcc, 0xee))
    result = image_to_array(source)
    self.assertEqual(result.shape, (64, 32, 4))
    self.assertEqual(result.dtype, np.dtype('uint8'))
    # Every pixel carries the fill colour, channel by channel.
    for channel, expected in enumerate((0x44, 0x88, 0xcc, 0xee)):
        self.assertTrue(np.all(result[:, :, channel] == expected))
def _update(self):
    """Refresh the control's pixmap from the current value's image data."""
    if not self.value:
        return
    img = self.value.get_image_data()
    if img is None:
        return
    shape = img.shape
    if not shape:
        return
    # Height is shape[0], width is shape[1].
    qimg = QImage(img, shape[1], shape[0], QImage.Format_RGB32)
    if self.swap:
        # Swap the R and B channels when requested.
        qimg = QImage.rgbSwapped(qimg)
    self.control.setPixmap(QPixmap.fromImage(qimg))
def convert_bitmap(image, width=0, height=0):
    """Convert *image* to a QPixmap, optionally scaling the result.

    Parameters
    ----------
    image : ImageResource, PIL.Image.Image, or ndarray
        The source image.  Arrays with fewer than 2 dimensions yield an
        empty pixmap.
    width, height : int
        Target size in pixels; 0 leaves that dimension unconstrained.
        When only one is given the other follows the aspect ratio.

    Returns
    -------
    QPixmap
    """
    if isinstance(image, ImageResource):
        pix = traitsui_convert_bitmap(image)
    elif isinstance(image, PILImage.Image):
        try:
            data = image.tostring('raw', 'RGBA')
        except (NotImplementedError, AttributeError):
            # Pillow first deprecated Image.tostring (raising
            # NotImplementedError) and later removed it entirely
            # (AttributeError); tobytes is the modern spelling.
            data = image.tobytes('raw', 'RGBA')
        im = QImage(data, image.size[0], image.size[1],
                    QImage.Format_ARGB32)
        # Qt stores ARGB32 as BGRA in memory; swap R and B.
        pix = QPixmap.fromImage(QImage.rgbSwapped(im))
    else:
        # Assume a numpy array.
        s = image.shape
        if len(s) >= 2:
            pix = QPixmap.fromImage(array2qimage(image))
        else:
            pix = QPixmap()

    if pix:
        # Exactly one scaling path applies; scaled(w, h) already yields
        # the requested height, so re-scaling afterwards is redundant.
        if width > 0 and height > 0:
            pix = pix.scaled(width, height)
        elif width > 0:
            pix = pix.scaledToWidth(width)
        elif height > 0:
            pix = pix.scaledToHeight(height)
    return pix
def QImage_from_np(image):
    """Create a QImage from an (H, W, 4) numpy array.

    The array is cast to ``uint8``; values must already lie in [0, 255].
    A reference to the backing array is attached to the returned QImage,
    since QImage does not copy the buffer it is given.

    Parameters
    ----------
    image : ndarray
        An H x W x 4 array of pixel values.

    Returns
    -------
    QImage
        A Format_RGB32 image sharing the (kept-alive) array's memory.

    Raises
    ------
    ValueError
        If values exceed 255 or the array is not 4-channel.
    """
    # Raise instead of assert so validation survives ``python -O``.
    if np.max(image) > 255:
        raise ValueError("image values must not exceed 255")
    image8 = image.astype(np.uint8, order='C', casting='unsafe')
    height, width, colors = image8.shape
    if colors != 4:
        # Format_RGB32 is a 4-bytes-per-pixel layout; a 3-channel buffer
        # would be reinterpreted incorrectly (and read out of bounds).
        raise ValueError("expected a 4-channel (H, W, 4) array")
    bytes_per_line = 4 * width
    qimage = QImage(image8.data, width, height, bytes_per_line,
                    QImage.Format_RGB32)
    # BUG FIX: keep the array alive for as long as the QImage exists;
    # previously image8 could be garbage-collected, leaving the QImage
    # pointing at freed memory.
    qimage._numpy_data = image8
    return qimage
def array_to_image(array):
    """ Convert a numpy array to a QImage.

    This copies the data before passing it to Qt.

    Parameters
    ----------
    array : ndarray
        An N x M x {3, 4} array of unsigned 8-bit ints.  The image
        format is assumed to be RGB or RGBA, based on the shape.

    Returns
    -------
    image : QImage
        The QImage created from the data.  The pixel format is
        QImage.Format.Format_RGB32 for RGB input and
        QImage.Format.Format_ARGB32 for RGBA input.

    Raises
    ------
    ValueError
        If the array is not 3-dimensional with 3 or 4 channels.
    """
    import numpy as np

    if array.ndim != 3:
        raise ValueError("Array must be either RGB or RGBA values.")

    height, width, channels = array.shape
    data = np.empty((height, width, 4), dtype='uint8')

    # Qt's 32-bit formats store pixels as BGRA in memory on little-endian
    # hosts, so swap the R and B channels while widening to 4 channels.
    # Selecting the format here removes the duplicated channel check the
    # previous version performed after building the buffer.
    if channels == 3:
        data[:, :, [2, 1, 0]] = array
        data[:, :, 3] = 0xff
        qt_format = QImage.Format.Format_RGB32
    elif channels == 4:
        data[:, :, [2, 1, 0, 3]] = array
        qt_format = QImage.Format.Format_ARGB32
    else:
        raise ValueError("Array must be either RGB or RGBA values.")

    bytes_per_line = 4 * width
    image = QImage(data.data, width, height, bytes_per_line, qt_format)

    # keep a reference to the array to ensure underlying data is available
    image._numpy_data = data
    return image
def set_tile(self, image):
    """Append *image* (a PIL RGB image) as a new pixmap tile and repaint.

    Parameters
    ----------
    image : PIL.Image.Image
        Source image in RGB mode.
    """
    data = image.tobytes('raw', 'RGB')
    width, height = image.size
    # BUG FIX: pass the scanline stride explicitly.  Without it QImage
    # assumes each row is 32-bit aligned, which skews/corrupts tiles
    # whose width * 3 is not a multiple of 4.
    im = QImage(data, width, height, width * 3, QImage.Format_RGB888)
    pix = QPixmap.fromImage(im)
    self._pix_maps.append(pix)
    self.update()
def present(self, frame):
    """Receive a video *frame*, convert it to a QImage, and emit it.

    Parameters
    ----------
    frame : QVideoFrame
        The frame delivered by the video surface.

    Returns
    -------
    bool
        Always True, so the surface keeps delivering frames.
    """
    cloned_frame = QVideoFrame(frame)
    cloned_frame.map(QAbstractVideoBuffer.ReadOnly)
    try:
        image = QImage(
            cloned_frame.bits(),
            cloned_frame.width(),
            cloned_frame.height(),
            cloned_frame.bytesPerLine(),
            QVideoFrame.imageFormatFromPixelFormat(
                cloned_frame.pixelFormat()),
        )
        # Copy the pixels before emitting: the QImage above only
        # references the mapped buffer, which is invalid after unmap().
        self.frameAvailable.emit(image.copy())
    finally:
        # BUG FIX: the frame was mapped but never unmapped, leaking the
        # mapping on every presented frame.
        cloned_frame.unmap()
    return True
def __init__(self, parent=None, image_func=None):
    """Initialise the widget, optionally installing an image-transform
    callback ``image_func(image, bbox) -> (image, ndarray)``."""
    super().__init__(parent)
    self.image = QImage()
    self._np_image = np.zeros(shape=(0, 0, 4))
    self.painter = None
    self.resizeEvent(None)
    if image_func is not None:
        self.image_func = image_func
    else:
        # Default: pass the image through untouched and reuse the empty
        # placeholder rather than building an ndarray version.
        def _passthrough(image, bbox):
            return image, self._np_image

        self.image_func = _passthrough
def __init__(self, parent=None, image_func=None):
    """Initialise the widget, optionally installing an image-transform
    callback ``image_func(image, bbox) -> (image, ndarray)``."""
    import numpy as np
    super().__init__(parent)
    self.image = QImage()
    self._np_image = np.zeros(shape=(0, 0, 4))
    self.painter = None
    self.resizeEvent(None)
    if image_func is None:
        # BUG FIX: the previous code read
        #     self.image_func = lambda image, bbox: image, self._np_image
        # which, by operator precedence, assigned the TUPLE
        # (lambda, self._np_image) instead of a callable returning a
        # pair.  Parenthesise the lambda body so image_func is callable
        # and returns (image, ndarray) as callers expect.
        self.image_func = lambda image, bbox: (image, self._np_image)
    else:
        self.image_func = image_func
def test_resize_image_expand(self):
    """keep_expand scales until both dimensions cover the target box."""
    source = QImage(32, 64, QImage.Format.Format_RGB32)
    source.fill(QColor(0x44, 0x88, 0xcc))
    resized = resize_image(source, (128, 128), AspectRatio.keep_expand)
    self.assertIsInstance(resized, QImage)
    # A 1:2 image expanded to cover 128x128 becomes 128x256.
    self.assertEqual((resized.width(), resized.height()), (128, 256))
def test_resize_image_smooth(self):
    """Smooth-mode resizing produces exactly the requested size."""
    source = QImage(32, 64, QImage.Format.Format_RGB32)
    source.fill(QColor(0x44, 0x88, 0xcc))
    resized = resize_image(source, (128, 128), mode=ScaleMode.smooth)
    self.assertIsInstance(resized, QImage)
    self.assertEqual((resized.width(), resized.height()), (128, 128))
def set_frame(self):
    """Grab the next frame from the capture device and show it.

    Does nothing when the capture yields no frame (end of stream,
    disconnected device) instead of crashing.
    """
    ok, data = self.cap.read()
    # BUG FIX: the success flag was ignored; a failed read returns
    # (False, None) and previously raised AttributeError on None.shape.
    if not ok or data is None:
        return
    height, width = data.shape[:2]
    # Pass the row stride explicitly: QImage otherwise assumes 32-bit
    # aligned scanlines, which skews frames whose width * 3 is not a
    # multiple of 4.
    im = QImage(data, width, height, data.strides[0],
                QImage.Format_RGB888)
    # Capture data is BGR-ordered; swap to RGB for display.
    pix = QPixmap.fromImage(QImage.rgbSwapped(im))
    self.label.setPixmap(pix)
def test_image_to_array_bad(self):
    """Unsupported pixel formats (RGB30) must raise ValueError."""
    source = QImage(32, 64, QImage.Format.Format_RGB30)
    source.fill(QColor(0x44, 0x88, 0xcc))
    with self.assertRaises(ValueError):
        image_to_array(source)