def __init__(self, parent, width=300, height=200):
    QWidget.__init__(self, parent)
    self.image = QImage(width, height, QImage.Format_Mono)
    self.image.setColor(0, qRgb(0, 0, 0))
    self.image.setColor(1, qRgb(255, 255, 255))
    self.scribble = 0
    self.clearScreen()
def core_render_test2(self, i, j, ray_dir_norm, ray_dir):
    '''
    This method plainly displays some camera ray intersections with any
    polygon in the scene. No optimisation here, just a brute-force approach.
    '''
    if j % 10 == 0 and i % 10 == 0:
        # display to screen every 10 lines, 10 pixels apart.
        tmp_isect_param = self.intersectRayTriangles(self.__world_origin,
                                                     ray_dir_norm)
        if tmp_isect_param is None:
            self.__image.setPixel(i, j, qRgb(0, 0, 0))
        else:
            self.__image.setPixel(i, j, qRgb(255, 255, 0))
            # position = self.__world_origin, orientation = ray_dir_norm
            intersections_pos = [
                self.__world_origin[0] + ray_dir_norm[0] * tmp_isect_param,
                self.__world_origin[1] + ray_dir_norm[1] * tmp_isect_param,
                self.__world_origin[2] + ray_dir_norm[2] * tmp_isect_param
            ]
            # fire line_created signal : payload -> line origin in space, line direction, line type
            self.emit(self.__SIGNAL_LineCreated, self.__world_origin,
                      intersections_pos, QString('p'))
            # fire inters_created signal : payload -> position in space, color
            self.emit(self.__SIGNAL_IntersCreated, intersections_pos,
                      [0, 0, 255])
            # fire vector_created signal : payload -> vector's origin in space, vector direction, vector's type (o: outwards, i: inwards)
            self.emit(self.__SIGNAL_VectorCreated, intersections_pos,
                      ray_dir_norm, QString('i'))
def update_properties(self):
    ## Mostly copied from OWScatterPlotGraph
    if not self.plot():
        return
    if not self.rect:
        x, y = self.axes()
        self.rect = self.plot().data_rect_for_axes(x, y)
    s = self.graph_transform().mapRect(self.rect).size().toSize()
    if not s.isValid():
        return
    rx = s.width()
    ry = s.height()
    rx -= rx % self.granularity
    ry -= ry % self.granularity
    p = self.graph_transform().map(QPointF(0, 0)) - \
        self.graph_transform().map(self.rect.topLeft())
    p = p.toPoint()
    ox = p.x()
    oy = -p.y()
    if self.classifier.classVar.is_continuous:
        imagebmp = orangeom.potentialsBitmap(self.classifier, rx, ry, ox, oy,
                                             self.granularity, self.scale)
        palette = [
            qRgb(255. * i / 255., 255. * i / 255., 255 - (255. * i / 255.))
            for i in range(255)
        ] + [qRgb(255, 255, 255)]
    else:
        imagebmp, nShades = orangeom.potentialsBitmap(
            self.classifier, rx, ry, ox, oy, self.granularity, self.scale,
            self.spacing)
        palette = []
        sortedClasses = get_variable_values_sorted(
            self.classifier.domain.classVar)
        for cls in self.classifier.classVar.values:
            color = self.plot().discPalette.getRGB(sortedClasses.index(cls))
            towhite = [255 - c for c in color]
            for s in range(nShades):
                si = 1 - float(s) / nShades
                palette.append(qRgb(*tuple([color[i] + towhite[i] * si
                                            for i in (0, 1, 2)])))
        palette.extend([qRgb(255, 255, 255)
                        for i in range(256 - len(palette))])
    self.potentialsImage = QImage(imagebmp, rx, ry, QImage.Format_Indexed8)
    self.potentialsImage.setColorTable(
        ColorPaletteDlg.signedPalette(palette)
        if qVersion() < "4.5" else palette)
    self.potentialsImage.setNumColors(256)
    self.pixmap_item.setPixmap(QPixmap.fromImage(self.potentialsImage))
    self.pixmap_item.setPos(self.graph_transform().map(self.rect.bottomLeft()))
def core_render_test1(self, i, j, ray_dir_norm, ray_dir):
    '''
    This method just "renders" plain vector directions from the center of
    the camera. No fancy user controls, just a sweep.
    '''
    ff = 255
    a = 1
    h = 0.5
    intersections_pos = [
        self.__world_origin[0] + ray_dir[0],
        self.__world_origin[1] + ray_dir[1],
        self.__world_origin[2] + ray_dir[2]
    ]
    self.__image.setPixel(
        i, j,
        qRgb((ff * (a + ray_dir[0]) * h), (ff * (a + ray_dir[1]) * h), 0))
    if j % 100 == 0 and i % 100 == 0:
        # display to screen every 100 lines, 100 pixels apart.
        # fire line_created signal : payload -> line origin in space, line direction, line type
        # position = self.__world_origin, orientation = world_ray
        self.emit(self.__SIGNAL_LineCreated, self.__world_origin, ray_dir,
                  QString('o'))
        # fire vector_created signal : payload -> vector's origin in space, vector direction, vector's type (o: outwards, i: inwards)
        self.emit(self.__SIGNAL_VectorCreated, self.__world_origin, ray_dir,
                  QString('o'))
        # fire inters_created signal : payload -> position in space, color
        self.emit(self.__SIGNAL_IntersectCreated, intersections_pos,
                  [0, 0, ff])
def save(output_filename, labels, palette, dpi, options):
    '''Save the label/palette pair out as an indexed PNG image.

    This optionally saturates the palette by mapping the smallest color
    component to zero and the largest one to 255, and also optionally sets
    the background color to pure white.
    '''
    if not options.quiet:
        print(' saving {}...'.format(output_filename))
    if options.saturate:
        palette = palette.astype(np.float32)
        pmin = palette.min()
        pmax = palette.max()
        palette = 255 * (palette - pmin) / (pmax - pmin)
        palette = palette.astype(np.uint8)
    if options.white_bg:
        palette = palette.copy()
        palette[0] = (255, 255, 255)
    #output_img = Image.fromarray(labels, 'P')
    #QImage.fromData(QByteArray, Format)
    #output_img.putpalette(palette.flatten())
    #output_img.save(output_filename, dpi=dpi)
    QtImage = q2n.gray2qimage(labels, False)
    nestedColorMap = palette
    colors = []
    for color in nestedColorMap:
        r, g, b = color
        colors.append(qRgb(r, g, b))
    QtImage.setColorTable(colors)
    QtImage.save(output_filename)
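# A small worked example of the saturation step above (my illustration, not
# part of the original code): with pmin = 40 and pmax = 200, a palette
# component of 120 maps to 255 * (120 - 40) / (200 - 40) = 127.5, which the
# uint8 cast truncates to 127, while 40 maps to 0 and 200 maps to 255.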
def updateMask(control_image_path, rendered_image_path, mask_image_path):
    control_image = imageFromPath(control_image_path)
    if not control_image:
        error('Could not read control image {}'.format(control_image_path))

    rendered_image = imageFromPath(rendered_image_path)
    if not rendered_image:
        error('Could not read rendered image {}'.format(rendered_image_path))

    if not rendered_image.width() == control_image.width() or \
            not rendered_image.height() == control_image.height():
        print('Size mismatch - control image is {}x{}, rendered image is {}x{}'.format(
            control_image.width(), control_image.height(),
            rendered_image.width(), rendered_image.height()))

    max_width = min(rendered_image.width(), control_image.width())
    max_height = min(rendered_image.height(), control_image.height())

    # read current mask, if it exists
    mask_image = imageFromPath(mask_image_path)
    if mask_image.isNull():
        print 'Mask image does not exist, creating {}'.format(mask_image_path)
        mask_image = QImage(control_image.width(), control_image.height(),
                            QImage.Format_ARGB32)
        mask_image.fill(QColor(0, 0, 0))

    # loop through pixels in rendered image and compare
    mismatch_count = 0
    linebytes = max_width * 4
    for y in xrange(max_height):
        control_scanline = control_image.constScanLine(y).asstring(linebytes)
        rendered_scanline = rendered_image.constScanLine(y).asstring(linebytes)
        mask_scanline = mask_image.scanLine(y).asstring(linebytes)
        for x in xrange(max_width):
            currentTolerance = qRed(
                struct.unpack('I', mask_scanline[x * 4:x * 4 + 4])[0])
            if currentTolerance == 255:
                # ignore pixel
                continue
            expected_rgb = struct.unpack('I',
                                         control_scanline[x * 4:x * 4 + 4])[0]
            rendered_rgb = struct.unpack('I',
                                         rendered_scanline[x * 4:x * 4 + 4])[0]
            difference = colorDiff(expected_rgb, rendered_rgb)
            if difference > currentTolerance:
                # update mask image
                mask_image.setPixel(x, y,
                                    qRgb(difference, difference, difference))
                mismatch_count += 1

    if mismatch_count:
        # update mask
        mask_image.save(mask_image_path, "png")
        print 'Updated {} pixels in {}'.format(mismatch_count, mask_image_path)
    else:
        print 'No mismatches in {}'.format(mask_image_path)
def update_properties(self):
    ## Mostly copied from OWScatterPlotGraph
    if not self.plot():
        return
    if not self.rect:
        x, y = self.axes()
        self.rect = self.plot().data_rect_for_axes(x, y)
    s = self.graph_transform().mapRect(self.rect).size().toSize()
    if not s.isValid():
        return
    rx = s.width()
    ry = s.height()
    rx -= rx % self.granularity
    ry -= ry % self.granularity
    p = self.graph_transform().map(QPointF(0, 0)) - \
        self.graph_transform().map(self.rect.topLeft())
    p = p.toPoint()
    ox = p.x()
    oy = -p.y()
    if isinstance(self.classifier.classVar, ContinuousVariable):
        imagebmp = orangeom.potentialsBitmap(self.classifier, rx, ry, ox, oy,
                                             self.granularity, self.scale)
        palette = [qRgb(255.0 * i / 255.0, 255.0 * i / 255.0,
                        255 - (255.0 * i / 255.0))
                   for i in range(255)] + [qRgb(255, 255, 255)]
    else:
        imagebmp, nShades = orangeom.potentialsBitmap(
            self.classifier, rx, ry, ox, oy, self.granularity, self.scale,
            self.spacing)
        palette = []
        sortedClasses = get_variable_values_sorted(
            self.classifier.domain.classVar)
        for cls in self.classifier.classVar.values:
            color = self.plot().discPalette.getRGB(sortedClasses.index(cls))
            towhite = [255 - c for c in color]
            for s in range(nShades):
                si = 1 - float(s) / nShades
                palette.append(qRgb(*tuple([color[i] + towhite[i] * si
                                            for i in (0, 1, 2)])))
        palette.extend([qRgb(255, 255, 255)
                        for i in range(256 - len(palette))])
    self.potentialsImage = QImage(imagebmp, rx, ry, QImage.Format_Indexed8)
    self.potentialsImage.setColorTable(
        ColorPaletteDlg.signedPalette(palette)
        if qVersion() < "4.5" else palette)
    self.potentialsImage.setNumColors(256)
    self.pixmap_item.setPixmap(QPixmap.fromImage(self.potentialsImage))
    self.pixmap_item.setPos(self.graph_transform().map(self.rect.bottomLeft()))
def __init__(self):
    self.d_hue1 = 0
    self.d_hue2 = 359
    self.d_saturation = 150
    self.d_value = 200
    # both min/max packed-RGB values are recomputed by updateTable()
    self.d_rgbMin = 0
    self.d_rgbMax = 0
    # table of 360 packed-RGB entries, one per degree of hue,
    # filled by updateTable()
    self.d_rgbTable = [0] * 360
    self.updateTable()
def grayscale_pixmap(pixmap):
    from PyQt4.QtGui import qGray, qRgb
    image = pixmap.toImage()
    width = pixmap.width()
    height = pixmap.height()
    for i in range(0, width):
        for j in range(0, height):
            col = image.pixel(i, j)
            gray = qGray(col)
            image.setPixel(i, j, qRgb(gray, gray, gray))
    pixmap = pixmap.fromImage(image)
    return pixmap
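# Usage sketch for grayscale_pixmap() (my illustration; assumes PyQt4 and a
# running QApplication, which QPixmap requires; "gray.png" is a hypothetical
# output path). Note the per-pixel setPixel() loop above is simple but slow
# for large images.
from PyQt4.QtGui import QApplication, QColor, QPixmap
app = QApplication([])
pm = QPixmap(32, 32)
pm.fill(QColor(200, 80, 80))      # a flat reddish pixmap
gray = grayscale_pixmap(pm)       # every pixel becomes its qGray() grey level
gray.save("gray.png")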
def read(fp):
    colorTable = colorTable256([])
    for i in xrange(255, -1, -1):
        chars = fp.read(3)
        assert isinstance(chars, str), \
            "file object passed for 'act.read' must be 'binary mode'"
        if len(chars) < 3:
            break
        r, g, b = map(ord, chars)
        colorTable[i] = qRgb(r, g, b)
    return colorTable
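# Usage sketch for read() (my illustration): an Adobe Color Table style
# palette file holds 256 RGB byte triples, and the file must be opened in
# binary mode so fp.read(3) returns raw bytes (the assert above checks this).
# 'palette.act' is a hypothetical path.
with open('palette.act', 'rb') as fp:
    table = read(fp)    # 256 qRgb values, filled from index 255 down to 0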
def _create_icon(self, color_map_name, image, values):
    """
    :type color_map_name: str
    :type image: QImage
    :type values: np.ndarray
    """
    color_map = ScalarMappable(cmap=color_map_name)
    rgba = color_map.to_rgba(values, bytes=True)
    color_table = [qRgb(c[0], c[1], c[2]) for c in rgba]
    image.setColorTable(color_table)
    return QPixmap.fromImage(image).scaledToWidth(128)
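# Usage sketch for _create_icon() (my illustration; assumes a matplotlib
# colormap name and an Indexed8 QImage whose pixel values act as indices into
# the colour table the method installs, e.g. a 256-wide gradient strip built
# the same way as in the DecorationRole code below):
# values = np.linspace(0.0, 1.0, 256)
# data = np.arange(0, 256, dtype=np.uint8).reshape((1, 256)).repeat(16, 0)
# strip = QImage(data, 256, 16, QImage.Format_Indexed8)
# icon_pixmap = self._create_icon('viridis', strip, values)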
def _column1():
    if role == Qt.DecorationRole:
        continuous_palette = ContinuousPaletteGenerator(*var.colors)
        line = continuous_palette.getRGB(np.arange(0, 1, 1 / 256))
        data = np.arange(0, 256, dtype=np.int8). \
            reshape((1, 256)). \
            repeat(16, 0)
        img = QImage(data, 256, 16, QImage.Format_Indexed8)
        img.setColorCount(256)
        img.setColorTable([qRgb(*x) for x in line])
        img.data = data
        return img
    if role == Qt.ToolTipRole:
        return "{} - {}".format(self._encode_color(var.colors[0]),
                                self._encode_color(var.colors[1]))
    if role == ColorRole:
        return var.colors
def openText(filename):
    colorTable = colorTable256([])
    with io.open(filename, "r") as fp:
        strs = re.split(r",|(?<!,)\s*\n", fp.read(), 256 * 3)
    from iterutils import grouper
    for i, rgb in enumerate(grouper(3, strs)):
        if i >= 256:
            break
        rgb = list(rgb)
        for k, s in enumerate(rgb):
            if s is None:
                rgb[k] = 0
            else:
                s = s.strip()
                if not s:
                    rgb[k] = 0
                else:
                    rgb[k] = int(s)
        colorTable[i] = qRgb(*rgb)
    return colorTable
def __init__(self, vncclient=None, parent=None):
    super(VNCViewer, self).__init__(parent)
    self.setMouseTracking(True)
    self.setFocusPolicy(Qt.WheelFocus)
    #self.setCursor(Qt.BlankCursor)
    self.client = vncclient or parent.client
    self.client.started.connect(self._SH_ClientStarted)
    self.client.finished.connect(self._SH_ClientFinished)
    self.client.imageSizeChanged.connect(self._SH_ImageSizeChanged)
    self.client.imageChanged.connect(self._SH_ImageUpdated)
    self.client.passwordRequested.connect(self._SH_PasswordRequested,
                                          Qt.BlockingQueuedConnection)
    self.colors_8bit = [
        qRgb((i & 0x07) << 5, (i & 0x38) << 2, i & 0xc0) for i in range(256)
    ]
    self.scale = False
    self.view_only = False
    self._client_active = False
    self._has_mouse_over = False
    self._active_keys = ActiveKeys()
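# Worked example of the 8-bit palette layout above (my illustration): the
# index's low three bits drive red, the middle three drive green and the top
# two drive blue, so for index 0xED:
#   red   = (0xED & 0x07) << 5 = 160
#   green = (0xED & 0x38) << 2 = 160
#   blue  =  0xED & 0xC0       = 192
# i.e. colors_8bit[0xED] == qRgb(160, 160, 192).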
def data(self, index, role=Qt.DisplayRole):
    row, col = index.row(), index.column()
    if col == 0:
        return ColorTableModel.data(self, index, role)
    if col > 1:
        return
    var = self.variables[row]
    if role == Qt.DecorationRole:
        continuous_palette = ContinuousPaletteGenerator(*var.colors)
        line = continuous_palette.getRGB(np.arange(0, 1, 1 / 256))
        data = np.arange(0, 256, dtype=np.int8).\
            reshape((1, 256)).\
            repeat(16, 0)
        img = QImage(data, 256, 16, QImage.Format_Indexed8)
        img.setColorCount(256)
        img.setColorTable([qRgb(*x) for x in line])
        img.data = data
        return img
    if role == Qt.ToolTipRole:
        return "{} - {}".format(self._encode_color(var.colors[0]),
                                self._encode_color(var.colors[1]))
    if role == ColorRole:
        return var.colors
def np_to_qimage(np_img, copy=False):
    gray_color_table = [qRgb(i, i, i) for i in range(256)]
    if np_img is None:
        return QImage()
    if np_img.dtype != np.uint8:
        print np_img.dtype
        np.clip(np_img, 0, 255, out=np_img)
        np_img = np_img.astype('uint8')
    if len(np_img.shape) == 2:
        qimg = QImage(np_img.data, np_img.shape[1], np_img.shape[0],
                      np_img.strides[0], QImage.Format_Indexed8)
        qimg.setColorTable(gray_color_table)
        return qimg.copy() if copy else qimg
    elif len(np_img.shape) == 3:
        if np_img.shape[2] == 3:
            qimg = QImage(np_img.data, np_img.shape[1], np_img.shape[0],
                          np_img.strides[0], QImage.Format_RGB888)
            return qimg.copy() if copy else qimg
        elif np_img.shape[2] == 4:
            qimg = QImage(np_img.data, np_img.shape[1], np_img.shape[0],
                          np_img.strides[0], QImage.Format_ARGB32)
            return qimg.copy() if copy else qimg
    raise NotImplementedError
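# Usage sketch for np_to_qimage() (my illustration; assumes PyQt4 and numpy
# are available in this module):
import numpy as np
arr = np.zeros((120, 160), dtype=np.uint8)         # 2-D array -> Format_Indexed8
arr[40:80, 60:100] = 255                           # a white rectangle
qimg_gray = np_to_qimage(arr, copy=True)           # copy=True detaches from arr
rgb_arr = np.zeros((120, 160, 3), dtype=np.uint8)  # H x W x 3 -> Format_RGB888
qimg_rgb = np_to_qimage(rgb_arr, copy=True)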
def create_preview(input_filename, height, options):
    '''
    Create a preview image using the given options.

    :param input_filename: valid filename (string); note that this is not checked here
    :param height: target preview height, passed through to load()
    :param options: Namespace object as returned by args.parse()
    :return: QImage object
    '''
    #print("Loading Preview for:", input_filename, height, options)
    img, dpi = load(input_filename, height)
    samples = sample_pixels(img, options)
    palette = get_palette(samples, options)
    labels = apply_palette(img, palette, options)
    if options.saturate:
        palette = palette.astype(np.float32)
        pmin = palette.min()
        pmax = palette.max()
        palette = 255 * (palette - pmin) / (pmax - pmin)
        palette = palette.astype(np.uint8)
    if options.white_bg:
        palette = palette.copy()
        palette[0] = (255, 255, 255)
    QtImage = q2n.gray2qimage(labels, False)
    nestedColorMap = palette
    colors = []
    for color in nestedColorMap:
        r, g, b = color
        colors.append(qRgb(r, g, b))
    QtImage.setColorTable(colors)
    return QtImage
from sloth.core.exceptions import NotImplementedException
from PyQt4.QtGui import QImage, qRgb
import numpy as np
import random
import colorsys

gray_color_table = [qRgb(i, i, i) for i in range(256)]


def toQImage(im, copy=False):
    if im is None:
        return QImage()
    if im.dtype == np.uint8:
        if len(im.shape) == 2:
            qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0],
                         QImage.Format_Indexed8)
            qim.setColorTable(gray_color_table)
            return qim.copy() if copy else qim
        elif len(im.shape) == 3:
            if im.shape[2] == 3:
                qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0],
                             QImage.Format_RGB888)
                return qim.copy() if copy else qim
            elif im.shape[2] == 4:
                qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0],
                             QImage.Format_ARGB32)
                return qim.copy() if copy else qim
    raise NotImplementedException(
        'no conversion to QImage implemented for given image type '
        '(depth: %s, shape: %s)' % (im.dtype, im.shape))


def gen_colors(s=0.99, v=0.99, h=None, color_space='rgb',
               _golden_ratio_conjugate=0.618033988749895):
    """A generator for random colors such that adjacent colors are as
    distinct as possible.

    Parameters
def rgb(r, g, b):
    # use qRgb to pack the colors, and then turn the resulting long
    # into a negative integer with the same bitpattern.
    return (qRgb(r, g, b) & 0xffffff) - 0x1000000
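# Worked example (my illustration): qRgb(255, 0, 0) is 0xffff0000 (opaque red,
# alpha bits set). Masking with 0xffffff leaves 0x00ff0000 == 16711680, and
# subtracting 0x1000000 (16777216) gives -65536, whose 32-bit two's-complement
# pattern is 0xffff0000 again -- the "negative integer with the same
# bitpattern" the comment above refers to.
# rgb(255, 0, 0) == -65536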
__version__ = '0.1'
__date__ = '2015.04'

import os
import sys
import ctypes

from PyQt4.QtGui import (QImage, qRgb)

LIBPATH = "/usr/local/lib64/"
LIBPATH_W = r'win32'

(L_INSERT, L_COPY, L_CLONE, L_COPY_CLONE) = map(ctypes.c_int, xrange(4))

# B&W Color Table.
_bwCT = [qRgb(255, 255, 255), qRgb(0, 0, 0)]
# Grayscale Color Table.
_grayscaleCT = [qRgb(i, i, i) for i in range(256)]


class BOX(ctypes.Structure):
    """ Leptonica box structure """
    _fields_ = [("x", ctypes.c_int32),
                ("y", ctypes.c_int32),
                ("w", ctypes.c_int32),
                ("h", ctypes.c_int32),
                ("refcount", ctypes.c_uint32)]


BOX_PTR_T = ctypes.POINTER(BOX)
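# Usage sketch for the BOX structure (my illustration): instances can be built
# directly in Python or received from the Leptonica C library through
# BOX_PTR_T pointers.
b = BOX(x=10, y=20, w=100, h=50, refcount=1)
print(b.w * b.h)    # area of the box, 5000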
import sys

from PyQt4.QtCore import QRect, Qt, QPoint
from PyQt4.QtGui import QApplication, QMainWindow, QLabel, qRgb, QImage, QPixmap, QFileDialog, QInputDialog
from ui_mainwindow import Ui_MainWindow
import numpy as np
import pyopencl as cl
from PIL import Image
from timeit import default_timer as timer

# ----------------------------------------------------------------------------------------------------------------------
# http://www.swharden.com/blog/2013-06-03-realtime-image-pixelmap-from-numpy-array-data-in-qt/
GREY_PALETTE = [qRgb(i, i, i) for i in range(256)]
# ----------------------------------------------------------------------------------------------------------------------


class NLMeans:
    def __init__(self, ctx):
        self.ctx = ctx
        self.queue = cl.CommandQueue(ctx)
        self.prg = None
        self.mask = None
        self.ax = 4
        self.sx = 2
        self.a = 1.0
        self.h = 1.0
        self._build_kernel()
        self._build_mask()
def rgb(r, g, b):
    if PYQT_VERSION <= 263172:
        return (qRgb(r, g, b) & 0xffffff) - 0x1000000
    return qRgb(r, g, b)
def clear_image(self):
    self.image.fill(qRgb(0, 0, 0))
    self.update()
layer.setCrs(crs)

render = QgsMapRenderer()
render.setLayerSet([layer.id()])

iwidth = args.width
iheight = int(iwidth * (extent.height() / extent.width()))
print("Image size: %dx%d" % (iwidth, iheight))

dpi = args.dpi
img = QImage(iwidth, iheight, QImage.Format_RGB32)
img.setDotsPerMeterX(dpi / 25.4 * 1000)
img.setDotsPerMeterY(dpi / 25.4 * 1000)
img.fill(qRgb(255, 255, 255))
dpi = img.logicalDpiX()
print("Image DPI: %d" % dpi)

render.setOutputSize(QSize(img.width(), img.height()), dpi)
render.setDestinationCrs(crs)
render.setProjectionsEnabled(True)
render.setMapUnits(crs.mapUnits())
render.setExtent(extent)
print("Scale: %f" % render.scale())

painter = QPainter(img)
painter.setRenderHint(QPainter.Antialiasing)
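# Worked example of the DPI conversion above (my illustration):
# dots-per-metre = dpi / 25.4 mm-per-inch * 1000 mm-per-metre,
# so a 300 dpi request becomes 300 / 25.4 * 1000, about 11811 dots per metre.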
def render(self, image, depth, viewport_scale, stroke_colour):
    # Sort the edges of the polygon by their minimum projected y
    # coordinates, discarding horizontal edges.
    width = image.width()
    height = image.height()
    z_max = 1 << 16
    edges = []
    l = len(self.points)
    for i in range(l):
        pxa, pya = self.projected[i]
        pxa = width / 2 + (pxa * viewport_scale)
        pya = height / 2 - (pya * viewport_scale)
        za = -self.points[i].z
        j = (i + 1) % l
        pxb, pyb = self.projected[j]
        pxb = width / 2 + (pxb * viewport_scale)
        pyb = height / 2 - (pyb * viewport_scale)
        zb = -self.points[j].z
        # Append the starting and finishing y coordinates, the starting
        # x coordinate, the dx/dy gradient of the edge, the starting
        # z coordinate and the dz/dy gradient of the edge.
        if int(pya) < int(pyb):
            edges.append((pya, pyb, pxa, (pxb - pxa) / (pyb - pya),
                          za, (zb - za) / (pyb - pya)))
        elif int(pya) > int(pyb):
            edges.append((pyb, pya, pxb, (pxa - pxb) / (pya - pyb),
                          zb, (za - zb) / (pya - pyb)))

    if not edges:
        return

    edges.sort()
    end_py = edges[-1][1]
    if end_py < 0:
        return

    py1, end_py1, px1, dx1, z1, dz1 = edges.pop(0)
    if py1 >= height:
        return

    py2, end_py2, px2, dx2, z2, dz2 = edges.pop(0)
    py = int(py1)
    if py < py1 or py < py2:
        py += 1

    while py <= end_py and py < height:
        # Retrieve new edges as required.
        if py >= end_py1:
            if not edges:
                break
            py1, end_py1, px1, dx1, z1, dz1 = edges.pop(0)
        if py >= end_py2:
            if not edges:
                break
            py2, end_py2, px2, dx2, z2, dz2 = edges.pop(0)
        if py < 0:
            py += 1
            continue
        # Calculate the starting and finishing x coordinates of the span
        # at the current y coordinate.
        sx1 = px1 + dx1 * (py - py1)
        sx2 = px2 + dx2 * (py - py2)
        # Calculate the starting and finishing z coordinates of the span
        # at the current y coordinate.
        sz1 = z1 + dz1 * (py - py1)
        sz2 = z2 + dz2 * (py - py2)
        # Do not render the span if it lies outside the image or has
        # values that cannot be stored in the depth buffer.
        # Truncate the span if it lies partially within the image.
        if sx1 > sx2:
            sx1, sx2 = sx2, sx1
            sz1, sz2 = sz2, sz1
        # Only calculate a depth gradient for the span if it is more than
        # one pixel wide.
        if sx1 != sx2:
            dz = (sz2 - sz1) / (sx2 - sx1)
        else:
            dz = 0.0
        if sz1 <= 0 and sz2 <= 0:
            py += 1
            continue
        elif sz1 >= z_max and sz2 >= z_max:
            py += 1
            continue
        sx, end_sx = int(sx1), int(sx2)
        if sx < sx1:
            sx += 1
        if sx >= width:
            py += 1
            continue
        elif end_sx < 0:
            py += 1
            continue
        if sx < 0:
            sx = 0
        if end_sx >= width:
            end_sx = width - 1
        # Draw the span.
        while sx <= end_sx:
            sz = sz1 + dz * (sx - sx1)
            if 0 < sz <= depth[int(sx)][int(py)]:
                if self.alpha < 1.0:
                    pixel = image.pixel(sx, py)
                    dr = qRed(pixel)
                    dg = qGreen(pixel)
                    db = qBlue(pixel)
                    r = (1 - self.alpha) * dr + self.alpha * self.red
                    g = (1 - self.alpha) * dg + self.alpha * self.green
                    b = (1 - self.alpha) * db + self.alpha * self.blue
                    image.setPixel(sx, py, qRgb(r, g, b))
                else:
                    depth[int(sx)][int(py)] = sz
                    image.setPixel(sx, py, self.rgba)
            sx += 1
        if stroke_colour:
            if 0 <= sx1 < width and 0 < sz1 <= depth[int(sx1)][int(py)]:
                image.setPixel(sx1, py, stroke_colour)
            if 0 <= sx2 < width and 0 < sz2 <= depth[int(sx2)][int(py)]:
                image.setPixel(sx2, py, stroke_colour)
        py += 1
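# Usage sketch for render() (my illustration; assumes a polygon object `poly`
# exposing this method, and that the depth buffer is indexed as depth[x][y]
# and starts at the z_max value used above):
# img = QImage(320, 240, QImage.Format_RGB32)
# img.fill(qRgb(0, 0, 0))
# depth = [[1 << 16] * img.height() for _ in range(img.width())]
# poly.render(img, depth, viewport_scale=100.0,
#             stroke_colour=qRgb(255, 255, 255))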
def __init__(self, parent):
    super(ImageWidget, self).__init__()
    self.gray_color_table = [qRgb(i, i, i) for i in range(256)]
    self.initWidget()