Example #1
File: iimage.py  Project: yuanjq/idoui
def convertToGray(image):
    grayImg = QImage(image.size(), QImage.Format_ARGB32)
    for i in range(image.width()):
        for j in range(image.height()):
            rgb = image.pixel(i, j)
            gray = qGray(rgb)
            alpha = qAlpha(rgb)
            grayPixel = qRgba(gray, gray, gray, alpha)
            grayImg.setPixel(i, j, grayPixel)
    return grayImg
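convertToGray builds a new ARGB32 image in which every pixel is replaced by its gray value (qGray) while the original alpha channel is kept. A minimal usage sketch, assuming PyQt5; the file names are placeholders, not taken from the yuanjq/idoui project:

from PyQt5.QtGui import QImage, qGray, qAlpha, qRgba

img = QImage("input.png")            # hypothetical input file
if not img.isNull():
    grayImg = convertToGray(img)
    grayImg.save("input_gray.png")   # hypothetical output file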
Example #2
def grayScale(self):
    """
    Public slot to convert the image to gray preserving transparency.
    """
    cmd = IconEditCommand(self, self.tr("Convert to Grayscale"),
                          self.__image)
    for x in range(self.__image.width()):
        for y in range(self.__image.height()):
            col = self.__image.pixel(x, y)
            if col != qRgba(0, 0, 0, 0):
                gray = qGray(col)
                self.__image.setPixel(
                    x, y, qRgba(gray, gray, gray, qAlpha(col)))
    self.update()
    self.setDirty(True)
    self.__undoStack.push(cmd)
    cmd.setAfterImage(self.__image)
Example #3
def colorDiff(c1, c2):
    redDiff = abs(qRed(c1) - qRed(c2))
    greenDiff = abs(qGreen(c1) - qGreen(c2))
    blueDiff = abs(qBlue(c1) - qBlue(c2))
    alphaDiff = abs(qAlpha(c1) - qAlpha(c2))
    return max(redDiff, greenDiff, blueDiff, alphaDiff)
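colorDiff returns the largest per-channel difference over red, green, blue and alpha, i.e. a Chebyshev distance across the four 8-bit channels. A minimal sketch of a tolerance check built on it, assuming PyQt5; the tolerance and the two colours are made-up values for illustration:

from PyQt5.QtGui import qRed, qGreen, qBlue, qAlpha, qRgba

TOLERANCE = 16                        # hypothetical per-channel tolerance
c1 = qRgba(200, 100, 50, 255)
c2 = qRgba(210, 96, 60, 255)
if colorDiff(c1, c2) <= TOLERANCE:    # largest channel delta here is 10
    print("colors match within the tolerance")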
Example #4
def isStretchableMarker(pixel):
    return (qAlpha(pixel) >> 7) & 1
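isStretchableMarker reports whether a pixel's alpha value has its top bit set, i.e. whether the alpha is 128 or more. A minimal sketch that collects marker columns from the top border row of an image, assuming PyQt5; the file name and the 9-patch-style reading of the border are assumptions for illustration only:

from PyQt5.QtGui import QImage, qAlpha

img = QImage("button_border.png")     # hypothetical marker image
stretchable_columns = [x for x in range(img.width())
                       if isStretchableMarker(img.pixel(x, 0))]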
Example #5
    def _generateSceneNode(self, file_name, xz_size, peak_height, base_height,
                           blur_iterations, max_size, lighter_is_higher,
                           use_transparency_model, transmittance_1mm):
        # Build a heightmap mesh from an image: the brightness of each pixel
        # (or, with the transparency model, the wall thickness needed to reach
        # that brightness) determines the height of the corresponding vertex.
        scene_node = SceneNode()

        mesh = MeshBuilder()

        img = QImage(file_name)

        if img.isNull():
            Logger.log("e", "Image is corrupt.")
            return None

        width = max(img.width(), 2)
        height = max(img.height(), 2)
        aspect = height / width

        if img.width() < 2 or img.height() < 2:
            img = img.scaled(width, height, Qt.IgnoreAspectRatio)

        base_height = max(base_height, 0)
        peak_height = max(peak_height, -base_height)

        xz_size = max(xz_size, 1)
        scale_vector = Vector(xz_size, peak_height, xz_size)

        if width > height:
            scale_vector = scale_vector.set(z=scale_vector.z * aspect)
        elif height > width:
            scale_vector = scale_vector.set(x=scale_vector.x / aspect)

        if width > max_size or height > max_size:
            scale_factor = max_size / width
            if height > width:
                scale_factor = max_size / height

            width = int(max(round(width * scale_factor), 2))
            height = int(max(round(height * scale_factor), 2))
            img = img.scaled(width, height, Qt.IgnoreAspectRatio)

        width_minus_one = width - 1
        height_minus_one = height - 1

        Job.yieldThread()

        texel_width = 1.0 / (width_minus_one) * scale_vector.x
        texel_height = 1.0 / (height_minus_one) * scale_vector.z

        height_data = numpy.zeros((height, width), dtype=numpy.float32)

        for x in range(0, width):
            for y in range(0, height):
                qrgb = img.pixel(x, y)
                if use_transparency_model:
                    height_data[y, x] = (
                        0.299 * math.pow(qRed(qrgb) / 255.0, 2.2) +
                        0.587 * math.pow(qGreen(qrgb) / 255.0, 2.2) +
                        0.114 * math.pow(qBlue(qrgb) / 255.0, 2.2))
                else:
                    height_data[y, x] = (
                        0.212655 * qRed(qrgb) + 0.715158 * qGreen(qrgb) +
                        0.072187 * qBlue(qrgb)
                    ) / 255  # fast computation ignoring gamma and degamma

        Job.yieldThread()

        if lighter_is_higher == use_transparency_model:
            height_data = 1 - height_data

        for _ in range(0, blur_iterations):
            copy = numpy.pad(height_data, ((1, 1), (1, 1)), mode="edge")

            height_data += copy[1:-1, 2:]
            height_data += copy[1:-1, :-2]
            height_data += copy[2:, 1:-1]
            height_data += copy[:-2, 1:-1]

            height_data += copy[2:, 2:]
            height_data += copy[:-2, 2:]
            height_data += copy[2:, :-2]
            height_data += copy[:-2, :-2]

            height_data /= 9

            Job.yieldThread()

        if use_transparency_model:
            divisor = 1.0 / math.log(
                transmittance_1mm / 100.0
            )  # log-base doesn't matter here. Precompute this value for faster computation of each pixel.
            min_luminance = (transmittance_1mm / 100.0)**(peak_height -
                                                          base_height)
            for (y, x) in numpy.ndindex(height_data.shape):
                mapped_luminance = min_luminance + (
                    1.0 - min_luminance) * height_data[y, x]
                height_data[y, x] = base_height + divisor * math.log(
                    mapped_luminance
                )  # use same base as a couple lines above this
        else:
            height_data *= scale_vector.y
            height_data += base_height

        if img.hasAlphaChannel():
            for x in range(0, width):
                for y in range(0, height):
                    height_data[y, x] *= qAlpha(img.pixel(x, y)) / 255.0

        heightmap_face_count = 2 * height_minus_one * width_minus_one
        total_face_count = heightmap_face_count + (width_minus_one * 2) * (
            height_minus_one * 2) + 2

        mesh.reserveFaceCount(total_face_count)

        # initialize to texel space vertex offsets.
        # 6 is for 6 vertices for each texel quad.
        heightmap_vertices = numpy.zeros(
            (width_minus_one * height_minus_one, 6, 3), dtype=numpy.float32)
        heightmap_vertices = heightmap_vertices + numpy.array(
            [[[0, base_height, 0], [0, base_height, texel_height],
              [texel_width, base_height, texel_height],
              [texel_width, base_height, texel_height],
              [texel_width, base_height, 0], [0, base_height, 0]]],
            dtype=numpy.float32)

        offsetsz, offsetsx = numpy.mgrid[0:height_minus_one, 0:width - 1]
        offsetsx = numpy.array(offsetsx, numpy.float32).reshape(
            -1, 1) * texel_width
        offsetsz = numpy.array(offsetsz, numpy.float32).reshape(
            -1, 1) * texel_height

        # offsets for each texel quad
        heightmap_vertex_offsets = numpy.concatenate([
            offsetsx,
            numpy.zeros((offsetsx.shape[0], offsetsx.shape[1]),
                        dtype=numpy.float32), offsetsz
        ], 1)
        heightmap_vertices += heightmap_vertex_offsets.repeat(6, 0).reshape(
            -1, 6, 3)

        # apply height data to y values
        heightmap_vertices[:, 0, 1] = heightmap_vertices[:, 5, 1] = \
            height_data[:-1, :-1].reshape(-1)
        heightmap_vertices[:, 1, 1] = height_data[1:, :-1].reshape(-1)
        heightmap_vertices[:, 2, 1] = heightmap_vertices[:, 3, 1] = \
            height_data[1:, 1:].reshape(-1)
        heightmap_vertices[:, 4, 1] = height_data[:-1, 1:].reshape(-1)

        heightmap_indices = numpy.array(
            numpy.mgrid[0:heightmap_face_count * 3],
            dtype=numpy.int32).reshape(-1, 3)

        mesh._vertices[0:(heightmap_vertices.size // 3), :] = \
            heightmap_vertices.reshape(-1, 3)
        mesh._indices[0:(heightmap_indices.size // 3), :] = heightmap_indices

        mesh._vertex_count = heightmap_vertices.size // 3
        mesh._face_count = heightmap_indices.size // 3

        geo_width = width_minus_one * texel_width
        geo_height = height_minus_one * texel_height

        # bottom
        mesh.addFaceByPoints(0, 0, 0, 0, 0, geo_height, geo_width, 0,
                             geo_height)
        mesh.addFaceByPoints(geo_width, 0, geo_height, geo_width, 0, 0, 0, 0,
                             0)

        # north and south walls
        for n in range(0, width_minus_one):
            x = n * texel_width
            nx = (n + 1) * texel_width

            hn0 = height_data[0, n]
            hn1 = height_data[0, n + 1]

            hs0 = height_data[height_minus_one, n]
            hs1 = height_data[height_minus_one, n + 1]

            mesh.addFaceByPoints(x, 0, 0, nx, 0, 0, nx, hn1, 0)
            mesh.addFaceByPoints(nx, hn1, 0, x, hn0, 0, x, 0, 0)

            mesh.addFaceByPoints(x, 0, geo_height, nx, 0, geo_height, nx, hs1,
                                 geo_height)
            mesh.addFaceByPoints(nx, hs1, geo_height, x, hs0, geo_height, x, 0,
                                 geo_height)

        # west and east walls
        for n in range(0, height_minus_one):
            y = n * texel_height
            ny = (n + 1) * texel_height

            hw0 = height_data[n, 0]
            hw1 = height_data[n + 1, 0]

            he0 = height_data[n, width_minus_one]
            he1 = height_data[n + 1, width_minus_one]

            mesh.addFaceByPoints(0, 0, y, 0, 0, ny, 0, hw1, ny)
            mesh.addFaceByPoints(0, hw1, ny, 0, hw0, y, 0, 0, y)

            mesh.addFaceByPoints(geo_width, 0, y, geo_width, 0, ny, geo_width,
                                 he1, ny)
            mesh.addFaceByPoints(geo_width, he1, ny, geo_width, he0, y,
                                 geo_width, 0, y)

        mesh.calculateNormals(fast=True)

        scene_node.setMeshData(mesh.build())

        return scene_node
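In the transparency branch above, each pixel's linear luminance is turned into a material thickness by inverting an exponential attenuation model: a layer d millimetres thick transmits roughly (transmittance_1mm / 100) ** d of the light, so the height stored per pixel is base_height + log(mapped_luminance) / log(transmittance_1mm / 100). A standalone sketch of that mapping, with made-up parameter values that are not taken from the project above:

import math

def luminance_to_thickness(luminance, transmittance_1mm=80.0,
                           peak_height=10.0, base_height=0.4):
    # Same mapping as the use_transparency_model branch: remap the
    # luminance from [0, 1] into [min_luminance, 1.0] and solve
    # t ** (thickness - base_height) == mapped_luminance for thickness.
    t = transmittance_1mm / 100.0
    min_luminance = t ** (peak_height - base_height)
    mapped = min_luminance + (1.0 - min_luminance) * luminance
    return base_height + math.log(mapped) / math.log(t)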
Example #6
def draw_icon(icon, rect, painter, icon_mode, shadow=False):
    dip_offset = QPoint(1, -2)

    pixname = "icon {0} {1} {2}".format(icon.cacheKey(), icon_mode,
                                        rect.height())
    # Reuse a previously rendered pixmap from the global cache when possible;
    # otherwise render the icon (grayed out for the disabled mode) with a
    # drop shadow and store the result under `pixname`.
    cache = QPixmapCache.find(pixname)
    if cache is None:
        pix = icon.pixmap(rect.size())
        device_pixel_ratio = pix.devicePixelRatio()
        radius = 3 * device_pixel_ratio
        offset = dip_offset * device_pixel_ratio
        cache = QPixmap(pix.size() + QSize(radius * 2, radius * 2))
        cache.fill(Qt.transparent)

        cache_painter = QPainter(cache)

        if icon_mode == QIcon.Disabled:
            # Desaturate the icon pixel by pixel, preserving its alpha
            # channel (pixel()/setPixel() replace the scanline pointer
            # arithmetic, which has no direct Python equivalent).
            im = pix.toImage().convertToFormat(QImage.Format_ARGB32)
            for y in range(0, im.height()):
                for x in range(0, im.width()):
                    pixel = im.pixel(x, y)
                    intensity = qGray(pixel)
                    im.setPixel(x, y, qRgba(intensity, intensity, intensity,
                                            qAlpha(pixel)))
            pix = QPixmap.fromImage(im)

        # Draw shadow
        tmp = QImage(pix.size() + QSize(radius * 2, radius * 2),
                     QImage.Format_ARGB32_Premultiplied)
        tmp.fill(Qt.transparent)

        tmp_painter = QPainter(tmp)
        tmp_painter.setCompositionMode(QPainter.CompositionMode_Source)
        tmp_painter.drawPixmap(
            QRect(radius, radius, pix.width(), pix.height()), pix)
        tmp_painter.end()

        # Blur the alpha channel.  Note that no blur is actually applied in
        # this snippet: `blurred` is created but never drawn into, and the
        # `tmp = blurred` swap below stays commented out, so the drop shadow
        # is rendered unblurred.
        blurred = QImage(tmp.size(), QImage.Format_ARGB32_Premultiplied)
        blur_painter = QPainter(blurred)
        blur_painter.end()

        # tmp = blurred

        tmp_painter.begin(tmp)
        tmp_painter.setCompositionMode(QPainter.CompositionMode_SourceIn)
        tmp_painter.fillRect(tmp.rect(), QColor(0, 0, 0, 150))
        tmp_painter.end()

        tmp_painter.begin(tmp)
        tmp_painter.setCompositionMode(QPainter.CompositionMode_SourceIn)
        tmp_painter.fillRect(tmp.rect(), QColor(0, 0, 0, 150))
        tmp_painter.end()

        # Draw the blurred drop shadow
        cache_painter.drawImage(
            QRect(0, 0,
                  cache.rect().width(),
                  cache.rect().height()), tmp)
        # Draw the actual pixmap
        cache_painter.drawPixmap(
            QRect(
                QPoint(radius, radius) + offset,
                QSize(pix.width(), pix.height())), pix)
        cache_painter.end()
        cache.setDevicePixelRatio(device_pixel_ratio)
        QPixmapCache.insert(pixname, cache)

    target_rect = cache.rect()
    target_rect.setSize(target_rect.size() / cache.devicePixelRatio())
    target_rect.moveCenter(rect.center() - dip_offset)
    painter.drawPixmap(target_rect, cache)
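A hypothetical call site for draw_icon, for example from a QWidget.paintEvent() override; the theme icon name and the 24x24 target rectangle are assumptions for illustration, not part of the original snippet:

from PyQt5.QtCore import QRect
from PyQt5.QtGui import QIcon, QPainter

def paintEvent(self, event):                  # inside some QWidget subclass
    painter = QPainter(self)
    icon = QIcon.fromTheme("document-open")   # hypothetical theme icon
    mode = QIcon.Normal if self.isEnabled() else QIcon.Disabled
    draw_icon(icon, QRect(0, 0, 24, 24), painter, mode)
    painter.end()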