Example #1
def qimageview(image: QImage) -> ndarray:
    if not isinstance(image, QtGui.QImage):
        raise TypeError("image argument must be a QImage instance")

    # View the image as (height, width, 4) bytes; bytesPerLine() is the row
    # stride and may include per-row alignment padding.
    shape = image.height(), image.width(), 4
    strides0 = image.bytesPerLine()

    format = image.format()

    dtype = "|u1"   # unsigned 8-bit bytes
    strides1 = 4    # 4 bytes per pixel
    strides2 = 1    # 1 byte per channel

    if format == QtGui.QImage.Format_Invalid:
        raise ValueError("qimageview got invalid QImage")

    # Expose the QImage buffer through the numpy array interface protocol so
    # that np.asarray() can wrap it without copying.
    image.__array_interface__ = {
        'shape': shape,
        'typestr': dtype,
        'data': getdata(image),
        'strides': (strides0, strides1, strides2),
        'version': 3,
    }

    result = np.asarray(image)
    result[..., :3] = result[..., 2::-1]  # swap B and R in place (BGRA -> RGBA)
    del image.__array_interface__
    return result
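
For context, here is a minimal self-contained sketch of the same zero-copy idea using PyQt5's bits()/setsize() instead of the getdata() helper (which is not shown above); the PyQt5 binding and the tiny test image are assumptions, not part of the original example.

import numpy as np
from PyQt5.QtGui import QImage, qRgb

img = QImage(5, 3, QImage.Format_ARGB32)
img.fill(qRgb(10, 20, 30))

ptr = img.bits()
ptr.setsize(img.byteCount())
# bytesPerLine() is the row stride in bytes; it is not necessarily width * 4.
view = np.ndarray(shape=(img.height(), img.width(), 4),
                  strides=(img.bytesPerLine(), 4, 1),
                  dtype=np.uint8, buffer=ptr)
print(view.shape, view[0, 0])  # B, G, R, A bytes on a little-endian machine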
Example #2
def qimage2numpy(qimage: QImage):
    """Convert QImage to numpy.ndarray.

    32-bit color images (Format_RGB32, Format_ARGB32 and
    Format_ARGB32_Premultiplied) become a (height, width, 4) uint8 array,
    except that Format_RGB32 drops its unused fourth byte and yields
    (height, width, 3).  Format_Indexed8 becomes a (height, width) uint8
    array."""
    result_shape = (qimage.height(), qimage.width())
    # Scanline width in pixels, including any alignment padding.
    temp_shape = (qimage.height(), int(qimage.bytesPerLine() * 8 / qimage.depth()))
    if qimage.format() in (QImage.Format_ARGB32_Premultiplied,
                           QImage.Format_ARGB32,
                           QImage.Format_RGB32):
        dtype = np.uint8
        result_shape += (4,)
        temp_shape += (4,)
    elif qimage.format() == QImage.Format_Indexed8:
        dtype = np.uint8
    else:
        raise ValueError("qimage2numpy only supports 32bit and 8bit images")
    # FIXME: raise error if alignment does not match
    buf = qimage.bits().asstring(qimage.byteCount())
    result = np.frombuffer(buf, dtype).reshape(temp_shape)
    if result_shape != temp_shape:
        # Crop the padding columns introduced by bytesPerLine().
        result = result[:, :result_shape[1]]
    if qimage.format() == QImage.Format_RGB32 and dtype == np.uint8:
        result = result[..., :3]
    return result
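
A short usage sketch, assuming the function above is importable, PyQt5 is the binding in use, and "photo.jpg" stands in for any image Qt can decode:

from PyQt5.QtGui import QImage

qimg = QImage("photo.jpg").convertToFormat(QImage.Format_RGB32)
if not qimg.isNull():
    arr = qimage2numpy(qimg)
    print(arr.shape, arr.dtype)  # (height, width, 3), uint8, B/G/R byte order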
Example #3
def main():

    img = QImage("D:\\Tulips.jpg")
    print("byteCount()=%d" % img.byteCount())
    print("bytesPerLine()=%d" % img.bytesPerLine())
    cv_np = convertQImageToMat(img)
    #cv_np = QImageToCvMat(img)  # test did not pass
    cv_img = cv_np
    print("%d,%d,%d" % (cv_img.shape[0], cv_img.shape[1], cv_img.shape[2]))

    cv_img = rotate_bound(cv_img, 30)

    cv2.imwrite("D:\\tulips_cv.jpg", cv_img)
    #cv2.imshow("OpenCV",cv_img)
    #cv2.waitKey()

    #cv_img = cv2.cvtColor(cv_np, cv2.COLOR_BGR2RGB)
    #w = cv_img.shape[0]
    #h = cv_img.shape[1]
    print("lenght=%d,(%d,%d)", (cv_img.size, cv_img.shape[0], cv_img.shape[1]))
    print("bit[1]=%d,bit[2]=%d,bit[3]=%d",
          (cv_img[0][0][0], cv_img[0][0][1], cv_img[0][0][2]))

    #qt_img = QImage(cv_img, w, h, w * 4, QImage.Format_RGB32)

    qt_img = np2qpixmap(cv_img)
    print("byteCount()=%d" % qt_img.byteCount())
    qt_img.save("D:\\tulips_qt.jpg")
    return cv_img
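
The convertQImageToMat, rotate_bound and np2qpixmap helpers are defined elsewhere in that project. As a rough illustration only, a QImage-to-ndarray conversion in the spirit of convertQImageToMat might look like the sketch below (PyQt5 assumed; the function name is hypothetical, not the original helper):

import numpy as np
from PyQt5.QtGui import QImage

def qimage_to_mat_sketch(img: QImage) -> np.ndarray:
    """Illustrative only: QImage -> (height, width, 4) uint8 BGRA array."""
    img = img.convertToFormat(QImage.Format_ARGB32)
    ptr = img.bits()
    ptr.setsize(img.byteCount())
    arr = np.frombuffer(ptr, np.uint8).reshape(
        (img.height(), img.bytesPerLine() // 4, 4))
    return arr[:, :img.width()].copy()  # drop row padding, detach from the QImage buffer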
Example #4
                def read_and_convert_using_qt_to_raw_mono(
                        handler, filename, hs_cols, hs_rows, invert=True):
                    img = QImage(filename)
                    if img.isNull():
                        handler.show_error(
                            _('Could not load the image {} -- unknown format or other error'
                              ).format(os.path.basename(filename)))
                        return
                    if (img.width(), img.height()) != (
                            hs_cols, hs_rows):  # do we need to scale it ?
                        img = img.scaled(
                            hs_cols, hs_rows, Qt.IgnoreAspectRatio,
                            Qt.SmoothTransformation
                        )  # force to our dest size. Note that IgnoreAspectRatio guarantees the right size. The other modes don't
                        if img.isNull() or (img.width(), img.height()) != (
                                hs_cols, hs_rows):
                            handler.show_error(
                                _("Could not scale image to {} x {} pixels").
                                format(hs_cols, hs_rows))
                            return
                    bm = QBitmap.fromImage(
                        img, Qt.MonoOnly)  # ensures 1bpp, dithers any colors
                    if bm.isNull():
                        handler.show_error(
                            _('Could not convert image to monochrome'))
                        return
                    target_fmt = QImage.Format_Mono
                    img = bm.toImage().convertToFormat(
                        target_fmt,
                        Qt.MonoOnly | Qt.ThresholdDither | Qt.AvoidDither
                    )  # ensures MSB bytes again (above steps may have twiddled the bytes)
                    lineSzOut = hs_cols // 8  # bits -> num bytes per line
                    bimg = bytearray(hs_rows *
                                     lineSzOut)  # 1024 bytes for a 128x64 img
                    bpl = img.bytesPerLine()
                    if bpl < lineSzOut:
                        handler.show_error(
                            _("Internal error converting image"))
                        return
                    # read in 1 scan line at a time since the scan lines may be > our target packed image
                    for row in range(hs_rows):
                        # copy image scanlines 1 line at a time to destination buffer
                        ucharptr = img.constScanLine(
                            row)  # returned type is basically void*
                        ucharptr.setsize(
                            bpl)  # inform python how big this C array is
                        b = bytes(ucharptr)  # aaand.. work with bytes.

                        begin = row * lineSzOut
                        end = begin + lineSzOut
                        bimg[begin:end] = b[0:lineSzOut]
                        if invert:
                            for i in range(begin, end):
                                bimg[i] = ~bimg[i] & 0xff  # invert b/w
                    return bytes(bimg)
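
The packed buffer can be sanity-checked by wrapping it back into a 1-bpp QImage, since each row is exactly hs_cols // 8 bytes. A minimal sketch, assuming PyQt5 and the 128x64 screen size mentioned in the comment above:

from PyQt5.QtGui import QImage

hs_cols, hs_rows = 128, 64
packed = bytes(hs_cols // 8 * hs_rows)  # stand-in for the function's return value

check = QImage(packed, hs_cols, hs_rows, hs_cols // 8, QImage.Format_Mono)
print(check.width(), check.height(), check.bytesPerLine())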
Example #5
    def apply_groups(self, pixmap: QPixmap, *groups: ManipulationGroup) -> QPixmap:
        """Manipulate pixmap according all manipulations in groups.

        Args:
            pixmap: The QPixmap to manipulate.
            groups: Manipulation groups containing all manipulations to apply in series.
        Returns:
            The manipulated pixmap.
        """
        _logger.debug("Manipulate: applying %d groups", len(groups))
        # Convert original pixmap to python bytes
        image = pixmap.toImage()
        bits = image.constBits()
        bits.setsize(image.byteCount())
        data = bits.asstring()
        # Apply changes on the byte-level
        for group in groups:
            data = self._apply_group(group, data)
        # Convert updated bytes back to pixmap
        image = QImage(
            data, image.width(), image.height(), image.bytesPerLine(), image.format()
        )
        return QPixmap(image)
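
self._apply_group is not shown in this excerpt; it takes the raw pixel bytes and returns modified bytes of the same length. A hypothetical sketch of such a byte-level manipulation (a plain brightness offset applied to every byte, including the filler/alpha byte; not the project's actual implementation):

import numpy as np

def apply_brightness(data: bytes, offset: int = 20) -> bytes:
    """Hypothetical byte-level edit: add a constant to every channel byte."""
    arr = np.frombuffer(data, dtype=np.uint8).astype(np.int16)
    return np.clip(arr + offset, 0, 255).astype(np.uint8).tobytes()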
Example #6
def run_facedetect(filename: str) -> Tuple[QImage, List[Any]]:
    image = QImage(filename)
    if image.format() != QImage.Format_RGB32:
        image = image.convertToFormat(QImage.Format_RGB32)

    image = image.scaled(image.width() * scale_factor,
                         image.height() * scale_factor,
                         Qt.IgnoreAspectRatio,
                         Qt.SmoothTransformation)

    bits = image.bits()
    bits.setsize(image.byteCount())

    # bytesPerLine() // 4 is the padded row width in pixels for a 32-bit format.
    array = numpy.ndarray(shape=(image.height(), image.bytesPerLine() // 4, 4), dtype=numpy.uint8,
                          buffer=bits)
    # Crop the padding columns and drop the fourth (filler) byte,
    # leaving B, G, R channels on little-endian machines.
    array = array[:image.height(), :image.width(), :3]

    img = cv2.imread(filename, cv2.IMREAD_COLOR)
    print(img.shape)
    print(array.shape)

    print(img)
    print()
    print(array)

    detector = dlib.get_frontal_face_detector()
    results = detector(img)
    print(results)
    results = detector(array)
    print(results)

    print("detected {} faces".format(len(results)))

    image = image.scaled(image.width() // scale_factor,
                         image.height() // scale_factor)

    return image, results
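
A usage sketch, assuming the module above is importable and already defines scale_factor together with the numpy, cv2 and dlib imports (the filename is a placeholder):

image, results = run_facedetect("portrait.jpg")
for det in results:  # dlib rectangles, in the coordinates of the scaled-up detection image
    print(det.left(), det.top(), det.right(), det.bottom())
print(image.width(), image.height())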
Example #7
def _test_pipedimagerpq():
    # vertices of a pentagon (roughly) centered in a 1000 x 1000 square
    pentagonpts = ( (504.5, 100.0), (100.0, 393.9),
                    (254.5, 869.4), (754.5, 869.4),
                    (909.0, 393.9),  )
    linepts = ( (350,  50),
                (200, 150),
                (400, 250),
                (300, 350),
                (150, 250),
                (100, 450) )
    # start PyQt
    testapp = QApplication(["PipedImagerPQ"])
    # create the list of commands to submit
    drawcmnds = []
    drawcmnds.append( { "action":"setTitle", "title":"Tester" } )
    drawcmnds.append( { "action":"show" } )
    drawcmnds.append( { "action":"clear", "color":"black"} )
    drawcmnds.append( { "action":"screenInfo"} )
    # create the image to be displayed
    testimage = QImage(500, 500, QImage.Format_ARGB32_Premultiplied)
    # initialize a black background
    testimage.fill(0xFF000000)
    # draw some things in the image
    testpainter = QPainter(testimage)
    testpainter.setBrush( QBrush(QColor(0, 255, 0, 128), Qt.SolidPattern) )
    testpainter.setPen( QPen(QBrush(QColor(255, 0, 0, 255), Qt.SolidPattern),
                         5.0, Qt.SolidLine, Qt.SquareCap, Qt.MiterJoin) )
    testpainter.drawRect( QRectF(5.0, 255.0, 240.0, 240.0) )
    testpainter.setBrush( QBrush(QColor(0, 0, 255, 255), Qt.SolidPattern) )
    testpainter.setPen( QPen(QBrush(QColor(0, 0, 0, 255), Qt.SolidPattern),
                         5.0, Qt.DashLine, Qt.RoundCap, Qt.RoundJoin) )
    testpainter.drawPolygon( QPolygonF(
            [ QPointF(.25 * ptx, .25 * pty + 250) for (ptx, pty) in pentagonpts ] ) )
    testpainter.setBrush( Qt.NoBrush )
    testpainter.setPen( QPen(QBrush(QColor(255, 255, 255, 255), Qt.SolidPattern),
                         3.0, Qt.DashLine, Qt.RoundCap, Qt.RoundJoin) )
    testpainter.drawPolyline( QPolygonF(
            [ QPointF(pts, pty) for (pts, pty) in linepts ] ) )
    testpainter.end()
    # add the image command
    testimgwidth = testimage.width()
    testimgheight = testimage.height()
    testimgstride = testimage.bytesPerLine()
    # not a good way to get the pixel data
    testimgdata = bytearray(testimgheight * testimgstride)
    k = 0
    for pty in range(testimgheight):
        for ptx in range(testimgwidth):
            pixval = testimage.pixel(ptx, pty)
            (aval, rgbval) = divmod(pixval, 256 * 256 * 256)
            (rval, gbval) = divmod(rgbval, 256 * 256)
            (gval, bval) = divmod(gbval, 256)
            testimgdata[k] = bval
            k += 1
            testimgdata[k] = gval
            k += 1
            testimgdata[k] = rval
            k += 1
            testimgdata[k] = aval
            k += 1
    testblocksize = 4000
    testnumblocks = (testimgheight * testimgstride + testblocksize - 1) // testblocksize
    drawcmnds.append( { "action":"newImage",
                        "width":testimgwidth,
                        "height":testimgheight,
                        "stride":testimgstride } )
    for k in range(testnumblocks):
        if k < (testnumblocks - 1):
            blkdata = testimgdata[k*testblocksize:(k+1)*testblocksize]
        else:
            blkdata = testimgdata[k*testblocksize:]
        drawcmnds.append( { "action":"newImage",
                            "blocknum":k+1,
                            "numblocks":testnumblocks,
                            "startindex":k*testblocksize,
                            "blockdata":blkdata } )
    # finish the command list
    drawcmnds.append( { "action":"show" } )
    drawcmnds.append( { "action":"exit" } )
    # create a PipedImagerPQ in this process
    (cmndrecvpipe, cmndsendpipe) = multiprocessing.Pipe(False)
    (rspdrecvpipe, rspdsendpipe) = multiprocessing.Pipe(False)
    testviewer = PipedImagerPQ(cmndrecvpipe, rspdsendpipe)
    # create a command submitter dialog
    tester = _CommandSubmitterPQ(testviewer, cmndsendpipe,
                                   rspdrecvpipe, drawcmnds)
    tester.show()
    # let it all run
    testresult = testapp.exec_()
    if testresult != 0:
        sys.exit(testresult)
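
As the "not a good way to get the pixel data" comment concedes, the per-pixel loop above is slow. A sketch of a bulk extraction that yields the same B, G, R, A byte sequence for a 500x500 ARGB32_Premultiplied image like the one above, assuming PyQt5, numpy and a little-endian machine:

import numpy as np
from PyQt5.QtGui import QImage

testimage = QImage(500, 500, QImage.Format_ARGB32_Premultiplied)
testimage.fill(0xFF000000)

ptr = testimage.constBits()
ptr.setsize(testimage.byteCount())
# height * bytesPerLine() bytes, already laid out as B, G, R, A per pixel.
testimgdata = bytearray(np.frombuffer(ptr, np.uint8))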
Example #8
    def _convert_image(self, qimage: QImage) -> Optional[QDBusArgument]:
        """Convert a QImage to the structure DBus expects.

        https://specifications.freedesktop.org/notification-spec/latest/ar01s05.html#icons-and-images-formats
        """
        bits_per_color = 8
        has_alpha = qimage.hasAlphaChannel()
        if has_alpha:
            image_format = QImage.Format_RGBA8888
            channel_count = 4
        else:
            image_format = QImage.Format_RGB888
            channel_count = 3

        qimage.convertTo(image_format)
        bytes_per_line = qimage.bytesPerLine()
        width = qimage.width()
        height = qimage.height()

        image_data = QDBusArgument()
        image_data.beginStructure()
        image_data.add(width)
        image_data.add(height)
        image_data.add(bytes_per_line)
        image_data.add(has_alpha)
        image_data.add(bits_per_color)
        image_data.add(channel_count)

        try:
            size = qimage.sizeInBytes()
        except TypeError:
            # WORKAROUND for
            # https://www.riverbankcomputing.com/pipermail/pyqt/2020-May/042919.html
            # byteCount() is obsolete, but sizeInBytes() is only available with
            # SIP >= 5.3.0.
            size = qimage.byteCount()

        # Despite the spec not mandating this, many notification daemons mandate that
        # the last scanline does not have any padding bytes.
        #
        # Or in the words of dunst:
        #
        #     The image is serialised rowwise pixel by pixel. The rows are aligned by a
        #     spacer full of garbage. The overall data length of data + garbage is
        #     called the rowstride.
        #
        #     Mind the missing spacer at the last row.
        #
        #     len:     |<--------------rowstride---------------->|
        #     len:     |<-width*pixelstride->|
        #     row 1:   |   data for row 1    | spacer of garbage |
        #     row 2:   |   data for row 2    | spacer of garbage |
        #              |         .           | spacer of garbage |
        #              |         .           | spacer of garbage |
        #              |         .           | spacer of garbage |
        #     row n-1: |   data for row n-1  | spacer of garbage |
        #     row n:   |   data for row n    |
        #
        # Source:
        # https://github.com/dunst-project/dunst/blob/v1.6.1/src/icon.c#L292-L309
        padding = bytes_per_line - width * channel_count
        assert 0 <= padding <= 3, (padding, bytes_per_line, width,
                                   channel_count)
        size -= padding

        if padding and self._quirks.no_padded_images:
            return None

        bits = qimage.constBits().asstring(size)
        image_data.add(QByteArray(bits))

        image_data.endStructure()
        return image_data
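
For reference, the padding arithmetic that the assertion above relies on can be observed with a width that is not a multiple of four, assuming PyQt5 (the 13x2 size is only an example):

from PyQt5.QtGui import QImage

img = QImage(13, 2, QImage.Format_RGB888)        # 13 * 3 = 39 data bytes per row
padding = img.bytesPerLine() - img.width() * 3   # scanlines are 32-bit aligned: 40 - 39 = 1
print(img.bytesPerLine(), padding)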