Code example #1
Score: 0
File: cutshow_test.py — project: MM-kola/BrainSAR_beta
class FirstMainWin(QWidget):
    """Window that displays a SAR image, refreshed by a timer, with
    random square markers painted onto the source image."""

    def __init__(self):
        # BUG FIX: the original called super(QWidget, self).__init__(),
        # which starts the MRO lookup *after* QWidget and therefore skips
        # QWidget's own initializer, leaving the widget uninitialized.
        super().__init__()
        # Color used by plotdots() for the marker squares.
        self.draw = QColor()
        self.draw.setBlue(53)
        self.draw.setGreen(53)
        self.draw.setRed(234)
        # NOTE(review): alpha 0 makes the marker color fully transparent,
        # so the painted squares are invisible.  Preserved as-is — confirm
        # with the author whether this is intended.
        self.draw.setAlpha(0)
        self.initUI()
        # Window title: "display image".
        self.setWindowTitle('显示图像')

    def initUI(self):
        """Build the UI: label, source image, refresh timer, markers."""
        self.resize(800, 300)
        self.move(300, 200)
        self.lbl = QLabel(self)
        # Raw string avoids any accidental backslash escapes in the
        # hard-coded Windows path (byte-identical to the original literal).
        self.pil_image = QImage(r'G:\BrainSAR_Beta\SAR图片\show.jpg')
        self.pil_image.convertTo(QImage.Format_RGB888)
        print(self.pil_image.size(), self.pil_image.height())
        self.initshow()
        # Timer drives the display refresh rate (default 0 ms interval).
        self.timer1 = QtCore.QTimer(self)
        self.timer1.timeout.connect(self.fcku)
        self.timer1.start()
        self.plotdots()

    def fcku(self):
        """Timer slot: rescale the image to the current window and show it."""
        scaled = self.m_resize(self.width(), self.height(), self.pil_image)
        self.lbl.resize(scaled.width(), scaled.height())
        self.lbl.setPixmap(QPixmap.fromImage(scaled))

    def m_resize(self, w_box, h_box, pil_image):
        """Return *pil_image* scaled to fit inside a w_box x h_box box,
        preserving the aspect ratio.

        Args:
            w_box, h_box: target box dimensions in pixels.
            pil_image: a QImage (despite the name) to scale.
        Returns:
            A new, scaled QImage.
        """
        w, h = pil_image.width(), pil_image.height()
        factor = min(w_box / w, h_box / h)
        return pil_image.scaled(int(w * factor), int(h * factor))

    # Plot markers: paints squares (in source-image pixel coordinates)
    # directly into the QImage that is later shown as a pixmap.
    def plotdots(self):
        """Paint four random 2000x2000 squares onto the source image.

        Coordinate ranges are clamped to the image bounds; the original
        relied on QImage.setPixelColor() silently ignoring out-of-range
        pixels, which emits a warning per pixel.
        """
        width, height = self.pil_image.width(), self.pil_image.height()
        for _ in range(1, 5):
            x = random.randint(1500, 8000)
            y = random.randint(1500, 9000)
            for i in range(max(0, x - 1000), min(width, x + 1000)):
                for j in range(max(0, y - 1000), min(height, y + 1000)):
                    self.pil_image.setPixelColor(i, j, self.draw)

    def initshow(self):
        """Show the image once at startup, scaled to the current window."""
        scaled = self.m_resize(self.width(), self.height(), self.pil_image)
        self.lbl.resize(scaled.width(), scaled.height())
        self.lbl.setPixmap(QPixmap.fromImage(scaled))
Code example #2
Score: 0
File: notification.py — project: mkonig/qutebrowser
    def _convert_image(self, qimage: QImage) -> Optional[QDBusArgument]:
        """Convert a QImage to the structure DBus expects.

        https://specifications.freedesktop.org/notification-spec/latest/ar01s05.html#icons-and-images-formats
        """
        sample_bits = 8
        alpha = qimage.hasAlphaChannel()
        if alpha:
            target_format, channels = QImage.Format_RGBA8888, 4
        else:
            target_format, channels = QImage.Format_RGB888, 3
        qimage.convertTo(target_format)

        stride = qimage.bytesPerLine()
        w = qimage.width()
        h = qimage.height()

        struct = QDBusArgument()
        struct.beginStructure()
        # Field order is fixed by the notification spec:
        # width, height, rowstride, has_alpha, bits per sample, channels.
        for field in (w, h, stride, alpha, sample_bits, channels):
            struct.add(field)

        try:
            data_len = qimage.sizeInBytes()
        except TypeError:
            # WORKAROUND for
            # https://www.riverbankcomputing.com/pipermail/pyqt/2020-May/042919.html
            # byteCount() is obsolete, but sizeInBytes() is only available with
            # SIP >= 5.3.0.
            data_len = qimage.byteCount()

        # Despite the spec not mandating this, many notification daemons mandate that
        # the last scanline does not have any padding bytes.
        #
        # Or in the words of dunst:
        #
        #     The image is serialised rowwise pixel by pixel. The rows are aligned by a
        #     spacer full of garbage. The overall data length of data + garbage is
        #     called the rowstride.
        #
        #     Mind the missing spacer at the last row.
        #
        #     len:     |<--------------rowstride---------------->|
        #     len:     |<-width*pixelstride->|
        #     row 1:   |   data for row 1    | spacer of garbage |
        #     row 2:   |   data for row 2    | spacer of garbage |
        #              |         .           | spacer of garbage |
        #              |         .           | spacer of garbage |
        #              |         .           | spacer of garbage |
        #     row n-1: |   data for row n-1  | spacer of garbage |
        #     row n:   |   data for row n    |
        #
        # Source:
        # https://github.com/dunst-project/dunst/blob/v1.6.1/src/icon.c#L292-L309
        trailing = stride - w * channels
        assert 0 <= trailing <= 3, (trailing, stride, w, channels)
        data_len -= trailing

        if trailing and self._quirks.no_padded_images:
            # Daemon cannot cope with padded rows at all; skip the image.
            return None

        raw = qimage.constBits().asstring(data_len)
        struct.add(QByteArray(raw))
        struct.endStructure()
        return struct
Code example #3
Score: 0
    def readBmp(self, file, len=None, off=0, silent=False, rotate=True):
        """ Reads DOC-standard bat recordings in 8x row-compressed BMP format.
            For similarity with readWav, accepts len and off args, in seconds.
            rotate: if True, rotates to match setImage and other spectrograms (rows=time)
                otherwise preserves normal orientation (cols=time)

            Args:
                file: path to the BMP file to read.
                len: length to read, in seconds; None reads to end of file.
                    (NOTE(review): shadows the builtin `len`; kept for
                    caller compatibility.)
                off: offset to start reading from, in seconds.
                silent: if True, suppresses informational printing.
                rotate: see above.
            Returns:
                0 on success, 1 on error.  Side effects: sets self.sg,
                self.data, self.fileLength, self.sampleRate, self.incr,
                self.minFreq/maxFreq(Show) and audio-format fields.
        """
        # !! Important to set these, as they are used in other functions
        self.sampleRate = 176000
        self.incr = 512

        img = QImage(file, "BMP")
        h = img.height()
        w = img.width()
        colc = img.colorCount()
        # Zero width/height means Qt failed to decode the file.
        if h == 0 or w == 0:
            print("ERROR: image was not loaded")
            return (1)

        # Check color format and convert to grayscale
        if not silent and (not img.allGray() or colc > 256):
            print(
                "Warning: image provided not in 8-bit grayscale, information will be lost"
            )
        img.convertTo(QImage.Format_Grayscale8)

        # Convert to numpy
        # (remember that pyqtgraph images are column-major)
        # Grayscale8 is one byte per pixel, hence h*w*1 bytes total.
        ptr = img.constBits()
        ptr.setsize(h * w * 1)
        img2 = np.array(ptr).reshape(h, w)

        # Determine if original image was rotated, based on expected num of freq bins and freq 0 being empty
        # We also used to check if np.median(img2[-1,:])==0,
        # but some files happen to have the bottom freq bin around 90, so we cannot rely on that.
        if h == 64:
            # standard DoC format
            pass
        elif w == 64:
            # seems like DoC format, rotated at -90*
            img2 = np.rot90(img2, 1, (1, 0))
            w, h = h, w
        else:
            # Neither dimension is 64: dump diagnostics and bail out.
            print("ERROR: image does not appear to be in DoC format!")
            print("Format details:")
            print(img2)
            print(h, w)
            print(min(img2[-1, :]), max(img2[-1, :]))
            print(np.sum(img2[-1, :] > 0))
            print(np.median(img2[-1, :]))
            return (1)

        # Could skip that for visual mode - maybe useful for establishing contrast?
        img2[-1, :] = 254  # lowest freq bin is 0, flip that
        img2 = 255 - img2  # reverse value having the black as the most intense
        img2 = img2 / np.max(img2)  # normalization
        img2 = img2[:,
                    1:]  # cut the first time bin, which only contains the scale
        img2 = np.repeat(
            img2, 8,
            axis=0)  # repeat each freq bin 8x to fit invertspectrogram

        self.data = []
        self.fileLength = (w - 2) * self.incr + self.window_width  # in samples
        # Alternatively:
        # self.fileLength = self.convertSpectoAmpl(h-1)*self.sampleRate

        # NOTE: conversions will use self.sampleRate and self.incr, so ensure those are already set!
        # trim to specified offset and length:
        if off > 0 or len is not None:
            # Convert offset from seconds to pixels
            off = int(self.convertAmpltoSpec(off))
            if len is None:
                img2 = img2[:, off:]
            else:
                # Convert length from seconds to pixels:
                len = int(self.convertAmpltoSpec(len))
                img2 = img2[:, off:(off + len)]

        if rotate:
            # rotate for display, b/c required spectrogram dimensions are:
            #  t increasing over rows, f increasing over cols
            # This will be enough if the original image was spectrogram-shape.
            img2 = np.rot90(img2, 1, (1, 0))

        self.sg = img2

        # Dummy audio format: there is no real waveform, only the spectrogram.
        if QtMM:
            self.audioFormat.setChannelCount(0)
            self.audioFormat.setSampleSize(0)
            self.audioFormat.setSampleRate(self.sampleRate)
        #else:
        #self.audioFormat['channelCount'] = 0
        #self.audioFormat['sampleSize'] = 0
        #self.audioFormat['sampleRate'] = self.sampleRate

        self.minFreq = 0
        self.maxFreq = self.sampleRate // 2
        self.minFreqShow = max(self.minFreq, self.minFreqShow)
        self.maxFreqShow = min(self.maxFreq, self.maxFreqShow)

        if not silent:
            print("Detected BMP format: %d x %d px, %d colours" % (w, h, colc))
        return (0)