def present(self, frame: QVideoFrame):
        """Receive a frame from the video pipeline and publish it as a QImage.

        Emits ``frame_available`` with a deep copy of the frame's pixel data,
        then validates the frame against the negotiated surface format.

        :param frame: the QVideoFrame delivered by the media pipeline
        :return: True when the frame matches the surface format; False
            otherwise (the surface is stopped with IncorrectFormatError)
        """
        print("present called")

        if frame.isValid():

            clone_frame = QVideoFrame(frame)

            # map() can fail; only read pixel data when the buffer is mapped.
            if clone_frame.map(QAbstractVideoBuffer.ReadOnly):
                image = QImage(clone_frame.bits(), frame.width(), frame.height(),
                               frame.bytesPerLine(),
                               QVideoFrame.imageFormatFromPixelFormat(frame.pixelFormat()))
                # BUGFIX: copy before unmap().  The QImage above merely wraps
                # the mapped buffer; the original code unmapped first and then
                # emitted an image whose backing memory was already freed.
                image = image.copy()
                clone_frame.unmap()

                self.frame_available.emit(image)

        if self.surfaceFormat().pixelFormat() != frame.pixelFormat() or \
                self.surfaceFormat().frameSize() != frame.size():
            self.setError(QAbstractVideoSurface.IncorrectFormatError)
            self.stop()

            print("present finished: Return False")
            return False
        else:
            self.current_frame = frame

            print("present finished: Return True")
            return True
    def present(self, frame: QVideoFrame) -> bool:
        """Convert *frame* to a QImage and emit it via ``frame_available``.

        The frame is mapped read-only, optionally mirrored to compensate for
        the surface format's orientation, deep-copied, and unmapped again.

        :param frame: the QVideoFrame delivered by the media pipeline
        :return: True when an image was emitted, False when the frame was
            invalid, had no QImage-compatible pixel format, or could not
            be mapped
        """
        if not frame.isValid():
            return False

        image_format = QVideoFrame.imageFormatFromPixelFormat(frame.pixelFormat())
        # BUGFIX: imageFormatFromPixelFormat() returns a QImage.Format, so it
        # must be compared against QImage.Format_Invalid rather than
        # QVideoFrame.Format_Invalid (both happen to be 0, which is why the
        # original comparison worked by accident).
        if image_format == QImage.Format_Invalid:
            _logger.info(_('QR code scanner for video frame with invalid pixel format'))
            return False

        if not frame.map(QAbstractVideoBuffer.ReadOnly):
            _logger.info(_('QR code scanner failed to map video frame'))
            return False

        try:
            img = QImage(frame.bits(), frame.width(), frame.height(), image_format)

            # Check whether we need to flip the image on any axis
            surface_format = self.surfaceFormat()
            flip_x = surface_format.isMirrored()
            flip_y = surface_format.scanLineDirection() == QVideoSurfaceFormat.BottomToTop

            # Mirror the image if needed
            if flip_x or flip_y:
                img = img.mirrored(flip_x, flip_y)

            # Create a copy of the image so the original frame data can be freed
            img = img.copy()
        finally:
            frame.unmap()

        self.frame_available.emit(img)

        return True
Beispiel #3
0
    def isFormatSupported(self, format):
        """Return True when *format* describes a frame this surface can render.

        A format is usable when its buffer lives in CPU memory (NoHandle),
        its frame size is non-empty, and its pixel format maps to a valid
        QImage format.
        """
        if format.handleType() != QAbstractVideoBuffer.NoHandle:
            return False
        if format.frameSize().isEmpty():
            return False
        converted = QVideoFrame.imageFormatFromPixelFormat(format.pixelFormat())
        return converted != QImage.Format_Invalid
Beispiel #4
0
 def isFormatSupported(self, _format):
     """Return True when *_format* describes a renderable video format.

     Requires a pixel format convertible to a QImage format, a non-empty
     frame size, and a CPU-memory (NoHandle) buffer.
     """
     imageFormat = QVideoFrame.imageFormatFromPixelFormat(_format.pixelFormat())
     size = _format.frameSize()
     # Return the boolean expression directly instead of routing it through
     # a mutable flag variable.
     return (imageFormat != QImage.Format_Invalid and not size.isEmpty()
             and _format.handleType() == QAbstractVideoBuffer.NoHandle)
Beispiel #5
0
    def present(self, frame: QVideoFrame):
        """
        Called by the video player: emit the frame as a QImage and repaint.

        :param frame: frame to present
        :return: True when the frame matches the surface format, otherwise
            False (the surface is stopped with IncorrectFormatError)
        """
        if frame.isValid():
            clone_frame = QVideoFrame(frame)
            # map() can fail; only build the image when the buffer is mapped.
            if clone_frame.map(QAbstractVideoBuffer.ReadOnly):
                image = QImage(
                    clone_frame.bits(), clone_frame.width(), clone_frame.height(),
                    clone_frame.bytesPerLine(),  # pass stride: rows may be padded
                    QVideoFrame.imageFormatFromPixelFormat(
                        clone_frame.pixelFormat()))
                # BUGFIX: deep-copy before unmap().  The QImage only wraps the
                # mapped buffer; without a copy the emitted image can point at
                # freed memory (e.g. with a queued signal connection).
                image = image.copy()
                clone_frame.unmap()

                self.frameAvailable.emit(image)

        if self.surfaceFormat().pixelFormat() != frame.pixelFormat(
        ) or self.surfaceFormat().frameSize() != frame.size():
            self.setError(self.IncorrectFormatError)
            self.stop()
            return False
        else:
            self.currentFrame = frame
            self.widget.repaint()
            return True
    def isFormatSupported(self, format):
        """Return True if this surface can handle *format*.

        Requires a pixel format with a valid QImage equivalent, a non-empty
        frame size, and a CPU-memory (NoHandle) buffer.
        """
        print("isFormatSupported() called")

        image_format = QVideoFrame.imageFormatFromPixelFormat(
            format.pixelFormat())
        size = format.frameSize()

        print("isFormatSupported() finished")
        # BUGFIX: imageFormatFromPixelFormat() returns a QImage.Format, so the
        # comparison must use QImage.Format_Invalid, not
        # QVideoFrame.Format_Invalid (both are 0, so the wrong enum happened
        # to work by accident).
        return image_format != QImage.Format_Invalid and not size.isEmpty() and \
            format.handleType() == QAbstractVideoBuffer.NoHandle
Beispiel #7
0
 def start(self, _format):
     """Configure this surface for *_format* and begin presentation.

     Caches the QImage format, frame size and viewport, starts the base
     surface, and refreshes the widget geometry and video rectangle.

     :return: True on success, False when the format is unusable
     """
     imageFormat = QVideoFrame.imageFormatFromPixelFormat(_format.pixelFormat())
     size = _format.frameSize()
     # Reject formats with no QImage equivalent or an empty frame size.
     if imageFormat == QImage.Format_Invalid or size.isEmpty():
         return False
     self.imageFormat = imageFormat
     self.imageSize = size
     self.sourceRect = _format.viewport()
     QAbstractVideoSurface.start(self, _format)
     self.widget.updateGeometry()
     self.updateVideoRect()
     return True
Beispiel #8
0
            def run(self):
                """Worker-thread loop: convert queued video frames to ndarrays.

                Repeatedly takes the latest frame handed over through
                ``self.frame`` (guarded by ``self._mutex``), converts it to an
                RGB numpy array of shape (height, width, 3), and emits it via
                ``self.ndarray_available``.  Waits on ``self._condition`` when
                no new frame is pending; exits when ``self._abort`` is set.
                """
                self._abort = False

                while True:
                    # Take the pending frame under the lock and clear the slot
                    # so the producer can hand over the next one.
                    with QMutexLocker(self._mutex):
                        if self._abort:
                            break
                        frame = self.frame
                        self.frame = None

                    pixel_format = frame.pixelFormat()
                    image_format = QVideoFrame.imageFormatFromPixelFormat(pixel_format)
                    # NOTE(review): returning here ends the worker thread
                    # permanently on a single bad frame — confirm intended.
                    if image_format == QImage.Format_Invalid:
                        qDebug("WARNING: Could not convert video frame to image!")
                        return
                    if not frame.map(QAbstractVideoBuffer.ReadOnly):
                        qDebug("WARNING: Could not map video frame!")
                        return

                    width = frame.width()
                    height = frame.height()
                    bytes_per_line = frame.bytesPerLine()
                    # convertToFormat() copies the pixel data, so the mapped
                    # buffer can be released right after.
                    image = QImage(frame.bits(), width, height, bytes_per_line, image_format)
                    image = image.convertToFormat(QImage.Format_RGB32)

                    frame.unmap()

                    # fix upside-down data for windows
                    if platform.system() == "Windows":
                        image = image.mirrored(vertical=True)

                    # now convert QImage to ndarray
                    pointer = image.constBits()
                    pointer.setsize(image.byteCount())
                    # RGB32 is 4 bytes per pixel, hence the trailing axis of 4.
                    array = np.array(pointer).reshape(image.height(), image.width(), 4)

                    # get rid of the transparency channel and organize the colors as rgb
                    # NB: it would be safer to figure out the image format first, and where the transparency channel is
                    # stored...
                    array = array[:, :, 0:3:][:, :, ::-1]

                    self.ndarray_available.emit(array)

                    # see if new data is available, go to sleep if not
                    with QMutexLocker(self._mutex):
                        if self.frame is None:
                            self._condition.wait(self._mutex)
Beispiel #9
0
    def process_frame(self, frame, levels):
        """Compute a normalised grey-level histogram of *frame*.

        Emits ``histogram_ready`` with ``levels`` buckets scaled into
        [0, 1] (an empty list when ``levels`` is falsy, all zeros when the
        frame cannot be mapped).

        :param frame: video frame to analyse
        :param levels: number of histogram buckets
        """
        histogram = [0.0] * levels
        if levels and frame.map(QAbstractVideoBuffer.ReadOnly):
            pixel_format = frame.pixelFormat()

            if pixel_format in (QVideoFrame.Format_YUV420P,
                                QVideoFrame.Format_NV12):
                # Planar YUV: the leading plane holds one luma byte per pixel.
                bits = frame.bits()
                for offset in range(frame.height() * frame.width()):
                    histogram[(bits[offset] * levels) >> 8] += 1.0

            else:
                image_format = QVideoFrame.imageFormatFromPixelFormat(
                    pixel_format)
                if image_format != QImage.Format_Invalid:
                    # RGB-convertible data: bucket every pixel by grey value.
                    image = QImage(frame.bits(), frame.width(), frame.height(),
                                   image_format)

                    for row in range(image.height()):
                        for col in range(image.width()):
                            grey = qGray(image.pixel(col, row))
                            histogram[(grey * levels) >> 8] += 1.0

            # Rescale so the fullest bucket becomes 1.0.
            peak = max(histogram)
            if peak > 0.0:
                histogram = [count / peak for count in histogram]

            frame.unmap()

        self.histogram_ready.emit(histogram)
    def start(self, format):
        """Start presentation with *format*.

        Caches the derived QImage format, frame size and viewport when the
        format is usable, then delegates to the base class.

        :return: True on success, False when the format cannot be handled
        """
        print("start() called")

        image_format = QVideoFrame.imageFormatFromPixelFormat(
            format.pixelFormat())
        size = format.frameSize()

        # A usable format needs a valid QImage mapping and a real frame size.
        supported = (image_format != QImage.Format_Invalid
                     and not size.isEmpty())
        if supported:
            self.image_format = image_format
            self.image_size = size
            self.source_rect = format.viewport()

            super().start(format)

        print("start() finished")
        return supported
Beispiel #11
0
    def present(self, frame):
        """Present *frame*: emit its pixel data as a QImage and repaint.

        :param frame: the QVideoFrame delivered by the media pipeline
        :return: True when the frame matches the surface format, otherwise
            False (the surface is stopped with IncorrectFormatError)
        """
        if frame.isValid():
            cloneFrame = QVideoFrame(frame)
            # map() can fail; only read the bits when it succeeded.
            if cloneFrame.map(QAbstractVideoBuffer.ReadOnly):
                image = QImage(cloneFrame.bits(), cloneFrame.width(), cloneFrame.height(),
                               cloneFrame.bytesPerLine(),  # stride: rows may be padded
                               QVideoFrame.imageFormatFromPixelFormat(cloneFrame.pixelFormat()))
                # BUGFIX: deep-copy before unmap().  The QImage only wraps the
                # mapped buffer, so without a copy the emitted image can point
                # at freed memory (e.g. with a queued signal connection).
                image = image.copy()
                cloneFrame.unmap()
                self.frameAvailable.emit(image)  # this is very important

        if self.surfaceFormat().pixelFormat() != frame.pixelFormat() or \
                self.surfaceFormat().frameSize() != frame.size():
            self.setError(QAbstractVideoSurface.IncorrectFormatError)
            self.stop()

            return False
        else:
            self.currentFrame = frame
            self.widget.repaint(self.targetRect)

            return True
Beispiel #12
0
    def processFrame(self, frame, levels):
        """Build a normalised grey-level histogram of *frame* and emit it.

        Emits ``histogramReady`` with ``levels`` buckets scaled into [0, 1]
        (an empty list when ``levels`` is falsy, all zeros when the frame
        cannot be mapped).
        """
        print("In processor processFrame()")
        histogram = [0.0] * levels

        if levels and frame.map(QAbstractVideoBuffer.ReadOnly):
            pixelFormat = frame.pixelFormat()

            if pixelFormat in (QVideoFrame.Format_YUV420P, QVideoFrame.Format_NV12):
                # Planar YUV: the leading plane holds one luma byte per pixel.
                bits = frame.bits()
                for offset in range(frame.height() * frame.width()):
                    histogram[(bits[offset] * levels) >> 8] += 1.0
            else:
                imageFormat = QVideoFrame.imageFormatFromPixelFormat(pixelFormat)
                if imageFormat != QImage.Format_Invalid:
                    # RGB-convertible data: bucket every pixel by grey value.
                    image = QImage(frame.bits(), frame.width(), frame.height(), imageFormat)

                    for row in range(image.height()):
                        for col in range(image.width()):
                            grey = qGray(image.pixel(col, row))
                            histogram[(grey * levels) >> 8] += 1.0

            # Rescale so the fullest bucket becomes 1.0.
            peak = max(histogram)
            if peak > 0.0:
                histogram = [count / peak for count in histogram]

            frame.unmap()

        self.histogramReady.emit(histogram)
Beispiel #13
0
    def processFrame(self, frame, levels):
        """Emit a grey-level histogram of *frame*, normalised to [0, 1].

        Produces ``levels`` buckets (empty when ``levels`` is falsy, all
        zeros when the frame cannot be mapped) on ``histogramReady``.
        """
        histogram = [0.0] * levels

        if levels and frame.map(Qt.QAbstractVideoBuffer.ReadOnly):
            pixelFormat = frame.pixelFormat()

            if pixelFormat in (QVideoFrame.Format_YUV420P,
                               QVideoFrame.Format_NV12):
                # Planar YUV: first plane is one luminance byte per pixel.
                bits = frame.bits()
                for offset in range(frame.height() * frame.width()):
                    histogram[(bits[offset] * levels) >> 8] += 1.0
            else:
                imageFormat = QVideoFrame.imageFormatFromPixelFormat(pixelFormat)
                if imageFormat != Qt.QImage.Format_Invalid:
                    # RGB-convertible data: count grey values pixel by pixel.
                    image = Qt.QImage(frame.bits(), frame.width(),
                                      frame.height(), imageFormat)
                    for row in range(image.height()):
                        for col in range(image.width()):
                            grey = Qt.qGray(image.pixel(col, row))
                            histogram[(grey * levels) >> 8] += 1.0

            # Normalise against the largest bucket.
            peak = max(histogram)
            if peak > 0.0:
                histogram = [count / peak for count in histogram]

            frame.unmap()
        self.histogramReady.emit(histogram)
Beispiel #14
0
 def _format_size_(surface_format):
     """Return the (QImage format, frame size) pair for *surface_format*."""
     pixel_format = surface_format.pixelFormat()
     return (QVideoFrame.imageFormatFromPixelFormat(pixel_format),
             surface_format.frameSize())