# Example 1
    def _GstNeedData(self, src, need_bytes):  # pylint: disable=unused-argument
        '''
        Gstreamer need-data probe callback to feed the appsrc with image data.
        The image data comes from a queue that is filled from other worker
        threads. If the queue is empty and the finish flag is set send
        end-of-stream to the appsrc so the pipeline can finish its processing.
        If the textoverlay element is available the current text for the
        rendered subtitle will be set.
        :param src: GstElement appsrc
        :param need_bytes: unused size
        '''
        self._Log(logging.DEBUG, '_GstNeedData: %s', self.idxFrame)

        # Presentation timestamp of the frame about to be pushed.
        pts = self.idxFrame * self.imgDuration

        # Poll the frame queue while rendering is active. The loop exits via
        # `break` once a frame was fetched; the `else` branch runs only when
        # `self.active` turned falsy (user abort) before a frame arrived.
        while self.active:
            result = None
            try:
                # Block at most 250 ms so the abort/finish flags are
                # re-checked regularly.
                result = self.resQueue.get(True, 0.25)
                break
            except queue.Empty:
                self._Log(logging.DEBUG, '_GstNeedData: Queue.Empty')
                if self.finished:
                    # Producer is done and the queue drained: end the stream.
                    self._Log(logging.DEBUG, '_GstNeedData: finished, emitting end-of-stream (finalTime %s)', pts)
                    self.finalTime = pts
                    src.emit("end-of-stream")
                    return
                else:
                    continue
        else:
            # Rendering was aborted; end the stream at the current pts.
            self._Log(logging.DEBUG, '_GstNeedData: not active anymore, emitting end-of-stream (finalTime %s)', pts)
            self.finalTime = pts
            src.emit("end-of-stream")
            return

        self._Log(logging.DEBUG, '_GstNeedData: push to buffer (%s)', len(result))

        # Wrap the JPEG data in a Gst buffer and timestamp it.
        buf = Gst.Buffer.new_wrapped(result)
        buf.pts = pts
        buf.duration = self.imgDuration
        ret = src.emit("push-buffer", buf)
        if ret != Gst.FlowReturn.OK:
            # Downstream refused the buffer; skip subtitle and frame counting.
            return

        if self.textoverlay:
#             self.textoverlay.set_property("text", "Frame: %s" % self.idxFrame)
            # Lazily create the srt parser on first use.
            if self.srtParse is None:
                srtPath = self._outFile + ".srt"
                self.srtParse = SrtParser(
                    srtPath, self.GetProfile().GetFrameRate().AsFloat())

            subtitle = self.srtParse.Get(self.idxFrame)
            self.textoverlay.set_property("text", subtitle)

        self.idxFrame += 1
# Example 2
    def test1(self):
        '''Check subtitle lookup by frame index around the cue boundaries.'''
        parser = SrtParser(self.tmpFilename, 25.0)
        parser.Parse()

        # First cue is still active at frame 432.
        self.assertEqual(parser.Get(400), "Cooles Auto")
        self.assertEqual(parser.Get(432), "Cooles Auto")

        # Gap between cues yields an empty subtitle.
        self.assertEqual(parser.Get(433), "")
        self.assertEqual(parser.Get(479), "")

        # The "Großstadtrevier" cue is not returned for these frames.
        self.assertNotEqual(parser.Get(480), "Großstadtrevier")
        self.assertNotEqual(parser.Get(494), "Großstadtrevier")
        self.assertEqual(parser.Get(495), "Kleine süße Schweine")
# Example 3
class _GStreamerRenderer(BaseRenderer):
    '''
    Base class for GStreamer based renderers.

    Feeds JPEG frames from a bounded queue (filled via ToSink) into an
    appsrc, optionally renders or embeds subtitles, appends audio files and
    muxes everything into an output file. Concrete subclasses supply the
    container muxer and the audio/video encoders.
    '''

    def __init__(self):
        BaseRenderer.__init__(self)
        self._Log = _GStreamerRenderer.Log
        # Bounded queue decouples the producer threads from the pipeline.
        self.resQueue = queue.Queue(20)

        self.active = None  # True while rendering, False after user abort
        self.finished = None  # True once all frames were produced
        self.ready = None  # event set when end-of-stream was handled
        self.pipeline = None  # the Gst.Pipeline built in Prepare()
        self.idxFrame = 0  # index of the next video frame to push
        self.idxAudioFile = 0  # index of the audio file currently decoded
        self.idxSub = 0  # index of the next subtitle cue to push
        self.imgDuration = None  # duration of one frame in Gst time units
        self.finalTime = None  # pts at which the video stream ends
        self.textoverlay = None  # textoverlay element when rendering subtitles
        self.srtParse = None  # SrtParser for the subtitle file, if any
        self.concat = None  # concat element joining several audio files
        self.ptsOffset = 0  # accumulated pts offset across audio files
        self.ptsLast = -1  # last pts seen by the audio buffer probe

        self.usePangoSubtitle = False  # True if mux caps use pango-markup

    @staticmethod
    def CheckDependencies(msgList):
        '''
        Check that the GStreamer python bindings are available and append an
        error message to msgList if not. Logs a warning if the optional
        textoverlay element is missing.
        :param msgList: list collecting dependency error messages
        '''
        if Gst is None or GObject is None:
            _GStreamerRenderer.Log(logging.DEBUG,
                                   "checking for gstreamer failed!")
            msgList.append(_(u"GStreamer (python-gst-1.0) required!"))
        else:
            to = Gst.ElementFactory.find("textoverlay")
            if to is None:
                _GStreamerRenderer.Log(
                    logging.WARN,
                    "GStreamer element textoverlay not found! Subtitles cannot rendered into video file."
                )

    @staticmethod
    def GetProperties():
        '''Return the names of the configurable renderer properties.'''
        return ["Bitrate", "Subtitle", "SubtitleSettings", "SubtitleLanguage"]

    @staticmethod
    def GetDefaultProperty(prop):
        '''
        Return the default value for the given property name; falls back to
        the BaseRenderer defaults for unknown properties.
        :param prop: property name
        '''
        if prop == "Subtitle":
            return "file"
        if prop == "SubtitleSettings":
            return ""
        if prop == "SubtitleLanguage":
            return "en"
        return BaseRenderer.GetDefaultProperty(prop)

    def ToSink(self, data):
        '''
        Queue encoded JPEG image data for the appsrc; blocks when the
        bounded queue is full.
        :param data: JPEG image data of one frame
        '''
        self.resQueue.put(data)

    def GetOutputFile(self):
        '''Return the output file path: base name plus renderer extension.'''
        outFile = '{0}.{1}'.format(self._outFile, self._GetExtension())
        return outFile

    def __CleanUp(self):
        '''
        Waits until the ready event is set and resets the renderer state.
        The ready event is set within _GstOnMessage if the end-of-stream event
        was handled.
        '''
        if self.ready is None:
            return

        self._Log(logging.DEBUG, "waiting for ready event")
        self.ready.wait()

        # Reset all state so the renderer instance can be reused.
        self.active = None
        self.finished = None
        self.ready = None
        self.pipeline = None
        self.idxFrame = 0
        self.idxAudioFile = 0
        self.idxSub = 0
        self.imgDuration = None
        self.finalTime = None
        self.textoverlay = None
        self.srtParse = None
        self.concat = None
        self.ptsOffset = 0
        self.ptsLast = -1

        if self.GetTypedProperty("Subtitle", str) != "file":
            # delete subtitle file, if subtitle is rendered in video
            srtPath = self._GetSubtitleFile()
            if srtPath:
                os.remove(srtPath)

    def _GetSubtitleFile(self):
        '''Return the path of the srt file next to the output file, or None
        if no such file exists.'''
        srtPath = self._outFile + ".srt"
        if os.path.exists(srtPath):
            return srtPath
        else:
            return None

    def ProcessAbort(self):
        '''
        Called if the user aborts the rendering. Sets the active flag to false
        and waits until everything is cleaned up.
        '''
        if self.active:
            self.active = False

        self.__CleanUp()

    def Prepare(self):
        '''
        Build the gstreamer pipeline and all necessary objects and bindings.
        '''
        GObject.threads_init()

        self.ready = threading.Event()
        self.ready.set()

        self.active = True
        self.finished = False
        frameRate = self.GetProfile().GetFrameRate()
        # 1000ms / fps == x msec/frame
        self.imgDuration = int(round(1000 * Gst.MSECOND / frameRate.AsFloat()))
        self._Log(logging.DEBUG, "set imgDuration=%s", self.imgDuration)

        self.pipeline = Gst.Pipeline()

        # appsrc delivering the JPEG frames; 'block' throttles the producer.
        caps = Gst.caps_from_string("image/jpeg,framerate={0}".format(
            frameRate.AsStr()))
        videoSrc = Gst.ElementFactory.make("appsrc")
        videoSrc.set_property("block", True)
        videoSrc.set_property("caps", caps)
        videoSrc.connect("need-data", self._GstNeedData)
        self.pipeline.add(videoSrc)

        queueVideo = Gst.ElementFactory.make("queue")
        self.pipeline.add(queueVideo)

        jpegDecoder = Gst.ElementFactory.make("jpegdec")
        self.pipeline.add(jpegDecoder)

        colorConverter = Gst.ElementFactory.make("videoconvert")
        self.pipeline.add(colorConverter)

        videoEnc = self._GetVideoEncoder()
        self.pipeline.add(videoEnc)

        # Decide how subtitles are handled: rendered into the video via
        # textoverlay, or embedded as a separate stream by the muxer.
        muxSubtitle = False
        subtitleEnc = None
        if self._GetSubtitleFile():
            self.srtParse = SrtParser(
                self._GetSubtitleFile(),
                self.GetProfile().GetFrameRate().AsFloat())
            if self.GetTypedProperty(
                    "Subtitle", str) == "render" and Gst.ElementFactory.find(
                        "textoverlay"):
                self.textoverlay = Gst.ElementFactory.make("textoverlay")
                self.textoverlay.set_property("text", "")
                self._SetupTextOverlay()
                self.pipeline.add(self.textoverlay)
            elif self.GetTypedProperty("Subtitle", str) == "embed":
                muxSubtitle = True
                subtitleEnc = self._GetSubtitleEncoder()  # pylint: disable=assignment-from-none

        # link elements for video stream
        videoSrc.link(jpegDecoder)
        jpegDecoder.link(colorConverter)
        if self.textoverlay:
            colorConverter.link(self.textoverlay)
            self.textoverlay.link(queueVideo)
        else:
            colorConverter.link(queueVideo)
        queueVideo.link(videoEnc)

        # Optional audio branch: concatenate all audio files, convert,
        # resample and encode them.
        audioEnc = None
        if self.GetAudioFiles():
            self.concat = Gst.ElementFactory.make("concat")
            self.pipeline.add(self.concat)

            # Probe observes the stream time so the pipeline can be stopped
            # once the video's final time is reached.
            srcpad = self.concat.get_static_pad("src")
            srcpad.add_probe(
                Gst.PadProbeType.
                BUFFER,  # | Gst.PadProbeType.EVENT_DOWNSTREAM,
                self._GstProbeBuffer)

            self._GstAddAudioFile(self.GetAudioFiles()[self.idxAudioFile])

            audioConv = Gst.ElementFactory.make("audioconvert")
            self.pipeline.add(audioConv)

            audiorate = Gst.ElementFactory.make("audioresample")
            self.pipeline.add(audiorate)

            audioQueue = Gst.ElementFactory.make("queue")
            self.pipeline.add(audioQueue)

            audioEnc = self._GetAudioEncoder()
            self.pipeline.add(audioEnc)

            self.concat.link(audioConv)
            audioConv.link(audiorate)
            audiorate.link(audioQueue)
            audioQueue.link(audioEnc)

        # MPEG containers need parser elements between encoder and muxer.
        if self.GetProfile().IsMPEGProfile():
            vp = Gst.ElementFactory.make("mpegvideoparse")
            self.pipeline.add(vp)
            videoEnc.link(vp)
            videoEnc = vp

            if audioEnc:
                ap = Gst.ElementFactory.make("mpegaudioparse")
                self.pipeline.add(ap)
                audioEnc.link(ap)
                audioEnc = ap
        elif isinstance(self, MkvX265AC3):
            vp = Gst.ElementFactory.make("h265parse")
            self.pipeline.add(vp)
            videoEnc.link(vp)
            videoEnc = vp

        mux = self._GetMux()
        self.pipeline.add(mux)

        videoQueue2 = Gst.ElementFactory.make("queue")
        self.pipeline.add(videoQueue2)

        videoEncCaps = self._GetVideoEncoderCaps()  # pylint: disable=assignment-from-none
        if videoEncCaps:
            videoEnc.link_filtered(videoQueue2, videoEncCaps)
        else:
            videoEnc.link(videoQueue2)
        videoQueue2.link(mux)

        if audioEnc:
            audioQueue2 = Gst.ElementFactory.make("queue")
            self.pipeline.add(audioQueue2)
            audioEnc.link(audioQueue2)
            audioQueue2.link(mux)

        if muxSubtitle:
            subCaps = self._GetSubtitleEncoderCaps()
            subPad = None
            if subCaps:
                subPad = mux.get_request_pad("subtitle_%u")
            if subPad:
                # muxer has subtitle pad, so initialize subtitle processing
                self.usePangoSubtitle = subCaps.find("pango-markup") != -1

                subSrc = Gst.ElementFactory.make("appsrc")
                subCaps = Gst.caps_from_string(subCaps)
                subSrc.set_property("caps", subCaps)
                subSrc.set_property("format", Gst.Format.TIME)
                subSrc.connect("need-data", self._GstNeedSubtitleData)
                self.pipeline.add(subSrc)

                if subtitleEnc:
                    self.pipeline.add(subtitleEnc)
                    subSrc.link(subtitleEnc)
                    srcPad = subtitleEnc.get_static_pad("src")
                else:
                    srcPad = subSrc.get_static_pad("src")

                srcPad.link(subPad)
            else:
                self._Log(
                    logging.WARNING,
                    "Want to mux subtitle but container does not support it!")

        sink = Gst.ElementFactory.make("filesink")
        sink.set_property("location", self.GetOutputFile())
        self.pipeline.add(sink)

        mux.link(sink)

        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self._GstOnMessage)

        self.pipeline.set_state(Gst.State.PLAYING)

        GtkMainLoop.EnsureRunning()

        # Cleared until _GstOnMessage sets it again on end-of-stream.
        self.ready.clear()

    def _GstAddAudioFile(self, audioFile):
        '''
        Inserts new elements to refer a new audio file in the gstreamer pipeline.
        :param audioFile: the full path to the audio file
        '''
        audioSrc = Gst.ElementFactory.make("filesrc")
        audioSrc.set_property("location", audioFile)
        self.pipeline.add(audioSrc)

        audioDec = Gst.ElementFactory.make("decodebin")
        audioDec.connect("pad-added", self._GstPadAddedAudio)
        audioDec.connect("no-more-pads", self._GstNoMorePadsAudio)
        self.pipeline.add(audioDec)

        audioSrc.link(audioDec)

    def Finalize(self):
        '''Mark frame production as finished and wait for the pipeline to
        drain via __CleanUp.'''
        if not self.finished:
            self.finished = True

        self.__CleanUp()

    def _GetBitrate(self):
        '''
        Return the configured bitrate as int.
        :raises RendererException: if the bitrate property is not a number
        '''
        bitrate = self.GetTypedProperty("Bitrate", int,
                                        self.GetProfile().GetBitrate())
        if bitrate is None:
            raise RendererException(_(u"Bitrate must be a number!"))
        return bitrate

    def _GstOnMessage(self, bus, msg):  # pylint: disable=unused-argument
        '''
        Gstreamer message handler for messages in gstreamer event bus.
        :param bus: the Gst.Bus that emitted the message
        :param msg: the Gst.Message to dispatch on
        '''
        self._Log(logging.DEBUG, '_GstOnMessage: %s', msg.type)

        if msg.type == Gst.MessageType.ERROR:
            err, debug = msg.parse_error()
            self._Log(logging.ERROR, "Error received from element %s: %s",
                      msg.src.get_name(), err)
            self._Log(logging.DEBUG, "Debugging information: %s", debug)

        elif msg.type == Gst.MessageType.LATENCY:
            self.pipeline.recalculate_latency()

        elif msg.type == Gst.MessageType.EOS:
            # Stream finished: stop the pipeline and unblock __CleanUp.
            self.pipeline.set_state(Gst.State.NULL)
            self.ready.set()
#         return Gst.BusSyncReply.PASS

    def _GstNeedData(self, src, need_bytes):  # pylint: disable=unused-argument
        '''
        Gstreamer need-data probe callback to feed the appsrc with image data.
        The image data comes from a queue that is filled from other worker
        threads. If the queue is empty and the finish flag is set send
        end-of-stream to the appsrc so the pipeline can finish its processing.
        If the textoverlay element is available the current text for the
        rendered subtitle will be set.
        :param src: GstElement appsrc
        :param need_bytes: unused size
        '''
        self._Log(logging.DEBUG, '_GstNeedData: %s', self.idxFrame)

        # Presentation timestamp of the frame about to be pushed.
        pts = self.idxFrame * self.imgDuration

        # Poll the frame queue while rendering is active. The loop exits via
        # `break` once a frame was fetched; the `else` branch runs only when
        # `self.active` turned falsy (user abort) before a frame arrived.
        while self.active:
            result = None
            try:
                # Block at most 250 ms so the abort/finish flags are
                # re-checked regularly.
                result = self.resQueue.get(True, 0.25)
                break
            except queue.Empty:
                self._Log(logging.DEBUG, '_GstNeedData: Queue.Empty')
                if self.finished:
                    # Producer is done and the queue drained: end the stream.
                    self._Log(
                        logging.DEBUG,
                        '_GstNeedData: finished, emitting end-of-stream (finalTime %s)',
                        pts)
                    self.finalTime = pts
                    src.emit("end-of-stream")
                    return
                else:
                    continue
        else:
            # Rendering was aborted; end the stream at the current pts.
            self._Log(
                logging.DEBUG,
                '_GstNeedData: not active anymore, emitting end-of-stream (finalTime %s)',
                pts)
            self.finalTime = pts
            src.emit("end-of-stream")
            return

        self._Log(logging.DEBUG, '_GstNeedData: push to buffer (%s)',
                  len(result))

        # Wrap the JPEG data in a Gst buffer and timestamp it.
        buf = Gst.Buffer.new_wrapped(result)
        buf.pts = pts
        buf.duration = self.imgDuration
        ret = src.emit("push-buffer", buf)
        if ret != Gst.FlowReturn.OK:
            # Downstream refused the buffer; skip subtitle and frame counting.
            return

        if self.textoverlay:
            #             self.textoverlay.set_property("text", "Frame: %s" % self.idxFrame)
            subtitle = self.srtParse.Get(self.idxFrame)
            escaped_subtitle = self._GetPangoEscapedSubtitle(subtitle)

            self.textoverlay.set_property("text", escaped_subtitle)

        self.idxFrame += 1

    def _GetPangoEscapedSubtitle(self, text, rawText=False):
        '''
        Validate text as pango markup. If it parses, return the markup
        unchanged, or only its plain text when rawText is True. If parsing
        fails the markup-escaped text is returned instead (with a warning
        for malformed markup, an error for unexpected failures).
        :param text: subtitle text, possibly containing pango markup
        :param rawText: if True return the text stripped of pango markup
        '''
        try:
            parseResult = Pango.parse_markup(text, -1, "&")
            if rawText:
                return parseResult.text
            else:
                return text
        except GObject.GError as err:
            text_escaped = GObject.markup_escape_text(text)
            if err.domain != "g-markup-error-quark":  # pylint: disable=no-member
                self._Log(
                    logging.ERROR,
                    "Unexpected error while parsing subtitle '%s' with pango! Using escaped text '%s'",
                    text, text_escaped)
            else:
                self._Log(
                    logging.WARNING,
                    "Subtitle '%s' is not well formed pango markup. Using escaped text '%s'",
                    text, text_escaped)
            return text_escaped

    def _GstNeedSubtitleData(self, src, need_bytes):  # pylint: disable=unused-argument
        '''
        Gstreamer need-data callback feeding subtitle cues into the subtitle
        appsrc. Empty cues are skipped; end-of-stream is emitted when no
        further cue exists.
        :param src: GstElement appsrc for subtitle data
        :param need_bytes: unused size
        '''
        self._Log(logging.DEBUG, "_GstNeedSubtitleData: %s, pango=%s",
                  self.idxSub, self.usePangoSubtitle)
        # Skip over cues that exist but carry no text.
        while 1:
            subtitle, start, duration = self.srtParse.GetByIndex(self.idxSub)
            if start is not None and not subtitle:
                self._Log(
                    logging.WARNING,
                    "_GstNeedSubtitleData: skipping empty subtitle: %s-%s",
                    start, duration)
                self.idxSub += 1
            else:
                break
        if duration is None:
            # No cue left: finish the subtitle stream.
            src.emit("end-of-stream")
            return
        else:
            subtitle = self._GetPangoEscapedSubtitle(subtitle,
                                                     not self.usePangoSubtitle)

            # Allocate a buffer for the encoded text and timestamp it with
            # the cue's start time and duration.
            subText = subtitle.encode()
            subBuf = Gst.Buffer.new_allocate(None, len(subText), None)
            subBuf.fill(0, subText)
            subBuf.pts = subBuf.dts = start * Gst.MSECOND
            subBuf.duration = duration * Gst.MSECOND
            src.emit("push-buffer", subBuf)

        self.idxSub += 1

    def _GstPadAddedAudio(self, decodebin, pad):
        '''
        Gstreamer pad-added probe callback to attach a new audio file to the
        pipeline.
        :param decodebin: GstElement decodebin (decoder for audio data)
        :param pad: GstPad object
        '''
        self._Log(logging.DEBUG, "_GstPadAddedAudio: %s - %s", decodebin, pad)
        caps = pad.get_current_caps()
        compatible_pad = self.concat.get_compatible_pad(pad, caps)
        pad.link(compatible_pad)

    def _GstNoMorePadsAudio(self, decodebin):
        '''
        Called when a decodebin exposed all of its pads; inserts the next
        audio file into the pipeline if there is one.
        :param decodebin: GstElement decodebin
        '''
        self._Log(logging.DEBUG, "_GstNoMorePadsAudio: %s", decodebin)
        self.idxAudioFile += 1
        if self.idxAudioFile < len(self.GetAudioFiles()):
            #             self.pipeline.set_state(Gst.State.PAUSED)
            self._GstAddAudioFile(self.GetAudioFiles()[self.idxAudioFile])


#             self.pipeline.set_state(Gst.State.PLAYING)

    def _GstProbeBuffer(self, srcPad, probeInfo):  # pylint: disable=unused-argument
        '''
        Gstreamer pad probe callback to check if the current stream time has
        reached the final time (usually the length of the overall audio stream).
        If final time has reached send eos event (end of stream) to finish the
        pipeline
        :param srcPad: src pad of the muxer
        :param probeInfo: GstPadProbeInfo object
        '''
        buf = probeInfo.get_buffer()
        self._Log(logging.DEBUG, "_GstProbeBuffer: buffer %s",
                  (buf, buf.pts // Gst.MSECOND, self.ptsOffset // Gst.MSECOND,
                   self.finalTime))
        # A pts jump backwards indicates the next concatenated audio file
        # started; accumulate the previous file's end time as offset.
        if buf.pts < self.ptsLast:
            self.ptsOffset += self.ptsLast
        self.ptsLast = buf.pts

        if self.finalTime is None:
            return Gst.PadProbeReturn.PASS
        elif self.ptsOffset + buf.pts >= self.finalTime:
            # Audio runs past the video's end: drop the remaining buffers.
            return Gst.PadProbeReturn.DROP
        else:
            return Gst.PadProbeReturn.PASS

    def _SetupTextOverlay(self):
        '''
        Apply the user supplied "SubtitleSettings" (semicolon separated
        prop=value pairs) to the textoverlay element. Values are converted
        to int (decimal first, then hexadecimal) where possible.
        '''
        settings = self.GetProperty("SubtitleSettings")
        for singleSetting in settings.split(";"):
            settingAndProp = singleSetting.split("=")
            if len(settingAndProp) == 2:
                prop, value = settingAndProp
                try:
                    # try number as int
                    value = int(value)
                except:  # pylint: disable-msg=bare-except
                    try:
                        # try numbers as hex
                        value = int(value, 16)
                    except:  # pylint: disable-msg=bare-except
                        pass
                self.textoverlay.set_property(prop, value)

    def _GetExtension(self):
        '''Return the output container's file extension (abstract).'''
        raise NotImplementedError()

    def _GetMux(self):
        '''Return the muxer element for the container (abstract).'''
        raise NotImplementedError()

    def _GetAudioEncoder(self):
        '''Return the audio encoder element (abstract).'''
        raise NotImplementedError()

    def _GetVideoEncoder(self):
        '''Return the video encoder element (abstract).'''
        raise NotImplementedError()

    def _GetVideoEncoderCaps(self):
        '''Return caps to filter the video encoder link, or None.'''
        return None

    def _GetSubtitleEncoder(self):
        '''Return an encoder element for embedded subtitles, or None.'''
        return None

    def _GetSubtitleEncoderCaps(self):
        '''Return the caps string for the subtitle appsrc.'''
        return "text/x-raw,format=(string)utf8"
# Example 4
    def Prepare(self):
        '''
        Build the gstreamer pipeline and all necessary objects and bindings.
        '''
        GObject.threads_init()

        self.ready = threading.Event()
        self.ready.set()

        self.active = True
        self.finished = False
        frameRate = self.GetProfile().GetFrameRate()
        # 1000ms / fps == x msec/frame
        self.imgDuration = int(round(1000 * Gst.MSECOND / frameRate.AsFloat()))
        self._Log(logging.DEBUG, "set imgDuration=%s", self.imgDuration)

        self.pipeline = Gst.Pipeline()

        # appsrc delivering the JPEG frames; 'block' throttles the producer.
        caps = Gst.caps_from_string("image/jpeg,framerate={0}".format(
            frameRate.AsStr()))
        videoSrc = Gst.ElementFactory.make("appsrc")
        videoSrc.set_property("block", True)
        videoSrc.set_property("caps", caps)
        videoSrc.connect("need-data", self._GstNeedData)
        self.pipeline.add(videoSrc)

        queueVideo = Gst.ElementFactory.make("queue")
        self.pipeline.add(queueVideo)

        jpegDecoder = Gst.ElementFactory.make("jpegdec")
        self.pipeline.add(jpegDecoder)

        colorConverter = Gst.ElementFactory.make("videoconvert")
        self.pipeline.add(colorConverter)

        videoEnc = self._GetVideoEncoder()
        self.pipeline.add(videoEnc)

        # Decide how subtitles are handled: rendered into the video via
        # textoverlay, or embedded as a separate stream by the muxer.
        muxSubtitle = False
        subtitleEnc = None
        if self._GetSubtitleFile():
            self.srtParse = SrtParser(
                self._GetSubtitleFile(),
                self.GetProfile().GetFrameRate().AsFloat())
            if self.GetTypedProperty(
                    "Subtitle", str) == "render" and Gst.ElementFactory.find(
                        "textoverlay"):
                self.textoverlay = Gst.ElementFactory.make("textoverlay")
                self.textoverlay.set_property("text", "")
                self._SetupTextOverlay()
                self.pipeline.add(self.textoverlay)
            elif self.GetTypedProperty("Subtitle", str) == "embed":
                muxSubtitle = True
                subtitleEnc = self._GetSubtitleEncoder()  # pylint: disable=assignment-from-none

        # link elements for video stream
        videoSrc.link(jpegDecoder)
        jpegDecoder.link(colorConverter)
        if self.textoverlay:
            colorConverter.link(self.textoverlay)
            self.textoverlay.link(queueVideo)
        else:
            colorConverter.link(queueVideo)
        queueVideo.link(videoEnc)

        # Optional audio branch: concatenate all audio files, convert,
        # resample and encode them.
        audioEnc = None
        if self.GetAudioFiles():
            self.concat = Gst.ElementFactory.make("concat")
            self.pipeline.add(self.concat)

            # Probe observes the stream time so the pipeline can be stopped
            # once the video's final time is reached.
            srcpad = self.concat.get_static_pad("src")
            srcpad.add_probe(
                Gst.PadProbeType.
                BUFFER,  # | Gst.PadProbeType.EVENT_DOWNSTREAM,
                self._GstProbeBuffer)

            self._GstAddAudioFile(self.GetAudioFiles()[self.idxAudioFile])

            audioConv = Gst.ElementFactory.make("audioconvert")
            self.pipeline.add(audioConv)

            audiorate = Gst.ElementFactory.make("audioresample")
            self.pipeline.add(audiorate)

            audioQueue = Gst.ElementFactory.make("queue")
            self.pipeline.add(audioQueue)

            audioEnc = self._GetAudioEncoder()
            self.pipeline.add(audioEnc)

            self.concat.link(audioConv)
            audioConv.link(audiorate)
            audiorate.link(audioQueue)
            audioQueue.link(audioEnc)

        # MPEG containers need parser elements between encoder and muxer.
        if self.GetProfile().IsMPEGProfile():
            vp = Gst.ElementFactory.make("mpegvideoparse")
            self.pipeline.add(vp)
            videoEnc.link(vp)
            videoEnc = vp

            if audioEnc:
                ap = Gst.ElementFactory.make("mpegaudioparse")
                self.pipeline.add(ap)
                audioEnc.link(ap)
                audioEnc = ap
        elif isinstance(self, MkvX265AC3):
            vp = Gst.ElementFactory.make("h265parse")
            self.pipeline.add(vp)
            videoEnc.link(vp)
            videoEnc = vp

        mux = self._GetMux()
        self.pipeline.add(mux)

        videoQueue2 = Gst.ElementFactory.make("queue")
        self.pipeline.add(videoQueue2)

        videoEncCaps = self._GetVideoEncoderCaps()  # pylint: disable=assignment-from-none
        if videoEncCaps:
            videoEnc.link_filtered(videoQueue2, videoEncCaps)
        else:
            videoEnc.link(videoQueue2)
        videoQueue2.link(mux)

        if audioEnc:
            audioQueue2 = Gst.ElementFactory.make("queue")
            self.pipeline.add(audioQueue2)
            audioEnc.link(audioQueue2)
            audioQueue2.link(mux)

        if muxSubtitle:
            subCaps = self._GetSubtitleEncoderCaps()
            subPad = None
            if subCaps:
                subPad = mux.get_request_pad("subtitle_%u")
            if subPad:
                # muxer has subtitle pad, so initialize subtitle processing
                self.usePangoSubtitle = subCaps.find("pango-markup") != -1

                subSrc = Gst.ElementFactory.make("appsrc")
                subCaps = Gst.caps_from_string(subCaps)
                subSrc.set_property("caps", subCaps)
                subSrc.set_property("format", Gst.Format.TIME)
                subSrc.connect("need-data", self._GstNeedSubtitleData)
                self.pipeline.add(subSrc)

                if subtitleEnc:
                    self.pipeline.add(subtitleEnc)
                    subSrc.link(subtitleEnc)
                    srcPad = subtitleEnc.get_static_pad("src")
                else:
                    srcPad = subSrc.get_static_pad("src")

                srcPad.link(subPad)
            else:
                self._Log(
                    logging.WARNING,
                    "Want to mux subtitle but container does not support it!")

        sink = Gst.ElementFactory.make("filesink")
        sink.set_property("location", self.GetOutputFile())
        self.pipeline.add(sink)

        mux.link(sink)

        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self._GstOnMessage)

        self.pipeline.set_state(Gst.State.PLAYING)

        GtkMainLoop.EnsureRunning()

        # Cleared until _GstOnMessage sets it again on end-of-stream.
        self.ready.clear()
# Example 5
class _GStreamerRenderer(BaseRenderer):
    def __init__(self):
        BaseRenderer.__init__(self)
        self._Log = _GStreamerRenderer.Log
        # bounded queue of encoded frames handed over by worker threads via
        # ToSink; consumed by _GstNeedData (size 20 limits memory usage)
        self.resQueue = Queue.Queue(20)

        self.active = None        # True while rendering, set False on abort
        self.finished = None      # True once no more frames will arrive
        self.ready = None         # threading.Event, set on end-of-stream
        self.pipeline = None      # Gst.Pipeline built in Prepare
        self.idxFrame = 0         # index of the next video frame to push
        self.idxAudioFile = 0     # index of the audio file currently decoded
        self.imgDuration = None   # duration of one frame in Gst time units
        self.finalTime = None     # pts of the last video frame (stream end)
        self.gtkMainloop = None   # GObject.MainLoop driving the bus watch
        self.textoverlay = None   # optional textoverlay element for subtitles
        self.srtParse = None      # lazily created SrtParser (see _GstNeedData)
        self.concat = None        # concat element joining multiple audio files
        self.ptsOffset = 0        # accumulated pts offset across audio files
        self.ptsLast = None       # last pts seen in _GstProbeBuffer

    @staticmethod
    def CheckDependencies(msgList):
        '''
        Check whether GStreamer and its python bindings are available and
        append a user visible message to msgList if not. Additionally logs a
        warning if the optional textoverlay element (needed to render
        subtitles into the video) is missing.
        :param msgList: list that missing-dependency messages are appended to
        '''
        if Gst is None or GObject is None:
            _GStreamerRenderer.Log(logging.DEBUG,
                                   "checking for gstreamer failed!")
            msgList.append(_(u"GStreamer (python-gst-1.0) required!"))
        else:
            to = Gst.ElementFactory.find("textoverlay")
            if to is None:
                # logging.WARNING instead of the deprecated WARN alias, for
                # consistency with the rest of this module
                _GStreamerRenderer.Log(
                    logging.WARNING,
                    "GStreamer element textoverlay not found! Subtitles cannot be rendered into video file."
                )

    @staticmethod
    def GetProperties():
        # names of the user configurable properties of this renderer
        return ["Bitrate", "RenderSubtitle"]

    @staticmethod
    def GetDefaultProperty(prop):
        '''
        Return the default value for the given property name; unknown
        properties are delegated to the base renderer.
        :param prop: name of the property
        '''
        if prop != "RenderSubtitle":
            return BaseRenderer.GetDefaultProperty(prop)
        return "false"

    def ToSink(self, data):
        # called by worker threads with an encoded frame; blocks when the
        # bounded queue is full until _GstNeedData drains it
        self.resQueue.put(data)

    def __CleanUp(self):
        '''
        Waits until the ready event is set, stops the GTK main loop and
        resets all rendering state back to its initial values.
        The ready event is set within _GstOnMessage if the end-of-stream event
        was handled.
        '''
        if self.ready is None:
            # Prepare was never called or cleanup already ran
            return

        self._Log(logging.DEBUG, "waiting for ready event")
        self.ready.wait()
        self.gtkMainloop.quit()

        # reset all state set up in __init__/Prepare so the renderer can be
        # reused for another run
        self.active = None
        self.finished = None
        self.ready = None
        self.pipeline = None
        self.idxFrame = 0
        self.idxAudioFile = 0
        self.imgDuration = None
        self.finalTime = None
        self.gtkMainloop = None
        self.textoverlay = None
        self.srtParse = None
        self.concat = None
        self.ptsOffset = 0
        self.ptsLast = None

        if self.GetTypedProperty("RenderSubtitle", bool):
            # delete subtitle file, if subtitle is rendered in video
            srtPath = os.path.join(self.GetOutputPath(), "output.srt")
            if os.path.exists(srtPath):
                os.remove(srtPath)

    def ProcessAbort(self):
        '''
        Called if the user aborts the rendering. Sets the active flag to false
        and waits until everything is cleaned up.
        '''
        # only flip the flag if rendering actually started (active is None
        # before Prepare was called)
        if self.active:
            self.active = False

        self.__CleanUp()

    def Prepare(self):
        '''
        Build the gstreamer pipeline and all necessary objects and bindings.
        Video path: appsrc -> jpegdec -> videoconvert [-> textoverlay]
        -> queue -> video encoder [-> parser] -> queue -> mux -> filesink.
        Optional audio path: filesrc/decodebin per file -> concat
        -> audioconvert -> audioresample -> queue -> audio encoder
        [-> parser] -> queue -> mux.
        '''
        GObject.threads_init()

        # ready starts set so __CleanUp does not block if rendering never
        # gets going; it is cleared at the end of this method and set again
        # by _GstOnMessage on end-of-stream
        self.ready = threading.Event()
        self.ready.set()

        self.active = True
        self.finished = False
        frameRate = self.GetProfile().GetFrameRate()
        # 1000ms / fps == x msec/frame
        self.imgDuration = int(round(1000 * Gst.MSECOND / frameRate.AsFloat()))
        self._Log(logging.DEBUG, "set imgDuration=%s", self.imgDuration)

        outFile = os.path.join(self.GetOutputPath(),
                               "output.%s" % self._GetExtension())

        self.pipeline = Gst.Pipeline()

        # appsrc is fed jpeg frames by _GstNeedData; block=True throttles the
        # source instead of dropping data
        caps = Gst.caps_from_string("image/jpeg,framerate={0}".format(
            frameRate.AsStr()))
        videoSrc = Gst.ElementFactory.make("appsrc")
        videoSrc.set_property("block", True)
        videoSrc.set_property("caps", caps)
        videoSrc.connect("need-data", self._GstNeedData)
        self.pipeline.add(videoSrc)

        queueVideo = Gst.ElementFactory.make("queue")
        self.pipeline.add(queueVideo)

        jpegDecoder = Gst.ElementFactory.make("jpegdec")
        self.pipeline.add(jpegDecoder)

        colorConverter = Gst.ElementFactory.make("videoconvert")
        self.pipeline.add(colorConverter)

        videoEnc = self._GetVideoEncoder()
        self.pipeline.add(videoEnc)

        # textoverlay is only inserted if subtitle rendering is requested and
        # the element is actually available (see CheckDependencies)
        if self.GetTypedProperty(
                "RenderSubtitle",
                bool) and Gst.ElementFactory.find("textoverlay"):
            self.textoverlay = Gst.ElementFactory.make("textoverlay")
            self.textoverlay.set_property("text", "")
            self.pipeline.add(self.textoverlay)

        # link elements for video stream
        videoSrc.link(jpegDecoder)
        jpegDecoder.link(colorConverter)
        if self.textoverlay:
            colorConverter.link(self.textoverlay)
            self.textoverlay.link(queueVideo)
        else:
            colorConverter.link(queueVideo)
        queueVideo.link(videoEnc)

        audioEnc = None
        if self.GetAudioFiles():
            # concat joins the decoded streams of all audio files; the probe
            # on its src pad drops audio that runs past the video's end
            self.concat = Gst.ElementFactory.make("concat")
            self.pipeline.add(self.concat)

            srcpad = self.concat.get_static_pad("src")
            srcpad.add_probe(
                Gst.PadProbeType.
                BUFFER,  # | Gst.PadProbeType.EVENT_DOWNSTREAM,
                self._GstProbeBuffer)

            # further audio files are chained in _GstNoMorePadsAudio
            self._GstAddAudioFile(self.GetAudioFiles()[self.idxAudioFile])

            audioConv = Gst.ElementFactory.make("audioconvert")
            self.pipeline.add(audioConv)

            audiorate = Gst.ElementFactory.make("audioresample")
            self.pipeline.add(audiorate)

            audioQueue = Gst.ElementFactory.make("queue")
            self.pipeline.add(audioQueue)

            audioEnc = self._GetAudioEncoder()
            self.pipeline.add(audioEnc)

            self.concat.link(audioConv)
            audioConv.link(audiorate)
            audiorate.link(audioQueue)
            audioQueue.link(audioEnc)

        # insert parsers between encoder and muxer where the container
        # format requires them
        if self.GetProfile().IsMPEGProfile():
            vp = Gst.ElementFactory.make("mpegvideoparse")
            self.pipeline.add(vp)
            videoEnc.link(vp)
            videoEnc = vp

            if audioEnc:
                ap = Gst.ElementFactory.make("mpegaudioparse")
                self.pipeline.add(ap)
                audioEnc.link(ap)
                audioEnc = ap
        elif isinstance(self, MkvX265AC3):
            vp = Gst.ElementFactory.make("h265parse")
            self.pipeline.add(vp)
            videoEnc.link(vp)
            videoEnc = vp

        mux = self._GetMux()
        self.pipeline.add(mux)

        videoQueue2 = Gst.ElementFactory.make("queue")
        self.pipeline.add(videoQueue2)

        videoEnc.link(videoQueue2)
        videoQueue2.link(mux)

        if audioEnc:
            audioQueue2 = Gst.ElementFactory.make("queue")
            self.pipeline.add(audioQueue2)
            audioEnc.link(audioQueue2)
            audioQueue2.link(mux)

        sink = Gst.ElementFactory.make("filesink")
        sink.set_property("location", outFile)
        self.pipeline.add(sink)

        mux.link(sink)

        # bus messages (errors, latency, EOS) are handled in _GstOnMessage
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self._GstOnMessage)

        self.pipeline.set_state(Gst.State.PLAYING)

        # the GObject main loop must run in its own thread so bus messages
        # are dispatched while rendering continues
        self.gtkMainloop = GObject.MainLoop()
        gtkMainloopThread = threading.Thread(name="gtkMainLoop",
                                             target=self._GtkMainloop)
        gtkMainloopThread.start()

        self.ready.clear()

    def _GtkMainloop(self):
        # thread target: blocks in the GObject main loop until __CleanUp
        # calls gtkMainloop.quit()
        self._Log(logging.DEBUG, "GTK mainloop starting...")
        self.gtkMainloop.run()
        self._Log(logging.DEBUG, "GTK mainloop finished")

    def _GstAddAudioFile(self, audioFile):
        '''
        Extend the running pipeline with a filesrc/decodebin pair for one
        additional audio file. The decodebin's pads are attached to the
        concat element asynchronously via the pad-added callback.
        :param audioFile: the full path to the audio file
        '''
        fileSource = Gst.ElementFactory.make("filesrc")
        fileSource.set_property("location", audioFile)
        self.pipeline.add(fileSource)

        decoder = Gst.ElementFactory.make("decodebin")
        decoder.connect("pad-added", self._GstPadAddedAudio)
        decoder.connect("no-more-pads", self._GstNoMorePadsAudio)
        self.pipeline.add(decoder)

        fileSource.link(decoder)

    def Finalize(self):
        '''
        Mark rendering as finished and wait for the pipeline teardown.
        '''
        # unconditional assignment is equivalent to the guarded form: the
        # flag only ever transitions towards True here
        self.finished = True

        self.__CleanUp()

    def _GetBitrate(self):
        '''
        Return the configured bitrate, falling back to the profile's default.
        :raises RendererException: if the property value is not a number
        '''
        value = self.GetTypedProperty("Bitrate", int,
                                      self.GetProfile().GetBitrate())
        if value is None:
            raise RendererException(_(u"Bitrate must be a number!"))
        return value

    def _GstOnMessage(self, bus, msg):  # pylint: disable=unused-argument
        '''
        Handle messages posted on the gstreamer bus.
        :param bus: GstBus the message arrived on (unused)
        :param msg: GstMessage to dispatch on
        '''
        self._Log(logging.DEBUG, '_GstOnMessage: %s', msg.type)

        msgType = msg.type
        if msgType == Gst.MessageType.ERROR:
            err, debug = msg.parse_error()
            self._Log(logging.ERROR, "Error received from element %s: %s",
                      msg.src.get_name(), err)
            self._Log(logging.DEBUG, "Debugging information: %s", debug)

        elif msgType == Gst.MessageType.LATENCY:
            # latency changed; let gstreamer redistribute it in the pipeline
            self.pipeline.recalculate_latency()

        elif msgType == Gst.MessageType.EOS:
            # end-of-stream reached the sink: shut the pipeline down and
            # unblock __CleanUp via the ready event
            self.pipeline.set_state(Gst.State.NULL)
            self.ready.set()

    def _GstNeedData(self, src, need_bytes):  # pylint: disable=unused-argument
        '''
        Gstreamer need-data probe callback to feed the appsrc with image data.
        The image data comes from a queue that is filled from other worker
        threads. If the queue is empty and the finish flag is set send
        end-of-stream to the appsrc so the pipeline can finish its processing.
        If the textoverlay element is available the current text for the
        rendered subtitle will be set.
        :param src: GstElement appsrc
        :param need_bytes: unused size
        '''
        self._Log(logging.DEBUG, '_GstNeedData: %s', self.idxFrame)

        # presentation timestamp of the frame about to be pushed
        pts = self.idxFrame * self.imgDuration

        while self.active:
            result = None
            try:
                # poll with timeout so both abort (active=False) and the
                # finished flag are noticed while the queue stays empty
                result = self.resQueue.get(True, 0.25)
                break
            except Queue.Empty:
                self._Log(logging.DEBUG, '_GstNeedData: Queue.Empty')
                if self.finished:
                    # no more frames will arrive; pts of this (never pushed)
                    # frame marks the end of the video stream
                    self._Log(
                        logging.DEBUG,
                        '_GstNeedData: finished, emitting end-of-stream (finalTime %s)',
                        pts)
                    self.finalTime = pts
                    src.emit("end-of-stream")
                    return
                else:
                    continue
        else:
            # while condition became false without break: rendering aborted
            self._Log(
                logging.DEBUG,
                '_GstNeedData: not active anymore, emitting end-of-stream (finalTime %s)',
                pts)
            self.finalTime = pts
            src.emit("end-of-stream")
            return

        self._Log(logging.DEBUG, '_GstNeedData: push to buffer (%s)',
                  len(result))

        buf = Gst.Buffer.new_wrapped(result)
        buf.pts = pts
        buf.duration = self.imgDuration
        ret = src.emit("push-buffer", buf)
        if ret != Gst.FlowReturn.OK:
            # downstream refused the buffer; idxFrame deliberately not advanced
            return

        if self.textoverlay:
            #             self.textoverlay.set_property("text", "Frame: %s" % self.idxFrame)
            if self.srtParse is None:
                # lazily parse the subtitle file generated next to the output
                srtPath = os.path.join(self.GetOutputPath(), "output.srt")
                self.srtParse = SrtParser(
                    srtPath,
                    self.GetProfile().GetFrameRate().AsFloat())

            subtitle = self.srtParse.Get(self.idxFrame)
            self.textoverlay.set_property("text", subtitle)

        self.idxFrame += 1

    def _GstPadAddedAudio(self, decodebin, pad):
        '''
        pad-added callback: wire a freshly exposed decoded audio pad into
        the concat element of the pipeline.
        :param decodebin: GstElement decodebin (decoder for audio data)
        :param pad: the newly created GstPad
        '''
        self._Log(logging.DEBUG, "_GstPadAddedAudio: %s - %s", decodebin, pad)
        padCaps = pad.get_current_caps()
        sinkPad = self.concat.get_compatible_pad(pad, padCaps)
        pad.link(sinkPad)

    def _GstNoMorePadsAudio(self, decodebin):
        '''
        no-more-pads callback: the current audio file is fully set up, so
        chain the next audio file into the pipeline if one is left.
        :param decodebin: GstElement decodebin that finished exposing pads
        '''
        self._Log(logging.DEBUG, "_GstNoMorePadsAudio: %s", decodebin)
        self.idxAudioFile += 1
        if self.idxAudioFile >= len(self.GetAudioFiles()):
            return
        self._GstAddAudioFile(self.GetAudioFiles()[self.idxAudioFile])


#             self.pipeline.set_state(Gst.State.PLAYING)

    def _GstProbeBuffer(self, srcPad, probeInfo):  # pylint: disable=unused-argument
        '''
        Gstreamer pad probe callback on the concat src pad. Tracks an offset
        across the pts resets that occur when concat switches to the next
        audio file, and drops audio buffers once the accumulated stream time
        has passed the video's final time.
        :param srcPad: src pad of the concat element
        :param probeInfo: GstPadProbeInfo object
        :returns: Gst.PadProbeReturn.PASS or DROP
        '''
        buf = probeInfo.get_buffer()
        self._Log(logging.DEBUG, "_GstProbeBuffer: buffer %s",
                  (buf, buf.pts / Gst.MSECOND, self.ptsOffset / Gst.MSECOND,
                   self.finalTime))
        # pts jumped backwards: a new audio file started, so fold the
        # previous file's length into the running offset
        # (NOTE(review): relies on Py2 semantics for the first call where
        # ptsLast is still None — int < None is False there)
        if buf.pts < self.ptsLast:
            self.ptsOffset += self.ptsLast
        self.ptsLast = buf.pts

        dropBuffer = (self.finalTime is not None
                      and self.ptsOffset + buf.pts >= self.finalTime)
        if dropBuffer:
            return Gst.PadProbeReturn.DROP
        return Gst.PadProbeReturn.PASS

    def _GetExtension(self):
        '''Return the output file extension; provided by format subclasses.'''
        raise NotImplementedError()

    def _GetMux(self):
        '''Return the muxer GstElement; provided by format subclasses.'''
        raise NotImplementedError()

    def _GetAudioEncoder(self):
        '''Return the audio encoder GstElement; provided by format subclasses.'''
        raise NotImplementedError()

    def _GetVideoEncoder(self):
        '''Return the video encoder GstElement; provided by format subclasses.'''
        raise NotImplementedError()