# Example no. 1
if __name__ == '__main__':

    import logging, sys

    logging.basicConfig(
        level=getattr(logging, "DEBUG"),
        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
        stream=sys.stderr
    )
    # BUG FIX: `logger` was used below but never bound -- basicConfig() only
    # configures handlers, it does not create a logger object -- so every
    # logger.info() call raised NameError at runtime.
    logger = logging.getLogger("gstgengui")

    def parse_argv(args):
        """Join args[1:] into a single gst-launch pipeline description.

        Each argument is appended followed by a space, so the returned
        string carries a trailing space (harmless to gst-launch parsing).
        """
        cmd = ""
        for n in range(1, len(args)):
            cmd += "%s " % args[n]
        logger.info("gst-launch pipeline is: %s" % cmd)
        return cmd

    from sys import argv
    if len(argv) <= 1:
        # No pipeline given on the command line: fall back to the
        # pipeline description stored in the package config module.
        logger.info("No gst-launch syntax detected, using config file")
        import gstgengui.config
        string = gstgengui.config.pipeline_desc
    else:
        string = parse_argv(argv)

    # BUG FIX: PipelineManager was referenced without being imported; the
    # import path below matches the one used elsewhere in this project.
    from gstmanager.gstmanager import PipelineManager
    pipeline_launcher = PipelineManager(string)

    from gtk_controller import GtkGstController
    controller = GtkGstController(pipeline_launcher)

    controller.main()
# Example no. 2
    def play(self):
        """Assemble the complete GStreamer pipeline and set it PLAYING.

        Builds a gst.Pipeline ("player") from the configured sources,
        wiring: per-source audio (via an input-selector) and video (tee ->
        thumbnail preview, tee -> picture-in-picture), then a shared video
        chain (PiP -> watermark -> colorspace -> videobalance -> queue ->
        video effect -> text overlay -> preview tee) feeding one
        converter/encoder/tee per distinct encoder, each fanned out to its
        output bins.  Emits "preplay" first, "error"/"stopped" on fatal
        configuration problems, "pipeline-ready" once built, and "playing"
        when the state change succeeds synchronously.

        NOTE(review): uses the gst-python 0.10 API (element_factory_make,
        get_request_pad, element_link_many) and Python 2 idioms (has_key);
        statement order is load-bearing throughout -- elements must be added
        to the pipeline before linking.
        """

        self.emit("preplay")

        self.player = gst.Pipeline("player")

        self.queue_video = gst.element_factory_make("queue", "queue_video")
        self.player.add(self.queue_video)

        # Bitmask of MEDIA_AUDIO / MEDIA_VIDEO flags, set while scanning sources.
        self.input_type = 0

        # Source selection

        self.source_pads = {}
        self.audio_pads = {}
        self.pip_pads = {}

        self.output_bins = {}
        # NOTE(review): `type` shadows the builtin; it accumulates the
        # get_type() flags of the selected video/audio sources and is later
        # passed to the encoder factory.
        type = 0
        source_number = 0
        pip_number = 0

        self.pip = PictureInPicture()

        self.player.add(self.pip)

        for row in self.sources.get_store():
            (name, source) = row
            element = source.create()
            self.player.add(element)

            if element.does_audio():
                if not self.input_type & MEDIA_AUDIO:

                    # The pipeline has audio sources, and this is the first
                    # audio source we add

                    if self.audio_source is None:
                        self.emit("error", "You need to select an audio source")
                        self.emit("stopped")
                        return
                    self.input_type |= MEDIA_AUDIO
                    # One input-selector multiplexes all audio sources; the
                    # active pad is chosen after the loop.
                    self.input_selector = gst.element_factory_make(
                            "input-selector", "audio-selector"
                    )
                    self.player.add(self.input_selector)

                audiobin = audioinputbin.AudioInputBin(source)
                self.player.add(audiobin)

                element.audio_pad.link(audiobin.get_static_pad("sink"))
                # Remember the selector request pad per source name so the
                # active audio source can be switched later.
                self.audio_pads[name] = \
                        self.input_selector.get_request_pad("sink%d")
                audiobin.src_pad.link(self.audio_pads[name])

            if element.does_video():
                self.input_type |= MEDIA_VIDEO

                # Map source name -> ordinal, used by _switch_source().
                self.source_pads[name] = source_number
                source_number = source_number + 1

                # Thumbnail preview

                tee = gst.element_factory_make("tee", None)
                self.player.add(tee)
                element.video_pad.link(tee.sink_pads().next())

                thumbnail_queue = gst.element_factory_make("queue", None)
                self.player.add(thumbnail_queue)
                self.thumbnails[name] = Preview(self)
                self.player.add(self.thumbnails[name])

                thumbnail_err = gst.element_link_many(
                    tee, thumbnail_queue, self.thumbnails[name]
                )
                if thumbnail_err == False:
                    self.emit("error", "Error conecting thumbnail preview.")

                # Picture in Picture

                self.pip_pads[name] = pip_number
                pip_number = pip_number + 1

                # Each video source feeds both PiP inputs (main = A,
                # inset = B) through its own pair of queues.
                main_queue = gst.element_factory_make("queue", None)
                self.player.add(main_queue)
                pip_queue = gst.element_factory_make("queue", None)
                self.player.add(pip_queue)

                tee.link(main_queue)
                tee.link(pip_queue)
                main_queue.src_pads().next().link(self.pip.get_request_pad_A())
                pip_queue.src_pads().next().link(self.pip.get_request_pad_B())

            if name == self.video_source:
                type |= element.get_type()
            if name == self.audio_source:
                type |= element.get_type()

        # Shared video post-processing chain: watermark overlay, colorspace
        # conversion, and brightness/contrast/hue/saturation adjustment.
        self.watermark = gst.element_factory_make(
                "cairoimageoverlay", "cairoimageoverlay"
        )
        self.player.add(self.watermark)

        self.colorspace = gst.element_factory_make(
                "ffmpegcolorspace", "colorspace-imageoverlay-videobalance"
        )
        self.player.add(self.colorspace)

        self.videobalance = gst.element_factory_make(
                "videobalance", "videobalance"
        )
        self.player.add(self.videobalance)
        # Only override videobalance defaults when a value was configured
        # (falsy/zero settings are deliberately skipped).
        if self.videobalance_contrast:
            self.videobalance.set_property(
                    "contrast", self.videobalance_contrast
            )
        if self.videobalance_brightness:
            self.videobalance.set_property(
                    "brightness", self.videobalance_brightness
            )
        if self.videobalance_hue:
            self.videobalance.set_property(
                    "hue", self.videobalance_hue
            )
        if self.videobalance_saturation:
            self.videobalance.set_property(
                    "saturation", self.videobalance_saturation
            )

        gst.element_link_many(
                self.pip, self.watermark, self.colorspace, self.videobalance,
                self.queue_video
        )

        # Activate the configured video source and PiP inset.
        self._switch_source()
        self._switch_pip()

        if self.pip_position:
            self.pip.set_property("position", self.pip_position)

        self.effect[MEDIA_VIDEO] = effect.video_effect.VideoEffect(
                self.effect_name[MEDIA_VIDEO]
        )
        self.player.add(self.effect[MEDIA_VIDEO])

        self.overlay = gst.element_factory_make("textoverlay", "overlay")
        self.overlay.set_property("font-desc", self.overlay_font)
        self.overlay.set_property("halign", self.halign)
        self.overlay.set_property("valign", self.valign)
        self.player.add(self.overlay)

        gst.element_link_many(
                self.queue_video, self.effect[MEDIA_VIDEO], self.overlay
        )

        # Fan-out point for the processed video: per-encoder converters and
        # the optional live preview hang off this tee.
        self.preview_tee = multeequeue.MulTeeQueue()
        self.player.add(self.preview_tee)

        self.overlay.link(self.preview_tee)

        if self.input_type & MEDIA_AUDIO:
            # Audio chain: selector -> volume -> effect -> convert -> tee.
            self.convert = gst.element_factory_make("audioconvert", "convert")
            self.player.add(self.convert)

            self.effect[MEDIA_AUDIO] = effect.audio_effect.AudioEffect(
                    self.effect_name[MEDIA_AUDIO]
            )
            self.player.add(self.effect[MEDIA_AUDIO])

            self.audio_tee = gst.element_factory_make("tee", "audio_tee")
            self.player.add(self.audio_tee)

            self.volume = volume.Volume()
            self.player.add(self.volume)

            gst.element_link_many(
                    self.input_selector, self.volume,
                    self.effect[MEDIA_AUDIO], self.convert, self.audio_tee
            )
            self.input_selector.set_property(
                    "active-pad", self.audio_pads[self.audio_source]
            )
        # Encoders are shared between outputs with the same parent encoder;
        # maps encoder name -> the tee that fans its stream out.
        added_encoders = {}

        # Largest converter dimensions seen; used to size the PiP output.
        pip_width = 0
        pip_height = 0

        for row in self.outputs.get_store():
            (name, output) = row

            output_bin = outputbin.OutputBin(output)
            self.output_bins[name] = output_bin
            self.player.add(output_bin)

            encoder_name = output.get_config()["parent"]

            encoder_item = self.encoders.get_item(encoder_name)
            if encoder_item is None:
                self.emit("error", "Please, add an encoder.")
                break

            # NOTE(review): has_key() is Python 2 only.
            if added_encoders.has_key(encoder_name):
                # Encoder already built for a previous output: just attach
                # this output bin to its tee.
                tee = added_encoders[encoder_name]

                tee.link(output_bin)
            else:
                tee = gst.element_factory_make("tee", None)
                self.player.add(tee)

                converter_item = encoder_item.parent
                converter = converter_item.create()
                if converter_item.config["width"] > pip_width:
                    pip_width = converter_item.config["width"]
                if converter_item.config["height"] > pip_height:
                    pip_height = converter_item.config["height"]
                self.player.add(converter)

                encoder = encoder_item.factory.create(type)
                if encoder.vorbisenc:
                    # Vorbis encoders carry the stream metadata tags.
                    self.metadata = metadata.Metadata(encoder.vorbisenc)
                    self.metadata.set_tags(self.taglist)
                encoder.config(encoder_item.config)
                self.player.add(encoder)

                added_encoders[encoder_name] = tee
                self.preview_tee.get_src_pad().link(
                        converter.sink_pads().next()
                )
                gst.element_link_many(
                        converter, encoder, tee, output_bin
                )

                if self.input_type & MEDIA_AUDIO:
                    audio_queue = gst.element_factory_make("queue", None)
                    self.player.add(audio_queue)

                    gst.element_link_many(self.audio_tee, audio_queue, encoder)

        if self.preview_enabled:
            self.preview = Preview(self)
            self.player.add(self.preview)
            self.preview_tee.get_src_pad().link(self.preview.sink_pads().next())

        # No encoder declared a size: fall back to 320x240.
        if pip_width == 0:
            pip_width = 320
            pip_height = 240
        self.pip.set_property("width", int(pip_width))
        self.pip.set_property("height", int(pip_height))

        self.video_width = int(pip_width)
        self.video_height = int(pip_height)
        self._set_watermark(self.video_width, self.video_height)

        self.overlay.set_property("text", self.overlay_text)
        if self.volume_value is not None:
            self.volume.set_property("volume", self.volume_value)

        self.emit("pipeline-ready")

        # Watch the pipeline bus for messages and sync element messages.
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)

        # NOTE(review): these imports attach an external GTK inspector GUI
        # to the running pipeline; confirm they are intended in production.
        from gstmanager.gstmanager import PipelineManager
        from gtk_controller import GtkGstController
        pipeline_launcher = PipelineManager(self.player)
        controller = GtkGstController(pipeline_launcher)
        controller.scan_pipeline()

        cr = self.player.set_state(gst.STATE_PLAYING)
        if cr == gst.STATE_CHANGE_SUCCESS:
            self.emit("playing")
        elif cr == gst.STATE_CHANGE_ASYNC:
            # State change completes asynchronously; remember the target so
            # the eventual bus message can emit "playing".
            self.pending_state = gst.STATE_PLAYING
# Example no. 3
    # NOTE(review): this fragment is the tail of a GUI-building helper whose
    # `def` line is outside this view (it is used as `build_gui(pipeline,
    # controller)` further down).  The `None` assignment is immediately
    # overwritten on the next line and looks redundant -- confirm before
    # removing.
    introspector = None
    introspector = PipelineIntrospector(pipeline)
    # Build widgets for currently supported elements
    for element in introspector.elements:
        controller.add_element_widget(element)

if __name__ == '__main__':

    def parse_argv(args):
        cmd = ""
        for n in range(1, len(args)):
            cmd+="%s " %args[n]
        print "gst-launch pipeline is: %s" %cmd
        return cmd
   
    from sys import argv
    if len(argv) <= 1:
        print "No gst-launch syntax detected, using config file"
        pipeline_launcher = PipelineManager()
    else:
        gst_string = parse_argv (argv)
        pipeline_launcher = PipelineManager(gst_string)
    pipeline = pipeline_launcher.pipeline

    controller = GtkGstController(pipeline_launcher)

    build_gui(pipeline, controller)
    controller.rebuild_callback = build_gui

    controller.main()