Example #1
0
    def __init__(self):
        """Build the GTK window and controls plus a minimal GStreamer
        pipeline (videotestsrc -> xvimagesink).

        Playback is driven by the Play/Stop/Quit button callbacks
        (OnPlay / OnStop / OnQuit, defined elsewhere on this class).
        """
        # Create GUI objects
        self.window = gtk.Window()
        self.vbox = gtk.VBox()
        self.da = gtk.DrawingArea()
        self.bb = gtk.HButtonBox()
        self.da.set_size_request(300, 150)
        # GTK stock IDs must not contain spaces: "gtk - media - play" is not
        # a valid stock item, so the buttons would lose their icon/label.
        self.playButton = gtk.Button(stock="gtk-media-play")
        self.playButton.connect("clicked", self.OnPlay)
        self.stopButton = gtk.Button(stock="gtk-media-stop")
        self.stopButton.connect("clicked", self.OnStop)
        self.quitButton = gtk.Button(stock="gtk-quit")
        self.quitButton.connect("clicked", self.OnQuit)
        self.vbox.pack_start(self.da)
        self.bb.add(self.playButton)
        self.bb.add(self.stopButton)
        self.bb.add(self.quitButton)
        self.vbox.pack_start(self.bb)
        self.window.add(self.vbox)

        # Create GStreamer pipeline
        self.pipeline = gst.Pipeline("mypipeline")
        # Set up our video test source and add it to the pipeline
        self.videotestsrc = gst.element_factory_make("videotestsrc", "video")
        self.pipeline.add(self.videotestsrc)
        # Now we need somewhere to send the video: an XVideo image sink
        self.sink = gst.element_factory_make("xvimagesink", "sink")
        self.pipeline.add(self.sink)
        # Link the video source to the sink
        self.videotestsrc.link(self.sink)
        self.window.show_all()
Example #2
0
    def openfile(self, filename, aModel):
        """Load *filename* into a fresh playbin pipeline, left paused,
        whose video sink is a capsfilter->fakesink bin with a buffer
        probe attached for frame inspection.
        """
        self.images = list()
        self.errors = list()
        self.fileName = filename
        self.codec = ""
        # Tear down any previously built pipeline before creating a new one.
        if self.player is not None:
            self.player.set_state(gst.STATE_NULL)

        self.__isEndOfStream = False
        self.player = gst.element_factory_make("playbin", "player")

        # Custom video sink bin: capsfilter (force RGB24) -> fakesink,
        # exposed to playbin through a ghost "sink" pad.
        video_bin = gst.Bin("video")
        caps_filter = gst.element_factory_make("capsfilter", "videofilter")
        video_bin.add(caps_filter)
        caps_filter.set_property("caps",
                                 gst.Caps("video/x-raw-rgb, depth=24, bpp=24"))
        video_bin.add_pad(gst.GhostPad("sink", caps_filter.get_pad("sink")))
        fake_sink = gst.element_factory_make("fakesink", "videosink")
        video_bin.add(fake_sink)
        # Every buffer reaching the fakesink is handed to __onBufferProbe.
        fake_sink.get_pad("sink").add_buffer_probe(self.__onBufferProbe)
        gst.element_link_many(caps_filter, fake_sink)
        self.player.set_property("video-sink", video_bin)

        self.bus = self.player.get_bus()
        self.bus.add_signal_watch()
        self.watchID = self.bus.connect("message", self.__onMessage)
        self.player.set_property("uri", "file://" + filename)
        # Pause (not play) so frames can be pulled on demand.
        self.player.set_state(gst.STATE_PAUSED)
        self.model = aModel
Example #3
0
    def openfile(self, filename ,aModel):
        """Load *filename* into a fresh playbin pipeline (paused) whose
        video sink is a capsfilter->fakesink bin with a buffer probe.
        """
        self.images = list()
        self.errors = list()
        self.fileName = filename
        self.codec = ""
        # Stop and discard any previously built pipeline first.
        if (self.player != None):
            self.player.set_state(gst.STATE_NULL)

        self.__isEndOfStream = False
        self.player = gst.element_factory_make("playbin", "player")
        # Video branch: capsfilter forcing RGB24 frames into a fakesink,
        # exposed to playbin through a ghost "sink" pad.
        videoBin = gst.Bin("video")
        videoFilter = gst.element_factory_make("capsfilter", "videofilter")
        videoBin.add(videoFilter)
        videoFilter.set_property("caps", gst.Caps("video/x-raw-rgb, depth=24, bpp=24"))
        ghostPad = gst.GhostPad("sink", videoFilter.get_pad("sink"))
        videoBin.add_pad(ghostPad)
        videoSink = gst.element_factory_make("fakesink", "videosink")
        videoBin.add(videoSink)
        pad = videoSink.get_pad("sink")
        # Every buffer reaching the fakesink is handed to __onBufferProbe.
        pad.add_buffer_probe(self.__onBufferProbe)
        gst.element_link_many(videoFilter, videoSink)
        self.player.set_property("video-sink", videoBin)

        self.bus = self.player.get_bus()
        self.bus.add_signal_watch()
        self.watchID = self.bus.connect("message", self.__onMessage)
        self.player.set_property("uri", "file://" + filename)
        # Pause (not play) so frames can be inspected on demand.
        self.player.set_state(gst.STATE_PAUSED)
        self.model = aModel
Example #4
0
 def __findedtype(typefinder, probability, caps, gstobject):
     """'have-type' callback: create a demuxer matching the detected
     container, add it after the typefind element and start it.

     *gstobject* is indexed for 'pipeline' and also carries an
     ele_demuxer attribute -- presumably a custom context object.
     """
     size = caps.get_size()
     print('caps size :', size)
     demuxer = None
     pipeline = gstobject['pipeline']
     for i in range(size):
         structure = caps.get_structure(i)
         name = structure.get_name()
         print('find new type ', name)
         if name == 'video/mpegts':
             demuxer = Gst.ElementFactory.make("tsdemux", "demuxer")
         elif name == 'video/quicktime':
             # Was Gst.element_factory_make (pygst 0.10 spelling); that name
             # does not exist on the GI Gst module and raised AttributeError.
             demuxer = Gst.ElementFactory.make("qtdemux", "demuxer")
         elif name == 'application/x-rtp':
             demuxer = Gst.ElementFactory.make("qtdemux", "demuxer")
     if demuxer is None:
         print('This container is not supported')
         exit()
     pipeline.add(demuxer)
     demuxer.connect('pad-added', EngineGST.__pad_added, gstobject)
     typefind = pipeline.get_by_name('typefinder')
     typefind.link(demuxer)
     gstobject.ele_demuxer = demuxer
     demuxer.set_state(Gst.State.PLAYING)
Example #5
0
 def build_pipeline(self, video_src, video_sink, pipeline):
     """Assemble src -> audioconvert -> visualiser -> colorspace -> sink."""
     self._visualiser = gst.element_factory_make(self._visualisation)
     self._color_space = gst.element_factory_make("ffmpegcolorspace")
     self._audioconvert = gst.element_factory_make("audioconvert")
     # Add and link the whole chain in one pass.
     chain = (video_src, self._audioconvert, self._visualiser,
              self._color_space, video_sink)
     pipeline.add(*chain)
     gst.element_link_many(*chain)
Example #6
0
def build_pipeline(access_in, stream_lists):
    """Create a pipeline whose source element matches *access_in*
    ('udp', 'file', 'rtsp', 'rtmp' or 'http'), attach a typefind element
    where applicable, and set the pipeline playing.

    Always returns 0. *stream_lists* is not used yet (see TODO below).
    """
    pipeline = Gst.Pipeline.new("itv-channel-1")
    access = None
    typefind = None
    # Was Gst.element_factory_make throughout -- the pygst-0.10 spelling,
    # which does not exist on the GI Gst module; use Gst.ElementFactory.make.
    if access_in == 'udp':
        access = Gst.ElementFactory.make("udpsrc", "access")
        typefind = Gst.ElementFactory.make("typefind", "typefinder")
        typefind.connect("have-type", findedtype, pipeline)
    elif access_in == 'file':
        access = Gst.ElementFactory.make("filesrc", "access")
        access.set_property('location','/media/hertz/b901d1b9-1b63-46ca-a706-86f7401fee63/hebin/4K 体验视频.mp4')
        typefind = Gst.ElementFactory.make("typefind", "typefinder")
    elif access_in == 'rtsp':
        access = Gst.ElementFactory.make("rtspsrc", "access")
        access.set_property('location','rtsp://192.168.61.26/')
        typefind = Gst.ElementFactory.make("typefind", "typefinder")
    elif access_in == 'rtmp':
        access = Gst.ElementFactory.make("rtmpsrc", "access")
    elif access_in == 'http':
        access = Gst.ElementFactory.make("httpsrc", "access")
        access.set_property('location','http://192.168.61.26/')
        typefind = Gst.ElementFactory.make("typefind", "typefinder")
        typefind.connect("have-type", findedtype, pipeline)

    # TODO: per-stream output branches are not built yet.
    for stream in stream_lists:
        pass

    if typefind is not None:
        pipeline.add(access)
        pipeline.add(typefind)
        access.link(typefind)
    # Was gst.STATE_PLAYING -- the module is imported as "Gst", so the
    # lowercase name raised NameError at runtime.
    pipeline.set_state(Gst.State.PLAYING)
    return 0
Example #7
0
def pad_added(src, new_pad, pipeline):
    """'pad-added' callback: attach a parser matching the new pad's caps
    after the demuxer, then build per-stream tee/mux/output branches.

    NOTE(review): stream_lists, video_transcode and audio_transcode are
    names defined elsewhere (presumably module globals) -- confirm.
    """
    preparser = None
    decoder = None
    sink_pad = None
    # Was: Gst.Pad.get_current_caps(new_pad)None -- a syntax error.
    new_pad_caps = Gst.Pad.get_current_caps(new_pad)
    new_pad_struct = new_pad_caps.get_structure(0)
    new_pad_type = new_pad_struct.get_name()
    new_pad_name = new_pad.get_name()

    print("Received new pad '%s' type '%s' \n from '%s'" % (new_pad_name,new_pad_type,src.get_name()))

    if new_pad_type == 'video/x-h265':
        preparser = Gst.element_factory_make("h265parse","preparser")
        pipeline.add(preparser)
    elif new_pad_type == 'video/x-h264':
        preparser = Gst.element_factory_make("h264parse","preparser")
        pipeline.add(preparser)
    elif new_pad_type == 'video/mpeg':
        preparser = Gst.element_factory_make("mpegvideoparse","preparser")
        pipeline.add(preparser)
    elif new_pad_type == 'audio/mpeg':
        # NOTE(review): this creates mpegvideoparse for an *audio* pad --
        # presumably mpegaudioparse was intended; confirm before relying.
        preparser = Gst.element_factory_make("mpegvideoparse","preparser")
        pipeline.add(preparser)
    elif new_pad_type == 'audio/aac':
        preparser = Gst.element_factory_make("aacparse","preparser")
        pipeline.add(preparser)
    elif new_pad_type == 'audio/x-ac3':
        preparser = Gst.element_factory_make("ac3parse","preparser")
        pipeline.add(preparser)
    demuxer = pipeline.get_by_name('demuxer')
    demuxer.link(preparser)
    tee_layer0 = None  # parsing  -> tee --> decoding
    tee_layer1 = None  # decoding -> tee --> encoding
    tee_layer2 = None  # encoding -> tee --> muxing
    stream_num = 0
    for stream in stream_lists:
        outputs = len(stream[3])
        muxer = None
        outer = None
        encoder = None
        tee_layer2 = None  # encoding -> tee --> muxing
        # Was "tee_layer2 in None" -- a TypeError; identity test intended.
        if outputs > 1 and tee_layer2 is None:
            tee_layer2 = Gst.element_factory_make('tee','teer_%d_%d' % (stream_num,stream_num))
            pipeline.add(tee_layer2)
        for out in stream[3]:
            # NOTE(review): 'mpegts'/'udpsrc' look wrong for an *output*
            # branch (mpegtsmux / udpsink expected) -- confirm element names.
            if out['wrapper'] == 'ts':
                muxer = Gst.element_factory_make('mpegts','muxer_%d_%d' % (stream_num,stream_num))
            if out['outaddr'][:3] == 'udp':
                outer = Gst.element_factory_make('udpsrc','outer_%d_%d' % (stream_num,stream_num))
            pipeline.add(muxer)
            pipeline.add(outer)
            muxer.link(outer)
            if outputs > 1:
                tee_layer2.link(muxer)
        if video_transcode == 0:
            preparser.link(tee_layer2)
        if audio_transcode == 0:
            preparser.link(tee_layer2)
Example #8
0
    def build_pipeline(self, video_src, video_sink, pipeline):
        """Build the decode graph: src -> decodebin, with a video branch
        (queue1 -> autoconvert -> capsfilter -> colorspace -> video sink)
        and an audio branch (queue2 -> audioconvert -> autoaudiosink).
        """
        make = gst.element_factory_make

        self._decodebin = make("decodebin2")
        self._autoconvert = make("autoconvert")

        # As a precaution, restrict the video branch to raw YUV frames.
        self._filter = make("capsfilter")
        self._filter.set_property("caps", gst.Caps("video/x-raw-yuv"))

        # Converts the video from one colorspace to another
        self._color_space = make("ffmpegcolorspace")

        self._audioconvert = make("audioconvert")
        self._audiosink = make("autoaudiosink")

        # Queues decoupling the two branches
        self._queue1 = make("queue")
        self._queue2 = make("queue")

        pipeline.add(video_src, self._decodebin, self._autoconvert,
                     self._audioconvert, self._queue1, self._queue2,
                     self._filter, self._color_space, self._audiosink,
                     video_sink)

        # decodebin pads appear dynamically; only the static parts link now.
        gst.element_link_many(video_src, self._decodebin)
        gst.element_link_many(self._queue1, self._autoconvert, self._filter,
                              self._color_space, video_sink)
        gst.element_link_many(self._queue2, self._audioconvert,
                              self._audiosink)
    def addAudioChain(self, pipeline, name, decoder, adder):
        """Insert audioconvert -> audioresample -> queue -> volume between
        *decoder* and *adder*, exposing the volume element as vol<name>."""
        volume = gst.element_factory_make("volume")
        volume.props.volume = 0.5
        audioconvert = gst.element_factory_make("audioconvert")
        audiorate = gst.element_factory_make("audioresample")
        queue = gst.element_factory_make("queue")

        pipeline.add(volume, audioconvert, audiorate, queue)
        # Decoder pads appear dynamically; onPad links them to audioconvert.
        decoder.connect("pad-added", self.onPad, audioconvert)
        for upstream, downstream in ((audioconvert, audiorate),
                                     (audiorate, queue),
                                     (queue, volume),
                                     (volume, adder)):
            upstream.link(downstream)

        setattr(self, "vol%s" % name, volume)
    def addAudioChain(self, pipeline, name, decoder, adder):
        """Build the per-source audio chain
        audioconvert -> audioresample -> queue -> volume -> adder
        and expose the volume element as self.vol<name>."""
        volume = gst.element_factory_make("volume")
        # 0.5 -- presumably so two mixed sources don't clip; confirm.
        volume.props.volume = 0.5
        audioconvert = gst.element_factory_make("audioconvert")
        audiorate = gst.element_factory_make("audioresample")
        queue = gst.element_factory_make("queue")

        pipeline.add(volume, audioconvert, audiorate, queue)
        # Decoder pads appear dynamically; onPad links them to audioconvert.
        decoder.connect("pad-added", self.onPad, audioconvert)
        audioconvert.link(audiorate)
        audiorate.link(queue)
        queue.link(volume)
        volume.link(adder)

        setattr(self, "vol%s" % name, volume)
Example #11
0
    def start_play(self, location=None):
	if not location:
	    return
	if not sys.platform == "win32":
            if not self.vis_selector.getSelectedIndex() == 0 and self.mainGui.search_engine.engine_type != "video":
		self.player.player.set_property('flags', 0x00000008|0x00000002)
		self.vis = self.change_visualisation()
                self.visual = Gst.element_factory_make(self.vis,'visual')
                self.player.player.set_property('vis-plugin', self.visual)
	    else:
		self.player.player.set_property('flags', 0x00000001|0x00000002|0x80)
	self.player.file_tags = {}
	self.active_link = location
	GObject.idle_add(self.play_btn_pb.set_from_pixbuf,self.stop_icon)
	GObject.idle_add(self.pause_btn_pb.set_from_pixbuf,self.pause_icon)

	if self.update_id == -1:
	    self.update_id = GObject.timeout_add(self.UPDATE_INTERVAL,
                                                     self.update_scale_cb)
	try:
	    GObject.idle_add(self.media_name_label.set_markup,'<small><b>%s</b> %s</small>' % (self.play_label,self.mainGui.media_name))
	except:
	    print ''
	try:
	    self.play_thread.stop()
	except:
	    print ''
	self.player.set_location(location)
	self.player.play()
	self.play_thread_id = thread.start_new_thread(self.play_thread, ())
        return True
Example #12
0
 def findedtype(typefinder, probability, caps, obj):
     """'have-type' callback: create and wire a demuxer for the detected
     container, then register a matching element descriptor on *obj*.

     NOTE(review): *obj* is both indexed ('pipeline') and attribute-accessed
     (chainner, str_launch) -- presumably a custom context object; confirm.
     """
     pipeline = obj['pipeline']
     ep = obj.chainner.el_pool
     e = ep['demux']
     caps_lib_cat = ''
     size = caps.get_size()
     print('caps size :', size)
     for i in range(size):
         structure = caps.get_structure(i)
         name = structure.get_name()
         print('find new type ', name)
         if name == 'video/mpegts':
             demuxer = Gst.ElementFactory.make("tsdemux", "demuxer")
             caps_lib_cat = 'ts' + '--demux'
             demuxer.connect("pad-added", EngineGST.pad_added, obj)
             pipeline.add(demuxer)
             EngineGST.linking(typefinder, demuxer)
             demuxer.set_state(Gst.State.PLAYING)
         elif name == 'video/quicktime':
             # NOTE(review): element_factory_make is the pygst-0.10 spelling;
             # the mpegts branch above uses Gst.ElementFactory.make, and this
             # branch also never adds/links the demuxer -- confirm intent.
             demuxer = Gst.element_factory_make("qtdemux", "demuxer")
             caps_lib_cat = 'mp4' + '--demux'
         else:
             print('No support ', name)
             exit()
     # Build the launch-string fragment for the chosen demuxer category.
     ret = obj.__build_element__(caps_lib_cat, e)
     if ret is None:
         print("typefinder failed")
     obj.str_launch += (e.up_name + '. ! ' + e.mod_name + ' name=' + e.name)
Example #13
0
    def __init__(self, mediator, cfg_provider, eventManager):
        """Set up the radiotray audio player: a playbin2 with a fakesink
        video sink, an HTTP source with a custom user agent, an optional
        configured buffer size, and bus message dispatch to on_message.
        """
        self.mediator = mediator
        self.eventManager = eventManager
        self.decoder = StreamDecoder(cfg_provider)
        self.playlist = []
        self.retrying = False

        self.log = logging.getLogger("radiotray")

        # init player
        # Was Gst.element_factory_make -- the pygst-0.10 spelling, which does
        # not exist on the GI Gst module; the rest of this method already
        # uses Gst.ElementFactory.make.
        self.souphttpsrc = Gst.ElementFactory.make("souphttpsrc", "source")
        self.souphttpsrc.set_property("user-agent", USER_AGENT)

        self.player = Gst.ElementFactory.make("playbin2", "player")
        # Audio-only app: discard any video stream via a fakesink.
        fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
        self.player.set_property("video-sink", fakesink)

        # buffer size (only applied when configured and positive)
        if cfg_provider._settingExists("buffer_size"):
            bufferSize = int(cfg_provider.getConfigValue("buffer_size"))
            if bufferSize > 0:
                self.log.debug("Setting buffer size to " + str(bufferSize))
                self.player.set_property("buffer-size", bufferSize)

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.on_message)
    def addVideoChain(self, pipeline, name, decoder, mixer):
        """Insert videorate -> videoscale -> videoconvert -> queue -> alpha
        between *decoder* and *mixer*, exposing alpha as alpha<name>."""
        alpha = gst.element_factory_make("alpha")
        alpha.props.alpha = 1.0
        videoscale = gst.element_factory_make("videoscale")
        videorate = gst.element_factory_make("videorate")
        colorspace = gst.element_factory_make("videoconvert")
        queue = gst.element_factory_make("queue")

        pipeline.add(alpha, videoscale, videorate, colorspace, queue)
        # Decoder pads appear dynamically; onPad links them to videorate.
        decoder.connect("pad-added", self.onPad, videorate)
        for upstream, downstream in ((videorate, videoscale),
                                     (videoscale, colorspace),
                                     (colorspace, queue),
                                     (queue, alpha),
                                     (alpha, mixer)):
            upstream.link(downstream)

        setattr(self, "alpha%s" % name, alpha)
    def addVideoChain(self, pipeline, name, decoder, mixer):
        """Build the per-source video chain
        videorate -> videoscale -> videoconvert -> queue -> alpha -> mixer
        and expose the alpha element as self.alpha<name>."""
        alpha = gst.element_factory_make("alpha")
        # Fully opaque by default; crossfades adjust this later.
        alpha.props.alpha = 1.0
        videoscale = gst.element_factory_make("videoscale")
        videorate = gst.element_factory_make("videorate")
        colorspace = gst.element_factory_make("videoconvert")
        queue = gst.element_factory_make("queue")

        pipeline.add(alpha, videoscale, videorate, colorspace, queue)
        # Decoder pads appear dynamically; onPad links them to videorate.
        decoder.connect("pad-added", self.onPad, videorate)
        videorate.link(videoscale)
        videoscale.link(colorspace)
        colorspace.link(queue)
        queue.link(alpha)
        alpha.link(mixer)

        setattr(self, "alpha%s" % name, alpha)
    def addSourceChain(self, pipeline, name, filename, mixer, adder):
        """Add a filesrc -> decodebin pair for *filename* and hang the
        video and audio processing chains off the decoder."""
        source = gst.element_factory_make("filesrc")
        source.props.location = filename
        decoder = create_decodebin()

        pipeline.add(source, decoder)
        source.link(decoder)
        self.addVideoChain(pipeline, name, decoder, mixer)
        self.addAudioChain(pipeline, name, decoder, adder)
    def addSourceChain(self, pipeline, name, filename, mixer, adder):
        """Add a filesrc -> decodebin pair for *filename* and hang the
        video and audio processing chains off the decoder.

        (The filesrc line was tab-indented while its siblings use spaces --
        a TabError under Python 3; normalized to spaces.)
        """
        #src = gst.element_factory_make("souphttpsrc")
        src = gst.element_factory_make("filesrc")
        src.props.location = filename
        dcd = create_decodebin()

        pipeline.add(src, dcd)
        src.link(dcd)
        self.addVideoChain(pipeline, name, dcd, mixer)
        self.addAudioChain(pipeline, name, dcd, adder)
Example #18
0
	def __init__(self):
		"""Create an empty playlist and a playbin2-based player whose bus
		messages are routed to on_message."""
		self.songs = []
		self.current = 0
		self.player = gst.element_factory_make("playbin2", "player")
		message_bus = self.player.get_bus()
		message_bus.add_signal_watch()
		message_bus.connect("message", self.on_message)
Example #19
0
def findedtype(typefinder, probability, caps, pipeline):
    """'have-type' callback: create a demuxer matching the detected
    container, link it after the typefind element and watch its pads.
    """
    # Was caps.gst_size(caps) -- no such method; get_size() is intended.
    size = caps.get_size()
    demuxer = None
    preparser = None
    for i in range(size):
        structure = caps.get_structure(i)
        name = structure.get_name()
        print('find new type ', name)
        if name == 'video/mpegts':
            demuxer = Gst.element_factory_make("tsdemux", "demuxer")
        elif name == 'video/quicktime':
            demuxer = Gst.element_factory_make("qtdemux", "demuxer")
        elif name == 'application/x-rtp':
            demuxer = Gst.element_factory_make("qtdemux", "demuxer")
    if demuxer is None:
        print('This container is not support')
        exit()
    pipeline.add(demuxer)
    # Was "demuxer = pipeline.get_by_name('typefind')", which clobbered the
    # freshly created demuxer and left "typefind" undefined on the next line.
    typefind = pipeline.get_by_name('typefind')
    typefind.link(demuxer)
    demuxer.connect('pad-added', pad_added, pipeline)
Example #20
0
 def build_pipeline(self, video_src, video_sink, pipeline):
     """Build an audio graph with a tee feeding both an audio sink branch
     and a visualisation branch rendered to *video_sink*."""
     make = gst.element_factory_make
     self._decodebin = make("decodebin2")
     self._visualiser = make(self._visualisation)
     self._color_space = make("ffmpegcolorspace")
     self._audioconvert = make("audioconvert")
     self._audiosink = make("autoaudiosink")
     self._tee = make('tee', "tee")
     self._queue1 = make("queue")
     self._queue2 = make("queue")
     pipeline.add(video_src, self._decodebin, self._audioconvert, self._tee,
                  self._queue1, self._audiosink, self._queue2,
                  self._visualiser, self._color_space, video_sink)
     # decodebin pads are dynamic; only the static segments link here.
     gst.element_link_many(video_src, self._decodebin)
     gst.element_link_many(self._audioconvert, self._tee)
     # Branch 1: tee -> queue1 -> audio sink.
     gst.element_link_many(self._tee, self._queue1, self._audiosink)
     # Branch 2: tee -> queue2 -> visualiser -> colorspace -> video sink.
     self._tee.link(self._queue2)
     gst.element_link_many(self._queue2, self._visualiser,
                           self._color_space, video_sink)
Example #21
0
    def load_menu_items(self):
        """Rebuild the plugin menu: /dev/video* capture devices, an
        "open file" entry, removable mounts (tracked via gio volume
        monitor signals), PulseAudio sources, and the GStreamer
        visualisation elements that can actually be instantiated.
        """
        items = []
        self.volume_monitor_signals = []

        # Webcams etc
        video_devices = []
        for i in os.listdir("/dev"):
            if i.startswith("video"):
                video_devices.append(i)

        if len(video_devices) > 0:
            items.append(
                g15theme.MenuItem("video-devices",
                                  True,
                                  _("Video Devices"),
                                  icon=g15icontools.get_icon_path(
                                      ["camera-web", "camera-video"]),
                                  activatable=False))
            for i in video_devices:
                items.append(G15VideoDeviceMenuItem(self, i))

        # Video File
        def activate_video_file():
            # Run the file chooser on the GTK main loop, not this thread.
            gobject.idle_add(self._open_video_file)

        items.append(
            g15theme.MenuItem("video-file",
                              True,
                              _("Open Audio/Video File"),
                              activate=activate_video_file,
                              icon=g15icontools.get_icon_path("folder")))

        # DVD / Mounts
        self.volume_monitor = gio.VolumeMonitor()
        self.volume_monitor_signals.append(
            self.volume_monitor.connect("mount_added", self._on_mount_added))
        self.volume_monitor_signals.append(
            self.volume_monitor.connect("mount_removed",
                                        self._on_mount_removed))
        removable_media_items = []
        for i, mount in enumerate(self.volume_monitor.get_mounts()):
            drive = mount.get_drive()
            if not mount.is_shadowed(
            ) and drive is not None and drive.is_media_removable():
                removable_media_items.append(
                    MountMenuItem('mount-%d' % i, mount, self))
        if len(removable_media_items):
            items.append(
                g15theme.MenuItem("removable-devices",
                                  True,
                                  _("Removable Devices"),
                                  icon=g15icontools.get_icon_path([
                                      "driver-removable-media",
                                      "gnome-dev-removable"
                                  ]),
                                  activatable=False))
            items += removable_media_items

        # Pulse
        status, output = g15os.get_command_output("pacmd list-sources")
        if status == 0 and len(output) > 0:
            i = 0
            pulse_items = []
            for line in output.split("\n"):
                line = line.strip()
                if line.startswith("name: "):
                    # NOTE(review): [7:-1] assumes the value is wrapped in
                    # angle brackets, e.g. 'name: <foo>' -- confirm against
                    # actual pacmd output.
                    name = line[7:-1]
                elif line.startswith("device.description = "):
                    pulse_items.append(
                        PulseSourceMenuItem(name, line[22:-1], self))
            if len(pulse_items) > 0:
                items.append(
                    g15theme.MenuItem("pulse-sources",
                                      True,
                                      _("PulseAudio Source"),
                                      icon=g15icontools.get_icon_path([
                                          "audio-card", "audio-speakers",
                                          "audio-volume-high",
                                          "audio-x-generic"
                                      ]),
                                      activatable=False))
                items += pulse_items

        # Visualisations - TODO - there must be a better way to list them
        items.append(
            g15theme.MenuItem("visualisation-mode",
                              True,
                              _("Visualisation Mode"),
                              icon=g15icontools.get_icon_path([
                                  "preferences-color", "gtk-select-color",
                                  "preferences-desktop-screensaver",
                                  "kscreensaver", "xscreensaver"
                              ]),
                              activatable=False))
        for c in [
                "goom", "libvisual_bumpscope", "libvisual_corona",
                "libvisual_infinite", "libvisual_jakdaw", "libvisual_jess",
                "libvisual_lv_analyzer", "libvisual_lv_scope",
                "libvisual_lv_oinksie", "synaesthesia", "spacescope",
                "spectrascope", "synaescope", "wavescope", "monoscope"
        ]:
            try:
                # Only offer visualisations whose element can be created.
                gst.element_factory_make(c)
                items.append(G15VisualisationMenuItem(c, self))
            except Exception as e:
                logger.debug("Error creating visualizations", exc_info=e)
                pass

        self.menu.set_children(items)
        if len(items) > 0:
            self.menu.selected = items[0]
        else:
            self.menu.selected = None
Example #22
0
 def create_source(self):
     """Return a v4l2src element bound to this item's /dev video device."""
     device_path = "/dev/%s" % self.name
     logger.info("Opening Video device %s", device_path)
     source = gst.element_factory_make("v4l2src", "video-source")
     source.set_property("device", device_path)
     return source
Example #23
0
 def create_source(self):
     """Return a DVD source element (dvdreadsrc)."""
     return gst.element_factory_make("dvdreadsrc", "video-source")
Example #24
0
def pad_added(src, new_pad, pipeline):
    """'pad-added' callback: create a parser for the new pad's media type,
    link it after the demuxer, then build per-stream tee/decode/encode/mux
    branches (only the multi-stream h265 path is implemented).

    NOTE(review): streams, stream_lists, video_transcode and
    video_transform are names defined elsewhere -- confirm they exist.
    """
    preparser = None
    decoder = None
    sink_pad = None
    # Was: Gst.Pad.get_current_caps(new_pad)None -- a syntax error.
    new_pad_caps = Gst.Pad.get_current_caps(new_pad)
    new_pad_struct = new_pad_caps.get_structure(0)
    new_pad_type = new_pad_struct.get_name()
    new_pad_name = new_pad.get_name()

    print("Received new pad '%s' type '%s' \n from '%s'" % (new_pad_name,new_pad_type,src.get_name()))

    if streams > 1:
        if new_pad_type == 'video/x-h265':
            preparser = Gst.element_factory_make("h265parse","preparser")
            pipeline.add(preparser)
            demuxer = pipeline.get_by_name('demuxer')
            demuxer.link(preparser)
            if video_transcode != 0 and video_transform != 0:
                tee_layer0 = Gst.element_factory_make("tee","teer_layer0")
                decoder = Gst.element_factory_make('avdec_h265','decoder_layer0')
                converter = Gst.element_factory_make('autovideoconvert','converter')
                pipeline.add(tee_layer0,decoder,converter)
                decoder.link(converter)
                preparser.link(tee_layer0)
                tee_layer0.link(decoder)

                stream_num = 0
                tee_layer1 = None
                tee_layer2 = None
                for stream in stream_lists:
                    outputs = len(stream[3])
                    muxer = None
                    outer = None
                    encoder = None
                    tee = None

                    if outputs > 1:
                        tee = Gst.element_factory_make('tee','teer_%d_%d' % (stream_num,stream_num))
                        pipeline.add(tee)
                    for out in stream[3]:
                        # NOTE(review): 'mpegts'/'udpsrc' look wrong for an
                        # output branch (mpegtsmux / udpsink expected).
                        if out['wrapper'] == 'ts':
                            muxer = Gst.element_factory_make('mpegts','muxer_%d_%d' % (stream_num,stream_num))
                        if out['outaddr'][:3] == 'udp':
                            outer = Gst.element_factory_make('udpsrc','outer_%d_%d' % (stream_num,stream_num))
                        pipeline.add(muxer)
                        pipeline.add(outer)
                        muxer.link(outer)
                        if outputs > 1:
                            tee.link(muxer)
                    tee_layer2 = tee
                    tee = None

                    if len(stream[0]['video']) != 0 :# decoding
                        if stream[0]['video']['vcodec'] == 'h264':
                            encoder = Gst.element_factory_make('x264enc',None)
                            pipeline.add(encoder)
                        if stream[0]['video']['vcodec'] == 'h265':
                            encoder = Gst.element_factory_make('x265enc',None)
                            pipeline.add(encoder)
                        if video_transcode > 1:
                            if tee_layer1 is None:
                                tee = Gst.element_factory_make("tee","teer_%d" %(stream_num))
                                pipeline.add(tee)
                                converter.link(tee)
                                tee.link(encoder)
                                tee_layer1 = tee
                            else:
                                tee_layer1.link(encoder)
                        else:
                            converter.link(encoder)
                        encoder.link(tee_layer2)
                    else: # transforming
                        preparser.link(tee_layer2)
            # Was "elif video_tranform == 0" -- typo for video_transform.
            elif video_transform == 0:
                decoder = Gst.element_factory_make('avdec_h265','decoder_layer')
                tee_layer1 = Gst.element_factory_make("tee","teer_layer1")
                pipeline.add(tee_layer1,decoder)
                preparser.link(decoder)
                decoder.link(tee_layer1)
                # TODO: the original ended here with a dangling "tee_" token;
                # the intended downstream link of tee_layer1 is unfinished.
            elif video_transcode == 0:
                tee = Gst.element_factory_make("tee","teer_layer1")
                pipeline.add(tee)
                preparser.link(tee)
    else:
        # TODO: single-stream handling was missing (the empty "else:"
        # was a SyntaxError); no-op until implemented.
        pass
Example #25
0
 def create_source(self):
     """Return a filesrc element reading this item's media file path."""
     element = gst.element_factory_make("filesrc", "video-source")
     element.set_property("location", self._path)
     return element
Example #26
0
    def __init__(self):
        """Build a borderless, centered video window backed by a playbin.

        Nested callbacks: on_message loops the 5s-6s segment on EOS and
        stops on error; on_sync_message attaches the video sink to the
        drawing area's X window; click_me seeks to the same segment.
        """
        def on_message(bus, message):
            if message.type == Gst.MESSAGE_EOS:
                # End of Stream: jump back to the 5s..6s segment.
                player.seek(
                    1.0,
                    Gst.FORMAT_TIME,
                    Gst.SEEK_FLAG_FLUSH,
                    Gst.SEEK_TYPE_SET,
                    5000000000,
                    Gst.SEEK_TYPE_NONE,
                    6000000000,
                )
            elif message.type == Gst.MESSAGE_ERROR:
                player.set_state(Gst.STATE_NULL)
                (err, debug) = message.parse_error()
                print("Error: %s", err)

        def on_sync_message(bus, message):
            # Attach the sink's output to the drawing area's native window.
            Gdk.threads_enter()
            Gdk.Display.get_default().sync()
            win_id = videowidget.get_property("window").get_xid()
            imagesink = message.src
            imagesink.set_property("force-aspect-ratio", True)
            imagesink.set_xwindow_id(win_id)
            Gdk.threads_leave()

        def click_me(event, data=None):
            player.seek(
                1.0,
                Gst.FORMAT_TIME,
                Gst.SEEK_FLAG_FLUSH,
                Gst.SEEK_TYPE_SET,
                5000000000,
                Gst.SEEK_TYPE_NONE,
                6000000000,
            )

        win = Gtk.Window()
        win.set_resizable(False)
        win.set_decorated(False)
        win.set_position(Gtk.WindowPosition.CENTER)

        overlay = Gtk.Overlay()
        win.add(overlay)
        overlay.show()

        videowidget = Gtk.DrawingArea()
        overlay.add(videowidget)
        videowidget.set_halign(Gtk.Align.START)
        videowidget.set_valign(Gtk.Align.START)
        videowidget.set_size_request(640, 480)
        # A native window is required so the sink can embed via its XID.
        videowidget.window.ensure_native()
        videowidget.show()

        fixed = Gtk.Fixed()
        overlay.add_overlay(fixed)
        fixed.show()

        win.show_all()

        # Setup GStreamer
        player = Gst.element_factory_make("playbin", "MultimediaPlayer")
        bus = player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        # used to get messages that GStreamer emits
        bus.connect("message", on_message)
        # used for connecting video to your application
        bus.connect("sync-message::element", on_sync_message)
        # Was os.getcwd() + "media/..." -- the missing path separator
        # produced e.g. "file:///homemedia/..." and a broken URI.
        player.set_property(
            "uri", "file://" + os.getcwd() + "/media/Video1280x720b.mp4")
        player.set_state(Gst.STATE_PLAYING)
class AVCrossfade(AVDemo):
    """Base class implementing boring, boiler-plate code.
    Sets up a basic gstreamer environment which includes:

    * a window containing a drawing area and basic media controls
    * a basic gstreamer pipeline using an ximagesink and an autoaudiosink
    * connects the ximagesink to the window's drawing area

    Derived classes need only override magic(), __name__,
    and __usage__ to create new demos."""

    __name__ = "AV Demo"
    __usage__ = "python audio_video.py <filename>"
    __def_win_size__ = (640, 480)

    # this commment allows us to include only a portion of the file
    # in the tutorial for this demo

    def onPad(self, decoder, pad, target):
        """Handler for the decoder's 'pad-added' signal: link the freshly
        exposed pad to a compatible pad on *target* (if one exists)."""
        tpad = target.get_compatible_pad(pad)
        if tpad:
            pad.link(tpad)

    def addVideoChain(self, pipeline, name, decoder, mixer):
        """Build one video branch (rate -> scale -> convert -> queue -> alpha)
        from *decoder* into *mixer*, and expose its alpha element as
        ``self.alpha<name>`` so crossfades can be driven externally."""
        alpha = gst.element_factory_make("alpha")
        alpha.props.alpha = 1.0  # start fully opaque
        videoscale = gst.element_factory_make("videoscale")
        videorate = gst.element_factory_make("videorate")
        colorspace = gst.element_factory_make("videoconvert")
        queue = gst.element_factory_make("queue")

        pipeline.add(alpha, videoscale, videorate, colorspace, queue)
        # Decoder pads appear dynamically; link them into videorate on demand.
        decoder.connect("pad-added", self.onPad, videorate)
        videorate.link(videoscale)
        videoscale.link(colorspace)
        colorspace.link(queue)
        queue.link(alpha)
        alpha.link(mixer)

        setattr(self, "alpha%s" % name, alpha)

    def addAudioChain(self, pipeline, name, decoder, adder):
        """Build one audio branch (convert -> resample -> queue -> volume)
        from *decoder* into *adder*, exposing the volume element as
        ``self.vol<name>``."""
        volume = gst.element_factory_make("volume")
        volume.props.volume = 0.5  # half volume so two summed streams don't clip
        audioconvert = gst.element_factory_make("audioconvert")
        audiorate = gst.element_factory_make("audioresample")
        queue = gst.element_factory_make("queue")

        pipeline.add(volume, audioconvert, audiorate, queue)
        decoder.connect("pad-added", self.onPad, audioconvert)
        audioconvert.link(audiorate)
        audiorate.link(queue)
        queue.link(volume)
        volume.link(adder)

        setattr(self, "vol%s" % name, volume)

    def addSourceChain(self, pipeline, name, filename, mixer, adder):
        """Create a file source + decodebin for *filename* and attach both a
        video branch (into *mixer*) and an audio branch (into *adder*)."""
        src = gst.element_factory_make("filesrc")
        src.props.location = filename
        dcd = create_decodebin()

        pipeline.add(src, dcd)
        src.link(dcd)
        self.addVideoChain(pipeline, name, dcd, mixer)
        self.addAudioChain(pipeline, name, dcd, adder)

    def magic(self, pipeline, sinks, args):
        """This is where the magic happens.

        *sinks* is the (videosink, audiosink) pair provided by the demo
        framework; *args* holds the two input filenames.

        Fix: the original signature used Python-2-only tuple-parameter
        unpacking (``def magic(self, pipeline, (videosink, audiosink), args)``),
        which is a SyntaxError on Python 3.  Unpacking explicitly is
        backward-compatible for all positional callers.
        """
        videosink, audiosink = sinks
        mixer = gst.element_factory_make("videomixer")
        adder = gst.element_factory_make("adder")
        pipeline.add(mixer, adder)

        mixer.link(videosink)
        adder.link(audiosink)
        self.addSourceChain(pipeline, "A", args[0], mixer, adder)
        self.addSourceChain(pipeline, "B", args[1], mixer, adder)
        # Stream B at 50% alpha so both sources are visible (crossfade start).
        self.alphaB.props.alpha = 0.5
# Example #28
 def create_source(self):
     """Create and return the capture source element for this device."""
     # NOTE(review): this creates a pulsesrc (PulseAudio *audio* capture) yet
     # names it "video-source" -- looks like a copy/paste leftover.  Confirm
     # nothing resolves the element by that name before renaming it.
     src = gst.element_factory_make("pulsesrc", "video-source")
     # self.name is used as the Pulse device identifier -- TODO confirm.
     src.set_property("device", self.name)
     return src