Example #1
0
 def run(self):
     """Start the player thread: initialize GStreamer and run the main loop."""
     # Fixed typo in the original log message ("initalizing").
     logging.debug("initializing the player")
     # threads_init() is a deprecated no-op on PyGObject 3.10.2+,
     # kept for compatibility with older versions.
     GObject.threads_init()
     Gst.init(None)
     # NOTE(review): `loop` is not defined in this method — presumably a
     # module-level GObject.MainLoop; verify it exists before this runs.
     # Blocks here servicing bus messages until the loop is quit.
     loop.run()
Example #2
0
	def makeReference(self, video):
		'''
		Make the reference videos.

		Runs two pipelines in sequence: first decode the source into a raw
		YUV reference file, then re-encode it with the configured codec and
		bitrate, teeing the coded stream both to disk and through a decoder
		to a second YUV file. All produced paths are appended to
		``self.files`` under the ``original`` and ``coded`` keys.

		:param string video: Path to the selected video.
		'''
		VTLOG.info('Making reference...')
		# Pipeline 1: decode + normalize frame rate -> raw YUV reference.
		self.pipeline = Gst.parse_launch('filesrc name=source ! decodebin ! videorate ! video/x-raw,framerate=%s/1 ! filesink name=sink1' % self.framerate)
		source = self.pipeline.get_by_name('source')
		sink1 = self.pipeline.get_by_name('sink1')
		self.files['original'].append(video)
		source.props.location = video
		location = self.path + '_ref_original.yuv'
		self.files['original'].append(location)
		sink1.props.location = location
		# Run the pipeline to completion before building the next one.
		self.__play()
		# Pipeline 2: encode with the selected codec; the tee writes the
		# coded stream to sink2 and a decoded YUV copy of it to sink3.
		self.pipeline = Gst.parse_launch('filesrc name=source ! decodebin ! videorate ! video/x-raw,framerate=%s/1 ! %s bitrate=%s ! tee name=t ! queue %s ! filesink name=sink2 t. ! queue ! decodebin ! filesink name=sink3' % (
			self.framerate,
			supported_codecs[self.codec]['encoder'],
			supported_codecs[self.codec]['bitrate_from_kbps'](self.bitrate),
			supported_codecs[self.codec]['add']
		))
		source = self.pipeline.get_by_name('source')
		sink2 = self.pipeline.get_by_name('sink2')
		sink3 = self.pipeline.get_by_name('sink3')
		source.props.location = video
		location = self.path + '_ref.' + self.codec
		self.files['coded'].append(location)
		sink2.props.location = location
		location = self.path + '_ref.yuv'
		self.files['coded'].append(location)
		sink3.props.location = location
		self.__play()
		VTLOG.info('Reference made')
Example #3
0
    def _import_gst(self):
        """Load GStreamer through PyGObject and expose the ``Gst``,
        ``GObject`` and ``GLib`` modules as attributes of this object.

        Raises FatalReplayGainError when python-gi or GStreamer 1.0 is
        unavailable.
        """
        try:
            import gi
        except ImportError:
            raise FatalReplayGainError(
                "Failed to load GStreamer: python-gi not found"
            )

        try:
            gi.require_version('Gst', '1.0')
        except ValueError as e:
            raise FatalReplayGainError(
                "Failed to load GStreamer 1.0: {0}".format(e)
            )

        from gi.repository import GObject, Gst, GLib

        # threads_init() is unnecessary on PyGObject 3.10.2+ and emits a
        # deprecation warning there; suppress it while still supporting
        # older PyGObject releases.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            GObject.threads_init()
        Gst.init([sys.argv[0]])

        self.GObject = GObject
        self.GLib = GLib
        self.Gst = Gst
	def __init__(self, uri):
		"""Build a typefind pipeline for *uri* and run it until a handler quits.

		Constructs filesrc -> typefind -> fakesink, connects bus handlers
		for eos/error/async-done, starts playback and blocks in a GObject
		main loop.

		:param uri: URI of the media to inspect.
		"""
		self.__uri = uri

		# Initialize GStreamer.
		Gst.init(None)

		self.pipe = Gst.Pipeline()

		# Build the source element directly from the URI rather than a
		# filesrc with a location property.
		filesource = Gst.Element.make_from_uri(Gst.URIType.SRC, self.__uri, "filesrc")
		fakesink = Gst.ElementFactory.make("fakesink", "sink")

		typefind = Gst.ElementFactory.make("typefind", "typefinder")
		typefind.connect("have_type", self.on_find_type)

		self.pipe.add(filesource)
		self.pipe.add(typefind)
		self.pipe.add(fakesink)
		# Link elements one by one (no element_link_many in these bindings).
		filesource.link(typefind)
		typefind.link(fakesink)

		self.bus = self.pipe.get_bus()
		self.bus.add_signal_watch()
		self.bus.connect("message::eos", self.on_eos)
		self.bus.connect("message::error", self.on_error)
		self.bus.connect("message::async_done", self.on_async_done)

		self.pipe.set_state(Gst.State.PLAYING)
		# Blocks here until one of the bus handlers quits the loop.
		self.mainloop = GObject.MainLoop()
		self.mainloop.run()
Example #5
0
    def build_pipeline(self, channels, sinkname, samplerate, srcname,
                       parse_element='wavparse'):
        """Assemble the classification pipeline.

        Builds the downstream chain sink <- classify <- capsfilter <-
        interleave, then one source branch per channel feeding the
        interleave (filesrc branches get a parser; other sources get a
        mono capsfilter).
        """
        self.channels = channels
        self.srcname = srcname

        # Downstream chain, created sink-first so each new element links
        # into the previously created one.
        self.sink = self.make_add_link(sinkname, None)
        self.classifier = self.make_add_link('classify', self.sink)
        self.capsfilter = self.make_add_link('capsfilter', self.classifier)
        self.interleave = self.make_add_link('interleave', self.capsfilter)

        self.sources = []
        for _channel in range(channels):
            convert = self.make_add_link('audioconvert', self.interleave)
            resample = self.make_add_link('audioresample', convert)
            if srcname == 'filesrc':
                parser = self.make_add_link(parse_element, resample)
                src = self.make_add_link(srcname, parser)
            else:
                mono_filter = self.make_add_link('capsfilter', resample)
                mono_filter.set_property(
                    "caps",
                    Gst.caps_from_string("audio/x-raw, "
                                         "layout=(string)interleaved, "
                                         "channel-mask=(bitmask)0x0, "
                                         "rate=%d, channels=1"
                                         % (samplerate,)))
                src = self.make_add_link(srcname, mono_filter)
            self.sources.append(src)

        # Constrain the interleaved stream to the full channel count.
        self.capsfilter.set_property(
            "caps",
            Gst.caps_from_string("audio/x-raw, "
                                 "layout=(string)interleaved, "
                                 "channel-mask=(bitmask)0x0, "
                                 "rate=%d, channels=%d"
                                 % (samplerate, channels)))

        if False:  # flip to True to dump the pipeline graph for debugging
            Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL,
                                      "pipeline.dot")
Example #6
0
    def get_frame(self, timeout_secs=10, since=None):
        """Return the most recent frame newer than *since*.

        Waits up to *timeout_secs* seconds under ``self._condition`` for a
        suitable ``Frame`` to arrive. Re-raises a stored exception as
        ``RuntimeError`` (chained, so the original traceback is kept) and
        raises ``NoVideo`` on timeout, dumping the pipeline graph first.
        """
        import time
        t = time.time()
        end_time = t + timeout_secs
        if since is None:
            # If you want to wait 10s for a frame you're probably not interested
            # in a frame from 10s ago.
            since = t - timeout_secs

        with self._condition:
            while True:
                if (isinstance(self.last_frame, Frame) and
                        self.last_frame.time > since):
                    self.last_used_frame = self.last_frame
                    return self.last_frame
                elif isinstance(self.last_frame, Exception):
                    # Chain the stored exception so its original traceback
                    # is not lost when re-raising.
                    raise RuntimeError(str(self.last_frame)) from self.last_frame
                t = time.time()
                if t > end_time:
                    break
                self._condition.wait(end_time - t)

        # Timed out: dump the pipeline for debugging before raising.
        pipeline = self.source_pipeline
        if pipeline:
            Gst.debug_bin_to_dot_file_with_ts(
                pipeline, Gst.DebugGraphDetails.ALL, "NoVideo")
        raise NoVideo("No video")
Example #7
0
    def __init__(self, pipeline_string='videotestsrc pattern=18 ! tee name=t ! queue ! autovideosink t. ! queue ! videoconvert ! videorate ! video/x-raw,width=(int)320,height=(int)240,format=(string)RGB16,framerate=(fraction)30/1 ! appsink name=sink'):
        """Set up an input pipeline ending in an appsink and an output
        pipeline starting from an appsrc, then start both and schedule a
        periodic data-type switch every 2 seconds.

        :param pipeline_string: gst-launch description of the input pipeline;
            must contain an appsink named 'sink'.
        """
        self.data = None  # buffer handed from the appsink to the appsrc
        self.source_id = None  # GLib source id of the appsrc feed callback
        self.lock = Lock()  # guards self.data between the two pipelines

        self.isWhite = True
        self.isStream = True
        self.timestamp = 0  # running PTS for buffers pushed into appsrc

        # INPUT pipeline: capture frames via the named appsink.
        self.pipeline = Gst.parse_launch(pipeline_string)

        self.appsink = self.pipeline.get_by_name('sink')

        assert self.appsink, 'appsink element named \'sink\' not found'

        self.appsink.connect('new-sample', self.on_new_buffer)
        self.appsink.set_property('emit-signals', True)

        self.pipeline.set_state(Gst.State.PLAYING)

        # OUTPUT pipeline
        self.pipeline_out = Gst.parse_launch('appsrc name=source ! videoconvert ! autovideosink')

        self.appsrc = self.pipeline_out.get_by_name('source')

        assert self.appsrc, 'appsrc element named \'source\' not found'

        # Caps must match the format produced by the input pipeline.
        self.appsrc.set_property('caps', Gst.Caps.from_string('video/x-raw,format=(string)RGB16,width=(int)320,height=(int)240,framerate=(fraction)30/1'))

        self.appsrc.connect('need-data', self.on_need_data)
        self.appsrc.connect('enough-data', self.on_enough_data)

        self.pipeline_out.set_state(Gst.State.PLAYING)

        GObject.timeout_add_seconds(2, self._switch_data_type)
Example #8
0
def initialize_modules():
    """
    Initialize the modules.

    This has to be done in a specific order otherwise the app
    crashes on some systems.
    """
    from gi.repository import Gdk
    Gdk.init([])
    from gi.repository import GtkClutter
    GtkClutter.init([])

    import gi
    # threads_init() is only needed on PyGObject < 3.11; newer versions
    # handle threading automatically.
    if not gi.version_info >= (3, 11):
        from gi.repository import GObject
        GObject.threads_init()

    from gi.repository import Gst
    Gst.init(None)
    from gi.repository import GES
    GES.init()

    # This is required because of:
    # https://bugzilla.gnome.org/show_bug.cgi?id=656314
    from gi.repository import GdkX11
    GdkX11  # noop reference so the import isn't flagged as unused
Example #9
0
    def __init__(self, pls=None, *args, **kwargs):
        """Set up the GStreamer playback pipeline and register ZOCP params.

        Builds a playbin whose video sink is a custom bin
        (glcolorscale -> glshader -> glimagesink) and registers this node's
        controllable parameters (corner positions, playlist, fade, …) on
        the ZOCP network.

        :param pls: comma-separated list of playlist URIs; defaults to "".
        """
        super(GstZOCP, self).__init__(*args, **kwargs)
        GObject.threads_init()
        self.loop = GObject.MainLoop()
        Gst.init(None)
        # Identity test, not equality, for the None sentinel.
        if pls is None:
            pls = ""
        self.count = 0

        # create elements
        self.playbin = Gst.ElementFactory.make('playbin', 'playbin0')
        self.glcolorconv = Gst.ElementFactory.make("glcolorscale", "glcolorconv0")
        self.glshader = Gst.ElementFactory.make("glshader", "glshader0")
        self.glimagesink = Gst.ElementFactory.make('glimagesink', "glimagesink0")
        self.sinkbin = Gst.Bin()

        # Assemble the custom video-sink bin.
        self.sinkbin.add(self.glcolorconv)
        self.sinkbin.add(self.glshader)
        self.sinkbin.add(self.glimagesink)

        # we add a message handler
        self.bus = self.playbin.get_bus()
        self.bus.add_watch(0, self.bus_call, self.loop)  # 0 == GLib.PRIORITY_DEFAULT

        # we link the elements together
        self.glcolorconv.link(self.glshader)
        self.glshader.link(self.glimagesink)
        # Expose the first element's sink pad as the bin's sink pad.
        ghostpad = Gst.GhostPad.new("sink", self.glcolorconv.get_static_pad("sink"))
        self.sinkbin.add_pad(ghostpad)

        # set properties of elements
        self.glshader.set_property("location", "shader.glsl")
        self.glshader.set_property("vars", "float alpha = float(1.);")
        self.glshader.set_property("preset", "preset.glsl")
        self.playbin.set_property("video-sink",self.sinkbin)

        # Register this node and its controllable parameters with ZOCP.
        self.set_name("zvidplyr@{0}".format(socket.gethostname()))
        self.register_bool("quit", False, access='rw')
        self.register_vec2f("top_left", (-1.0, 1.0), access='rw', step=[0.01, 0.01])
        self.register_vec2f('top_right', (1.0, 1.0), access='rw', step=[0.01, 0.01])
        self.register_vec2f('bottom_right', (1.0, -1.0), access='rw', step=[0.01, 0.01])
        self.register_vec2f('bottom_left', (-1.0, -1.0), access='rw', step=[0.01, 0.01])
        self.register_string("playlist", pls, access="rws")
        self.register_bool("loop", True, access="rwse")
        self.register_bool("fade", False, access="rwse")
        self.register_vec3f("fade_color", (1,0,0), access="rws")
        self.register_bool("pause", False, access="rwse")
        self.register_bool("stop", False, access="rwse")

        self._fade_val = 1.0
Example #10
0
 def _init_pipeline(self):
     """Initialize the GStreamer playback pipeline.

     Wraps the time/text overlays and an autovideosink in a bin and
     installs that bin as playbin's video sink, then disables playbin's
     own subtitle rendering.
     """
     self._playbin = Gst.ElementFactory.make("playbin", None)
     if gaupol.conf.video_player.volume is not None:
         self.volume = gaupol.conf.video_player.volume
     sink = Gst.ElementFactory.make("autovideosink", None)
     # Named video_bin to avoid shadowing the builtin `bin`.
     video_bin = Gst.Bin()
     video_bin.add(self._time_overlay)
     video_bin.add(self._text_overlay)
     pad = self._time_overlay.get_static_pad("video_sink")
     video_bin.add_pad(Gst.GhostPad.new("sink", pad))
     video_bin.add(sink)
     self._time_overlay.link(self._text_overlay)
     self._text_overlay.link(sink)
     self._playbin.props.video_sink = video_bin
     # We need to disable playbin's own subtitle rendering, since we don't
     # want embedded subtitles to be displayed, but rather what we
     # explicitly set to our own overlays. Since Gst.PlayFlags is not
     # available via introspection, we need to use Gst.util_set_object_arg.
     # Playbin's default values can be found via 'gst-inspect-1.0 playbin'.
     Gst.util_set_object_arg(self._playbin,
                             "flags",
                             "+".join(("soft-colorbalance",
                                       "deinterlace",
                                       "soft-volume",
                                       "audio",
                                       "video")))
Example #11
0
    def _busMessageCb(self, bus, message):
        """Bus handler for the waveform-generation pipeline.

        On EOS, process the collected samples, start rendering and stop.
        On ERROR, retry once (relaunching the pipeline without rate
        modulation); on a second failure, dump the pipeline graph and abort.
        """
        if message.type == Gst.MessageType.EOS:
            self._prepareSamples()
            self._startRendering()
            self.stopGeneration()

        elif message.type == Gst.MessageType.ERROR:
            if self.adapter:
                self.adapter.stop()
                self.adapter = None
            # Something went wrong TODO : recover
            self.stopGeneration()
            self._num_failures += 1
            if self._num_failures < 2:
                self.warning("Issue during waveforms generation: %s"
                             " for the %ith time, trying again with no rate "
                             " modulation", message.parse_error(),
                             self._num_failures)
                # Disconnect before relaunching so this callback is not
                # registered twice on the new pipeline's bus.
                bus.disconnect_by_func(self._busMessageCb)
                self._launchPipeline()
                self.becomeControlled()
            else:
                Gst.debug_bin_to_dot_file_with_ts(self.pipeline,
                                                  Gst.DebugGraphDetails.ALL,
                                                  "error-generating-waveforms")
                self.error("Aborting due to waveforms generation issue: %s",
                           message.parse_error())
Example #12
0
File: main.py Project: worr/sugar
def main():
    """Sugar shell entry point.

    Initializes threading and GStreamer, sets up the desktop environment
    (window manager, locale, fonts, theme, sound, keyboard), runs the GTK
    main loop, and stops the window manager on exit.
    """
    GLib.threads_init()
    Gdk.threads_init()
    dbus.glib.threads_init()
    Gst.init(sys.argv)

    cleanup_temporary_files()

    _start_window_manager()

    setup_locale()
    setup_fonts()
    setup_theme()

    # this must be added early, so that it executes and unfreezes the screen
    # even when we initially get blocked on the intro screen
    GLib.idle_add(unfreeze_dcon_cb)

    GLib.idle_add(setup_cursortracker_cb)
    sound.restore()
    keyboard.setup()

    sys.path.append(config.ext_path)

    if not intro.check_profile():
        _start_intro()
    else:
        _begin_desktop_startup()

    try:
        Gtk.main()
    except KeyboardInterrupt:
        # print() as a function works on Python 2 and 3; the original
        # print statement is a SyntaxError under Python 3.
        print('Ctrl+C pressed, exiting...')

    _stop_window_manager()
Example #13
0
def main():
    """Initialize GLib threading and GStreamer, then run the webcam UI."""
    GObject.threads_init()
    Gst.init(None)
    widget = WebcamWidget()
    widget.show()
    Gtk.main()
    exit(0)
    def _create_videobin(self):
        """Build ``self._videobin``: queue -> videoscale -> capsfilter ->
        colorspace -> theoraenc -> oggmux -> filesink, exposing the queue's
        sink pad as a "sink" ghost pad.

        The original used several GStreamer 0.10-only call forms that do not
        exist in the 1.0 Python bindings used here; they are replaced with
        the 1.0 equivalents.
        """
        queue = Gst.ElementFactory.make("queue", "videoqueue")
        queue.set_property("max-size-time", 5000000000) # 5 seconds
        queue.set_property("max-size-bytes", 33554432) # 32mb
        queue.connect("overrun", self._log_queue_overrun)

        scale = Gst.ElementFactory.make("videoscale", "vbscale")

        scalecapsfilter = Gst.ElementFactory.make("capsfilter", "scalecaps")

        # Gst.Caps() takes no string argument in 1.0 — use from_string().
        # NOTE(review): 'video/x-raw-yuv' and 'ffmpegcolorspace' are 0.10
        # names; under 1.0 these should probably be 'video/x-raw' and
        # 'videoconvert' — confirm the target GStreamer version.
        scalecaps = Gst.Caps.from_string('video/x-raw-yuv,width=160,height=120')
        scalecapsfilter.set_property("caps", scalecaps)

        colorspace = Gst.ElementFactory.make("ffmpegcolorspace", "vbcolorspace")

        enc = Gst.ElementFactory.make("theoraenc", "vbenc")
        enc.set_property("quality", 16)

        mux = Gst.ElementFactory.make("oggmux", "vbmux")

        sink = Gst.ElementFactory.make("filesink", "vbfile")
        sink.set_property("location", os.path.join(Instance.instancePath, "output.ogg"))

        # Gst.Bin() takes no name argument and Bin.add() takes a single
        # element in the 1.0 bindings.
        self._videobin = Gst.Bin.new("videobin")
        for element in (queue, scale, scalecapsfilter, colorspace, enc, mux, sink):
            self._videobin.add(element)

        queue.link(scale)
        scale.link_pads(None, scalecapsfilter, "sink")
        scalecapsfilter.link_pads("src", colorspace, None)
        # Gst.element_link_many() does not exist in the 1.0 bindings.
        colorspace.link(enc)
        enc.link(mux)
        mux.link(sink)

        pad = queue.get_static_pad("sink")
        self._videobin.add_pad(Gst.GhostPad.new("sink", pad))
Example #15
0
 def __init__(self):
     """
         Init playbin

         Creates two playbin elements (used alternately for crossfading),
         strips their video output flag, configures buffering and wires up
         per-playbin bus message handlers.
     """
     Gst.init(None)
     BasePlayer.__init__(self)
     self._codecs = Codecs()
     self._crossfading = False
     # _playbin always refers to the currently-active playbin.
     self._playbin = self._playbin1 = Gst.ElementFactory.make(
                                                        'playbin', 'player')
     self._playbin2 = Gst.ElementFactory.make('playbin', 'player')
     self._preview = None
     self._plugins = self.plugins1 = PluginsPlayer(self._playbin1)
     self.plugins2 = PluginsPlayer(self._playbin2)
     self._volume_id = self._playbin.connect('notify::volume',
                                             self._on_volume_changed)
     for playbin in [self._playbin1, self._playbin2]:
         # Audio-only playback: clear the VIDEO flag.
         flags = playbin.get_property("flags")
         flags &= ~GstPlayFlags.GST_PLAY_FLAG_VIDEO
         playbin.set_property('flags', flags)
         playbin.set_property('buffer-size', 5 << 20)
         playbin.set_property('buffer-duration', 10 * Gst.SECOND)
         playbin.connect('about-to-finish',
                         self._on_stream_about_to_finish)
         bus = playbin.get_bus()
         bus.add_signal_watch()
         bus.connect('message::error', self._on_bus_error)
         bus.connect('message::eos', self._on_bus_eos)
         bus.connect('message::element', self._on_bus_element)
         bus.connect('message::stream-start', self._on_stream_start)
         bus.connect("message::tag", self._on_bus_message_tag)
     self._handled_error = None
     self._start_time = 0
Example #16
0
 def __init__(self, verbose=False):
     """Verify runtime dependencies and initialize GStreamer.

     :param verbose: enable verbose output.
     :raises ValueError: if pygobject or python-imaging is missing.
     """
     # Check dependencies in order: pygobject first, then PIL.
     for available, message in ((GI_GSTREAMER_INSTALLED,
                                 'pygobject library was not found'),
                                (PIL_INSTALLED,
                                 'python-imaging library was not found')):
         if not available:
             raise ValueError(message)
     self.verbose = verbose
     Gst.init(None)
Example #17
0
	def __init__(self):
		"""Initialize playback state and a single playbin with bus handlers."""
		GObject.GObject.__init__(self)
		Gst.init(None)

		# Current playback position/selection state (-1 == nothing selected).
		self._current_track_number = -1
		self._current_track_album_id = -1
		self._current_track_id = -1
		self._albums = []
		self._timeout = None
		# Shuffle / party-mode state and history.
		self._shuffle = False
		self._shuffle_tracks_history = []
		self._shuffle_albums_history = []
		self._party = False
		self._party_ids = []
		self._queue = []

		self._playbin = Gst.ElementFactory.make('playbin', 'player')
		# Gapless playback: queue the next URI just before the current ends.
		self._playbin.connect("about-to-finish", self._on_stream_about_to_finish)
		# ReplayGain volume normalization setup.
		self._rg_setup()
		
		
		self._bus = self._playbin.get_bus()
		self._bus.add_signal_watch()
		self._bus.connect('message::error', self._on_bus_error)
		self._bus.connect('message::eos', self._on_bus_eos)
		self._bus.connect('message::stream-start', self._on_stream_start)
Example #18
0
 def __init__(self):
     """Initialize GStreamer, build the default pipeline, and run a GLib
     main loop on a background daemon thread."""
     GObject.threads_init()
     Gst.init(None)
     self.pipeline = None
     self._create_pipeline(default_song)
     main_loop = GObject.MainLoop()
     # Daemon thread: the loop won't block interpreter shutdown.
     threading.Thread(target=main_loop.run, daemon=True).start()
Example #19
0
def import_gst1():
    """Import GStreamer 1.0 via gi and shim pygst (gstreamer-0.10)-style
    names onto the ``Gst`` module.

    Also records the detected versions in the module-level ``gst_version``
    and ``pygst_version`` globals and returns the patched module.
    """
    log("import_gst1()")
    import gi
    log("import_gst1() gi=%s", gi)
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst           #@UnresolvedImport
    log("import_gst1() Gst=%s", Gst)
    Gst.init(None)
    #make it look like pygst (gstreamer-0.10):
    Gst.registry_get_default = Gst.Registry.get
    Gst.get_pygst_version = lambda: gi.version_info
    Gst.get_gst_version = lambda: Gst.version()
    def new_buffer(data):
        # Allocate a Gst buffer and copy the payload into it.
        buf = Gst.Buffer.new_allocate(None, len(data), None)
        buf.fill(0, data)
        return buf
    Gst.new_buffer = new_buffer
    Gst.element_state_get_name = Gst.Element.state_get_name
    #note: we only copy the constants we actually need..
    for x in ('NULL', 'PAUSED', 'PLAYING', 'READY', 'VOID_PENDING'):
        setattr(Gst, "STATE_%s" % x, getattr(Gst.State, x))
    # 'STREAM_STATUS' appeared twice in the original tuple; listed once here.
    for x in ('EOS', 'ERROR', 'TAG', 'STREAM_STATUS', 'STATE_CHANGED',
              'LATENCY', 'WARNING', 'ASYNC_DONE', 'NEW_CLOCK',
              'BUFFERING', 'INFO', 'STREAM_START'
              ):
        setattr(Gst, "MESSAGE_%s" % x, getattr(Gst.MessageType, x))
    Gst.MESSAGE_DURATION = Gst.MessageType.DURATION_CHANGED
    Gst.FLOW_OK = Gst.FlowReturn.OK
    global gst_version, pygst_version
    gst_version = Gst.get_gst_version()
    pygst_version = Gst.get_pygst_version()
    return Gst
Example #20
0
	def __init__(self, db):
		"""Initialize playback state and a playbin bound to *db*.

		:param db: database object used to resolve tracks/albums.
		"""
		GObject.GObject.__init__(self)
		Gst.init(None)

		# Current playback position/selection state (-1 == nothing selected).
		self._current_track_number = -1
		self._current_track_album_id = -1
		self._current_track_id = -1
		self._albums = []
		self._progress_callback = None
		self._timeout = None
		# Shuffle / party-mode state and history.
		self._shuffle = False
		self._shuffle_tracks_history = []
		self._shuffle_albums_history = []
		self._party = False
		self._party_ids = []
		self._playlist = []

		self._db = db
		self._player = Gst.ElementFactory.make('playbin', 'player')
		# ReplayGain volume normalization setup.
		self._rg_setup()
		
		
		self._bus = self._player.get_bus()
		self._bus.add_signal_watch()
		#self._bus.connect('message::state-changed', self._on_bus_state_changed)
		#self.bus.connect('message::error', self._onBusError)
		self._bus.connect('message::eos', self._on_bus_eos)
Example #21
0
    def init_camera(self):
        """(Re)build the camera capture pipeline and start it.

        Picks caps and element names for GStreamer 0.10 vs 1.0, appends
        device/index options for v4l2src/dc1394src, and connects the
        appsink's 'new-sample' signal.
        """
        # TODO: This doesn't work when camera resolution is resized at runtime.
        # There must be some other way to release the camera?
        if self._pipeline:
            self._pipeline = None

        src = self._video_src
        if src == 'v4l2src':
            src += ' device=/dev/video%d' % self._index
        elif src == 'dc1394src':
            src += ' camera-number=%d' % self._index

        if Gst.version() < (1, 0, 0, 0):
            # GStreamer 0.10 caps / element names.
            caps = ('video/x-raw-rgb,red_mask=(int)0xff0000,'
                    'green_mask=(int)0x00ff00,blue_mask=(int)0x0000ff')
            template = ('{} ! decodebin name=decoder ! ffmpegcolorspace ! '
                        'appsink name=camerasink emit-signals=True caps={}')
        else:
            # GStreamer 1.0 equivalents.
            caps = 'video/x-raw,format=RGB'
            template = ('{} ! decodebin name=decoder ! videoconvert ! appsink '
                        'name=camerasink emit-signals=True caps={}')

        self._pipeline = Gst.parse_launch(template.format(src, caps))
        self._camerasink = self._pipeline.get_by_name('camerasink')
        self._camerasink.connect('new-sample', self._gst_new_sample)
        self._decodebin = self._pipeline.get_by_name('decoder')

        if self._camerasink and not self.stopped:
            self.start()
Example #22
0
    def run(self):
        """Show the window and (re)build the GStreamer pipeline.

        Toggles between a live v4l2 camera pipeline (optionally flipped
        horizontally when ``self.flipcam`` is set) and a videotestsrc
        pipeline, flipping ``self.startcam`` between "Start" and "Stop"
        on each call.
        """
        self.show_all()
        self.xid = self.movie_window.get_property('window').get_xid()
        # Stop any previous pipeline before replacing it.
        self.player.set_state(Gst.State.NULL)

        if self.startcam == "Start":
            # Truthiness test instead of the `== True` anti-idiom.
            if self.flipcam:
                self.player = Gst.parse_launch("v4l2src device=\"/dev/video1\" ! image/jpeg,width=640,height=480,framerate=30/1 ! jpegdec ! aspectratiocrop aspect-ratio=16/9 ! videoflip method=horizontal-flip ! autovideosink")
            else:
                self.player = Gst.parse_launch("v4l2src device=\"/dev/video1\" ! image/jpeg,width=640,height=480,framerate=30/1 ! jpegdec ! aspectratiocrop aspect-ratio=16/9 ! autovideosink")
            self.startcam = "Stop"
        else:
            self.player = Gst.parse_launch("videotestsrc ! video/x-raw,width=640,height=480,framerate=30/1 ! aspectratiocrop aspect-ratio=16/9 ! autovideosink")
            self.startcam = "Start"

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)
        self.player.set_state(Gst.State.PLAYING)
Example #23
0
def check_soft_dependencies():
    """
    Verify for the presence of optional modules that enhance the user experience

    If those are missing from the system, the user will be notified of their
    existence by the presence of a "Missing dependencies..." button at startup.

    Results are recorded in the module-level ``missing_soft_deps`` mapping
    (dependency name -> translated description).
    """
    # Importing Gst again (even if we did it in hard deps checks), anyway it
    # seems to have no measurable performance impact the 2nd time:
    from gi.repository import Gst
    Gst.init(None)
    registry = Gst.Registry.get()
    # Description strings are translatable as they may be shown in the pitivi UI
    if not _try_import("pycanberra"):
        missing_soft_deps["PyCanberra"] = \
            _("enables sound notifications when rendering is complete")
    if not _try_import_from_gi("Notify"):
        missing_soft_deps["libnotify"] = \
            _("enables visual notifications when rendering is complete")
    if not registry.find_plugin("libav"):
        missing_soft_deps["GStreamer Libav plugin"] = \
            _('additional multimedia codecs through the Libav library')
    # Apparently, doing a registry.find_plugin("frei0r") is not enough.
    # Sometimes it still returns something even when frei0r is uninstalled,
    # and anyway we're looking specifically for the scale0tilt filter.
    # Don't use Gst.ElementFactory.make for this check, it's very I/O intensive.
    # Instead, ask the registry with .lookup_feature or .check_feature_version:
    if not registry.lookup_feature("frei0r-filter-scale0tilt"):
        missing_soft_deps["Frei0r"] = \
            _("additional video effects, clip transformation feature")
Example #24
0
    def __transcoderDoneCb(self, transcoder, asset):
        """Handle successful completion of a proxy transcoding job.

        Moves the finished file to its final proxy location, asks GES to
        (re)load the proxy asset asynchronously, then starts the next
        pending transcoder — resetting progress counters when none remain.
        """
        # Detach all our handlers from the finished transcoder.
        transcoder.disconnect_by_func(self.__transcoderDoneCb)
        transcoder.disconnect_by_func(self.__transcoderErrorCb)
        transcoder.disconnect_by_func(self.__proxyingPositionChangedCb)

        self.debug("Transcoder done with %s", asset.get_id())

        self.__running_transcoders.remove(transcoder)

        # Atomically move the temp output into the final proxy location.
        proxy_uri = self.getProxyUri(asset)
        os.rename(Gst.uri_get_location(transcoder.props.dest_uri),
                  Gst.uri_get_location(proxy_uri))

        # Make sure that if it first failed loading, the proxy is forced to be
        # reloaded in the GES cache.
        GES.Asset.needs_reload(GES.UriClip, proxy_uri)
        GES.Asset.request_async(GES.UriClip, proxy_uri, None,
                                self.__assetLoadedCb, asset, transcoder)

        # Kick off the next queued transcode, if any; otherwise reset the
        # aggregate progress counters.
        try:
            self.__startTranscoder(self.__pending_transcoders.pop())
        except IndexError:
            if not self.__running_transcoders:
                self._total_transcoded_time = 0
                self._total_time_to_transcode = 0
                self._start_proxying_time = 0
Example #25
0
    def __init__(self):
        """Initialize the player: discoverer, playbin, bus handlers and
        GSettings-backed repeat mode."""
        GObject.GObject.__init__(self)
        # Playlist/selection state; None until a playlist is loaded.
        self.playlist = None
        self.playlistType = None
        self.playlistId = None
        self.playlistField = None
        self.currentTrack = None
        self._lastState = Gst.State.PAUSED
        self.cache = AlbumArtCache.get_default()
        self._symbolicIcon = self.cache.get_default_icon(ART_SIZE, ART_SIZE)

        Gst.init(None)

        # Asynchronous media discovery for queued URLs.
        self.discoverer = GstPbutils.Discoverer()
        self.discoverer.connect('discovered', self._on_discovered)
        self.discoverer.start()
        self._discovering_urls = {}

        self.player = Gst.ElementFactory.make('playbin', 'player')
        self.bus = self.player.get_bus()
        self.bus.add_signal_watch()

        # Follow the user's repeat setting live via GSettings.
        self._settings = Gio.Settings.new('org.gnome.Music')
        self._settings.connect('changed::repeat', self._on_settings_changed)
        self.repeat = self._settings.get_enum('repeat')

        self.bus.connect('message::state-changed', self._on_bus_state_changed)
        self.bus.connect('message::error', self._onBusError)
        self.bus.connect('message::eos', self._on_bus_eos)
        self._setup_view()

        self.playlist_insert_handler = 0
        self.playlist_delete_handler = 0
Example #26
0
def let_it_rain():
    """Application entry point: initialize GLib/Gst/Notify, prepare config,
    CSS and translations, start the app with its D-Bus service, run the GTK
    main loop, and clean up on exit."""
    GObject.threads_init()
    Gst.init(None)
    # Restore default Ctrl+C behaviour so the app can be killed from a terminal.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    Notify.init("silver-rain")
    # Create system directories
    if not os.path.exists(IMG_DIR):
        os.makedirs(IMG_DIR)
    # Initialize config
    config.setup()
    # Create directory for recordings
    if not os.path.exists(config.recs_dir):
        os.makedirs(config.recs_dir)
    # Load css
    css_load()
    # Init translation
    set_translation()
    # Init application
    silver_app = SilverApp()
    # Setup dbus service (kept in a local so it lives through the main loop)
    service = SilverService(silver_app)
    # Run loop
    Gtk.main()
    # Cleanup
    silver_app.clean()
    Notify.uninit()
def main():
    """Resolve a stream URL/quality via Livestreamer and hand the chosen
    stream to a LivestreamerPlayer.

    Usage: script <url> <quality>; exits with a message on any failure.
    """
    if len(sys.argv) < 3:
        exit("Usage: {0} <url> <quality>".format(sys.argv[0]))

    # NOTE(review): lowercase `gobject`/`gst` aliases are assumed to be
    # imported elsewhere in this file (e.g. `from gi.repository import
    # Gst as gst`) — confirm; require_version must run before that import
    # to take effect.
    gi.require_version("Gst", "1.0")
    gobject.threads_init()
    gst.init(None)

    url = sys.argv[1]
    quality = sys.argv[2]

    livestreamer = Livestreamer()

    livestreamer.set_loglevel("info")
    livestreamer.set_logoutput(sys.stdout)

    try:
        streams = livestreamer.streams(url)
    except NoPluginError:
        exit("Livestreamer is unable to handle the URL '{0}'".format(url))
    except PluginError as err:
        exit("Plugin error: {0}.".format(err))

    if not streams:
        exit("No streams found on URL '{0}'.".format(url))

    if quality not in streams:
        exit("Unable to find '{0}' stream on URL '{1}'".format(quality, url))

    stream = streams[quality]

    player = LivestreamerPlayer()

    # play() blocks / drives the player; control does not return with a value.
    player.play(stream)
Example #28
0
    def __init__(self, width, height):
        """Build the JingleBank window: a grid of JingleButton widgets.

        :param width: button width in pixels.
        :param height: button height in pixels.
        """
        Gtk.Window.__init__(self, title="JingleBank")

        # Pass None explicitly: Gst.init's argv argument is nullable but not
        # optional on older PyGObject, and every other call site in this
        # codebase uses Gst.init(None).
        Gst.init(None)

        #Grid to organize the Buttons
        self.grid = Gtk.Grid()
        self.add(self.grid)

        #Set Button properties (will be replaced by configurable button dimensions)
        self.buttonwidth = width
        self.buttonheight = height

        #create buttons (will be read from configfile in the future)
        self.button1 = JingleButton(self.buttonwidth, self.buttonheight, [0.3,0.7,0.9], "Track 1", TESTFILE)
        self.button2 = JingleButton(self.buttonwidth, self.buttonheight, [0.4,0.6,0.4], "Track 2", TESTFILE)
        self.button3 = JingleButton(self.buttonwidth, self.buttonheight, [0.5,0.5,0.3], "Track 3", TESTFILE)
        self.button4 = JingleButton(self.buttonwidth, self.buttonheight, [0.6,0.4,0.2], "Track 4", TESTFILE)
        self.button5 = JingleButton(self.buttonwidth, self.buttonheight, [0.7,0.3,0.4], "Track 5", TESTFILE)
        self.button6 = JingleButton(self.buttonwidth, self.buttonheight, [0.8,0.2,0.3], "Track 6", TESTFILE)
        self.button7 = JingleButton(self.buttonwidth, self.buttonheight, [0.9,0.1,0.8], "Track 7", TESTFILE)

        #testarray of buttons: attach(child, column, row, width, height)
        self.grid.attach(self.button1, 1, 1, 1, 1)
        self.grid.attach(self.button2, 1, 2, 1, 1)
        self.grid.attach(self.button3, 2, 1, 1, 1)
        self.grid.attach(self.button4, 2, 2, 1, 1)
        self.grid.attach(self.button5, 3, 1, 1, 1)
        self.grid.attach(self.button6, 3, 2, 1, 1)
        self.grid.attach(self.button7, 3, 3, 1, 1)
    def _initialise_gstreamer(self):
        """Lazily build the audio->MP3 conversion pipeline (run once).

        Creates filesrc ! decodebin ! audioconvert ! audioresample !
        lamemp3enc ! id3v2mux ! filesink and stores the pipeline plus the
        elements that later calls reconfigure (source, sink, encoder).
        """
        # Guard so the pipeline is only constructed on the first call.
        if self._gstreamer_has_initialised:
            return
        
        # NOTE(review): the flag is set before construction finishes, so a
        # failure here would leave the object marked initialised — confirm
        # this is acceptable.
        self._gstreamer_has_initialised = True
        Gst.init(None)
        
        def on_new_decoded_pad(dbin, pad):
            # decodebin pads appear only at runtime; link the freshly
            # decoded pad's element to the converter when one shows up.
            decode = pad.get_parent()
            pipeline = decode.get_parent()
            convert = pipeline.get_by_name('convert')
            decode.link(convert)
            
        #we are going to mimic the following
        #gst-launch-1.0 filesrc location="02 - ABBA - Knowing Me, Knowing You.ogg" ! 
        #decodebin ! audioconvert ! audioresample ! lamemp3enc target=bitrate bitrate=128 ! 
        #id3v2mux ! filesink location=mytrack.mp3

        converter = Gst.Pipeline.new('converter')

        source = Gst.ElementFactory.make('filesrc', None)

        decoder = Gst.ElementFactory.make('decodebin', 'decoder')
        convert = Gst.ElementFactory.make('audioconvert', 'convert')
        sample = Gst.ElementFactory.make('audioresample', 'sample')
        encoder = Gst.ElementFactory.make('lamemp3enc', 'encoder')
        encoder.set_property('target', 'bitrate') 
        encoder.set_property('bitrate', self.TARGET_BITRATE)

        mux = Gst.ElementFactory.make('id3v2mux', 'mux')
        if not mux:
            # use id3mux where not available
            mux = Gst.ElementFactory.make('id3mux', 'mux')
            
        sink = Gst.ElementFactory.make('filesink', 'sink')

        converter.add(source)
        converter.add(decoder)
        converter.add(convert)
        converter.add(sample)
        converter.add(encoder)
        converter.add(mux)
        converter.add(sink)

        Gst.Element.link(source, decoder)
        #note - a decodebin cannot be linked at compile since
        #it doesnt have source-pads (http://stackoverflow.com/questions/2993777/gstreamer-of-pythons-gst-linkerror-problem)

        decoder.connect("pad-added", on_new_decoded_pad)
            
        Gst.Element.link(convert, sample)
        Gst.Element.link(sample, encoder)
        Gst.Element.link(encoder, mux)
        Gst.Element.link(mux, sink)
            
        # Keep handles used by later conversion calls to set locations/props.
        self.converter=converter
        self.source=source
        self.sink=sink
        self.encoder=encoder
Example #30
0
def simple():
    """Compose an image (1 s) and a video clip (10 s) on a GES timeline,
    play it, and quit after a 10-second timeout.

    Relies on module-level `videoFile`, `imageFile`, and `quit` (the
    timeout callback that stops the loop it is given).
    """
    Gst.init(None)
    GES.init()

    timeline = GES.Timeline.new_audio_video()

    layer = GES.Layer()

    timeline.add_layer(layer)

    asset = GES.UriClipAsset.request_sync(videoFile)

    imageasset = GES.UriClipAsset.request_sync(imageFile)

    # add_asset(asset, start, inpoint, duration, track_types)
    layer.add_asset(imageasset, 0 * Gst.SECOND, 0, 1 * Gst.SECOND, GES.TrackType.UNKNOWN)
    layer.add_asset(asset, 0 * Gst.SECOND, 0, 10 * Gst.SECOND, GES.TrackType.UNKNOWN)

    timeline.commit()

    pipeline = GES.Pipeline()
    pipeline.add_timeline(timeline)
    pipeline.set_state(Gst.State.PLAYING)

    mainLoop = GLib.MainLoop.new(None, False)
    GLib.timeout_add_seconds(10, quit, mainLoop)
    # BUG FIX: run the SAME loop the quit timeout targets. The original ran
    # a brand-new GLib.MainLoop(), which the timeout could never quit, so
    # the function never returned.
    mainLoop.run()
class AVCrossfade(AVDemo):
    """Base class implementing boring, boiler-plate code.
    Sets up a basic gstreamer environment which includes:

    * a window containing a drawing area and basic media controls
    * a basic gstreamer pipeline using an ximagesink and an autoaudiosink
    * connects the ximagesink to the window's drawing area

    Derived classes need only override magic(), __name__,
    and __usage__ to create new demos."""

    __name__ = "AV Demo"
    __usage__ = "python audio_video.py <filename>"
    __def_win_size__ = (640, 480)

    # this commment allows us to include only a portion of the file
    # in the tutorial for this demo

    # NOTE: this class uses the legacy Python 2 / gst 0.10 API
    # (gst.element_factory_make, tuple parameters in magic()).

    def onPad(self, decoder, pad, target):
        # Link a dynamically-added decoder pad to a compatible pad on
        # `target`, if one exists.
        tpad = target.get_compatible_pad(pad)
        if tpad:
            pad.link(tpad)

    def addVideoChain(self, pipeline, name, decoder, mixer):
        # Build decoder -> videorate -> videoscale -> videoconvert -> queue
        # -> alpha -> mixer, and expose the alpha element as self.alpha<name>
        # so crossfades can adjust per-source opacity.
        alpha = gst.element_factory_make("alpha")
        alpha.props.alpha = 1.0
        videoscale = gst.element_factory_make("videoscale")
        videorate = gst.element_factory_make("videorate")
        colorspace = gst.element_factory_make("videoconvert")
        queue = gst.element_factory_make("queue")

        pipeline.add(alpha, videoscale, videorate, colorspace, queue)
        # decoder pads appear at runtime; onPad links them to videorate.
        decoder.connect("pad-added", self.onPad, videorate)
        videorate.link(videoscale)
        videoscale.link(colorspace)
        colorspace.link(queue)
        queue.link(alpha)
        alpha.link(mixer)

        setattr(self, "alpha%s" % name, alpha)

    def addAudioChain(self, pipeline, name, decoder, adder):
        # Build decoder -> audioconvert -> audioresample -> queue -> volume
        # -> adder, exposing the volume element as self.vol<name>.
        volume = gst.element_factory_make("volume")
        volume.props.volume = 0.5
        audioconvert = gst.element_factory_make("audioconvert")
        audiorate = gst.element_factory_make("audioresample")
        queue = gst.element_factory_make("queue")

        pipeline.add(volume, audioconvert, audiorate, queue)
        decoder.connect("pad-added", self.onPad, audioconvert)
        audioconvert.link(audiorate)
        audiorate.link(queue)
        queue.link(volume)
        volume.link(adder)

        setattr(self, "vol%s" % name, volume)

    def addSourceChain(self, pipeline, name, filename, mixer, adder):
        # One file source feeding both a video chain (into mixer) and an
        # audio chain (into adder).
        #src = gst.element_factory_make("souphttpsrc")
        src = gst.element_factory_make("filesrc")
        src.props.location = filename
        dcd = create_decodebin()

        pipeline.add(src, dcd)
        src.link(dcd)
        self.addVideoChain(pipeline, name, dcd, mixer)
        self.addAudioChain(pipeline, name, dcd, adder)

    def magic(self, pipeline, (videosink, audiosink), args):
        """This is where the magic happens"""
        # Mix two sources "A" and "B"; B starts half-transparent so both
        # are visible (a simple crossfade starting point).
        mixer = gst.element_factory_make("videomixer")
        adder = gst.element_factory_make("adder")
        pipeline.add(mixer, adder)

        mixer.link(videosink)
        adder.link(audiosink)
        self.addSourceChain(pipeline, "A", args[0], mixer, adder)
        self.addSourceChain(pipeline, "B", args[1], mixer, adder)
        self.alphaB.props.alpha = 0.5
Example #32
0
                winsound.PlaySound(url2pathname(uri[5:]), winsound.SND_FILENAME
                                   | winsound.SND_ASYNC)
            except RuntimeError:
                log.error("ERROR: RuntimeError while playing %s." %
                          url2pathname(uri[5:]))

    sound_player = WinsoundPlayer()
else:
    try:
        from gi.repository import Gst
    except ImportError as err:
        log.error(
            "ERROR: Unable to import gstreamer. All sound will be mute.\n%s" %
            err)
    else:
        if not Gst.init_check(None):
            log.error(
                "ERROR: Unable to initialize gstreamer. All sound will be mute."
            )
        else:

            class GstPlayer(Player):
                def __init__(self):
                    self.player = Gst.ElementFactory.make("playbin", "player")
                    if self.player is None:
                        log.error(
                            'ERROR: Gst.ElementFactory.make("playbin", "player") failed'
                        )
                    else:
                        self.ready = True
                        fakesink = Gst.ElementFactory.make(
Example #33
0
# Optional GStreamer support: load the Gst introspection bindings plus the
# raw C library (for zero-copy buffer map/unmap via ctypes). On any failure
# _libgst stays None and callers must treat GStreamer as unavailable.
_libgst = None
try:
    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst
    _libgst = ctypes.CDLL(find_library('gstreamer-1.0'))
    # gst_buffer_map(buffer, &map_info, flags) -> gboolean
    _libgst.gst_buffer_map.argtypes = [
        ctypes.c_void_p,
        ctypes.POINTER(_GstMapInfo), ctypes.c_int
    ]
    _libgst.gst_buffer_map.restype = ctypes.c_int
    # gst_buffer_unmap(buffer, &map_info) -> void
    _libgst.gst_buffer_unmap.argtypes = [
        ctypes.c_void_p, ctypes.POINTER(_GstMapInfo)
    ]
    _libgst.gst_buffer_unmap.restype = None
    Gst.init(None)
except (ImportError, ValueError, OSError):
    # ImportError: no gi; ValueError: Gst 1.0 not available;
    # OSError: libgstreamer-1.0 shared library not found.
    pass


class TestBasicEnginePythonAPI(unittest.TestCase):
    def _test_inference_with_different_input_types(self,
                                                   engine,
                                                   input_data,
                                                   input_size=None):
        """Test inference with different input types. It doesn't check correctness
       of inference. Instead it checks inference repeatability with different
       input types.

    Args:
      input_data (list): A 1-D list as the input tensor.
Example #34
0
    def _show_preview(self, uri, info):
        """Show a preview for *uri* based on its discovered *info*.

        Chooses image, video, or audio presentation, configures the preview
        widgets accordingly, and sets self.description for the two media
        kinds that have one. Returns True on success, False when the media
        cannot be previewed (broken image, no audio streams).
        """
        self.log("Show preview for %s", uri)
        duration = info.get_duration()
        pretty_duration = beautify_length(duration)

        videos = info.get_video_streams()
        if videos:
            video = videos[0]
            if video.is_image():
                # Still image: render a scaled pixbuf, hide playback controls.
                self.current_preview_type = 'image'
                self.preview_video.hide()
                path = Gst.uri_get_location(uri)
                try:
                    pixbuf = GdkPixbuf.Pixbuf.new_from_file(path)
                except GLib.Error as error:
                    self.debug("Failed loading image because: %s", error)
                    self._show_error(error.message)
                    return False
                pixbuf_w = pixbuf.get_width()
                pixbuf_h = pixbuf.get_height()
                w, h = self.__get_best_size(pixbuf_w, pixbuf_h)
                pixbuf = pixbuf.scale_simple(w, h,
                                             GdkPixbuf.InterpType.NEAREST)
                self.preview_image.set_from_pixbuf(pixbuf)
                self.preview_image.set_size_request(
                    self.settings.FCpreviewWidth,
                    self.settings.FCpreviewHeight)
                self.preview_image.show()
                self.bbox.show()
                self.play_button.hide()
                self.seeker.hide()
                self.b_zoom_in.show()
                self.b_zoom_out.show()
            else:
                # Video: pause the player on the URI and show play controls.
                self.current_preview_type = 'video'
                self.preview_image.hide()
                self.player.uri = self.current_selected_uri
                self.player.setState(Gst.State.PAUSED)
                self.pos_adj.props.upper = duration
                video_width = video.get_square_width()
                video_height = video.get_height()
                w, h = self.__get_best_size(video_width, video_height)
                self.preview_video.set_size_request(w, h)
                self.preview_video.props.ratio = video_width / video_height
                self.preview_video.show()
                self.bbox.show()
                self.play_button.show()
                self.seeker.show()
                self.b_zoom_in.show()
                self.b_zoom_out.show()
                self.description = "\n".join([
                    _("<b>Resolution</b>: %d×%d") %
                    (video_width, video_height),
                    _("<b>Duration</b>: %s") % pretty_duration
                ])
        else:
            # No video streams: audio-only preview with a generic icon.
            self.current_preview_type = 'audio'
            self.preview_video.hide()
            audio = info.get_audio_streams()
            if not audio:
                self.debug("Audio has no streams")
                return False

            audio = audio[0]
            self.pos_adj.props.upper = duration
            self.preview_image.set_from_icon_name("audio-x-generic",
                                                  Gtk.IconSize.DIALOG)
            self.preview_image.show()
            self.preview_image.set_size_request(PREVIEW_WIDTH, PREVIEW_HEIGHT)
            self.description = "\n".join([
                beautify_stream(audio),
                _("<b>Duration</b>: %s") % pretty_duration
            ])
            # Reset to NULL before loading the new URI so stale state from a
            # previous preview is cleared.
            self.player.setState(Gst.State.NULL)
            self.player.uri = self.current_selected_uri
            self.player.setState(Gst.State.PAUSED)
            self.play_button.show()
            self.seeker.show()
            self.b_zoom_in.hide()
            self.b_zoom_out.hide()
            self.bbox.show()
        return True
Example #35
0
  def __init__(self,myplaylist=None,loop=None,starttoplay=False,myaudiosink=None):
    """Create a playbin-based audio player.

    :param myplaylist: playlist object this player reads from (may be None).
    :param loop: main loop the caller drives (stored; may be None).
    :param starttoplay: flag read elsewhere to start playback immediately.
    :param myaudiosink: name of the GStreamer audio sink element to use;
        defaults to "autoaudiosink" when None.
    :raises Exception: if the playbin element cannot be created.
    """
    self.playlist=myplaylist
    Gst.init(None)
    self.player = Gst.ElementFactory.make("playbin", None)
    self.playmode = "Stopped"
    self.recoverplaymode = "Stopped"
    self.statuschanged = False
    self.starttoplay=starttoplay
    self.loop=loop

    if self.player is None:
        logging.error( "creating player")
        raise Exception("cannot create player!")

    # Audio-only playback: route any video stream into a fakesink.
    fakesink = Gst.ElementFactory.make("fakesink", None)
    self.player.set_property("video-sink", fakesink)

    # ReplayGain: when both elements are available, wrap rgvolume+rglimiter
    # in a bin with ghost pads and install it as playbin's audio filter.
    if (Gst.ElementFactory.find('rgvolume') and
        Gst.ElementFactory.find('rglimiter')):
      self.audioconvert = Gst.ElementFactory.make('audioconvert',None)

      self.rgvolume = Gst.ElementFactory.make('rgvolume',None)
      self.rgvolume.set_property('album-mode', False)
      self.rgvolume.set_property('pre-amp', 0)
      self.rgvolume.set_property('fallback-gain', 0)
      self.rgvolume.set_property('headroom',0)

      self.rglimiter = Gst.ElementFactory.make('rglimiter',None)
      self.rglimiter.set_property('enabled', True)

      self.rgfilter = Gst.Bin()
      self.rgfilter.add(self.rgvolume)
      self.rgfilter.add(self.rglimiter)
      self.rgvolume.link(self.rglimiter)
      # Ghost pads expose the inner elements' pads on the bin boundary.
      self.rgfilter.add_pad(Gst.GhostPad.new('sink',
                self.rgvolume.get_static_pad('sink')))
      self.rgfilter.add_pad(Gst.GhostPad.new('src',
                self.rglimiter.get_static_pad('src')))
      try:
        self.player.set_property('audio-filter', self.rgfilter)
      except Exception:
        # Best-effort: if this playbin does not accept the filter, keep
        # playing without ReplayGain instead of failing construction.
        logging.error( "setting replaygain player")

    if myaudiosink is None: myaudiosink = "autoaudiosink"
    audiosink = Gst.ElementFactory.make(myaudiosink,None)
    self.player.set_property("audio-sink", audiosink)

    # Listen for end-of-stream, errors, and state changes on the bus.
    bus = self.player.get_bus()
    bus.add_signal_watch()
    bus.connect('message::eos',           self.on_message_eos)
    bus.connect('message::error',         self.on_message_error)
    bus.connect("message::state-changed", self.on_message_state_changed)
Example #36
0
    def __init__(self, conf):
        """Build the Kaldi online-decoding pipeline (Python 2 code:
        print >> and iteritems below).

        Pipeline: appsrc -> decodebin -> audioconvert -> audioresample ->
        tee -> (queue1 -> filesink, queue2 -> asr -> fakesink).

        :param conf: mapping of kaldinnet2onlinedecoder property names to
            values; 'nnet-mode' and 'use-threaded-decoder' are special-cased.
        """
        # Callbacks assigned by the owner after construction.
        self.result_handler = None
        self.full_result_handler = None
        self.eos_handler = None
        self.error_handler = None
        self.appsrc = Gst.ElementFactory.make("appsrc", "appsrc")
        self.decodebin = Gst.ElementFactory.make("decodebin", "decodebin")
        self.audioconvert = Gst.ElementFactory.make("audioconvert",
                                                    "audioconvert")
        self.audioresample = Gst.ElementFactory.make("audioresample",
                                                     "audioresample")
        self.tee = Gst.ElementFactory.make("tee", "tee")
        self.queue1 = Gst.ElementFactory.make("queue", "queue1")
        self.filesink = Gst.ElementFactory.make("filesink", "filesink")
        self.queue2 = Gst.ElementFactory.make("queue", "queue2")
        self.fakesink = Gst.ElementFactory.make("fakesink", "fakesink")

        self.asr = Gst.ElementFactory.make("kaldinnet2onlinedecoder", "asr")
        if not self.asr:
            print >> sys.stderr, "ERROR: Couldn't create the kaldinnet2onlinedecoder element!"
            sys.exit(-1)

        self.asr.set_property("use-threaded-decoder", True)  # todo: necessary?

        # nnet-mode must be set before the other decoder properties.
        if 'nnet-mode' in conf:
            self.asr.set_property('nnet-mode', conf['nnet-mode'])
            del conf['nnet-mode']

        conf = OrderedDict(conf)

        # pop-and-reassign moves these keys to the END of the OrderedDict,
        # so fst/model are applied last by the loop below.
        if "fst" in conf: conf["fst"] = conf.pop("fst")
        if "model" in conf: conf["model"] = conf.pop("model")

        for (key, val) in conf.iteritems():
            if key != "use-threaded-decoder":
                self.asr.set_property(key, val)

        self.appsrc.set_property("is-live", True)
        # The file branch is a dummy target until a real location is set.
        self.filesink.set_property("location", "/dev/null")

        self.pipeline = Gst.Pipeline()
        for element in [
                self.appsrc, self.decodebin, self.audioconvert,
                self.audioresample, self.tee, self.queue1, self.filesink,
                self.queue2, self.asr, self.fakesink
        ]:
            self.pipeline.add(element)

        self.appsrc.link(self.decodebin)
        # decodebin pads are dynamic; link to audioconvert when they appear.
        self.decodebin.connect('pad-added', self._connect_decoder)
        self.audioconvert.link(self.audioresample)

        self.audioresample.link(self.tee)

        # Branch 1: raw audio dump to filesink.
        self.tee.link(self.queue1)
        self.queue1.link(self.filesink)

        # Branch 2: recognition via the Kaldi decoder.
        self.tee.link(self.queue2)
        self.queue2.link(self.asr)

        self.asr.link(self.fakesink)

        # Create bus and connect several handlers
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.enable_sync_message_emission()
        self.bus.connect('message::eos', self._on_eos)
        self.bus.connect('message::error', self._on_error)

        #self.asr.connect('partial-result', self._on_partial_result)
        self.asr.connect('final-result', self._on_final_result)
        #self.asr.connect('full-final-result', self._on_full_final_result)

        self.pipeline.set_state(Gst.State.READY)
Example #37
0
    def __init__(self,
                 sn=None,
                 width=1920,
                 height=1080,
                 framerate=30,
                 color=False):
        """Open a tcam camera pipeline: tcambin -> caps -> videoconvert ->
        appsink, readied but not yet playing.

        :param sn: camera serial number for the tcambin element.
        :param width: frame width in pixels.
        :param height: frame height in pixels.
        :param framerate: frames per second; the magic value 2500000 selects
            the special framerate=2500000/10593 caps string.
        :param color: True for BGRx frames, False for 8-bit grayscale.
        :raises GLib.Error: if the pipeline string cannot be parsed.
        """
        Gst.init(sys.argv)
        self.height = height
        self.width = width
        self.sample = None
        self.samplelocked = False
        self.newsample = False
        self.gotimage = False
        self.img_mat = None
        self.new_image_callback_external = None
        self.image_locked = False
        self.is_streaming = False

        # Camera control limits used by gain/exposure setters elsewhere.
        self.GAIN_MAX = 480
        self.GAIN_MIN = 0
        self.GAIN_STEP = 10
        self.EXPOSURE_TIME_MS_MIN = 0.02
        self.EXPOSURE_TIME_MS_MAX = 4000

        # Renamed from `format` to avoid shadowing the builtin.
        pixel_format = "BGRx"
        if not color:
            pixel_format = "GRAY8"

        if framerate == 2500000:
            p = 'tcambin serial="%s" name=source ! video/x-raw,format=%s,width=%d,height=%d,framerate=%d/10593' % (
                sn,
                pixel_format,
                width,
                height,
                framerate,
            )
        else:
            p = 'tcambin serial="%s" name=source ! video/x-raw,format=%s,width=%d,height=%d,framerate=%d/1' % (
                sn,
                pixel_format,
                width,
                height,
                framerate,
            )

        p += ' ! videoconvert ! appsink name=sink'

        print(p)
        try:
            self.pipeline = Gst.parse_launch(p)
        except GLib.Error as error:
            # BUG FIX: the original printed undefined name `err` here,
            # raising NameError instead of reporting the parse failure.
            print("Error creating pipeline: {0}".format(error))
            raise

        self.pipeline.set_state(Gst.State.READY)
        # Block until the state change to READY completes.
        self.pipeline.get_state(Gst.CLOCK_TIME_NONE)
        # Query a pointer to our source, so we can set properties.
        self.source = self.pipeline.get_by_name("source")

        # Query a pointer to the appsink, so we can assign the callback function.
        self.appsink = self.pipeline.get_by_name("sink")
        self.appsink.set_property("max-buffers", 5)
        self.appsink.set_property("drop", True)
        self.appsink.set_property("emit-signals", True)
Example #38
0
    window.realize()
    gstPipeline.get_by_name(GST_VIEW_NAME).set_window_handle(
        gDrawAreaSink.get_window().get_xid())

    # Set up the network and plugin
    input_shape, input_blob, output_blob, exec_net = setup_network()

    #Initialize input and output threads to pass images to the
    # MVNC device and to read results from the inferences made on thos images.

    gCallback = put_output
    start_thread()

    if gstPipeline.set_state(Gst.State.PLAYING) == Gst.StateChangeReturn.FAILURE:
        gstPipeline.set_state(Gst.State.NULL)
    else:
        # export GST_DEBUG_DUMP_DOT_DIR=/tmp/
        Gst.debug_bin_to_dot_file(
            gstPipeline, Gst.DebugGraphDetails.ALL, 'playing-pipeline')
        Gtk.main()
        Gst.debug_bin_to_dot_file(
            gstPipeline, Gst.DebugGraphDetails.ALL, 'shutting-down-pipeline')
        gstPipeline.set_state(Gst.State.NULL)
        print("exiting main loop")

    # close the device
    del exec_net
    del plugin
    if __name__ == '__main__':
        sys.exit(main() or 0)
# Legacy mix of PyGTK (Python 2) and GObject-Introspection imports; Gst is
# aliased lowercase to match the older 0.10-style call sites below.
#import gst
#gst.require("1.0")
import pygtk

pygtk.require("2.0")
import gtk
import sys
import os
from audio_video import AVDemo, create_decodebin
#from gi.repository import GObject
from gi.repository import GObject, Gtk
from gi.repository import Gst as gst

# Must run before any pipeline is built: enable GObject threading and
# initialize GStreamer.
GObject.threads_init()
gst.init(None)


#import gobject
#gobject.threads_init()
class AVCrossfade(AVDemo):
    """Base class implementing boring, boiler-plate code.
    Sets up a basic gstreamer environment which includes:

    * a window containing a drawing area and basic media controls
    * a basic gstreamer pipeline using an ximagesink and an autoaudiosink
    * connects the ximagesink to the window's drawing area

    Derived classes need only override magic(), __name__,
    and __usage__ to create new demos."""
Example #40
0
    def __init__(self, path: str):
        """Prepare a metadata discoverer for the media file at *path*.

        :param path: filesystem path; converted to a file:// URI and kept
            on self.uri for later discovery calls.
        """
        super().__init__()
        self.uri = pathlib.Path(path).as_uri()

        # Gst must be initialized before constructing the Discoverer.
        Gst.init(None)
        self.discoverer: GstPbutils.Discoverer = GstPbutils.Discoverer()
Example #41
0
 def init_request(self, caps_str):
     """Start a decoding request: apply *caps_str* to the appsrc and set
     the pipeline (and the filesink branch) to PLAYING.

     :param caps_str: GStreamer caps string describing the incoming audio.
     """
     #logger.info("Setting caps to %s" % caps_str)
     caps = Gst.caps_from_string(caps_str)
     self.appsrc.set_property("caps", caps)
     self.pipeline.set_state(Gst.State.PLAYING)
     # The filesink is driven explicitly as well (its state is managed
     # separately from the pipeline elsewhere in this class).
     self.filesink.set_state(Gst.State.PLAYING)
Example #42
0
def main():
    """Build and run a DeepStream pipeline: camera -> streammux -> nvinfer
    -> nvtracker -> OSD -> EGL display, with a tracker-pad probe.

    Blocks in the GLib main loop until interrupted, then tears the
    pipeline down.
    """
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")
        return

    # BUG FIX: the camera source creation was commented out, leaving
    # `source` undefined and crashing with NameError at set_property below.
    source = create_element_or_error("nvarguscamerasrc", "camera-source")

    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    convertor = Gst.ElementFactory.make("nvvideoconvert", "convertor-1")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    convertor2 = Gst.ElementFactory.make("nvvidconv", "converter-2")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "egl-overlay")

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "./nv-inferance-config-files/config_infer_primary_trafficcamnet.txt")

    #Set properties of tracker
    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 384)
    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('ll-config-file', './tracker_config.yml')

    # Add Elements to Pipeline
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(transform)
    pipeline.add(sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together.
    # NOTE(review): nvstreammux normally requires linking the source's src
    # pad to the requested sink_0 pad rather than element-level link —
    # confirm source.link(streammux) succeeds on this DeepStream version.
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(transform)
    transform.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Attach the buffer probe that inspects tracked objects per frame.
    tracker_sinkpad = tracker.get_static_pad("sink")
    if not tracker_sinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd")

    tracker_sinkpad.add_probe(Gst.PadProbeType.BUFFER,
                              osd_sink_pad_buffer_probe, 0)

    # Start play back and listen to events
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        # Deliberately broad (incl. KeyboardInterrupt) so the cleanup below
        # always runs when the loop is interrupted.
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Example #43
0
from kazam.frontend.preferences import Preferences
from kazam.frontend.about_dialog import AboutDialog
from kazam.frontend.indicator import KazamIndicator
from kazam.frontend.window_select import SelectWindow
from kazam.frontend.done_recording import DoneRecording
from kazam.frontend.window_outline import OutlineWindow
from kazam.frontend.window_countdown import CountdownWindow

logger = logging.getLogger("Main")

#
# Detect GStreamer version and bail out if lower than 1.0 and no GI
#
try:
    from gi.repository import Gst
    # Gst.version() returns (major, minor, micro, nano); a zero major means
    # the introspected GStreamer predates 1.0 and is unusable here.
    gst_gi = Gst.version()
    if not gst_gi[0]:
        logger.critical(_("Gstreamer 1.0 or higher required, bailing out."))
        gst_gi = None
        sys.exit(0)
    else:
        logger.debug("Gstreamer version detected: {0}.{1}.{2}.{3}".format(
            gst_gi[0], gst_gi[1], gst_gi[2], gst_gi[3]))
except ImportError:
    # No GI bindings for Gst at all — same bail-out path.
    logger.critical(_("Gstreamer 1.0 or higher required, bailing out."))
    sys.exit(0)


class KazamApp(GObject.GObject):
    def __init__(self, datadir, dist, debug, test, sound, silent):
        GObject.GObject.__init__(self)
Example #44
0
def run_pipeline(pipeline,
                 layout,
                 loop,
                 render_overlay,
                 display,
                 handle_sigint=True,
                 signals=None):
    """Build, display and run a GStreamer pipeline until GTK quits.

    :param pipeline: pipeline description understood by ``describe()``;
        replaced in-place by the parsed ``Gst`` pipeline object.
    :param layout: layout object providing ``render_size`` (width/height).
    :param loop: main loop object (unused directly here; kept for interface
        compatibility with callers).
    :param render_overlay: callable used to draw the overlay on new samples.
    :param display: a ``Display`` enum value; ``Display.NONE`` skips the
        GTK window setup entirely.
    :param handle_sigint: when True, SIGINT quits the GTK main loop cleanly.
    :param signals: optional extra ``{element_name: {signal: handler}}``
        mapping merged over the default appsink handlers.
    """
    # Create pipeline
    pipeline = describe(pipeline)
    print(pipeline)
    pipeline = Gst.parse_launch(pipeline)

    if display is not Display.NONE:
        # Workaround for https://gitlab.gnome.org/GNOME/gtk/issues/844 in gtk3 < 3.24.
        widget_draws = 123

        def on_widget_draw(widget, cairo):
            nonlocal widget_draws
            if widget_draws:
                widget.queue_draw()
                widget_draws -= 1
            return False

        # Needed to account for window chrome etc.
        def on_widget_configure(widget, event, glsink):
            allocation = widget.get_allocation()
            glsink.set_render_rectangle(allocation.x, allocation.y,
                                        allocation.width, allocation.height)
            return False

        window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
        window.set_title(WINDOW_TITLE)
        window.set_default_size(layout.render_size.width,
                                layout.render_size.height)
        if display is Display.FULLSCREEN:
            window.fullscreen()

        drawing_area = Gtk.DrawingArea()
        window.add(drawing_area)
        drawing_area.realize()

        glsink = pipeline.get_by_name('glsink')
        set_display_contexts(glsink, drawing_area)
        drawing_area.connect('draw', on_widget_draw)
        drawing_area.connect('configure-event', on_widget_configure, glsink)
        window.connect('delete-event', Gtk.main_quit)
        window.show_all()

    with Worker(save_frame) as images, Commands() as get_command:
        signals = {
            'appsink': {
                'new-sample':
                functools.partial(on_new_sample,
                                  render_overlay=functools.partial(
                                      render_overlay, layout=layout),
                                  layout=layout,
                                  images=images,
                                  get_command=get_command),
                'eos':
                on_sink_eos
            },
            **(signals or {})
        }

        # Fix: the original rebound the name `signals` (the dict being
        # iterated) to each element's inner dict inside this loop, which only
        # worked by accident of iterator semantics and clobbered the mapping.
        # Use distinct loop-variable names instead.
        for element_name, element_signals in signals.items():
            component = pipeline.get_by_name(element_name)
            if component:
                for signal_name, signal_handler in element_signals.items():
                    component.connect(signal_name, signal_handler, pipeline)

        # Set up a pipeline bus watch to catch errors.
        bus = pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message', on_bus_message, pipeline, loop)

        # Handle signals.
        if handle_sigint:
            GLib.unix_signal_add(GLib.PRIORITY_DEFAULT, signal.SIGINT,
                                 Gtk.main_quit)

        # Run pipeline.
        pipeline.set_state(Gst.State.PLAYING)
        try:
            Gtk.main()
        except KeyboardInterrupt:
            pass
        finally:
            pipeline.set_state(Gst.State.NULL)

        # Process all pending MainContext operations.
        while GLib.MainContext.default().iteration(False):
            pass
Example #45
0
def main(argv):
    """Entry point for the DeepStream face-recognition application.

    Parses command-line options, loads the pickled learned-faces database,
    builds the DeepStream GStreamer pipeline (streammux -> nvinfer -> tiler
    -> osd -> sink) for every configured source, then runs the main loop
    until EOS/interrupt.

    :param argv: unused; arguments are read by argparse from sys.argv.
    """
    # first parse the arguments
    parser = argparse.ArgumentParser(description='Deepstream Face Recognition')
    parser.add_argument(
        '-l',
        '--learned',
        help='this is the file that contains the learned faces')
    parser.add_argument(
        '-s',
        '--stream',
        help=
        'this is the URL or filenames of the video stream (argument can be used multilple times)',
        action='append',
        nargs='+')
    parser.add_argument('-o',
                        '--output',
                        help='Optional: this is the output (gui or headless)')
    parser.add_argument('-r', '--rate', help='Optional: this is sampling rate')
    parser.add_argument('-f',
                        '--factor',
                        help='Optional: this is the resize factor')
    parser.add_argument('-p',
                        '--upscale',
                        help='Optional: this is the upscale')
    parser.add_argument(
        '-d',
        '--detection',
        help='Optional: this is the detection model (hog or cnn)')
    parser.add_argument('-j',
                        '--jitters',
                        help='Optional: this is the number of jitters')
    parser.add_argument(
        '-e',
        '--encoding',
        help='Optional: this is the encoding model (large or small)')
    parser.add_argument(
        '-w',
        '--write',
        help=
        'Optional: this enable or disables writing (saving) unknown faces (on or off)'
    )
    parser.add_argument(
        '-c',
        '--confidence',
        help=
        'Optional: minimum confidence of person before doing face recognition')
    parser.add_argument(
        '-u',
        '--unclear',
        help='Optional: this is the directory to store unclear objects')

    args = parser.parse_args()
    global learnedfile
    learnedfile = args.learned
    if not learnedfile:
        print(
            'No file with learned faces specified. Please use: python3 deepstream_fr.py -l \"trained_faces.pkl\" -s \"rtsp://thecamera.com\" [-r 5 -f 2 -p 2 -d \"hog\" -j 1 -e \"large" -c 0.33 -u logdir/frames]'
        )
        sys.exit(
            404)  # Bail out with 404 = no file with learned faces specified
    global stream
    stream = args.stream
    if not stream:
        print(
            'No video stream specified. Please use: python3 deepstream_fr.py -l \"trained_faces.pkl\" -s \"rtsp://thecamera.com\" [-r 5 -f 2 -p 2 -d \"hog\" -j 1 -e \"large" -c 0.33 -u logdir/frames]'
        )
        sys.exit(404)  # Bail out with 404 = no stream specified
    # overrule fixed values when used in argument
    if args.output:
        if args.output.upper() == 'HEADLESS':
            global gui
            gui = False
    if args.rate:
        global sampling_rate
        sampling_rate = int(args.rate)
    if args.factor:
        global resize_factor
        resize_factor = int(args.factor)
    if args.upscale:
        global up_scale
        up_scale = int(args.upscale)
    if args.detection:
        global detection_model
        detection_model = args.detection
    if args.jitters:
        global number_jitters
        number_jitters = int(args.jitters)
    if args.encoding:
        global encoding_model
        encoding_model = args.encoding
    if args.write:
        # Bug fix: this previously tested args.output instead of args.write,
        # so "-w off" never disabled saving (and crashed when -o was absent).
        if args.write.upper() == 'OFF':
            global save_unknown
            save_unknown = False
    if args.confidence:
        global person_min_confidence
        person_min_confidence = float(args.confidence)
    if args.unclear:
        global folder_name
        folder_name = args.unclear

    # start logging and counter and create directory for any unknow faces just in case we find any
    global log
    logpath = Path(logdir)
    logpath.mkdir(parents=True, exist_ok=True)
    log = init_log(logfile, process, loglevel, logsize, logbackups)
    log.critical(
        'Starting program: %s with OpenCv version %s in %s mode and saving unknow face: %s'
        % (process, cv2.__version__, 'Screen' if gui else 'Headless',
           'On' if save_unknown else 'Off'))
    starttime = time.perf_counter()
    # create directory to store unknown faces detected
    unknown_faces_path = Path(unknown_face_dir)
    unknown_faces_path.mkdir(parents=True, exist_ok=True)
    # create logfile to store known faces detected
    global known_faces_log
    known_faces_logpath = Path(known_faces_dir)
    known_faces_logpath.mkdir(parents=True, exist_ok=True)
    known_faces_log = init_log(known_faces_logfile, 'known_faces', loglevel,
                               logsize, logbackups)
    known_faces_log.critical('Start logging known faces')

    # One FPS counter per source; stream[i] is a one-element list because the
    # -s option uses action='append' together with nargs='+'.
    for i in range(0, len(stream)):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
        log.info(f'- Detected stream{i}: {stream[i][0]}')
    number_sources = len(stream)

    # opening learned faces file
    log.info(f'- Opening learned faces file: {learnedfile}')
    with open(learnedfile, 'rb') as trainedfacesfile:
        # reading the learned faces file
        global Names
        Names = pickle.load(trainedfacesfile)
        global Sequence
        Sequence = pickle.load(trainedfacesfile)
        # TODO: Create updated learned faces file
        # global Filedate
        # Filedate = pickle.load(trainedfacesfile)
        global Encodings
        Encodings = pickle.load(trainedfacesfile)

    # create directory to save ambigious objects
    folder_path = Path(folder_name)
    folder_path.mkdir(parents=True, exist_ok=True)
    log.warning(f'- Ambigious objects will be saved in: {folder_path}')

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    log.warning('- Creating Pipeline')
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        log.critical('Error: Unable to create Pipeline')

    # Create nvstreammux instance to form batches from one or more sources.
    log.warning('- Creating streamux')
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        log.critical('Error: Unable to create NvStreamMux')
    pipeline.add(streammux)
    for i in range(number_sources):
        log.info(f'- Creating source_bin: {folder_name}/stream_{i}')
        stream_path = Path(f'{folder_name}/stream_{i}')
        stream_path.mkdir(parents=True, exist_ok=True)
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        uri_name = stream[i][0]
        if uri_name.startswith("rtsp://"):
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            log.critical('Error: Unable to create source bin')
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            log.critical('Error: Unable to create sink pad bin')
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            log.critical('Error: Unable to create src pad bin')
        srcpad.link(sinkpad)

    log.warning('- Creating Pgie')
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        log.critical('Error: Unable to create pgie')

    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    log.warning('- Creating nvvidconv1 and filter1 to convert frames to RGBA')
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        log.critical('Error: Unable to create nvvidconv1')
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        log.critical('Error: Unable to get the caps filter1')
    filter1.set_property("caps", caps1)

    # creating tiler
    log.warning('- Creating tiler')
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        log.critical('Error: Unable to create tiler')

    log.warning('- Creating nvvidconv')
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        log.critical('Error: Unable to create nvvidconv')

    log.warning('- Creating nvosd')
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        log.critical('Error: Unable to create nvosd')
    if (is_aarch64()):
        log.warning('- Creating transform for arch64')
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            log.critical('Error: Unable to create transform')

    log.warning('- Creating EGLSink')
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        log.critical('Error: Unable to create egl sink')

    if is_live:
        log.info('- At least one of the sources is live')
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest_imagedata_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        log.warning(
            f'Warning: Overriding infer-config batch-size {pgie_batch_size} with number of sources {number_sources}'
        )
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("sync", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    log.warning('- Adding elements to Pipeline')
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    log.warning('- Linking elements in the Pipeline')
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus mesages to it
    log.warning('- Create event loop')
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        log.critical('Error: Unable to get src pad')
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER,
                                 tiler_sink_pad_buffer_probe, 0)

    # List the sources
    # Bug fix: the original iterated enumerate(stream[:-1]) and skipped
    # index 0, which dropped both the first and the last source from the
    # listing; log every configured source instead.
    log.info('- Now playing...')
    for i, source in enumerate(stream):
        log.info(f'- {i}: {source}')

    # start play back and listed to events
    log.info(
        f'- Starting pipeline and processing with sampling rate: {sampling_rate}, resize factor: {resize_factor} and up scale: {up_scale}'
    )
    log.info(
        f'- Facial recognition is done with detection model: {detection_model}, number of jitters: {number_jitters} and encoding model: {encoding_model}'
    )
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    endtime = time.perf_counter()
    log.critical(
        f'Program {process} ended and took {endtime - starttime:0.2f} seconds to complete'
    )
    pipeline.set_state(Gst.State.NULL)
# Minimal smoke test for the GStreamer Python bindings: pin the 1.0 API,
# import Gst and print a Gst.Fraction value (3/5).
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
print(Gst.Fraction(num=3, denom=5))
Example #47
0
from pprint import pprint

import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
gi.require_version('GstVideo', '1.0')

from gi.repository import Gtk, Gst
Gst.init(None)
Gst.init_check(None)


class GstWidget(Gtk.Box):
    """GTK box that embeds a GStreamer playbin rendering into a gtksink widget.

    The playbin is created immediately; rendering is wired up lazily on the
    'realize' signal, once the widget has a backing window.
    """

    def __init__(self, pipeline):
        """:param pipeline: unused; kept for interface compatibility."""
        super().__init__()
        self.connect('realize', self._on_realize)
        self._bin = Gst.ElementFactory.make('playbin', 'MultimediaPlayer')
        self._bin.set_property('uri', 'file:///home/kemal/Videos/jason_statham.mp4')

    def _on_realize(self, widget):
        # Bug fix: playbin is itself a complete pipeline with no static pads,
        # so linking it to another element (the original did
        # `self._bin.link(gtksink)`) cannot work. Hand the gtksink to playbin
        # through its 'video-sink' property instead, then embed the sink's
        # GTK widget and start playback.
        gtksink = Gst.ElementFactory.make('gtksink')
        self._bin.set_property('video-sink', gtksink)
        self.pack_start(gtksink.props.widget, True, True, 0)
        gtksink.props.widget.show()
        self._bin.set_state(Gst.State.PLAYING)
Example #48
0
def main():
    """Run a two-camera DeepStream pipeline (nvarguscamerasrc sources ->
    streammux -> nvinfer -> nvtracker -> tiler -> OSD -> EGL sink) until the
    main loop exits.

    Error paths log a message and terminate the process with a non-zero
    exit status.
    """

    # Static camera configuration: CSI sensor id and a display name each.
    cameras_list = [
        {
            "source": 0,
            "name": "Camera 1",
        },
        {
            "source": 1,
            "name": "Camera 2"
        },
    ]

    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    if not pipeline:
        print("Unable to create Pipeline")
        # Bug fix: exit with a failure status (the original used exit(0),
        # signalling success to the shell on error).
        exit(1)

    streammux = create_element_or_error("nvstreammux", "stream-muxer")
    pipeline.add(streammux)

    # Build one capture branch per camera and request a matching muxer pad.
    for camera in cameras_list:
        source = create_element_or_error("nvarguscamerasrc",
                                         "source-" + camera['name'])
        source.set_property('sensor-id', camera['source'])
        source.set_property('bufapi-version', True)
        caps = create_element_or_error("capsfilter",
                                       "source-caps-source-" + camera['name'])
        caps.set_property(
            "caps",
            Gst.Caps.from_string(
                "video/x-raw(memory:NVMM),width=1920,height=1080,framerate=60/1,format=NV12"
            ))
        pipeline.add(source)
        pipeline.add(caps)

        sinkpad = streammux.get_request_pad('sink_' + str(camera['source']))
        srcpad = source.get_static_pad("src")

        if not sinkpad:
            print("Unable to create source sink pad")
            exit(1)  # bug fix: non-zero status on failure
        if not srcpad:
            print("Unable to create source src pad")
            exit(1)  # bug fix: non-zero status on failure
        srcpad.link(sinkpad)

    # Processing/rendering elements.
    pgie = create_element_or_error("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    convertor = create_element_or_error("nvvideoconvert", "converter-1")
    tiler = create_element_or_error("nvmultistreamtiler", "nvtiler")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")

    # Decoupling queues between every processing stage.
    queue1 = create_element_or_error("queue", "queue1")
    queue2 = create_element_or_error("queue", "queue2")
    queue3 = create_element_or_error("queue", "queue3")
    queue4 = create_element_or_error("queue", "queue4")
    queue5 = create_element_or_error("queue", "queue5")
    queue6 = create_element_or_error("queue", "queue6")

    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)

    # Set Element Properties
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt"
    )

    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_nvdcf.so')
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 480)

    tiler.set_property("rows", 2)
    tiler.set_property("columns", 2)
    tiler.set_property("width", 1920)
    tiler.set_property("height", 1080)
    sink.set_property("qos", 0)

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(transform)
    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")

    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(tiler)
    tiler.link(queue4)
    queue4.link(convertor)
    convertor.link(queue5)
    queue5.link(nvosd)
    nvosd.link(queue6)
    queue6.link(transform)
    transform.link(sink)

    # Create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Example #49
0
import time

import numpy as np

import gi

gi.require_version('Gtk', '3.0')
gi.require_version('GLib', '2.0')
gi.require_version('GObject', '2.0')
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstPbutils', '1.0')
from gi.repository import GLib, GObject, Gst, GstBase, Gtk

GObject.threads_init()
Gst.init([])
Gtk.init([])

from gi.repository import GstPbutils  # Must be called after Gst.init().

from PIL import Image

from gst_native import set_display_contexts
from pipelines import *

COMMAND_SAVE_FRAME = ' '
COMMAND_PRINT_INFO = 'p'
COMMAND_QUIT = 'q'
WINDOW_TITLE = 'Coral'

Example #50
0
    def __init__(self):
        """Set up the playback/recorder object and its GStreamer graph.

        Builds a pipeline containing a uridecodebin source, a tee, and a
        player bin (queue -> audioconvert -> autoaudiosink); the recorder
        bin is created on demand by set_format(). Also initializes all
        callback slots and stream metadata to defaults.
        """

        # Set default properties
        self.uri = ""
        self.state = playback.STATE_STOPPED
        self.recorder_state = playback.STATE_RECORDER_OFF
        self.rec_trigger = False
        self.output_str = "./%t"
        self.output_fmt = playback.FORMAT_OGG
        self.ext = ".ogg"
        self.recopt = playback.REC_IMMEDIATALY
        # Callback/user-data pairs for the various notifications.
        self.cb_cg_track = None
        self.ud_cg_track = None
        self.cb_cg_time = None
        self.ud_cg_time = None
        self.cb_cg_state = None
        self.ud_cg_state = None
        self.cb_cg_buffer = None
        self.ud_cg_buffer = None
        self.cb_cg_duration = None
        self.ud_cg_duration = None
        self.cb_eos_reached = None
        self.ud_eos_reached = None
        self.cb_error = None
        self.ud_error = None

        # Properties of stream
        self.organization = "unknown"
        self.bitrate = 0
        self.genre = "unknown"
        self.title = "untitled"
        self.duration = None
        self.rec_plugged = False

        ## Gstreamer

        self.lock1 = RLock()

        # Create the pipeline
        self.pipeline = Gst.Pipeline()  #Gst.Pipeline("playback")

        # uridecodebin
        self.uridec = Gst.ElementFactory.make("uridecodebin", "uridecoder")
        self.uridec.connect("pad-added", self.cb_pad_added)
        self.pipeline.add(self.uridec)

        # Create the player Bin
        self.player = Gst.Bin()

        queue = Gst.ElementFactory.make("queue", "queue")
        convert = Gst.ElementFactory.make("audioconvert", "converter")
        output = Gst.ElementFactory.make("autoaudiosink", "output")

        self.player.add(queue)
        self.player.add(convert)
        self.player.add(output)

        queue.link(convert)
        convert.link(output)

        # Expose the queue's sink pad as the bin's own sink ghost pad.
        pad = queue.get_static_pad("sink")
        self.player.add_pad(Gst.GhostPad.new("sink", pad))

        # Create recorder Bin.
        # Fix: the original used Python 2 `print` statements (a SyntaxError
        # under Python 3) and probed plugin availability via list.index()
        # wrapped in bare try/except; use membership tests and print().
        self.recorder = None
        self.rplugins = self.get_rec_plugins()
        if playback.FORMAT_OGG in self.rplugins:
            self.set_format(playback.FORMAT_OGG)
        elif playback.FORMAT_MP3 in self.rplugins:
            self.set_format(playback.FORMAT_MP3)
        else:
            print("ERROR: Could not find a plugin to record sound.")
            print("       Please, install gstreamer-base-plugins")

        # Create 1-to-N pipe fitting
        tee = Gst.ElementFactory.make("tee", "tee")
        teepad_0 = tee.get_request_pad("src_0")
        teepad_1 = tee.get_request_pad("src_1")

        # Add to pipeline
        #self.pipeline.add(tee, self.player)
        self.pipeline.add(tee)
        self.pipeline.add(self.player)

        # Link tee with player and recorder
        pad_p = self.player.get_static_pad("sink")
        teepad_0.link(pad_p)

        # Here the pipeline is configured only to play the stream

        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.cb_messages)
Example #51
0
    def __init__(self):
        """Assemble and start the video mixing pipeline.

        Builds a compositor fed by one intervideosrc per configured source
        plus a background channel, tees the mixed output to the main out
        channel and (optionally) preview/stream-blanker channels, then wires
        bus handlers and launches the pipeline.
        """
        self.caps = Config.get('mix', 'videocaps')

        self.names = Config.getlist('mix', 'sources')
        self.log.info('Configuring Mixer for %u Sources', len(self.names))

        # Core graph: compositor -> caps -> identity (sync hook) -> tee,
        # plus the background input and the main mixed output.
        pipeline = """
            compositor name=mix !
            {caps} !
            identity name=sig !
            queue !
            tee name=tee

            intervideosrc channel=video_background !
            {caps} !
            mix.

            tee. ! queue ! intervideosink channel=video_mix_out
        """.format(
            caps=self.caps
        )

        if Config.getboolean('previews', 'enabled'):
            pipeline += """
                tee. ! queue ! intervideosink channel=video_mix_preview
            """

        if Config.getboolean('stream-blanker', 'enabled'):
            pipeline += """
                tee. ! queue ! intervideosink channel=video_mix_streamblanker
            """

        # One intervideosrc input per configured source, all feeding `mix.`.
        for idx, name in enumerate(self.names):
            pipeline += """
                intervideosrc channel=video_{name}_mixer !
                {caps} !
                mix.
            """.format(
                name=name,
                caps=self.caps,
                idx=idx
            )

        self.log.debug('Creating Mixing-Pipeline:\n%s', pipeline)
        self.mixingPipeline = Gst.parse_launch(pipeline)
        self.mixingPipeline.use_clock(Clock)

        self.log.debug('Binding Error & End-of-Stream-Signal '
                       'on Mixing-Pipeline')
        self.mixingPipeline.bus.add_signal_watch()
        self.mixingPipeline.bus.connect("message::eos", self.on_eos)
        self.mixingPipeline.bus.connect("message::error", self.on_error)

        self.log.debug('Binding Handoff-Handler for '
                       'Synchronus mixer manipulation')
        sig = self.mixingPipeline.get_by_name('sig')
        sig.connect('handoff', self.on_handoff)

        # Per-source pad state, recalculated whenever the composite changes.
        self.padStateDirty = False
        self.padState = list()
        for idx, name in enumerate(self.names):
            self.padState.append(PadState())

        self.log.debug('Initializing Mixer-State')
        self.compositeMode = CompositeModes.fullscreen
        self.sourceA = 0
        self.sourceB = 1
        self.recalculateMixerState()
        self.applyMixerState()

        # Pin the background input to the bottom of the z-order.
        bgMixerpad = (self.mixingPipeline.get_by_name('mix')
                                         .get_static_pad('sink_0'))
        bgMixerpad.set_property('zorder', 0)

        self.log.debug('Launching Mixing-Pipeline')
        self.mixingPipeline.set_state(Gst.State.PLAYING)
Example #52
0
__id__ = "$Id:$"
__version__ = "$Revision:$"
__date__ = "$Date:$"
__copyright__ = "Copyright (c) 2016 Orca Team"
__license__ = "LGPL"

import gi
from gi.repository import GLib

# Probe for the GStreamer 1.0 bindings at import time; on success,
# Gst.init_check also reports whether GStreamer itself could initialize.
try:
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst
except:
    _gstreamerAvailable = False
else:
    _gstreamerAvailable, args = Gst.init_check(None)

from . import debug
from .sound_generator import Icon, Tone


class Player:
    """Plays Icons and Tones."""
    def __init__(self):
        # Lazily-initialized flag; pipeline elements are created on demand.
        self._initialized = False
        # GStreamer source and sink elements (set up later).
        self._source = None
        self._sink = None

        if not _gstreamerAvailable:
            # Log and carry on: the Player degrades to a no-op without Gst.
            msg = 'SOUND ERROR: Gstreamer is not available'
            debug.println(debug.LEVEL_INFO, msg, True)
Example #53
0
def main():
    """Run a single-RTSP-source DeepStream inference pipeline headlessly
    (fakesink output) until the main loop exits.
    """
    number_sources = 1
    GObject.threads_init()
    Gst.init(None)
    pipeline = Gst.Pipeline()
    is_live = False
    uri_name = "rtsp://192.168.1.10:554/user=admin_password=tlJwpbo6_channel=1_stream=0.sdp"
    ds_pgie_config = '/home/proxeye/dev/proxeye/proxeye/resources/ds_pgie_config.txt'

    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    pipeline.add(streammux)

    # Single source bin attached to muxer pad sink_1.
    source_bin = create_source_bin(1, uri_name)
    pipeline.add(source_bin)
    sinkpad = streammux.get_request_pad("sink_1")
    srcpad = source_bin.get_static_pad("src")
    srcpad.link(sinkpad)

    # Inference, RGBA conversion (easier CPU access from Python), tiler, OSD.
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    filter1.set_property("caps", caps1)
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

    if (is_aarch64()):
        # NOTE(review): on aarch64 this creates a plain queue named "queue"
        # where other examples use nvegltransform; presumably intentional
        # because the sink here is a fakesink — confirm.
        transform = Gst.ElementFactory.make("queue", "queue")

    sink = Gst.ElementFactory.make("fakesink", "fakesink")
    # NOTE(review): is_live is never set to True here although the source
    # is RTSP, so 'live-source' is never enabled — confirm intended.
    if is_live:
        streammux.set_property('live-source', 1)
    streammux.set_property('width', 640)
    streammux.set_property('height', 480)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', ds_pgie_config)
    sink.set_property('sync', False)
    pgie_batch_size = pgie.get_property("batch-size")

    # 1x1 tiling for the single source.
    tiler_rows = 1
    tiler_columns = 1
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", 640)
    tiler.set_property("height", 480)
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # Run refreshApp whenever the main loop is idle.
    GObject.idle_add(refreshApp)
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Attach the per-buffer probe on the tiler's src pad for frame handling.
    tiler_src_pad = tiler.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)
    print("Now playing...")
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)