예제 #1
0
def simple():
  """Build a single-layer GES timeline (a 1 s image clip plus a 10 s video
  clip), play it, and quit after 10 seconds.

  Relies on module-level names defined elsewhere: videoFile / imageFile
  (URIs) and quit (main-loop stop callback).
  """
  Gst.init(None)
  GES.init()

  timeline = GES.Timeline.new_audio_video()

  layer = GES.Layer()

  timeline.add_layer(layer)

  asset = GES.UriClipAsset.request_sync(videoFile)

  imageasset = GES.UriClipAsset.request_sync(imageFile)

  # add_asset(asset, start, in-point, duration, track-types)
  layer.add_asset(imageasset, 0 * Gst.SECOND, 0, 1 * Gst.SECOND, GES.TrackType.UNKNOWN)
  layer.add_asset(asset, 0 * Gst.SECOND, 0, 10 * Gst.SECOND, GES.TrackType.UNKNOWN)

  timeline.commit()

  pipeline = GES.Pipeline()
  pipeline.add_timeline(timeline)
  pipeline.set_state(Gst.State.PLAYING)

  mainLoop = GLib.MainLoop.new(None, False)
  GLib.timeout_add_seconds(10, quit, mainLoop)
  # BUG FIX: run the loop the quit callback was scheduled for. The original
  # called GLib.MainLoop().run(), creating a second loop object that
  # quit(mainLoop) never stops, so the program could not exit cleanly.
  mainLoop.run()
예제 #2
0
파일: replaygain.py 프로젝트: dreewoo/beets
    def _import_gst(self):
        """Load GStreamer through PyGObject and store the `Gst`,
        `GObject` and `GLib` modules as attributes on this object.

        Raises FatalReplayGainError when python-gi or GStreamer 1.0
        is not available.
        """
        try:
            import gi
        except ImportError:
            raise FatalReplayGainError(
                "Failed to load GStreamer: python-gi not found"
            )

        try:
            gi.require_version('Gst', '1.0')
        except ValueError as e:
            raise FatalReplayGainError(
                "Failed to load GStreamer 1.0: {0}".format(e)
            )

        from gi.repository import GObject, Gst, GLib

        # threads_init() is unnecessary on PyGObject 3.10.2+ and emits a
        # deprecation warning there; call it anyway (for older versions)
        # but keep the warning quiet.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            GObject.threads_init()

        Gst.init([sys.argv[0]])

        self.Gst = Gst
        self.GObject = GObject
        self.GLib = GLib
예제 #3
0
파일: check.py 프로젝트: brion/pitivi
def initialize_modules():
    """
    Initialize the GObject-introspection modules.

    This has to be done in a specific order otherwise the app
    crashes on some systems.
    """
    # Gdk and GtkClutter come first (order-sensitive; see docstring).
    from gi.repository import Gdk
    Gdk.init([])
    from gi.repository import GtkClutter
    GtkClutter.init([])

    import gi
    # threads_init() is only required for PyGObject older than 3.11.
    if not gi.version_info >= (3, 11):
        from gi.repository import GObject
        GObject.threads_init()

    from gi.repository import Gst
    Gst.init(None)
    from gi.repository import GES
    GES.init()

    # This is required because of:
    # https://bugzilla.gnome.org/show_bug.cgi?id=656314
    from gi.repository import GdkX11
    GdkX11  # noop
예제 #4
0
    def _initialise_gstreamer(self):
        """Lazily build the mp3 transcoding pipeline and keep references to
        the elements reconfigured per track (source, sink, encoder).

        Safe to call repeatedly; only the first call does any work.
        """
        if self._gstreamer_has_initialised:
            return

        self._gstreamer_has_initialised = True
        Gst.init(None)

        def on_new_decoded_pad(dbin, pad):
            # decodebin pads only appear at runtime; once one shows up,
            # link the decoder to the converter by name.
            decode = pad.get_parent()
            pipeline = decode.get_parent()
            convert = pipeline.get_by_name('convert')
            decode.link(convert)

        #we are going to mimic the following
        #gst-launch-1.0 filesrc location="02 - ABBA - Knowing Me, Knowing You.ogg" ! 
        #decodebin ! audioconvert ! audioresample ! lamemp3enc target=bitrate bitrate=128 ! 
        #id3v2mux ! filesink location=mytrack.mp3

        converter = Gst.Pipeline.new('converter')

        source = Gst.ElementFactory.make('filesrc', None)

        decoder = Gst.ElementFactory.make('decodebin', 'decoder')
        convert = Gst.ElementFactory.make('audioconvert', 'convert')
        sample = Gst.ElementFactory.make('audioresample', 'sample')
        encoder = Gst.ElementFactory.make('lamemp3enc', 'encoder')
        encoder.set_property('target', 'bitrate')
        encoder.set_property('bitrate', self.TARGET_BITRATE)

        mux = Gst.ElementFactory.make('id3v2mux', 'mux')
        if not mux:
            # use id3mux where id3v2mux is not available
            mux = Gst.ElementFactory.make('id3mux', 'mux')

        sink = Gst.ElementFactory.make('filesink', 'sink')

        converter.add(source)
        converter.add(decoder)
        converter.add(convert)
        converter.add(sample)
        converter.add(encoder)
        converter.add(mux)
        converter.add(sink)

        Gst.Element.link(source, decoder)
        #note - a decodebin cannot be linked at compile since
        #it doesnt have source-pads (http://stackoverflow.com/questions/2993777/gstreamer-of-pythons-gst-linkerror-problem)

        decoder.connect("pad-added", on_new_decoded_pad)

        Gst.Element.link(convert, sample)
        Gst.Element.link(sample, encoder)
        Gst.Element.link(encoder, mux)
        Gst.Element.link(mux, sink)

        # Elements that get reconfigured for each conversion job.
        self.converter=converter
        self.source=source
        self.sink=sink
        self.encoder=encoder
예제 #5
0
    def __init__(self, pls=None, *args, **kwargs):
        """Build the GL video player node.

        pls -- optional comma-separated string of media URIs; defaults to
               an empty playlist.

        Creates a playbin whose video-sink is a custom bin
        (glcolorscale -> glshader -> glimagesink) and registers the
        remotely controllable capabilities (corner positions, playlist,
        fade, pause, stop, ...).
        """
        super(GstZOCP, self).__init__(*args, **kwargs)
        GObject.threads_init()
        self.loop = GObject.MainLoop()
        Gst.init(None)
        if pls == None:
            pls = ""
            #pls = "file:///home/people/arnaud/Videos/tordinaire-youtubeHD.mp4" 
        #pls = "file:///home/pi/test3.h264,file:///home/pi/tordinaire-youtubeHD.mp4"
        #pls = "file:///home/people/arnaud/Videos/test.h264,file:///home/people/arnaud/Videos/test2.h264"
        # index into the comma-separated playlist (see commented set_property below)
        self.count = 0
        # create elements
        self.playbin = Gst.ElementFactory.make('playbin', 'playbin0')
        self.glcolorconv = Gst.ElementFactory.make("glcolorscale", "glcolorconv0")
        self.glshader = Gst.ElementFactory.make("glshader", "glshader0")
        self.glimagesink = Gst.ElementFactory.make('glimagesink', "glimagesink0")
        self.sinkbin = Gst.Bin()

        # setup the pipeline
        #videosrc.set_property("video-sink", glimagesink)
        #self.playbin.set_property("uri", pls.split(',')[self.count])
        #self.glimagesink.set_locked_state(True)
        self.sinkbin.add(self.glcolorconv)
        self.sinkbin.add(self.glshader)
        self.sinkbin.add(self.glimagesink)

        # we add a message handler
        self.bus = self.playbin.get_bus()
        self.bus.add_watch(0, self.bus_call, self.loop) # 0 == GLib.PRIORITY_DEFAULT 

        # we link the elements together
        self.glcolorconv.link(self.glshader)
        self.glshader.link(self.glimagesink)
        # expose glcolorscale's sink pad as the bin's sink so playbin can link to it
        ghostpad = Gst.GhostPad.new("sink", self.glcolorconv.get_static_pad("sink"))
        self.sinkbin.add_pad(ghostpad)

        #self.playbin.connect("pad-added", self.on_pad_added, self.sinkbin)
        #self.playbin.connect("drained", self.on_drained)
        #self.playbin.connect("about-to-finish", self.update_uri)

        # set properties of elements
        self.glshader.set_property("location", "shader.glsl")
        self.glshader.set_property("vars", "float alpha = float(1.);")
        self.glshader.set_property("preset", "preset.glsl")
        self.playbin.set_property("video-sink",self.sinkbin)

        # advertise the node name and its remote-controllable capabilities
        self.set_name("zvidplyr@{0}".format(socket.gethostname()))
        self.register_bool("quit", False, access='rw')
        self.register_vec2f("top_left", (-1.0, 1.0), access='rw', step=[0.01, 0.01])
        self.register_vec2f('top_right', (1.0, 1.0), access='rw', step=[0.01, 0.01])
        self.register_vec2f('bottom_right', (1.0, -1.0), access='rw', step=[0.01, 0.01])
        self.register_vec2f('bottom_left', (-1.0, -1.0), access='rw', step=[0.01, 0.01])
        self.register_string("playlist", pls, access="rws")
        self.register_bool("loop", True, access="rwse")
        self.register_bool("fade", False, access="rwse")
        self.register_vec3f("fade_color", (1,0,0), access="rws")
        self.register_bool("pause", False, access="rwse")
        self.register_bool("stop", False, access="rwse")

        # current fade coefficient (1.0 = fully visible)
        self._fade_val = 1.0
예제 #6
0
파일: main.py 프로젝트: worr/sugar
def main():
    """Sugar shell entry point: initialize the toolkit stacks, start the
    window manager, set up the desktop environment, then run the GTK main
    loop until interrupted."""
    GLib.threads_init()
    Gdk.threads_init()
    dbus.glib.threads_init()
    Gst.init(sys.argv)

    cleanup_temporary_files()

    _start_window_manager()

    setup_locale()
    setup_fonts()
    setup_theme()

    # this must be added early, so that it executes and unfreezes the screen
    # even when we initially get blocked on the intro screen
    GLib.idle_add(unfreeze_dcon_cb)

    GLib.idle_add(setup_cursortracker_cb)
    sound.restore()
    keyboard.setup()

    sys.path.append(config.ext_path)

    if not intro.check_profile():
        _start_intro()
    else:
        _begin_desktop_startup()

    try:
        Gtk.main()
    except KeyboardInterrupt:
        # FIX: parenthesized so the statement is valid on both Python 2 and 3
        # (the original py2-only print statement is a SyntaxError on py3).
        print('Ctrl+C pressed, exiting...')

    _stop_window_manager()
예제 #7
0
def main():
    """Initialize GObject threading and GStreamer, display the webcam
    widget, and block in the GTK main loop until it exits."""
    GObject.threads_init()
    Gst.init(None)
    widget = WebcamWidget()
    widget.show()
    Gtk.main()
    exit(0)
예제 #8
0
파일: player.py 프로젝트: erwanj/lollypop
	def __init__(self):
		"""Create the playbin-based player.

		Initializes track/shuffle/party bookkeeping, creates the playbin,
		hooks 'about-to-finish' for gapless playback and subscribes to the
		error/eos/stream-start bus messages.
		"""
		GObject.GObject.__init__(self)
		Gst.init(None)

		# Playback bookkeeping; -1 means nothing is loaded yet.
		self._current_track_number = -1
		self._current_track_album_id = -1
		self._current_track_id = -1
		self._albums = []
		self._timeout = None
		self._shuffle = False
		self._shuffle_tracks_history = []
		self._shuffle_albums_history = []
		self._party = False
		self._party_ids = []
		self._queue = []

		self._playbin = Gst.ElementFactory.make('playbin', 'player')
		# Gapless playback: queue the next uri just before the current one ends.
		self._playbin.connect("about-to-finish", self._on_stream_about_to_finish)
		# _rg_setup presumably configures ReplayGain -- defined elsewhere.
		self._rg_setup()


		self._bus = self._playbin.get_bus()
		self._bus.add_signal_watch()
		self._bus.connect('message::error', self._on_bus_error)
		self._bus.connect('message::eos', self._on_bus_eos)
		self._bus.connect('message::stream-start', self._on_stream_start)
예제 #9
0
 def __init__(self):
     """
         Init playbin
     """
     Gst.init(None)
     BasePlayer.__init__(self)
     self._codecs = Codecs()
     self._playbin = Gst.ElementFactory.make('playbin', 'player')
     # Strip the video flag so playbin stays audio-only.
     flags = self._playbin.get_property("flags")
     flags &= ~GstPlayFlags.GST_PLAY_FLAG_VIDEO
     self._playbin.set_property('flags', flags)
     # 5 MiB buffer, up to 10 seconds of buffered media.
     self._playbin.set_property('buffer-size', 5 << 20)
     self._playbin.set_property('buffer-duration', 10 * Gst.SECOND)
     ReplayGainPlayer.__init__(self, self._playbin)
     # Gapless playback: queue the next track just before this one ends.
     self._playbin.connect('about-to-finish',
                           self._on_stream_about_to_finish)
     bus = self._playbin.get_bus()
     bus.add_signal_watch()
     bus.connect('message::error', self._on_bus_error)
     bus.connect('message::eos', self._on_bus_eos)
     bus.connect('message::element', self._on_bus_element)
     bus.connect('message::stream-start', self._on_stream_start)
     bus.connect("message::tag", self._on_bus_message_tag)
     self._handled_error = None
     self._start_time = 0
예제 #10
0
def main(args):
    """Play `uri` with a playbin while publishing the pipeline clock over
    UDP so that network slaves can synchronize to this master.

    args -- [program_name, uri, port]
    """
    _, uri, port = args
    port = int(port)

    Gst.init(sys.argv)

    # make the pipeline
    pipeline = Gst.parse_launch('playbin')
    pipeline.set_property('uri', uri) # uri interface

    # make sure some other clock isn't autoselected
    clock = Gst.SystemClock.obtain()
    # FIX: the original py2-only print statement is a SyntaxError on py3;
    # a single %-formatted argument works on both.
    print('Using clock: %s' % clock)
    pipeline.use_clock(clock)

    # this will start a server listening on a UDP port;
    # keep the reference alive or the provider would be collected
    clock_provider = GstNet.NetTimeProvider.new(clock, '0.0.0.0', port)

    # we explicitly manage our base time
    base_time = clock.get_time()
    print('Start slave as: python ./play-slave.py %s 192.168.1.149 %d %d'
          % (uri, port, base_time))

    # disable the pipeline's management of base_time -- we're going
    # to set it ourselves.
    pipeline.set_start_time(Gst.CLOCK_TIME_NONE)
    pipeline.set_base_time(base_time)

    # now we go :)
    pipeline.set_state(Gst.State.PLAYING)

    # wait until things stop
    pipeline.get_bus().poll(Gst.MessageType.EOS | Gst.MessageType.ERROR, Gst.CLOCK_TIME_NONE)
    pipeline.set_state(Gst.State.NULL)
예제 #11
0
 def __init__(self, blockify):
     """Playbin-based interlude player owned by a Blockify instance.

     blockify -- the main Blockify object this player belongs to.
     """
     Gst.init(None)
     self.Gst = Gst
     self.b = blockify
     # Flags coordinated with the rest of the app; presumably track manual
     # playback control and one-shot resume/disable state -- see callers.
     self.manual_control = False
     self.temp_autoresume = False
     self.temp_disable = False
     self._index = 0
     self._autoresume = util.CONFIG["interlude"]["autoresume"]
     self.playback_delay = util.CONFIG["interlude"]["playback_delay"]
     # Automatically resume spotify playback after n seconds.
     self.radio_timeout = util.CONFIG["interlude"]["radio_timeout"]
     # Matches URI schemes such as "http://" (anything "letters://").
     self.uri_rx = re.compile("[A-Za-z]+:\/\/")
     # File extensions accepted when parsing the playlist.
     self.formats = ["mp3", "mp4", "flac", "wav", "wma", "ogg", "avi", "mov", "mpg", "flv", "wmv", \
                     "spx", "3gp", "b-mtp", "aac", "aiff", "raw", "midi", "ulaw", "alaw", "gsm" ]
     self.player = Gst.ElementFactory.make("playbin", "player")
     self.player.connect("about-to-finish", self.on_about_to_finish)
     # Get and watch the bus. We use this in blockify-ui.
     self.bus = self.player.get_bus()
     self.bus.add_signal_watch()
     # self.bus.connect("message::tag", self.on_tag_changed)
     # self.bus.connect("message::eos", self.on_finish)
     # Finally, load the playlist file.
     log.info("InterludePlayer initialized.")
     self.load_playlist(self.parse_playlist(), util.CONFIG["interlude"]["start_shuffled"])
예제 #12
0
파일: nostart.py 프로젝트: lubosz/ges-tests
def simple():
  """Build a two-layer GES timeline (an image clip on one layer, a video
  clip on the other), play it in a GES.Pipeline and run the Gtk main loop.

  Relies on module-level names defined elsewhere: videoFile / imageFile
  (URIs) plus the busMessageCb, duration_querier and handle_sigint
  callbacks.
  """
  Gst.init(None)
  GES.init()

  timeline = GES.Timeline.new_audio_video()

  # Separate layers for the image overlay and the video clip.
  imagelayer = GES.Layer()
  videolayer = GES.Layer()
  timeline.add_layer(imagelayer)
  timeline.add_layer(videolayer)

  asset = GES.UriClipAsset.request_sync(videoFile)
  imageasset = GES.UriClipAsset.request_sync(imageFile)

  # add_asset(asset, start, in-point, duration, track-types)
  imagelayer.add_asset(imageasset, 1 * Gst.SECOND, 0, 1 * Gst.SECOND, GES.TrackType.UNKNOWN)
  videolayer.add_asset(asset, 0 * Gst.SECOND, 0, 10 * Gst.SECOND, GES.TrackType.UNKNOWN)

  timeline.commit()

  pipeline = GES.Pipeline()
  pipeline.add_timeline(timeline)

  pipeline.set_state(Gst.State.PLAYING)

  bus = pipeline.get_bus()
  bus.add_signal_watch()
  bus.connect("message", busMessageCb)
  # Call duration_querier(pipeline) every 300 ms.
  GObject.timeout_add(300, duration_querier, pipeline)

  signal.signal(signal.SIGINT, handle_sigint)
  Gtk.main()
예제 #13
0
파일: daw_player.py 프로젝트: RaaH/dawawin
 def prepare_gst(self):
     """Create the playback element and set the initial volume.

     If the GStreamer bindings never got imported (`Gst` undefined ->
     NameError), the widget is made insensitive instead of crashing.
     """
     try:
         Gst.init(None)
         # NOTE(review): 'playbin2' is the gstreamer-0.10 factory name;
         # under the Gst 1.0 API the factory is 'playbin' -- confirm
         # which GStreamer version this code targets.
         self._player = Gst.ElementFactory.make('playbin2', None)
         self._volume.set_value(50)
     except NameError:
         self.set_sensitive(False)
예제 #14
0
    def __llenar_lista(self):
        """Populate the plugin tree with every registered GStreamer plugin
        and, beneath each plugin, its features.

        Best-effort: if GStreamer cannot be imported or initialized, the
        list is simply left empty.
        """
        try:
            import gi
            gi.require_version('Gst', '1.0')
            from gi.repository import Gst

            Gst.init([])

            registry = Gst.Registry.get()
            plugins = registry.get_plugin_list()

        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; Exception keeps the deliberate best-effort intent.
        except Exception:
            return

        # FIX: renamed from `iter`, which shadowed the builtin.
        raiz = self.lista.get_model().get_iter_first()

        for elemento in plugins:

            iteractual = self.lista.get_model().append(
                raiz, [elemento.get_name(), elemento.get_description()])

            features = registry.get_feature_list_by_plugin(elemento.get_name())

            if len(features) > 1:
                for feature in features:
                    self.lista.get_model().append(
                        iteractual,
                        [feature.get_name(),
                        elemento.get_description()])
예제 #15
0
    def __init__(self, client_ip, client_base_port, filename, fileoffset):
        """Build the receive-side rtpbin launch string for one client.

        Two RTP sessions are described: H263-1998 video (RTP on
        client_base_port, RTCP in on +1, RTCP out to +5) and AMR audio
        (RTP on +2, RTCP in on +3, RTCP out to +7). GStreamer is
        initialized at the end.

        FIX: prints are parenthesized -- each call passes one already
        concatenated string, so the code now runs on Python 2 and 3.
        """
        print("[DBG_Streamclient] Init enter with Args, client_ip: " + client_ip + ", client_base_port: " + str(client_base_port) + ", filename: " + filename + ", fileoffset: " + str(fileoffset))
        self.client_ip, self.client_base_port, self.filename, self.fileoffset = client_ip, client_base_port, filename, fileoffset
        self.client_port1 = client_base_port   #5000
        self.client_port2 = client_base_port+1 #5001
        self.client_port3 = client_base_port+2 #5002
        self.client_port4 = client_base_port+3 #5003
        self.host_port1 = client_base_port+5 #5005
        self.host_port2 = client_base_port+7 #5007

        # Video session (rtpbin session 0).
        self.client_str = " rtpbin name=rtpbin latency=100 udpsrc caps=application/x-rtp,media=(string)video,clock-rate=(int)90000,encoding-name=(string)H263-1998 port=" + str(self.client_port1) + " ! "

        self.client_str += "rtpbin.recv_rtp_sink_0 rtpbin. ! rtph263pdepay ! avdec_h263 ! videoconvert ! autovideosink udpsrc port=" + str(self.client_port2)  + " ! "

        self.client_str += "rtpbin.recv_rtcp_sink_0 rtpbin.send_rtcp_src_0 ! udpsink port=" + str(self.host_port1)  + " sync=false async=false "

        # Audio session (rtpbin session 1).
        self.client_str += "udpsrc caps=application/x-rtp,media=(string)audio,clock-rate=(int)8000,encoding-name=(string)AMR,encoding-params=(string)1,octet-align=(string)1 port=" + str(self.client_port3) + " ! "

        self.client_str += "rtpbin.recv_rtp_sink_1 rtpbin. ! rtpamrdepay ! amrnbdec ! audioconvert ! audioresample ! autoaudiosink udpsrc port=" + str(self.client_port4) + " ! "

        self.client_str += "rtpbin.recv_rtcp_sink_1 rtpbin.send_rtcp_src_1 ! udpsink port="+ str(self.host_port2)  + " sync=false async=false"

        print("[DBG_Streamclient] client_port1: " + str(self.client_port1) + ", client_port2: " + str(self.client_port2) + ", client_port3: " + str(self.client_port3))
        print("[DBG_Streamclient] client_port4: " + str(self.client_port4) + ", host_port1: " + str(self.host_port1) + ", host_port2: " + str(self.host_port2))
        print("[DBG_Streamclient] client_str:\n" + self.client_str)

        Gst.init([])
def main():
    """Switch a tcam camera to manual exposure/gain and apply a fixed
    exposure time, printing the device properties before and after."""
    Gst.init(sys.argv)

    # Fill in a serial string here to select a specific device.
    serial_number = None

    cam = Gst.ElementFactory.make("tcambin")

    if serial_number:
        # plain gstreamer property
        cam.set_property("serial", serial_number)

    # in the READY state the camera will always be initialized
    cam.set_state(Gst.State.READY)

    # Snapshot of the properties before we touch anything.
    print_properties()

    # Disable the automatics, then apply a manual exposure time.
    cam.set_tcam_property("Exposure Auto", False)
    cam.set_tcam_property("Gain Auto", False)

    cam.set_tcam_property("Exposure", 3000)

    print_properties()

    # cleanup, reset state
    cam.set_state(Gst.State.NULL)
예제 #17
0
def main(args):
    """Start the Galicaster core and run GTK.

    Returns 0 on clean exit, 1 on bad usage, -1 when startup fails.
    FIX: print statements parenthesized so the module parses on both
    Python 2 and 3.
    """
    def usage():
        sys.stderr.write("usage: %s\n" % args[0])
        return 1

    if len(args) != 1:
        return usage()
    try:
        Gst.init(None)
        gc = core.Main()
        Gtk.main()
    except KeyboardInterrupt:
        gc.emit_quit()
        print("Interrupted by user!")
    except Exception as exc:
        # debug
        # print traceback.format_exc()

        msg = "Error starting Galicaster: {0}".format(exc)
        print(msg)

        from galicaster.core import context
        logger = context.get_logger()
        logger and logger.error(msg)

        # Broadcast the quit notification so subsystems shut down cleanly.
        d = context.get_dispatcher()
        d.emit("galicaster-notify-quit")
        return -1


    return 0
예제 #18
0
    def test_new(self):
        """A Gst.Structure built from keyword args and one parsed from a
        serialized string must both expose the same field value."""
        Gst.init(None)
        for structure in (Gst.Structure('test', test=1),
                          Gst.Structure('test,test=1')):
            self.assertEqual(structure['test'], 1)
예제 #19
0
 def __init__(self, verbose=False):
     """Check the optional runtime dependencies, store the verbosity
     flag and bring up GStreamer.

     Raises ValueError when pygobject or python-imaging is missing.
     """
     missing = None
     if not GI_GSTREAMER_INSTALLED:
         missing = 'pygobject library was not found'
     elif not PIL_INSTALLED:
         missing = 'python-imaging library was not found'
     if missing is not None:
         raise ValueError(missing)
     self.verbose = verbose
     Gst.init(None)
예제 #20
0
 def __init__(self):
     """
         Init playbin
     """
     Gst.init(None)
     BasePlayer.__init__(self)
     self._codecs = Codecs()
     self._crossfading = False
     # Two playbins so playback can crossfade between tracks; _playbin
     # always points at the currently active one (initially playbin1).
     self._playbin = self._playbin1 = Gst.ElementFactory.make(
                                                        'playbin', 'player')
     self._playbin2 = Gst.ElementFactory.make('playbin', 'player')
     self._preview = None
     self._plugins = self.plugins1 = PluginsPlayer(self._playbin1)
     self.plugins2 = PluginsPlayer(self._playbin2)
     self._volume_id = self._playbin.connect('notify::volume',
                                             self._on_volume_changed)
     # Identical audio-only configuration for both playbins.
     for playbin in [self._playbin1, self._playbin2]:
         # Strip the video flag so playback stays audio-only.
         flags = playbin.get_property("flags")
         flags &= ~GstPlayFlags.GST_PLAY_FLAG_VIDEO
         playbin.set_property('flags', flags)
         # 5 MiB buffer, up to 10 seconds of buffered media.
         playbin.set_property('buffer-size', 5 << 20)
         playbin.set_property('buffer-duration', 10 * Gst.SECOND)
         playbin.connect('about-to-finish',
                         self._on_stream_about_to_finish)
         bus = playbin.get_bus()
         bus.add_signal_watch()
         bus.connect('message::error', self._on_bus_error)
         bus.connect('message::eos', self._on_bus_eos)
         bus.connect('message::element', self._on_bus_element)
         bus.connect('message::stream-start', self._on_stream_start)
         bus.connect("message::tag", self._on_bus_message_tag)
     self._handled_error = None
     self._start_time = 0
예제 #21
0
def import_gst1():
    """Import GStreamer 1.0 through PyGObject and monkey-patch the `Gst`
    module with pygst (gstreamer-0.10) style aliases so legacy call sites
    keep working.

    Also records the versions in the module-level `gst_version` /
    `pygst_version` globals and returns the patched module.
    """
    log("import_gst1()")
    import gi
    log("import_gst1() gi=%s", gi)
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst           #@UnresolvedImport
    log("import_gst1() Gst=%s", Gst)
    Gst.init(None)
    #make it look like pygst (gstreamer-0.10):
    Gst.registry_get_default = Gst.Registry.get
    Gst.get_pygst_version = lambda: gi.version_info
    Gst.get_gst_version = lambda: Gst.version()
    def new_buffer(data):
        # allocate an uninitialized buffer, then copy the payload in
        buf = Gst.Buffer.new_allocate(None, len(data), None)
        buf.fill(0, data)
        return buf
    Gst.new_buffer = new_buffer
    Gst.element_state_get_name = Gst.Element.state_get_name
    #note: we only copy the constants we actually need..
    for x in ('NULL', 'PAUSED', 'PLAYING', 'READY', 'VOID_PENDING'):
        setattr(Gst, "STATE_%s" % x, getattr(Gst.State, x))
    # FIX: 'STREAM_STATUS' appeared twice in this tuple; the duplicate was
    # redundant (the second setattr was a no-op) and has been removed.
    for x in ('EOS', 'ERROR', 'TAG', 'STREAM_STATUS', 'STATE_CHANGED',
              'LATENCY', 'WARNING', 'ASYNC_DONE', 'NEW_CLOCK',
              'BUFFERING', 'INFO', 'STREAM_START'
              ):
        setattr(Gst, "MESSAGE_%s" % x, getattr(Gst.MessageType, x))
    Gst.MESSAGE_DURATION = Gst.MessageType.DURATION_CHANGED
    Gst.FLOW_OK = Gst.FlowReturn.OK
    global gst_version, pygst_version
    gst_version = Gst.get_gst_version()
    pygst_version = Gst.get_pygst_version()
    return Gst
예제 #22
0
 def __init__(self):
     """Initialize GStreamer, build the default pipeline, and run a
     GObject main loop on a daemon thread in the background."""
     GObject.threads_init()
     Gst.init(None)
     self.pipeline = None
     self._create_pipeline(default_song)
     main_loop = GObject.MainLoop()
     worker = threading.Thread(target=main_loop.run, daemon=True)
     worker.start()
예제 #23
0
	def __init__(self, db):
		"""Create the playbin-backed player.

		db -- database handle stored for use by the rest of the class.
		"""
		GObject.GObject.__init__(self)
		Gst.init(None)

		# Track/shuffle/party bookkeeping; -1 means nothing loaded yet.
		self._current_track_number = -1
		self._current_track_album_id = -1
		self._current_track_id = -1
		self._albums = []
		self._progress_callback = None
		self._timeout = None
		self._shuffle = False
		self._shuffle_tracks_history = []
		self._shuffle_albums_history = []
		self._party = False
		self._party_ids = []
		self._playlist = []

		self._db = db
		self._player = Gst.ElementFactory.make('playbin', 'player')
		# _rg_setup presumably configures ReplayGain -- defined elsewhere.
		self._rg_setup()


		self._bus = self._player.get_bus()
		self._bus.add_signal_watch()
		#self._bus.connect('message::state-changed', self._on_bus_state_changed)
		#self.bus.connect('message::error', self._onBusError)
		self._bus.connect('message::eos', self._on_bus_eos)
예제 #24
0
    def __init__(self, width, height):
        """Build the JingleBank window: a grid of seven JingleButtons that
        all play TESTFILE, each with its own label, color and position."""
        Gtk.Window.__init__(self, title="JingleBank")

        Gst.init()

        # Grid to organize the buttons
        self.grid = Gtk.Grid()
        self.add(self.grid)

        # Button dimensions (will be replaced by configurable values)
        self.buttonwidth = width
        self.buttonheight = height

        # (color, label, grid column, grid row) -- will be read from a
        # config file in the future.
        specs = [
            ([0.3, 0.7, 0.9], "Track 1", 1, 1),
            ([0.4, 0.6, 0.4], "Track 2", 1, 2),
            ([0.5, 0.5, 0.3], "Track 3", 2, 1),
            ([0.6, 0.4, 0.2], "Track 4", 2, 2),
            ([0.7, 0.3, 0.4], "Track 5", 3, 1),
            ([0.8, 0.2, 0.3], "Track 6", 3, 2),
            ([0.9, 0.1, 0.8], "Track 7", 3, 3),
        ]
        for number, (color, label, column, row) in enumerate(specs, start=1):
            button = JingleButton(self.buttonwidth, self.buttonheight,
                                  color, label, TESTFILE)
            # keep the original button1..button7 attribute names
            setattr(self, "button%d" % number, button)
            self.grid.attach(button, column, row, 1, 1)
예제 #25
0
    def __init__(self):
        """Create the player widget.

        Sets up playlist state, album-art defaults, a GstPbutils discoverer
        for URL probing, the playbin with its bus handlers, and connects
        the org.gnome.Music 'repeat' setting.
        """
        GObject.GObject.__init__(self)
        self.playlist = None
        self.playlistType = None
        self.playlistId = None
        self.playlistField = None
        self.currentTrack = None
        self._lastState = Gst.State.PAUSED
        self.cache = AlbumArtCache.get_default()
        self._symbolicIcon = self.cache.get_default_icon(ART_SIZE, ART_SIZE)

        Gst.init(None)

        # Asynchronous media probing; results arrive via the
        # 'discovered' signal handled in _on_discovered.
        self.discoverer = GstPbutils.Discoverer()
        self.discoverer.connect('discovered', self._on_discovered)
        self.discoverer.start()
        self._discovering_urls = {}

        self.player = Gst.ElementFactory.make('playbin', 'player')
        self.bus = self.player.get_bus()
        self.bus.add_signal_watch()

        self._settings = Gio.Settings.new('org.gnome.Music')
        self._settings.connect('changed::repeat', self._on_settings_changed)
        self.repeat = self._settings.get_enum('repeat')

        self.bus.connect('message::state-changed', self._on_bus_state_changed)
        self.bus.connect('message::error', self._onBusError)
        self.bus.connect('message::eos', self._on_bus_eos)
        self._setup_view()

        # Signal-handler ids for playlist model changes; 0 = not connected.
        self.playlist_insert_handler = 0
        self.playlist_delete_handler = 0
예제 #26
0
def check_soft_dependencies():
    """
    Verify for the presence of optional modules that enhance the user experience

    If those are missing from the system, the user will be notified of their
    existence by the presence of a "Missing dependencies..." button at startup.

    Findings are recorded in the module-level `missing_soft_deps` dict
    (dependency name -> translatable description).
    """
    # Importing Gst again (even if we did it in hard deps checks), anyway it
    # seems to have no measurable performance impact the 2nd time:
    from gi.repository import Gst
    Gst.init(None)
    registry = Gst.Registry.get()
    # Description strings are translatable as they may be shown in the pitivi UI
    if not _try_import("pycanberra"):
        missing_soft_deps["PyCanberra"] = \
            _("enables sound notifications when rendering is complete")
    if not _try_import_from_gi("Notify"):
        missing_soft_deps["libnotify"] = \
            _("enables visual notifications when rendering is complete")
    if not registry.find_plugin("libav"):
        missing_soft_deps["GStreamer Libav plugin"] = \
            _('additional multimedia codecs through the Libav library')
    # Apparently, doing a registry.find_plugin("frei0r") is not enough.
    # Sometimes it still returns something even when frei0r is uninstalled,
    # and anyway we're looking specifically for the scale0tilt filter.
    # Don't use Gst.ElementFactory.make for this check, it's very I/O intensive.
    # Instead, ask the registry with .lookup_feature or .check_feature_version:
    if not registry.lookup_feature("frei0r-filter-scale0tilt"):
        missing_soft_deps["Frei0r"] = \
            _("additional video effects, clip transformation feature")
예제 #27
0
def main():
    """Resolve a <url, quality> pair with Livestreamer and hand the chosen
    stream to the GStreamer-based player."""
    if len(sys.argv) < 3:
        exit("Usage: {0} <url> <quality>".format(sys.argv[0]))

    gi.require_version("Gst", "1.0")
    gobject.threads_init()
    gst.init(None)

    target_url = sys.argv[1]
    wanted_quality = sys.argv[2]

    session = Livestreamer()

    session.set_loglevel("info")
    session.set_logoutput(sys.stdout)

    try:
        available = session.streams(target_url)
    except NoPluginError:
        exit("Livestreamer is unable to handle the URL '{0}'".format(target_url))
    except PluginError as err:
        exit("Plugin error: {0}.".format(err))

    if not available:
        exit("No streams found on URL '{0}'.".format(target_url))

    if wanted_quality not in available:
        exit("Unable to find '{0}' stream on URL '{1}'".format(wanted_quality, target_url))

    player = LivestreamerPlayer()

    # Blocks while the player consumes the stream.
    player.play(available[wanted_quality])
예제 #28
0
	def __init__(self, uri):
		"""Probe the media type of *uri*.

		Builds filesrc -> typefind -> fakesink, sets it playing and blocks
		in a GObject main loop; the have_type / eos / error / async_done
		callbacks (defined elsewhere) receive the results and are expected
		to stop the loop.
		"""
		self.__uri = uri

		# Initialize Gst
		Gst.init(None)

		self.pipe = Gst.Pipeline()

		#filesource = Gst.element_factory_make("filesrc", "filesource")
		#filesource.set_property("location", self.__location)
		filesource = Gst.Element.make_from_uri(Gst.URIType.SRC, self.__uri, "filesrc")
		fakesink = Gst.ElementFactory.make("fakesink", "sink")

		typefind = Gst.ElementFactory.make("typefind", "typefinder")
		typefind.connect("have_type", self.on_find_type)

		self.pipe.add(filesource)
		self.pipe.add(typefind)
		self.pipe.add(fakesink)
		#Gst.element_link_many(filesource, typefind, fakesink)
		filesource.link(typefind)
		typefind.link(fakesink)

		self.bus = self.pipe.get_bus()
		self.bus.add_signal_watch()
		self.bus.connect("message::eos", self.on_eos)
		self.bus.connect("message::error", self.on_error)
		self.bus.connect("message::async_done", self.on_async_done)

		# Start playback and block here until a callback stops the loop.
		self.pipe.set_state(Gst.State.PLAYING)
		self.mainloop = GObject.MainLoop()
		self.mainloop.run()
예제 #29
0
 def run(self):
     """Thread body: enable GObject threading, bring up GStreamer, then
     block inside the module-level `loop` main loop."""
     logging.debug("initalizing the player")
     GObject.threads_init()
     Gst.init(None)
     # NOTE(review): `loop` is a module-level global, not an attribute.
     loop.run()
예제 #30
0
def let_it_rain():
    """Application entry point: initialize the stack, build the Silver
    Rain app plus its D-Bus service, run GTK, and clean up on exit."""
    GObject.threads_init()
    Gst.init(None)
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    Notify.init("silver-rain")

    # System directory for images
    if not os.path.exists(IMG_DIR):
        os.makedirs(IMG_DIR)
    # Initialize config
    config.setup()
    # Directory for recordings
    if not os.path.exists(config.recs_dir):
        os.makedirs(config.recs_dir)

    # Stylesheet and translations before any UI is built
    css_load()
    set_translation()

    # Application object and its dbus endpoint
    app = SilverApp()
    dbus_service = SilverService(app)

    # Block in the GTK loop until the application quits
    Gtk.main()

    # Teardown
    app.clean()
    Notify.uninit()
#import gst
#gst.require("1.0")
import pygtk

pygtk.require("2.0")
import gtk
import sys
import os
from audio_video import AVDemo, create_decodebin
#from gi.repository import GObject
from gi.repository import GObject, Gtk
from gi.repository import Gst as gst

# One-time module-level initialization: enable GObject threading and bring
# up GStreamer before any of the classes below construct pipeline objects.
GObject.threads_init()
gst.init(None)


#import gobject
#gobject.threads_init()
class AVCrossfade(AVDemo):
    """Base class implementing boring, boiler-plate code.
    Sets up a basic gstreamer environment which includes:

    * a window containing a drawing area and basic media controls
    * a basic gstreamer pipeline using an ximagesink and an autoaudiosink
    * connects the ximagesink to the window's drawing area

    Derived classes need only override magic(), __name__,
    and __usage__ to create new demos."""
    # NOTE(review): the method bodies of this class are not present in
    # this excerpt; only the class docstring is visible here.
예제 #32
0
    def __init__(self,
                 sn=None,
                 width=1920,
                 height=1080,
                 framerate=30,
                 color=False):
        """Build and prepare a tcambin capture pipeline feeding an appsink.

        sn        -- camera serial number string (None lets tcambin pick)
        width     -- frame width in pixels
        height    -- frame height in pixels
        framerate -- frames per second; the magic value 2500000 selects
                     the fractional 2500000/10593 rate
        color     -- True for BGRx frames, False for 8-bit grayscale
        """
        Gst.init(sys.argv)
        self.height = height
        self.width = width
        # Frame-delivery state shared with the appsink callback.
        self.sample = None
        self.samplelocked = False
        self.newsample = False
        self.gotimage = False
        self.img_mat = None
        self.new_image_callback_external = None
        self.image_locked = False
        self.is_streaming = False

        # Gain / exposure limits used by property setters elsewhere.
        self.GAIN_MAX = 480
        self.GAIN_MIN = 0
        self.GAIN_STEP = 10
        self.EXPOSURE_TIME_MS_MIN = 0.02
        self.EXPOSURE_TIME_MS_MAX = 4000

        # Renamed from `format` so the builtin is not shadowed.
        pixel_format = "BGRx"
        if (color == False):
            pixel_format = "GRAY8"

        if (framerate == 2500000):
            p = 'tcambin serial="%s" name=source ! video/x-raw,format=%s,width=%d,height=%d,framerate=%d/10593' % (
                sn,
                pixel_format,
                width,
                height,
                framerate,
            )
        else:
            p = 'tcambin serial="%s" name=source ! video/x-raw,format=%s,width=%d,height=%d,framerate=%d/1' % (
                sn,
                pixel_format,
                width,
                height,
                framerate,
            )

        p += ' ! videoconvert ! appsink name=sink'

        print(p)
        try:
            self.pipeline = Gst.parse_launch(p)
        except GLib.Error as error:
            # BUG FIX: this handler formatted an undefined name `err`, so a
            # parse failure raised NameError and hid the real error message.
            print("Error creating pipeline: {0}".format(error))
            raise

        self.pipeline.set_state(Gst.State.READY)
        # Block until the state change completes and the device is open.
        self.pipeline.get_state(Gst.CLOCK_TIME_NONE)
        # Query a pointer to our source, so we can set properties.
        self.source = self.pipeline.get_by_name("source")

        # Query a pointer to the appsink, so we can assign the callback function.
        self.appsink = self.pipeline.get_by_name("sink")
        self.appsink.set_property("max-buffers", 5)
        self.appsink.set_property("drop", True)
        self.appsink.set_property("emit-signals", True)
예제 #33
0
    def __init__(self, path: str):
        """Store the file:// URI for *path* and create a GstPbutils
        discoverer for probing it."""
        super().__init__()
        media_path = pathlib.Path(path)
        self.uri = media_path.as_uri()

        Gst.init(None)
        self.discoverer: GstPbutils.Discoverer = GstPbutils.Discoverer()
예제 #34
0
# Copyright 2020-2021 Rafael Mardojai CM
# SPDX-License-Identifier: GPL-3.0-or-later

import sys
import gi

gi.require_version('Gst', '1.0')
gi.require_version('GstPlayer', '1.0')
gi.require_version('Gdk', '3.0')
gi.require_version('Gtk', '3.0')
gi.require_version('Handy', '1')

from gettext import gettext as _
from gi.repository import GLib, Gst, Gdk, Gio, Gtk, Handy
# Init GStreamer
Gst.init(None)

from blanket.mpris import MPRIS
from blanket.sound import MainPlayer
from blanket.settings import Settings
from blanket.window import BlanketWindow
from blanket.preferences import PreferencesWindow
from blanket.presets import PresetDialog
from blanket.about import AboutDialog


class Application(Gtk.Application):
    """Blanket's main GTK application object."""

    def __init__(self, version):
        """Create the application and register its display name."""
        super().__init__(
            application_id='com.rafaelmardojai.Blanket',
            flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE,
        )
        GLib.set_application_name(_('Blanket'))
예제 #35
0
def main():
    """Build and run a single-camera DeepStream inference + tracking pipeline.

    Topology: nvarguscamerasrc -> nvstreammux -> nvinfer -> nvtracker ->
    nvvideoconvert -> nvdsosd -> nvvidconv -> nvegltransform ->
    nveglglessink.

    Depends on module-level helpers defined elsewhere in this file:
    create_element_or_error, bus_call and osd_sink_pad_buffer_probe.
    """

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")
        return

    # FIX: `source` is configured, added and linked below, but its creation
    # had been commented out, so this function raised NameError at runtime.
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    # src_caps = create_element_or_error("capsfilter", "source-caps-definition")
    # src_caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, framerate=30/1, format=(string)NV12"))

    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    convertor = Gst.ElementFactory.make("nvvideoconvert", "convertor-1")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    convertor2 = Gst.ElementFactory.make("nvvidconv", "converter-2")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "egl-overlay")

    # Set element properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "./nv-inferance-config-files/config_infer_primary_trafficcamnet.txt")

    # Set properties of tracker
    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 384)
    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('ll-config-file', './tracker_config.yml')

    # Add elements to the pipeline
    pipeline.add(source)
    # pipeline.add(src_caps)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(transform)
    pipeline.add(sink)

    # Requesting sink_0 creates the muxer input pad that source.link()
    # below connects to.
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    source.link(streammux)
    # src_caps.link(streammux)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(transform)
    transform.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Attach a buffer probe on the tracker's sink pad to inspect metadata.
    tracker_sinkpad = tracker.get_static_pad("sink")
    if not tracker_sinkpad:
        # FIX: message previously said "nvosd" although this pad belongs to
        # the tracker.
        sys.stderr.write(" Unable to get sink pad of tracker")

    tracker_sinkpad.add_probe(Gst.PadProbeType.BUFFER,
                              osd_sink_pad_buffer_probe, 0)

    # Start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
예제 #36
0
def main(args):
    """Run a DeepStream pipeline that publishes detection metadata to a broker.

    Topology: filesrc -> h264parse -> nvv4l2decoder -> nvstreammux ->
    nvinfer -> nvvideoconvert -> nvdsosd -> tee; the tee fans out to
    (queue1 -> nvmsgconv -> nvmsgbroker) and (queue2 -> [nvegltransform on
    aarch64] -> sink or fakesink).

    Depends on module-level configuration and helpers defined elsewhere in
    this file: input_file, no_display, schema_type, proto_lib, conn_str,
    cfg_file, topic, PGIE_CONFIG_FILE, MSCONV_CONFIG_FILE, is_aarch64,
    bus_call, osd_sink_pad_buffer_probe, meta_copy_func and meta_free_func.
    The `args` parameter itself is not used in this body.
    """
    GObject.threads_init()
    Gst.init(None)

    # Register copy/free callbacks for user metadata handed to pyds.
    pyds.register_user_copyfunc(meta_copy_func)
    pyds.register_user_releasefunc(meta_free_func)

    print("Creating Pipeline \n ")

    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    msgconv = Gst.ElementFactory.make("nvmsgconv", "nvmsg-converter")
    if not msgconv:
        sys.stderr.write(" Unable to create msgconv \n")

    msgbroker = Gst.ElementFactory.make("nvmsgbroker", "nvmsg-broker")
    if not msgbroker:
        sys.stderr.write(" Unable to create msgbroker \n")

    tee = Gst.ElementFactory.make("tee", "nvsink-tee")
    if not tee:
        sys.stderr.write(" Unable to create tee \n")

    queue1 = Gst.ElementFactory.make("queue", "nvtee-que1")
    if not queue1:
        sys.stderr.write(" Unable to create queue1 \n")

    queue2 = Gst.ElementFactory.make("queue", "nvtee-que2")
    if not queue2:
        sys.stderr.write(" Unable to create queue2 \n")

    # Sink selection: fakesink when running headless, otherwise an EGL
    # renderer (with an extra transform stage on aarch64).
    if no_display:
        print("Creating FakeSink \n")
        sink = Gst.ElementFactory.make("fakesink", "fakesink")
        if not sink:
            sys.stderr.write(" Unable to create fakesink \n")
    else:
        if is_aarch64():
            transform = Gst.ElementFactory.make("nvegltransform",
                                                "nvegl-transform")

        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")

    print("Playing file %s " % input_file)
    source.set_property('location', input_file)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', PGIE_CONFIG_FILE)
    msgconv.set_property('config', MSCONV_CONFIG_FILE)
    msgconv.set_property('payload-type', schema_type)
    msgbroker.set_property('proto-lib', proto_lib)
    msgbroker.set_property('conn-str', conn_str)
    if cfg_file is not None:
        msgbroker.set_property('config', cfg_file)
    if topic is not None:
        msgbroker.set_property('topic', topic)
    msgbroker.set_property('sync', False)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(tee)
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(msgconv)
    pipeline.add(msgbroker)
    pipeline.add(sink)
    if is_aarch64() and not no_display:
        pipeline.add(transform)

    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)

    # The muxer input is a request pad; create it, then connect the
    # decoder's static src pad to it.
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)

    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(tee)
    queue1.link(msgconv)
    msgconv.link(msgbroker)
    if is_aarch64() and not no_display:
        queue2.link(transform)
        transform.link(sink)
    else:
        queue2.link(sink)
    # Both tee branches use request pads; link them to the queues manually.
    sink_pad = queue1.get_static_pad("sink")
    tee_msg_pad = tee.get_request_pad('src_%u')
    tee_render_pad = tee.get_request_pad("src_%u")
    if not tee_msg_pad or not tee_render_pad:
        sys.stderr.write("Unable to get request pads\n")
    tee_msg_pad.link(sink_pad)
    sink_pad = queue2.get_static_pad("sink")
    tee_render_pad.link(sink_pad)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Buffer probe on nvosd's sink pad inspects per-frame metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    print("Starting pipeline \n")

    # start play back and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pyds.unset_callback_funcs()
    pipeline.set_state(Gst.State.NULL)
예제 #37
0
def main():
    """Run a two-camera DeepStream pipeline with inference, tracking and a tiler.

    Per camera: nvarguscamerasrc -> capsfilter -> nvstreammux. Downstream:
    nvstreammux -> nvinfer -> nvtracker -> nvmultistreamtiler ->
    nvvideoconvert -> nvdsosd -> nvegltransform -> nveglglessink, with a
    queue between every stage.

    Depends on the module-level helper create_element_or_error.
    """

    cameras_list = [
        {
            "source": 0,
            "name": "Camera 1",
        },
        {
            "source": 1,
            "name": "Camera 2"
        },
    ]

    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    if not pipeline:
        print("Unable to create Pipeline")
        exit(0)

    streammux = create_element_or_error("nvstreammux", "stream-muxer")
    pipeline.add(streammux)

    for camera in cameras_list:
        source = create_element_or_error("nvarguscamerasrc",
                                         "source-" + camera['name'])
        source.set_property('sensor-id', camera['source'])
        source.set_property('bufapi-version', True)
        caps = create_element_or_error("capsfilter",
                                       "source-caps-source-" + camera['name'])
        caps.set_property(
            "caps",
            Gst.Caps.from_string(
                "video/x-raw(memory:NVMM),width=1920,height=1080,framerate=60/1,format=NV12"
            ))
        pipeline.add(source)
        pipeline.add(caps)

        sinkpad = streammux.get_request_pad('sink_' + str(camera['source']))
        # FIX: the capsfilter was added to the pipeline but never linked —
        # srcpad was taken from the camera itself, so the 1080p60/NV12 caps
        # above were silently bypassed. Take the muxer input from the
        # capsfilter's src pad and link source -> caps below.
        srcpad = caps.get_static_pad("src")

        if not sinkpad:
            print("Unable to create source sink pad")
            exit(0)
        if not srcpad:
            print("Unable to create source src pad")
            exit(0)
        source.link(caps)
        srcpad.link(sinkpad)

    pgie = create_element_or_error("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    convertor = create_element_or_error("nvvideoconvert", "converter-1")
    tiler = create_element_or_error("nvmultistreamtiler", "nvtiler")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")

    queue1 = create_element_or_error("queue", "queue1")
    queue2 = create_element_or_error("queue", "queue2")
    queue3 = create_element_or_error("queue", "queue3")
    queue4 = create_element_or_error("queue", "queue4")
    queue5 = create_element_or_error("queue", "queue5")
    queue6 = create_element_or_error("queue", "queue6")

    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)

    # Set Element Properties
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt"
    )

    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_nvdcf.so')
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 480)

    tiler.set_property("rows", 2)
    tiler.set_property("columns", 2)
    tiler.set_property("width", 1920)
    tiler.set_property("height", 1080)
    sink.set_property("qos", 0)

    # Add elements to the pipeline
    print("Adding elements to Pipeline")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(transform)
    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")

    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(tiler)
    tiler.link(queue4)
    queue4.link(convertor)
    convertor.link(queue5)
    queue5.link(nvosd)
    nvosd.link(queue6)
    queue6.link(transform)
    transform.link(sink)

    # NOTE(review): no bus watch is attached to this loop, so EOS/error
    # messages never stop it — confirm whether bus_call wiring was intended.
    loop = GObject.MainLoop()

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
예제 #38
0
def main(argv):
    """Parse command-line options and run the DeepStream face-recognition app.

    Builds: N x source_bin -> nvstreammux -> nvinfer -> nvvideoconvert ->
    capsfilter(RGBA) -> nvmultistreamtiler -> nvvideoconvert -> nvdsosd ->
    [nvegltransform on aarch64] -> nveglglessink.

    Configuration lives in module-level globals (gui, sampling_rate,
    resize_factor, up_scale, detection_model, number_jitters,
    encoding_model, save_unknown, person_min_confidence, folder_name, ...)
    which the CLI arguments may override below.
    """
    # first parse the arguments
    parser = argparse.ArgumentParser(description='Deepstream Face Recognition')
    parser.add_argument(
        '-l',
        '--learned',
        help='this is the file that contains the learned faces')
    parser.add_argument(
        '-s',
        '--stream',
        help=
        'this is the URL or filenames of the video stream (argument can be used multilple times)',
        action='append',
        nargs='+')
    parser.add_argument('-o',
                        '--output',
                        help='Optional: this is the output (gui or headless)')
    parser.add_argument('-r', '--rate', help='Optional: this is sampling rate')
    parser.add_argument('-f',
                        '--factor',
                        help='Optional: this is the resize factor')
    parser.add_argument('-p',
                        '--upscale',
                        help='Optional: this is the upscale')
    parser.add_argument(
        '-d',
        '--detection',
        help='Optional: this is the detection model (hog or cnn)')
    parser.add_argument('-j',
                        '--jitters',
                        help='Optional: this is the number of jitters')
    parser.add_argument(
        '-e',
        '--encoding',
        help='Optional: this is the encoding model (large or small)')
    parser.add_argument(
        '-w',
        '--write',
        help=
        'Optional: this enable or disables writing (saving) unknown faces (on or off)'
    )
    parser.add_argument(
        '-c',
        '--confidence',
        help=
        'Optional: minimum confidence of person before doing face recognition')
    parser.add_argument(
        '-u',
        '--unclear',
        help='Optional: this is the directory to store unclear objects')

    args = parser.parse_args()
    global learnedfile
    learnedfile = args.learned
    if not learnedfile:
        print(
            'No file with learned faces specified. Please use: python3 deepstream_fr.py -l \"trained_faces.pkl\" -s \"rtsp://thecamera.com\" [-r 5 -f 2 -p 2 -d \"hog\" -j 1 -e \"large" -c 0.33 -u logdir/frames]'
        )
        sys.exit(
            404)  # Bail out with 404 = no file with learned faces specified
    global stream
    stream = args.stream
    if not stream:
        print(
            'No video stream specified. Please use: python3 deepstream_fr.py -l \"trained_faces.pkl\" -s \"rtsp://thecamera.com\" [-r 5 -f 2 -p 2 -d \"hog\" -j 1 -e \"large" -c 0.33 -u logdir/frames]'
        )
        sys.exit(404)  # Bail out with 404 = no stream specified
    # overrule fixed values when used in argument
    if args.output:
        if args.output.upper() == 'HEADLESS':
            global gui
            gui = False
    if args.rate:
        global sampling_rate
        sampling_rate = int(args.rate)
    if args.factor:
        global resize_factor
        resize_factor = int(args.factor)
    if args.upscale:
        global up_scale
        up_scale = int(args.upscale)
    if args.detection:
        global detection_model
        detection_model = args.detection
    if args.jitters:
        global number_jitters
        number_jitters = int(args.jitters)
    if args.encoding:
        global encoding_model
        encoding_model = args.encoding
    if args.write:
        # FIX: previously this tested args.output, which both checked the
        # wrong option and raised AttributeError when -w was given
        # without -o.
        if args.write.upper() == 'OFF':
            global save_unknown
            save_unknown = False
    if args.confidence:
        global person_min_confidence
        person_min_confidence = float(args.confidence)
    if args.unclear:
        global folder_name
        folder_name = args.unclear

    # start logging and counter and create directory for any unknow faces just in case we find any
    global log
    logpath = Path(logdir)
    logpath.mkdir(parents=True, exist_ok=True)
    log = init_log(logfile, process, loglevel, logsize, logbackups)
    log.critical(
        'Starting program: %s with OpenCv version %s in %s mode and saving unknow face: %s'
        % (process, cv2.__version__, 'Screen' if gui else 'Headless',
           'On' if save_unknown else 'Off'))
    starttime = time.perf_counter()
    # create directory to store unknown faces detected
    unknown_faces_path = Path(unknown_face_dir)
    unknown_faces_path.mkdir(parents=True, exist_ok=True)
    # create logfile to store known faces detected
    global known_faces_log
    known_faces_logpath = Path(known_faces_dir)
    known_faces_logpath.mkdir(parents=True, exist_ok=True)
    known_faces_log = init_log(known_faces_logfile, 'known_faces', loglevel,
                               logsize, logbackups)
    known_faces_log.critical('Start logging known faces')

    for i in range(0, len(stream)):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
        log.info(f'- Detected stream{i}: {stream[i][0]}')
    number_sources = len(stream)

    # opening learned faces file
    log.info(f'- Opening learned faces file: {learnedfile}')
    with open(learnedfile, 'rb') as trainedfacesfile:
        # reading the learned faces file
        global Names
        Names = pickle.load(trainedfacesfile)
        global Sequence
        Sequence = pickle.load(trainedfacesfile)
        # TODO: Create updated learned faces file
        # global Filedate
        # Filedate = pickle.load(trainedfacesfile)
        global Encodings
        Encodings = pickle.load(trainedfacesfile)

    # create directory to save ambigious objects
    folder_path = Path(folder_name)
    folder_path.mkdir(parents=True, exist_ok=True)
    log.warning(f'- Ambigious objects will be saved in: {folder_path}')

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    log.warning('- Creating Pipeline')
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        log.critical('Error: Unable to create Pipeline')

    # Create nvstreammux instance to form batches from one or more sources.
    log.warning('- Creating streamux')
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        log.critical('Error: Unable to create NvStreamMux')
    pipeline.add(streammux)
    for i in range(number_sources):
        log.info(f'- Creating source_bin: {folder_name}/stream_{i}')
        stream_path = Path(f'{folder_name}/stream_{i}')
        stream_path.mkdir(parents=True, exist_ok=True)
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        uri_name = stream[i][0]
        if uri_name.startswith("rtsp://"):
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            log.critical('Error: Unable to create source bin')
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            log.critical('Error: Unable to create sink pad bin')
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            log.critical('Error: Unable to create src pad bin')
        srcpad.link(sinkpad)

    log.warning('- Creating Pgie')
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        log.critical('Error: Unable to create pgie')

    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    log.warning('- Creating nvvidconv1 and filter1 to convert frames to RGBA')
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        log.critical('Error: Unable to create nvvidconv1')
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        log.critical('Error: Unable to get the caps filter1')
    filter1.set_property("caps", caps1)

    # creating tiler
    log.warning('- Creating tiler')
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        log.critical('Error: Unable to create tiler')

    log.warning('- Creating nvvidconv')
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        log.critical('Error: Unable to create nvvidconv')

    log.warning('- Creating nvosd')
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        log.critical('Error: Unable to create nvosd')
    if (is_aarch64()):
        log.warning('- Creating transform for arch64')
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            log.critical('Error: Unable to create transform')

    log.warning('- Creating EGLSink')
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        log.critical('Error: Unable to create egl sink')

    if is_live:
        log.info('- At least one of the sources is live')
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest_imagedata_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        log.warning(
            f'Warning: Overriding infer-config batch-size {pgie_batch_size} with number of sources {number_sources}'
        )
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("sync", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    log.warning('- Adding elements to Pipeline')
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    log.warning('- Linking elements in the Pipeline')
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus mesages to it
    log.warning('- Create event loop')
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        log.critical('Error: Unable to get src pad')
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER,
                                 tiler_sink_pad_buffer_probe, 0)

    # List the sources
    # NOTE(review): this skips index 0 and the final stream entry — confirm
    # the slicing/condition is intentional.
    log.info('- Now playing...')
    for i, source in enumerate(stream[:-1]):
        if (i != 0):
            log.info(f'- {i}: {source}')

    # start play back and listed to events
    log.info(
        f'- Starting pipeline and processing with sampling rate: {sampling_rate}, resize factor: {resize_factor} and up scale: {up_scale}'
    )
    log.info(
        f'- Facial recognition is done with detection model: {detection_model}, number of jitters: {number_jitters} and encoding model: {encoding_model}'
    )
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    endtime = time.perf_counter()
    log.critical(
        f'Program {process} ended and took {endtime - starttime:0.2f} seconds to complete'
    )
    pipeline.set_state(Gst.State.NULL)
예제 #39
0
def main():
    """Run a single-RTSP-source DeepStream pipeline that ends in a fakesink.

    Topology: source_bin (RTSP) -> nvstreammux -> nvinfer -> nvvideoconvert
    -> capsfilter(RGBA) -> nvmultistreamtiler -> nvvideoconvert -> nvdsosd
    -> [queue on aarch64] -> fakesink. Results are consumed by a buffer
    probe on the tiler's src pad rather than rendered.

    Depends on module-level helpers defined elsewhere in this file:
    create_source_bin, is_aarch64, bus_call, refreshApp and
    tiler_src_pad_buffer_probe.
    """
    # NOTE(review): number_sources is assigned but never used below.
    number_sources = 1
    GObject.threads_init()
    Gst.init(None)
    pipeline = Gst.Pipeline()
    is_live = False
    uri_name = "rtsp://192.168.1.10:554/user=admin_password=tlJwpbo6_channel=1_stream=0.sdp"
    ds_pgie_config = '/home/proxeye/dev/proxeye/proxeye/resources/ds_pgie_config.txt'

    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    pipeline.add(streammux)

    # Wrap the RTSP URI in a source bin and feed it into the muxer's
    # requested sink_1 pad.
    source_bin = create_source_bin(1, uri_name)
    pipeline.add(source_bin)
    sinkpad = streammux.get_request_pad("sink_1")
    srcpad = source_bin.get_static_pad("src")
    srcpad.link(sinkpad)

    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    # RGBA caps make the frames easier to work with from Python.
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    filter1.set_property("caps", caps1)
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

    if (is_aarch64()):
        # NOTE(review): despite the variable name, 'transform' is a plain
        # queue here, not an nvegltransform — presumably acceptable with a
        # fakesink; confirm this is intentional.
        transform = Gst.ElementFactory.make("queue", "queue")

    sink = Gst.ElementFactory.make("fakesink", "fakesink")
    if is_live:
        streammux.set_property('live-source', 1)
    streammux.set_property('width', 640)
    streammux.set_property('height', 480)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', ds_pgie_config)
    sink.set_property('sync', False)
    # NOTE(review): pgie_batch_size is read but never used afterwards.
    pgie_batch_size = pgie.get_property("batch-size")

    tiler_rows = 1
    tiler_columns = 1
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", 640)
    tiler.set_property("height", 480)
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # Run refreshApp whenever the main loop is idle.
    GObject.idle_add(refreshApp)
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Buffer probe on the tiler's src pad consumes the inference output.
    tiler_src_pad = tiler.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)
    print("Now playing...")
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
예제 #40
0
import time

import numpy as np

import gi

gi.require_version('Gtk', '3.0')
gi.require_version('GLib', '2.0')
gi.require_version('GObject', '2.0')
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstPbutils', '1.0')
from gi.repository import GLib, GObject, Gst, GstBase, Gtk

# PyGObject older than 3.10.2 requires an explicit threads_init(); it is a
# no-op on newer versions, so calling it unconditionally is safe.
GObject.threads_init()
Gst.init([])
Gtk.init([])

from gi.repository import GstPbutils  # Must be called after Gst.init().

from PIL import Image

from gst_native import set_display_contexts
from pipelines import *

# Single-character command keys — presumably read from user input elsewhere
# in this file; their handlers are not visible in this chunk.
COMMAND_SAVE_FRAME = ' '
COMMAND_PRINT_INFO = 'p'
COMMAND_QUIT = 'q'
WINDOW_TITLE = 'Coral'