def main():
    if len(sys.argv) <= 1:
        # No filename was passed, let the user choose one.
        dialog = Gtk.FileChooserDialog(title="Choose a file to send",
                                       parent=None,
                                       action=Gtk.FileChooserAction.OPEN,
                                       buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                                                Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
        try:
            response = dialog.run()
            if response == Gtk.ResponseType.OK:
                filename = dialog.get_filename()
            else:
                # Nothing for us to do, exit with error code
                return 1
        finally:
            dialog.destroy()
    else:
        filename = sys.argv[1]

    Gdk.threads_init()
    GObject.threads_init()
    Notify.init('send-to-kindle')
    application = Application(filename)
    application.run()
def main():
    for opt in sys.argv[1:]:
        if opt in ('-h', '--help'):
            usage()
            sys.exit()
        elif opt in ('-d', '--dev'):
            config.ENABLE_INSPECTOR = True
        else:
            print("hotot: unrecognized option '%s'" % opt)
            usage()
            sys.exit(1)

    try:
        import i18n
    except ImportError:
        from gettext import gettext as _

    try:
        import prctl
        prctl.set_name('hotot')
    except Exception:
        # prctl is optional; ignore it if unavailable
        pass

    GObject.threads_init()
    config.loads()
    agent.init_notify()
    app = Hotot()
    agent.app = app

    Gdk.threads_enter()
    Gtk.main()
    Gdk.threads_leave()
def main():
    debug = False

    print("Trackma-gtk v{}".format(utils.VERSION))

    if '-h' in sys.argv:
        print("Usage: trackma-gtk [options]")
        print()
        print('Options:')
        print('  -d  Shows debugging information')
        print('  -h  Shows this help')
        return
    if '-d' in sys.argv:
        debug = True

    app = TrackmaWindow(debug)
    try:
        GObject.threads_init()
        Gdk.threads_init()
        Gdk.threads_enter()
        app.main()
        Gtk.main()
    except utils.TrackmaFatal as e:
        md = Gtk.MessageDialog(None,
                               Gtk.DialogFlags.DESTROY_WITH_PARENT,
                               Gtk.MessageType.ERROR,
                               Gtk.ButtonsType.CLOSE, str(e))
        md.run()
        md.destroy()
    finally:
        Gdk.threads_leave()
def app_main(dm_server): from danmaku_ui import Danmaku from gi.repository import Gtk, GLib, GObject def new_danmaku(dm_opts): for opt in dm_opts: try: Danmaku(**opt) except: continue def subscribe_danmaku(server="http://dm.tuna.moe/danmaku/stream"): while 1: try: res = requests.get(server) except requests.exceptions.ConnectionError: continue if res.status_code == 200 and res.text: try: dm_opts = json.loads(res.text) except: continue else: GLib.idle_add(new_danmaku, dm_opts) GObject.threads_init() thread_sub = threading.Thread( target=subscribe_danmaku, args=(dm_server, )) thread_sub.daemon = True thread_sub.start() Gtk.main()
def _import_gst(self):
    """Import the necessary GObject-related modules and assign `Gst`
    and `GObject` fields on this object.
    """
    try:
        import gi
    except ImportError:
        raise FatalReplayGainError(
            "Failed to load GStreamer: python-gi not found"
        )

    try:
        gi.require_version('Gst', '1.0')
    except ValueError as e:
        raise FatalReplayGainError(
            "Failed to load GStreamer 1.0: {0}".format(e)
        )

    from gi.repository import GObject, Gst, GLib
    # Calling GObject.threads_init() is not needed for
    # PyGObject 3.10.2+
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        GObject.threads_init()

    Gst.init([sys.argv[0]])

    self.GObject = GObject
    self.GLib = GLib
    self.Gst = Gst
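# The comment above notes that threads_init() is unnecessary on PyGObject
# 3.10.2 and later. A minimal, hedged sketch of guarding the call on the
# binding version (the same gi.version_info check other snippets here use),
# rather than suppressing the deprecation warning:
import gi
from gi.repository import GObject

# Sketch only: call the deprecated threads_init() solely on old PyGObject,
# where it is still required; on newer bindings it is no longer needed.
if gi.version_info < (3, 10, 2):
    GObject.threads_init()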
def run(): # Protobuf #c = ControllerProtobuf() # Directly connected to the vision server c = VisionManager() if not c.is_connected(): print("Vision server is not accessible.") return server = Server() server.start("127.0.0.1", 5030) # add observer output for "server" c.add_filter_output_observer(server.send) from gi.repository import Gtk, GObject import CapraVision.client.gtk.main GObject.threads_init() w = CapraVision.client.gtk.main.WinFilterChain(c) w.window.show_all() Gtk.main() # Close connection server.stop() c.close_server()
def run(self, argv):
    GObject.threads_init()
    GObject.timeout_add(10, self.on_timer)
    self.connect("startup", self.on_startup)
    self.connect("activate", self.on_activate)
    self.connect("shutdown", self.on_shutdown)
    return super(Gtk3Example, self).run(argv)
def initialize_modules(): """ Initialize the modules. This has to be done in a specific order otherwise the app crashes on some systems. """ from gi.repository import Gdk Gdk.init([]) from gi.repository import GtkClutter GtkClutter.init([]) import gi if not gi.version_info >= (3, 11): from gi.repository import GObject GObject.threads_init() from gi.repository import Gst Gst.init(None) from gi.repository import GES GES.init() # This is required because of: # https://bugzilla.gnome.org/show_bug.cgi?id=656314 from gi.repository import GdkX11 GdkX11 # noop
def __init__(self, pls=None, *args, **kwargs): super(GstZOCP, self).__init__(*args, **kwargs) GObject.threads_init() self.loop = GObject.MainLoop() Gst.init(None) if pls == None: pls = "" #pls = "file:///home/people/arnaud/Videos/tordinaire-youtubeHD.mp4" #pls = "file:///home/pi/test3.h264,file:///home/pi/tordinaire-youtubeHD.mp4" #pls = "file:///home/people/arnaud/Videos/test.h264,file:///home/people/arnaud/Videos/test2.h264" self.count = 0 # create elements self.playbin = Gst.ElementFactory.make('playbin', 'playbin0') self.glcolorconv = Gst.ElementFactory.make("glcolorscale", "glcolorconv0") self.glshader = Gst.ElementFactory.make("glshader", "glshader0") self.glimagesink = Gst.ElementFactory.make('glimagesink', "glimagesink0") self.sinkbin = Gst.Bin() # setup the pipeline #videosrc.set_property("video-sink", glimagesink) #self.playbin.set_property("uri", pls.split(',')[self.count]) #self.glimagesink.set_locked_state(True) self.sinkbin.add(self.glcolorconv) self.sinkbin.add(self.glshader) self.sinkbin.add(self.glimagesink) # we add a message handler self.bus = self.playbin.get_bus() self.bus.add_watch(0, self.bus_call, self.loop) # 0 == GLib.PRIORITY_DEFAULT # we link the elements together self.glcolorconv.link(self.glshader) self.glshader.link(self.glimagesink) ghostpad = Gst.GhostPad.new("sink", self.glcolorconv.get_static_pad("sink")) self.sinkbin.add_pad(ghostpad) #self.playbin.connect("pad-added", self.on_pad_added, self.sinkbin) #self.playbin.connect("drained", self.on_drained) #self.playbin.connect("about-to-finish", self.update_uri) # set properties of elements self.glshader.set_property("location", "shader.glsl") self.glshader.set_property("vars", "float alpha = float(1.);") self.glshader.set_property("preset", "preset.glsl") self.playbin.set_property("video-sink",self.sinkbin) self.set_name("zvidplyr@{0}".format(socket.gethostname())) self.register_bool("quit", False, access='rw') self.register_vec2f("top_left", (-1.0, 1.0), access='rw', step=[0.01, 0.01]) self.register_vec2f('top_right', (1.0, 1.0), access='rw', step=[0.01, 0.01]) self.register_vec2f('bottom_right', (1.0, -1.0), access='rw', step=[0.01, 0.01]) self.register_vec2f('bottom_left', (-1.0, -1.0), access='rw', step=[0.01, 0.01]) self.register_string("playlist", pls, access="rws") self.register_bool("loop", True, access="rwse") self.register_bool("fade", False, access="rwse") self.register_vec3f("fade_color", (1,0,0), access="rws") self.register_bool("pause", False, access="rwse") self.register_bool("stop", False, access="rwse") self._fade_val = 1.0
def run():
    # CLASS NOT CURRENTLY USED
    # Protobuf
    #c = ControllerProtobuf()

    print("In main/maingtk. Do you really want to be here?")

    # Directly connected to the vision server
    c = VisionManager()

    if not c.is_connected():
        print("Vision server is not accessible.")
        return

    #server = Server()
    # server.start("127.0.0.1", 5030)

    # add observer output for "server"
    #c.add_filter_output_observer(server.send)

    from gi.repository import Gtk, GObject
    import CapraVision.client.gtk.main

    GObject.threads_init()
    # w = CapraVision.client.gtk.main.WinFilterChain(c)
    #w.window.show_all()
    Gtk.main()

    # Close connection
    #server.stop()
    c.close_server()
def install_gobject_iteration():
    '''Import and install gobject context iteration inside our event loop.
    This is used as soon as gobject is used (like gstreamer).
    '''
    from kivy.clock import Clock

    try:
        from gi.repository import GObject as gobject
    except ImportError:
        import gobject

    if hasattr(gobject, '_gobject_already_installed'):
        # already installed, don't do it twice.
        return

    gobject._gobject_already_installed = True

    # get gobject mainloop / context
    loop = gobject.MainLoop()
    gobject.threads_init()
    context = loop.get_context()

    # schedule the iteration each frame
    def _gobject_iteration(*largs):
        # XXX we need to loop over context here, otherwise, we might have a lag
        loop = 0
        while context.pending() and loop < 10:
            context.iteration(False)
            loop += 1

    Clock.schedule_interval(_gobject_iteration, 0)
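# A brief usage sketch, assuming the helper above is importable as
# kivy.support.install_gobject_iteration (where Kivy ships it): call it once,
# before any GObject/GStreamer work, so the GLib default context is iterated
# from Kivy's Clock. The app and label below are illustrative only.
from kivy.app import App
from kivy.uix.label import Label
from kivy.support import install_gobject_iteration


class GstDemoApp(App):
    def build(self):
        # Install the GLib context iteration before any GObject/GStreamer use,
        # so pending GLib events are serviced once per Kivy frame.
        install_gobject_iteration()
        return Label(text='gobject iteration installed')


if __name__ == '__main__':
    GstDemoApp().run()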
def startPipelineProcess(device, size, rotation, source, encoding,
                         onListeningEvent, errorState, procPipe, debugLevel=0):
    from gi.repository import GObject

    GObject.threads_init()
    mainLoop = GObject.MainLoop()

    logger = logging.getLogger(__name__ + ':processLoop')

    interface = None

    def onFatalError(details):
        if interface:
            interface.sendResponse(0, {'error': 'fatal_error', 'details': details})
        else:
            # There was a fatal error during the creation of the pipeline,
            # the interface has not even been created
            logger.error('Fatal error creating pipeline: %s' % details)
            raise SystemExit(-1)

    try:
        pipeline = pipelineFactory(device, size, rotation, source, encoding,
                                   onFatalError, mainLoop, debugLevel)
    except InvalidGStreamerPipelineException as e:
        logger.error(e)
        raise SystemExit(-1)

    interface = processInterface(pipeline, procPipe, mainLoop, onListeningEvent)

    try:
        interface.start()
        logger.debug('Pipeline process started')
        mainLoop.run()
    except (KeyboardInterrupt, SystemExit):
        mainLoop.quit()
def main():
    GObject.threads_init()
    Gst.init(None)

    wcw = WebcamWidget()
    wcw.show()

    Gtk.main()
    exit(0)
def main():
    notification_manager.init("Scrapstream")
    GObject.threads_init()  # Necessary to use multithreading
    window = StreamWindow()
    window.show()
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    Gtk.main()
def __init__(self):
    GObject.threads_init()
    Gst.init(None)
    self.pipeline = None
    self._create_pipeline(default_song)
    loop = GObject.MainLoop()
    threading.Thread(target=loop.run, daemon=True).start()
def main_app():
    """
    Before using Python threads, or libraries that use threads, you must call
    GObject.threads_init(). This is needed because GTK+ isn't thread safe:
    only the main thread is allowed to call GTK+ code at any time.

    The function isn't provided by the GObject library itself; it initializes
    thread support in the PyGObject bindings (it was called
    gobject.threads_init() in PyGTK, and despite the name it is really gi's
    thread initialization).

    GObject.idle_add() takes a function plus the arguments that will get
    passed to it, and asks the main loop to schedule its execution in the
    main thread.

    Instantiate the model and view, then pass them to the main controller.
    """
    GObject.threads_init()
    App_Model = Model()
    App_Window = Main_Window()
    App = Main_Controller(App_Window, App_Model)
    App.show()
    Gtk.main()
    App.quit()
    print("Exiting Gtk-Main Thread")
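# As the docstring above explains, only the main thread may touch GTK+
# widgets, so worker threads hand results back through idle_add(). A
# self-contained sketch of that pattern using GLib.idle_add (the modern home
# of the deprecated GObject.idle_add); the window and label are illustrative,
# not part of the application above.
import threading
import time

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib


def worker(label):
    # Simulate a long-running job off the main thread.
    time.sleep(2)
    # Ask the GTK main loop to run the widget update in the main thread.
    GLib.idle_add(label.set_text, "worker finished")


win = Gtk.Window(title="idle_add demo")
label = Gtk.Label(label="working...")
win.add(label)
win.connect("destroy", Gtk.main_quit)
win.show_all()

threading.Thread(target=worker, args=(label,), daemon=True).start()
Gtk.main()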
def __init__(self, auth): import dbus from gi.repository import GObject as gobject from dbus.mainloop.glib import DBusGMainLoop gobject.threads_init() bus = dbus.SystemBus( mainloop=DBusGMainLoop(), private=True) self.bus = bus self.auth = auth self.callbacks = [] # local context required!? # http://rmarko.fedorapeople.org/random/high_five.jpg evt_match = self.bus.add_signal_receiver( self._new_problem_handler, signal_name='Crash', path='/org/freedesktop/problems') # add second listener for the old path evt_match_old_path = self.bus.add_signal_receiver( self._new_problem_handler, signal_name='Crash', path='/com/redhat/abrt') self.loop = gobject.MainLoop()
def main(): if len(sys.argv) < 3: exit("Usage: {0} <url> <quality>".format(sys.argv[0])) gi.require_version("Gst", "1.0") gobject.threads_init() gst.init(None) url = sys.argv[1] quality = sys.argv[2] livestreamer = Livestreamer() livestreamer.set_loglevel("info") livestreamer.set_logoutput(sys.stdout) try: streams = livestreamer.streams(url) except NoPluginError: exit("Livestreamer is unable to handle the URL '{0}'".format(url)) except PluginError as err: exit("Plugin error: {0}.".format(err)) if not streams: exit("No streams found on URL '{0}'.".format(url)) if quality not in streams: exit("Unable to find '{0}' stream on URL '{1}'".format(quality, url)) stream = streams[quality] player = LivestreamerPlayer() player.play(stream)
def main(): global vfsEnabledSmbShares bus.add_signal_receiver(onSnapCreated, dbus_interface="org.opensuse.Snapper", signal_name="SnapshotCreated") logging.debug('Added signal receiver for snapper ''SnapshotCreated'' dBus-Signal') bus.add_signal_receiver(onSnapsDeleted, dbus_interface="org.opensuse.Snapper", signal_name="SnapshotsDeleted") logging.debug('Added signal receiver for snapper ''SnapshotsDeleted'' dBus-Signal') bus.add_signal_receiver(onSmbConfChanged, dbus_interface="com.example.Sample", signal_name="smbConfChanged") logging.debug('Added signal receiver for selfCreated smConfWatcher ''smbConfChanged'' dBus-Signal') vfsEnabledSmbShares = getSmbShadowCopyEnabledPathes() logging.info('using share pathes for comparision: ' + str(list(vfsEnabledSmbShares.keys()))) smbConfPath = '/etc/samba/smb.conf' # Enable File Monitoring on sm.conf gio_file = Gio.File.new_for_path(smbConfPath) monitor = gio_file.monitor_file(Gio.FileMonitorFlags.NONE, None) monitor.connect("changed", onSmbConfChanged) onStartCleanupSymlinks() global loop loop = GObject.MainLoop() # for signal handling: refer to : http://stackoverflow.com/questions/26388088/python-gtk-signal-handler-not-working GObject.threads_init() InitSignal(loop) # connect to System signals as definied through InitSignal() and sub functions loop.run()
def __init__(self): GObject.threads_init() self.settings = Settings() self.api = GoogleMusic() builder = Gtk.Builder() builder.add_from_file('ui/main.glade') builder.connect_signals(self) self.loading_modal = builder.get_object('loadingModal') self.loading_modal_label = builder.get_object('loadingModalLabel') self.window = builder.get_object('mainWindow') self.notebook = builder.get_object('mainWindowNotebook') self.status_bar = builder.get_object('statusBar') self.status_bar_context_id = self.status_bar.get_context_id('search') self.preferences_dialog = builder.get_object('preferencesDialog') self.preferences_username_entry = builder.get_object('preferencesUsernameEntry') self.preferences_password_entry = builder.get_object('preferencesPasswordEntry') self.preferences_directory_chooser = builder.get_object('preferencesDirectoryChooser') self.search_entry = builder.get_object('searchEntry') self.track_list_store = builder.get_object('trackListStore') self.download_list_store = builder.get_object('downloadListStore') self.results_tree_view = builder.get_object('resultsTreeView') self.window.show_all()
def clutter_proc(self): try: from gi.repository import Clutter, GObject, Gtk, GtkClutter # explicit init seems to avoid strange thread sync/blocking issues GObject.threads_init() GtkClutter.init([]) # create main window from mfp.gui.patch_window import PatchWindow self.appwin = PatchWindow() self.mfp = MFPCommand() except Exception as e: log.error("Fatal error during GUI startup") log.debug_traceback() return try: # direct logging to GUI log console Gtk.main() except Exception as e: log.error("Caught GUI exception:", e) log.debug_traceback() sys.stdout.flush()
def run(self):
    logging.debug("initializing the player")
    # initialize GObject/GStreamer thread support for this player thread
    GObject.threads_init()
    Gst.init(None)
    # create and start the main loop
    loop = GObject.MainLoop()
    loop.run()
def start(self):
    self.connect("destroy", Gtk.main_quit)
    self.set_position(Gtk.WindowPosition.CENTER)
    self.show_all()
    GObject.threads_init()
    Gtk.main()
    return self.accepted
def set_threaded(self, mainloop=None): """ Start the gobject mainloop in a thread. This function should always be used together with the generic mainloop. It is possible to jump between the gobject and the generic mainloop with the threaded decorator. :param mainloop: the mainloop object to use a mainloop based on gobject like the gstreamer or clutter mainloop. The object provided here must have a start and a stop function. """ if self.init: raise RuntimeError('gobject loop already running') if self.thread: return self.thread = True # Register this class as a thread pool thread_support._thread_pools[GOBJECT] = self if gobject is not None: # init thread support in the module importing gobject gobject.threads_init() self.loop(mainloop) # make sure we get a clean shutdown main_module.signals['shutdown'].connect_once(self.stop, True)
def __init__(self, gui, instance, ui_manager): Zoomable.__init__(self) Gtk.VBox.__init__(self) GObject.threads_init() self.gui = gui self.ui_manager = ui_manager self.app = instance self._settings = None if self.app: self._settings = self.app.settings self._projectmanager = None self._project = None self.pipeline = None self._createUi() self._createActions() self._setUpDragAndDrop() if self._settings: self._settings.connect("edgeSnapDeadbandChanged", self._snapDistanceChangedCb) # Standalone if not self._settings: gtksettings = Gtk.Settings.get_default() gtksettings.set_property("gtk-application-prefer-dark-theme", True) self.show_all()
def run_player(cmdline, init_threads=True, keyboard_engulfer=True):
    '''
    This is the main function to play a video; cmdline is the omxplayer command.

    Set init_threads to False if your app is multi-threaded and you already
    called GObject.threads_init().

    If your app is multi-threaded, you want to set keyboard_engulfer to False
    and provide your own strategy to capture all mouse and keyboard events
    that traverse through the omxplayer video window. Otherwise, setting
    keyboard_engulfer to True will do that for you, by creating a fake full
    screen window that captures and discards all these events.
    '''
    rc = -1
    if init_threads:
        GObject.threads_init()
    if keyboard_engulfer:
        win = VideoKeyboardEngulfer(cmdline)
        win.connect("destroy", Gtk.main_quit)
        win.show_all()
        Gtk.main()
        rc = win.rc
    else:
        rc = run_video(None, cmdline)
    return rc
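# A hedged call-site sketch for run_player(). The omxplayer path, flags and
# media file below are illustrative assumptions, and whether cmdline should be
# a string or an argv-style list depends on run_video(), which is not shown
# here; a list is assumed.
cmdline = ['/usr/bin/omxplayer', '--no-osd', '/home/pi/videos/clip.mp4']

# Single-threaded caller: let run_player() call GObject.threads_init() and
# swallow stray input events via the fullscreen engulfer window.
rc = run_player(cmdline, init_threads=True, keyboard_engulfer=True)
print("omxplayer exited with return code", rc)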
def __init__(self, name, autoAnswer): if sys.version_info[0] < 3: super(DRingCtrl, self).__init__() else: super().__init__() self.activeCalls = {} # list of active calls (known by the client) self.activeConferences = {} # list of active conferences self.account = None # current active account self.name = name # client name self.autoAnswer = autoAnswer self.currentCallId = "" self.currentConfId = "" self.isStop = False # Glib MainLoop for processing callbacks self.loop = GObject.MainLoop() GObject.threads_init() # client registered to sflphoned ? self.registered = False self.register()
def let_it_rain(): GObject.threads_init() Gst.init(None) signal.signal(signal.SIGINT, signal.SIG_DFL) Notify.init("silver-rain") # Create system directories if not os.path.exists(IMG_DIR): os.makedirs(IMG_DIR) # Initialize config config.setup() # Create directory for recordings if not os.path.exists(config.recs_dir): os.makedirs(config.recs_dir) # Load css css_load() # Init translation set_translation() # Init application silver_app = SilverApp() # Setup dbus service service = SilverService(silver_app) # Run loop Gtk.main() # Cleanup silver_app.clean() Notify.uninit()
def __init__(self, config):
    GObject.threads_init()
    self.queue = Queue()
    self.config = config
    self.app = Application(self.update, self.queue, self.config)
    self.info = self.app.last_info
def __init__(self):
    """
    Initialises the plugin object.
    """
    GObject.Object.__init__(self)

    if not rb3compat.compare_pygobject_version('3.9'):
        GObject.threads_init()
def __init__(self, handle): '''Set up the Acoustic Tape Measure activity.''' super(AcousticMeasureActivity, self).__init__(handle) #self.set_title(_('Acoustic Tape Measure Activity')) self._logger = logging.getLogger('acousticmeasure-activity') GObject.threads_init() try: self._logger.debug("locale: " + locale.setlocale(locale.LC_ALL, '')) except locale.Error: self._logger.error("setlocale failed") # top toolbar with share and close buttons: from sugar3.graphics.toolbarbox import ToolbarBox from sugar3.graphics.toolbarbox import ToolbarButton from sugar3.activity.widgets import ShareButton from sugar3.activity.widgets import StopButton from sugar3.activity.widgets import ActivityButton from sugar3.activity.widgets import TitleEntry toolbar_box = ToolbarBox() activity_button = ActivityButton(self) toolbar_box.toolbar.insert(activity_button, 0) activity_button.show() title_entry = TitleEntry(self) toolbar_box.toolbar.insert(title_entry, -1) title_entry.show() try: from sugar3.activity.widgets import DescriptionItem description_item = DescriptionItem(self) toolbar_box.toolbar.insert(description_item, -1) description_item.show() except BaseException: pass share_button = ShareButton(self) toolbar_box.toolbar.insert(share_button, -1) share_button.show() separator = Gtk.SeparatorToolItem() toolbar_box.toolbar.insert(separator, -1) separator.show() self._t_h_bar = atm_toolbars.TempToolbar() self._t_h_bar.show_all() adj_button = ToolbarButton(page=self._t_h_bar, icon_name='preferences-system') toolbar_box.toolbar.insert(adj_button, -1) adj_button.show() self._smoot_bar = smoot_toolbar.SmootToolbar(self) self._smoot_bar.show_all() custom_button = ToolbarButton(page=self._smoot_bar, icon_name='view-source') toolbar_box.toolbar.insert(custom_button, -1) custom_button.show() separator = Gtk.SeparatorToolItem() separator.props.draw = False separator.set_expand(True) toolbar_box.toolbar.insert(separator, -1) separator.show() stop_button = StopButton(self) toolbar_box.toolbar.insert(stop_button, -1) stop_button.show() self.set_toolbar_box(toolbar_box) toolbar_box.show() if not self.powerd_running(): try: bus = dbus.SystemBus() proxy = bus.get_object('org.freedesktop.ohm', '/org/freedesktop/ohm/Keystore') self.ohm_keystore = dbus.Interface( proxy, 'org.freedesktop.ohm.Keystore') except dbus.DBusException as e: self._logger.warning("Error setting OHM inhibit: %s" % e) self.ohm_keystore = None #distance in meters self.current_distance = 0.0 # worker thread self._button_event = threading.Event() thread.start_new_thread(self._helper_thread, ()) # Main Panel GUI vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL) self.main_panel = vbox self._message_dict['unshared'] = _("To measure the distance between \ two laptops, you must first share this Activity.") self._message_dict['ready'] = _("Press the button to measure the \ distance to another laptop") self._message_dict['preparing'] = _("Preparing to measure distance") self._message_dict['waiting'] = _("Ready to make a measurement. 
\ Waiting for partner to be ready.") self._message_dict['playing'] = _("Recording sound from each laptop.") self._message_dict['processing'] = _("Processing recorded audio.") self._message_dict['done'] = self._message_dict['ready'] self._message_dict['full'] = _("This activity already has two \ participants, so you cannot join.") self._button_dict['waiting'] = _("Begin Measuring Distance") self._button_dict['going'] = _("Stop Measuring Distance") self.button = Gtk.ToggleButton(label=self._button_dict['waiting']) self.button.connect('clicked', self._button_clicked) self.button.set_sensitive(False) check = Gtk.Image() check.set_from_file('check.svg') self.button.set_image(check) self.message = Gtk.Label(label=self._message_dict['unshared']) self.message.set_selectable(True) self.message.set_single_line_mode(True) img = Gtk.Image() pb = GdkPixbuf.Pixbuf.new_from_file( sugar3.activity.activity.get_bundle_path() + '/dist.svg') img.set_from_pixbuf(pb) self.value = Gtk.Label() self.value.set_selectable(True) thread.start_new_thread(self._update_distance, (0, )) valuefont = Pango.FontDescription() valuefont.set_family("monospace") valuefont.set_absolute_size(100 * Pango.SCALE) self.value.modify_font(valuefont) self.value.set_single_line_mode(True) self.value.set_width_chars(6) eb = Gtk.EventBox() eb.add(self.value) eb.modify_bg(Gtk.StateType.NORMAL, Gdk.color_parse("white")) eb.set_margin_left(10) eb.set_margin_right(10) eb.set_margin_top(10) self.fr = Gtk.Frame() self.fr.set_label(_('Measured distance in %s') % _('meters')) self.fr.set_label_align(0.5, 0.5) self.fr.add(eb) self.main_panel.pack_start( self.button, expand=False, fill=False, padding=6) self.main_panel.pack_start( self.message, expand=False, fill=True, padding=0) self.main_panel.pack_start(img, expand=True, fill=False, padding=0) self.main_panel.pack_start( self.fr, expand=False, fill=False, padding=10) self.set_canvas(self.main_panel) self.show_all() self.server_socket = None self.main_socket = None self.main_socket_addr = None self.main_tube_id = None self.initiating = False # get the Presence Service self.pservice = presenceservice.get_instance() # Buddy object for you owner = self.pservice.get_owner() self.owner = owner self.connect('shared', self._shared_cb) self.connect('joined', self._joined_cb) self.connect('key-press-event', self._keypress_cb)
def main(args): # Check input arguments # Permite introducir un numero x de fuentes, en nuestro caso streamings delas camaras Meraki number_sources = len(args) - 1 if number_sources + 1 < 2: sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0]) sys.exit(1) # Arreglo de FPS, para manejo con Tiler for i in range(0, number_sources): fps_streams["stream{0}".format(i)] = GETFPS(i) # Standard GStreamer initialization GObject.threads_init() Gst.init(None) # Create gstreamer elements # Create Pipeline element that will form a connection of other elements print("Creating Pipeline \n ") pipeline = Gst.Pipeline() if not pipeline: sys.stderr.write(" Unable to create Pipeline \n") # Source element for reading from the file print("Creating Source \n ") # Variable para verificar si al menos un video esta vivo is_live = False # Create nvstreammux instance to form batches from one or more sources. streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n") pipeline.add(streammux) # Se crea elemento que acepta todo tipo de video o RTSP for i in range(number_sources): print("Creating source_bin ", i, " \n ") uri_name = args[i + 1] if uri_name.find("rtsp://") == 0: is_live = True source_bin = create_source_bin(i, uri_name) if not source_bin: sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname = "sink_%u" % i sinkpad = streammux.get_request_pad(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad = source_bin.get_static_pad("src") if not srcpad: sys.stderr.write("Unable to create src pad bin \n") srcpad.link(sinkpad) # el video con RTSP para Meraki viene optimizado a H264, por lo que no debe ser necesario crear un elemento h264parser stream #print("Creating H264Parser \n") #h264parser = Gst.ElementFactory.make("h264parse", "h264-parser") #if not h264parser: # sys.stderr.write(" Unable to create h264 parser \n") # Use nvdec_h264 for hardware accelerated decode on GPU # el video con RTSP para Meraki viene optimizado a H264, pero si necesita ser decodificado print("Creating Decoder \n") decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder") if not decoder: sys.stderr.write(" Unable to create Nvv4l2 Decoder \n") # Use nvinfer to run inferencing on decoder's output, # behaviour of inferencing is set through config file pgie = Gst.ElementFactory.make("nvinfer", "primary-inference") if not pgie: sys.stderr.write(" Unable to create pgie \n") tracker = Gst.ElementFactory.make("nvtracker", "tracker") if not tracker: sys.stderr.write(" Unable to create tracker \n") sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine") if not sgie1: sys.stderr.write(" Unable to make sgie1 \n") sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine") if not sgie1: sys.stderr.write(" Unable to make sgie2 \n") sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine") if not sgie3: sys.stderr.write(" Unable to make sgie3 \n") print("Creating tiler \n ") tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler") if not tiler: sys.stderr.write(" Unable to create tiler \n") nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor") if not nvvidconv: sys.stderr.write(" Unable to create nvvidconv \n") # Create OSD to draw on the converted RGBA buffer nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay") if not nvosd: sys.stderr.write(" Unable to create nvosd \n") # Finally render the osd output if 
is_aarch64(): transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform") print("Creating EGLSink \n") sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") if is_live: print("At least one of the sources is live") streammux.set_property('live-source', 1) #streammux.set_property('live-source', 1) streammux.set_property('width', 1920) streammux.set_property('height', 1080) streammux.set_property('batch-size', 1) streammux.set_property('batched-push-timeout', 4000000) pgie.set_property('config-file-path', CURRENT_DIR + "/dstest2_pgie_config.txt") # Falta añadir la ruta completa del archivo de configuracion pgie_batch_size = pgie.get_property("batch-size") if pgie_batch_size != number_sources: print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ", number_sources, " \n") pgie.set_property("batch-size", number_sources) # Set properties of pgie and sgie #pgie.set_property('config-file-path', CURRENT_DIR + "/dstest2_pgie_config.txt") sgie1.set_property('config-file-path', CURRENT_DIR + "/dstest2_sgie1_config.txt") sgie2.set_property('config-file-path', CURRENT_DIR + "/dstest2_sgie2_config.txt") sgie3.set_property('config-file-path', CURRENT_DIR + "/dstest2_sgie3_config.txt") # Set properties of tracker config = configparser.ConfigParser() config.read('dstest2_tracker_config.txt') config.sections() for key in config['tracker']: if key == 'tracker-width': tracker_width = config.getint('tracker', key) tracker.set_property('tracker-width', tracker_width) elif key == 'tracker-height': tracker_height = config.getint('tracker', key) tracker.set_property('tracker-height', tracker_height) elif key == 'gpu-id': tracker_gpu_id = config.getint('tracker', key) tracker.set_property('gpu_id', tracker_gpu_id) elif key == 'll-lib-file': tracker_ll_lib_file = config.get('tracker', key) tracker.set_property('ll-lib-file', tracker_ll_lib_file) elif key == 'll-config-file': tracker_ll_config_file = config.get('tracker', key) tracker.set_property('ll-config-file', tracker_ll_config_file) elif key == 'enable-batch-process': tracker_enable_batch_process = config.getint('tracker', key) tracker.set_property('enable_batch_process', tracker_enable_batch_process) # Creacion del marco de tiler tiler_rows = int(math.sqrt(number_sources)) tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows)) tiler.set_property("rows", tiler_rows) tiler.set_property("columns", tiler_columns) tiler.set_property("width", TILED_OUTPUT_WIDTH) tiler.set_property("height", TILED_OUTPUT_HEIGHT) print("Adding elements to Pipeline \n") pipeline.add(decoder) pipeline.add(pgie) pipeline.add(tracker) pipeline.add(sgie1) pipeline.add(sgie2) pipeline.add(sgie3) pipeline.add(tiler) pipeline.add(nvvidconv) pipeline.add(nvosd) pipeline.add(sink) if is_aarch64(): pipeline.add(transform) # we link the elements together # source_bin -> -> nvh264-decoder -> PGIE -> Tracker # tiler -> nvvidconv -> nvosd -> video-renderer print("Linking elements in the Pipeline \n") #source.link(h264parser) #h264parser.link(decoder) sinkpad = streammux.get_request_pad("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") srcpad = decoder.get_static_pad("src") if not srcpad: sys.stderr.write(" Unable to get source pad of decoder \n") # Flujo anterior antes de incluir el decoder #srcpad.link(sinkpad) #source_bin.link(streammux) #streammux.link(pgie) #pgie.link(tracker) #tracker.link(sgie1) #sgie1.link(sgie2) 
#sgie2.link(sgie3) #sgie3.link(nvvidconv) #nvvidconv.link(nvosd) # Flujo Añadiendo el decoder srcpad.link(sinkpad) source_bin.link(decoder) decoder.link(streammux) streammux.link(pgie) pgie.link(tracker) tracker.link(sgie1) sgie1.link(sgie2) sgie2.link(sgie3) sgie3.link(tiler) tiler.link(nvvidconv) nvvidconv.link(nvosd) if is_aarch64(): nvosd.link(transform) transform.link(sink) else: nvosd.link(sink) # create and event loop and feed gstreamer bus mesages to it loop = GObject.MainLoop() bus = pipeline.get_bus() bus.add_signal_watch() bus.connect("message", bus_call, loop) # Lets add probe to get informed of the meta data generated, we add probe to # the sink pad of the osd element, since by that time, the buffer would have # had got all the metadata. tiler_src_pad = tracker.get_static_pad("src") if not tiler_src_pad: sys.stderr.write(" Unable to get src pad \n") else: tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0) #osdsinkpad = nvosd.get_static_pad("sink") #if not osdsinkpad: # sys.stderr.write(" Unable to get sink pad of nvosd \n") #osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) print("Starting pipeline \n") # start play back and listed to events pipeline.set_state(Gst.State.PLAYING) try: loop.run() except Exception as e: print(str(e)) pass # cleanup pipeline.set_state(Gst.State.NULL)
freq_spinner.connect("value-changed", self._freq_changed) sound_check = Gtk.CheckButton("Play sound") if "Gst" in _optmissing: self.prefs["sound"] = False sound_check.set_sensitive(False) sound_check.connect("toggled", self._sound_toggled) sound_check.set_active(self.prefs["sound"]) close_button.connect("clicked", lambda w: self.hide()) hbox1.add(freq_label) hbox1.add(freq_spinner) hbox2.add(close_button) hbox3.add(sound_check) vbox.add(hbox1) vbox.add(hbox3) vbox.add(hbox2) self.add(vbox) vbox.show_all() def _freq_changed(self, widget): self.prefs["freq"] = widget.get_value() * 60 def _sound_toggled(self, widget): self.prefs["sound"] = widget.get_active() if __name__ == "__main__": GObject.threads_init() if not "Gst" in _optmissing: Gst.init(None) Indicator() Gtk.main()
def main(): # Standard GStreamer initialization GObject.threads_init() Gst.init(None) # Create Pipeline Element pipeline = Gst.Pipeline() if not pipeline: print("Unable to create Pipeline") return False # Create GST Elements source = create_element_or_error("filesrc", "file-source") # parser = create_element_or_error("h264parse", "parse") # decoder = create_element_or_error("nvv4l2decoder", "decoder") # streammux = create_element_or_error("nvstreammux", "Stream-muxer") # pgie = create_element_or_error("nvinfer", "primary-inference") # tracker = create_element_or_error("nvtracker", "tracker") # convertor = create_element_or_error("nvvideoconvert", "convertor-1") # nvosd = create_element_or_error("nvdsosd", "onscreendisplay") transform = create_element_or_error("nvegltransform", "nvegl-transform") sink = create_element_or_error("nveglglessink", "egl-overlay") # Set Element Properties source.set_property('location', './videos/sample_qHD.h264') # sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/stream-test') # streammux.set_property('width', 1280) # streammux.set_property('height', 720) # streammux.set_property('batch-size', 1) # streammux.set_property('batched-push-timeout', 4000000) # pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt") # tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_nvdcf.so') # tracker.set_property('gpu-id', 0) # tracker.set_property('enable-past-frame', 1) # tracker.set_property('enable-batch-process', 1) # tracker.set_property('ll-config-file', '/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/tracker_config.yml') # Add Elemements to Pipielin print("Adding elements to Pipeline") pipeline.add(source) # pipeline.add(parser) # pipeline.add(decoder) # pipeline.add(streammux) # pipeline.add(pgie) # pipeline.add(tracker) # pipeline.add(convertor) # pipeline.add(nvosd) pipeline.add(transform) pipeline.add(sink) # source.link(parser) # parser.link(decoder) # sinkpad = streammux.get_request_pad("sink_0") # if not sinkpad: # sys.stderr.write(" Unable to get the sink pad of streammux \n") # srcpad = decoder.get_static_pad("src") # if not srcpad: # sys.stderr.write(" Unable to get source pad of decoder \n") # srcpad.link(sinkpad) # Link the elements together: print("Linking elements in the Pipeline") # source.link(parser) # parser.link(decoder) # decoder.link(streammux) # streammux.link(pgie) # pgie.link(tracker) # tracker.link(convertor) # convertor.link(nvosd) # nvosd.link(transform) source.link(transform) transform.link(sink) # Create an event loop and feed gstreamer bus mesages to it loop = GObject.MainLoop() bus = pipeline.get_bus() bus.add_signal_watch() bus.connect("message", bus_call, loop) # Start play back and listen to events print("Starting pipeline") pipeline.set_state(Gst.State.PLAYING) try: loop.run() except: pass # Cleanup pipeline.set_state(Gst.State.NULL)
def _gtk_init(): """Call before using Gtk/Gdk""" import gi # make sure GdkX11 doesn't get used under Windows if os.name == "nt": sys.modules["gi.repository.GdkX11"] = None try: # not sure if this is available under Windows gi.require_version("GdkX11", "3.0") from gi.repository import GdkX11 GdkX11 except (ValueError, ImportError): pass gi.require_version("GLib", "2.0") gi.require_version("Gtk", "3.0") gi.require_version("Gdk", "3.0") gi.require_version("GObject", "2.0") gi.require_version("Pango", "1.0") gi.require_version("GdkPixbuf", "2.0") gi.require_version("Gio", "2.0") from gi.repository import Gtk, GObject, Gdk, GdkPixbuf # add Gtk.TreePath.__getitem__/__len__ for PyGObject 3.2 try: Gtk.TreePath()[0] except TypeError: Gtk.TreePath.__getitem__ = lambda self, index: list(self)[index] Gtk.TreePath.__len__ = lambda self: self.get_depth() # GTK+ 3.4+ constants if not hasattr(Gdk, "BUTTON_PRIMARY"): Gdk.BUTTON_PRIMARY = 1 Gdk.BUTTON_MIDDLE = 2 Gdk.BUTTON_SECONDARY = 3 if not hasattr(Gdk, "EVENT_PROPAGATE"): Gdk.EVENT_PROPAGATE = 0 Gdk.EVENT_STOP = 1 # On windows the default variants only do ANSI paths, so replace them. # In some typelibs they are replaced by default, in some don't.. if os.name == "nt": for name in ["new_from_file_at_scale", "new_from_file_at_size", "new_from_file"]: cls = GdkPixbuf.Pixbuf setattr(cls, name, getattr(cls, name + "_utf8", name)) # https://bugzilla.gnome.org/show_bug.cgi?id=670372 if not hasattr(GdkPixbuf.Pixbuf, "savev"): GdkPixbuf.Pixbuf.savev = GdkPixbuf.Pixbuf.save # Force menu/button image related settings. We might show too many atm # but this makes sure we don't miss cases where we forgot to force them # per widget. # https://bugzilla.gnome.org/show_bug.cgi?id=708676 warnings.filterwarnings('ignore', '.*g_value_get_int.*', Warning) # some day... 
but not now warnings.filterwarnings( 'ignore', '.*Stock items are deprecated.*', Warning) warnings.filterwarnings( 'ignore', '.*:use-stock.*', Warning) warnings.filterwarnings( 'ignore', '.*The property GtkAlignment:[^\s]+ is deprecated.*', Warning) settings = Gtk.Settings.get_default() with warnings.catch_warnings(): warnings.simplefilter("ignore") settings.set_property("gtk-button-images", True) settings.set_property("gtk-menu-images", True) if hasattr(settings.props, "gtk_primary_button_warps_slider"): settings.set_property("gtk-primary-button-warps-slider", True) # Make sure PyGObject includes support for foreign cairo structs try: gi.require_foreign("cairo") except AttributeError: # older pygobject pass except ImportError: print_e("PyGObject is missing cairo support") exit(1) # CSS overrides if os.name == "nt": # somehow borders are missing under Windows & Gtk+3.14 style_provider = Gtk.CssProvider() style_provider.load_from_data(""" .menu { border: 1px solid @borders; } """) Gtk.StyleContext.add_provider_for_screen( Gdk.Screen.get_default(), style_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION ) if sys.platform == "darwin": # fix duplicated shadows for popups with Gtk+3.14 style_provider = Gtk.CssProvider() style_provider.load_from_data(""" GtkWindow { box-shadow: none; } .tooltip { border-radius: 0; padding: 0; } .tooltip.background { background-clip: border-box; } """) Gtk.StyleContext.add_provider_for_screen( Gdk.Screen.get_default(), style_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION ) # https://bugzilla.gnome.org/show_bug.cgi?id=708676 warnings.filterwarnings('ignore', '.*g_value_get_int.*', Warning) # blacklist some modules, simply loading can cause segfaults sys.modules["gtk"] = None sys.modules["gpod"] = None sys.modules["glib"] = None sys.modules["gobject"] = None sys.modules["gnome"] = None from quodlibet.qltk import pygobject_version if pygobject_version < (3, 9): GObject.threads_init()
class GStreamerPipeline(Pipeline): Gst.init(None) GObject.threads_init() def __init__(self, id, config, models, request): self.config = config self.id = id self.pipeline = None self.template = config['template'] self.models = models self.request = request self.state = "QUEUED" self.frame_count = 0 self.start_time = None self.stop_time = None self.avg_fps = 0 self.destination = None self._gst_launch_string = None def stop(self): if self.pipeline is not None: self.pipeline.set_state(Gst.State.NULL) if self.state is "RUNNING": self.state = "ABORTED" logger.debug("Setting Pipeline {id} State to ABORTED".format( id=self.id)) self.stop_time = time.time() PipelineManager.pipeline_finished() if self.state is "QUEUED": self.state = "ABORTED" PipelineManager.remove_from_queue(self.id) logger.debug( "Setting Pipeline {id} State to ABORTED and removing from the queue" .format(id=self.id)) del self.pipeline self.pipeline = None return self.status() def params(self): request = copy.deepcopy(self.request) del request["models"] params_obj = { "id": self.id, "request": request, "type": self.config["type"], "launch_command": self._gst_launch_string } return params_obj def status(self): logger.debug("Called Status") if self.stop_time is not None: elapsed_time = max(0, self.stop_time - self.start_time) elif self.start_time is not None: elapsed_time = max(0, time.time() - self.start_time) else: elapsed_time = None status_obj = { "id": self.id, "state": self.state, "avg_fps": self.avg_fps, "start_time": self.start_time, "elapsed_time": elapsed_time } return status_obj def get_avg_fps(self): return self.avg_fps def _add_tags(self): if "tags" in self.request: metaconvert = self.pipeline.get_by_name("jsonmetaconvert") if metaconvert: metaconvert.set_property("tags", json.dumps(self.request["tags"])) else: logger.debug("tags given but no metaconvert element found") def _add_default_parameters(self): request_parameters = self.request.get("parameters", {}) pipeline_parameters = self.config.get("parameters", {}).get("properties", {}) for key in pipeline_parameters: if (not key in request_parameters) and ( "default" in pipeline_parameters[key]): request_parameters[key] = pipeline_parameters[key]["default"] self.request["parameters"] = request_parameters def _add_element_parameters(self): request_parameters = self.request.get("parameters", {}) pipeline_parameters = self.config.get("parameters", {}).get("properties", {}) for key in pipeline_parameters: if "element" in pipeline_parameters[key]: if key in request_parameters: element = self.pipeline.get_by_name( pipeline_parameters[key]["element"]) if element: element.set_property(key, request_parameters[key]) else: logger.debug( "parameter given for element but no element found") @staticmethod def validate_config(config): template = config["template"] pipeline = Gst.parse_launch(template) appsink = pipeline.get_by_name("appsink") jsonmetaconvert = pipeline.get_by_name("jsonmetaconvert") metapublish = pipeline.get_by_name("metapublish") if appsink is None: logger.warning("Missing appsink element") if jsonmetaconvert is None: logger.warning("Missing metaconvert element") if metapublish is None: logger.warning("Missing metapublish element") def start(self): logger.debug("Starting Pipeline {id}".format(id=self.id)) try: self.destination = Destination.create_instance(self.request) except: self.destination = None self.request["models"] = self.models self._add_default_parameters() self._gst_launch_string = string.Formatter().vformat( self.template, [], self.request) 
logger.debug(self._gst_launch_string) self.pipeline = Gst.parse_launch(self._gst_launch_string) self._add_element_parameters() self._add_tags() sink = self.pipeline.get_by_name("appsink") if sink is not None: sink.set_property("emit-signals", True) sink.set_property('sync', False) sink.connect("new-sample", GStreamerPipeline.on_sample, self) self.avg_fps = 0 bus = self.pipeline.get_bus() bus.add_signal_watch() bus.connect("message", GStreamerPipeline.bus_call, self) self.pipeline.set_state(Gst.State.PLAYING) self.start_time = time.time() @staticmethod def on_sample(sink, self): logger.debug("Received Sample from Pipeline {id}".format(id=self.id)) sample = sink.emit("pull-sample") try: buf = sample.get_buffer() try: meta = buf.get_meta("GstGVAJSONMetaAPI") except: meta = None if meta is None: logger.debug("No GstGVAJSONMeta") else: json_string = GstGVAJSONMeta.get_json_message(meta).decode( 'utf-8') # pylint: disable=undefined-variable json_object = json.loads(json_string) logger.debug(json.dumps(json_object)) if self.destination and ("objects" in json_object) and (len( json_object["objects"]) > 0): self.destination.send(json_object) except Exception as error: logger.error("Error on Pipeline {id}: {err}".format(id=self.id, err=error)) self.frame_count += 1 self.avg_fps = self.frame_count / (time.time() - self.start_time) return Gst.FlowReturn.OK @staticmethod def bus_call(bus, message, self): t = message.type if t == Gst.MessageType.EOS: logger.info("Pipeline {id} Ended".format(id=self.id)) self.pipeline.set_state(Gst.State.NULL) if self.state is "RUNNING": logger.debug("Setting Pipeline {id} State to COMPLETED".format( id=self.id)) self.state = "COMPLETED" self.stop_time = time.time() bus.remove_signal_watch() del self.pipeline self.pipeline = None PipelineManager.pipeline_finished() elif t == Gst.MessageType.ERROR: err, debug = message.parse_error() logger.error("Error on Pipeline {id}: {err}".format(id=id, err=err)) if (self.state is None) or (self.state is "RUNNING") or (self.state is "QUEUED"): logger.debug( "Setting Pipeline {id} State to ERROR".format(id=self.id)) self.stop_time = time.time() self.state = "ERROR" self.pipeline.set_state(Gst.State.NULL) self.stop_time = time.time() bus.remove_signal_watch() del self.pipeline self.pipeline = None PipelineManager.pipeline_finished() elif t == Gst.MessageType.STATE_CHANGED: old_state, new_state, pending_state = message.parse_state_changed() if message.src == self.pipeline: if old_state == Gst.State.PAUSED and new_state == Gst.State.PLAYING: if self.state is "QUEUED": logger.debug( "Setting Pipeline {id} State to RUNNING".format( id=self.id)) self.state = "RUNNING" else: pass return True
@author: tanel
"""

"""
Annotated on Jan 22, 2019
@author: alkazap

Using Kaldi's OnlineGmmDecodeFaster decoder
"""
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
GObject.threads_init()  # initialize thread support in PyGObject
Gst.init(None)  # initialize GStreamer

import logging
import thread
import os

logger = logging.getLogger(__name__)

import pdb


class DecoderPipeline(object):

    def __init__(self, conf={}):
        logger.info("Creating decoder using conf: %s" % conf)
        self.use_cutter = conf.get("use-vad", False)  # VAD - Voice Activity Detection
def main(args): # Check input arguments if len(args) != 2: sys.stderr.write("usage: %s <media file or uri>\n" % args[0]) sys.exit(1) for i in range(0, len(args) - 2): fps_streams["stream{0}".format(i)] = GETFPS(i) number_sources = len(args) - 2 global folder_name folder_name = args[-1] if path.exists(folder_name): sys.stderr.write( "The output folder %s already exists. Please remove it first.\n" % folder_name) sys.exit(1) os.mkdir(folder_name) print("Frames will be saved in ", folder_name) # Standard GStreamer initialization GObject.threads_init() Gst.init(None) # Create gstreamer elements # Create Pipeline element that will form a connection of other elements print("Creating Pipeline \n ") pipeline = Gst.Pipeline() # Create nvstreammux instance to form batches from one or more sources. streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n") pipeline.add(streammux) for i in range(number_sources): os.mkdir(folder_name + "/stream_" + str(i)) frame_count["stream_" + str(i)] = 0 saved_count["stream_" + str(i)] = 0 print("Creating source_bin ", i, " \n ") uri_name = args[i + 1] if uri_name.find("rtsp://") == 0: is_live = True source_bin = create_source_bin(i, uri_name) if not source_bin: sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname = "sink_%u" % i sinkpad = streammux.get_request_pad(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad = source_bin.get_static_pad("src") if not srcpad: sys.stderr.write("Unable to create src pad bin \n") srcpad.link(sinkpad) # Use nvinferserver to run inferencing on decoder's output, # behaviour of inferencing is set through config file pgie = make_elm_or_print_err("nvinferserver", "primary-inference", "Nvinferserver") # Use convertor to convert from NV12 to RGBA as required by nvosd nvvidconv = make_elm_or_print_err("nvvideoconvert", "convertor", "Nvvidconv") # Create OSD to draw on the converted RGBA buffer nvosd = make_elm_or_print_err("nvdsosd", "onscreendisplay", "OSD (nvosd)") # Finally encode and save the osd output queue = make_elm_or_print_err("queue", "queue", "Queue") nvvidconv2 = make_elm_or_print_err("nvvideoconvert", "convertor2", "Converter 2 (nvvidconv2)") capsfilter = make_elm_or_print_err("capsfilter", "capsfilter", "capsfilter") caps = Gst.Caps.from_string("video/x-raw, format=I420") capsfilter.set_property("caps", caps) # On Jetson, there is a problem with the encoder failing to initialize # due to limitation on TLS usage. To work around this, preload libgomp. # Add a reminder here in case the user forgets. 
preload_reminder = "If the following error is encountered:\n" + \ "/usr/lib/aarch64-linux-gnu/libgomp.so.1: cannot allocate memory in static TLS block\n" + \ "Preload the offending library:\n" + \ "export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1\n" encoder = make_elm_or_print_err("avenc_mpeg4", "encoder", "Encoder", preload_reminder) encoder.set_property("bitrate", 2000000) codeparser = make_elm_or_print_err("mpeg4videoparse", "mpeg4-parser", 'Code Parser') container = make_elm_or_print_err("qtmux", "qtmux", "Container") sink = make_elm_or_print_err("filesink", "filesink", "Sink") sink.set_property("location", OUTPUT_VIDEO_NAME) sink.set_property("sync", 0) sink.set_property("async", 0) # print("Playing file %s " %args[1]) streammux.set_property('width', 1920) streammux.set_property('height', 1080) streammux.set_property('batch-size', 1) streammux.set_property('batched-push-timeout', 4000000) pgie.set_property("config-file-path", "dstest_ssd_nopostprocess.txt") print("Adding elements to Pipeline \n") pipeline.add(pgie) pipeline.add(nvvidconv) pipeline.add(nvosd) pipeline.add(queue) pipeline.add(nvvidconv2) pipeline.add(capsfilter) pipeline.add(encoder) pipeline.add(codeparser) pipeline.add(container) pipeline.add(sink) # we link the elements together # file-source -> h264-parser -> nvh264-decoder -> # nvinfer -> nvvidconv -> nvosd -> video-renderer print("Linking elements in the Pipeline \n") streammux.link(pgie) pgie.link(nvvidconv) nvvidconv.link(nvosd) nvosd.link(queue) queue.link(nvvidconv2) nvvidconv2.link(capsfilter) capsfilter.link(encoder) encoder.link(codeparser) codeparser.link(container) container.link(sink) # create an event loop and feed gstreamer bus mesages to it loop = GObject.MainLoop() bus = pipeline.get_bus() bus.add_signal_watch() bus.connect("message", bus_call, loop) # Add a probe on the primary-infer source pad to get inference output tensors pgiesrcpad = pgie.get_static_pad("src") if not pgiesrcpad: sys.stderr.write(" Unable to get src pad of primary infer \n") pgiesrcpad.add_probe(Gst.PadProbeType.BUFFER, pgie_src_pad_buffer_probe, 0) # Lets add probe to get informed of the meta data generated, we add probe to # the sink pad of the osd element, since by that time, the buffer would have # had got all the metadata. osdsinkpad = nvosd.get_static_pad("sink") if not osdsinkpad: sys.stderr.write(" Unable to get sink pad of nvosd \n") osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) # start play back and listen to events print("Starting pipeline \n") pipeline.set_state(Gst.State.PLAYING) try: loop.run() except: pass # cleanup pipeline.set_state(Gst.State.NULL)
def __init__(self):
    threading.Thread.__init__(self)
    GObject.threads_init()
    self.__msg_queue = Queue.Queue()
    self.lock = thread.allocate_lock()
    self.lock.acquire()
def main(args): # Standard GStreamer initialization GObject.threads_init() Gst.init(None) # Create gstreamer elements # Create Pipeline element that will form a connection of other elements print("Creating Pipeline \n ") pipeline = Gst.Pipeline() if not pipeline: sys.stderr.write(" Unable to create Pipeline \n") """ # Source element for reading from the file print("Creating Source \n ") source = Gst.ElementFactory.make("filesrc", "file-source") if not source: sys.stderr.write(" Unable to create Source \n") # Since the data format in the input file is elementary h264 stream, # we need a h264parser print("Creating H264Parser \n") h264parser = Gst.ElementFactory.make("h264parse", "h264-parser") if not h264parser: sys.stderr.write(" Unable to create h264 parser \n") # Use nvdec_h264 for hardware accelerated decode on GPU print("Creating Decoder \n") decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder") if not decoder: sys.stderr.write(" Unable to create Nvv4l2 Decoder \n") """ # Create nvstreammux instance to form batches from one or more sources. streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n") pipeline.add(streammux) streamdemux = Gst.ElementFactory.make("nvstreamdemux", "Stream-demuxer") if not streamdemux: sys.stderr.write(" Unable to create NvStreamdeMux \n") for i, sp in enumerate(stream_path): # i = 0 print("Creating source_bin ", i, " \n ") source_bin = create_source_bin(i, sp) if not source_bin: sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname = "sink_%u" % i sinkpad = streammux.get_request_pad(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad = source_bin.get_static_pad("src") if not srcpad: sys.stderr.write("Unable to create src pad bin \n") srcpad.link(sinkpad) # queue1=Gst.ElementFactory.make("queue", "queue1") # Use nvinfer to run inferencing on decoder's output, # behaviour of inferencing is set through config file pgie = Gst.ElementFactory.make("nvinfer", "primary-inference") if not pgie: sys.stderr.write(" Unable to create pgie \n") # Use convertor to convert from NV12 to RGBA as required by nvosd nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1") nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor2") if not nvvidconv1: sys.stderr.write(" Unable to create nvvidconv \n") # Create OSD to draw on the converted RGBA buffer nvosd1 = Gst.ElementFactory.make("nvdsosd", "onscreendisplay1") nvosd2 = Gst.ElementFactory.make("nvdsosd", "onscreendisplay2") if not nvosd1: sys.stderr.write(" Unable to create nvosd \n") nvvidconv_postosd1 = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd1") nvvidconv_postosd2 = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd2") if not nvvidconv_postosd1: sys.stderr.write(" Unable to create nvvidconv_postosd \n") # Create a caps filter caps1 = Gst.ElementFactory.make("capsfilter", "filter1") caps2 = Gst.ElementFactory.make("capsfilter", "filter2") caps1.set_property( "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420")) caps2.set_property( "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420")) # Make the encoder if codec == "H264": encoder1 = Gst.ElementFactory.make("nvv4l2h264enc", "encoder1") encoder2 = Gst.ElementFactory.make("nvv4l2h264enc", "encoder2") print("Creating H264 Encoder") elif codec == "H265": encoder1 = Gst.ElementFactory.make("nvv4l2h265enc", "encoder1") encoder2 = 
Gst.ElementFactory.make("nvv4l2h265enc", "encoder2") print("Creating H265 Encoder") if not encoder1: sys.stderr.write(" Unable to create encoder") encoder1.set_property('bitrate', bitrate) encoder2.set_property('bitrate', bitrate) if is_aarch64(): encoder1.set_property('preset-level', 1) encoder2.set_property('preset-level', 1) encoder1.set_property('insert-sps-pps', 1) encoder2.set_property('insert-sps-pps', 1) encoder1.set_property('bufapi-version', 1) encoder2.set_property('bufapi-version', 1) # Make the payload-encode video into RTP packets if codec == "H264": rtppay1 = Gst.ElementFactory.make("rtph264pay", "rtppay1") rtppay2 = Gst.ElementFactory.make("rtph264pay", "rtppay2") print("Creating H264 rtppay") elif codec == "H265": rtppay1 = Gst.ElementFactory.make("rtph265pay", "rtppay1") rtppay2 = Gst.ElementFactory.make("rtph265pay", "rtppay2") print("Creating H265 rtppay") if not rtppay1: sys.stderr.write(" Unable to create rtppay") # Make the UDP sink updsink_port_num1 = 5400 updsink_port_num2 = 5401 sink1 = Gst.ElementFactory.make("udpsink", "udpsink1") sink2 = Gst.ElementFactory.make("udpsink", "udpsink2") if not sink1: sys.stderr.write(" Unable to create udpsink") sink1.set_property('host', '127.0.0.1') sink2.set_property('host', '127.0.0.1') sink1.set_property('port', updsink_port_num1) sink2.set_property('port', updsink_port_num2) sink1.set_property('async', False) sink2.set_property('async', False) sink1.set_property('sync', 1) sink2.set_property('sync', 1) is_live = 0 if is_live: print("Atleast one of the sources is live") streammux.set_property('live-source', 1) print("Playing file %s " % stream_path) #source.set_property('location', stream_path) streammux.set_property('width', 1920) streammux.set_property('height', 1080) streammux.set_property('batch-size', 1) streammux.set_property('batched-push-timeout', 4000000) pgie.set_property('config-file-path', "dstest1_pgie_config.txt") print("Adding elements to Pipeline \n") #pipeline.add(source) #pipeline.add(h264parser) #pipeline.add(decoder) #pipeline.add(streammux) pipeline.add(pgie) pipeline.add(streamdemux) pipeline.add(nvvidconv1) pipeline.add(nvvidconv2) pipeline.add(nvosd1) pipeline.add(nvosd2) pipeline.add(nvvidconv_postosd1) pipeline.add(nvvidconv_postosd2) pipeline.add(caps1) pipeline.add(caps2) pipeline.add(encoder1) pipeline.add(encoder2) pipeline.add(rtppay1) pipeline.add(rtppay2) pipeline.add(sink1) pipeline.add(sink2) # Link the elements together: # file-source -> h264-parser -> nvh264-decoder -> # nvinfer -> nvvidconv -> nvosd -> nvvidconv_postosd -> # caps -> encoder -> rtppay -> udpsink print("Linking elements in the Pipeline \n") """ source.link(h264parser) h264parser.link(decoder) sinkpad = streammux.get_request_pad("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") srcpad = decoder.get_static_pad("src") if not srcpad: sys.stderr.write(" Unable to get source pad of decoder \n") srcpad.link(sinkpad) """ nvvidconv = [nvvidconv1, nvvidconv2] nvosd = [nvosd1, nvosd2] nvvidconv_postosd = [nvvidconv_postosd1, nvvidconv_postosd2] caps = [caps1, caps2] encoder = [encoder1, encoder2] rtppay = [rtppay1, rtppay2] sink = [sink1, sink2] streammux.link(pgie) # pgie.link(nvvidconv) pgie.link(streamdemux) ####################### for i in range(len(stream_path)): print("demux source", i, "\n") srcpad1 = streamdemux.get_request_pad("src_%u" % i) if not srcpad1: sys.stderr.write(" Unable to get the src pad of streamdemux \n") sinkpad1 = nvvidconv[i].get_static_pad("sink") if not sinkpad1: 
sys.stderr.write(" Unable to get sink pad of nvvidconv \n") srcpad1.link(sinkpad1) ####################### nvvidconv[i].link(nvosd[i]) nvosd[i].link(nvvidconv_postosd[i]) nvvidconv_postosd[i].link(caps[i]) caps[i].link(encoder[i]) encoder[i].link(rtppay[i]) rtppay[i].link(sink[i]) # create an event loop and feed gstreamer bus messages to it loop = GObject.MainLoop() bus = pipeline.get_bus() bus.add_signal_watch() bus.connect("message", bus_call, loop) # Start streaming rtsp_port_num = 22 server = GstRtspServer.RTSPServer.new() server.props.service = "%d" % rtsp_port_num server.attach(None) factory1 = GstRtspServer.RTSPMediaFactory.new() factory2 = GstRtspServer.RTSPMediaFactory.new() factory1.set_launch( "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (updsink_port_num1, codec)) factory2.set_launch( "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (updsink_port_num2, codec)) factory1.set_shared(True) factory2.set_shared(True) server.get_mount_points().add_factory("/ds-test1", factory1) server.get_mount_points().add_factory("/ds-test2", factory2) print( "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test1 and rtsp://localhost:%d/ds-test2 ***\n\n" % (rtsp_port_num, rtsp_port_num)) # Let's add a probe to get informed of the metadata generated. We add the probe to # the sink pad of the osd element, since by that time the buffer will have # received all the metadata. osdsinkpad1 = nvosd[0].get_static_pad("sink") osdsinkpad2 = nvosd[1].get_static_pad("sink") if not osdsinkpad1: sys.stderr.write(" Unable to get sink pad of nvosd \n") osdsinkpad1.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) osdsinkpad2.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) # start playback and listen to events print("Starting pipeline \n") pipeline.set_state(Gst.State.PLAYING) try: loop.run() except: pass # cleanup pipeline.set_state(Gst.State.NULL)
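Every DeepStream snippet in this collection wires a bus_call handler into the GLib loop, but the handler itself is never shown. A minimal sketch of what such a handler typically looks like (the name and the loop argument are assumptions carried over from the calls above):

import sys
from gi.repository import Gst

def bus_call(bus, message, loop):
    # Quit the main loop on end-of-stream or error; keep watching otherwise.
    t = message.type
    if t == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream\n")
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        sys.stderr.write("Warning: %s: %s\n" % (err, debug))
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s\n" % (err, debug))
        loop.quit()
    return True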
        return conn, unique

    def testCall(self):
        conn, unique = self.get_conn_and_unique()
        ret = conn.call_blocking(NAME, OBJECT, IFACE, 'Echo', 'v', ('V', ))
        self.assertEqual(ret, 'V')

    def testCallThroughProxy(self):
        conn, unique = self.get_conn_and_unique()
        proxy = conn.get_object(NAME, OBJECT)
        iface = dbus.Interface(proxy, IFACE)
        ret = iface.Echo('V')
        self.assertEqual(ret, 'V')

    def testSetUniqueName(self):
        conn, unique = self.get_conn_and_unique()
        kwargs = {}
        if is_py2:
            kwargs['utf8_strings'] = True
        ret = conn.call_blocking(NAME, OBJECT, IFACE,
                                 'MethodExtraInfoKeywords', '', (), **kwargs)
        self.assertEqual(
            ret, (unique, OBJECT, NAME, 'dbus.lowlevel.MethodCallMessage'))


if __name__ == '__main__':
    gobject.threads_init()
    dbus.glib.init_threads()
    unittest.main()
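The test harness above still relies on the legacy gobject.threads_init() and dbus.glib.init_threads() calls. A sketch of the equivalent setup with current dbus-python (assuming dbus-python 1.x with the GLib main-loop integration):

import dbus
from dbus.mainloop.glib import DBusGMainLoop, threads_init

# threads_init() replaces dbus.glib.init_threads(); DBusGMainLoop replaces
# the implicit dbus.glib main loop and becomes the default for new buses.
threads_init()
DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()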
def __init__(self, argv=None): self.pipeline = None self.running = False self.video_caps = None self.BOX_SIZE = 4 self.FACE_LABEL_SIZE = 2 self.OBJECT_LABEL_SIZE = 91 self.DETECTION_MAX = 1917 self.MAX_FACE_DETECTION = 3 self.MAX_OBJECT_DETECTION = 5 self.Y_SCALE = 10.0 self.X_SCALE = 10.0 self.H_SCALE = 5.0 self.W_SCALE = 5.0 self.VIDEO_WIDTH = 640 self.VIDEO_HEIGHT = 480 self.FACE_MODEL_WIDTH = 300 self.FACE_MODEL_HEIGHT = 300 self.POSE_MODEL_WIDTH = 257 self.POSE_MODEL_HEIGHT = 257 self.KEYPOINT_SIZE = 17 self.OUTPUT_STRIDE = 32 self.GRID_XSIZE = 9 self.GRID_YSIZE = 9 self.SCORE_THRESHOLD = 0.7 self.tflite_face_model = '' self.tflite_face_labels = [] self.tflite_pose_model = '' self.tflite_pose_labels = [] self.tflite_object_model = '' self.tflite_object_labels = [] self.tflite_box_priors = [] self.detected_faces = [] self.detected_objects = [] self.kps = [list(), list(), list(), list(), list()] self.pattern = None self.AUTH_KEY = "null" if not self.tflite_init(): raise Exception # Base_Pipelines for Each component self.BASE_PIPELINE = ( 'v4l2src name=cam_src ! videoconvert ! videoscale ! ' 'video/x-raw,width=' + str(self.VIDEO_WIDTH) + ',height=' + str(self.VIDEO_HEIGHT) + ',format=RGB ! tee name=t_raw ') self.BASE_MODEL_PIPE = ('t_raw. ! queue ! videoscale ! videorate ! ' 'video/x-raw,width=' + str(self.VIDEO_WIDTH) + ',height=' + str(self.VIDEO_HEIGHT) + ',framerate=15/1 ! tee name=model_handler ') self.BASE_OUTPUT_PIPE = ( 't_raw. ! queue leaky=2 max-size-buffers=2 ! videoconvert ! cairooverlay name=tensor_res ! tee name=output_handler ' ) # Output component. This pipeline should not be dynamically self.LOCAL_OUTPUT_PIPE = ( 'output_handler. ! queue leaky=2 max-size-buffers=2 ! videoconvert ! ximagesink name=output_local ' ) self.RTMP_OUTPUT_PIPE = ( 'output_handler. ! queue ! videoconvert ! x264enc bitrate=2000 byte-stream=false key-int-max=60 bframes=0 aud=true tune=zerolatency ! ' 'video/x-h264,profile=main ! flvmux streamable=true name=rtmp_mux ' 'rtmp_mux. ! rtmpsink location=rtmp://a.rtmp.youtube.com/live2/x/' + self.AUTH_KEY + ' ' 'alsasrc name=audio_src ! audioconvert ! audio/x-raw,rate=16000,format=S16LE,channels=1 ! voaacenc bitrate=16000 ! rtmp_mux. ' ) # SSD Based detection component self.SSD_PIPE = ( 'model_handler. ! queue ! videoscale ! video/x-raw,width=' + str(self.FACE_MODEL_WIDTH) + ',height=' + str(self.FACE_MODEL_HEIGHT) + ' ! tensor_converter ! ' 'tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! tee name=t_ssd ' ) self.OBJECT_DETECT_PIPE = ( 't_ssd. ! queue leaky=2 max-size-buffers=2 ! tensor_filter framework=tensorflow-lite model=' + self.tflite_object_model + ' ! tensor_sink name=res_object ') self.FACE_DETECT_PIPE = ( 't_ssd. ! queue leaky=2 max-size-buffers=2 ! tensor_filter framework=tensorflow-lite model=' + self.tflite_face_model + ' ! tensor_sink name=res_face ') # Pose Detection component for Eye Tracking self.EYETRACK_PIPE = ( 'model_handler. ! queue leaky=2 max-size-buffers=2 ! videoconvert ! tee name=pose_split ' 'pose_split. ! queue leaky=2 max-size-buffers=2 ! videobox name=face0 ! videoflip method=horizontal-flip ! videoscale ! ' 'video/x-raw,width=' + str(self.POSE_MODEL_WIDTH) + ',height=' + str(self.POSE_MODEL_HEIGHT) + ',format=RGB ! tensor_converter ! ' 'tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! ' 'tensor_filter framework=tensorflow-lite model=' + self.tflite_pose_model + ' ! tensor_sink name=posesink_0 ' 'pose_split. ! queue leaky=2 max-size-buffers=2 ! 
videobox name=face1 ! videoflip method=horizontal-flip ! videoscale ! ' 'video/x-raw,width=' + str(self.POSE_MODEL_WIDTH) + ',height=' + str(self.POSE_MODEL_HEIGHT) + ',format=RGB ! tensor_converter ! ' 'tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! ' 'tensor_filter framework=tensorflow-lite model=' + self.tflite_pose_model + ' ! tensor_sink name=posesink_1 ' 'pose_split. ! queue leaky=2 max-size-buffers=2 ! videobox name=face2 ! videoflip method=horizontal-flip ! videoscale ! ' 'video/x-raw,width=' + str(self.POSE_MODEL_WIDTH) + ',height=' + str(self.POSE_MODEL_HEIGHT) + ',format=RGB ! tensor_converter ! ' 'tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! ' 'tensor_filter framework=tensorflow-lite model=' + self.tflite_pose_model + ' ! tensor_sink name=posesink_2 ') self.OPTION_FM = False # FACE MASKING self.OPTION_EM = False # EYE MASKING self.OPTION_OD = False # OBJECT DETECTING self.OPTION_DM = False # DETECT ME self.OPTION_XV = False # XVIMAGESINK self.OPTION_RTMP = False # RTMP GObject.threads_init() Gst.init(argv)
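The constructor above only assembles pipeline description strings. A method sketch of how such strings are typically concatenated, parsed, and wired to an NNStreamer tensor_sink (the element name res_face comes from the strings above; the callback name on_new_face_data is an assumption):

from gi.repository import Gst

def build_and_run(self):
    # Let GStreamer parse the combined description into a pipeline.
    description = (self.BASE_PIPELINE + self.BASE_MODEL_PIPE +
                   self.BASE_OUTPUT_PIPE + self.LOCAL_OUTPUT_PIPE +
                   self.SSD_PIPE + self.FACE_DETECT_PIPE)
    self.pipeline = Gst.parse_launch(description)

    # tensor_sink emits "new-data" for every inference result.
    tensor_sink = self.pipeline.get_by_name('res_face')
    tensor_sink.connect('new-data', self.on_new_face_data)

    self.pipeline.set_state(Gst.State.PLAYING)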
def main(root_path): """ Called at application start. Initializes application with a default project. """ # DEBUG: Direct output to log file if log file set if _log_file != None: log_print_output_to_file() # Print OS, Python version and GTK+ version try: os_release_file = open("/etc/os-release","r") os_text = os_release_file.read() s_index = os_text.find("PRETTY_NAME=") e_index = os_text.find("\n", s_index) print "OS: " + os_text[s_index + 13:e_index - 1] except: pass print "Python", sys.version gtk_version = "%s.%s.%s" % (Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version()) print "GTK+ version:", gtk_version editorstate.gtk_version = gtk_version try: editorstate.mlt_version = mlt.LIBMLT_VERSION except: editorstate.mlt_version = "0.0.99" # magic string for "not found" # passing -xdg as a flag will change the user_dir location with XDG_CONFIG_HOME # For full xdg-app support all the launch processes need to add this too, currently not impl. for arg in sys.argv: if arg.lower() == "-xdg": editorstate.use_xdg = True # Create hidden folders if not present user_dir = utils.get_hidden_user_dir_path() print "User dir:",user_dir if not os.path.exists(user_dir): os.mkdir(user_dir) if not os.path.exists(user_dir + mltprofiles.USER_PROFILES_DIR): os.mkdir(user_dir + mltprofiles.USER_PROFILES_DIR) if not os.path.exists(user_dir + AUTOSAVE_DIR): os.mkdir(user_dir + AUTOSAVE_DIR) if not os.path.exists(user_dir + BATCH_DIR): os.mkdir(user_dir + BATCH_DIR) if not os.path.exists(user_dir + appconsts.AUDIO_LEVELS_DIR): os.mkdir(user_dir + appconsts.AUDIO_LEVELS_DIR) if not os.path.exists(utils.get_hidden_screenshot_dir_path()): os.mkdir(utils.get_hidden_screenshot_dir_path()) if not os.path.exists(user_dir + appconsts.GMIC_DIR): os.mkdir(user_dir + appconsts.GMIC_DIR) if not os.path.exists(user_dir + appconsts.MATCH_FRAME_DIR): os.mkdir(user_dir + appconsts.MATCH_FRAME_DIR) if not os.path.exists(user_dir + appconsts.TRIM_VIEW_DIR): os.mkdir(user_dir + appconsts.TRIM_VIEW_DIR) if not os.path.exists(user_dir + appconsts.NATRON_DIR): os.mkdir(user_dir + appconsts.NATRON_DIR) # Set paths. respaths.set_paths(root_path) # Load editor prefs and list of recent projects editorpersistance.load() if editorpersistance.prefs.dark_theme == True: respaths.apply_dark_theme() if editorpersistance.prefs.display_all_audio_levels == False: editorstate.display_all_audio_levels = False editorpersistance.create_thumbs_folder_if_needed(user_dir) editorpersistance.create_rendered_clips_folder_if_needed(user_dir) editorpersistance.save() # Apr-2017 - SvdB - Keyboard shortcuts shortcuts.load_shortcut_files() shortcuts.load_shortcuts() # Init translations module with translations data translations.init_languages() translations.load_filters_translations() mlttransitions.init_module() # RHEL7/CentOS compatibility fix if gtk_version == "3.8.8": GObject.threads_init() # Init gtk threads Gdk.threads_init() Gdk.threads_enter() # Request dark theme if so desired if editorpersistance.prefs.dark_theme == True: Gtk.Settings.get_default().set_property("gtk-application-prefer-dark-theme", True) # Load drag'n'drop images dnd.init() # Adjust gui parameters for smaller screens scr_w = Gdk.Screen.width() scr_h = Gdk.Screen.height() editorstate.SCREEN_WIDTH = scr_w editorstate.SCREEN_HEIGHT = scr_h print scr_w, scr_h print "Small height:", editorstate.screen_size_small_height() print "Small width:", editorstate.screen_size_small_width() _set_draw_params() # Refuse to run on too small screen. 
if scr_w < 1151 or scr_h < 767: _too_small_screen_exit() return # Splash screen if editorpersistance.prefs.display_splash_screen == True: show_splash_screen() # Init MLT framework repo = mlt.Factory().init() # Set numeric locale to use "." as radix, MLT initilizes this to OS locale and this causes bugs locale.setlocale(locale.LC_NUMERIC, 'C') # Check for codecs and formats on the system mltenv.check_available_features(repo) renderconsumer.load_render_profiles() # Load filter and compositor descriptions from xml files. mltfilters.load_filters_xml(mltenv.services) mlttransitions.load_compositors_xml(mltenv.transitions) # Replace some services if better replacements available mltfilters.replace_services(mltenv.services) # Create list of available mlt profiles mltprofiles.load_profile_list() # Save assoc file path if found in arguments global assoc_file_path assoc_file_path = get_assoc_file_path() # There is always a project open, so at startup we create a default project. # Set default project as the project being edited. editorstate.project = projectdata.get_default_project() check_crash = True # Audiomonitoring being available needs to be known before GUI creation audiomonitoring.init(editorstate.project.profile) # Set trim view mode to current default value editorstate.show_trim_view = editorpersistance.prefs.trim_view_default # Check for tools and init tools integration gmic.test_availablity() toolnatron.init() toolsintegration.init() #toolsintegration.test() # Create player object create_player() # Create main window and set widget handles in gui.py for more convenient reference. create_gui() # Inits widgets with project data init_project_gui() # Inits widgets with current sequence data init_sequence_gui() # Launch player now that data and gui exist launch_player() # Editor and modules need some more initializing init_editor_state() # Tracks need to be recentered if window is resized. # Connect listener for this now that the tline panel size allocation is sure to be available. global window_resize_id, window_state_id window_resize_id = gui.editor_window.window.connect("size-allocate", lambda w, e:updater.window_resized()) window_state_id = gui.editor_window.window.connect("window-state-event", lambda w, e:updater.window_resized()) # Get existing autosave files autosave_files = get_autosave_files() # Clear splash if ((editorpersistance.prefs.display_splash_screen == True) and len(autosave_files) == 0): global splash_timeout_id splash_timeout_id = GLib.timeout_add(2600, destroy_splash_screen) splash_screen.show_all() appconsts.SAVEFILE_VERSION = projectdata.SAVEFILE_VERSION # THIS IS A QUESTIONABLE IDEA TO SIMPLIFY IMPORTS, NOT DRY. WHEN DOING TOOLS THAT RUN IN ANOTHER PROCESSES AND SAVE PROJECTS, THIS LINE NEEDS TO BE THERE ALSO. 
# Every running instance has unique autosave file which is deleted at exit set_instance_autosave_id() # Existance of autosave file hints that program was exited abnormally if check_crash == True and len(autosave_files) > 0: if len(autosave_files) == 1: GObject.timeout_add(10, autosave_recovery_dialog) else: GObject.timeout_add(10, autosaves_many_recovery_dialog) else: start_autosave() # We prefer to monkeypatch some callbacks into some modules, usually to # maintain a simpler and/or non-circular import structure monkeypatch_callbacks() if not(check_crash == True and len(autosave_files) > 0): if assoc_file_path != None: print "Launch assoc file:", assoc_file_path global assoc_timeout_id assoc_timeout_id = GObject.timeout_add(10, open_assoc_file) # Launch gtk+ main loop Gtk.main() Gdk.threads_leave()
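Both Flowblade startup variants in this document mix GObject.timeout_add, GLib.timeout_add and the GTK 3.8-era threading calls. On current PyGObject the GObject wrappers are deprecated aliases of the GLib functions and GObject.threads_init() is a no-op, so an illustrative sketch of the modern equivalents is:

from gi.repository import GLib

# GObject.threads_init() can simply be dropped on PyGObject 3.10.2+.
# GObject.timeout_add()/GObject.idle_add() forward to the GLib versions:
timeout_id = GLib.timeout_add(10, lambda: False)   # one-shot callback
idle_id = GLib.idle_add(lambda: False)             # runs once when idle
GLib.source_remove(timeout_id)                     # cancel while still pending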
def main(): cameras_list = [ { "index": 0, "source": "/dev/video0", "name": "Camera-1" }, # {"index" : 1, "source": "/dev/video1", "name": "Camera-2"}, ] GObject.threads_init() Gst.init(None) pipeline = Gst.Pipeline() if not pipeline: print("Unable to create Pipeline") exit(0) # Muxer muxer = create_element_or_error("nvstreammux", "stream-muxer") muxer.set_property('live-source', True) muxer.set_property('width', 1280) muxer.set_property('height', 720) muxer.set_property('num-surfaces-per-frame', 1) muxer.set_property('batch-size', 1) muxer.set_property('batched-push-timeout', 4000000) pipeline.add(muxer) # Sources for camera in cameras_list: # Source source = create_element_or_error("nvv4l2camerasrc", "source-" + camera['name']) source.set_property('device', camera["source"]) source.set_property('do-timestamp', True) source.set_property('bufapi-version', True) pipeline.add(source) # Caps caps = create_element_or_error("capsfilter", "source-caps-source-1") caps.set_property( "caps", Gst.Caps.from_string( "video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, framerate=(fraction)30/1, format=(string)UYVY" )) pipeline.add(caps) source.link(caps) convertor = create_element_or_error("nvvideoconvert", "converter-1") pipeline.add(convertor) caps.link(convertor) srcpad = convertor.get_static_pad("src") sinkpad = muxer.get_request_pad('sink_' + str(camera['index'])) if not sinkpad: print("Unable to create source sink pad") exit(0) if not srcpad: print("Unable to create source src pad") exit(0) srcpad.link(sinkpad) pgie = create_element_or_error("nvinfer", "primary-inference") pgie.set_property( 'config-file-path', "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt" ) pipeline.add(pgie) muxer.link(pgie) tracker = create_element_or_error("nvtracker", "tracker") tracker.set_property( 'll-lib-file', '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvdcf.so') tracker.set_property('enable-batch-process', 1) tracker.set_property('tracker-width', 640) tracker.set_property('tracker-height', 480) pipeline.add(tracker) pgie.link(tracker) tiler = create_element_or_error("nvmultistreamtiler", "nvtiler") tiler.set_property("rows", 1) tiler.set_property("columns", 1) tiler.set_property("width", 1280) tiler.set_property("height", 720) pipeline.add(tiler) tracker.link(tiler) convertor2 = create_element_or_error("nvvideoconvert", "converter-2") pipeline.add(convertor2) tiler.link(convertor2) nvosd = create_element_or_error("nvdsosd", "onscreendisplay") pipeline.add(nvosd) convertor2.link(nvosd) transform = create_element_or_error("nvegltransform", "nvegl-transform") pipeline.add(transform) nvosd.link(transform) sink = create_element_or_error("nveglglessink", "nvvideo-renderer") pipeline.add(sink) transform.link(sink) loop = GObject.MainLoop() pipeline.set_state(Gst.State.PLAYING) try: loop.run() except: pass pipeline.set_state(Gst.State.NULL)
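The snippet above and several that follow call a create_element_or_error helper that is not included in this excerpt. A minimal sketch of what such a helper presumably does (the exact error handling is an assumption):

import sys
from gi.repository import Gst

def create_element_or_error(factory_name, element_name):
    # Thin wrapper around Gst.ElementFactory.make that aborts on failure.
    element = Gst.ElementFactory.make(factory_name, element_name)
    if not element:
        sys.stderr.write("Unable to create element %s (%s)\n"
                         % (element_name, factory_name))
        sys.exit(1)
    return element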
def run(): MEDIA_DIR = os.path.join(DATADIR, 'Pellmonweb', 'media') argparser = argparse.ArgumentParser(prog='pellmonweb') argparser.add_argument('-D', '--DAEMONIZE', action='store_true', help='Run as daemon') argparser.add_argument('-P', '--PIDFILE', default='/tmp/pellmonweb.pid', help='Full path to pidfile') argparser.add_argument('-U', '--USER', help='Run as USER') argparser.add_argument('-G', '--GROUP', default='nogroup', help='Run as GROUP') argparser.add_argument('-C', '--CONFIG', default='pellmon.conf', help='Full path to config file') argparser.add_argument('-d', '--DBUS', default='SESSION', choices=['SESSION', 'SYSTEM'], help='which bus to use, SESSION is default') argparser.add_argument('-V', '--version', action='version', version='%(prog)s version ' + __version__) args = argparser.parse_args() global dbus dbus = Dbus_handler(args.DBUS) #Look for temlates in this directory global lookup lookup = myLookup( directories=[os.path.join(DATADIR, 'Pellmonweb', 'html')], dbus=dbus) config_file = args.CONFIG pidfile = args.PIDFILE if pidfile: plugins.PIDFile(cherrypy.engine, pidfile).subscribe() if args.USER: config_file = os.path.join(CONFDIR, 'pellmon.conf') try: parser.read(config_file) except: cherrypy.log("can not parse config file") sys.exit(1) try: config_dir = parser.get('conf', 'config_dir') walk_config_dir(config_dir, parser) except ConfigParser.NoOptionError: pass try: accesslog = parser.get('weblog', 'accesslog') logdir = os.path.dirname(accesslog) if not os.path.isdir(logdir): os.mkdir(logdir) uid = pwd.getpwnam(args.USER).pw_uid gid = grp.getgrnam(args.GROUP).gr_gid os.chown(logdir, uid, gid) if os.path.isfile(accesslog): os.chown(accesslog, uid, gid) except: pass try: errorlog = parser.get('weblog', 'errorlog') logdir = os.path.dirname(errorlog) if not os.path.isdir(logdir): os.mkdir(logdir) uid = pwd.getpwnam(args.USER).pw_uid gid = grp.getgrnam(args.GROUP).gr_gid os.chown(logdir, uid, gid) if os.path.isfile(errorlog): os.chown(errorlog, uid, gid) except: pass uid = pwd.getpwnam(args.USER).pw_uid gid = grp.getgrnam(args.GROUP).gr_gid plugins.DropPrivileges(cherrypy.engine, uid=uid, gid=gid, umask=033).subscribe() # Load the configuration file try: parser.read(config_file) config_dir = parser.get('conf', 'config_dir') walk_config_dir(config_dir, parser) except ConfigParser.NoOptionError: pass except ConfigParser.NoSectionError: cherrypy.log("can not parse config file") except: cherrypy.log("Config file not found") # The RRD database, updated by pellMon global polling, db try: polling = True db = parser.get('conf', 'database') graph_file = os.path.join(os.path.dirname(db), 'graph.png') except: polling = False db = '' # the colors to use when drawing the graph global colorsDict try: colors = parser.items('graphcolors') colorsDict = {} for key, value in colors: colorsDict[key] = value except: colorsDict = {} # Get the names of the polled data global polldata try: polldata = parser.items("pollvalues") # Get the names of the polled data rrd_ds_names = parser.items("rrd_ds_names") ds_names = {} for key, value in rrd_ds_names: ds_names[key] = value except: ds_names = {} polldata = [] try: # Get the optional scales scales = parser.items("scaling") scale_data = {} for key, value in scales: scale_data[key] = value except: scale_data = {} global graph_lines graph_lines = [] global logtick logtick = None for key, value in polldata: if key in colorsDict and key in ds_names: graph_lines.append({ 'name': value, 'color': colorsDict[key], 'ds_name': ds_names[key] }) if key in scale_data: 
graph_lines[-1]['scale'] = scale_data[key] if value == '_logtick' and key in ds_names: logtick = ds_names[key] global credentials try: credentials = parser.items('authentication') except: credentials = [('testuser', '12345')] global logfile try: logfile = parser.get('conf', 'logfile') except: logfile = None try: webroot = parser.get('conf', 'webroot') except: webroot = '/' global system_image try: system_image = os.path.join(os.path.join(MEDIA_DIR, 'img'), parser.get('conf', 'system_image')) except: system_image = os.path.join(MEDIA_DIR, 'img/system.svg') global frontpage_widgets frontpage_widgets = [] try: for row, widgets in parser.items('frontpage_widgets'): frontpage_widgets.append([s.strip() for s in widgets.split(',')]) except ConfigParser.NoSectionError: frontpage_widgets = [['systemimage', 'events'], ['graph'], ['consumption7d', 'silolevel']] global timeChoices timeChoices = ['time1h', 'time3h', 'time8h', 'time24h', 'time3d', 'time1w'] global timeNames timeNames = [ t.replace(' ', ' ') for t in ['1 hour', '3 hours', '8 hours', '24 hours', '3 days', '1 week'] ] global timeSeconds timeSeconds = [ 3600, 3600 * 3, 3600 * 8, 3600 * 24, 3600 * 24 * 3, 3600 * 24 * 7 ] ft = False fc = False for a, b in polldata: if b == 'feeder_capacity': fc = True if b == 'feeder_time': ft = True if fc and ft: consumption_graph = True consumption_file = os.path.join(os.path.dirname(db), 'consumption.png') else: consumption_graph = False if websockets: #make sure WebSocketPlugin runs after daemonizer plugin (priority 65) #see cherrypy plugin documentation for default plugin priorities WebSocketPlugin.start.__func__.priority = 66 WebSocketPlugin(cherrypy.engine).subscribe() cherrypy.tools.websocket = WebSocketTool() try: port = int(parser.get('conf', 'port')) except: port = 8081 global_conf = { 'global': { #w'server.environment': 'debug', 'tools.sessions.on' : True, 'tools.sessions.timeout': 7200, 'tools.auth.on': True, 'server.socket_host': '0.0.0.0', 'server.socket_port': port, #'engine.autoreload.on': False, #'checker.on': False, #'tools.log_headers.on': False, #'request.show_tracebacks': False, 'request.show_mismatched_params': False, #'log.screen': False, 'engine.SIGHUP': None, 'engine.SIGTERM': None, } } app_conf = { '/media': { 'tools.staticdir.on': True, 'tools.staticdir.dir': MEDIA_DIR }, } if websockets: ws_conf = { '/ws': { 'tools.websocket.on': True, 'tools.websocket.handler_cls': WebSocket } } current_dir = os.path.dirname(os.path.abspath(__file__)) cherrypy.config.update(global_conf) # Only daemonize if asked to. if args.DAEMONIZE: # Don't print anything to stdout/sterr. cherrypy.config.update({ 'log.screen': False, 'engine.autoreload.on': False }) plugins.Daemonizer(cherrypy.engine).subscribe() cherrypy.tree.mount(PellMonWeb(), webroot, config=app_conf) if websockets: cherrypy.tree.mount(WsHandler(), os.path.join(webroot, 'websocket'), config=ws_conf) try: cherrypy.config.update({'log.access_file': accesslog}) except: pass try: cherrypy.config.update({'log.error_file': errorlog}) except: pass GObject.threads_init() # Always start the engine; this will start all other services try: cherrypy.engine.start() except: # Assume the error has been logged already via bus.log. sys.exit(1) else: # Needed to be able to use threads with a glib main loop running # A main loop is needed for dbus "name watching" to work main_loop = GLib.MainLoop() # cherrypy has it's own mainloop, cherrypy.engine.block, that # regularly calls engine.publish every 100ms. 
The most reliable # way to run dbus and cherrypy together seems to be to use the # glib mainloop for both, ie call engine.publish from the glib # mainloop instead of calling engine.block. def publish(): try: cherrypy.engine.publish('main') if cherrypy.engine.execv: main_loop.quit() cherrypy.engine._do_execv() except KeyboardInterrupt: pass return True # Use our own signal handler to stop on ctrl-c, seems to be simpler # than subscribing to cherrypy's signal handler def signal_handler(signal, frame): cherrypy.engine.exit() main_loop.quit() signal.signal(signal.SIGINT, signal_handler) # Handle cherrypy's main loop needs from here GLib.timeout_add(100, publish) dbus.start() try: main_loop.run() except KeyboardInterrupt: pass
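The publish callback above depends on GLib's source return convention. A standalone sketch of that convention (illustrative, not part of pellmonweb):

from gi.repository import GLib

loop = GLib.MainLoop()
state = {'ticks': 0}

def tick():
    # Returning True keeps the timeout source scheduled; returning False
    # (GLib.SOURCE_REMOVE) drops it, which here also ends the program.
    state['ticks'] += 1
    if state['ticks'] >= 5:
        loop.quit()
        return False
    return True

GLib.timeout_add(100, tick)   # call tick() roughly every 100 ms
loop.run()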
def main(args): # Standard GStreamer initialization GObject.threads_init() Gst.init(None) print("Creating Pipeline") pipeline = Gst.Pipeline() if not pipeline: sys.stderr.write(" Unable to create Pipeline") print("Creating streamux") streammux = create_element_or_error("nvstreammux", "Stream-muxer") pipeline.add(streammux) source_bin = create_source_bin( "file:/home/socieboy/edge/deepstream-examples/videos/front.mp4") if not source_bin: sys.stderr.write("Unable to create source bin") pipeline.add(source_bin) sinkpad = streammux.get_request_pad('sink_0') if not sinkpad: sys.stderr.write("Unable to create sink pad bin") srcpad = source_bin.get_static_pad("src") if not srcpad: sys.stderr.write("Unable to create src pad bin") srcpad.link(sinkpad) queue1 = create_element_or_error("queue", "queue1") # queue2 = create_element_or_error("queue","queue2") # queue3 = create_element_or_error("queue","queue3") # queue4 = create_element_or_error("queue","queue4") # queue5 = create_element_or_error("queue","queue5") queue6 = create_element_or_error("queue", "queue6") queue7 = create_element_or_error("queue", "queue7") pipeline.add(queue1) # pipeline.add(queue2) # pipeline.add(queue3) # pipeline.add(queue4) # pipeline.add(queue5) pipeline.add(queue6) pipeline.add(queue7) pgie = create_element_or_error("nvinfer", "primary-inference") nvosd = create_element_or_error("nvdsosd", "onscreendisplay") converter = create_element_or_error("nvvideoconvert", "convertor-1") nvosd.set_property('process-mode', 2) # nvosd.set_property('display-text', 0) transform = create_element_or_error("nvegltransform", "nvegl-transform") sink = create_element_or_error("nveglglessink", "nvvideo-renderer") streammux.set_property('width', 1920) streammux.set_property('height', 1080) streammux.set_property('batch-size', 1) streammux.set_property('batched-push-timeout', 4000000) sink.set_property('sync', False) sink.set_property('window-width', 1080) sink.set_property('window-height', 720) # pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt") pgie.set_property( 'config-file-path', "models/yolov3-nurawash-80/config_infer_primary_yoloV3.txt") print("Adding elements to Pipeline") pipeline.add(pgie) pipeline.add(converter) pipeline.add(nvosd) pipeline.add(transform) pipeline.add(sink) print("Linking elements in the Pipeline") streammux.link(pgie) pgie.link(converter) converter.link(nvosd) nvosd.link(transform) transform.link(sink) # create an event loop and feed gstreamer bus mesages to it loop = GObject.MainLoop() bus = pipeline.get_bus() bus.add_signal_watch() bus.connect("message", bus_call, loop) # analytics_src_pad = analytics.get_static_pad("src") # if not analytics_src_pad: # sys.stderr.write("Unable to get src pad") # else: # analytics_src_pad.add_probe(Gst.PadProbeType.BUFFER, nvanalytics_src_pad_buffer_probe, 0) # List the sources print("Starting pipeline") # start play back and listed to events pipeline.set_state(Gst.State.PLAYING) try: loop.run() except: pass # cleanup print("Exiting app") pipeline.set_state(Gst.State.NULL)
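This snippet and the multi-stream examples in this collection depend on a create_source_bin helper that is not reproduced here. A simplified sketch of the usual uridecodebin-based pattern (this follows the two-argument form used by the multi-stream snippets; the snippet directly above passes only a URI, and the video-only filtering is an assumption):

import sys
from gi.repository import Gst

def create_source_bin(index, uri):
    # Wrap uridecodebin in a Gst.Bin and expose a single ghost "src" pad,
    # retargeted once the decoder exposes its video pad.
    nbin = Gst.Bin.new("source-bin-%02d" % index)
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    uri_decode_bin.set_property("uri", uri)

    def on_pad_added(decodebin, decoder_src_pad, source_bin):
        caps = decoder_src_pad.get_current_caps()
        if caps.get_structure(0).get_name().startswith("video"):
            ghost_pad = source_bin.get_static_pad("src")
            if not ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to ghost pad\n")

    uri_decode_bin.connect("pad-added", on_pad_added, nbin)
    nbin.add(uri_decode_bin)
    nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    return nbin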
def main(args): # Check input arguments if len(args) != 4: sys.stderr.write("usage: %s config_file <jpeg/mjpeg file> " "<path to save seg images>\n" % args[0]) sys.exit(1) global folder_name folder_name = args[-1] if path.exists(folder_name): sys.stderr.write("The output folder %s already exists. " "Please remove it first.\n" % folder_name) sys.exit(1) os.mkdir(folder_name) config_file = args[1] num_sources = len(args) - 3 # Standard GStreamer initialization GObject.threads_init() Gst.init(None) # Create gstreamer elements # Create Pipeline element that will form a connection of other elements print("Creating Pipeline \n ") pipeline = Gst.Pipeline() if not pipeline: sys.stderr.write(" Unable to create Pipeline \n") # Source element for reading from the file print("Creating Source \n ") source = Gst.ElementFactory.make("filesrc", "file-source") if not source: sys.stderr.write(" Unable to create Source \n") # Since the data format in the input file is jpeg, # we need a jpegparser print("Creating jpegParser \n") jpegparser = Gst.ElementFactory.make("jpegparse", "jpeg-parser") if not jpegparser: sys.stderr.write("Unable to create jpegparser \n") # Use nvdec for hardware accelerated decode on GPU print("Creating Decoder \n") decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder") if not decoder: sys.stderr.write(" Unable to create Nvv4l2 Decoder \n") # Create nvstreammux instance to form batches from one or more sources. streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n") # Create segmentation for primary inference seg = Gst.ElementFactory.make("nvinfer", "primary-nvinference-engine") if not seg: sys.stderr.write("Unable to create primary inferene\n") # Create nvsegvisual for visualizing segmentation nvsegvisual = Gst.ElementFactory.make("nvsegvisual", "nvsegvisual") if not nvsegvisual: sys.stderr.write("Unable to create nvsegvisual\n") if is_aarch64(): transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform") print("Creating EGLSink \n") sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") print("Playing file %s " % args[2]) source.set_property('location', args[2]) if is_aarch64() and (args[2].endswith("mjpeg") or args[2].endswith("mjpg")): decoder.set_property('mjpeg', 1) streammux.set_property('width', 1920) streammux.set_property('height', 1080) streammux.set_property('batch-size', 1) streammux.set_property('batched-push-timeout', 4000000) seg.set_property('config-file-path', config_file) pgie_batch_size = seg.get_property("batch-size") if pgie_batch_size != num_sources: print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ", num_sources, " \n") seg.set_property("batch-size", num_sources) nvsegvisual.set_property('batch-size', num_sources) nvsegvisual.set_property('width', 512) nvsegvisual.set_property('height', 512) sink.set_property("qos", 0) print("Adding elements to Pipeline \n") pipeline.add(source) pipeline.add(jpegparser) pipeline.add(decoder) pipeline.add(streammux) pipeline.add(seg) pipeline.add(nvsegvisual) pipeline.add(sink) if is_aarch64(): pipeline.add(transform) # we link the elements together # file-source -> jpeg-parser -> nvv4l2-decoder -> # nvinfer -> nvsegvisual -> sink print("Linking elements in the Pipeline \n") source.link(jpegparser) jpegparser.link(decoder) sinkpad = streammux.get_request_pad("sink_0") if not sinkpad: sys.stderr.write(" 
Unable to get the sink pad of streammux \n") srcpad = decoder.get_static_pad("src") if not srcpad: sys.stderr.write(" Unable to get source pad of decoder \n") srcpad.link(sinkpad) streammux.link(seg) seg.link(nvsegvisual) if is_aarch64(): nvsegvisual.link(transform) transform.link(sink) else: nvsegvisual.link(sink) # create an event loop and feed gstreamer bus mesages to it loop = GObject.MainLoop() bus = pipeline.get_bus() bus.add_signal_watch() bus.connect("message", bus_call, loop) # Lets add probe to get informed of the meta data generated, we add probe to # the src pad of the inference element seg_src_pad = seg.get_static_pad("src") if not seg_src_pad: sys.stderr.write(" Unable to get src pad \n") else: seg_src_pad.add_probe(Gst.PadProbeType.BUFFER, seg_src_pad_buffer_probe, 0) # List the sources print("Now playing...") for i, source in enumerate(args[1:-1]): if i != 0: print(i, ": ", source) print("Starting pipeline \n") # start play back and listed to events pipeline.set_state(Gst.State.PLAYING) try: loop.run() except: pass # cleanup pipeline.set_state(Gst.State.NULL)
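The probe callbacks referenced throughout (osd_sink_pad_buffer_probe, seg_src_pad_buffer_probe, ...) are not part of this excerpt. The general shape of a DeepStream buffer probe, sketched under the assumption that the pyds bindings are installed:

import pyds
from gi.repository import Gst

def example_buffer_probe(pad, info, u_data):
    # Returning OK lets each buffer continue downstream unchanged.
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK

    # Walk the batch metadata attached upstream by nvstreammux.
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        print("frame", frame_meta.frame_num, "objects", frame_meta.num_obj_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK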
def run(self):
    GObject.threads_init()
    Gdk.threads_init()
    Gtk.main()
    gtkthread.quit.set()
def main(args): # Check input arguments if len(args) != 2: sys.stderr.write("usage: %s <media file or uri>\n" % args[0]) sys.exit(1) # Standard GStreamer initialization GObject.threads_init() Gst.init(None) # Create gstreamer elements # Create Pipeline element that will form a connection of other elements print("Creating Pipeline \n ") pipeline = Gst.Pipeline() if not pipeline: sys.stderr.write(" Unable to create Pipeline \n") # Source element for reading from the file print("Creating Source \n ") source = Gst.ElementFactory.make("filesrc", "file-source") if not source: sys.stderr.write(" Unable to create Source \n") # Since the data format in the input file is elementary h264 stream, # we need a h264parser print("Creating H264Parser \n") h264parser = Gst.ElementFactory.make("h264parse", "h264-parser") if not h264parser: sys.stderr.write(" Unable to create h264 parser \n") # Use nvdec_h264 for hardware accelerated decode on GPU print("Creating Decoder \n") decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder") if not decoder: sys.stderr.write(" Unable to create Nvv4l2 Decoder \n") # Create nvstreammux instance to form batches from one or more sources. streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n") # Use nvinfer to run inferencing on decoder's output, # behaviour of inferencing is set through config file pgie = Gst.ElementFactory.make("nvinfer", "primary-inference") if not pgie: sys.stderr.write(" Unable to create pgie \n") tracker = Gst.ElementFactory.make("nvtracker", "tracker") if not tracker: sys.stderr.write(" Unable to create tracker \n") sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine") if not sgie1: sys.stderr.write(" Unable to make sgie1 \n") sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine") if not sgie1: sys.stderr.write(" Unable to make sgie2 \n") sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine") if not sgie3: sys.stderr.write(" Unable to make sgie3 \n") nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor") if not nvvidconv: sys.stderr.write(" Unable to create nvvidconv \n") # Create OSD to draw on the converted RGBA buffer nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay") if not nvosd: sys.stderr.write(" Unable to create nvosd \n") # Finally render the osd output if is_aarch64(): transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform") print("Creating EGLSink \n") sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") print("Playing file %s " % args[1]) source.set_property('location', args[1]) streammux.set_property('width', 1920) streammux.set_property('height', 1080) streammux.set_property('batch-size', 1) streammux.set_property('batched-push-timeout', 4000000) #Set properties of pgie and sgie pgie.set_property('config-file-path', "dstest2_pgie_config.txt") sgie1.set_property('config-file-path', "dstest2_sgie1_config.txt") sgie2.set_property('config-file-path', "dstest2_sgie2_config.txt") sgie3.set_property('config-file-path', "dstest2_sgie3_config.txt") #Set properties of tracker config = configparser.ConfigParser() config.read('dstest2_tracker_config.txt') config.sections() for key in config['tracker']: if key == 'tracker-width': tracker_width = config.getint('tracker', key) tracker.set_property('tracker-width', tracker_width) if key == 'tracker-height': tracker_height = 
config.getint('tracker', key) tracker.set_property('tracker-height', tracker_height) if key == 'gpu-id': tracker_gpu_id = config.getint('tracker', key) tracker.set_property('gpu_id', tracker_gpu_id) if key == 'll-lib-file': tracker_ll_lib_file = config.get('tracker', key) tracker.set_property('ll-lib-file', tracker_ll_lib_file) if key == 'll-config-file': tracker_ll_config_file = config.get('tracker', key) tracker.set_property('ll-config-file', tracker_ll_config_file) if key == 'enable-batch-process': tracker_enable_batch_process = config.getint('tracker', key) tracker.set_property('enable_batch_process', tracker_enable_batch_process) print("Adding elements to Pipeline \n") pipeline.add(source) pipeline.add(h264parser) pipeline.add(decoder) pipeline.add(streammux) pipeline.add(pgie) pipeline.add(tracker) pipeline.add(sgie1) pipeline.add(sgie2) pipeline.add(sgie3) pipeline.add(nvvidconv) pipeline.add(nvosd) pipeline.add(sink) if is_aarch64(): pipeline.add(transform) # we link the elements together # file-source -> h264-parser -> nvh264-decoder -> # nvinfer -> nvvidconv -> nvosd -> video-renderer print("Linking elements in the Pipeline \n") source.link(h264parser) h264parser.link(decoder) sinkpad = streammux.get_request_pad("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") srcpad = decoder.get_static_pad("src") if not srcpad: sys.stderr.write(" Unable to get source pad of decoder \n") srcpad.link(sinkpad) streammux.link(pgie) pgie.link(tracker) tracker.link(sgie1) sgie1.link(sgie2) sgie2.link(sgie3) sgie3.link(nvvidconv) #streammux.link(sgie1) #sgie1.link(nvvidconv) nvvidconv.link(nvosd) if is_aarch64(): nvosd.link(transform) transform.link(sink) else: nvosd.link(sink) # create and event loop and feed gstreamer bus mesages to it loop = GObject.MainLoop() bus = pipeline.get_bus() bus.add_signal_watch() bus.connect("message", bus_call, loop) # Lets add probe to get informed of the meta data generated, we add probe to # the sink pad of the osd element, since by that time, the buffer would have # had got all the metadata. osdsinkpad = nvosd.get_static_pad("sink") if not osdsinkpad: sys.stderr.write(" Unable to get sink pad of nvosd \n") osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) print("Starting pipeline \n") # start play back and listed to events pipeline.set_state(Gst.State.PLAYING) try: loop.run() except: pass # cleanup pipeline.set_state(Gst.State.NULL)
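The tracker block above pulls its settings from dstest2_tracker_config.txt, reading one key at a time. An illustrative [tracker] section parsed the same way (paths and values are placeholders, not the original file):

import configparser

EXAMPLE_TRACKER_CONFIG = """
[tracker]
tracker-width=640
tracker-height=480
gpu-id=0
ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvdcf.so
ll-config-file=tracker_config.yml
enable-batch-process=1
"""

config = configparser.ConfigParser()
config.read_string(EXAMPLE_TRACKER_CONFIG)
for key in config['tracker']:
    print(key, "=", config.get('tracker', key))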
def main(): print('Tracker Example') # Standard GStreamer initialization GObject.threads_init() Gst.init(None) # Create Pipeline Element pipeline = Gst.Pipeline() if not pipeline: print("Unable to create Pipeline") return False # Create GST Elements source = create_element_or_error("nvarguscamerasrc", "camera-source") srcCaps = create_element_or_error("capsfilter", "source-caps") srcCaps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, framerate=30/1, format=(string)NV12")) streammux = create_element_or_error("nvstreammux", "Stream-muxer") pgie = create_element_or_error("nvinfer", "primary-inference") tracker = create_element_or_error("nvtracker", "tracker") convertor = create_element_or_error("nvvideoconvert", "convertor-1") nvosd = create_element_or_error("nvdsosd", "onscreendisplay") convertor2 = create_element_or_error("nvvideoconvert", "converter-2") encoder = create_element_or_error("nvv4l2h264enc", "encoder") parser = create_element_or_error("h264parse", "parser") muxer = create_element_or_error("flvmux", "muxer") queue = create_element_or_error("queue", "queue-sink") sink = create_element_or_error("rtmpsink", "sink") # Set Element Properties source.set_property('sensor-id', 0) source.set_property('bufapi-version', True) encoder.set_property('insert-sps-pps', True) encoder.set_property('bitrate', 4000000) streammux.set_property('live-source', 1) streammux.set_property('width', 1080) streammux.set_property('height', 720) streammux.set_property('num-surfaces-per-frame', 1) streammux.set_property('nvbuf-memory-type', 4) streammux.set_property('batch-size', 1) streammux.set_property('batched-push-timeout', 4000000) pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt") tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvdcf.so') tracker.set_property('gpu-id', 0) tracker.set_property('enable-batch-process', 1) tracker.set_property('ll-config-file', '/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/tracker_config.yml') muxer.set_property('streamable', True) sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/csi-camera') # Add Elemements to Pipielin print("Adding elements to Pipeline") pipeline.add(source) pipeline.add(srcCaps) pipeline.add(streammux) pipeline.add(pgie) pipeline.add(tracker) pipeline.add(convertor) pipeline.add(nvosd) pipeline.add(convertor2) pipeline.add(encoder) pipeline.add(parser) pipeline.add(muxer) pipeline.add(queue) pipeline.add(sink) sinkpad = streammux.get_request_pad("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux") # Link the elements together: print("Linking elements in the Pipeline") source.link(srcCaps) srcCaps.link(streammux) streammux.link(pgie) pgie.link(tracker) tracker.link(convertor) convertor.link(nvosd) nvosd.link(convertor2) convertor2.link(encoder) encoder.link(parser) parser.link(muxer) muxer.link(queue) queue.link(sink) # Create an event loop and feed gstreamer bus mesages to it loop = GObject.MainLoop() bus = pipeline.get_bus() bus.add_signal_watch() bus.connect ("message", bus_call, loop) print('Create OSD Sink Pad') nvosd_sinkpad = nvosd.get_static_pad("sink") if not nvosd_sinkpad: sys.stderr.write("Unable to get sink pad of nvosd") nvosd_sinkpad.add_probe(Gst.PadProbeType.BUFFER, sink_pad_buffer_probe, 0) # Start play back and listen to events print("Starting pipeline") pipeline.set_state(Gst.State.PLAYING) try: 
loop.run() except: pass # Cleanup pipeline.set_state(Gst.State.NULL)
def main(): # Standard GStreamer initialization GObject.threads_init() Gst.init(None) # Create Pipeline Element print("Creating Pipeline") pipeline = Gst.Pipeline() if not pipeline: sys.stderr.write(" Unable to create Pipeline") source = create_element_or_error("nvarguscamerasrc", "camera-source") streammux = create_element_or_error("nvstreammux", "Stream-muxer") pgie = create_element_or_error("nvinfer", "primary-inference") convertor = create_element_or_error("nvvideoconvert", "convertor-1") nvosd = create_element_or_error("nvdsosd", "onscreendisplay") sink = create_element_or_error("nvoverlaysink", "egl-overlay") # Set Element Properties source.set_property('sensor-id', 0) source.set_property('bufapi-version', True) source.set_property('bufapi-version', True) streammux.set_property('live-source', 1) streammux.set_property('width', 1280) streammux.set_property('height', 720) streammux.set_property('num-surfaces-per-frame', 1) streammux.set_property('batch-size', 1) streammux.set_property('batched-push-timeout', 4000000) pgie.set_property( 'config-file-path', "./nv-inferance-config-files/config_infer_primary_yolov3.txt") # pgie.set_property('config-file-path', "./nv-inferance-config-files/config_infer_primary_trafficcamnet.txt") sink.set_property('sync', 0) # Add Elemements to Pipielin print("Adding elements to Pipeline") pipeline.add(source) pipeline.add(streammux) pipeline.add(pgie) pipeline.add(convertor) pipeline.add(nvosd) pipeline.add(sink) sinkpad = streammux.get_request_pad("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux") # Link the elements together: print("Linking elements in the Pipeline") source.link(streammux) streammux.link(pgie) pgie.link(convertor) convertor.link(nvosd) nvosd.link(sink) # Create an event loop and feed gstreamer bus mesages to it loop = GObject.MainLoop() bus = pipeline.get_bus() bus.add_signal_watch() bus.connect("message", bus_call, loop) # Lets add probe to get informed of the meta data generated, we add probe to # the sink pad of the osd element, since by that time, the buffer would have # had got all the metadata. # print('Create OSD Sink Pad') # osdsinkpad = nvosd.get_static_pad("sink") # if not osdsinkpad: # sys.stderr.write(" Unable to get sink pad of nvosd") # osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) # Start play back and listen to events print("Starting pipeline") pipeline.set_state(Gst.State.PLAYING) try: loop.run() except: pass # Cleanup pipeline.set_state(Gst.State.NULL)
def main(root_path): """ Called at application start. Initializes application with a default project. """ # DEBUG: Direct output to log file if log file set if _log_file != None: log_print_output_to_file() set_quiet_if_requested() print("Application version: " + editorstate.appversion) # Print OS, Python version and GTK+ version try: os_release_file = open("/etc/os-release", "r") os_text = os_release_file.read() s_index = os_text.find("PRETTY_NAME=") e_index = os_text.find("\n", s_index) print("OS: " + os_text[s_index + 13:e_index - 1]) except: pass print("Python", sys.version) gtk_version = "%s.%s.%s" % (Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version()) print("GTK+ version:", gtk_version) editorstate.gtk_version = gtk_version try: editorstate.mlt_version = mlt.LIBMLT_VERSION except: editorstate.mlt_version = "0.0.99" # magic string for "not found" # Create user folders if needed and determine if we're using xdg or dotfile user folders. userfolders.init() # Set paths. respaths.set_paths(root_path) # Load editor prefs and list of recent projects editorpersistance.load() if editorpersistance.prefs.theme != appconsts.LIGHT_THEME: respaths.apply_dark_theme() if editorpersistance.prefs.display_all_audio_levels == False: editorstate.display_all_audio_levels = False editorpersistance.save() # Init translations module with translations data translations.init_languages() translations.load_filters_translations() mlttransitions.init_module() # Apr-2017 - SvdB - Keyboard shortcuts shortcuts.load_shortcut_files() shortcuts.load_shortcuts() # Aug-2019 - SvdB - AS # The test for len != 4 is to make sure that if we change the number of values below the prefs are reset to the correct list # So when we add or remove a value, make sure we also change the len test # Only use positive numbers. if (not editorpersistance.prefs.AUTO_SAVE_OPTS or len(editorpersistance.prefs.AUTO_SAVE_OPTS) != 4): print("Initializing Auto Save Options") editorpersistance.prefs.AUTO_SAVE_OPTS = ((0, _("No Autosave")), (1, _("1 min")), (2, _("2 min")), (5, _("5 min"))) # We respaths and translations data available so we need to init in a function. workflow.init_data() # RHEL7/CentOS compatibility fix if gtk_version == "3.8.8": GObject.threads_init() # Init gtk threads Gdk.threads_init() Gdk.threads_enter() # Themes if editorpersistance.prefs.theme == appconsts.FLOWBLADE_THEME: success = gui.apply_gtk_css() if not success: editorpersistance.prefs.theme = appconsts.LIGHT_THEME editorpersistance.save() if editorpersistance.prefs.theme != appconsts.LIGHT_THEME: Gtk.Settings.get_default().set_property( "gtk-application-prefer-dark-theme", True) # Load drag'n'drop images dnd.init() # Save screen size data and modify rendering based on screen size/s and number of monitors. scr_w, scr_h = _set_screen_size_data() _set_draw_params() # Refuse to run on too small screen. if scr_w < 1151 or scr_h < 767: _too_small_screen_exit() return # Splash screen if editorpersistance.prefs.display_splash_screen == True: show_splash_screen() # Init MLT framework repo = mlt.Factory().init() processutils.prepare_mlt_repo(repo) # Set numeric locale to use "." as radix, MLT initilizes this to OS locale and this causes bugs. locale.setlocale(locale.LC_NUMERIC, 'C') # Check for codecs and formats on the system. mltenv.check_available_features(repo) renderconsumer.load_render_profiles() # Load filter and compositor descriptions from xml files. 
mltfilters.load_filters_xml(mltenv.services) mlttransitions.load_compositors_xml(mltenv.transitions) # Replace some services if better replacements available. mltfilters.replace_services(mltenv.services) # Create list of available mlt profiles. mltprofiles.load_profile_list() # If we have crashed we could have large amount of disk space wasted unless we delete all files here. tlinerender.app_launch_clean_up() # Save assoc file path if found in arguments. global assoc_file_path assoc_file_path = get_assoc_file_path() # There is always a project open, so at startup we create a default project. # Set default project as the project being edited. editorstate.project = projectdata.get_default_project() check_crash = True # Audiomonitoring being available needs to be known before GUI creation. audiomonitoring.init(editorstate.project.profile) # Set trim view mode to current default value. editorstate.show_trim_view = editorpersistance.prefs.trim_view_default # Check for tools and init tools integration. gmic.test_availablity() toolsintegration.init() # Create player object. create_player() # Create main window and set widget handles in gui.py for more convenient reference. create_gui() # Inits widgets with project data. init_project_gui() # Inits widgets with current sequence data. init_sequence_gui() # Launch player now that data and gui exist launch_player() # Editor and modules need some more initializing. init_editor_state() # Tracks need to be recentered if window is resized. # Connect listener for this now that the tline panel size allocation is sure to be available. global window_resize_id, window_state_id window_resize_id = gui.editor_window.window.connect( "size-allocate", lambda w, e: updater.window_resized()) window_state_id = gui.editor_window.window.connect( "window-state-event", lambda w, e: updater.window_resized()) # Get existing autosave files autosave_files = get_autosave_files() # Show splash if ((editorpersistance.prefs.display_splash_screen == True) and len(autosave_files) == 0 ) and not editorstate.runtime_version_greater_then_test_version( editorpersistance.prefs.workflow_dialog_last_version_shown, editorstate.appversion): global splash_timeout_id splash_timeout_id = GLib.timeout_add(2600, destroy_splash_screen) splash_screen.show_all() appconsts.SAVEFILE_VERSION = projectdata.SAVEFILE_VERSION # THIS IS A QUESTIONABLE IDEA TO SIMPLIFY IMPORTS, NOT DRY. WHEN DOING TOOLS THAT RUN IN ANOTHER PROCESSES AND SAVE PROJECTS, THIS LINE NEEDS TO BE THERE ALSO. # Every running instance has unique autosave file which is deleted at exit set_instance_autosave_id() # Existance of autosave file hints that program was exited abnormally. if check_crash == True and len(autosave_files) > 0: if len(autosave_files) == 1: GObject.timeout_add(10, autosave_recovery_dialog) else: GObject.timeout_add(10, autosaves_many_recovery_dialog) else: tlinerender.init_session() start_autosave() projectaction.clear_changed_since_last_save_flags() # We prefer to monkeypatch some callbacks into some modules, usually to # maintain a simpler and/or non-circular import structure. monkeypatch_callbacks() # File in assoc_file_path is opened after very short delay. 
if not (check_crash == True and len(autosave_files) > 0): if assoc_file_path != None: print("Launch assoc file:", assoc_file_path) global assoc_timeout_id assoc_timeout_id = GObject.timeout_add(10, open_assoc_file) if editorpersistance.prefs.theme == appconsts.FLOWBLADE_THEME: gui.apply_flowblade_theme_fixes() # SDL 2 consumer needs to created after Gtk.main() has run enough for window to be visble #if editorstate.get_sdl_version() == editorstate.SDL_2: # needs more state considerion still # print "SDL2 timeout launch" # global sdl2_timeout_id # sdl2_timeout_id = GObject.timeout_add(1500, create_sdl_2_consumer) # In PositionNumericalEntries we are using Gtk.Entry objects in a way that works for us nicely, but is somehow "error" for Gtk, so we just kill this. Gtk.Settings.get_default().set_property("gtk-error-bell", False) # Show first run worflow info dialog if not shown for this version of application. if editorstate.runtime_version_greater_then_test_version( editorpersistance.prefs.workflow_dialog_last_version_shown, editorstate.appversion): GObject.timeout_add(500, show_worflow_info_dialog) # Handle userfolders init error and data copy. if userfolders.get_init_error() != None: GObject.timeout_add(500, show_user_folders_init_error_dialog, userfolders.get_init_error()) elif userfolders.data_copy_needed(): GObject.timeout_add(500, show_user_folders_copy_dialog) else: print("No user folders actions needed.") global disk_cache_timeout_id disk_cache_timeout_id = GObject.timeout_add(2500, check_disk_cache_size) # Launch gtk+ main loop Gtk.main() Gdk.threads_leave()
def main(): cameras_list = [ { "source": 0, "name": "Camera 1", }, { "source": 1, "name": "Camera 2" }, ] GObject.threads_init() Gst.init(None) pipeline = Gst.Pipeline() if not pipeline: print("Unable to create Pipeline") exit(0) streammux = create_element_or_error("nvstreammux", "stream-muxer") pipeline.add(streammux) for camera in cameras_list: source = create_element_or_error("nvarguscamerasrc", "source-" + camera['name']) source.set_property('sensor-id', camera['source']) source.set_property('bufapi-version', True) caps = create_element_or_error("capsfilter", "source-caps-source-" + camera['name']) caps.set_property( "caps", Gst.Caps.from_string( "video/x-raw(memory:NVMM),width=1920,height=1080,framerate=60/1,format=NV12" )) pipeline.add(source) pipeline.add(caps) srcpad = source.get_static_pad("src") sinkpad = streammux.get_request_pad('sink_' + str(camera['source'])) if not sinkpad: print("Unable to create source sink pad") exit(0) if not srcpad: print("Unable to create source src pad") exit(0) srcpad.link(sinkpad) pgie = create_element_or_error("nvinfer", "primary-inference") convertor = create_element_or_error("nvvideoconvert", "converter-1") nvosd = create_element_or_error("nvdsosd", "onscreendisplay") transform = create_element_or_error("nvegltransform", "nvegl-transform") tee = create_element_or_error("tee", "tee") queue = create_element_or_error("queue", "queue1") queue2 = create_element_or_error("queue", "queue2") # Set Element Properties streammux.set_property('live-source', 1) streammux.set_property('width', 1920) streammux.set_property('height', 1080) streammux.set_property('num-surfaces-per-frame', 1) streammux.set_property('batch-size', 1) streammux.set_property('batched-push-timeout', 4000000) pgie.set_property( 'config-file-path', "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt" ) # Add Elemements to Pipielin print("Adding elements to Pipeline") pipeline.add(pgie) pipeline.add(convertor) pipeline.add(nvosd) pipeline.add(transform) # Create outputs for camera in cameras_list: sink = create_element_or_error("nveglglessink", "nvvideo-renderer-" + camera['name']) # sink.set_property("qos", 0) pipeline.add(sink) srcpad = streammux.get_pad_template("src_%u") srcpad.link # Link the elements together: print("Linking elements in the Pipeline") streammux.link(pgie) pgie.link(convertor) convertor.link(nvosd) nvosd.link(transform) # transform.link(sink) # Create an event loop and feed gstreamer bus mesages to it loop = GObject.MainLoop() # Start play back and listen to events print("Starting pipeline") pipeline.set_state(Gst.State.PLAYING) try: loop.run() except: pass # Cleanup pipeline.set_state(Gst.State.NULL)
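Note that in the snippet above the per-camera nveglglessink elements are never linked: nvstreammux exposes a single static src pad, so streammux.get_pad_template("src_%u") does not yield usable source pads. One possible way to complete the display path (a sketch, not the original author's fix) is to keep the batched stream on one renderer:

# Completion sketch for the pipeline above: render the batched stream once.
# Splitting it back into one window per camera would need an nvstreamdemux
# (or nvmultistreamtiler) stage instead.
sink = create_element_or_error("nveglglessink", "nvvideo-renderer")
sink.set_property("sync", False)
pipeline.add(sink)
transform.link(sink)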
def main(args):
    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    msgconv = Gst.ElementFactory.make("nvmsgconv", "nvmsg-converter")
    if not msgconv:
        sys.stderr.write(" Unable to create msgconv \n")

    msgbroker = Gst.ElementFactory.make("nvmsgbroker", "nvmsg-broker")
    if not msgbroker:
        sys.stderr.write(" Unable to create msgbroker \n")

    tee = Gst.ElementFactory.make("tee", "nvsink-tee")
    if not tee:
        sys.stderr.write(" Unable to create tee \n")

    queue1 = Gst.ElementFactory.make("queue", "nvtee-que1")
    if not queue1:
        sys.stderr.write(" Unable to create queue1 \n")

    queue2 = Gst.ElementFactory.make("queue", "nvtee-que2")
    if not queue2:
        sys.stderr.write(" Unable to create queue2 \n")

    if no_display:
        print("Creating FakeSink \n")
        sink = Gst.ElementFactory.make("fakesink", "fakesink")
        if not sink:
            sys.stderr.write(" Unable to create fakesink \n")
    else:
        if is_aarch64():
            transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")

    print("Playing file %s " % input_file)
    source.set_property('location', input_file)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', PGIE_CONFIG_FILE)
    msgconv.set_property('config', MSCONV_CONFIG_FILE)
    msgconv.set_property('payload-type', schema_type)
    msgbroker.set_property('proto-lib', proto_lib)
    msgbroker.set_property('conn-str', conn_str)
    if cfg_file is not None:
        msgbroker.set_property('config', cfg_file)
    if topic is not None:
        msgbroker.set_property('topic', topic)
    msgbroker.set_property('sync', False)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(tee)
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(msgconv)
    pipeline.add(msgbroker)
    pipeline.add(sink)
    if is_aarch64() and not no_display:
        pipeline.add(transform)

    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)

    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(tee)
    queue1.link(msgconv)
    msgconv.link(msgbroker)
    if is_aarch64() and not no_display:
        queue2.link(transform)
        transform.link(sink)
    else:
        queue2.link(sink)

    sink_pad = queue1.get_static_pad("sink")
    tee_msg_pad = tee.get_request_pad('src_%u')
    tee_render_pad = tee.get_request_pad("src_%u")
    if not tee_msg_pad or not tee_render_pad:
        sys.stderr.write("Unable to get request pads\n")
    tee_msg_pad.link(sink_pad)
    sink_pad = queue2.get_static_pad("sink")
    tee_render_pad.link(sink_pad)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    print("Starting pipeline \n")
    # Start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass

    # Cleanup
    pyds.unset_callback_funcs()
    pipeline.set_state(Gst.State.NULL)
def init_threads(self):
    self.checking_data_t = threading.Thread(target=self.checking_data)
    self.checking_data_t.daemon = True
    GObject.threads_init()
def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write(
            "usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" % args[0])
        sys.exit(1)

    """
    for i in range(0, len(args) - 2):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 2
    """
    # TODO: remove hack past first parameter and last with -4
    number_sources = len(args) - 1 - 5
    for i in range(0, number_sources):
        fps_streams["stream{0}".format(i)] = GETFPS(i)

    # TODO: modify index not offset; see launch.json
    width = int(args[len(args) - 3])
    height = int(args[len(args) - 1])
    MUXER_OUTPUT_WIDTH = width
    MUXER_OUTPUT_HEIGHT = height
    TILED_OUTPUT_WIDTH = width
    TILED_OUTPUT_HEIGHT = height
    #############################################

    global folder_name
    folder_name = args[-1]
    if path.exists(folder_name):
        sys.stderr.write(
            "The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)
    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create GStreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Add nvvidconv1 and filter1 to convert the frames to RGBA,
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")

    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', width)
    streammux.set_property('height', height)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest_imagedata_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("sync", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    # TODO: determine this code's meaning
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get sink pad of tiler \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER,
                                 tiler_sink_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # Start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass

    # Cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
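The multi-source snippet above relies on a create_source_bin() helper that is not included in this excerpt. A minimal sketch of such a helper in the style of the DeepStream Python samples; the bin naming, the video-only pad selection, and the ghost-pad wiring are assumptions inferred from how the function is used:

def create_source_bin(index, uri):
    # Wrap a uridecodebin in a Gst.Bin that exposes a single "src" ghost pad,
    # so the caller can link it to one nvstreammux sink pad.
    nbin = Gst.Bin.new("source-bin-%02d" % index)
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        return None
    uri_decode_bin.set_property("uri", uri)

    def cb_newpad(decodebin, decoder_src_pad, data):
        # Only expose video pads; retarget the bin's ghost pad at the new pad.
        caps = decoder_src_pad.get_current_caps()
        gstname = caps.get_structure(0).get_name()
        if gstname.find("video") != -1:
            bin_ghost_pad = data.get_static_pad("src")
            bin_ghost_pad.set_target(decoder_src_pad)

    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    nbin.add(uri_decode_bin)
    nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    return nbin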
def run():
    GObject.threads_init()
    Gst.init(None)
    main()
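GObject.threads_init() has been a deprecated no-op since PyGObject 3.10.2, so wrappers like the one above keep it only for compatibility with older installations. A defensive variant of this wrapper (a sketch, not part of the original code; the AttributeError guard is purely precautionary in case a PyGObject build without the symbol is encountered):

def run():
    try:
        GObject.threads_init()  # no-op on PyGObject >= 3.10.2
    except AttributeError:
        pass  # assumption: a build that no longer exposes the deprecated symbol
    Gst.init(None)
    main()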
def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create GStreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    source = make_elm_or_print_err("filesrc", "file-source", "Source")

    # Since the data format in the input file is an elementary h264 stream,
    # we need an h264parser
    h264parser = make_elm_or_print_err("h264parse", "h264-parser", "H264Parser")

    # Use nvdec_h264 for hardware accelerated decode on GPU
    decoder = make_elm_or_print_err("nvv4l2decoder", "nvv4l2-decoder", "Decoder")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = make_elm_or_print_err("nvstreammux", "Stream-muxer", "NvStreamMux")

    # Use nvinferserver to run inferencing on the decoder's output;
    # the inferencing behaviour is set through the config file
    pgie = make_elm_or_print_err("nvinferserver", "primary-inference", "Nvinferserver")

    # Use a converter to convert from NV12 to RGBA as required by nvosd
    nvvidconv = make_elm_or_print_err("nvvideoconvert", "convertor", "Nvvidconv")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = make_elm_or_print_err("nvdsosd", "onscreendisplay", "OSD (nvosd)")

    # Finally encode and save the osd output
    queue = make_elm_or_print_err("queue", "queue", "Queue")
    nvvidconv2 = make_elm_or_print_err("nvvideoconvert", "convertor2", "Converter 2 (nvvidconv2)")
    capsfilter = make_elm_or_print_err("capsfilter", "capsfilter", "capsfilter")
    caps = Gst.Caps.from_string("video/x-raw, format=I420")
    capsfilter.set_property("caps", caps)

    # On Jetson, there is a problem with the encoder failing to initialize
    # due to a limitation on TLS usage. To work around this, preload libgomp.
    # Add a reminder here in case the user forgets.
    preload_reminder = "If the following error is encountered:\n" + \
        "/usr/lib/aarch64-linux-gnu/libgomp.so.1: cannot allocate memory in static TLS block\n" + \
        "Preload the offending library:\n" + \
        "export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1\n"
    encoder = make_elm_or_print_err("avenc_mpeg4", "encoder", "Encoder", preload_reminder)
    encoder.set_property("bitrate", 2000000)

    codeparser = make_elm_or_print_err("mpeg4videoparse", "mpeg4-parser", 'Code Parser')
    container = make_elm_or_print_err("qtmux", "qtmux", "Container")
    sink = make_elm_or_print_err("filesink", "filesink", "Sink")
    sink.set_property("location", OUTPUT_VIDEO_NAME)
    sink.set_property("sync", 0)
    sink.set_property("async", 0)

    print("Playing file %s " % args[1])
    source.set_property("location", args[1])
    streammux.set_property("width", IMAGE_WIDTH)
    streammux.set_property("height", IMAGE_HEIGHT)
    streammux.set_property("batch-size", 1)
    streammux.set_property("batched-push-timeout", 4000000)
    pgie.set_property("config-file-path", "dstest_ssd_nopostprocess.txt")

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(queue)
    pipeline.add(nvvidconv2)
    pipeline.add(capsfilter)
    pipeline.add(encoder)
    pipeline.add(codeparser)
    pipeline.add(container)
    pipeline.add(sink)

    # Link the elements together:
    # file-source -> h264-parser -> nvh264-decoder ->
    # nvinfer -> nvvidconv -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)

    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(queue)
    queue.link(nvvidconv2)
    nvvidconv2.link(capsfilter)
    capsfilter.link(encoder)
    encoder.link(codeparser)
    codeparser.link(container)
    container.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Add a probe on the primary-infer source pad to get inference output tensors
    pgiesrcpad = pgie.get_static_pad("src")
    if not pgiesrcpad:
        sys.stderr.write(" Unable to get src pad of primary infer \n")
    pgiesrcpad.add_probe(Gst.PadProbeType.BUFFER, pgie_src_pad_buffer_probe, 0)

    # Add a probe to be informed of the generated metadata; we add the probe to
    # the sink pad of the osd element, since by that time the buffer will have
    # received all of the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # Start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
def main():
    parser = argparse.ArgumentParser(description='King Phisher Client GUI', conflict_handler='resolve')
    utilities.argp_add_args(parser, default_root='KingPhisher')
    startup.argp_add_client(parser)
    arguments = parser.parse_args()

    # basic runtime checks
    if sys.version_info < (3, 4):
        color.print_error('the Python version is too old (minimum required is 3.4)')
        return 0
    if Gtk.check_version(3, 14, 0):
        color.print_error('the GTK+ version is too old (minimum required is 3.14)')
        return 0
    if sys.platform.startswith('linux') and not os.environ.get('DISPLAY'):
        color.print_error('no display was detected, this must be run with an interactive X session')
        return 0

    config_file = arguments.config_file
    use_plugins = arguments.use_plugins
    use_style = arguments.use_style
    del arguments, parser

    logger = logging.getLogger('KingPhisher.Client.CLI')
    if sys.platform.startswith('linux') and not os.getuid():
        logger.warning('it is not necessary to run the king phisher client as root')

    find.init_data_path('client')
    if not gui_utilities.which_glade():
        color.print_error('unable to locate the glade ui data file')
        return 0

    logger.debug("king phisher version: {0} python version: {1}.{2}.{3}".format(
        version.version, sys.version_info[0], sys.version_info[1], sys.version_info[2]))
    logger.debug("client running in process: {0} main tid: 0x{1:x}".format(
        os.getpid(), threading.current_thread().ident))

    start_time = time.time()
    logger.debug('using ui data from glade file: ' + gui_utilities.which_glade())
    try:
        app = application.KingPhisherClientApplication(config_file=config_file,
                                                       use_plugins=use_plugins,
                                                       use_style=use_style)
    except Exception as error:
        logger.critical("initialization error: {0} ({1})".format(
            error.__class__.__name__, getattr(error, 'message', 'n/a')))
        color.print_error('failed to initialize the King Phisher client')
        return 0
    logger.debug("client loaded in {0:.2f} seconds".format(time.time() - start_time))

    GObject.threads_init()
    return app.run([])
def main():
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")

    # Create GST source
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "convertor-2")

    # Create Gst tee and branch queues
    tee = create_element_or_error("tee", "tee")
    streaming_queue = create_element_or_error("queue", "streaming_queue")
    recording_queue = create_element_or_error("queue", "recording_queue")

    # Create Gst elements for the streaming branch
    s_encoder = create_element_or_error("nvv4l2h264enc", "streaming-encoder")
    s_parser = create_element_or_error("h264parse", "streaming-parser")
    s_muxer = create_element_or_error("flvmux", "streaming-muxer")
    s_sink = create_element_or_error("rtmpsink", "streaming-sink")

    # Create Gst elements for the recording branch
    r_encoder = create_element_or_error('nvv4l2h265enc', 'encoder')
    r_parser = create_element_or_error('h265parse', 'parser')
    r_sink = create_element_or_error('filesink', 'sink')

    # Set element properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt")

    s_sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/streaming-test')

    r_encoder.set_property('bitrate', 8000000)
    r_sink.set_property('location', 'video_' + str(datetime.datetime.utcnow().date()) + '.mp4')

    # Add elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(tee)
    pipeline.add(streaming_queue)
    pipeline.add(s_encoder)
    pipeline.add(s_parser)
    pipeline.add(s_muxer)
    pipeline.add(s_sink)
    pipeline.add(recording_queue)
    pipeline.add(r_encoder)
    pipeline.add(r_parser)
    pipeline.add(r_sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(tee)

    # Streaming queue
    streaming_queue.link(s_encoder)
    s_encoder.link(s_parser)
    s_parser.link(s_muxer)
    s_muxer.link(s_sink)

    # Recording queue
    recording_queue.link(r_encoder)
    r_encoder.link(r_parser)
    r_parser.link(r_sink)

    # Get pad templates from the tee
    tee_src_pad_template = tee.get_pad_template("src_%u")

    # Request a tee source pad for the streaming queue
    tee_streaming_pad = tee.request_pad(tee_src_pad_template, None, None)
    streaming_queue_pad = streaming_queue.get_static_pad("sink")

    # Request a tee source pad for the recording queue
    tee_recording_pad = tee.request_pad(tee_src_pad_template, None, None)
    recording_queue_pad = recording_queue.get_static_pad("sink")

    # Link the tee branches
    if (tee_streaming_pad.link(streaming_queue_pad) != Gst.PadLinkReturn.OK
            or tee_recording_pad.link(recording_queue_pad) != Gst.PadLinkReturn.OK):
        print("ERROR: Tees could not be linked")
        sys.exit(1)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
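Because the recording branch above ends in a filesink, stopping the pipeline by jumping straight to the NULL state can leave the written file without a proper end-of-stream. A sketch of a cleaner shutdown path; the helper name, the SIGINT wiring, and the five-second fallback are assumptions and not part of the original snippet (assumes GLib is imported from gi.repository):

import signal

def request_shutdown(pipeline, loop):
    # Send EOS so downstream elements can drain and finalize their output;
    # the bus "message" callback is expected to quit the loop on EOS.
    pipeline.send_event(Gst.Event.new_eos())
    # Fallback: quit after a grace period even if EOS never reaches the bus.
    GLib.timeout_add_seconds(5, loop.quit)

# Hypothetical wiring, installed before loop.run():
# signal.signal(signal.SIGINT, lambda *_: request_shutdown(pipeline, loop))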