Example #1
0
def main():
    """Build and run a two-camera DeepStream pipeline.

    Each nvarguscamerasrc is constrained to 1080p60 NV12 by a capsfilter,
    batched by nvstreammux, run through inference, tracking and analytics,
    then tiled 2x2, drawn on by nvdsosd and rendered with nveglglessink.
    Blocks in a GLib main loop until interrupted.
    """
    cameras_list = [
        {"source": 0, "name": "Camera 1"},
        {"source": 1, "name": "Camera 2"},
    ]

    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    if not pipeline:
        print("Unable to create Pipeline")
        exit(1)  # fixed: error paths now exit non-zero

    # Muxer: batches the per-camera streams into one batched stream.
    # NOTE(review): batch-size is 1 although two sources are attached —
    # confirm it should not be len(cameras_list).
    streammux = create_element_or_error("nvstreammux", "stream-muxer")
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pipeline.add(streammux)

    # Sources: camera -> capsfilter -> muxer request pad, one per camera.
    for camera in cameras_list:
        source = create_element_or_error("nvarguscamerasrc", "source-" + camera['name'])
        source.set_property('sensor-id', camera['source'])
        source.set_property('bufapi-version', True)
        caps = create_element_or_error("capsfilter", "source-caps-source-" + camera['name'])
        caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM),width=1920,height=1080,framerate=60/1,format=NV12"))
        pipeline.add(source)
        pipeline.add(caps)

        # Fixed: the capsfilter was added to the pipeline but never linked,
        # so it had no effect. Route camera -> caps -> muxer instead
        # (same pattern as the other multi-camera example in this file).
        source.link(caps)

        sinkpad = streammux.get_request_pad('sink_' + str(camera['source']))
        srcpad = caps.get_static_pad("src")

        if not sinkpad:
            print("Unable to create source sink pad")
            exit(1)
        if not srcpad:
            print("Unable to create source src pad")
            exit(1)
        srcpad.link(sinkpad)

    # Primary inference
    pgie = create_element_or_error("nvinfer", "primary-inference")
    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt")
    pipeline.add(pgie)
    streammux.link(pgie)

    # Tracker
    tracker = create_element_or_error("nvtracker", "tracker")
    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvdcf.so')
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 480)
    pipeline.add(tracker)
    pgie.link(tracker)

    # Analytics (config path spelling preserved: it is a real file path)
    analytics = create_element_or_error("nvdsanalytics", "analytics")
    analytics.set_property("config-file", "/deepstream-examples/Analitycs/analitycs.txt")
    pipeline.add(analytics)
    tracker.link(analytics)

    # Tee: allows additional branches besides the display branch.
    tee = create_element_or_error("tee", "tee")
    pipeline.add(tee)
    analytics.link(tee)

    # Request a tee source pad for the display branch.
    tee_src_pad_template = tee.get_pad_template("src_%u")
    tee_display_src_pad = tee.request_pad(tee_src_pad_template, None, None)

    # Display queue decouples the tee from the rendering branch.
    queue = create_element_or_error("queue", "queue")
    pipeline.add(queue)
    queue_sink_pad = queue.get_static_pad("sink")

    # Link main tee to display queue (pad-level link returns a status enum).
    if tee_display_src_pad.link(queue_sink_pad) != Gst.PadLinkReturn.OK:
        print("Could not link main tee to display queue")
        return

    # Tiler: lays the batched streams out in a 2x2 grid.
    tiler = create_element_or_error("nvmultistreamtiler", "nvtiler")
    tiler.set_property("rows", 2)
    tiler.set_property("columns", 2)
    tiler.set_property("width", 1920)
    tiler.set_property("height", 1080)
    pipeline.add(tiler)
    queue.link(tiler)

    # Converter
    convertor = create_element_or_error("nvvideoconvert", "converter-1")
    pipeline.add(convertor)
    tiler.link(convertor)

    # On-screen display
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    pipeline.add(nvosd)
    convertor.link(nvosd)

    # EGL transform
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    pipeline.add(transform)
    nvosd.link(transform)

    # Sink
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")
    sink.set_property("qos", 0)
    pipeline.add(sink)
    transform.link(sink)

    # Run the pipeline until interrupted, then always reset it to NULL.
    loop = GObject.MainLoop()
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except KeyboardInterrupt:  # fixed: was a bare except swallowing everything
        pass
    finally:
        pipeline.set_state(Gst.State.NULL)
Example #2
0
    def __init__(self, handle, create_jobject=True):
        """Initialise the Activity

        handle -- sugar3.activity.activityhandle.ActivityHandle
            instance providing the activity id and access to the
            presence service which *may* provide sharing for this
            application

        create_jobject -- boolean
            define if it should create a journal object if we are
            not resuming

        Side effects:

            Sets the gdk screen DPI setting (resolution) to the
            Sugar screen resolution.

            Connects our "destroy" message to our _destroy_cb
            method.

            Creates a base Gtk.Window within this window.

            Creates an ActivityService (self._bus) servicing
            this application.

        Usage:
            If your Activity implements __init__(), it should call
            the base class __init__() before doing Activity specific things.

        """
        # Stuff that needs to be done early
        icons_path = os.path.join(get_bundle_path(), 'icons')
        Gtk.IconTheme.get_default().append_search_path(icons_path)

        # Pick the Sugar GTK theme matching the screen scaling factor.
        sugar_theme = 'sugar-72'
        if 'SUGAR_SCALING' in os.environ:
            if os.environ['SUGAR_SCALING'] == '100':
                sugar_theme = 'sugar-100'

        # This code can be removed when we grow an xsettings daemon (the GTK+
        # init routines will then automatically figure out the font settings)
        settings = Gtk.Settings.get_default()
        settings.set_property('gtk-theme-name', sugar_theme)
        settings.set_property('gtk-icon-theme-name', 'sugar')
        settings.set_property('gtk-font-name',
                              '%s %f' % (style.FONT_FACE, style.FONT_SIZE))

        Window.__init__(self)

        if 'SUGAR_ACTIVITY_ROOT' in os.environ:
            # If this activity runs inside Sugar, we want it to take all the
            # screen. Would be better if it was the shell to do this, but we
            # haven't found yet a good way to do it there. See #1263.
            self.connect('window-state-event', self.__window_state_event_cb)
            screen = Gdk.Screen.get_default()
            screen.connect('size-changed', self.__screen_size_changed_cb)
            self._adapt_window_to_screen()

        # process titles will only show 15 characters
        # but they get truncated anyway so if more characters
        # are supported in the future we will get a better view
        # of the processes
        proc_title = '%s <%s>' % (get_bundle_name(), handle.activity_id)
        util.set_proc_title(proc_title)

        self.connect('realize', self.__realize_cb)
        self.connect('delete-event', self.__delete_event_cb)

        # Internal state flags and journal-object bookkeeping.
        self._active = False
        self._activity_id = handle.activity_id
        self.shared_activity = None
        self._join_id = None
        self._updating_jobject = False
        self._closing = False
        self._quit_requested = False
        self._deleting = False
        self._max_participants = None
        self._invites_queue = []
        self._jobject = None
        self._read_file_called = False

        # Register with the session so we receive quit notifications.
        self._session = _get_session()
        self._session.register(self)
        self._session.connect('quit-requested',
                              self.__session_quit_requested_cb)
        self._session.connect('quit', self.__session_quit_cb)

        accel_group = Gtk.AccelGroup()
        self.sugar_accel_group = accel_group
        self.add_accel_group(accel_group)

        self._bus = ActivityService(self)
        self._owns_file = False

        share_scope = SCOPE_PRIVATE

        # Resuming an existing journal object: restore its share scope and
        # record this launch time in its metadata.
        if handle.object_id:
            self._jobject = datastore.get(handle.object_id)

            if 'share-scope' in self._jobject.metadata:
                share_scope = self._jobject.metadata['share-scope']

            if 'launch-times' in self._jobject.metadata:
                self._jobject.metadata['launch-times'] += ', %d' % \
                    int(time.time())
            else:
                self._jobject.metadata['launch-times'] = \
                    str(int(time.time()))

        # NOTE(review): both attributes were already initialised above —
        # these reassignments look redundant; confirm before removing.
        self.shared_activity = None
        self._join_id = None

        if handle.object_id is None and create_jobject:
            logging.debug('Creating a jobject.')
            self._jobject = self._initialize_journal_object()

        if handle.invited:
            wait_loop = GObject.MainLoop()
            self._client_handler = _ClientHandler(
                self.get_bundle_id(), partial(self.__got_channel_cb,
                                              wait_loop))
            # FIXME: The current API requires that self.shared_activity is set
            # before exiting from __init__, so we wait until we have got the
            # shared activity. http://bugs.sugarlabs.org/ticket/2168
            wait_loop.run()
        else:
            pservice = presenceservice.get_instance()
            mesh_instance = pservice.get_activity(self._activity_id,
                                                  warn_if_none=False)
            self._set_up_sharing(mesh_instance, share_scope)

        # Without a journal object there is no metadata to mirror the title
        # into, so stop after setting a default window title.
        if not create_jobject:
            self.set_title(get_bundle_name())
            return

        if self.shared_activity is not None:
            self._jobject.metadata['title'] = self.shared_activity.props.name
            self._jobject.metadata['icon-color'] = \
                self.shared_activity.props.color
        else:
            self._jobject.metadata.connect('updated',
                                           self.__jobject_updated_cb)
        self.set_title(self._jobject.metadata['title'])
Example #3
0
    def __init__(self,
                 verbose=False,
                 log_dir=None,
                 console_log=False,
                 run_dir=None,
                 config_file=None,
                 test_dir=None):
        """Initialise the daemon.

        verbose -- force DEBUG-level logging regardless of the config file
        log_dir -- log directory (~ expanded, created if missing)
        console_log -- whether to also log to the console
        run_dir -- runtime directory (~ expanded, created if missing)
        config_file -- ini-style config path (~ expanded); the process
            exits if the path is given but does not exist
        test_dir -- stored on the instance; presumably a directory of fake
            test devices — confirm against _load_devices

        Exits the process (status 1) on unusable directories, a missing
        config file, or if the user is not in the plugdev group.
        """

        setproctitle.setproctitle('openrazer-daemon')  # pylint: disable=no-member

        # Expanding ~ as python doesnt do it by default, also creating dirs if needed
        try:
            if log_dir is not None:
                log_dir = os.path.expanduser(log_dir)
                os.makedirs(log_dir, exist_ok=True)
            if run_dir is not None:
                run_dir = os.path.expanduser(run_dir)
                os.makedirs(run_dir, exist_ok=True)
        except NotADirectoryError as e:
            print("Failed to create {}".format(e.filename), file=sys.stderr)
            sys.exit(1)

        if config_file is not None:
            config_file = os.path.expanduser(config_file)
            if not os.path.exists(config_file):
                print("Config file {} does not exist.".format(config_file),
                      file=sys.stderr)
                sys.exit(1)

        self._test_dir = test_dir
        self._run_dir = run_dir
        self._config_file = config_file
        self._config = configparser.ConfigParser()
        self.read_config(config_file)

        # Logging: config option 'General/verbose_logging' or the verbose
        # flag bumps the level from INFO to DEBUG.
        log_level = logging.INFO
        if verbose or self._config.getboolean('General', 'verbose_logging'):
            log_level = logging.DEBUG
        self.logger = self._create_logger(log_dir, log_level, console_log)

        # Check for plugdev group
        if not self._check_plugdev_group():
            self.logger.critical("User is not a member of the plugdev group")
            sys.exit(1)

        # Setup DBus to use gobject main loop
        dbus.mainloop.glib.threads_init()
        dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
        DBusService.__init__(self, self.BUS_NAME, '/org/razer')

        self._init_signals()
        self._main_loop = GObject.MainLoop()

        # Listen for input events from udev
        self._init_udev_monitor()

        # Load Classes
        self._device_classes = openrazer_daemon.hardware.get_device_classes()

        self.logger.info("Initialising Daemon (v%s). Pid: %d", __version__,
                         os.getpid())
        self._init_screensaver_monitor()

        self._razer_devices = DeviceCollection()
        self._load_devices(first_run=True)

        # Add DBus methods
        # NOTE(review): `methods` is a set literal, so iteration order is
        # arbitrary; harmless here since each entry is registered
        # independently, but a tuple would make registration deterministic.
        methods = {
            # interface, method, callback, in-args, out-args
            ('razer.devices', 'getDevices', self.get_serial_list, None, 'as'),
            ('razer.devices', 'supportedDevices', self.supported_devices, None,
             's'),
            ('razer.devices', 'enableTurnOffOnScreensaver',
             self.enable_turn_off_on_screensaver, 'b', None),
            ('razer.devices', 'getOffOnScreensaver',
             self.get_off_on_screensaver, None, 'b'),
            ('razer.devices', 'syncEffects', self.sync_effects, 'b', None),
            ('razer.devices', 'getSyncEffects', self.get_sync_effects, None,
             'b'),
            ('razer.daemon', 'version', self.version, None, 's'),
            ('razer.daemon', 'stop', self.stop, None, None),
        }

        for m in methods:
            self.logger.debug("Adding {}.{} method to DBus".format(m[0], m[1]))
            self.add_dbus_method(m[0],
                                 m[1],
                                 m[2],
                                 in_signature=m[3],
                                 out_signature=m[4])

        # TODO remove
        self.sync_effects(
            self._config.getboolean('Startup', 'sync_effects_enabled'))
Example #4
0
def main(args):
    """Initialise desktop notifications and watch for newly added volumes,
    blocking in a GLib main loop."""
    Notify.init("gvfs-automount")
    monitor = Gio.VolumeMonitor.get()
    # Collect the signal-handler ids (mirrors the original bookkeeping).
    handler_ids = [monitor.connect("volume-added", on_volume_added, None)]
    GObject.MainLoop().run()
def main(args):
    """Decode an H.264 file through DeepStream and render it via a sink bin.

    args -- argv-style list; args[1] must be the media file path.
    Exits non-zero on usage errors or if the pipeline cannot be built.
    """
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
        sys.exit(1)  # fixed: previously fell through and crashed on None

    source = Gst.ElementFactory.make("filesrc", "file-source")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")

    # Fail fast if any element factory is unavailable
    # (e.g. the DeepStream plugins are not installed).
    if not all((source, h264parser, decoder, streammux, nvvidconv)):
        sys.stderr.write(" Unable to create all pipeline elements \n")
        sys.exit(1)

    print("Playing file %s " % args[1])
    source.set_property('location', args[1])
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(nvvidconv)

    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)

    # Decoder output feeds the muxer's first requested sink pad.
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
        sys.exit(1)  # fixed: abort instead of calling .link on None
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
        sys.exit(1)
    srcpad.link(sinkpad)

    streammux.link(nvvidconv)

    # Downstream processing/rendering lives in a prebuilt sink bin.
    sink_bin = create_sink_bin(1)
    pipeline.add(sink_bin)
    srcpad = nvvidconv.get_static_pad("src")
    sinkpad = sink_bin.get_static_pad("sink")
    srcpad.link(sinkpad)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:  # fixed: was a bare except
        pass
    finally:
        # cleanup: always return the pipeline to NULL
        pipeline.set_state(Gst.State.NULL)
Example #6
0
def main(args):
    """Run a two-camera DeepStream pipeline with one display window per camera.

    Both cameras are batched by nvstreammux, pass through inference,
    tracking and analytics, and are then demuxed so each camera gets its
    own converter, OSD, EGL transform and EGL sink. Blocks in a GLib
    main loop until interrupted.
    """

    # Standard GStreamer initialization
    cameras_list = [
        {
            "source": 0,
            "name": "camera1"
        },
        {
            "source": 1,
            "name": "camera2"
        },
    ]

    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    # Muxer: batches the per-camera streams into a single batched stream.
    muxer = create_element_or_error("nvstreammux", "muxer")
    muxer.set_property('live-source', True)
    muxer.set_property('sync-inputs', True)
    muxer.set_property('width', 720)
    muxer.set_property('height', 480)
    muxer.set_property('batch-size', 3)
    muxer.set_property('batched-push-timeout', 4000000)
    pipeline.add(muxer)

    # Primary inference
    pgie = create_element_or_error("nvinfer", "primary-inference")
    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt"
    )
    pipeline.add(pgie)
    muxer.link(pgie)

    # Tracker
    tracker = create_element_or_error("nvtracker", "tracker")
    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream/lib/libnvds_mot_klt.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('enable-batch-process', 1)
    pipeline.add(tracker)
    pgie.link(tracker)

    # Analytics (config path spelling preserved: it is a real file path)
    analytics = create_element_or_error("nvdsanalytics", "analytics")
    analytics.set_property("config-file", "./../Analitycs/analitycs.txt")
    pipeline.add(analytics)
    tracker.link(analytics)

    # Converter
    converterOsd = create_element_or_error("nvvideoconvert",
                                           "to-osd-convertor")
    pipeline.add(converterOsd)
    analytics.link(converterOsd)

    # Demuxer: splits the batch back into one stream per camera.
    demux = create_element_or_error("nvstreamdemux", "demuxer")
    pipeline.add(demux)
    converterOsd.link(demux)

    # Sources: camera -> capsfilter -> muxer request pad, one per camera.
    for camera in cameras_list:
        source = create_element_or_error("nvarguscamerasrc",
                                         "source-" + camera['name'])
        source.set_property('sensor-id', camera['source'])

        caps = create_element_or_error("capsfilter",
                                       "source-caps-" + camera['name'])
        caps.set_property(
            "caps",
            Gst.Caps.from_string(
                "video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, framerate=(fraction)30/1, format=(string)NV12"
            ))

        source.set_property('do-timestamp', True)
        source.set_property('bufapi-version', True)
        source.set_property('tnr-mode', 2)
        source.set_property('ee-mode', 2)
        source.set_property('aeantibanding', 0)

        pipeline.add(source)
        pipeline.add(caps)

        source.link(caps)

        srcpad = caps.get_static_pad("src")
        sinkpad = muxer.get_request_pad('sink_' + str(camera['source']))

        if not sinkpad:
            print("Unable to create source sink pad")
            exit(0)
        if not srcpad:
            print("Unable to create source src pad")
            exit(0)
        srcpad.link(sinkpad)

    # Outputs: demux request pad -> queue -> converter -> OSD -> EGL sink,
    # one branch per camera.
    for camera in cameras_list:

        queue = create_element_or_error("queue", "queue-" + camera['name'])
        pipeline.add(queue)

        _srcpad = demux.get_request_pad("src_" + str(camera['source']))
        if not _srcpad:
            print("Unable to create output src pad")
            exit(0)

        _sinkpad = queue.get_static_pad('sink')
        if not _sinkpad:
            print("Unable to create output sink pad")
            exit(0)

        _srcpad.link(_sinkpad)

        # Converter
        converter = create_element_or_error("nvvideoconvert",
                                            "converter-" + camera['name'])
        pipeline.add(converter)
        queue.link(converter)

        # On-screen display
        nvosd = create_element_or_error("nvdsosd",
                                        "on-screen-display" + camera['name'])
        pipeline.add(nvosd)
        converter.link(nvosd)

        # Transform
        transform = create_element_or_error(
            "nvegltransform", "nvegl-transform-" + camera['name'])
        pipeline.add(transform)
        nvosd.link(transform)

        # Sink (sync disabled so rendering never throttles the live sources)
        sink = create_element_or_error("nveglglessink",
                                       "sink-" + camera['name'])
        sink.set_property('sync', False)
        pipeline.add(sink)
        transform.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start the pipeline
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # cleanup
    pipeline.set_state(Gst.State.NULL)

    print("Exiting app")
def main():
    """Entry point for the kaldigstserver worker process.

    Parses command-line options, optionally forks into several worker
    processes, loads the YAML decoder configuration, spawns external
    post-processor subprocesses, then runs the GLib main loop in an
    executor thread alongside the Tornado IO loop.
    """
    logging.basicConfig(level=logging.DEBUG,
                        format="%(levelname)8s %(asctime)s %(message)s ")
    logging.debug('Starting up worker')
    parser = argparse.ArgumentParser(description='Worker for kaldigstserver')
    parser.add_argument('-u',
                        '--uri',
                        default="ws://localhost:8888/worker/ws/speech",
                        dest="uri",
                        help="Server<-->worker websocket URI")
    parser.add_argument('-f', '--fork', default=1, dest="fork", type=int)
    parser.add_argument('-c',
                        '--conf',
                        dest="conf",
                        help="YAML file with decoder configuration")

    args = parser.parse_args()

    # Fork before loading any heavyweight state so child processes stay small.
    if args.fork > 1:
        logging.info("Forking into %d processes" % args.fork)
        tornado.process.fork_processes(args.fork)

    conf = {}
    if args.conf:
        with open(args.conf) as f:
            conf = yaml.safe_load(f)

    if "logging" in conf:
        logging.config.dictConfig(conf["logging"])

    # fork off the post-processors before we load the model into memory
    tornado.process.Subprocess.initialize()
    post_processor = None
    if "post-processor" in conf:
        STREAM = tornado.process.Subprocess.STREAM
        post_processor = tornado.process.Subprocess(conf["post-processor"],
                                                    shell=True,
                                                    stdin=PIPE,
                                                    stdout=STREAM)

    full_post_processor = None
    if "full-post-processor" in conf:
        full_post_processor = Popen(conf["full-post-processor"],
                                    shell=True,
                                    stdin=PIPE,
                                    stdout=PIPE)

    # Module-level settings read elsewhere in this file.
    global USE_NNET2
    USE_NNET2 = conf.get("use-nnet2", False)

    global SILENCE_TIMEOUT
    SILENCE_TIMEOUT = conf.get("silence-timeout", 5)
    #    if USE_NNET2:
    #        decoder_pipeline = DecoderPipeline2(conf)
    #    else:
    #        decoder_pipeline = DecoderPipeline(conf)

    # Run the GLib main loop on an executor thread so GStreamer callbacks
    # fire while Tornado's IO loop owns the main thread.
    gobject_loop = GObject.MainLoop()
    tornado.ioloop.IOLoop.current().run_in_executor(executor, gobject_loop.run)
    tornado.ioloop.IOLoop.current().spawn_callback(main_loop, args.uri,
                                                   post_processor,
                                                   full_post_processor)
    tornado.ioloop.IOLoop.current().start()
Example #8
0
    def startPairing(self):
        """Flip the local Bluetooth adapter to Discoverable so a remote
        device can initiate pairing."""
        print("Start Pairing")
        system_bus = dbus.SystemBus()
        adapter_object = system_bus.get_object(SERVICE_NAME,
                                               findAdapter().object_path)
        properties = dbus.Interface(adapter_object,
                                    "org.freedesktop.DBus.Properties")
        properties.Set(ADAPTER_IFACE, "Discoverable", True)

        logging.info("BlueAgent is waiting to pair with device")

        
bus = None

if __name__ == "__main__":
    parser = OptionParser()
    parser.add_option("-p", "--pin", action="store", dest="pin_code",
                      help="PIN code to pair with", metavar="PIN")
    options, args = parser.parse_args()

    # PIN from the command line, falling back to the "0000" default.
    pin_code = options.pin_code or "0000"

    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)

    # Register the agent and make the adapter discoverable, then block.
    agent = BlueAgent(pin_code)
    agent.registerAsDefault()
    agent.startPairing()

    mainloop = gobject.MainLoop()
    mainloop.run()
Example #9
0
def _test():
    """A simple test/example."""
    # Spawn one counting tasklet, then spin a GLib main loop so it can run.
    Tasklet(_count_some_numbers1(5))
    GObject.MainLoop().run()
Example #10
0
def main():
    """Run a single-camera DeepStream tracker example.

    Camera -> streammux -> nvinfer -> convert -> OSD -> convert ->
    EGL transform -> EGL sink; blocks in a GLib main loop until
    interrupted.
    """
    print('Tracker Example')

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")
        return

    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "converter-2")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "egl-overlay")

    # Set element properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('nvbuf-memory-type', 4)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path',
                      "models/nurawash/config_infer_primary.txt")

    # Add elements to the pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(transform)
    pipeline.add(sink)

    # Requesting the pad creates 'sink_0' on the muxer so the element-level
    # source.link(streammux) below can succeed.
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")
        return  # fixed: abort instead of linking a broken pipeline

    # Link the elements together
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(transform)
    transform.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except KeyboardInterrupt:  # fixed: was a bare except swallowing everything
        pass
    finally:
        # Cleanup: always return the pipeline to NULL
        pipeline.set_state(Gst.State.NULL)
Example #11
0
    def setUp(self):
        """Per-test fixture: fresh GLib main loop, seeded mock data,
        and a reset completion counter."""
        self.main_loop = GObject.MainLoop()

        self.mock_data_insert()
        # Counts completed async operations during the test.
        self.finish_counter = 0
Example #12
0
def main(args):
    """Record a V4L2 MJPEG camera to segmented H.264 MP4 files.

    Captures JPEG frames from the camera device, decodes them, re-encodes
    to H.264 and writes 10-second segments via splitmuxsink. Blocks in a
    GLib main loop until interrupted.
    """
    device = '/dev/video0'
    width = 2592
    height = 1944
    location = '%02d.mp4'  # splitmuxsink fills in the segment index

    # init gstreamer
    GObject.threads_init()
    Gst.init(None)

    # init loop
    loop = GObject.MainLoop()

    # create pipeline
    pipeline: Gst.Pipeline = Gst.Pipeline.new('camera-recorder')

    # region create elements
    src: Gst.Element = Gst.ElementFactory.make('v4l2src')
    filter1: Gst.Element = Gst.ElementFactory.make('capsfilter')
    decoder: Gst.Element = Gst.ElementFactory.make('jpegdec')
    queue1: Gst.Element = Gst.ElementFactory.make('queue')
    encoder: Gst.Element = Gst.ElementFactory.make('x264enc')
    parser: Gst.Element = Gst.ElementFactory.make('h264parse')
    sink: Gst.Element = Gst.ElementFactory.make('splitmuxsink')

    if (not pipeline or not src or not filter1 or not decoder or not queue1
            or not encoder or not parser or not sink):
        print('ERROR: Not all elements could be created.')
        sys.exit(1)

    src.set_property('device', device)

    # Request full-resolution JPEG frames from the camera.
    caps = Gst.caps_from_string(f'image/jpeg,width={width},height={height}')
    filter1.set_property('caps', caps)

    # Frequent keyframes so every split file starts cleanly.
    encoder.set_property('key-int-max', 10)

    sink.set_property('location', location)
    sink.set_property('max-size-time', 10000000000)  # 10 s per segment (ns)
    # endregion

    # region set up the pipeline
    # add elements
    pipeline.add(src, filter1, decoder, queue1, encoder, parser, sink)

    # link elements
    ret = src.link(filter1)
    ret = ret and filter1.link(decoder)
    ret = ret and decoder.link(queue1)
    ret = ret and queue1.link(encoder)
    ret = ret and encoder.link(parser)
    ret = ret and parser.link(sink)

    if not ret:
        print('ERROR: Elements could not be linked')
        sys.exit(1)
    # endregion

    # add message handler
    bus: Gst.Bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", on_bus_callback, loop)

    # start playing
    pipeline.set_state(Gst.State.PLAYING)

    # Run until interrupted; always reset the pipeline afterwards.
    try:
        loop.run()
    except KeyboardInterrupt:  # fixed: was `except Exception as e: pass`
        pass
    finally:
        pipeline.set_state(Gst.State.NULL)
Example #13
0
 def __init__(self):
     """Export this object on the session bus and spin a GLib main loop.

     NOTE(review): loop.run() blocks until the loop is quit, so this
     constructor never returns in normal operation — confirm callers
     expect that.
     """
     bus_name = dbus.service.BusName(DBUS_NAME, bus=dbus.SessionBus())
     super(DBusWrapper, self).__init__(bus_name, DBUS_PATH)
     self.loop = GObject.MainLoop()
     self.loop.run()
Example #14
0
 def _build_loop(self, message_bus):
     """Attach a signal watch to *message_bus*, route its messages to
     self.bus_call, and return the new GLib main loop."""
     main_loop = GObject.MainLoop()
     message_bus.add_signal_watch()
     message_bus.connect("message", self.bus_call, main_loop)
     return main_loop
Example #15
0
def main(args):
    """Build and run a single-source DeepStream analytics pipeline.

    Topology (a queue between every pair of stages for decoupling):
        source bin -> nvstreammux -> nvinfer -> nvtracker -> nvdsanalytics
                   -> nvvideoconvert -> nvdsosd -> nvegltransform -> nveglglessink

    A buffer probe on the analytics element's src pad exposes the per-frame
    analytics metadata.  Blocks in a GLib main loop until EOS/error, then
    sets the pipeline back to NULL.
    """
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        # Fail fast: the original fell through and crashed later with an
        # AttributeError on None; exit with the diagnostic instead.
        sys.stderr.write(" Unable to create Pipeline")
        sys.exit(1)

    print("Creating streamux")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pipeline.add(streammux)

    source_bin = create_source_bin("file:/deepstream-examples/videos/traffic.mp4")
    if not source_bin:
        sys.stderr.write("Unable to create source bin")
        sys.exit(1)
    pipeline.add(source_bin)

    # Hook the source bin up to the muxer's first requested sink pad.
    sinkpad = streammux.get_request_pad('sink_0')
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin")
        sys.exit(1)

    srcpad = source_bin.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin")
        sys.exit(1)

    srcpad.link(sinkpad)

    # One queue between each pair of linked stages (names queue1..queue6).
    queues = [create_element_or_error("queue", "queue%d" % i) for i in range(1, 7)]
    for queue in queues:
        pipeline.add(queue)
    queue1, queue2, queue3, queue4, queue5, queue6 = queues

    pgie = create_element_or_error("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    analytics = create_element_or_error("nvdsanalytics", "analytics")
    converter = create_element_or_error("nvvideoconvert", "convertor")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")

    # Draw in CPU mode (0) and skip rendering label text.
    nvosd.set_property('process-mode', 0)
    nvosd.set_property('display-text', 0)

    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt")

    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_nvdcf.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('enable-batch-process', 1)

    analytics.set_property("config-file", "config_nvdsanalytics.txt")

    print("Adding elements to Pipeline")
    for element in (pgie, tracker, analytics, converter, nvosd, transform, sink):
        pipeline.add(element)

    print("Linking elements in the Pipeline")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(analytics)
    analytics.link(queue4)
    queue4.link(converter)
    converter.link(queue5)
    queue5.link(nvosd)
    nvosd.link(queue6)
    queue6.link(transform)
    transform.link(sink)

    # Create an event loop and feed gstreamer bus messages to it.
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Observe analytics output via a buffer probe; non-fatal if unavailable.
    analytics_src_pad = analytics.get_static_pad("src")
    if not analytics_src_pad:
        sys.stderr.write("Unable to get src pad")
    else:
        analytics_src_pad.add_probe(Gst.PadProbeType.BUFFER, analytics_meta_buffer_probe, 0)

    print("Starting pipeline")
    # Start play back and listen to events until interrupted.
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        # Bare except is deliberate: swallow KeyboardInterrupt too so the
        # cleanup below always runs.
        pass
    # cleanup
    print("Exiting app")
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """Decode an H264 elementary stream, run inference, and serve the result
    over RTSP while publishing Sparkplug-B metrics over MQTT.

    Pipeline:
        filesrc -> h264parse -> nvv4l2decoder -> nvstreammux -> nvinfer
        -> nvvideoconvert -> nvdsosd -> nvvideoconvert -> capsfilter(I420)
        -> nvv4l2h26{4,5}enc -> rtph26{4,5}pay -> udpsink

    An RTSP server re-exposes the UDP/RTP stream at
    rtsp://localhost:8554/ds-test.  Relies on module globals: codec, bitrate,
    stream_path, client (MQTT), sparkplug helpers, metric counters,
    WAIT_SECONDS.
    """
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        # Fail fast instead of crashing later with AttributeError on None.
        sys.stderr.write(" Unable to create Pipeline \n")
        sys.exit(1)

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")
        sys.exit(1)

    # The input file carries an elementary h264 stream, so parse it first.
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")
        sys.exit(1)

    # Use nvdec_h264 for hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")
        sys.exit(1)

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
        sys.exit(1)

    # Use nvinfer to run inferencing on decoder's output; behaviour is
    # configured through the config file set below.
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
        sys.exit(1)

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
        sys.exit(1)

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
        sys.exit(1)

    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")
        sys.exit(1)

    # Caps filter forcing I420 ahead of the hardware encoder.
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    else:
        # Previously an unrecognised codec fell through to a NameError on
        # `encoder`; report it explicitly instead.
        sys.stderr.write(" Unsupported codec %s \n" % codec)
        sys.exit(1)
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
        sys.exit(1)
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        # Jetson-specific encoder tuning.
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")
        sys.exit(1)

    # Make the UDP sink feeding the RTSP server's udpsrc below.
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
        sys.exit(1)

    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    print("Playing file %s " % stream_path)
    source.set_property('location', stream_path)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")

    print("Adding elements to Pipeline \n")
    for element in (source, h264parser, decoder, streammux, pgie, nvvidconv,
                    nvosd, nvvidconv_postosd, caps, encoder, rtppay, sink):
        pipeline.add(element)

    # Link the elements together:
    # file-source -> h264-parser -> nvh264-decoder ->
    # nvinfer -> nvvidconv -> nvosd -> nvvidconv_postosd ->
    # caps -> encoder -> rtppay -> udpsink
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
        sys.exit(1)

    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
        sys.exit(1)

    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start the RTSP server that re-serves the udpsink output.
    rtsp_port_num = 8554

    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)

    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)

    print(
        "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n"
        % rtsp_port_num)

    # Probe the sink pad of the OSD element: by that time the buffer has
    # acquired all of its metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
        sys.exit(1)

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # Create the node death payload
    deathPayload = sparkplug.getNodeDeathPayload()

    # Set up the MQTT client connection
    client.on_connect = on_connect
    client.on_message = on_message
    client.username_pw_set(myUsername, myPassword)
    deathByteArray = bytearray(deathPayload.SerializeToString())
    client.will_set("spBv1.0/" + myGroupId + "/NDEATH/" + myNodeName,
                    deathByteArray, 0, False)
    client.connect(serverUrl, 1883, 60)

    # Publish the birth certificates
    publishBirth()

    def foo():
        # Periodically publish some new data
        payload = sparkplug.getDdataPayload()

        # Add some random data to the inputs
        addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx,
                  MetricDataType.Int16, frame_numberx)
        addMetric(payload, "input/number of objects",
                  AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx)
        addMetric(payload, "input/Vehicle count", AliasMap.Device_counter1,
                  MetricDataType.Int16, counter1)
        addMetric(payload, "input/Person count", AliasMap.Device_counter2,
                  MetricDataType.Int16, counter2)
        # Note this data we're setting to STALE via the propertyset as an example
        metric = addMetric(payload, None,
                           AliasMap.Device_Metric1, MetricDataType.Boolean,
                           random.choice([True, False]))
        metric.properties.keys.extend(["Quality"])
        propertyValue = metric.properties.values.add()
        propertyValue.type = ParameterDataType.Int32
        propertyValue.int_value = 500

        # Publish a message data
        byteArray = bytearray(payload.SerializeToString())
        client.publish(
            "spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" +
            myDeviceName, byteArray, 0, False)

        # Re-arm: foo reschedules itself every WAIT_SECONDS forever.
        threading.Timer(WAIT_SECONDS, foo).start()

    foo()

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        # Bare except is deliberate: swallow KeyboardInterrupt too so the
        # cleanup below always runs.
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
Example #17
0
    #    pipeline = Gst.parse_launch(sound_framework+' device-name="'+device_name+'" latency-time=1000 buffer-time=1001 ! \
    #                                spectrum interval='+str(sample_interval)+' ! fakesink')

    bus = pipeline.get_bus()
    #bus.add_signal_watch()
    #bus.connect('message', playerbin_message)
    bus.add_watch(0, playerbin_message)
    pipeline.set_state(Gst.State.PLAYING)
    print(time.strftime('[%H:%M:%S]') + ' pipeline PLAYING')
    return pipeline, bus


if use_gstreamer:
    # Run main() on a worker thread; the foreground thread hosts a GLib main
    # loop so GObject/GStreamer callbacks get dispatched.
    main_thread = threading.Thread(target=main)
    main_thread.start()
    # NOTE(review): despite the name, gi_thread is a GObject.MainLoop (which
    # blocks this thread in run()), not a threading.Thread.
    gi_thread = GObject.MainLoop()
    gi_thread.run()
else:
    main()
## Wait until error or EOS.
#msg = bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE,
#Gst.MessageType.ERROR | Gst.MessageType.EOS)
#print msg
# To find devices
# sink=Gst.ElementFactory.make("directsoundsrc")
#
#caps audio/x-raw, format=(string)S16LE, layout=(string)interleaved, rate=(int)44100, channels=(int)2
#

#convert to not global:
#wR wG wB
Example #18
0
    def __init__(self, irc):
        """Plugin setup: start a GLib main loop on a background thread, build
        all data sources over one shared HTTP session, schedule their periodic
        refresh, and publish the D-Bus email-notification service.

        NOTE(review): appears to be a supybot-style plugin (irc, registryValue,
        schedule) — confirm against the enclosing class.
        """
        super().__init__(irc)
        self.irc = irc
        self.topic_lock = threading.Lock()

        # Run GLib's loop on its own thread so D-Bus signals are dispatched
        # without blocking the IRC plugin.
        self.mainloop = None
        self.mainloop_thread = None
        mainloop = GObject.MainLoop()
        if not mainloop.is_running():
            log.info("Starting Glib main loop")
            mainloop_thread = threading.Thread(target=mainloop.run,
                                               name="Glib maing loop")
            mainloop_thread.start()
            self.mainloop_thread = mainloop_thread
            self.mainloop = mainloop

        # One shared HTTP session (TLS verification on) for all data sources.
        self.requests_session = requests.Session()
        self.requests_session.verify = True

        self.queued_topics = {}
        self.last_n_messages = []

        # data sources
        pseudo_packages.pp = PseudoPackages(self.requests_session)
        self.pseudo_packages = pseudo_packages.pp
        self.stable_rc_bugs = StableRCBugs(self.requests_session)
        self.testing_rc_bugs = TestingRCBugs(self.requests_session)
        self.new_queue = NewQueue(self.requests_session)
        self.dinstall = Dinstall(self.requests_session)
        self.rm_queue = RmQueue(self.requests_session)
        self.apt_archive = AptArchive(
            self.registryValue("apt_configuration_directory"),
            self.registryValue("apt_cache_directory"),
        )
        self.data_sources = (
            self.pseudo_packages,
            self.stable_rc_bugs,
            self.testing_rc_bugs,
            self.new_queue,
            self.dinstall,
            self.rm_queue,
            self.apt_archive,
        )

        # Schedule datasource updates
        def wrapper(source):
            # Binds `source` per call; logs update failures instead of raising
            # so one bad source does not cancel its schedule.
            def implementation():
                try:
                    source.update()
                except Exception as e:
                    log.exception("Failed to update {}: {}".format(
                        source.NAME, e))
                self._topic_callback()

            return implementation

        for source in self.data_sources:
            # schedule periodic events
            schedule.addPeriodicEvent(wrapper(source),
                                      source.INTERVAL,
                                      source.NAME,
                                      now=False)
            # and run them now once
            schedule.addEvent(wrapper(source), time.time() + 1)

        log.info("Starting D-Bus service")
        self.dbus_service = BTSDBusService(self._email_callback)
        self.dbus_bus = SystemBus()
        self.dbus_bus.publish(self.dbus_service.interface_name,
                              self.dbus_service)
        self.dbus_service.start()
def main():
    """Capture from a CSI camera, run primary inference and OSD, then tee the
    stream into two branches: RTMP live streaming (H264/FLV) and local file
    recording (H265).  Blocks in a GLib main loop until interrupted.
    """
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")

    # Create GST Source
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "convertor-2")

    # Tee plus one queue per downstream branch.
    tee = create_element_or_error("tee", "tee")
    streaming_queue = create_element_or_error("queue", "streaming_queue")
    recording_queue = create_element_or_error("queue", "recording_queue")

    # Create Gst Elements for Streaming Branch
    s_encoder = create_element_or_error("nvv4l2h264enc", "streaming-encoder")
    s_parser = create_element_or_error("h264parse", "streaming-parser")
    s_muxer = create_element_or_error("flvmux", "streaming-muxer")
    s_sink = create_element_or_error("rtmpsink", "streaming-sink")

    # Create Gst Elements for Recording Branch
    r_encoder = create_element_or_error('nvv4l2h265enc', 'encoder')
    r_parser = create_element_or_error('h265parse', 'parser')
    r_sink = create_element_or_error('filesink', 'sink')

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt"
    )
    s_sink.set_property('location',
                        'rtmp://media.streamit.live/LiveApp/streaming-test')
    r_encoder.set_property('bitrate', 8000000)
    # Recording filename is stamped with the current UTC date.
    r_sink.set_property(
        'location', 'video_' + str(datetime.datetime.utcnow().date()) + '.mp4')

    # Add elements to the pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(tee)
    pipeline.add(streaming_queue)
    pipeline.add(s_encoder)
    pipeline.add(s_parser)
    pipeline.add(s_muxer)
    pipeline.add(s_sink)
    pipeline.add(recording_queue)
    pipeline.add(r_encoder)
    pipeline.add(r_parser)
    pipeline.add(r_sink)

    # NOTE(review): sinkpad is requested here but never explicitly linked;
    # source.link(streammux) below performs the actual pad linking — confirm
    # the requested sink_0 pad is the one it uses.
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(tee)

    # Streaming Queue
    streaming_queue.link(s_encoder)
    s_encoder.link(s_parser)
    s_parser.link(s_muxer)
    s_muxer.link(s_sink)

    # Recording Queue
    recording_queue.link(r_encoder)
    r_encoder.link(r_parser)
    r_parser.link(r_sink)

    # Get pad templates from source
    tee_src_pad_template = tee.get_pad_template("src_%u")

    # Get source to Streaming Queue
    tee_streaming_pad = tee.request_pad(tee_src_pad_template, None, None)
    streaming_queue_pad = streaming_queue.get_static_pad("sink")

    # Get source to recording Queue
    tee_recording_pad = tee.request_pad(tee_src_pad_template, None, None)
    recording_queue_pad = recording_queue.get_static_pad("sink")

    # Link tee branches at the pad level; pad links return a result code
    # rather than raising, so check explicitly.
    if (tee_streaming_pad.link(streaming_queue_pad) != Gst.PadLinkReturn.OK
            or tee_recording_pad.link(recording_queue_pad) !=
            Gst.PadLinkReturn.OK):
        print("ERROR: Tees could not be linked")
        sys.exit(1)

    # Create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Example #20
0
 def recognize(self):
     """Set the pipeline PLAYING and block in a fresh GLib main loop."""
     loop = GObject.MainLoop()
     self.main_loop = loop
     self.pipeline.set_state(Gst.State.PLAYING)
     loop.run()
Example #21
0
    def start(self):
        """Bring up the player.

        Installs SIGINT handling, loads the mandatory and every enabled
        optional module (in a fixed order), maintains the 3am auto-update
        marker file, then starts the player and blocks in the GLib main loop
        until shutdown or an unhandled error.
        """
        signal.signal(signal.SIGINT, self.sigint_handler)

        try:
            self.loop = GObject.MainLoop()

            obplayer.Gui = obplayer.ObGui()
            obplayer.Gui.create_window()

            # Mandatory modules first.
            self.load_module('player')
            self.load_module('httpadmin')

            # Pulse is needed when either audio direction uses it.
            if (obplayer.Config.setting('audio_out_mode') == 'pulse'
                    or obplayer.Config.setting('audio_in_mode') == 'pulse'):
                self.load_module('pulse')
            if not obplayer.Config.headless:
                self.load_module('xrandr')

            # Optional modules, each gated by its enable setting.
            # Load order is significant and preserved.
            optional_modules = (
                ('testsignal_enable', 'testsignal'),
                ('alerts_enable', 'alerts'),
                ('fallback_enable', 'fallback'),
                ('aoip_in_enable', 'aoipin'),
                ('rtp_in_enable', 'rtpin'),
                ('audio_in_enable', 'linein'),
                ('scheduler_enable', 'scheduler'),
                ('live_assist_enable', 'liveassist'),
                ('audiolog_enable', 'audiolog'),
                ('offair_audiolog_enable', 'offair_audiolog'),
                ('streamer_enable', 'streamer'),
                ('station_override_enabled', 'override_streamer'),
                ('newsfeed_override_enabled', 'newsfeed_override'),
            )
            for setting_name, module_name in optional_modules:
                if obplayer.Config.setting(setting_name):
                    self.load_module(module_name)

            # Maintain the marker file watched by the automatic OS updater.
            update_file = '/tmp/obplayer.update'
            if obplayer.Config.setting('update_at_3_am'):
                try:
                    if os.path.exists(update_file):
                        obplayer.Log.log(
                            'update file already exists. not recreating.',
                            'debug')
                    else:
                        os.system('touch {0}'.format(update_file))
                        obplayer.Log.log(
                            'Created update file. System will check for updates and reboot at 3 am local time.',
                            'debug')
                except Exception as e:
                    obplayer.Log.log(
                        'OS updater failed. if you a openbroadcaster system unit, Please contact support.',
                        'error')
            else:
                # Best-effort removal: missing file is the normal case.
                try:
                    os.remove(update_file)
                    obplayer.Log.log(
                        'OS updating file is being removed. Automatic OS updating is disabled.',
                        'debug')
                except Exception as e:
                    pass

            obplayer.Player.start_player()
            self.loop.run()
        except KeyboardInterrupt:
            print("Keyboard Interrupt")
        except:
            obplayer.Log.log(
                "exception occurred in main thead. Terminating...", 'error')
            obplayer.Log.log(traceback.format_exc(), 'error')

        self.application_shutdown()
Example #22
0
 def __init__(self, object_path):
     """Export this object on the session bus and prime sync bookkeeping."""
     session_bus = dbus.SessionBus()
     dbus.service.Object.__init__(self, session_bus, object_path)
     self._mainloop = GObject.MainLoop()
     # No syncs are running yet; profiles come from the framework constant.
     self._activeSync = []
     self._profiles = ButeoSyncFw.PROFILES
Example #23
0
def main():
    """Entry point for a kaldigstserver decoding worker.

    Parses CLI options, optionally forks into multiple worker processes,
    loads the YAML decoder configuration, spawns the external post-processor
    subprocesses *before* loading the model into memory, builds the decoder
    pipeline, and then reconnects to the master server's websocket forever.

    NOTE(review): `thread.start_new_thread` is the Python 2 `thread` module —
    under Python 3 this would need `_thread` or `threading`.
    """
    logging.basicConfig(level=logging.DEBUG,
                        format="%(levelname)8s %(asctime)s %(message)s ")
    logging.debug('Starting up worker')
    parser = argparse.ArgumentParser(description='Worker for kaldigstserver')
    parser.add_argument('-u',
                        '--uri',
                        default="ws://localhost:8888/worker/ws/speech",
                        dest="uri",
                        help="Server<-->worker websocket URI")
    parser.add_argument('-f', '--fork', default=1, dest="fork", type=int)
    parser.add_argument('-c',
                        '--conf',
                        dest="conf",
                        help="YAML file with decoder configuration")

    args = parser.parse_args()

    if args.fork > 1:
        import tornado.process

        logging.info("Forking into %d processes" % args.fork)
        tornado.process.fork_processes(args.fork)

    conf = {}
    if args.conf:
        with open(args.conf) as f:
            conf = yaml.safe_load(f)

    if "logging" in conf:
        logging.config.dictConfig(conf["logging"])

    # fork off the post-processors before we load the model into memory
    post_processor = None
    if "post-processor" in conf:
        post_processor = Popen(conf["post-processor"],
                               shell=True,
                               stdin=PIPE,
                               stdout=PIPE)

    full_post_processor = None
    if "full-post-processor" in conf:
        full_post_processor = Popen(conf["full-post-processor"],
                                    shell=True,
                                    stdin=PIPE,
                                    stdout=PIPE)

    # Module-level flags consumed elsewhere in the worker.
    global USE_NNET2
    USE_NNET2 = conf.get("use-nnet2", False)

    global SILENCE_TIMEOUT
    SILENCE_TIMEOUT = conf.get("silence-timeout", 5)
    if USE_NNET2:
        decoder_pipeline = DecoderPipeline2(conf)
    else:
        decoder_pipeline = DecoderPipeline(conf)

    # A GLib main loop on a background thread drives the GStreamer decoder.
    loop = GObject.MainLoop()
    thread.start_new_thread(loop.run, ())
    while True:
        # Reconnect loop: a fresh websocket object per attempt.
        ws = ServerWebsocket(args.uri,
                             decoder_pipeline,
                             post_processor,
                             full_post_processor=full_post_processor)
        try:
            logger.info("Opening websocket connection to master server")
            ws.connect()
            ws.run_forever()
        except Exception:
            logger.error("Couldn't connect to server, waiting for %d seconds",
                         CONNECT_TIMEOUT)
            time.sleep(CONNECT_TIMEOUT)
        # fixes a race condition
        time.sleep(1)
Example #24
0
 def setUp(self):
     """Fresh main loop and query counters before each test case."""
     self.main_loop = GObject.MainLoop()
     self.simple_queries_answers = 0
     self.simple_queries_counter = AMOUNT_SIMPLE_QUERIES
Example #25
0
def main(args):
    """Build and run a multi-stream DeepStream pipeline.

    Decodes each URI in ``args[1:]``, batches the streams with
    nvstreammux, runs primary inference, tiles all streams into one
    frame and renders via an EGL sink (through nvegltransform on
    Jetson/aarch64).

    args: sys.argv-style list; args[0] is the program name,
          args[1:] are the stream URIs. Exits with status 1 when no
          URI is given.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    # One FPS counter per stream; fps_streams / GETFPS come from the
    # surrounding module.
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    # One source bin per URI; each feeds a dedicated request sink pad
    # on the muxer (pads are named "sink_0", "sink_1", ...).
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if (is_aarch64()):
        # Jetson needs an EGL transform in front of nveglglessink.
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest3_pgie_config.txt")
    # Keep the inference batch size in sync with the number of sources;
    # the config file may specify a different value.
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    model_engine_file = engine_filename("resnet10", number_sources)
    pgie.set_property('model-engine-file', model_engine_file)
    print(f"setting model-engine-file property on pgie to {model_engine_file}")
    # Choose a near-square rows x columns grid for the tiler.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if (i != 0):
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # Fixed: was a bare "except:" that silently swallowed every
        # exception. Only Ctrl+C is absorbed now; anything else
        # propagates after the pipeline is torn down below.
        pass
    finally:
        # cleanup
        print("Exiting app\n")
        pipeline.set_state(Gst.State.NULL)
Example #26
0
class TestCallbacks(unittest.TestCase):
    """Exercise GObject-introspection callback marshalling via the
    Everything test library: plain callbacks, exceptions raised inside
    callbacks, userdata, async/destroy-notify variants, and None.

    Fixed: all uses of the deprecated ``assertEquals`` alias (removed
    in Python 3.12) were replaced with ``assertEqual``.
    """

    # Class-level flag/counter flipped by the callbacks so each test
    # can observe that the C side really invoked its Python callback.
    called = False
    main_loop = GObject.MainLoop()

    def testCallback(self):
        TestCallbacks.called = False

        def callback():
            TestCallbacks.called = True

        Everything.test_simple_callback(callback)
        self.assertTrue(TestCallbacks.called)

    def testCallbackException(self):
        """
        This test ensures that we get errors from callbacks correctly
        and in particular that we do not segv when callbacks fail
        """
        def callback():
            # Deliberately raise ZeroDivisionError inside the callback.
            1 / 0

        try:
            Everything.test_simple_callback(callback)
        except ZeroDivisionError:
            pass

    def testDoubleCallbackException(self):
        """
        This test ensures that we get errors from callbacks correctly
        and in particular that we do not segv when callbacks fail
        """
        def badcallback():
            # Deliberately raise ZeroDivisionError.
            1 / 0

        def callback():
            Everything.test_boolean(True)
            Everything.test_boolean(False)
            # NOTE(review): badcallback is *called* here, so the
            # ZeroDivisionError is raised before test_simple_callback
            # runs — confirm this is the intended nesting.
            Everything.test_simple_callback(badcallback())

        try:
            Everything.test_simple_callback(callback)
        except ZeroDivisionError:
            pass

    def testReturnValueCallback(self):
        TestCallbacks.called = False

        def callback():
            TestCallbacks.called = True
            return 44

        self.assertEqual(Everything.test_callback(callback), 44)
        self.assertTrue(TestCallbacks.called)

    def testCallbackAsync(self):
        TestCallbacks.called = False

        def callback(foo):
            TestCallbacks.called = True
            return foo

        # The async variant defers the call until thaw_async().
        Everything.test_callback_async(callback, 44)
        i = Everything.test_callback_thaw_async()
        self.assertEqual(44, i)
        self.assertTrue(TestCallbacks.called)

    def testCallbackScopeCall(self):
        TestCallbacks.called = 0

        def callback():
            TestCallbacks.called += 1
            return 0

        # test_multi_callback invokes the callback twice.
        Everything.test_multi_callback(callback)
        self.assertEqual(TestCallbacks.called, 2)

    def testCallbackUserdata(self):
        TestCallbacks.called = 0

        def callback(userdata):
            self.assertEqual(userdata, "Test%d" % TestCallbacks.called)
            TestCallbacks.called += 1
            return TestCallbacks.called

        for i in range(100):
            val = Everything.test_callback_user_data(callback, "Test%d" % i)
            self.assertEqual(val, i + 1)

        self.assertEqual(TestCallbacks.called, 100)

    def testCallbackUserdataRefCount(self):
        TestCallbacks.called = False

        def callback(userdata):
            TestCallbacks.called = True
            return 1

        ud = "Test User Data"

        # The destroy notifications must release their userdata refs,
        # so the refcount should be unchanged after thawing them.
        start_ref_count = getrefcount(ud)
        for i in range(100):
            Everything.test_callback_destroy_notify(callback, ud)

        Everything.test_callback_thaw_notifications()
        end_ref_count = getrefcount(ud)

        self.assertEqual(start_ref_count, end_ref_count)

    def testAsyncReadyCallback(self):
        TestCallbacks.called = False
        TestCallbacks.main_loop = GObject.MainLoop()

        def callback(obj, result, user_data):
            TestCallbacks.main_loop.quit()
            TestCallbacks.called = True

        Everything.test_async_ready_callback(callback)

        # Spin the loop until the async callback fires and quits it.
        TestCallbacks.main_loop.run()

        self.assertTrue(TestCallbacks.called)

    def testCallbackDestroyNotify(self):
        def callback(user_data):
            TestCallbacks.called = True
            return 42

        TestCallbacks.called = False
        self.assertEqual(
            Everything.test_callback_destroy_notify(callback, 42), 42)
        self.assertTrue(TestCallbacks.called)
        self.assertEqual(Everything.test_callback_thaw_notifications(), 42)

    def testCallbackInMethods(self):
        object_ = Everything.TestObj()

        def callback():
            TestCallbacks.called = True
            return 42

        TestCallbacks.called = False
        object_.instance_method_callback(callback)
        self.assertTrue(TestCallbacks.called)

        TestCallbacks.called = False
        Everything.TestObj.static_method_callback(callback)
        self.assertTrue(TestCallbacks.called)

        def callbackWithUserData(user_data):
            TestCallbacks.called = True
            return 42

        TestCallbacks.called = False
        # Constructed object intentionally discarded; we only check
        # that the constructor invoked the callback.
        Everything.TestObj.new_callback(callbackWithUserData, None)
        self.assertTrue(TestCallbacks.called)

    def testCallbackNone(self):
        # make sure this doesn't assert or crash
        Everything.test_simple_callback(None)
      call(uri, shell=True)
      return Unity.ActivationResponse.new(Unity.HandledType.HIDE_DASH)

if __name__ == "__main__":
  # NOTE: If we used the normal 'dbus' module for Python we'll get
  #       slightly odd results because it uses a default connection
  #       to the session bus that is different from the default connection
  #       GDBus (hence libunity) will use. Meaning that the daemon name
  #       will be owned by a connection different from the one all our
  #       Dee + Unity magic is working on...
  #       Still waiting for nice GDBus bindings to land:
  #                        http://www.piware.de/2011/01/na-zdravi-pygi/
  session_bus_connection = Gio.bus_get_sync(Gio.BusType.SESSION, None)
  session_bus = Gio.DBusProxy.new_sync(session_bus_connection, 0, None,
                                        'org.freedesktop.DBus',
                                        '/org/freedesktop/DBus',
                                        'org.freedesktop.DBus', None)
  # Ask the bus to grant us BUS_NAME (flag 0x4 = DO_NOT_QUEUE).
  result = session_bus.call_sync('RequestName',
                                 GLib.Variant("(su)", (BUS_NAME, 0x4)),
                                 0, -1, None)

  # Unpack variant response with signature "(u)". 1 means we got it.
  result = result.unpack()[0]

  if result != 1:
    # Fixed: the original used Python 2 print-statement syntax
    # ("print >> sys.stderr, ..."), which is broken under Python 3.
    print("Failed to own name %s. Bailing out." % BUS_NAME, file=sys.stderr)
    raise SystemExit(1)

  daemon = Daemon()
  GObject.MainLoop().run()
Example #28
0
    def __init__(self):
        """Initialize GLib threading support, the main loop and the player."""
        # Threading support is enabled before any other GObject use
        # (threads_init is a no-op on modern PyGObject but harmless).
        GObject.threads_init()

        self._loop = GObject.MainLoop()
        # NOTE(review): Player is defined elsewhere in this file/module;
        # presumably it wraps the playback backend — verify at its definition.
        self._player = Player()
Example #29
0
import gi
from gi.repository import GObject

from zocp import ZOCP
import zmq

# Wire a ZOCP node into the GLib main loop: whenever the node's inbox
# socket becomes readable, pump the ZOCP event loop once.
GObject.threads_init()
loop = GObject.MainLoop()

# Create the node and expose a read/write percent parameter.
z = ZOCP("GLibTest")
z.register_percent('myPercent', 12, access='rw')


def zocp_handle(*args, **kwargs):
    """GLib IO-watch callback: process pending ZOCP events.

    Returns True so GLib keeps the watch installed.
    """
    z.run_once()
    return True


# Watch the raw file descriptor behind the ZeroMQ inbox socket; GLib
# calls zocp_handle whenever it is readable.
GObject.io_add_watch(z.inbox.getsockopt(zmq.FD), GObject.PRIORITY_DEFAULT,
                     GObject.IO_IN, zocp_handle)
z.start()
try:
    loop.run()
except Exception as e:
    # Top-level boundary: report the error instead of a raw traceback.
    print(e)
finally:
    # Always stop the ZOCP node so its sockets/threads shut down cleanly.
    z.stop()
def main(args):
    """Build and run a two-camera DeepStream analytics pipeline.

    Chain: nvarguscamerasrc (x2) -> nvstreammux -> nvinfer ->
    nvtracker -> nvdsanalytics -> tiler -> converter -> OSD ->
    nvegltransform -> EGL sink, with a queue between each stage.

    args: accepted for CLI symmetry but unused by this function.
    """
    # Cameras captured via nvarguscamerasrc, one per CSI sensor id.
    cameras_list = [
        {
            "source": 0,
            "name": "Camera 1",
        },
        {
            "source": 1,
            "name": "Camera 2"
        },
    ]

    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    if not pipeline:
        print("Unable to create Pipeline")
        exit(0)

    streammux = create_element_or_error("nvstreammux", "stream-muxer")
    pipeline.add(streammux)

    # One Argus source + caps filter per camera; each source feeds a
    # request sink pad ("sink_<id>") on the muxer.
    for camera in cameras_list:
        source = create_element_or_error("nvarguscamerasrc",
                                         "source-" + camera['name'])
        source.set_property('sensor-id', camera['source'])
        source.set_property('bufapi-version', True)
        caps = create_element_or_error("capsfilter",
                                       "source-caps-source-" + camera['name'])
        caps.set_property(
            "caps",
            Gst.Caps.from_string(
                "video/x-raw(memory:NVMM),width=1920,height=1080,framerate=60/1,format=NV12"
            ))
        pipeline.add(source)
        pipeline.add(caps)

        sinkpad = streammux.get_request_pad('sink_' + str(camera['source']))
        srcpad = source.get_static_pad("src")

        if not sinkpad:
            print("Unable to create source sink pad")
            exit(0)
        if not srcpad:
            print("Unable to create source src pad")
            exit(0)
        # NOTE(review): the caps filter above is added to the pipeline but
        # never linked between source and muxer — the source pad links
        # straight to the muxer here. Confirm whether caps should be in
        # the chain; behavior left unchanged.
        srcpad.link(sinkpad)

    # A queue between every pair of stages decouples their threads.
    queue1 = create_element_or_error("queue", "queue1")
    queue2 = create_element_or_error("queue", "queue2")
    queue3 = create_element_or_error("queue", "queue3")
    queue4 = create_element_or_error("queue", "queue4")
    queue5 = create_element_or_error("queue", "queue5")
    queue6 = create_element_or_error("queue", "queue6")
    queue7 = create_element_or_error("queue", "queue7")

    pgie = create_element_or_error("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    analytics = create_element_or_error("nvdsanalytics", "analytics")
    tiler = create_element_or_error("nvmultistreamtiler", "nvtiler")
    convertor = create_element_or_error("nvvideoconvert", "convertor")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt"
    )

    # KLT reference tracker shipped with DeepStream 5.1.
    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_mot_klt.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('enable-batch-process', 1)

    analytics.set_property("config-file", "./nvdsanalytics/live.txt")

    nvosd.set_property('process-mode', 0)
    nvosd.set_property('display-text', 0)

    # Don't sync on the clock — render frames as fast as they arrive.
    sink.set_property('sync', False)

    print("Adding elements to Pipeline")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(analytics)
    pipeline.add(tiler)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(analytics)
    analytics.link(queue4)
    queue4.link(tiler)
    tiler.link(queue5)
    queue5.link(convertor)
    convertor.link(queue6)
    queue6.link(nvosd)
    nvosd.link(queue7)
    queue7.link(transform)
    transform.link(sink)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Attach the analytics probe so per-frame metadata can be inspected.
    analytics_src_pad = analytics.get_static_pad("src")
    if not analytics_src_pad:
        sys.stderr.write("Unable to get src pad")
    else:
        analytics_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                    nvanalytics_src_pad_buffer_probe, 0)

    # List the sources
    print("Starting pipeline")
    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # Fixed: was a bare "except:" that silently swallowed every
        # exception. Only Ctrl+C is absorbed now; anything else
        # propagates after the pipeline is torn down below.
        pass
    finally:
        # cleanup
        print("Exiting app")
        pipeline.set_state(Gst.State.NULL)