def add_sources(data):
    """Create a source bin for stream index *data*, attach it to the global
    pipeline's streammux, register an FPS counter for it and set it playing.

    Args:
        data: integer stream index used for the streammux request pad name,
            the source-bin id and the FPS-counter key.

    Returns:
        False, so the function can be used as a one-shot GLib timeout callback.
    """
    global pipeline
    print("Calling Start %d " % data)
    # Create a uridecode bin with the chosen source id.
    # NOTE(review): hard-coded RTSP URI with masked credentials — presumably a
    # test camera; confirm before production use.
    uri = "rtsp://*****:*****@192.168.2.64:554/Facit/media.smp"
    # BUGFIX: the bin id, the request-pad name and the FPS key must all use
    # the same stream index. Previously the bin and the FPS key were
    # hard-coded to 1 while the pad name used `data`, so any call with
    # data != 1 created a mismatched bin/pad pairing and overwrote stream 1's
    # FPS counter.
    source_bin = create_source_bin(data, uri)
    if not source_bin:
        sys.stderr.write("Failed to create source bin. Exiting.")
        exit(1)
    pipeline.add(source_bin)
    padname = "sink_%u" % data
    sinkpad = streammux.get_request_pad(padname)
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin \n")
    srcpad = source_bin.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin \n")
    srcpad.link(sinkpad)
    fps_streams["stream{0}".format(data)] = GETFPS(data)
    # Set state of source bin to playing.
    state_return = source_bin.set_state(Gst.State.PLAYING)
    if state_return == Gst.StateChangeReturn.SUCCESS:
        print("STATE CHANGE SUCCESS\n")
    elif state_return == Gst.StateChangeReturn.FAILURE:
        print("STATE CHANGE FAILURE\n")
    elif state_return == Gst.StateChangeReturn.ASYNC:
        # Block until the asynchronous state change completes.
        state_return = source_bin.get_state(Gst.CLOCK_TIME_NONE)
    elif state_return == Gst.StateChangeReturn.NO_PREROLL:
        print("STATE CHANGE NO PREROLL\n")
    return False
def deepstream_main(config):
    """Build and run the full DeepStream pipeline described by *config*.

    The pipeline is: source(s) -> nvstreammux -> queue -> nvinfer (pgie) ->
    queue -> nvtracker -> queue -> nvvideoconvert -> capsfilter(RGBA) ->
    queue -> nvmultistreamtiler -> queue -> nvvideoconvert -> queue ->
    nvdsosd -> queue -> sink (EGL renderer or fakesink depending on
    config["display"]).

    Args:
        config: dict-like object with at least the keys read below
            ("source_type", "source", "display", "processing_width",
            "processing_height", "tiler_width", "tiler_height",
            "image_timer", "queue_size"). Source type may be "rtsp"
            (dict of URIs), "mipi" or "usb".

    Side effects: mutates the module globals image_timer, number_sources,
    id_dict and fps_streams; creates per-stream image directories under
    path1/path2; blocks in a GLib main loop until interrupted or EOS.
    """
    global image_timer
    global number_sources
    global id_dict
    global fps_streams
    source_type = config["source_type"]
    sources = config["source"]
    # rtsp uses one stream per configured URI; camera sources are single-stream.
    if config["source_type"] == "rtsp":
        number_sources = len(list(sources))
    elif config["source_type"] == "mipi" or config["source_type"] == "usb":
        number_sources = 1
    display = config["display"]
    MUXER_OUTPUT_WIDTH = config["processing_width"]
    MUXER_OUTPUT_HEIGHT = config["processing_height"]
    TILED_OUTPUT_WIDTH = config["tiler_width"]
    TILED_OUTPUT_HEIGHT = config["tiler_height"]
    image_timer = config["image_timer"]
    for i in range(number_sources):
        # initialise id dictionary to keep track of object_id streamwise
        id_dict[i] = Queue(maxsize=config["queue_size"])
        fps_streams["stream{0}".format(i)] = GETFPS(i)
        # create image directories for separate streams
        if not os.path.exists(os.path.join(path1, "stream_" + str(i))):
            os.mkdir(os.path.join(path1, "stream_" + str(i)))
        if not os.path.exists(os.path.join(path2, "stream_" + str(i))):
            os.mkdir(os.path.join(path2, "stream_" + str(i)))
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    if source_type == "rtsp":
        is_live = False
        # One uridecode source bin per RTSP stream, each wired to its own
        # streammux request pad.
        for i in range(number_sources):
            print("Creating source_bin ", i, " \n ")
            uri_name = sources["stream_" + str(i)]
            if uri_name.find("rtsp://") == 0:
                is_live = True
            source_bin = create_source_bin(i, uri_name)
            if not source_bin:
                sys.stderr.write("Unable to create source bin \n")
            pipeline.add(source_bin)
            padname = "sink_%u" % i
            sinkpad = streammux.get_request_pad(padname)
            if not sinkpad:
                sys.stderr.write("Unable to create sink pad bin \n")
            srcpad = source_bin.get_static_pad("src")
            if not srcpad:
                sys.stderr.write("Unable to create src pad bin \n")
            srcpad.link(sinkpad)
        if is_live:
            print("Atleast one of the sources is live")
            streammux.set_property('live-source', 1)
    elif source_type == "mipi":
        # Source element for reading from the file
        print("Creating Source \n ")
        source = Gst.ElementFactory.make("nvarguscamerasrc", "src_elem")
        if not source:
            sys.stderr.write(" Unable to create Source \n")
        source.set_property('bufapi-version', True)
        # Converter to scale the image
        nvvidconv_src = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_src")
        if not nvvidconv_src:
            sys.stderr.write(" Unable to create nvvidconv_src \n")
        # Caps for NVMM and resolution scaling
        caps_nvvidconv_src = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
        if not caps_nvvidconv_src:
            sys.stderr.write(" Unable to create capsfilter \n")
        caps_nvvidconv_src.set_property(
            'caps',
            Gst.Caps.from_string(
                'video/x-raw(memory:NVMM), width=1280, height=720'))
    elif source_type == "usb":
        # Source element for reading from the file
        print("Creating Source \n ")
        source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
        if not source:
            sys.stderr.write(" Unable to create Source \n")
        source.set_property('device', "/dev/video0")
        caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
        if not caps_v4l2src:
            sys.stderr.write(" Unable to create v4l2src capsfilter \n")
        caps_v4l2src.set_property(
            'caps',
            Gst.Caps.from_string("video/x-raw, framerate=30/1, YUY2"))
        print("Creating Video Converter \n")
        # videoconvert to make sure a superset of raw formats are supported
        vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
        if not vidconvsrc:
            sys.stderr.write(" Unable to create videoconvert \n")
        # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
        nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert",
                                               "convertor_src2")
        if not nvvidconvsrc:
            sys.stderr.write(" Unable to create Nvvideoconvert \n")
        caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
        if not caps_vidconvsrc:
            sys.stderr.write(" Unable to create capsfilter \n")
        caps_vidconvsrc.set_property(
            'caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
    # --- Elements shared by every source type ---
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    # Force RGBA so downstream Python probes can read the frames.
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    # On Jetson (aarch64) an EGL transform is required before the EGL sink.
    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")
    if display:
        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")
    else:
        print("Creating FakeSink \n")
        sink = Gst.ElementFactory.make("fakesink", "fakesink")
        if not sink:
            sys.stderr.write(" Unable to create fake sink \n")
    # Set properties of tracker
    # NOTE(review): this rebinds the *config* parameter to a ConfigParser —
    # the original config dict is no longer accessible past this point.
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            # NOTE(review): underscore property name ('gpu_id') while the
            # config key is hyphenated — confirm the plugin accepts it.
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)
    streammux.set_property('width', MUXER_OUTPUT_WIDTH)
    streammux.set_property('height', MUXER_OUTPUT_HEIGHT)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "primary_config.txt")
    # Keep the inference batch size in sync with the number of sources.
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    # Lay the tiles out in a near-square grid.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos", 0)
    sink.set_property("sync", 0)
    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)
    # Queues between every stage to decouple the elements.
    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    queue6 = Gst.ElementFactory.make("queue", "queue6")
    queue7 = Gst.ElementFactory.make("queue", "queue7")
    queue8 = Gst.ElementFactory.make("queue", "queue8")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)
    pipeline.add(queue8)
    if source_type == "mipi":
        print("Adding elements to Pipeline \n")
        pipeline.add(source)
        pipeline.add(nvvidconv_src)
        pipeline.add(caps_nvvidconv_src)
        print("Linking elements in the Pipeline \n")
        source.link(nvvidconv_src)
        nvvidconv_src.link(caps_nvvidconv_src)
        sinkpad = streammux.get_request_pad("sink_0")
        if not sinkpad:
            sys.stderr.write(" Unable to get the sink pad of streammux \n")
        srcpad = caps_nvvidconv_src.get_static_pad("src")
        if not srcpad:
            sys.stderr.write(" Unable to get source pad of source \n")
        srcpad.link(sinkpad)
    elif source_type == "usb":
        print("Adding elements to Pipeline \n")
        pipeline.add(source)
        pipeline.add(caps_v4l2src)
        pipeline.add(vidconvsrc)
        pipeline.add(nvvidconvsrc)
        pipeline.add(caps_vidconvsrc)
        print("Linking elements in the Pipeline \n")
        source.link(caps_v4l2src)
        caps_v4l2src.link(vidconvsrc)
        vidconvsrc.link(nvvidconvsrc)
        nvvidconvsrc.link(caps_vidconvsrc)
        sinkpad = streammux.get_request_pad("sink_0")
        if not sinkpad:
            sys.stderr.write(" Unable to get the sink pad of streammux \n")
        srcpad = caps_vidconvsrc.get_static_pad("src")
        if not srcpad:
            sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
        srcpad.link(sinkpad)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)
    # Main chain: mux -> pgie -> tracker -> convert -> RGBA caps -> tiler ->
    # convert -> osd -> sink, with a queue between each pair.
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(nvvidconv1)
    nvvidconv1.link(queue4)
    queue4.link(filter1)
    filter1.link(queue5)
    queue5.link(tiler)
    tiler.link(queue6)
    queue6.link(nvvidconv)
    nvvidconv.link(queue7)
    queue7.link(nvosd)
    if is_aarch64() and display:
        nvosd.link(queue8)
        queue8.link(transform)
        transform.link(sink)
    else:
        nvosd.link(queue8)
        queue8.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # NOTE(review): despite the variable name, this fetches the tiler's
    # *sink* pad — the probe sees pre-tiling, per-stream batched buffers.
    tiler_src_pad = tiler.get_static_pad("sink")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                            tiler_src_pad_buffer_probe, 0)
    # List the sources
    print("Now playing...")
    for i, src in sources.items():
        print(i, ": ", src)
    print("Starting pipeline \n")
    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """Entry point for the face-recognition DeepStream pipeline.

    Builds: source bins -> nvstreammux -> nvinfer (facenet pgie) ->
    nvtracker -> nvvideoconvert -> capsfilter(RGBA) -> nvmultistreamtiler ->
    nvvideoconvert -> nvdsosd -> fakesink, loads/compares a known-faces
    database via the biblio/com helpers, and runs a GLib main loop.

    Args:
        args: argv-style list — args[1:-1] are input URIs and args[-1] is the
            output folder for saved frames (must not exist yet).

    Side effects: sets the globals folder_name, frame_count, saved_count and
    fps_streams; creates per-stream output folders; blocks until EOS/interrupt.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write(
            "usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" %
            args[0])
        sys.exit(1)
    print("Argumentos :", args)
    # One FPS counter per input stream (last arg is the output folder).
    for i in range(0, len(args) - 2):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 2
    print("Numero de fuentes :", number_sources)
    global folder_name
    folder_name = args[-1]
    print(folder_name)
    if path.exists(folder_name):
        sys.stderr.write(
            "The output folder %s already exists. Please remove it first.\n" %
            folder_name)
        sys.exit(1)
    else:
        os.mkdir(folder_name)
        print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # We load the database of known faces here if there is one, and we define the output DB name if we are only reading
    pwd = os.getcwd()
    known_faces_db_name = pwd + '/data/encoded_known_faces/knownFaces.dat'
    output_db_name = pwd + '/data/video_encoded_faces/test_video_default.data'
    set_known_faces_db_name(known_faces_db_name)
    set_output_db_name(output_db_name)
    # try to read the information from the known faces DB
    total, encodings, metadata = biblio.read_pickle(known_faces_db_name, False)
    set_known_faces_db(total, encodings, metadata)
    # Empty DB -> 'read' (enroll); otherwise 'find'; an existing output DB
    # switches the run to 'compare'.
    if total == 0:
        action = 'read'
    else:
        action = 'find'
    set_video_initial_time()
    if com.file_exists_and_not_empty(output_db_name):
        action = 'compare'
    set_action(actions[action])
    #print(action)
    #quit()
    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    for i in range(number_sources):
        # Per-stream output folder and frame counters.
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Creation of tracking to follow up the model face
    # April 21th
    # ERM
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")
    print("Creating EGLSink \n")
    # edgar: changed this line so video is not displayed -
    #sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    sink = Gst.ElementFactory.make("fakesink", "fakesink")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)
    # Meraki cameras, 720p
    #streammux.set_property('width', 1920)
    streammux.set_property('width', 1280)
    #streammux.set_property('height', 1080)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    print('CURRENT_DIR', CURRENT_DIR)
    pgie.set_property('config-file-path',
                      CURRENT_DIR + "/configs/pgie_config_facenet.txt")
    # Keep the inference batch size in sync with the number of sources.
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    # Set properties of tracker
    # April 21th
    # ERM
    config = configparser.ConfigParser()
    config.read('configs/tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        elif key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        elif key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            # NOTE(review): underscore property name ('gpu_id') while the
            # config key is hyphenated — confirm the plugin accepts it.
            tracker.set_property('gpu_id', tracker_gpu_id)
        elif key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        elif key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        elif key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)
    # Lay the tiles out in a near-square grid.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("sync", 0)
    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)
    print("Adding elements to Pipeline \n")
    # Add tracker in pipeline
    # April 21th
    # ERM
    pipeline.add(pgie)
    pipeline.add(tracker)  # Tracker
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)  # added for the tracker
    # pgie.link(nvvidconv1)  (modified)
    tracker.link(
        nvvidconv1)  # added to link the tracker with the remaining elements
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    #tiler_sink_pad=tiler.get_static_pad("sink")
    #if not tiler_sink_pad:
    #    sys.stderr.write(" Unable to get src pad \n")
    #else:
    #    tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)
    # Probe on the tiler src pad: sees the composited (tiled) frames.
    tiler_src_pad = tiler.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)
    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if (i != 0):
            print(i, ": ", source)
    print("Starting pipeline \n")
    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """Entry point for the minimal DeepStream pipeline:
    source bins -> nvstreammux -> nvinfer -> nvvideoconvert -> nvdsosd ->
    fakesink, with probes on the pgie src pad and the OSD sink pad.

    Args:
        args: argv-style list — args[1:-1] are input URIs, args[-1] is the
            output folder for saved frames (must not already exist).

    Side effects: mutates the globals folder_name, fps_streams, frame_count
    and saved_count; creates per-stream folders; blocks in a GLib main loop.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write(
            "usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" %
            args[0])
        sys.exit(1)
    # One FPS counter per input stream (last arg is the output folder).
    for i in range(0, len(args) - 2):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 2
    global folder_name
    folder_name = args[-1]
    if path.exists(folder_name):
        sys.stderr.write(
            "The output folder %s already exists. Please remove it first.\n" %
            folder_name)
        sys.exit(1)
    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    for i in range(number_sources):
        # Per-stream output folder and frame counters.
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    # BUGFIX: this block creates a fakesink, but the log and error messages
    # previously claimed an EGL sink — corrected to avoid misleading
    # diagnostics.
    print("Creating FakeSink \n")
    sink = Gst.ElementFactory.make("fakesink", "fakesink")
    if not sink:
        sys.stderr.write(" Unable to create fake sink \n")
    # CONSISTENCY: the other pipelines in this file enable live-source on the
    # muxer when any input is RTSP; is_live was computed here but never used.
    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    # Set properties of pgie and sgie
    pgie.set_property('config-file-path', "dstest2_sgie2_config.txt")
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    # Link: streammux -> nvinfer -> nvvidconv -> nvosd -> sink
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Add a probe on the primary-infer source pad to get inference output tensors
    pgiesrcpad = pgie.get_static_pad("src")
    if not pgiesrcpad:
        sys.stderr.write(" Unable to get src pad of primary infer \n")
    pgiesrcpad.add_probe(Gst.PadProbeType.BUFFER, pgie_src_pad_buffer_probe, 0)
    # Probe on the OSD sink pad: by that point the buffer carries all the
    # metadata generated upstream.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
# # covert the array into cv2 default color format # frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA) # cv2.imwrite("./frame_" + str(frame_number) + ".jpg", # frame_image) # print('saved to') try: l_frame = l_frame.next except StopIteration: break return Gst.PadProbeReturn.OK if __name__ == '__main__': fps_stream = GETFPS(0) # out_file_name = '{}.mp4'.format(sys.argv[1]) # in_file_path = sys.argv[2] out_file_name = 'out.mp4' # pipeline = Pipeline(output_file_path=out_file_name) # pipeline = Pipeline(in_file_path, output_file_path=out_file_name) pipeline = PipelineCamera(output_file_path=out_file_name) try: pipeline.start() except KeyboardInterrupt as e: # sink.get_static_pad('sink').send_event(Gst.Event.new_eos()) # pipeline.send_event(Gst.Event.new_eos()) # pipeline.set_state(Gst.State.NULL) pipeline.pipeline.send_event(Gst.Event.new_eos())
def main():
    """Entry point for the benchmark pipeline:
    N file source bins -> nvstreammux -> queue -> nvinfer -> queue -> queue ->
    fakesink, with a buffer probe on the last queue; prints total wall-clock
    runtime on exit.

    Reads the stream count and profile flag from parse_args(); stores the
    parsed args in the global g_args and registers one FPS counter per stream.
    """
    args = parse_args()
    global g_args
    g_args = args
    num_sources = args.num_sources
    # NOTE(review): this local `path` shadows any module-level `path` import.
    path = os.path.abspath(os.getcwd())
    # Choose the profiling or the regular input clip.
    if (args.prof):
        INPUT_VIDEO = 'file://' + path + '/../source_code/dataset/wt_prof.mp4'
    else:
        INPUT_VIDEO = 'file://' + path + '/../source_code/dataset/wt.mp4'
    print("Creating pipeline with " + str(num_sources) + " streams")
    # Initialise FPS
    for i in range(0, num_sources):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    # Standard GStreamer initialization
    Gst.init(None)
    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    ########### Create Elements required for the Pipeline ###########
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = make_elm_or_print_err("nvstreammux", "Stream-muxer",
                                      "Stream-muxer")
    pipeline.add(streammux)
    # All streams replay the same local file.
    for i in range(num_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = INPUT_VIDEO
        # NOTE(review): is_live is set here but never used afterwards
        # (file URIs never match, so this branch is effectively dead).
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(args, i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    # Use nvinfer to run inferencing on decoder's output, behaviour of inferencing is set through config file
    pgie = make_elm_or_print_err("nvinfer", "primary-inference", "pgie")
    # Create Sink for storing the output
    fakesink = make_elm_or_print_err("fakesink", "fakesink", "Sink")
    # Queues to enable buffering
    queue1 = make_elm_or_print_err("queue", "queue1", "queue1")
    queue2 = make_elm_or_print_err("queue", "queue2", "queue2")
    queue3 = make_elm_or_print_err("queue", "queue3", "queue3")
    ############ Set properties for the Elements ############
    # Set Input Width , Height and Batch Size
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', num_sources)
    # Timeout in microseconds to wait after the first buffer is available
    # to push the batch even if a complete batch is not formed.
    streammux.set_property('batched-push-timeout', 4000000)
    # Set configuration file for nvinfer
    pgie.set_property('config-file-path',
                      "../source_code/N3/dstest1_pgie_config.txt")
    # Setting batch_size for pgie
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != num_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", num_sources, " \n")
        pgie.set_property("batch-size", num_sources)
    # Fake sink properties: no clock sync, no async state change — run as
    # fast as possible for benchmarking.
    fakesink.set_property("sync", 0)
    fakesink.set_property("async", 0)
    ########## Add and Link ELements in the Pipeline ##########
    print("Adding elements to Pipeline \n")
    pipeline.add(queue1)
    pipeline.add(pgie)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(fakesink)
    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(queue3)
    queue3.link(fakesink)
    # create an event loop and feed gstreamer bus mesages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    print("Added and Linked elements to pipeline")
    # Probe on the last queue's src pad — sees every inferred buffer.
    src_pad = queue3.get_static_pad("src")
    if not src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        src_pad.add_probe(Gst.PadProbeType.BUFFER, src_pad_buffer_probe, 0)
    # List the sources
    print("Now playing...")
    print("Starting pipeline \n")
    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)
    start_time = time.time()
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
    print("--- %s seconds ---" % (time.time() - start_time))
def main(args):
    """Build and run a multi-URI inference pipeline that re-streams over RTSP:

    N x uridecodebin -> nvstreammux -> pgie1 -> nvtracker -> sgie -> pgie2 ->
    nvmultistreamtiler -> nvvideoconvert -> nvdsosd -> nvvideoconvert ->
    capsfilter(I420) -> H264/H265 encoder -> rtppay -> udpsink,
    with a GstRtspServer re-serving the UDP stream at rtsp://localhost:8554/ds-test.

    :param args: argv-style list; args[1:] are input URIs.

    Relies on module-level globals: fps_streams, codec, bitrate,
    TILED_OUTPUT_WIDTH/HEIGHT, bus_call, create_source_bin,
    osd_sink_pad_buffer_probe, GETFPS, is_aarch64.

    Bug fixed: streammux was added to the pipeline twice (once right after
    creation and again in the "Adding elements" section).  Gst.Bin.add()
    fails for an element that already has a parent, emitting a warning and
    corrupting the element bookkeeping — the duplicate add is removed.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    # One decode bin per URI, each wired to a requested muxer sink pad.
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    # Primary inference engine; behaviour is set through its config file.
    pgie1 = Gst.ElementFactory.make("nvinfer", "primary-inference-1")
    if not pgie1:
        sys.stderr.write(" Unable to create pgie1 \n")
    # Object tracker (configured below from dstest2_tracker_config.txt).
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    # Secondary network, classifies on tracker output.
    sgie = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie:
        sys.stderr.write(" Unable to make sgie \n")
    # Second primary network.
    pgie2 = Gst.ElementFactory.make("nvinfer", "primary-inference-2")
    if not pgie2:
        sys.stderr.write(" Unable to make pgie2 \n")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")
    # Create a caps filter: encoder expects I420 in NVMM memory.
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps",
        Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))
    # Make the encoder (module-global `codec` selects H264/H265).
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)
    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")
    # Make the UDP sink; the RTSP server below re-serves this UDP stream.
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)
    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    # NOTE(review): batch-size is hard-coded to 1 even for multiple sources —
    # looks intentional in this variant, but verify against the pgie configs.
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    # Set config file for each network.
    pgie1.set_property('config-file-path', "dstest2_pgie1_config.txt")
    sgie.set_property('config-file-path', "dstest2_sgie_config.txt")
    pgie2.set_property('config-file-path', "dstest2_pgie2_config.txt")
    tiler.set_property("rows", 1)
    tiler.set_property("columns", 1)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    # Set properties of tracker from its config file.
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
    # Populate pipeline.  BUGFIX: streammux was already added right after it
    # was created above — do not add it a second time here.
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie1)
    pipeline.add(tracker)
    pipeline.add(sgie)
    pipeline.add(pgie2)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)
    # Link the elements together.
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie1)
    pgie1.link(tracker)
    tracker.link(sgie)
    sgie.link(pgie2)
    pgie2.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)
    # create and event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Start RTSP serving of the UDP stream produced by udpsink above.
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print(
        "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n"
        % rtsp_port_num)
    # Lets add probe to get informed of the meta data generated, we add probe to
    # the sink pad of the osd element, since by that time, the buffer would have
    # had got all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
    print("Starting pipeline \n")
    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        # Bare except: let Ctrl-C fall through to cleanup (sample convention).
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """Build and run a tiled on-screen display pipeline:

    N x uridecodebin -> nvstreammux -> queue -> nvinfer -> queue ->
    nvmultistreamtiler -> queue -> nvvideoconvert -> queue -> nvdsosd ->
    queue -> [nvegltransform on Jetson] -> nveglglessink

    :param args: argv-style list; args[1:] are input URIs.

    Relies on module-level globals: fps_streams, GETFPS, create_source_bin,
    bus_call, tiler_src_pad_buffer_probe, OSD_PROCESS_MODE, OSD_DISPLAY_TEXT,
    TILED_OUTPUT_WIDTH/HEIGHT, is_aarch64.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    # One decode bin per URI, each wired to a requested muxer sink pad.
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    # Buffering queues between every pair of processing stages.
    # NOTE(review): unlike the other elements these are not null-checked.
    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    print("Creating tiler \n ")
    # Composites the batched streams into one 2D grid.
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvosd.set_property('process-mode', OSD_PROCESS_MODE)
    nvosd.set_property('display-text', OSD_DISPLAY_TEXT)
    # On Jetson (aarch64) an EGL transform is required in front of the sink.
    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    # Timeout (us) to push a batch even if it is not complete.
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest3_pgie_config.txt")
    # Override the config file's batch-size so it matches the source count.
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    # Near-square tiler grid large enough for all sources.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos", 0)
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)
    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tiler)
    tiler.link(queue3)
    queue3.link(nvvidconv)
    nvvidconv.link(queue4)
    queue4.link(nvosd)
    if is_aarch64():
        nvosd.link(queue5)
        queue5.link(transform)
        transform.link(sink)
    else:
        nvosd.link(queue5)
        queue5.link(sink)
    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Probe on pgie's src pad (despite the variable name) — buffers here
    # already carry the inference metadata.
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)
    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if (i != 0):
            print(i, ": ", source)
    print("Starting pipeline \n")
    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        # Bare except: let Ctrl-C fall through to cleanup (sample convention).
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
def __init__(self): super().__init__('inference_publisher') self.declare_parameter('input_sources') input_sources = self.get_parameter('input_sources').value number_sources = len(input_sources) for i in range(number_sources): fps_streams["stream{0}".format(i)] = GETFPS(i) self.publisher_detection = self.create_publisher( Detection2DArray, 'multi_detection', 10) self.publisher_classification = self.create_publisher( Classification2D, 'multi_classification', 10) # Standard GStreamer initialization GObject.threads_init() Gst.init(None) # Create gstreamer elements # Create Pipeline element that will form a connection of other elements print("Creating Pipeline \n ") self.pipeline = Gst.Pipeline() if not self.pipeline: sys.stderr.write(" Unable to create Pipeline \n") print("Creating streamux \n ") # Create nvstreammux instance to form batches from one or more sources. streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n") self.pipeline.add(streammux) for i in range(number_sources): frame_count["stream_" + str(i)] = 0 saved_count["stream_" + str(i)] = 0 print("Creating source_bin ", i, " \n ") uri_name = input_sources[i] if uri_name.find("rtsp://") == 0: is_live = True source_bin = self.create_source_bin(i, uri_name) if not source_bin: sys.stderr.write("Unable to create source bin \n") self.pipeline.add(source_bin) padname = "sink_%u" % i sinkpad = streammux.get_request_pad(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad = source_bin.get_static_pad("src") if not srcpad: sys.stderr.write("Unable to create src pad bin \n") srcpad.link(sinkpad) # Use nvinfer to run inferencing on decoder's output, # behaviour of inferencing is set through config file pgie = Gst.ElementFactory.make("nvinfer", "primary-inference1") if not pgie: sys.stderr.write(" Unable to create pgie1 \n") tracker = Gst.ElementFactory.make("nvtracker", "tracker") if not tracker: 
sys.stderr.write(" Unable to create tracker \n") sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine") if not sgie1: sys.stderr.write(" Unable to make sgie1 \n") sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine") if not sgie1: sys.stderr.write(" Unable to make sgie2 \n") sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine") if not sgie3: sys.stderr.write(" Unable to make sgie3 \n") pgie2 = Gst.ElementFactory.make("nvinfer", "primary-inference2") if not pgie2: sys.stderr.write(" Unable to create pgie2 \n") nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1") if not nvvidconv1: sys.stderr.write(" Unable to create nvvidconv1 \n") print("Creating filter1 \n ") caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA") filter1 = Gst.ElementFactory.make("capsfilter", "filter1") if not filter1: sys.stderr.write(" Unable to get the caps filter1 \n") filter1.set_property("caps", caps1) print("Creating tiler1 \n ") tiler1 = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler1") if not tiler1: sys.stderr.write(" Unable to create tiler1 \n") print("Creating nvvidconv_1 \n ") nvvidconv_1 = Gst.ElementFactory.make("nvvideoconvert", "convertor_1") if not nvvidconv_1: sys.stderr.write(" Unable to create nvvidconv_1 \n") nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor2") if not nvvidconv2: sys.stderr.write(" Unable to create nvvidconv2 \n") caps2 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA") filter2 = Gst.ElementFactory.make("capsfilter", "filter2") if not filter2: sys.stderr.write(" Unable to get the caps filter2 \n") filter2.set_property("caps", caps2) print("Creating tiler2 \n ") tiler2 = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler2") if not tiler2: sys.stderr.write(" Unable to create tiler2 \n") print("Creating nvvidconv_2 \n ") nvvidconv_2 = Gst.ElementFactory.make("nvvideoconvert", "convertor_2") if not nvvidconv_2: 
sys.stderr.write(" Unable to create nvvidconv_2 \n") # Create OSD to draw on the converted RGBA buffer nvosd1 = Gst.ElementFactory.make("nvdsosd", "onscreendisplay1") if not nvosd1: sys.stderr.write(" Unable to create nvosd1 \n") nvosd2 = Gst.ElementFactory.make("nvdsosd", "onscreendisplay2") if not nvosd2: sys.stderr.write(" Unable to create nvosd2 \n") # Finally render the osd output if is_aarch64(): transform1 = Gst.ElementFactory.make("nvegltransform", "nvegl-transform1") transform2 = Gst.ElementFactory.make("nvegltransform", "nvegl-transform2") print("Creating EGLSink \n") sink1 = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer1") if not sink1: sys.stderr.write(" Unable to create egl sink1 \n") sink2 = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer2") if not sink2: sys.stderr.write(" Unable to create egl sink2 \n") streammux.set_property('width', 1920) streammux.set_property('height', 1080) streammux.set_property('batch-size', number_sources) streammux.set_property('batched-push-timeout', 4000000) #Set properties of pgie and sgie location = os.getcwd() + "/src/ros2_deepstream/config_files/" pgie.set_property('config-file-path', location + "dstest2_pgie_config.txt") pgie_batch_size = pgie.get_property("batch-size") if (pgie_batch_size != number_sources): print("WARNING: Overriding infer-config batch-size", pgie_batch_size, " with number of sources ", number_sources, " \n") pgie.set_property("batch-size", number_sources) sgie1.set_property('config-file-path', location + "dstest2_sgie1_config.txt") sgie2.set_property('config-file-path', location + "dstest2_sgie2_config.txt") sgie3.set_property('config-file-path', location + "dstest2_sgie3_config.txt") pgie2.set_property('config-file-path', location + "dstest1_pgie_config.txt") sink1.set_property('sync', False) sink2.set_property('sync', False) tiler_rows = int(math.sqrt(number_sources)) tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows)) tiler1.set_property("rows", 
tiler_rows) tiler1.set_property("columns", tiler_columns) tiler1.set_property("width", TILED_OUTPUT_WIDTH) tiler1.set_property("height", TILED_OUTPUT_HEIGHT) tiler2.set_property("rows", tiler_rows) tiler2.set_property("columns", tiler_columns) tiler2.set_property("width", TILED_OUTPUT_WIDTH) tiler2.set_property("height", TILED_OUTPUT_HEIGHT) #Set properties of tracker config = configparser.ConfigParser() config.read(location + 'dstest2_tracker_config.txt') config.sections() for key in config['tracker']: if key == 'tracker-width': tracker_width = config.getint('tracker', key) tracker.set_property('tracker-width', tracker_width) if key == 'tracker-height': tracker_height = config.getint('tracker', key) tracker.set_property('tracker-height', tracker_height) if key == 'gpu-id': tracker_gpu_id = config.getint('tracker', key) tracker.set_property('gpu_id', tracker_gpu_id) if key == 'll-lib-file': tracker_ll_lib_file = config.get('tracker', key) tracker.set_property('ll-lib-file', tracker_ll_lib_file) if key == 'll-config-file': tracker_ll_config_file = config.get('tracker', key) tracker.set_property('ll-config-file', tracker_ll_config_file) if key == 'enable-batch-process': tracker_enable_batch_process = config.getint('tracker', key) tracker.set_property('enable_batch_process', tracker_enable_batch_process) tee = Gst.ElementFactory.make('tee', 'tee') queue1 = Gst.ElementFactory.make('queue', 'infer1') queue2 = Gst.ElementFactory.make('queue', 'infer2') print("Adding elements to Pipeline \n") self.pipeline.add(pgie) self.pipeline.add(pgie2) self.pipeline.add(tracker) self.pipeline.add(sgie1) self.pipeline.add(sgie2) self.pipeline.add(sgie3) self.pipeline.add(nvvidconv1) self.pipeline.add(nvvidconv2) self.pipeline.add(nvosd1) self.pipeline.add(nvosd2) self.pipeline.add(sink1) self.pipeline.add(sink2) self.pipeline.add(tee) self.pipeline.add(queue1) self.pipeline.add(queue2) self.pipeline.add(tiler1) self.pipeline.add(tiler2) self.pipeline.add(filter1) 
self.pipeline.add(filter2) self.pipeline.add(nvvidconv_1) self.pipeline.add(nvvidconv_2) if is_aarch64(): self.pipeline.add(transform1) self.pipeline.add(transform2) # Link the elements together print("Linking elements in the Pipeline \n") streammux.link(tee) tee.link(queue1) tee.link(queue2) queue1.link(pgie) queue2.link(pgie2) pgie.link(tracker) tracker.link(sgie1) sgie1.link(sgie2) sgie2.link(sgie3) sgie3.link(nvvidconv1) nvvidconv1.link(filter1) filter1.link(tiler1) tiler1.link(nvvidconv_1) nvvidconv_1.link(nvosd1) pgie2.link(nvvidconv2) nvvidconv2.link(filter2) filter2.link(tiler2) tiler2.link(nvvidconv_2) nvvidconv_2.link(nvosd2) if is_aarch64(): nvosd1.link(transform1) transform1.link(sink1) nvosd2.link(transform2) transform2.link(sink2) else: nvosd1.link(sink1) nvosd2.link(sink2) # create and event loop and feed gstreamer bus mesages to it self.loop = GObject.MainLoop() bus = self.pipeline.get_bus() bus.add_signal_watch() bus.connect("message", bus_call, self.loop) # Lets add probe to get informed of the meta data generated, we add probe to # the sink pad of the osd element, since by that time, the buffer would have # had got all the metadata. tiler_sink_pad_1 = tiler1.get_static_pad("sink") if not tiler_sink_pad_1: sys.stderr.write(" Unable to get src pad \n") else: tiler_sink_pad_1.add_probe(Gst.PadProbeType.BUFFER, self.tiler_sink_pad_buffer_probe, 0) tiler_sink_pad_2 = tiler2.get_static_pad("sink") if not tiler_sink_pad_2: sys.stderr.write(" Unable to get src pad \n") else: tiler_sink_pad_2.add_probe(Gst.PadProbeType.BUFFER, self.tiler_sink_pad_buffer_probe, 0)
def main(args):
    """Build and run a tracker + cascaded-classifier display pipeline:

    N x uridecodebin -> nvstreammux -> nvinfer (pgie) -> nvtracker ->
    sgie1 -> sgie2 -> sgie3 -> nvmultistreamtiler -> nvvideoconvert ->
    nvdsosd -> [nvegltransform on Jetson] -> nveglglessink

    :param args: argv-style list; args[1:] are input URIs.

    Relies on module-level globals: fps_streams, GETFPS, create_source_bin,
    bus_call, osd_sink_pad_buffer_probe, TILED_OUTPUT_WIDTH/HEIGHT,
    is_aarch64.

    Bug fixed: sgie2's creation was null-checked against sgie1 (copy-paste
    error), so a failed sgie2 creation was never reported.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    # One decode bin per URI, each wired to a requested muxer sink pad.
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    # Primary neural net; behaviour of inferencing is set through config file.
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Tracker (configured below from dstest2_tracker_config.txt).
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    # Secondary network 1
    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie1:
        sys.stderr.write(" Unable to make sgie1 \n")
    # Secondary network 2
    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    # BUGFIX: was `if not sgie1` — a failed sgie2 creation went unreported.
    if not sgie2:
        sys.stderr.write(" Unable to make sgie2 \n")
    # Secondary network 3
    sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    if not sgie3:
        sys.stderr.write(" Unable to make sgie3 \n")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    # Finally render the osd output; Jetson needs an EGL transform.
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    # On-screen renderer sink.
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    # NOTE(review): batch-size is hard-coded to 1 even for multiple sources —
    # looks intentional in this variant, but verify against the pgie config.
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    # Set config file for each network.
    pgie.set_property('config-file-path', "dstest2_pgie_config.txt")
    sgie1.set_property('config-file-path', "dstest2_sgie1_config.txt")
    sgie2.set_property('config-file-path', "dstest2_sgie2_config.txt")
    sgie3.set_property('config-file-path', "dstest2_sgie3_config.txt")
    tiler.set_property("rows", 1)
    tiler.set_property("columns", 1)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    # Set properties of tracker from its config file.
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
    # Populate pipeline.
    print("Adding elements to Pipeline \n")
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie1)
    pipeline.add(sgie2)
    pipeline.add(sgie3)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)
    # Link the elements together.
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie1)
    sgie1.link(sgie2)
    sgie2.link(sgie3)
    sgie3.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # create and event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Probe on pgie's src pad (despite the variable name) — buffers here
    # already carry detection metadata.
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                osd_sink_pad_buffer_probe, 0)
    print("Starting pipeline \n")
    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        # Bare except: let Ctrl-C fall through to cleanup (sample convention).
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main():
    """Build and run the frame-saving DeepStream pipeline for the configured sources.

    Flow: N source bins -> nvstreammux -> pgie -> nvvidconv1 -> filter1 (RGBA caps)
    -> tracker -> tiler -> nvvidconv -> nvosd -> EGL sink. Saved frames go under
    ``folder_name`` ("frames"), one sub-directory per stream. Blocks in a GLib
    main loop until interrupted, then sets the pipeline to NULL.

    Relies on module-level helpers/state: reading_server_config, get_sources,
    log_error, fps_streams, GETFPS, frame_count, saved_count, create_source_bin,
    bus_call, tiler_src_pad_buffer_probe, CURRENT_DIR, TILED_OUTPUT_WIDTH/HEIGHT.
    """
    # Check input arguments.
    # Allows an arbitrary number of sources; in our case RTSP streams from Meraki cameras.
    reading_server_config()
    number_sources = len(get_sources())
    if number_sources < 1:
        log_error(
            "No source to analyze or not service associated to the source. check configuration file"
        )
    # Flag used to verify that at least one of the videos is a live stream.
    is_live = False
    for i in range(0, number_sources):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    global folder_name
    #folder_name=args[-1]
    folder_name = "frames"
    if not path.exists(folder_name):
        os.mkdir(folder_name)
    # sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
    # sys.exit(1)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False  # NOTE(review): redundant — already initialized above
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    # Source element for reading from the file
    print("Creating Source \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    # Create an element that accepts any kind of video or RTSP.
    i = 0
    for source in get_sources():
        # Per-stream output directory plus frame/saved counters.
        if not path.exists(folder_name + "/stream_" + str(i)):
            os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin...........", i, " \n ")
        uri_name = source
        if uri_name.find("rtsp://") == 0:
            print('is_alive_TRUE')
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        # Request a sink_%u pad on the muxer and link this source's src pad to it.
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
        i += 1
    # The Meraki RTSP video is already H264-optimized, so creating an h264parser
    # element should not be necessary.
    # print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")
    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    #
    # Version 2.1 will not run secondary inferences,
    # so sgie1, sgie2 and sgie3 are not enabled.
    #
    #sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    #if not sgie1:
    #    sys.stderr.write(" Unable to make sgie1 \n")
    #sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    #if not sgie1:
    #    sys.stderr.write(" Unable to make sgie2 \n")
    #sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    #if not sgie3:
    #    sys.stderr.write(" Unable to make sgie3 \n")
    #
    # The same version 2.1 must optionally allow sending the output to screen or not.
    #
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    sink.set_property('sync', 0)
    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)
    #streammux.set_property('live-source', 1)
    # streammux output size; if the video comes in at 720, it adjusts automatically.
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    #
    # Model configuration.
    # dstest2_pgie_config contains the standard model; alternatives exist for
    # yoloV3, yoloV3_tiny and fasterRCNN.
    #
    #pgie.set_property('config-file-path', CURRENT_DIR + "/configs/dstest2_pgie_config.txt")
    #pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_nano.txt")
    #pgie.set_property('config-file-path', CURRENT_DIR + "/configs/deepstream_app_source1_video_masknet_gpu.txt")
    #pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_yoloV3.txt")
    #pgie.set_property('config-file-path', CURRENT_DIR + "/configs/kairos_peoplenet_pgie_config.txt")
    # pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_yoloV3_tiny.txt")
    # pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_fasterRCNN.txt")
    # TODO: the full path of the configuration file still needs to be added.
    pgie.set_property('config-file-path', CURRENT_DIR +
                      "/configs/pgie_config_fd_lpd.txt")  # model for faces, plates, vehicle model and make
    # Make the inference batch size match the number of sources.
    pgie_batch_size = pgie.get_property("batch-size")
    print(pgie_batch_size)
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    # Set properties of pgie and sgiae
    # Version 2.1 does not configure secondary inferences.
    #
    #sgie1.set_property('config-file-path', CURRENT_DIR + "/configs/dstest2_sgie1_config.txt")
    #sgie2.set_property('config-file-path', CURRENT_DIR + "/configs/dstest2_sgie2_config.txt")
    #sgie3.set_property('config-file-path', CURRENT_DIR + "/configs/dstest2_sgie3_config.txt")
    # Set properties of tracker, read from the tracker config file.
    config = configparser.ConfigParser()
    config.read('configs/Plate_tracker_config.txt')
    #config.read('configs/kairos_peoplenet_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            print(tracker_width)
            tracker.set_property('tracker-width', tracker_width)
        elif key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            print(tracker_height)
            tracker.set_property('tracker-height', tracker_height)
        elif key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        elif key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        elif key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        elif key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
    # Build the tiler grid: rows = floor(sqrt(N)), columns = ceil(N / rows).
    tiler_rows = int(math.sqrt(number_sources))  # Example: 3 sources -> 1 row
    tiler_columns = int(math.ceil(
        (1.0 * number_sources) / tiler_rows))  # Example: 3 sources -> 3 columns
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    print("Adding elements to Pipeline \n")
    #
    # Version 2.1 does not require secondary inferences.
    #
    pipeline.add(h264parser)  # add h264
    pipeline.add(decoder)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv1)  # Added for better image handling
    pipeline.add(filter1)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)
    # we link the elements together
    # source_bin -> -> nvh264-decoder -> PGIE -> Tracker
    # tiler -> nvvidconv -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    # lines already executed in the previous for loop
    #sinkpad = streammux.get_request_pad("sink_0")
    #if not sinkpad:
    #    sys.stderr.write(" Unable to get the sink pad of streammux \n")
    #srcpad = decoder.get_static_pad("src")
    #if not srcpad:
    #    sys.stderr.write(" Unable to get source pad of decoder \n")
    # NOTE(review): the next four calls reuse the LAST loop iteration's
    # srcpad/sinkpad/source_bin and try to link the last source bin into
    # h264parser -> decoder -> streammux even though every source bin was
    # already linked directly to a streammux sink pad above. This looks like
    # leftover code from an older flow — confirm whether it is still intended.
    srcpad.link(sinkpad)
    source_bin.link(h264parser)
    h264parser.link(decoder)
    decoder.link(streammux)
    # -------
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tracker)
    tracker.link(tiler)
    #filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    #pgie.link(tracker)
    '''
    srcpad.link(sinkpad)
    source_bin.link(h264parser)
    h264parser.link(decoder)
    #source_bin.link(decoder) Se agregaron las dos lineas anteriores
    decoder.link(streammux)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    '''
    # create and event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Probe on the tracker's src pad: by then the buffer carries the metadata.
    tiler_src_pad = tracker.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    # start play back and listed to events
    try:
        loop.run()
    except Exception as e:
        print("This line? " + str(e))
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """Build and run a demuxing DeepStream pipeline for the URIs in ``args[1:]``.

    Flow: N source bins -> nvstreammux -> queue1 -> pgie -> queue2 ->
    nvstreamdemux -> one sink bin per stream (create_sink_bin). Blocks in a
    GLib main loop until interrupted, then sets the pipeline to NULL.

    Publishes the pipeline and its key elements through module-level globals
    (pipeline, streammux, streamdemux, SrcBin_list, SinkBin_list) — presumably
    so other code can add/release sources at runtime (see the commented
    stop_release_source timeout below); confirm against the rest of the file.
    """
    global pipeline
    global streammux
    global streamdemux
    global SrcBin_list
    global SinkBin_list
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)
    # One FPS counter per input stream.
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1
    #////////////////////////////////////////////////
    # Optional per-stream ROI trackers, driven by module-level configuration
    # (tracker_status, Roi_points_list, tracker_width/height, classes).
    if (tracker_status == 1):
        for i in range(number_sources):
            print(f" sources {i}")
            x1, y1, x2, y2, x3, y3, x4, y4 = Roi_points_list[i]
            merge = tracker.DetectandTrack(x1, y1, x2, y2, x3, y3, x4, y4,
                                           tracker_width, tracker_height,
                                           classes, direction=0)
            trackers_list.append(merge)
    #////////////////////////////////////////////////
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    streamdemux = Gst.ElementFactory.make("nvstreamdemux", "Stream-demuxer")
    pipeline.add(streammux)
    pipeline.add(streamdemux)
    SrcBin_list = []
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        source_bin = create_source_bin(i, uri_name)
        SrcBin_list.append(source_bin)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        # Request a sink_%u pad on the muxer and link this source to it.
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    # Muxer output resolution / batching; live-source enables RTSP-style timing.
    streammux.set_property('width', 800)
    streammux.set_property('height', 600)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 200000)
    streammux.set_property('live-source', 1)
    pgie.set_property('config-file-path', "config_infer_primary.txt")
    # Make the inference batch size match the number of sources.
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    pipeline.add(queue1)
    pipeline.add(queue2)
    # streammux -> queue1 -> pgie -> queue2 -> streamdemux
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(streamdemux)
    SinkBin_list = []
    for i in range(number_sources):
        print("demux source", i, "\n")
        # One demuxer src pad per stream, linked into its own sink bin.
        srcpad = streamdemux.get_request_pad("src_%u" % i)
        if not srcpad:
            sys.stderr.write(" Unable to get the src pad of streamdemux \n")
        sink_bin = create_sink_bin(i)
        SinkBin_list.append(sink_bin)
        pipeline.add(sink_bin)
        sinkpad = sink_bin.get_static_pad("sink")
        if not sinkpad:
            sys.stderr.write(" Unable to get sink pad of nvvidconv \n")
        srcpad.link(sinkpad)
    # NOTE(review): requests an extra, never-linked demux pad src_4 regardless
    # of number_sources — looks like a pre-allocation for a 5th source added at
    # runtime, or a leftover; confirm intent.
    srcpad = streamdemux.get_request_pad("src_%u" % 4)
    if not srcpad:
        sys.stderr.write(" Unable to get the src pad of streamdemux \n")
    # Event loop fed with the GStreamer bus messages.
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Metadata probe on the pgie src pad.
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)
    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    #GObject.timeout_add_seconds(60, stop_release_source, 0)
    try:
        loop.run()
    except:
        # NOTE(review): bare except silently swallows every exception
        # (including KeyboardInterrupt) — consider narrowing.
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """Build and run a DeepStream pipeline with primary + three secondary inference.

    Flow: N source bins -> nvstreammux (plus a decoder branch, see NOTE below)
    -> pgie -> tracker -> sgie1 -> sgie2 -> sgie3 -> nvvidconv -> nvosd ->
    EGL sink. ``args[1:]`` are the input URIs. Blocks in a GLib main loop until
    interrupted, then sets the pipeline to NULL.

    Fix over the previous revision: the sgie2 creation was guarded by
    ``if not sgie1`` (copy-paste error), so a failed sgie2 creation went
    unreported; it now checks sgie2.
    """
    # Check input arguments.
    # Allows an arbitrary number of sources; in our case RTSP streams from
    # the Meraki cameras.
    number_sources = len(args) - 1
    if number_sources + 1 < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)
    # One FPS counter per input stream (used by the tiler handling).
    for i in range(0, number_sources):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    # Standard GStreamer initialization.
    GObject.threads_init()
    Gst.init(None)
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating Source \n ")
    # Flag used to verify that at least one of the videos is a live stream.
    # NOTE(review): is_live is computed below but never used to set the
    # muxer's 'live-source' property in this variant — confirm intent.
    is_live = False
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    # Create one source bin per URI and link it to a requested muxer sink pad.
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    # The Meraki RTSP video is already H264-optimized, but it still needs to
    # be decoded on the GPU.
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")
    # Use nvinfer to run inferencing on decoder's output; behaviour of
    # inferencing is set through the config file.
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie1:
        sys.stderr.write(" Unable to make sgie1 \n")
    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    if not sgie2:  # BUGFIX: previously tested sgie1 (copy-paste error)
        sys.stderr.write(" Unable to make sgie2 \n")
    sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    if not sgie3:
        sys.stderr.write(" Unable to make sgie3 \n")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    # Create OSD to draw on the converted RGBA buffer.
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    # Finally render the osd output.
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    # Set properties of pgie and sgie.
    pgie.set_property('config-file-path', CURRENT_DIR + "/dstest2_pgie_config.txt")
    sgie1.set_property('config-file-path', CURRENT_DIR + "/dstest2_sgie1_config.txt")
    sgie2.set_property('config-file-path', CURRENT_DIR + "/dstest2_sgie2_config.txt")
    sgie3.set_property('config-file-path', CURRENT_DIR + "/dstest2_sgie3_config.txt")
    # Set properties of tracker, read from the tracker config file.
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        elif key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        elif key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        elif key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        elif key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        elif key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
    print("Adding elements to Pipeline \n")
    pipeline.add(decoder)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie1)
    pipeline.add(sgie2)
    pipeline.add(sgie3)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)
    # we link the elements together
    # source_bin -> -> nvh264-decoder -> PGIE -> Tracker
    # nvvidconv -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    # Flow after including the decoder.
    # NOTE(review): this links decoder.src -> streammux.sink_0 and the LAST
    # loop iteration's source_bin -> decoder, even though every source bin was
    # already linked to a muxer sink pad in the loop above. This looks like
    # leftover code from an older single-source flow — confirm intent before
    # relying on it; left untouched to preserve behavior.
    srcpad.link(sinkpad)
    source_bin.link(decoder)
    decoder.link(streammux)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie1)
    sgie1.link(sgie2)
    sgie2.link(sgie3)
    sgie3.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # Create an event loop and feed gstreamer bus messages to it.
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Probe on the OSD sink pad: by then the buffer carries all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
    print("Starting pipeline \n")
    # Start playback and listen to events.
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except Exception as e:
        print(str(e))
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """Build and run a DeepStream pipeline over the RTSP/file URIs in ``args[1:]``.

    Flow: N source bins -> nvstreammux -> pgie -> tracker -> sgie ->
    nvvidconv -> nvosd -> (nvegltransform on Jetson) -> EGL sink. Blocks in a
    GLib main loop until interrupted, then sets the pipeline to NULL.

    Fix over the previous revision: ``is_live`` was assigned only inside the
    source loop, so ``if is_live:`` raised NameError whenever no URI started
    with "rtsp://"; it is now initialized to False first (matching the other
    pipeline variants in this file).
    """
    # Check input arguments.
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)
    # One FPS counter per input stream.
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1
    # Standard GStreamer initialization.
    GObject.threads_init()
    Gst.init(None)
    # Create Pipeline element that will form a connection of other elements.
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    # READ RTSP/MP4 STREAM
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    # BUGFIX: initialize before the loop — previously unbound when no source
    # was an RTSP URI, crashing at the `if is_live:` check below.
    is_live = False
    # NOTE: number_sources is important!!!
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        # Request a sink_%u pad on the muxer and link this source to it.
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    # Use nvinfer to run inferencing on decoder's output; behaviour of
    # inferencing is set through the config file.
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    sgie = Gst.ElementFactory.make("nvinfer", "secondary-nvinference-engine")
    if not sgie:
        sys.stderr.write(" Unable to make sgie \n")
    # NOTE(review): tiler is created but never added to or linked into the
    # pipeline in this variant — confirm whether it is still needed.
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    # Create OSD to draw on the converted RGBA buffer.
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvosd.set_property('process-mode', OSD_PROCESS_MODE)
    nvosd.set_property('display-text', OSD_DISPLAY_TEXT)
    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)
    print("Playing file %s " % args[1])
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', PGIE_CONFIG_FILE)
    sgie.set_property('config-file-path', SGIE_CONFIG_FILE)
    # Set properties of tracker, read from the KLT tracker config file.
    config = configparser.ConfigParser()
    config.read(TRACKER_KLT_CONFIG)
    config.sections()
    # elif chain (keys are mutually exclusive) — consistent with the other
    # tracker-config loops in this file.
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        elif key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        elif key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        elif key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        elif key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        elif key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)
    # we link the elements together
    # rstp stream -> nvinfer -> nvinfer2 -> nvvidconv -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie)
    sgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # Create an event loop and feed gstreamer bus messages to it.
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Probe on the OSD sink pad: by then the buffer carries all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
    print("Starting pipeline \n")
    # Start playback and listen to events; always return the pipeline to NULL
    # on the way out (previously a bare `except:` swallowed every exception).
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        pass
    finally:
        # cleanup
        pipeline.set_state(Gst.State.NULL)
def main(args):
    """Build and run the LPD/LPR DeepStream pipeline for the URIs in ``args[1:]``.

    Flow: N source bins -> nvstreammux -> pgie (lpd_us_config) -> tracker ->
    nvdsanalytics -> sgie (lpr_config_sgie_us) -> nvvidconv1 -> filter1 (RGBA)
    -> tiler -> nvvidconv -> nvosd -> (nvegltransform on Jetson) -> EGL sink,
    with a queue between every pair of stages. Blocks in a GLib main loop
    until interrupted, then sets the pipeline to NULL.

    Uses module-level state: fps_streams, GETFPS, path1, create_source_bin,
    bus_call, tiler_src_pad_buffer_probe, MUXER_OUTPUT_WIDTH/HEIGHT,
    TILED_OUTPUT_WIDTH/HEIGHT.
    """
    global number_sources
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)
    # One FPS counter per input stream.
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    for i in range(number_sources):
        # Per-stream output directory under path1.
        if not os.path.exists(os.path.join(path1, "stream_" + str(i))):
            os.mkdir(os.path.join(path1, "stream_" + str(i)))
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        # Request a sink_%u pad on the muxer and link this source to it.
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    # One queue between every pair of pipeline stages (decoupling/buffering).
    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    queue6 = Gst.ElementFactory.make("queue", "queue6")
    queue7 = Gst.ElementFactory.make("queue", "queue7")
    queue8 = Gst.ElementFactory.make("queue", "queue8")
    queue9 = Gst.ElementFactory.make("queue", "queue9")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)
    pipeline.add(queue8)
    pipeline.add(queue9)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    print("Creating tiler \n ")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    print("Creating nvdsanalytics \n ")
    nvanalytics = Gst.ElementFactory.make("nvdsanalytics", "analytics")
    if not nvanalytics:
        sys.stderr.write(" Unable to create nvanalytics \n")
    nvanalytics.set_property("config-file", "config_nvdsanalytics.txt")
    sgie = Gst.ElementFactory.make("nvinfer", "secondary-nvinference-engine")
    if not sgie:
        sys.stderr.write(" Unable to make sgie \n")
    # nvvidconv1 + filter1 convert the frames to RGBA, which is easier to
    # work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    # nvosd.set_property('process-mode',OSD_PROCESS_MODE)
    # nvosd.set_property('display-text',OSD_DISPLAY_TEXT)
    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', MUXER_OUTPUT_WIDTH)
    streammux.set_property('height', MUXER_OUTPUT_HEIGHT)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "lpd_us_config.txt")
    # Make the inference batch size match the number of sources.
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    # Set properties of tracker, read from the tracker config file.
    config = configparser.ConfigParser()
    config.read('lpr_sample_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
    # sgie runs on objects (process-mode 2), not full frames.
    sgie.set_property('config-file-path', "lpr_config_sgie_us.txt")
    sgie.set_property('process-mode', 2)
    # Tiler grid: rows = floor(sqrt(N)), columns = ceil(N / rows).
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos", 0)
    sink.set_property("sync", 0)
    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(nvanalytics)
    pipeline.add(sgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(queue1)
    queue1.link(tracker)
    tracker.link(queue2)
    queue2.link(nvanalytics)
    nvanalytics.link(queue3)
    queue3.link(sgie)
    sgie.link(queue4)
    queue4.link(nvvidconv1)
    nvvidconv1.link(queue5)
    queue5.link(filter1)
    filter1.link(queue6)
    queue6.link(tiler)
    tiler.link(queue7)
    queue7.link(nvvidconv)
    nvvidconv.link(queue8)
    queue8.link(nvosd)
    if is_aarch64():
        nvosd.link(queue9)
        queue9.link(transform)
        transform.link(sink)
    else:
        nvosd.link(queue9)
        queue9.link(sink)
    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Metadata probe on the tiler's sink pad (per-stream meta still intact).
    tiler_src_pad = tiler.get_static_pad("sink")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get sink pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)
    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if (i != 0):
            print(i, ": ", source)
    print("Starting pipeline \n")
    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        # NOTE(review): bare except silently swallows every exception.
        pass
    # cleanup
    # NOTE(review): this assignment creates a LOCAL thread_stop (no `global`
    # declaration) — if a module-level thread_stop flag is polled by a worker
    # thread, that thread is never signalled. Confirm and add `global
    # thread_stop` at the top of main if so.
    thread_stop = True
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """Build and run the config-driven DeepStream pipeline.

    Topology: sources -> streammux -> pgie -> tracker -> tiler -> nvvidconv
    -> nvosd -> tee, with the tee fanning out to (1) nvmsgconv -> nvmsgbroker
    (metadata messages) and (2) convert -> encode -> rtppay -> udpsink, which
    an embedded RTSP server re-serves at /ds-test.

    All element properties come from configs/plugin_properties.ini.
    ``args`` must provide ``input_file`` (list of URIs/paths) and
    ``no_display``.
    """
    # Get all arguments
    input_file = args.input_file
    no_display = args.no_display  # currently unused; kept for CLI compatibility

    # Load element properties from the plugin config file.
    config = configparser.ConfigParser()
    config.read('configs/plugin_properties.ini')

    number_sources = len(input_file)
    if number_sources < 1:
        sys.stderr.write("Please provide path for file input or rtsp streams")
        sys.exit(1)

    # One FPS counter per stream.
    for i in range(0, number_sources):
        fps_streams["stream{0}".format(i)] = GETFPS(i)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streamux \n ")
    # nvstreammux batches buffers from all sources into one stream.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    print("Loading streammux properties \n")
    streammux_prop = config['streammux']
    streammux.set_property('width', streammux_prop.getint('width'))
    streammux.set_property('height', streammux_prop.getint('height'))
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout',
                           streammux_prop.getint('batched-push-timeout'))
    pipeline.add(streammux)

    # Build one uridecode source bin per input and link it to a requested
    # streammux sink pad; an rtsp:// prefix marks the pipeline as live.
    for i, uri in enumerate(input_file):
        print("Creating source_bin ", i, " \n ")
        uri_name = uri
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    print("Loading Pgie properties \n")
    pgie_prop = config['primary-gie']
    pgie.set_property('config-file-path', pgie_prop['config-file'])
    pgie.set_property('model-engine-file', pgie_prop['model-engine-file'])
    # Batch size must match the number of sources feeding the muxer.
    pgie.set_property("batch-size", number_sources)

    print("Creating tracker \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    print("Loading tracker properties \n")
    tracker_prop = config['tracker']
    tracker.set_property("tracker-width", tracker_prop.getint('tracker-width'))
    tracker.set_property("tracker-height", tracker_prop.getint('tracker-height'))
    tracker.set_property("ll-lib-file", tracker_prop['ll-lib-file'])
    tracker.set_property("ll-config-file", tracker_prop['ll-config-file'])
    tracker.set_property("enable-batch-process",
                         tracker_prop.getint('enable-batch-process'))

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Loading tiler properties \n")
    tiler_prop = config['tiled-display']
    # Arrange the sources on a near-square grid.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", tiler_prop.getint('width'))
    tiler.set_property("height", tiler_prop.getint('height'))

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    print("Creating msconv \n ")
    msgconv = Gst.ElementFactory.make("nvmsgconv", "nvmsg-converter")
    if not msgconv:
        sys.stderr.write(" Unable to create msgconv \n")
    print("Loading msgconv properties \n")
    msgconv_prop = config['message-converter']
    msgconv.set_property("config", msgconv_prop['msg-conv-config'])
    msgconv.set_property("payload-type",
                         msgconv_prop.getint('msg-conv-payload-type'))

    print("Creating msgbroker \n ")
    msgbroker = Gst.ElementFactory.make("nvmsgbroker", "nvmsg-broker")
    # BUGFIX: previously tested `msgconv` here (copy-paste), which hid a
    # failed msgbroker creation.
    if not msgbroker:
        sys.stderr.write(" Unable to create msgbroker \n")
    print("Loading message broker properties \n")
    msgbroker_prop = config['message-broker']
    msgbroker.set_property("proto-lib", msgbroker_prop['proto-lib'])
    msgbroker.set_property("conn-str", msgbroker_prop['conn-str'])
    msgbroker_cfg_file = msgbroker_prop["msg-broker-config"]
    topic = msgbroker_prop["topic"]
    if msgbroker_cfg_file is not None:
        msgbroker.set_property("config", msgbroker_cfg_file)
    if topic is not None:
        msgbroker.set_property("topic", topic)
    msgbroker.set_property("sync", msgbroker_prop.getboolean("sync"))

    print("Creating tee \n ")
    tee = Gst.ElementFactory.make("tee", "nvsink-tee")
    if not tee:
        sys.stderr.write(" Unable to create tee \n")

    # BUGFIX: the three queue checks below previously tested `tee` instead of
    # the queue that was just created.
    print("Creating queue1 \n ")
    queue1 = Gst.ElementFactory.make("queue", "nvtee-que1")
    if not queue1:
        sys.stderr.write(" Unable to create queue1 \n")
    print("Creating queue2 \n ")
    queue2 = Gst.ElementFactory.make("queue", "nvtee-que2")
    if not queue2:
        sys.stderr.write(" Unable to create queue2 \n")
    print("Creating queue3 \n ")
    queue3 = Gst.ElementFactory.make("queue", "nvtee-que3")
    if not queue3:
        sys.stderr.write(" Unable to create queue3 \n")

    # Jetson needs an EGL transform in front of the sink.
    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating nvvidconv_postosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter forcing I420 so the encoder gets a format it accepts.
    print("Creating caps filter \n")
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps",
        Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    print("Loading encoder properties \n")
    encoder_prop = config['encoder']
    codec = encoder_prop['codec']
    if codec == "H264":
        print("Creating H264 Encoder")
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
    elif codec == "H265":
        print("Creating H265 Encoder")
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder \n")
    encoder.set_property('bitrate', encoder_prop.getint('bitrate'))
    if is_aarch64():
        encoder.set_property('preset-level', encoder_prop.getint('preset-level'))
        encoder.set_property('insert-sps-pps', encoder_prop.getint('insert-sps-pps'))
        encoder.set_property('bufapi-version', encoder_prop.getint('bufapi-version'))

    # Make the payload-encode video into RTP packets
    if codec == "H264":
        print("Creating H264 rtppay")
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
    elif codec == "H265":
        print("Creating H265 rtppay")
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay \n")

    # Make the UDP sink that feeds the RTSP server's udpsrc.
    print("Creating udp sink \n")
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    udpsink_prop = config['udpsink']
    sink.set_property('host', udpsink_prop['host'])
    sink.set_property('port', udpsink_prop.getint('port'))
    sink.set_property('async', udpsink_prop.getboolean('async'))
    sink.set_property('sync', udpsink_prop.getint('sync'))

    # The fakesink branch (queue3 -> fakesink) is currently disabled; the
    # elements are still created so the branch can be re-enabled easily.
    print("Creating FakeSink \n")
    fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
    if not fakesink:
        sys.stderr.write(" Unable to create fakesink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(tee)
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(msgconv)
    pipeline.add(msgbroker)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(tee)
    # Branch 1: tee -> queue1 -> msgconv -> msgbroker (metadata messages).
    queue1.link(msgconv)
    msgconv.link(msgbroker)
    # Branch 2: tee -> queue2 -> convert -> encode -> RTP -> udpsink (video).
    queue2.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    if is_aarch64():
        rtppay.link(transform)
        transform.link(sink)
    else:
        rtppay.link(sink)

    # Manually wire the two tee request pads to the branch queues.
    tee_msg_pad = tee.get_request_pad('src_%u')
    tee_render_pad = tee.get_request_pad("src_%u")
    if not tee_msg_pad or not tee_render_pad:
        sys.stderr.write("Unable to get request pads \n")
    msg_sink_pad = queue1.get_static_pad("sink")
    tee_msg_pad.link(msg_sink_pad)
    vid_sink_pad = queue2.get_static_pad("sink")
    tee_render_pad.link(vid_sink_pad)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start the RTSP server that re-serves the udpsink output.
    rtsp_prop = config['rtsp-server']
    rtsp_port_num = rtsp_prop.getint('port')
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (udpsink_prop.getint('port'), codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)

    # Attach the metadata probe on pgie's src pad.
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(input_file):
        print(i, ": ", source)

    print("Starting pipeline \n")
    # start play back and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    # BUGFIX: a bare `except: pass` silently swallowed every error; only an
    # interactive Ctrl-C should be silenced here.
    except KeyboardInterrupt:
        pass
    # cleanup
    print("Exiting app\n")
    pyds.unset_callback_funcs()
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """DeepStream multi-source demo: decode the given URIs, run primary
    inference, tile the streams, draw OSD overlays, and serve the encoded
    result over RTSP at rtsp://<host>:8554/ds-test.

    ``args`` is an argv-style list: args[0] is the program name, args[1:]
    are the input URIs.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    # One FPS counter per input stream.
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    # Build one uridecode source bin per URI and link it to a requested
    # streammux sink pad; an rtsp:// prefix marks the pipeline as live.
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Jetson needs an EGL transform in front of the sink.
    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter forcing I420 so the encoder gets a format it accepts.
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps",
        Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    codec = "H264"
    bitrate = 4000000
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # Make the UDP sink that feeds the RTSP server's udpsrc.
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest3_pgie_config.txt")
    # Keep the infer batch size in sync with the number of sources.
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    # Arrange the sources on a near-square tiler grid.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    if is_aarch64():
        rtppay.link(transform)
        transform.link(sink)
    else:
        rtppay.link(sink)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start the RTSP server that re-serves the udpsink output.
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)

    # Attach the metadata probe on pgie's src pad.
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start play back and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    # BUGFIX: a bare `except: pass` silently swallowed every error; only an
    # interactive Ctrl-C should be silenced here.
    except KeyboardInterrupt:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
def main():
    """Run inference on one input video and write an annotated MP4.

    Topology: source bin -> streammux -> pgie (nvinfer or nvinferserver,
    chosen by --inference_type) -> nvvidconv -> nvosd -> queue -> nvvidconv2
    -> capsfilter -> avenc_mpeg4 -> mpeg4videoparse -> qtmux -> filesink.
    The result lands in <out_dir>/neuralet_deepstream_<input basename>.
    """
    # Check input arguments
    parser = argparse.ArgumentParser(
        description="Deepstream inference application")
    parser.add_argument("--input_video", type=str, required=True,
                        help="Input video path")
    parser.add_argument("--out_dir", type=str, required=True,
                        help="Directory to store result")
    parser.add_argument(
        "--inference_type", type=int, required=True,
        help="0 for TensorRT and 1 for TensorFlow Frozen Inference Graph")
    parser.add_argument("--config", type=str, required=True,
                        help="Deepstream Config file")
    parser.add_argument("--label_path", type=str, required=True,
                        help="Label file Path")
    args = parser.parse_args()

    fps_streams["stream{0}".format(0)] = GETFPS(0)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    source = create_source_bin(0, args.input_video)

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = make_elm_or_print_err("nvstreammux", "Stream-muxer",
                                      "NvStreamMux")
    pipeline.add(source)

    # Select the inference plugin: 0 -> nvinfer (TensorRT),
    # 1 -> nvinferserver (Triton); behaviour is set through the config file.
    if args.inference_type == 0:
        # BUGFIX: the error label said "Nvinferserver" for the nvinfer branch.
        pgie = make_elm_or_print_err("nvinfer", "primary-inference", "Nvinfer")
    elif args.inference_type == 1:
        pgie = make_elm_or_print_err("nvinferserver", "primary-inference",
                                     "Nvinferserver")
    else:
        # BUGFIX: any other value previously fell through and crashed later
        # with a NameError on `pgie`.
        sys.stderr.write("Invalid --inference_type %d (expected 0 or 1)\n"
                         % args.inference_type)
        sys.exit(1)

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = make_elm_or_print_err("nvvideoconvert", "convertor",
                                      "Nvvidconv")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = make_elm_or_print_err("nvdsosd", "onscreendisplay", "OSD (nvosd)")

    # Finally encode and save the osd output
    queue = make_elm_or_print_err("queue", "queue", "Queue")
    nvvidconv2 = make_elm_or_print_err("nvvideoconvert", "convertor2",
                                       "Converter 2 (nvvidconv2)")
    capsfilter = make_elm_or_print_err("capsfilter", "capsfilter",
                                       "capsfilter")
    caps = Gst.Caps.from_string("video/x-raw, format=I420")
    capsfilter.set_property("caps", caps)

    # On Jetson, there is a problem with the encoder failing to initialize
    # due to limitation on TLS usage. To work around this, preload libgomp.
    # Add a reminder here in case the user forgets.
    preload_reminder = "If the following error is encountered:\n" + \
        "/usr/lib/aarch64-linux-gnu/libgomp.so.1: cannot allocate memory in static TLS block\n" + \
        "Preload the offending library:\n" + \
        "export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1\n"
    encoder = make_elm_or_print_err("avenc_mpeg4", "encoder", "Encoder",
                                    preload_reminder)
    encoder.set_property("bitrate", 2000000)
    codeparser = make_elm_or_print_err("mpeg4videoparse", "mpeg4-parser",
                                       'Code Parser')
    container = make_elm_or_print_err("qtmux", "qtmux", "Container")
    sink = make_elm_or_print_err("filesink", "filesink", "Sink")

    # Derive the output path and make sure the directory exists.
    video_base_path = os.path.basename(args.input_video)
    output_video_path = args.out_dir + "/neuralet_deepstream_" + video_base_path
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    sink.set_property("location", output_video_path)
    sink.set_property("sync", 0)
    sink.set_property("async", 0)

    print("Playing file %s " % args.input_video)
    streammux.set_property("width", IMAGE_WIDTH)
    streammux.set_property("height", IMAGE_HEIGHT)
    streammux.set_property("batch-size", 1)
    streammux.set_property("batched-push-timeout", 4000000)
    pgie.set_property("config-file-path", args.config)

    print("Adding elements to Pipeline \n")
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(queue)
    pipeline.add(nvvidconv2)
    pipeline.add(capsfilter)
    pipeline.add(encoder)
    pipeline.add(codeparser)
    pipeline.add(container)
    pipeline.add(sink)

    # we link the elements together
    print("Linking elements in the Pipeline \n")
    padname = "sink_0"
    sinkpad = streammux.get_request_pad(padname)
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin \n")
    srcpad = source.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(queue)
    queue.link(nvvidconv2)
    nvvidconv2.link(capsfilter)
    capsfilter.link(encoder)
    encoder.link(codeparser)
    codeparser.link(container)
    container.link(sink)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Add a probe on the primary-infer source pad to get inference output tensors
    pgiesrcpad = pgie.get_static_pad("src")
    if not pgiesrcpad:
        sys.stderr.write(" Unable to get src pad of primary infer \n")
    pgie_src_pad_buffer_probe_label = partial(pgie_src_pad_buffer_probe,
                                              label_path=args.label_path)
    pgiesrcpad.add_probe(Gst.PadProbeType.BUFFER,
                         pgie_src_pad_buffer_probe_label, 0)

    # Lets add probe to get informed of the meta data generated, we add probe to
    # the sink pad of the osd element, since by that time, the buffer would have
    # had got all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osd_sink_pad_buffer_probe_label = partial(osd_sink_pad_buffer_probe,
                                              label_path=args.label_path)
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER,
                         osd_sink_pad_buffer_probe_label, 0)

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    # BUGFIX: a bare `except: pass` silently swallowed every error; only an
    # interactive Ctrl-C should be silenced here.
    except KeyboardInterrupt:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """nvdsanalytics demo: decode the given URIs, run primary inference,
    tracking and nvdsanalytics, tile/overlay the result, and serve it over
    RTSP at rtsp://<host>:8554/ds-test.

    ``args`` is an argv-style list: args[0] is the program name, args[1:]
    are the input URIs.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    # One FPS counter per input stream.
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    # Build one uridecode source bin per URI and link it to a requested
    # streammux sink pad; an rtsp:// prefix marks the pipeline as live.
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    # Inter-element queues decouple the processing stages.
    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    queue6 = Gst.ElementFactory.make("queue", "queue6")
    queue7 = Gst.ElementFactory.make("queue", "queue7")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating nvtracker \n ")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    print("Creating nvdsanalytics \n ")
    nvanalytics = Gst.ElementFactory.make("nvdsanalytics", "analytics")
    if not nvanalytics:
        sys.stderr.write(" Unable to create nvanalytics \n")
    nvanalytics.set_property("config-file", "config_nvdsanalytics.txt")

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvosd.set_property('process-mode', OSD_PROCESS_MODE)
    nvosd.set_property('display-text', OSD_DISPLAY_TEXT)

    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter forcing I420 so the encoder gets a format it accepts.
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps",
        Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    codec = "H264"
    bitrate = 4000000
    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # Make the UDP sink that feeds the RTSP server's udpsrc.
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dsnvanalytics_pgie_config.txt")
    # Keep the infer batch size in sync with the number of sources.
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    # Arrange the sources on a near-square tiler grid.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    # Set properties of tracker from its own config file.
    config = configparser.ConfigParser()
    config.read('dsnvanalytics_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)
        if key == 'enable-past-frame':
            tracker_enable_past_frame = config.getint('tracker', key)
            tracker.set_property('enable_past_frame',
                                 tracker_enable_past_frame)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(nvanalytics)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    # We link elements in the following order:
    # sourcebin -> streammux -> nvinfer -> nvtracker -> nvdsanalytics ->
    # nvtiler -> nvvideoconvert -> nvdsosd -> sink
    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(nvanalytics)
    nvanalytics.link(queue4)
    queue4.link(tiler)
    tiler.link(queue5)
    queue5.link(nvvidconv)
    nvvidconv.link(queue6)
    queue6.link(nvosd)
    nvosd.link(queue7)
    queue7.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start the RTSP server that re-serves the udpsink output.
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n"
          % rtsp_port_num)

    # Attach the analytics metadata probe on nvdsanalytics' src pad.
    nvanalytics_src_pad = nvanalytics.get_static_pad("src")
    if not nvanalytics_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        nvanalytics_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                      nvanalytics_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start play back and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    # BUGFIX: a bare `except: pass` silently swallowed every error; only an
    # interactive Ctrl-C should be silenced here.
    except KeyboardInterrupt:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """Run a DeepStream pipeline that decodes an elementary H264 file,
    performs primary inference, draws the results via the on-screen
    display, and re-encodes the annotated frames to ./out.mp4.

    args[1] must be the path to the input .h264 file; exits with status 1
    on bad usage.
    """
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)

    global fps_stream
    fps_stream = GETFPS(0)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    # The input file holds an elementary h264 stream, so parse it first
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    # Hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    # nvstreammux forms batches from one or more sources
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # nvinfer runs inferencing on the decoder's output; behaviour is
    # configured through the config file set below
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # OSD draws bounding boxes / labels on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    print("Creating Queue \n")
    queue = Gst.ElementFactory.make("queue", "queue")
    if not queue:
        sys.stderr.write(" Unable to create queue \n")

    print("Creating converter 2\n")
    nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor2")
    if not nvvidconv2:
        sys.stderr.write(" Unable to create nvvidconv2 \n")

    print("Creating capsfilter \n")
    capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
    if not capsfilter:
        sys.stderr.write(" Unable to create capsfilter \n")
    caps = Gst.Caps.from_string("video/x-raw, format=I420")
    capsfilter.set_property("caps", caps)

    print("Creating Encoder \n")
    encoder = Gst.ElementFactory.make("avenc_mpeg4", "encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder \n")
    encoder.set_property("bitrate", 2000000)

    print("Creating Code Parser \n")
    codeparser = Gst.ElementFactory.make("mpeg4videoparse", "mpeg4-parser")
    if not codeparser:
        sys.stderr.write(" Unable to create code parser \n")

    print("Creating Container \n")
    container = Gst.ElementFactory.make("qtmux", "qtmux")
    if not container:
        # Fixed copy-paste bug: this message wrongly reported "code parser"
        sys.stderr.write(" Unable to create container \n")

    print("Creating Sink \n")
    sink = Gst.ElementFactory.make("filesink", "filesink")
    if not sink:
        sys.stderr.write(" Unable to create file sink \n")
    sink.set_property("location", "./out.mp4")
    sink.set_property("sync", 1)
    sink.set_property("async", 0)

    print("Playing file %s " % args[1])
    source.set_property('location', args[1])
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")

    print("Adding elements to Pipeline \n")
    for element in (source, h264parser, decoder, streammux, pgie, nvvidconv,
                    nvosd, queue, nvvidconv2, capsfilter, encoder,
                    codeparser, container, sink):
        pipeline.add(element)

    # file-source -> h264-parser -> nvh264-decoder -> streammux -> nvinfer ->
    # nvvidconv -> nvosd -> queue -> nvvidconv2 -> capsfilter -> encoder ->
    # mpeg4-parser -> qtmux -> filesink
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(queue)
    queue.link(nvvidconv2)
    nvvidconv2.link(capsfilter)
    capsfilter.link(encoder)
    encoder.link(codeparser)
    codeparser.link(container)
    container.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Probe the sink pad of the osd element: by that point the buffer
    # carries all the generated metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop; other exceptions should
        # surface rather than being silently swallowed (was a bare except).
        pass
    finally:
        # cleanup runs even if the loop raised
        pipeline.set_state(Gst.State.NULL)
def main(args):
    """Run a multi-source DeepStream pipeline that tiles the streams,
    runs primary inference, and saves selected frames to disk.

    args[1..N-1] are stream URIs; args[-1] is the folder that will be
    created to store saved frames. Exits with status 1 on bad usage or
    if the output folder already exists.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write(
            "usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n"
            % args[0])
        sys.exit(1)

    for i in range(0, len(args) - 2):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 2

    global folder_name
    folder_name = args[-1]
    if path.exists(folder_name):
        sys.stderr.write(
            "The output folder %s already exists. Please remove it first.\n"
            % folder_name)
        sys.exit(1)
    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streamux \n ")
    # nvstreammux forms batches from one or more sources
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    # One source bin per URI, each feeding a streammux request pad
    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # nvvidconv1 + filter1 convert the frames to RGBA, which is easier
    # to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    if is_aarch64():
        # Jetson requires an EGL transform in front of the EGL sink
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest_imagedata_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    # Tiler grid sized to be roughly square
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("sync", 0)
    sink.set_property("qos", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames can be easily
        # accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    for element in (pgie, tiler, nvvidconv, filter1, nvvidconv1, nvosd):
        pipeline.add(element)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        # Fixed message: this is the tiler's sink pad, not a src pad
        sys.stderr.write(" Unable to get sink pad of tiler \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER,
                                 tiler_sink_pad_buffer_probe, 0)

    # List the sources (skip args[0], the program name)
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if i != 0:
            print(i, ": ", source)

    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop; other exceptions should
        # surface rather than being silently swallowed (was a bare except).
        pass
    finally:
        print("Exiting app\n")
        pipeline.set_state(Gst.State.NULL)
def main(args):
    """Run a single-file DeepStream face pipeline: H264 file -> decode ->
    face detector (pgie) -> tracker -> face classifier (sgie) -> OSD ->
    EGL renderer.

    args[1] is the path to the input elementary H264 file; exits with
    status 1 on bad usage.
    """
    global fps_stream
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)
    fps_stream = GETFPS(0)
    print(fps_stream)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    # The input is an elementary h264 stream, so parse it before decoding
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    # Hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    # nvstreammux forms batches from one or more sources
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    # Primary inference: face detection; behaviour set via config file
    face_detector = Gst.ElementFactory.make(
        "nvinfer", "primary-inference face detector")
    if not face_detector:
        sys.stderr.write(" Unable to create face_detector \n")

    # Secondary inference: face classification on tracked objects
    face_classifier = Gst.ElementFactory.make(
        "nvinfer", "secondary-inference face_classifier")
    if not face_classifier:
        sys.stderr.write(" Unable to create face_classifier \n")

    # Convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # OSD draws on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output (Jetson needs an EGL transform)
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            # Previously unchecked; a None transform would crash when linked
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    print("Playing file %s " % args[1])
    source.set_property('location', args[1])
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    face_detector.set_property('config-file-path', "detector_config.txt")
    face_classifier.set_property('config-file-path', "classifier_config.txt")

    # Tracker properties come from the tracker config file
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker.set_property('tracker-width',
                                 config.getint('tracker', key))
        elif key == 'tracker-height':
            tracker.set_property('tracker-height',
                                 config.getint('tracker', key))
        elif key == 'gpu-id':
            tracker.set_property('gpu_id', config.getint('tracker', key))
        elif key == 'll-lib-file':
            tracker.set_property('ll-lib-file', config.get('tracker', key))
        elif key == 'll-config-file':
            tracker.set_property('ll-config-file',
                                 config.get('tracker', key))
        elif key == 'enable-batch-process':
            tracker.set_property('enable_batch_process',
                                 config.getint('tracker', key))

    print("Adding elements to Pipeline \n")
    for element in (source, h264parser, decoder, streammux, tracker,
                    face_detector, face_classifier, nvvidconv, nvosd, sink):
        pipeline.add(element)
    if is_aarch64():
        pipeline.add(transform)

    # file-source -> h264-parser -> nvh264-decoder -> streammux ->
    # detector -> tracker -> classifier -> nvvidconv -> nvosd -> renderer
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)
    streammux.link(face_detector)
    face_detector.link(tracker)
    tracker.link(face_classifier)
    face_classifier.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Probe the osd sink pad: by that point the buffer carries all metadata
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER,
                         osd_sink_pad_buffer_probe, 0)

    # Probe the converter sink pad for the secondary (classifier) metadata
    vidconvsinkpad = nvvidconv.get_static_pad("sink")
    if not vidconvsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvvidconv \n")
    vidconvsinkpad.add_probe(Gst.PadProbeType.BUFFER,
                             sgie_sink_pad_buffer_probe, 0)

    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop; other exceptions should
        # surface rather than being silently swallowed (was a bare except).
        pass
    finally:
        # cleanup runs even if the loop raised
        pipeline.set_state(Gst.State.NULL)
def main(args):
    """Run a multi-source DeepStream pipeline that tiles the inputs, runs
    primary inference (nvinfer or nvinferserver, per the module-level
    ``gie`` setting), encodes, and serves the annotated video over RTSP
    at rtsp://localhost:8554/ds-test.
    """
    # NOTE(review): every element of args (including index 0) is used as a
    # source URI, so the caller must pass URIs only (e.g. sys.argv[1:]) --
    # confirm against the call site.
    for i in range(0, len(args)):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streamux \n ")
    # nvstreammux forms batches from one or more sources
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    # One source bin per URI, each feeding a streammux request pad
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    # gie selects between TensorRT (nvinfer) and Triton (nvinferserver)
    if gie == "nvinfer":
        pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    else:
        pgie = Gst.ElementFactory.make("nvinferserver", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Caps filter: the encoder expects NVMM I420
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps",
        Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Encoder, chosen by the module-level codec setting
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property("bitrate", bitrate)
    if is_aarch64():
        encoder.set_property("preset-level", 1)
        encoder.set_property("insert-sps-pps", 1)
        encoder.set_property("bufapi-version", 1)

    # Payloader: packs the encoded video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # UDP sink feeding the RTSP server created below
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    sink.set_property("host", "224.224.255.255")
    sink.set_property("port", updsink_port_num)
    sink.set_property("async", False)
    sink.set_property("sync", 1)

    streammux.set_property("width", 1920)
    streammux.set_property("height", 1080)
    # Was hard-coded to 1; batch all sources, consistent with the pgie
    # batch-size override below.
    streammux.set_property("batch-size", number_sources)
    streammux.set_property("batched-push-timeout", 4000000)
    if gie == "nvinfer":
        pgie.set_property("config-file-path", "dstest1_pgie_config.txt")
    else:
        pgie.set_property("config-file-path",
                          "dstest1_pgie_inferserver_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    print("Adding elements to Pipeline \n")
    # Tiler grid sized to be roughly square
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos", 0)

    for element in (pgie, tiler, nvvidconv, nvosd, nvvidconv_postosd,
                    caps, encoder, rtppay, sink):
        pipeline.add(element)

    # streammux -> pgie -> nvvidconv -> tiler -> nvosd ->
    # nvvidconv_postosd -> caps -> encoder -> rtppay -> udpsink
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(tiler)
    tiler.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # RTSP server re-serving the UDP stream produced by the pipeline
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        '( udpsrc name=pay0 port=%d buffer-size=524288 caps="application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 " )'
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print(
        "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n"
        % rtsp_port_num)

    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop; other exceptions should
        # surface rather than being swallowed (was `except BaseException`).
        pass
    finally:
        # cleanup runs even if the loop raised
        pipeline.set_state(Gst.State.NULL)
# FPS Probe fps_streams_new["stream{0}".format(frame_meta.pad_index)].get_fps() try: l_frame = l_frame.next except StopIteration: break return Gst.PadProbeReturn.OK # Define variables to be used later fps_streams_new = {} # Initialise FPS for i in range(0, num_sources): fps_streams_new["stream{0}".format(i)] = GETFPS(i) # Standard GStreamer initialization Gst.init(None) # Create gstreamer elements */ # Create Pipeline element that will form a connection of other elements print("Creating Pipeline \n ") pipeline = Gst.Pipeline() if not pipeline: sys.stderr.write(" Unable to create Pipeline \n") ########### Create Elements required for the Pipeline ########### ######### Defining Stream 1
def main(args):
    """Run a multi-source DeepStream pipeline (Meraki camera RTSP streams):
    source bins -> streammux -> pgie -> tracker -> tiler -> nvvidconv ->
    nvosd -> EGL renderer.

    args[1..N] are the stream URIs (files or rtsp:// URLs); exits with
    status 1 if no source is given.
    """
    # Check input arguments: at least one source URI is required
    number_sources = len(args) - 1
    if number_sources + 1 < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)
    for i in range(0, number_sources):
        fps_streams["stream{0}".format(i)] = GETFPS(i)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating Source \n ")
    # True once at least one source is a live RTSP stream
    is_live = False

    # nvstreammux forms batches from one or more sources
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    # One uridecodebin-based source bin per input (files or RTSP); each
    # feeds a streammux request pad, so no separate parser/decoder
    # elements are needed.
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        print("Padname : " + padname)
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    # NOTE(review): the original also created an h264parse + nvv4l2decoder
    # pair and, after this loop, re-ran srcpad.link(sinkpad) with the
    # loop-leaked pads and linked source_bin -> decoder -> streammux.
    # The source bins above already decode and feed streammux request
    # pads, so those conflicting links and the unused elements were
    # removed.

    # nvinfer runs inferencing on the batched output; behaviour is
    # configured through the config file set below
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    # Version 2.0 performs no secondary inference (sgie1/2/3 removed).

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    # OSD draws on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output (Jetson needs an EGL transform)
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    # set_property was previously called before the None-check above,
    # which would raise AttributeError if creation failed
    sink.set_property('sync', 0)

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    # Was hard-coded to 1; batch all sources, consistent with the pgie
    # batch-size override below.
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)

    # Model configuration: dstest2_pgie_config is the standard model; the
    # commented alternatives are yoloV3, yoloV3_tiny and fasterRCNN.
    pgie.set_property('config-file-path',
                      CURRENT_DIR + "/configs/dstest2_pgie_config.txt")
    # pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_yoloV3.txt")
    # pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_yoloV3_tiny.txt")
    # pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_fasterRCNN.txt")

    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    # Tracker properties come from the tracker config file
    config = configparser.ConfigParser()
    config.read('configs/dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker.set_property('tracker-width',
                                 config.getint('tracker', key))
        elif key == 'tracker-height':
            tracker.set_property('tracker-height',
                                 config.getint('tracker', key))
        elif key == 'gpu-id':
            tracker.set_property('gpu_id', config.getint('tracker', key))
        elif key == 'll-lib-file':
            tracker.set_property('ll-lib-file', config.get('tracker', key))
        elif key == 'll-config-file':
            tracker.set_property('ll-config-file',
                                 config.get('tracker', key))
        elif key == 'enable-batch-process':
            tracker.set_property('enable_batch_process',
                                 config.getint('tracker', key))

    # Tiler grid sized to be roughly square
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    print("Adding elements to Pipeline \n")
    for element in (pgie, tracker, tiler, nvvidconv, nvosd, sink):
        pipeline.add(element)
    if is_aarch64():
        pipeline.add(transform)

    # source_bin(s) -> streammux -> pgie -> tracker -> tiler ->
    # nvvidconv -> nvosd -> renderer
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Probe the tracker's src pad: by that point the buffer carries the
    # generated metadata (the probe callback name is historical).
    tracker_src_pad = tracker.get_static_pad("src")
    if not tracker_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tracker_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                  tiler_src_pad_buffer_probe, 0)

    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except Exception as e:
        print(str(e))
    finally:
        # cleanup runs even if the loop raised
        pipeline.set_state(Gst.State.NULL)
def main(args):
    """Build and run a multi-source DeepStream pipeline with cascaded inference.

    Topology: source_bins -> nvstreammux -> pgie -> tracker -> sgie1 ->
    sgie2 -> sgie3 -> tiler -> nvvidconv -> nvosd -> EGL sink.

    Args:
        args: argv-style list; args[1:] are source URIs (file:// or rtsp://).
    """
    # Check input arguments: at least one URI (video file or RTSP) is required.
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    # One FPS counter per stream.
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization.
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements.
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streamux \n ")
    # nvstreammux forms batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        # Request a sink pad on the muxer and link this source bin to it.
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    # Primary inference; behaviour is set through its config file below.
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie1:
        sys.stderr.write(" Unable to make sgie1 \n")
    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    # BUGFIX: the original tested `sgie1` here, hiding a failed sgie2 creation.
    if not sgie2:
        sys.stderr.write(" Unable to make sgie2 \n")
    sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    if not sgie3:
        sys.stderr.write(" Unable to make sgie3 \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer.
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    # On Jetson (aarch64) an EGL transform is required before the EGL sink.
    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path',
                      CURRENT_DIR + "/dstest5_pgie_config.txt")
    # Keep the nvinfer batch size in sync with the number of sources.
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    sgie1.set_property('config-file-path',
                       CURRENT_DIR + "/dstest5_sgie1_config.txt")
    sgie2.set_property('config-file-path',
                       CURRENT_DIR + "/dstest5_sgie2_config.txt")
    sgie3.set_property('config-file-path',
                       CURRENT_DIR + "/dstest5_sgie3_config.txt")

    # Mirror the tracker config file onto nvtracker properties.
    # NOTE(review): GObject canonicalises '_' to '-' in property names, so
    # 'gpu_id' / 'enable_batch_process' resolve to the dashed properties.
    config = configparser.ConfigParser()
    config.read(CURRENT_DIR + '/dstest5_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    # Tiler grid: roughly square layout for number_sources streams.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie1)
    pipeline.add(sgie2)
    pipeline.add(sgie3)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    # BUGFIX: sink was added twice on aarch64 (once unconditionally and again
    # inside the is_aarch64() branch); an element may belong to a bin only once.
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie1)
    sgie1.link(sgie2)
    sgie2.link(sgie3)
    sgie3.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # Create an event loop and feed gstreamer bus messages to it.
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Probe on pgie's src pad to read the generated metadata.
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    # List the sources.
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # Start playback and listen to events.
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except Exception as e:
        print(str(e))
    # Cleanup: always drop the pipeline back to NULL state.
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
def main(args):
    """Build and run a DeepStream pipeline with a YOLOv4 primary detector.

    Topology: source_bins -> nvstreammux -> pgie (YOLOv4) -> tracker ->
    tiler -> nvvidconv -> nvosd -> EGL sink.

    Args:
        args: argv-style list; args[1:] are source URIs (file:// or rtsp://).
    """
    # Check input arguments: at least one URI is required.
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    # One FPS counter per stream.
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1
    print("number_sources : ", number_sources)

    # Standard GStreamer initialization.
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements.
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streamux \n ")
    # nvstreammux forms batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        # Request a sink pad on the muxer and link this source bin to it.
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # On Jetson (aarch64) an EGL transform is required before the EGL sink.
    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    streammux.set_property('width', 640)
    streammux.set_property('height', 480)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "config_infer_primary_yoloV4.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    sink.set_property("qos", 0)
    sink.set_property('sync', False)
    # Keep the nvinfer batch size in sync with the number of sources.
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    # Tiler grid: roughly square layout for number_sources streams.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    # Mirror the tracker config file onto nvtracker properties.
    # NOTE(review): GObject canonicalises '_' to '-' in property names, so
    # 'gpu_id' / 'enable_batch_process' resolve to the dashed properties.
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # Create an event loop and feed gstreamer bus messages to it.
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Probe on the tracker's src pad to read the generated metadata.
    tiler_src_pad = tracker.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    # List the sources.
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # Start playback and listen to events.
    pipeline.set_state(Gst.State.PLAYING)
    # BUGFIX: loop.run() was unguarded (the try/except had been commented
    # out), so a KeyboardInterrupt skipped the NULL-state cleanup below.
    try:
        loop.run()
    except KeyboardInterrupt:
        pass
    finally:
        # Cleanup: always drop the pipeline back to NULL state.
        print("Exiting app\n")
        pipeline.set_state(Gst.State.NULL)
def main(args):
    """Run a DeepStream pipeline that re-streams the inference output via RTSP.

    Topology: source_bins -> nvstreammux -> pgie -> nvvidconv1 ->
    filter1 (RGBA) -> tiler -> nvvidconv [-> nvosd] -> nvvidconv_postosd ->
    caps (I420) -> H264 encoder -> rtph264pay -> udpsink, fronted by a
    GstRtspServer mount at rtsp://localhost:8554/ds-test.

    Args:
        args: argv-style list; args[1:] are source URIs (file:// or rtsp://).
    """
    enable_osd = True

    # ROBUSTNESS: validate arguments like the other entry points do; the
    # original silently built a zero-source pipeline with batch-size 0.
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    # One FPS counter per stream.
    for i in range(0, len(args) - 1):
        name = "stream{0}".format(i)
        fps_streams[name] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization.
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements.
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streamux \n ")
    # nvstreammux forms batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        # Request a sink pad on the muxer and link this source bin to it.
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # NOTE: the tracker-config scaffolding that used to sit here was dead
    # code (config.read was commented out and nothing was applied); removed.

    # nvvidconv1 + filter1 convert the frames to RGBA, which is easier to
    # work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Caps filter ahead of the encoder: NVMM I420.
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps",
        Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder.
    codec = "H264"
    encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
    print("Creating H264 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    bitrate = 4000000  # bits per second
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Payload-encode the video into RTP packets.
    rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
    print("Creating H264 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # UDP sink feeding the RTSP server's udpsrc.
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', MUXER_OUTPUT_WIDTH)
    streammux.set_property('height', MUXER_OUTPUT_HEIGHT)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "model/primary_inference.cfg")
    # Keep the nvinfer batch size in sync with the number of sources.
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    # Tiler grid: roughly square layout for number_sources streams.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    # NOTE(review): this overrides the sync=1 set above; kept as the
    # effective (last-writer-wins) behaviour of the original.
    sink.set_property("sync", 0)

    if not is_aarch64():
        # Use CUDA unified memory so frames are CPU-accessible in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    if enable_osd:
        nvvidconv.link(nvosd)
        nvosd.link(nvvidconv_postosd)
    else:
        nvvidconv.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    # Create an event loop and feed gstreamer bus messages to it.
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start the RTSP server that serves the UDP stream produced above.
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print(
        "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n"
        % rtsp_port_num)

    # Probe on the tiler's sink pad to read the generated metadata.
    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        # BUGFIX: the original message said "src pad" for a sink-pad failure.
        sys.stderr.write(" Unable to get sink pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER,
                                 tiler_sink_pad_buffer_probe, 0)

    # List the sources.
    # BUGFIX: the original iterated args[:-1], silently dropping the last URI.
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # Start playback and listen to events.
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        pass
    finally:
        # Cleanup: always drop the pipeline back to NULL state.
        print("Exiting app\n")
        pipeline.set_state(Gst.State.NULL)
def main():
    """Build and run a headless benchmark pipeline (fakesink output).

    Topology: N copies of one input video -> nvstreammux -> queue1 -> pgie ->
    queue2 -> tracker -> queue3 -> sgie1 -> queue4 -> sgie2 -> queue5 ->
    sgie3 -> queue6 -> fakesink. Prints total elapsed wall-clock time at exit.
    """
    args = parse_args()
    global g_args
    g_args = args
    num_sources = args.num_sources
    path = os.path.abspath(os.getcwd())
    # Pick the input clip; every source bin plays the same file.
    if args.prof:
        INPUT_VIDEO = 'file://' + path + '/../source_code/dataset/sample_720p_prof.mp4'
    else:
        INPUT_VIDEO = 'file:///opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'
    print("Creating pipeline with " + str(num_sources) + " streams")

    # Initialise FPS counters, one per stream.
    for i in range(0, num_sources):
        fps_streams_new["stream{0}".format(i)] = GETFPS(i)

    # Standard GStreamer initialization.
    Gst.init(None)

    # Create Pipeline element that will form a connection of other elements.
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    ########### Create Elements required for the Pipeline ###########
    # nvstreammux forms batches from one or more sources.
    streammux = make_elm_or_print_err("nvstreammux", "Stream-muxer",
                                      "Stream-muxer")
    pipeline.add(streammux)

    for i in range(num_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = INPUT_VIDEO
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(args, i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        # Request a sink pad on the muxer and link this source bin to it.
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    # Primary inference; behaviour is set through its config file below.
    pgie = make_elm_or_print_err("nvinfer", "primary-inference", "pgie")
    # nvtracker gives objects unique ids.
    tracker = make_elm_or_print_err("nvtracker", "tracker", 'tracker')
    # Secondary inference engines (car color / make / type classifiers).
    sgie1 = make_elm_or_print_err("nvinfer", "secondary1-nvinference-engine",
                                  'sgie1')
    sgie2 = make_elm_or_print_err("nvinfer", "secondary2-nvinference-engine",
                                  'sgie2')
    sgie3 = make_elm_or_print_err("nvinfer", "secondary3-nvinference-engine",
                                  'sgie3')
    # Fakesink: discard output (benchmark mode, no rendering).
    fakesink = make_elm_or_print_err("fakesink", "fakesink", "Sink")
    # Queues to enable buffering between stages.
    queue1 = make_elm_or_print_err("queue", "queue1", "queue1")
    queue2 = make_elm_or_print_err("queue", "queue2", "queue2")
    queue3 = make_elm_or_print_err("queue", "queue3", "queue3")
    queue4 = make_elm_or_print_err("queue", "queue4", "queue4")
    queue5 = make_elm_or_print_err("queue", "queue5", "queue5")
    queue6 = make_elm_or_print_err("queue", "queue6", "queue6")

    ############ Set properties for the Elements ############
    # Input width, height and batch size.
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', num_sources)
    # Timeout in microseconds to wait after the first buffer is available
    # to push the batch even if a complete batch is not formed.
    streammux.set_property('batched-push-timeout', 4000000)

    # Configuration files for the inference engines.
    pgie.set_property('config-file-path',
                      "../source_code/N1/dstest4_pgie_config.txt")
    sgie1.set_property('config-file-path',
                       "../source_code/N1/dstest4_sgie1_config.txt")
    sgie2.set_property('config-file-path',
                       "../source_code/N1/dstest4_sgie2_config.txt")
    sgie3.set_property('config-file-path',
                       "../source_code/N1/dstest4_sgie3_config.txt")

    # Mirror the tracker config file onto nvtracker properties.
    # NOTE(review): GObject canonicalises '_' to '-' in property names, so
    # 'gpu_id' / 'enable_batch_process' resolve to the dashed properties.
    config = configparser.ConfigParser()
    config.read('../source_code/N1/dstest4_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    # Fakesink properties: no clock sync, no async state change.
    fakesink.set_property("sync", 0)
    fakesink.set_property("async", 0)

    ########## Add and Link Elements in the Pipeline ##########
    print("Adding elements to Pipeline \n")
    pipeline.add(queue1)
    pipeline.add(pgie)
    pipeline.add(queue2)
    pipeline.add(tracker)
    pipeline.add(queue3)
    pipeline.add(sgie1)
    pipeline.add(queue4)
    pipeline.add(sgie2)
    pipeline.add(queue5)
    pipeline.add(sgie3)
    pipeline.add(queue6)
    pipeline.add(fakesink)

    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(sgie1)
    sgie1.link(queue4)
    queue4.link(sgie2)
    sgie2.link(queue5)
    queue5.link(sgie3)
    # BUGFIX: queue6 was added to the pipeline but never linked (sgie3 was
    # wired straight to fakesink), leaving a dangling element; route through
    # queue6 like every other stage.
    sgie3.link(queue6)
    queue6.link(fakesink)

    # Create an event loop and feed gstreamer bus messages to it.
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    print("Added and Linked elements to pipeline")

    # Probe on sgie3's src pad to read the generated metadata.
    src_pad = sgie3.get_static_pad("src")
    if not src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        src_pad.add_probe(Gst.PadProbeType.BUFFER, src_pad_buffer_probe, 0)

    # List the sources.
    print("Now playing...")
    print("Starting pipeline \n")
    # Start playback and listen to events.
    pipeline.set_state(Gst.State.PLAYING)
    start_time = time.time()
    try:
        loop.run()
    except KeyboardInterrupt:
        pass
    finally:
        # Cleanup: always drop the pipeline back to NULL state and report
        # the elapsed wall-clock time.
        print("Exiting app\n")
        pipeline.set_state(Gst.State.NULL)
        print("--- %s seconds ---" % (time.time() - start_time))
def main(args):
    """Build and run the DeepStream inference pipeline for the given URIs.

    args is sys.argv-style: args[0] is the program name, args[1:] are stream
    URIs. Assembles sources -> nvstreammux -> nvinfer -> tiler -> converter
    -> OSD -> sink, starts a Sparkplug/MQTT publishing timer, then blocks in
    the GLib main loop until EOS/error/interrupt, and tears the pipeline down.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    # One FPS counter per input stream, kept in the module-level fps_streams dict.
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    # NOTE(review): GObject.threads_init() has been a deprecated no-op in
    # PyGObject for years -- kept for compatibility with older stacks.
    GObject.threads_init()
    Gst.init(None)

    # Create the Pipeline element that will hold and connect all other elements.
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    # One source bin per URI, each linked to a requested streammux sink pad
    # ("sink_0", "sink_1", ...).
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            # RTSP counts as a live source; streammux 'live-source' is set below.
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    # Queues between elements decouple the stages of the pipeline.
    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)

    print("Creating Pgie \n ")
    # Primary inference engine (YOLOv3 per the config file set below).
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating tiler \n ")
    # Tiler composites the batched streams into a single 2D grid.
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    # On-screen display draws bounding boxes / labels onto the frames.
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    ###########################################
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvosd.set_property('process-mode', OSD_PROCESS_MODE)
    nvosd.set_property('display-text', OSD_DISPLAY_TEXT)

    if (is_aarch64()):
        # Jetson needs an EGL transform in front of nveglglessink.
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    # Muxer configuration: every source is scaled/batched to 640x480.
    streammux.set_property('gpu-id', 0)
    streammux.set_property('enable-padding', 0)
    streammux.set_property('nvbuf-memory-type', 0)
    streammux.set_property('width', 640)
    streammux.set_property('height', 480)
    # NOTE(review): batch-size is hard-coded to 4 rather than number_sources;
    # confirm this matches the expected source count / built TensorRT engine.
    streammux.set_property('batch-size', 4)
    # NOTE(review): 40000 us push timeout is much lower than the 4000000 used
    # for the other pipeline in this file -- confirm it is intentional.
    streammux.set_property('batched-push-timeout', 40000)

    pgie.set_property('config-file-path', "config_infer_primary_yoloV3.txt")
    #pgie_batch_size=pgie.get_property("batch-size")
    #if(pgie_batch_size != number_sources):
    #print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", number_sources," \n")
    # NOTE(review): the commented-out code above suggests pgie batch-size was
    # meant to track number_sources; it is hard-coded to 4 -- confirm intent.
    pgie.set_property("batch-size", 4)
    sink.set_property('sync', False)

    # Arrange the tiler grid as close to square as the source count allows.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos", 0)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    # Link order: streammux -> pgie -> tiler -> nvvidconv -> nvosd -> (transform ->) sink,
    # with a queue between each pair of elements.
    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tiler)
    tiler.link(queue3)
    queue3.link(nvvidconv)
    nvvidconv.link(queue4)
    queue4.link(nvosd)
    if is_aarch64():
        nvosd.link(queue5)
        queue5.link(transform)
        transform.link(sink)
    else:
        nvosd.link(queue5)
        queue5.link(sink)

    # Create an event loop and feed gstreamer bus messages to it.
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Attach the metadata probe to pgie's src pad (despite the variable name,
    # this is the pgie pad, not the tiler's).
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if (i != 0):
            print(i, ": ", source)

    ######################################################################
    # Sparkplug B / MQTT setup: register the node death certificate as the
    # MQTT last-will, connect to the broker, publish birth certificates,
    # then start a self-rescheduling publisher for the detection metrics.
    # Create the node death payload
    deathPayload = sparkplug.getNodeDeathPayload()
    # Start of main program - Set up the MQTT client connection
    client.on_connect = on_connect
    client.on_message = on_message
    client.username_pw_set(myUsername, myPassword)
    deathByteArray = bytearray(deathPayload.SerializeToString())
    client.will_set("spBv1.0/" + myGroupId + "/NDEATH/" + myNodeName, deathByteArray, 0, False)
    client.connect(serverUrl, 1883, 60)
    # Publish the birth certificates
    publishBirth()

    def foo():
        # Periodically publish the current detection metrics over MQTT and
        # reschedule itself every WAIT_SECONDS via threading.Timer.
        payload = sparkplug.getDdataPayload()
        # Add the current detection counters to the payload.
        # NOTE(review): num_rectsx, frame_numberx and Object1..Object10 are
        # read from module scope -- presumably updated by the pad probe;
        # verify they are defined before the first timer fires.
        addMetric(payload, "input/number of objects", AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx)
        addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx, MetricDataType.Int16, frame_numberx)
        addMetric(payload, "input/Device Output1", AliasMap.Device_Output1, MetricDataType.Int16, Object1)
        addMetric(payload, "input/Device Output2", AliasMap.Device_Output2, MetricDataType.Int16, Object2)
        addMetric(payload, "input/Device Output3", AliasMap.Device_Output3, MetricDataType.Int16, Object3)
        addMetric(payload, "input/Device Output4", AliasMap.Device_Output4, MetricDataType.Int16, Object4)
        addMetric(payload, "input/Device Output5", AliasMap.Device_Output5, MetricDataType.Int16, Object5)
        addMetric(payload, "input/Device Output6", AliasMap.Device_Output6, MetricDataType.Int16, Object6)
        addMetric(payload, "input/Device Output7", AliasMap.Device_Output7, MetricDataType.Int16, Object7)
        addMetric(payload, "input/Device Output8", AliasMap.Device_Output8, MetricDataType.Int16, Object8)
        addMetric(payload, "input/Device Output9", AliasMap.Device_Output9, MetricDataType.Int16, Object9)
        addMetric(payload, "input/Device Output10", AliasMap.Device_Output10, MetricDataType.Int16, Object10)
        # Publish a message data
        byteArray = bytearray(payload.SerializeToString())
        client.publish(
            "spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
        # Sit and wait for inbound or outbound events
        for _ in range(1):
            time.sleep(1)
            client.loop()
        # NOTE(review): each run starts a new Timer and nothing cancels it,
        # so publishing continues until the process exits.
        threading.Timer(WAIT_SECONDS, foo).start()

    foo()
    ######################################################################

    print("Starting pipeline \n")
    # Start playback and listen to events until the loop quits (EOS/error
    # handled by bus_call) or the user interrupts.
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        # NOTE(review): bare except swallows KeyboardInterrupt and everything
        # else so the cleanup below always runs -- consider narrowing.
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)