def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)
    if is_aarch64() and name.find("nvv4l2decoder") != -1:
        print("Setting bufapi-version\n")
        Object.set_property("bufapi-version", True)
def decodebin_child_added(child_proxy, Object, name, user_data):
    log.info(f'- Decodebin child added: {name}')
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)
    if is_aarch64() and name.find("nvv4l2decoder") != -1:
        log.info('- Setting bufapi-version')
        Object.set_property("bufapi-version", True)
def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)
    if name.find("nvv4l2decoder") != -1:
        if is_aarch64():
            Object.set_property("enable-max-performance", True)
            Object.set_property("drop-frame-interval", 0)
            Object.set_property("num-extra-surfaces", 0)
        else:
            Object.set_property("gpu_id", GPU_ID)
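These callbacks are registered from a create_source_bin helper that is not shown in this section. A minimal sketch of such a helper, assuming the uridecodebin-based pattern from the DeepStream reference apps; cb_newpad (the dynamic pad-added handler) is assumed to be defined elsewhere:

def create_source_bin(index, uri):
    # Wrap a uridecodebin in a bin with a ghost "src" pad so every source
    # looks the same to the rest of the pipeline
    bin_name = "source-bin-%02d" % index
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")
    # uridecodebin picks a demuxer/decoder automatically for the given URI
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    uri_decode_bin.set_property("uri", uri)
    # cb_newpad (assumed) targets the ghost pad once the decoder's src pad
    # appears; decodebin_child_added (above) tweaks decoder properties as
    # children are created
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(
        Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin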
def create_sink_bin(index):
    print("Creating sink bin")
    bin_name = "sink-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create sink bin \n")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if is_aarch64():
        # Jetson needs nvegltransform in front of nveglglessink
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        Gst.Bin.add(nbin, transform)
        Gst.Bin.add(nbin, sink)
        transform.link(sink)
        head = transform
    else:
        # On dGPU the renderer can be fed directly
        Gst.Bin.add(nbin, sink)
        head = sink
    head_pad = head.get_static_pad("sink")
    bin_pad = nbin.add_pad(Gst.GhostPad.new("sink", head_pad))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in sink bin \n")
        return None
    return nbin
def main(args):
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    # Since the data format in the input file is elementary h264 stream,
    # we need a h264parser
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    # Use nvdec_h264 for hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on the decoder's output;
    # inferencing behavior is set through the config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payloader - encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # Make the UDP sink
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    print("Playing file %s " % stream_path)
    source.set_property('location', stream_path)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream-5.0/samples/python/apps/deepstream-test1/dstest1_pgie_config.txt"
    )

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    # Link the elements together:
    # file-source -> h264-parser -> nvh264-decoder ->
    # nvinfer -> nvvidconv -> nvosd -> nvvidconv_postosd ->
    # caps -> encoder -> rtppay -> udpsink
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start streaming
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print(
        "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n"
        % rtsp_port_num)

    # Add a probe to get informed of the generated metadata; we add the probe
    # to the sink pad of the osd element, since by that time the buffer will
    # have received all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
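Every main() in this section wires the pipeline bus to a bus_call helper that is not shown here. A minimal sketch, assuming the common helper shipped with the DeepStream Python apps and the Gst/sys imports these listings already use:

def bus_call(bus, message, loop):
    # Quit the GLib main loop on end-of-stream or error; log warnings
    t = message.type
    if t == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream\n")
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        sys.stderr.write("Warning: %s: %s\n" % (err, debug))
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s\n" % (err, debug))
        loop.quit()
    return True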
def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
        sys.exit(1)
    # TODO: use argument parsing
    width = int(args[3])
    height = int(args[5])

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the camera
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
    if not caps_v4l2src:
        sys.stderr.write(" Unable to create v4l2src capsfilter \n")

    print("Creating Video Converter \n")
    # Adding videoconvert -> nvvideoconvert as not all raw formats are
    # supported by nvvideoconvert; say YUYV is unsupported, which is the
    # common raw format for many Logitech USB cams.
    # In case we have a camera with a raw format supported by
    # nvvideoconvert, GStreamer's capability negotiation should be
    # intelligent enough to reduce compute by having videoconvert do
    # passthrough (TODO: we need to confirm this).
    # videoconvert makes sure a superset of raw formats is supported
    vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
    if not vidconvsrc:
        sys.stderr.write(" Unable to create videoconvert \n")

    # nvvideoconvert converts incoming raw buffers to NVMM memory
    # (NvBufSurface API)
    nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
    if not nvvidconvsrc:
        sys.stderr.write(" Unable to create Nvvideoconvert \n")

    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_vidconvsrc:
        sys.stderr.write(" Unable to create capsfilter \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on the camera's output;
    # inferencing behavior is set through the config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    print("Playing cam %s " % args[1])
    caps_v4l2src.set_property(
        'caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
    caps_vidconvsrc.set_property(
        'caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
    source.set_property('device', args[1])
    streammux.set_property('width', width)
    streammux.set_property('height', height)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")
    # Set sync = false to avoid late frame drops at the display-sink
    sink.set_property('sync', False)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(caps_v4l2src)
    pipeline.add(vidconvsrc)
    pipeline.add(nvvidconvsrc)
    pipeline.add(caps_vidconvsrc)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    # we link the elements together
    # v4l2src -> nvvideoconvert -> mux ->
    # nvinfer -> nvvideoconvert -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    source.link(caps_v4l2src)
    caps_v4l2src.link(vidconvsrc)
    vidconvsrc.link(nvvidconvsrc)
    nvvidconvsrc.link(caps_vidconvsrc)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = caps_vidconvsrc.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Add a probe to get informed of the generated metadata; we add the probe
    # to the sink pad of the osd element, since by that time the buffer will
    # have received all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
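The osd_sink_pad_buffer_probe referenced throughout these listings is not defined in this section. A minimal sketch of such a probe, assuming the pyds metadata bindings; it only walks the batch's frame metadata and prints a per-frame object count:

import pyds

def osd_sink_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        sys.stderr.write("Unable to get GstBuffer\n")
        return Gst.PadProbeReturn.OK
    # Retrieve the batch metadata attached upstream by nvstreammux/nvinfer
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        print("Frame", frame_meta.frame_num,
              "- objects:", frame_meta.num_obj_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK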
def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <config>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Set the model output resolution
    num_srcs = 2
    if args[1] == 'prisma_config.txt':
        op_size = 256
    elif args[1] == 'deeplab_config.txt':
        op_size = 513
    else:
        sys.stderr.write("Unknown config %s: cannot set output size\n" % args[1])
        sys.exit(1)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source elements for reading from camera1
    print("Creating Source: Cam1... \n ")
    source_cam1 = Gst.ElementFactory.make("v4l2src", "camera-source1")
    source_cam1.set_property("device", "/dev/video1")
    vidconv_src1 = Gst.ElementFactory.make("videoconvert", "vidconv_src1")
    nvvidconv_src1 = Gst.ElementFactory.make("nvvideoconvert", "nvvidconv_src1")
    filter_src1 = Gst.ElementFactory.make("capsfilter", "filter_src1")
    nvvidconv_src1.set_property("nvbuf-memory-type", 0)
    # Set max webcam resolution
    caps_filter_src1 = Gst.Caps.from_string(
        "video/x-raw(memory:NVMM), format=NV12, width=1280, height=720, framerate=20/1"
    )
    filter_src1.set_property("caps", caps_filter_src1)
    if not source_cam1:
        sys.stderr.write(" Unable to create source: cam1 \n")

    # Source elements for reading from camera2
    print("Creating Source: Cam2... \n ")
    source_cam2 = Gst.ElementFactory.make("v4l2src", "camera-source2")
    source_cam2.set_property("device", "/dev/video0")
    vidconv_src2 = Gst.ElementFactory.make("videoconvert", "vidconv_src2")
    nvvidconv_src2 = Gst.ElementFactory.make("nvvideoconvert", "nvvidconv_src2")
    filter_src2 = Gst.ElementFactory.make("capsfilter", "filter_src2")
    nvvidconv_src2.set_property("nvbuf-memory-type", 0)
    # Set max webcam resolution
    caps_filter_src2 = Gst.Caps.from_string(
        "video/x-raw(memory:NVMM), format=NV12, width=640, height=480, framerate=20/1"
    )
    filter_src2.set_property("caps", caps_filter_src2)
    if not source_cam2:
        sys.stderr.write(" Unable to create source: cam2 \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    # Use nvinfer to run inferencing on the muxed output;
    # inferencing behavior is set through the config file
    seg = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not seg:
        sys.stderr.write(" Unable to create seg \n")
    seg.set_property('config-file-path', args[1])
    seg.set_property('batch-size', 1)

    # Use nvsegvisual to visualize segmentation results
    nvsegvisual = Gst.ElementFactory.make("nvsegvisual", "nvsegvisual")
    if not nvsegvisual:
        sys.stderr.write(" Unable to create nvsegvisual \n")
    nvsegvisual.set_property('batch-size', 1)
    nvsegvisual.set_property('width', op_size)
    nvsegvisual.set_property('height', op_size)

    # Use nvtiler to composite the batched frames into a 2D tiled array
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    tiler_rows = int(math.sqrt(num_srcs))
    tiler_columns = int(math.ceil((1.0 * num_srcs) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", op_size * 2)
    tiler.set_property("height", op_size)

    # Use nvegltransform to convert video for eglsink
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    print("Creating EGLSink... \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    sink.set_property("sync", False)

    # Set up the pipeline
    print("Adding elements to Pipeline...\n")
    # Add all elements into the pipeline
    pipeline.add(source_cam1)
    pipeline.add(vidconv_src1)
    pipeline.add(nvvidconv_src1)
    pipeline.add(filter_src1)
    pipeline.add(source_cam2)
    pipeline.add(vidconv_src2)
    pipeline.add(nvvidconv_src2)
    pipeline.add(filter_src2)
    pipeline.add(streammux)
    pipeline.add(seg)
    pipeline.add(nvsegvisual)
    pipeline.add(tiler)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    # Connect the pipeline elements together
    print("Linking elements in the Pipeline...\n")
    # Link the elements together for the first camera source:
    # camera-source -> videoconvert -> nvvideoconvert ->
    # capsfilter -> nvstreammux
    source_cam1.link(vidconv_src1)
    vidconv_src1.link(nvvidconv_src1)
    nvvidconv_src1.link(filter_src1)
    sinkpad1 = streammux.get_request_pad("sink_0")
    if not sinkpad1:
        sys.stderr.write(" Unable to get the sink pad of streammux for src1\n")
    srcpad1 = filter_src1.get_static_pad("src")
    if not srcpad1:
        sys.stderr.write(" Unable to get source pad of capsfilter for src1\n")
    srcpad1.link(sinkpad1)

    # Link the elements together for the second camera source:
    # camera-source -> videoconvert -> nvvideoconvert ->
    # capsfilter -> nvstreammux
    source_cam2.link(vidconv_src2)
    vidconv_src2.link(nvvidconv_src2)
    nvvidconv_src2.link(filter_src2)
    sinkpad2 = streammux.get_request_pad("sink_1")
    if not sinkpad2:
        sys.stderr.write(" Unable to get the sink pad of streammux for src2\n")
    srcpad2 = filter_src2.get_static_pad("src")
    if not srcpad2:
        sys.stderr.write(" Unable to get source pad of capsfilter for src2\n")
    srcpad2.link(sinkpad2)

    # Link the elements together for rendering outputs:
    # nvstreammux -> nvinfer -> nvsegvisual ->
    # nvtiler -> nvegltransform -> nveglglessink
    streammux.link(seg)
    seg.link(nvsegvisual)
    nvsegvisual.link(tiler)
    if is_aarch64():
        tiler.link(transform)
        transform.link(sink)
    else:
        tiler.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    # Since the data format in the input file is elementary h264 stream,
    # we need a h264parser
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    # Use nvdec_h264 for hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on the decoder's output;
    # inferencing behavior is set through the config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie1:
        sys.stderr.write(" Unable to make sgie1 \n")

    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    if not sgie2:
        sys.stderr.write(" Unable to make sgie2 \n")

    sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    if not sgie3:
        sys.stderr.write(" Unable to make sgie3 \n")

    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    print("Playing file %s " % args[1])
    source.set_property('location', args[1])
    streammux.set_property('width', stream_width)
    streammux.set_property('height', stream_height)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    # Set properties of pgie and sgie
    pgie.set_property('config-file-path', "dstest2_pgie_config.txt")
    sgie1.set_property('config-file-path', "dstest2_sgie1_config.txt")
    sgie2.set_property('config-file-path', "dstest2_sgie2_config.txt")
    sgie3.set_property('config-file-path', "dstest2_sgie3_config.txt")

    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie1)
    pipeline.add(sgie2)
    pipeline.add(sgie3)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    # we link the elements together
    # file-source -> h264-parser -> nvh264-decoder ->
    # nvinfer -> nvtracker -> nvvidconv -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(tracker)
    # Note: sgie1-3 are added to the pipeline but left unlinked in this
    # variant; the tracker output goes straight to the on-screen display.
    tracker.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Add a probe to get informed of the generated metadata; we add the probe
    # to the sink pad of the osd element, since by that time the buffer will
    # have received all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass

    print(
        "\n******************** Statistical Analysis of Video Stream ********************\n"
    )
    x_min_list = []
    x_max_list = []
    y_min_list = []
    y_max_list = []
    id_list = []
    print(
        'Data of all vehicles detected after a quarter of the maximum pixel height:'
    )
    for car_objects in gate_list:
        x_min_list.append(car_objects.x_smallest)
        x_max_list.append(car_objects.x_largest)
        y_min_list.append(car_objects.y_smallest)
        y_max_list.append(car_objects.y_largest)
        id_list.append(car_objects.vehicle_id)
        print(car_objects.vehicle_id, car_objects.frames_list,
              car_objects.x_list, car_objects.y_list, car_objects.xc_list,
              car_objects.yc_list, car_objects.lane, car_objects.x_smallest,
              car_objects.x_largest, car_objects.y_smallest,
              car_objects.y_largest, sep=' ')
    print(
        '\n',
        'Tracking IDs of all vehicles detected after (below) a quarter of the maximum pixel height:',
        id_list, len(id_list), '\n')
    x_min_list.sort()
    x_max_list.sort()
    y_min_list.sort()
    y_max_list.sort()
    print('x_min:', x_min_list, len(x_min_list), '\n')
    print('x_max:', x_max_list, len(x_max_list), '\n')
    print('y_min:', y_min_list, len(y_min_list), '\n')
    print('y_max:', y_max_list, len(y_max_list), '\n')
    print('Optimal Frame Range:')
    print('x:', min(x_min_list), max(x_max_list))
    print('y:', min(y_max_list), max(y_min_list))
    midpoint = (min(y_max_list) + max(y_min_list)) / 2
    midpoint = int(midpoint)
    print('Midpoint of y = ', midpoint)
    id_list_gate = []
    for c in gate_list:
        for y in c.yc_list:
            if c.vehicle_id not in id_list_gate:
                id_list_gate.append(c.vehicle_id)
    print('\n',
          'Tracking IDs of all vehicles detected in the optimal frame range:',
          id_list_gate)
    print('\n', 'Number of vehicles =', len(id_list_gate), '\n')
    for f in gate_list:
        if f.vehicle_id in id_list_gate:
            my_array = np.array(f.yc_list)
            pos = (np.abs(my_array - midpoint)).argmin()
            print('tracking id =', f.vehicle_id,
                  ', optimal frame number =', f.frames_list[pos],
                  ', optimal coordinate = (', f.xc_list[pos], ',',
                  f.yc_list[pos], ')', ', lane =', f.lane[pos])
    optimal_frame = {
        "x1": min(x_min_list),
        "x2": max(x_max_list),
        "y1": min(y_max_list),
        "y2": max(y_min_list),
        "x11": x11,
        "x12": x12,
        "x13": x13,
        "x14": x14,
        "x21": x21,
        "x22": x22,
        "x23": x23,
        "x24": x24
    }
    with open("optimal_frame.json", "w") as write_file:
        json.dump(optimal_frame, write_file)

    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):
    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    msgconv = Gst.ElementFactory.make("nvmsgconv", "nvmsg-converter")
    if not msgconv:
        sys.stderr.write(" Unable to create msgconv \n")

    msgbroker = Gst.ElementFactory.make("nvmsgbroker", "nvmsg-broker")
    if not msgbroker:
        sys.stderr.write(" Unable to create msgbroker \n")

    tee = Gst.ElementFactory.make("tee", "nvsink-tee")
    if not tee:
        sys.stderr.write(" Unable to create tee \n")

    queue1 = Gst.ElementFactory.make("queue", "nvtee-que1")
    if not queue1:
        sys.stderr.write(" Unable to create queue1 \n")

    queue2 = Gst.ElementFactory.make("queue", "nvtee-que2")
    if not queue2:
        sys.stderr.write(" Unable to create queue2 \n")

    if no_display:
        print("Creating FakeSink \n")
        sink = Gst.ElementFactory.make("fakesink", "fakesink")
        if not sink:
            sys.stderr.write(" Unable to create fakesink \n")
    else:
        if is_aarch64():
            transform = Gst.ElementFactory.make("nvegltransform",
                                                "nvegl-transform")
        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")

    print("Playing file %s " % input_file)
    source.set_property('location', input_file)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', PGIE_CONFIG_FILE)
    msgconv.set_property('config', MSCONV_CONFIG_FILE)
    msgconv.set_property('payload-type', schema_type)
    msgbroker.set_property('proto-lib', proto_lib)
    msgbroker.set_property('conn-str', conn_str)
    msgbroker.set_property('config', cfg_file)
    if topic is not None:
        msgbroker.set_property('topic', topic)
    msgbroker.set_property('sync', False)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(tee)
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(msgconv)
    pipeline.add(msgbroker)
    pipeline.add(sink)
    if is_aarch64() and not no_display:
        pipeline.add(transform)

    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(tee)
    queue1.link(msgconv)
    msgconv.link(msgbroker)
    if is_aarch64() and not no_display:
        queue2.link(transform)
        transform.link(sink)
    else:
        queue2.link(sink)
    sink_pad = queue1.get_static_pad("sink")
    tee_msg_pad = tee.get_request_pad('src_%u')
    tee_render_pad = tee.get_request_pad("src_%u")
    if not tee_msg_pad or not tee_render_pad:
        sys.stderr.write("Unable to get request pads\n")
    tee_msg_pad.link(sink_pad)
    sink_pad = queue2.get_static_pad("sink")
    tee_render_pad.link(sink_pad)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pyds.unset_callback_funcs()
    pipeline.set_state(Gst.State.NULL)
def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    # Since the data format in the input file is elementary h264 stream,
    # we need a h264parser
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    # Use nvdec_h264 for hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on the decoder's output;
    # inferencing behavior is set through the config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie1:
        sys.stderr.write(" Unable to make sgie1 \n")

    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    if not sgie2:
        sys.stderr.write(" Unable to make sgie2 \n")

    sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    if not sgie3:
        sys.stderr.write(" Unable to make sgie3 \n")

    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    print("Playing file %s " % args[1])
    source.set_property('location', args[1])
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    # Set properties of pgie and sgie
    pgie.set_property(
        'config-file-path',
        "/home/edgar/deepstream_python_v0.9/python/apps/deepstream-test2/dstest2_pgie_config.txt"
    )
    sgie1.set_property(
        'config-file-path',
        "/home/edgar/deepstream_python_v0.9/python/apps/deepstream-test2/dstest2_sgie1_config.txt"
    )
    sgie2.set_property(
        'config-file-path',
        "/home/edgar/deepstream_python_v0.9/python/apps/deepstream-test2/dstest2_sgie2_config.txt"
    )
    sgie3.set_property(
        'config-file-path',
        "/home/edgar/deepstream_python_v0.9/python/apps/deepstream-test2/dstest2_sgie3_config.txt"
    )

    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie1)
    pipeline.add(sgie2)
    pipeline.add(sgie3)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    # we link the elements together
    # file-source -> h264-parser -> nvh264-decoder ->
    # nvinfer -> nvtracker -> sgie1 -> sgie2 -> sgie3 ->
    # nvvidconv -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie1)
    sgie1.link(sgie2)
    sgie2.link(sgie3)
    sgie3.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Add a probe to get informed of the generated metadata; we add the probe
    # to the sink pad of the osd element, since by that time the buffer will
    # have received all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except Exception as e:
        print(str(e))
    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvosd.set_property('process-mode', OSD_PROCESS_MODE)
    nvosd.set_property('display-text', OSD_DISPLAY_TEXT)

    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest3_pgie_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos", 0)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tiler)
    tiler.link(queue3)
    queue3.link(nvvidconv)
    nvvidconv.link(queue4)
    queue4.link(nvosd)
    if is_aarch64():
        nvosd.link(queue5)
        queue5.link(transform)
        transform.link(sink)
    else:
        nvosd.link(queue5)
        queue5.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    codec = "H264"
    bitrate = 4000000
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payloader - encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # Make the UDP sink
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    # print("Creating EGLSink \n")
    # sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    # if not sink:
    #     sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest3_pgie_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    # nvegltransform is only needed in front of nveglglessink (the commented-
    # out renderer above); RTP packets go straight to the udpsink on all
    # platforms.
    rtppay.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start streaming
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)

    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
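Once this pipeline is in PLAYING, the RTSP server mounts the stream at /ds-test on port 8554, so it should be reachable at rtsp://<host>:8554/ds-test from any RTSP-capable player (the exact host depends on where the app runs).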
def main(args):
    # Get all arguments
    input_file = args.input_file
    no_display = args.no_display

    # Load config file
    config = configparser.ConfigParser()
    config.read('configs/plugin_properties.ini')

    number_sources = len(input_file)
    if number_sources < 1:
        sys.stderr.write("Please provide path for file input or rtsp streams")
        sys.exit(1)

    for i in range(0, number_sources):
        fps_streams["stream{0}".format(i)] = GETFPS(i)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    print("Loading streammux properties \n")
    streammux_prop = config['streammux']
    streammux.set_property('width', streammux_prop.getint('width'))
    streammux.set_property('height', streammux_prop.getint('height'))
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout',
                           streammux_prop.getint('batched-push-timeout'))
    pipeline.add(streammux)

    for i, uri in enumerate(input_file):
        print("Creating source_bin ", i, " \n ")
        uri_name = uri
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    print("Loading Pgie properties \n")
    pgie_prop = config['primary-gie']
    pgie.set_property('config-file-path', pgie_prop['config-file'])
    pgie.set_property('model-engine-file', pgie_prop['model-engine-file'])
    pgie.set_property("batch-size", number_sources)
    # pgie_batch_size = pgie.get_property("batch-size")
    # if pgie_batch_size != number_sources:
    #     print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
    #           " with number of sources ", number_sources, " \n")
    #     pgie.set_property("batch-size", number_sources)

    print("Creating tracker \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    # tracker properties
    print("Loading tracker properties \n")
    tracker_prop = config['tracker']
    tracker.set_property("tracker-width", tracker_prop.getint('tracker-width'))
    tracker.set_property("tracker-height", tracker_prop.getint('tracker-height'))
    tracker.set_property("ll-lib-file", tracker_prop['ll-lib-file'])
    tracker.set_property("ll-config-file", tracker_prop['ll-config-file'])
    tracker.set_property("enable-batch-process",
                         tracker_prop.getint('enable-batch-process'))

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Loading tiler properties \n")
    tiler_prop = config['tiled-display']
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", tiler_prop.getint('width'))
    tiler.set_property("height", tiler_prop.getint('height'))

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    print("Creating msgconv \n ")
    msgconv = Gst.ElementFactory.make("nvmsgconv", "nvmsg-converter")
    if not msgconv:
        sys.stderr.write(" Unable to create msgconv \n")

    # msgconv properties
    print("Loading msgconv properties \n")
    msgconv_prop = config['message-converter']
    msgconv.set_property("config", msgconv_prop['msg-conv-config'])
    msgconv.set_property("payload-type",
                         msgconv_prop.getint('msg-conv-payload-type'))

    print("Creating msgbroker \n ")
    msgbroker = Gst.ElementFactory.make("nvmsgbroker", "nvmsg-broker")
    if not msgbroker:
        sys.stderr.write(" Unable to create msgbroker \n")

    print("Loading message broker properties \n")
    msgbroker_prop = config['message-broker']
    msgbroker.set_property("proto-lib", msgbroker_prop['proto-lib'])
    msgbroker.set_property("conn-str", msgbroker_prop['conn-str'])
    # SectionProxy.get() returns None for a missing key, so the None checks
    # below actually work (plain indexing would raise KeyError instead).
    msgbroker_cfg_file = msgbroker_prop.get("msg-broker-config")
    topic = msgbroker_prop.get("topic")
    if msgbroker_cfg_file is not None:
        msgbroker.set_property("config", msgbroker_cfg_file)
    if topic is not None:
        msgbroker.set_property("topic", topic)
    msgbroker.set_property("sync", msgbroker_prop.getboolean("sync"))

    print("Creating tee \n ")
    tee = Gst.ElementFactory.make("tee", "nvsink-tee")
    if not tee:
        sys.stderr.write(" Unable to create tee \n")

    print("Creating queue1 \n ")
    queue1 = Gst.ElementFactory.make("queue", "nvtee-que1")
    if not queue1:
        sys.stderr.write(" Unable to create queue1 \n")

    print("Creating queue2 \n ")
    queue2 = Gst.ElementFactory.make("queue", "nvtee-que2")
    if not queue2:
        sys.stderr.write(" Unable to create queue2 \n")

    print("Creating queue3 \n ")
    queue3 = Gst.ElementFactory.make("queue", "nvtee-que3")
    if not queue3:
        sys.stderr.write(" Unable to create queue3 \n")

    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating nvvidconv_postosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter
    print("Creating caps filter \n")
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    print("Loading encoder properties \n")
    encoder_prop = config['encoder']
    codec = encoder_prop['codec']
    if codec == "H264":
        print("Creating H264 Encoder")
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
    elif codec == "H265":
        print("Creating H265 Encoder")
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder \n")
    encoder.set_property('bitrate', encoder_prop.getint('bitrate'))
    if is_aarch64():
        encoder.set_property('preset-level', encoder_prop.getint('preset-level'))
        encoder.set_property('insert-sps-pps', encoder_prop.getint('insert-sps-pps'))
        encoder.set_property('bufapi-version', encoder_prop.getint('bufapi-version'))

    # Make the payload - encode video into RTP packets
    if codec == "H264":
        print("Creating H264 rtppay")
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
    elif codec == "H265":
        print("Creating H265 rtppay")
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay \n")

    # Make the UDP sink
    print("Creating udp sink \n")
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    udpsink_prop = config['udpsink']
    sink.set_property('host', udpsink_prop['host'])
    sink.set_property('port', udpsink_prop.getint('port'))
    sink.set_property('async', udpsink_prop.getboolean('async'))
    sink.set_property('sync', udpsink_prop.getint('sync'))

    # Create fake sink
    print("Creating FakeSink \n")
    fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
    if not fakesink:
        sys.stderr.write(" Unable to create fakesink \n")

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(tee)
    pipeline.add(queue1)
    pipeline.add(queue2)
    # pipeline.add(queue3)
    pipeline.add(msgconv)
    pipeline.add(msgbroker)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)
    # pipeline.add(fakesink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(tee)
    queue1.link(msgconv)
    msgconv.link(msgbroker)
    queue2.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    if is_aarch64():
        rtppay.link(transform)
        transform.link(sink)
    else:
        rtppay.link(sink)
    # queue3.link(fakesink)

    tee_msg_pad = tee.get_request_pad('src_%u')
    tee_render_pad = tee.get_request_pad("src_%u")
    # tee_fakesink_pad = tee.get_request_pad("src_%u")
    if not tee_msg_pad or not tee_render_pad:
        sys.stderr.write("Unable to get request pads \n")
    msg_sink_pad = queue1.get_static_pad("sink")
    tee_msg_pad.link(msg_sink_pad)
    vid_sink_pad = queue2.get_static_pad("sink")
    tee_render_pad.link(vid_sink_pad)
    # fake_sink_pad = queue3.get_static_pad("sink")
    # tee_fakesink_pad.link(fake_sink_pad)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start streaming
    rtsp_prop = config['rtsp-server']
    rtsp_port_num = rtsp_prop.getint('port')
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (udpsink_prop.getint('port'), codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)

    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(input_file):
        print(i, ": ", source)

    print("Starting pipeline \n")
    # Start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pyds.unset_callback_funcs()
    pipeline.set_state(Gst.State.NULL)
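For reference, a minimal sketch of the configs/plugin_properties.ini layout this variant expects, reconstructed only from the keys the code above reads. Every value shown is a placeholder assumption (ports and host echo the hard-coded values used by the other variants in this collection; the library paths depend on the installed DeepStream version), not the project's actual configuration:

; plugin_properties.ini -- hypothetical example, values are placeholders
[streammux]
width = 1920
height = 1080
batched-push-timeout = 4000000

[primary-gie]
config-file = configs/pgie_config.txt
model-engine-file = models/primary.engine

[tracker]
tracker-width = 640
tracker-height = 384
ll-lib-file = /opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
ll-config-file = configs/tracker_config.yml
enable-batch-process = 1

[tiled-display]
width = 1280
height = 720

[message-converter]
msg-conv-config = configs/msgconv_config.txt
msg-conv-payload-type = 0

[message-broker]
proto-lib = /opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
conn-str = localhost;9092
msg-broker-config = configs/kafka_config.txt
topic = ds-events
sync = false

[encoder]
codec = H264
bitrate = 4000000
preset-level = 1
insert-sps-pps = 1
bufapi-version = 1

[udpsink]
host = 224.224.255.255
port = 5400
async = false
sync = 1

[rtsp-server]
port = 8554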
def main(args):
    enable_osd = True
    for i in range(0, len(args) - 1):
        name = "stream{0}".format(i)
        fps_streams[name] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    for i in range(number_sources):
        # os.mkdir(folder_name + "/stream_" + str(i))
        # frame_count["stream_" + str(i)] = 0
        # saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Set properties of tracker (left disabled in this variant)
    config = configparser.ConfigParser()
    # config.read('model/tracker_config.txt')
    config.sections()

    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")

    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    codec = "H264"
    encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
    print("Creating H264 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    bitrate = 4000000
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payload - encode video into RTP packets
    rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
    print("Creating H264 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # Make the UDP sink
    udpsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', udpsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', MUXER_OUTPUT_WIDTH)
    streammux.set_property('height', MUXER_OUTPUT_HEIGHT)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "model/primary_inference.cfg")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    # Overrides the sync=1 set above: never block the UDP sink on the clock.
    sink.set_property("sync", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    if enable_osd:
        nvvidconv.link(nvosd)
        nvosd.link(nvvidconv_postosd)
    else:
        nvvidconv.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start streaming
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (udpsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print(
        "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n"
        % rtsp_port_num)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get sink pad of tiler \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER,
                                 tiler_sink_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # Start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
def main(args):
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    debug("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    # debug("Creating Source \n ")
    # source = Gst.ElementFactory.make("filesrc", "file-source")
    # if not source:
    #     sys.stderr.write(" Unable to create Source \n")

    # Source element for reading from an rtsp stream
    debug("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    # Initialization
    folder_name = tempfile.mkdtemp()
    frame_count = {}
    saved_count = {}

    # For each source... for now, just one source, #0:
    i = 0
    os.mkdir(folder_name + "/stream_" + str(i))
    frame_count["stream_" + str(i)] = 0
    saved_count["stream_" + str(i)] = 0
    debug("Creating source_bin " + str(i) + " \n ")
    # uri_name = args[i+1]
    # uri_name = "rtsp://192.168.1.11/LiveH264_0"  # <-- rtsp URI goes here!
    uri_name = stream_path
    if uri_name.find("rtsp://") == 0:
        is_live = True
    source_bin = create_source_bin(i, uri_name)
    if not source_bin:
        sys.stderr.write("Unable to create source bin \n")
    pipeline.add(source_bin)
    padname = "sink_%u" % i
    sinkpad = streammux.get_request_pad(padname)
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin \n")
    srcpad = source_bin.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin \n")
    srcpad.link(sinkpad)

    # Since the data format in the input file is an elementary h264 stream,
    # we need a h264parser. (Unused in this RTSP variant: the source bin
    # already decodes, so the parser and decoder below are never linked.)
    debug("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    # Use nvdec_h264 for hardware accelerated decode on GPU
    debug("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    # *** already made above ***
    # Create nvstreammux instance to form batches from one or more sources.
    # streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    # if not streammux:
    #     sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        debug("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        debug("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payload - encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        debug("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        debug("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # debug("Creating EGLSink \n")
    # sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    # if not sink:
    #     sys.stderr.write(" Unable to create egl sink \n")
    # sink.set_property("sync", 0)

    # debug("Creating FAKE sink \n")
    # sink = Gst.ElementFactory.make("fakesink", "nvvideo-renderer")
    # if not sink:
    #     sys.stderr.write(" Unable to create FAKE sink \n")
    # sink.set_property("sync", 0)

    debug("Creating RTSP output stream sink\n")
    # Make the multicast UDP sink
    UDP_MULTICAST_ADDRESS = '224.224.255.255'
    UDP_MULTICAST_PORT = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', UDP_MULTICAST_ADDRESS)
    sink.set_property('port', UDP_MULTICAST_PORT)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    debug("Playing RTSP input stream...")
    # debug("Playing file %s " % stream_path)
    # source.set_property('location', stream_path)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', CONFIG_FILE)

    debug("Adding elements to Pipeline \n")
    # pipeline.add(source)
    # pipeline.add(h264parser)
    # pipeline.add(decoder)
    # pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    # Link the elements together:
    # file-source -> h264-parser -> nvh264-decoder ->
    # nvinfer -> nvvidconv -> nvosd -> nvvidconv_postosd ->
    # caps -> encoder -> rtppay -> udpsink
    debug("Linking elements in the Pipeline \n")
    # source.link(h264parser)
    # h264parser.link(decoder)
    # sinkpad = streammux.get_request_pad("sink_0")
    # if not sinkpad:
    #     sys.stderr.write(" Unable to get the sink pad of streammux \n")
    # srcpad = decoder.get_static_pad("src")
    # if not srcpad:
    #     sys.stderr.write(" Unable to get source pad of decoder \n")
    # srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start streaming
    server = GstRtspServer.RTSPServer.new()
    server.props.service = RTSPPORTNUM
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (UDP_MULTICAST_PORT, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print("\n *** DeepStream: Launched RTSP Streaming!\n\n")

    # Let's add a probe to get informed of the generated metadata; we add the
    # probe to the sink pad of the osd element, since by that time, the buffer
    # will have all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # Start playback and listen to events
    debug("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
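The variant above leans on module-level globals (debug, stream_path, codec, bitrate, CONFIG_FILE, RTSPPORTNUM) that are defined elsewhere in its script. A hypothetical set of definitions, with every value a placeholder assumption, might look like:

# Hypothetical module-level globals assumed by the debug() variant above;
# the real definitions live elsewhere in the project.
stream_path = "rtsp://192.168.1.11/LiveH264_0"   # placeholder RTSP input
codec = "H264"
bitrate = 4000000
CONFIG_FILE = "dstest1_pgie_config.txt"          # placeholder nvinfer config
RTSPPORTNUM = "8554"   # RTSPServer.props.service expects a string

def debug(msg):
    # Thin wrapper around print so output can be silenced in production.
    print(msg)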
def main(args):
    # Check input arguments
    # At least one video file or RTSP stream must be provided
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    # In test2 the parameters are "filesrc" and "file-source", which only allow
    # a single file; here we accept one or more sources of different types.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    # Creation of the GStreamer elements
    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie1:
        sys.stderr.write(" Unable to make sgie1 \n")
    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    if not sgie2:
        sys.stderr.write(" Unable to make sgie2 \n")
    sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    if not sgie3:
        sys.stderr.write(" Unable to make sgie3 \n")

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    # Finally render the osd output; the test2 version lacks the transform check:
    # if is_aarch64():
    #     transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    # Added code ends here

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path',
                      CURRENT_DIR + "/dstest5_pgie_config.txt")
    # TODO: add the full path of the configuration file
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    # Set the sgie properties; the full config-file paths still need to be added
    sgie1.set_property('config-file-path',
                       CURRENT_DIR + "/dstest5_sgie1_config.txt")
    sgie2.set_property('config-file-path',
                       CURRENT_DIR + "/dstest5_sgie2_config.txt")
    sgie3.set_property('config-file-path',
                       CURRENT_DIR + "/dstest5_sgie3_config.txt")

    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read(CURRENT_DIR + '/dstest5_tracker_config.txt')
    # TODO: add the full path of the configuration file
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)
    # Added code ends here

    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    print("Adding elements to Pipeline \n")
    # Additional elements added for detection and tracking
    pipeline.add(pgie)
    pipeline.add(tracker)  # added
    pipeline.add(sgie1)    # added
    pipeline.add(sgie2)    # added
    pipeline.add(sgie3)    # added
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)  # added once; the original added sink a second time

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    # pgie.link(tiler)  # must go after the secondary classifiers
    pgie.link(tracker)  # the line above is replaced by the tracker option
    tracker.link(sgie1)  # added
    sgie1.link(sgie2)    # added
    sgie2.link(sgie3)    # added
    sgie3.link(tiler)    # added .... nvvidconv
    tiler.link(nvvidconv)  # unclear whether this links to nvosd or nvvidconv
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Unclear what tiler does here... (probe actually sits on the pgie src pad)
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # Start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except Exception as e:
        print(str(e))
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
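The key-by-key loop above mirrors a [tracker] section like the following sketch. Paths and sizes are placeholder assumptions (the library path in particular changes across DeepStream releases), not the project's actual dstest5_tracker_config.txt:

; [tracker] section sketch -- every value is a placeholder assumption
[tracker]
tracker-width = 640
tracker-height = 384
gpu-id = 0
ll-lib-file = /opt/nvidia/deepstream/deepstream/lib/libnvds_mot_klt.so
ll-config-file = tracker_config.yml
enable-batch-process = 1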
def __init__(self):
    super().__init__('inference_publisher')

    # Taking name of input source from user
    self.declare_parameter('input_source')
    param_ip_src = self.get_parameter('input_source').value

    self.publisher_detection = self.create_publisher(
        Detection2DArray, 'infer_detection', 10)
    self.publisher_classification = self.create_publisher(
        Classification2D, 'infer_classification', 10)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    self.pipeline = Gst.Pipeline()
    if not self.pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating Source \n ")
    source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
    if not caps_v4l2src:
        sys.stderr.write(" Unable to create v4l2src capsfilter \n")

    print("Creating Video Converter \n")
    # videoconvert to make sure a superset of raw formats are supported
    vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
    if not vidconvsrc:
        sys.stderr.write(" Unable to create videoconvert \n")

    # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
    nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
    if not nvvidconvsrc:
        sys.stderr.write(" Unable to create Nvvideoconvert \n")

    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_vidconvsrc:
        sys.stderr.write(" Unable to create capsfilter \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference1")
    if not pgie:
        sys.stderr.write(" Unable to create pgie1 \n")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie1:
        sys.stderr.write(" Unable to make sgie1 \n")
    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    if not sgie2:
        sys.stderr.write(" Unable to make sgie2 \n")
    sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    if not sgie3:
        sys.stderr.write(" Unable to make sgie3 \n")

    pgie2 = Gst.ElementFactory.make("nvinfer", "primary-inference2")
    if not pgie2:
        sys.stderr.write(" Unable to create pgie2 \n")

    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor2")
    if not nvvidconv2:
        sys.stderr.write(" Unable to create nvvidconv2 \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd1 = Gst.ElementFactory.make("nvdsosd", "onscreendisplay1")
    if not nvosd1:
        sys.stderr.write(" Unable to create nvosd1 \n")
    nvosd2 = Gst.ElementFactory.make("nvdsosd", "onscreendisplay2")
    if not nvosd2:
        sys.stderr.write(" Unable to create nvosd2 \n")

    # Finally render the osd output
    if is_aarch64():
        transform1 = Gst.ElementFactory.make("nvegltransform", "nvegl-transform1")
        transform2 = Gst.ElementFactory.make("nvegltransform", "nvegl-transform2")

    print("Creating EGLSink \n")
    sink1 = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer1")
    if not sink1:
        sys.stderr.write(" Unable to create egl sink1 \n")
    sink2 = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer2")
    if not sink2:
        sys.stderr.write(" Unable to create egl sink2 \n")

    source.set_property('device', param_ip_src)
    caps_v4l2src.set_property(
        'caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
    caps_vidconvsrc.set_property(
        'caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    # Set properties of pgie and sgie
    location = os.getcwd() + "/src/ros2_deepstream/config_files/"
    pgie.set_property('config-file-path', location + "dstest2_pgie_config.txt")
    sgie1.set_property('config-file-path', location + "dstest2_sgie1_config.txt")
    sgie2.set_property('config-file-path', location + "dstest2_sgie2_config.txt")
    sgie3.set_property('config-file-path', location + "dstest2_sgie3_config.txt")
    pgie2.set_property('config-file-path', location + "dstest1_pgie_config.txt")
    sink1.set_property('sync', False)
    sink2.set_property('sync', False)

    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read(location + 'dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    tee = Gst.ElementFactory.make('tee', 'tee')
    queue1 = Gst.ElementFactory.make('queue', 'infer1')
    queue2 = Gst.ElementFactory.make('queue', 'infer2')

    print("Adding elements to Pipeline \n")
    self.pipeline.add(source)
    self.pipeline.add(caps_v4l2src)
    self.pipeline.add(vidconvsrc)
    self.pipeline.add(nvvidconvsrc)
    self.pipeline.add(caps_vidconvsrc)
    self.pipeline.add(streammux)
    self.pipeline.add(pgie)
    self.pipeline.add(pgie2)
    self.pipeline.add(tracker)
    self.pipeline.add(sgie1)
    self.pipeline.add(sgie2)
    self.pipeline.add(sgie3)
    self.pipeline.add(nvvidconv1)
    self.pipeline.add(nvvidconv2)
    self.pipeline.add(nvosd1)
    self.pipeline.add(nvosd2)
    self.pipeline.add(sink1)
    self.pipeline.add(sink2)
    self.pipeline.add(tee)
    self.pipeline.add(queue1)
    self.pipeline.add(queue2)
    if is_aarch64():
        self.pipeline.add(transform1)
        self.pipeline.add(transform2)

    # Link the elements together
    print("Linking elements in the Pipeline \n")
    source.link(caps_v4l2src)
    caps_v4l2src.link(vidconvsrc)
    vidconvsrc.link(nvvidconvsrc)
    nvvidconvsrc.link(caps_vidconvsrc)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = caps_vidconvsrc.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
    srcpad.link(sinkpad)
    streammux.link(tee)
    tee.link(queue1)
    tee.link(queue2)
    queue1.link(pgie)
    queue2.link(pgie2)
    pgie.link(tracker)
    tracker.link(sgie1)
    sgie1.link(sgie2)
    sgie2.link(sgie3)
    sgie3.link(nvvidconv1)
    nvvidconv1.link(nvosd1)
    pgie2.link(nvvidconv2)
    nvvidconv2.link(nvosd2)
    if is_aarch64():
        nvosd1.link(transform1)
        transform1.link(sink1)
        nvosd2.link(transform2)
        transform2.link(sink2)
    else:
        nvosd1.link(sink1)
        nvosd2.link(sink2)

    # Create an event loop and feed gstreamer bus messages to it
    self.loop = GObject.MainLoop()
    bus = self.pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, self.loop)

    # Let's add probes to get informed of the generated metadata; we add them
    # to the sink pads of the osd elements, since by that time, the buffer
    # will have all the metadata.
    osdsinkpad1 = nvosd1.get_static_pad("sink")
    if not osdsinkpad1:
        sys.stderr.write(" Unable to get sink pad of nvosd1 \n")
    osdsinkpad1.add_probe(Gst.PadProbeType.BUFFER,
                          self.osd_sink_pad_buffer_probe, 0)
    osdsinkpad2 = nvosd2.get_static_pad("sink")
    if not osdsinkpad2:
        sys.stderr.write(" Unable to get sink pad of nvosd2 \n")
    osdsinkpad2.add_probe(Gst.PadProbeType.BUFFER,
                          self.osd_sink_pad_buffer_probe, 0)
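The __init__ above belongs to an inference_publisher ROS 2 node; the class and entry point are not shown. A hypothetical launcher (the class name InferencePublisher is an assumption, as is the file layout) could drive it like this. Note that the node publishes from inside the GStreamer pad probes, so running the GLib loop is enough; no rclpy.spin() is strictly required for the publishers:

# Hypothetical entry point for the ROS 2 node sketched above.
import rclpy

def main(args=None):
    rclpy.init(args=args)
    node = InferencePublisher()              # assumed class name
    node.pipeline.set_state(Gst.State.PLAYING)
    try:
        node.loop.run()                      # GLib loop services the bus watch
    except KeyboardInterrupt:
        pass
    node.pipeline.set_state(Gst.State.NULL)  # cleanup
    node.destroy_node()
    rclpy.shutdown()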
def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    #########################################
    # Source element for reading from the camera
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
    if not caps_v4l2src:
        sys.stderr.write(" Unable to create v4l2src capsfilter \n")

    print("Creating Video Converter \n")
    # Adding videoconvert -> nvvideoconvert, as not all raw formats are
    # supported by nvvideoconvert; say YUYV is unsupported, which is the
    # common raw format for many logi usb cams. In case we have a camera
    # whose raw format is supported by nvvideoconvert, GStreamer's
    # capability negotiation should be intelligent enough to reduce compute
    # by having videoconvert do passthrough (TODO: confirm this).

    # videoconvert to make sure a superset of raw formats are supported
    vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
    if not vidconvsrc:
        sys.stderr.write(" Unable to create videoconvert \n")

    # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
    nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
    if not nvvidconvsrc:
        sys.stderr.write(" Unable to create Nvvideoconvert \n")

    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_vidconvsrc:
        sys.stderr.write(" Unable to create capsfilter \n")

    ###########################################
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on camera's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    ###########################################
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output
    # if is_aarch64():
    #     transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")

    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    bitrate = 4000000
    codec = "H264"
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payload - encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # Make the UDP sink
    udpsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', udpsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 0)

    print("Playing cam %s " % args[1])
    caps_v4l2src.set_property(
        'caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
    caps_vidconvsrc.set_property(
        'caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
    source.set_property('device', args[1])
    streammux.set_property('gpu-id', 0)
    streammux.set_property('live-source', 1)
    streammux.set_property('enable-padding', 0)
    streammux.set_property('nvbuf-memory-type', 0)
    streammux.set_property('width', 640)
    streammux.set_property('height', 480)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 40000)
    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")
    # Set sync = false to avoid late frame drops at the display-sink
    # sink.set_property('sync', False)

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(caps_v4l2src)
    pipeline.add(vidconvsrc)
    pipeline.add(nvvidconvsrc)
    pipeline.add(caps_vidconvsrc)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)
    # if is_aarch64():
    #     pipeline.add(transform)

    # We link the elements together:
    # v4l2src -> nvvideoconvert -> mux ->
    # nvinfer -> nvvideoconvert -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    source.link(caps_v4l2src)
    caps_v4l2src.link(vidconvsrc)
    vidconvsrc.link(nvvidconvsrc)
    nvvidconvsrc.link(caps_vidconvsrc)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = caps_vidconvsrc.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    ###########################################
    # Start streaming
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (udpsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print(
        "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n"
        % rtsp_port_num)

    # Let's add a probe to get informed of the generated metadata; we add the
    # probe to the sink pad of the osd element, since by that time, the buffer
    # will have all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    ######################################################################
    # Create the node death payload
    deathPayload = sparkplug.getNodeDeathPayload()

    # Start of main program - set up the MQTT client connection
    client.on_connect = on_connect
    client.on_message = on_message
    client.username_pw_set(myUsername, myPassword)
    deathByteArray = bytearray(deathPayload.SerializeToString())
    client.will_set("spBv1.0/" + myGroupId + "/NDEATH/" + myNodeName,
                    deathByteArray, 0, False)
    client.connect(serverUrl, 1883, 60)

    # Publish the birth certificates
    publishBirth()

    def foo():
        # Periodically publish some new data
        payload = sparkplug.getDdataPayload()

        # Add some random data to the inputs
        addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx,
                  MetricDataType.Int16, frame_numberx)
        addMetric(payload, "input/number of objects", AliasMap.Device_num_rectsx,
                  MetricDataType.Int16, num_rectsx)
        addMetric(payload, "input/Vehicle count", AliasMap.Device_counter1,
                  MetricDataType.Int16, counter1)
        addMetric(payload, "input/Person count", AliasMap.Device_counter2,
                  MetricDataType.Int16, counter2)

        # Note this data we're setting to STALE via the propertyset as an example
        metric = addMetric(payload, None, AliasMap.Device_Metric1,
                           MetricDataType.Boolean, random.choice([True, False]))
        metric.properties.keys.extend(["Quality"])
        propertyValue = metric.properties.values.add()
        propertyValue.type = ParameterDataType.Int32
        propertyValue.int_value = 500

        # Publish a message data
        byteArray = bytearray(payload.SerializeToString())
        client.publish(
            "spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName,
            byteArray, 0, False)
        # Re-arm the timer so the metrics keep flowing
        threading.Timer(WAIT_SECONDS, foo).start()

    foo()
    ######################################################################

    # Start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
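The osd_sink_pad_buffer_probe that feeds the Sparkplug metrics above is not shown. A minimal sketch of how such a probe could populate the frame_numberx / num_rectsx / counter1 / counter2 globals, modeled on the metadata walk used in the DeepStream Python samples (class IDs 0 and 2 for vehicle/person follow the stock 4-class resnet10 model, an assumption here; error handling is trimmed for brevity):

# Sketch only: walks batch -> frame -> object metadata and updates the
# module-level counters that foo() publishes over MQTT.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
import pyds

PGIE_CLASS_ID_VEHICLE = 0   # assumption: stock resnet10 class layout
PGIE_CLASS_ID_PERSON = 2

def osd_sink_pad_buffer_probe(pad, info, u_data):
    global frame_numberx, num_rectsx, counter1, counter2
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        frame_numberx = frame_meta.frame_num
        num_rectsx = frame_meta.num_obj_meta
        counter1 = 0
        counter2 = 0
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE:
                counter1 += 1
            elif obj_meta.class_id == PGIE_CLASS_ID_PERSON:
                counter2 += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK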
def main(args):
    # Check input arguments
    # Accepts any number of sources; in our case, RTSP streams from the
    # Meraki cameras
    number_sources = len(args) - 1
    if number_sources < 1:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    for i in range(0, number_sources):
        fps_streams["stream{0}".format(i)] = GETFPS(i)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    # Flag to check whether at least one source is live
    is_live = False

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    # Create a source bin that accepts any type of video file or RTSP stream
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        print("Padname : " + padname)
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    # Meraki RTSP video is already H264, so an h264parse element should not be
    # necessary. The parser and decoder below are therefore created but never
    # added or linked: the source bins already deliver decoded NVMM buffers.
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    # Version 2.0 will not run secondary inference,
    # so sgie1, sgie2, and sgie3 are disabled.
    # sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    # if not sgie1:
    #     sys.stderr.write(" Unable to make sgie1 \n")
    # sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    # if not sgie2:
    #     sys.stderr.write(" Unable to make sgie2 \n")
    # sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    # if not sgie3:
    #     sys.stderr.write(" Unable to make sgie3 \n")

    # Version 2 should also optionally allow rendering to screen or not.
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    sink.set_property('sync', 0)

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    # Model configuration:
    # dstest2_pgie_config contains the standard model; the others are for
    # yoloV3, yoloV3_tiny, and fasterRCNN.
    pgie.set_property('config-file-path',
                      CURRENT_DIR + "/configs/dstest2_pgie_config.txt")
    # pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_yoloV3.txt")
    # pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_yoloV3_tiny.txt")
    # pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_fasterRCNN.txt")
    # TODO: add the full path of the configuration file
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    # Set properties of pgie and sgie
    # Version 2.0 does not configure secondary inference.
    # sgie1.set_property('config-file-path', CURRENT_DIR + "/configs/dstest2_sgie1_config.txt")
    # sgie2.set_property('config-file-path', CURRENT_DIR + "/configs/dstest2_sgie2_config.txt")
    # sgie3.set_property('config-file-path', CURRENT_DIR + "/configs/dstest2_sgie3_config.txt")

    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('configs/dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        elif key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        elif key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        elif key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        elif key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        elif key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    # Lay out the tiler grid
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    print("Adding elements to Pipeline \n")
    # Version 2 does not require secondary inference
    # pipeline.add(decoder)  # unused: the source bins already decode
    pipeline.add(pgie)
    pipeline.add(tracker)
    # pipeline.add(sgie1)
    # pipeline.add(sgie2)
    # pipeline.add(sgie3)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    # We link the elements together:
    # source_bin -> streammux -> PGIE -> Tracker ->
    # tiler -> nvvidconv -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    # The source bins were already linked to streammux's request pads in the
    # loop above, so the single-file path below stays disabled. Re-linking the
    # pads or routing source_bin through the decoder here would fail.
    # source.link(h264parser)
    # h264parser.link(decoder)
    # sinkpad = streammux.get_request_pad("sink_0")
    # if not sinkpad:
    #     sys.stderr.write(" Unable to get the sink pad of streammux \n")
    # srcpad = decoder.get_static_pad("src")
    # if not srcpad:
    #     sys.stderr.write(" Unable to get source pad of decoder \n")
    # srcpad.link(sinkpad)
    # source_bin.link(decoder)
    # decoder.link(streammux)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    # tracker.link(sgie1)
    # sgie1.link(sgie2)
    # sgie2.link(sgie3)
    # sgie3.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Let's add a probe to get informed of the generated metadata; we add the
    # probe to the src pad of the tracker, since by that time, the buffer
    # will have all the metadata.
    tiler_src_pad = tracker.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    print("Starting pipeline \n")
    # Start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except Exception as e:
        print(str(e))
    # cleanup
    pipeline.set_state(Gst.State.NULL)
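Most variants here index into fps_streams with GETFPS instances that are never defined in this collection. A minimal sketch modeled on the FPS helper shipped with the DeepStream Python samples (a rolling count printed every five seconds; the exact print format is an assumption):

# Sketch of the assumed GETFPS helper and the module-level fps_streams dict.
import time

fps_streams = {}

class GETFPS:
    def __init__(self, stream_id):
        self.start_time = time.time()
        self.is_first = True
        self.frame_count = 0
        self.stream_id = stream_id

    def get_fps(self):
        # Called once per frame from a pad probe; prints an average
        # over each 5-second window, then resets the counter.
        end_time = time.time()
        if self.is_first:
            self.start_time = end_time
            self.is_first = False
        if end_time - self.start_time > 5:
            print("Fps of stream", self.stream_id, "is",
                  float(self.frame_count) / 5.0)
            self.frame_count = 0
            self.start_time = end_time
        else:
            self.frame_count += 1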
def main():
    number_sources = 1

    GObject.threads_init()
    Gst.init(None)
    pipeline = Gst.Pipeline()
    is_live = False

    uri_name = "rtsp://192.168.1.10:554/user=admin_password=tlJwpbo6_channel=1_stream=0.sdp"
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    pipeline.add(streammux)
    source_bin = create_source_bin(1, uri_name)
    pipeline.add(source_bin)
    sinkpad = streammux.get_request_pad("sink_1")
    srcpad = source_bin.get_static_pad("src")
    srcpad.link(sinkpad)

    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    filter1.set_property("caps", caps1)
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if is_aarch64():
        # A plain queue stands in for nvegltransform here, since the output
        # goes to a fakesink rather than a display.
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("queue", "queue")
    sink = Gst.ElementFactory.make("fakesink", "fakesink")

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 640)
    streammux.set_property('height', 480)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    ds_pgie_config = '/home/proxeye/dev/proxeye/proxeye/resources/ds_pgie_config.txt'
    pgie.set_property('config-file-path', ds_pgie_config)
    sink.set_property('sync', False)
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", 640)
    tiler.set_property("height", 480)
    sgie1.set_property('config-file-path', "dstest2_sgie1_config.txt")
    sgie2.set_property('config-file-path', "dstest2_sgie2_config.txt")
    sgie3.set_property('config-file-path', "dstest2_sgie3_config.txt")

    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)
        if key == 'enable-past-frame':
            tracker_enable_past_frame = config.getint('tracker', key)
            tracker.set_property('enable_past_frame',
                                 tracker_enable_past_frame)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie1)
    pipeline.add(sgie2)
    pipeline.add(sgie3)
    # pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie1)
    sgie1.link(sgie2)
    sgie2.link(sgie3)
    sgie3.link(nvvidconv)
    # pgie.link(nvvidconv1)
    # nvvidconv1.link(filter1)
    # filter1.link(nvvidconv)
    # tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    GObject.idle_add(refreshApp)

    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_src_pad = nvosd.get_static_pad("src")
    # tiler_src_pad = tiler.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad of nvosd \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                osd_sink_pad_buffer_probe, 0)

    print("Now playing...")
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
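The GObject.idle_add(refreshApp) call above expects a callback defined elsewhere in that project; its body is unknown. A purely hypothetical placeholder, shown only because the GLib contract matters (returning True keeps the idle callback scheduled, returning False removes it):

# Hypothetical refreshApp placeholder; the real implementation is not shown
# in this collection.
def refreshApp():
    # e.g. poll application state or service a UI here
    return True   # keep the idle source alive on the GLib main loop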
def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)
    GObject.threads_init()
    Gst.init(None)
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline: sys.stderr.write(" Unable to create Pipeline \n")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source: sys.stderr.write(" Unable to create Source \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser: sys.stderr.write(" Unable to create h264 parser \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder: sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie: sys.stderr.write(" Unable to create pgie \n")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv: sys.stderr.write(" Unable to create nvvidconv \n")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd: sys.stderr.write(" Unable to create nvosd \n")
    # nvegltransform is only needed (and only exists) on Jetson; the sink
    # must be created on both platforms
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink: sys.stderr.write(" Unable to create egl sink \n")
    source.set_property('location', args[1])
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    #pgie.set_property('config-file-path', "DeepStream/config_infer_primary_yoloV3_tiny.txt")
    pgie.set_property('config-file-path', "DeepStream/config_infer_primary_yoloV4.txt")
    #pgie.set_property('config-file-path', "DeepStream/config_infer_primary_yoloV3.txt")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad: sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad: sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
    cv2.destroyAllWindows()
def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline: sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin: sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad: sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    queue6 = Gst.ElementFactory.make("queue", "queue6")
    queue7 = Gst.ElementFactory.make("queue", "queue7")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie: sys.stderr.write(" Unable to create pgie \n")
    print("Creating nvtracker \n ")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker: sys.stderr.write(" Unable to create tracker \n")
    print("Creating nvdsanalytics \n ")
    nvanalytics = Gst.ElementFactory.make("nvdsanalytics", "analytics")
    if not nvanalytics: sys.stderr.write(" Unable to create nvanalytics \n")
    nvanalytics.set_property("config-file", "config_nvdsanalytics.txt")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler: sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv: sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd: sys.stderr.write(" Unable to create nvosd \n")
    nvosd.set_property('process-mode', OSD_PROCESS_MODE)
    nvosd.set_property('display-text', OSD_DISPLAY_TEXT)
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd: sys.stderr.write(" Unable to create nvvidconv_postosd \n")
    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))
    codec = "H264"
    bitrate = 4000000
    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder: sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)
    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay: sys.stderr.write(" Unable to create rtppay")
    # Make the UDP sink
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink: sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)
    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dsnvanalytics_pgie_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              "with number of sources", number_sources, "\n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('dsnvanalytics_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
        if key == 'enable-past-frame':
            tracker_enable_past_frame = config.getint('tracker', key)
            tracker.set_property('enable_past_frame', tracker_enable_past_frame)
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(nvanalytics)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)
    # We link elements in the following order (with a queue between stages):
    # sourcebin -> streammux -> nvinfer -> nvtracker -> nvdsanalytics ->
    # nvtiler -> nvvideoconvert -> nvdsosd -> sink
    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(nvanalytics)
    nvanalytics.link(queue4)
    queue4.link(tiler)
    tiler.link(queue5)
    queue5.link(nvvidconv)
    nvvidconv.link(queue6)
    queue6.link(nvosd)
    nvosd.link(queue7)
    queue7.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Start streaming
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n" % rtsp_port_num)
    nvanalytics_src_pad = nvanalytics.get_static_pad("src")
    if not nvanalytics_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        nvanalytics_src_pad.add_probe(Gst.PadProbeType.BUFFER, nvanalytics_src_pad_buffer_probe, 0)
    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)
    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
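
# A quick way to eyeball the RTSP stream the function above publishes is an
# OpenCV client on the same host. This is a convenience sketch, not part of
# the samples; it assumes OpenCV was built with GStreamer or FFmpeg RTSP
# support and that the pipeline is already PLAYING.
import cv2

cap = cv2.VideoCapture("rtsp://localhost:8554/ds-test")
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow("ds-test", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()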
def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1
    print("number_sources : ", number_sources)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline: sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        source_bin = create_source_bin(i, uri_name)
        if not source_bin: sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad: sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie: sys.stderr.write(" Unable to create pgie \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker: sys.stderr.write(" Unable to create tracker \n")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler: sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv: sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd: sys.stderr.write(" Unable to create nvosd \n")
    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform: sys.stderr.write(" Unable to create transform \n")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink: sys.stderr.write(" Unable to create egl sink \n")
    streammux.set_property('width', 640)
    streammux.set_property('height', 480)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "config_infer_primary_yoloV4.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    sink.set_property("qos", 0)
    sink.set_property('sync', False)
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              "with number of sources", number_sources, "\n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # note: the probe is attached to the tracker's src pad here, not the tiler's
    tiler_src_pad = tracker.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)
    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)
    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
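
# Several of the main() variants above populate fps_streams with GETFPS
# objects, but the class itself is not reproduced in this collection. Below
# is a minimal sketch, modeled on the common FPS helper shipped with the
# deepstream_python_apps samples: it counts frames and prints an average
# every five seconds.
import time

class GETFPS:
    def __init__(self, stream_id):
        self.start_time = time.time()
        self.is_first = True
        self.frame_count = 0
        self.stream_id = stream_id

    def get_fps(self):
        end_time = time.time()
        if self.is_first:
            # ignore the startup delay before the first frame arrives
            self.start_time = end_time
            self.is_first = False
        if end_time - self.start_time > 5:
            print("FPS of stream", self.stream_id, "is",
                  float(self.frame_count) / 5.0)
            self.frame_count = 0
            self.start_time = end_time
        else:
            self.frame_count += 1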
def main(args):
    # Check input arguments (at least one URI plus the output folder)
    if len(args) < 3:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" % args[0])
        sys.exit(1)
    print("Arguments:", args)
    for i in range(0, len(args) - 2):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 2
    print("Number of sources:", number_sources)
    global folder_name
    folder_name = args[-1]
    print(folder_name)
    if path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)
    else:
        os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # We load the database of known faces here if there is one, and we define
    # the output DB name if we are only reading
    pwd = os.getcwd()
    known_faces_db_name = pwd + '/data/encoded_known_faces/knownFaces.dat'
    output_db_name = pwd + '/data/video_encoded_faces/test_video_default.data'
    set_known_faces_db_name(known_faces_db_name)
    set_output_db_name(output_db_name)
    # try to read the information from the known faces DB
    total, encodings, metadata = biblio.read_pickle(known_faces_db_name, False)
    set_known_faces_db(total, encodings, metadata)
    if total == 0:
        action = 'read'
    else:
        action = 'find'
    set_video_initial_time()
    if com.file_exists_and_not_empty(output_db_name):
        action = 'compare'
    set_action(actions[action])
    #print(action)
    #quit()
    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline: sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin: sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad: sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie: sys.stderr.write(" Unable to create pgie \n")
    # Creation of tracking to follow up the model face
    # April 21st # ERM
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker: sys.stderr.write(" Unable to create tracker \n")
    # Add nvvidconv1 and filter1 to convert the frames to RGBA,
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1: sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1: sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler: sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv: sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd: sys.stderr.write(" Unable to create nvosd \n")
    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform: sys.stderr.write(" Unable to create transform \n")
    print("Creating EGLSink \n")
    # edgar: changed this line so that no video is displayed
    #sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    sink = Gst.ElementFactory.make("fakesink", "fakesink")
    if not sink: sys.stderr.write(" Unable to create sink \n")
    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)
    # Meraki cameras are 720p
    #streammux.set_property('width', 1920)
    streammux.set_property('width', 1280)
    #streammux.set_property('height', 1080)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    print('CURRENT_DIR', CURRENT_DIR)
    pgie.set_property('config-file-path', CURRENT_DIR + "/configs/pgie_config_facenet.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              "with number of sources", number_sources, "\n")
        pgie.set_property("batch-size", number_sources)
    # Set properties of tracker
    # April 21st # ERM
    config = configparser.ConfigParser()
    config.read('configs/tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        elif key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        elif key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        elif key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        elif key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        elif key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("sync", 0)
    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)
    print("Adding elements to Pipeline \n")
    # Add tracker in pipeline
    # April 21st # ERM
    pipeline.add(pgie)
    pipeline.add(tracker)  # Tracker
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)  # added for the tracker
    # pgie.link(nvvidconv1)  # replaced by the tracker link below
    tracker.link(nvvidconv1)  # links the tracker to the remaining elements
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    #tiler_sink_pad = tiler.get_static_pad("sink")
    #if not tiler_sink_pad:
    #    sys.stderr.write(" Unable to get sink pad \n")
    #else:
    #    tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)
    tiler_src_pad = tiler.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)
    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if i != 0:
            print(i, ": ", source)
    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)
    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline: sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streammux \n ")
    #IK: read stream/mp4 file
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    #IK: number_sources is important!!!
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin: sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad: sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    #IK: Primary neural net
    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie1 = Gst.ElementFactory.make("nvinfer", "primary-inference-1")
    if not pgie1: sys.stderr.write(" Unable to create pgie1 \n")
    #IK: tracker
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker: sys.stderr.write(" Unable to create tracker \n")
    #IK: secondary network 1
    sgie = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    if not sgie: sys.stderr.write(" Unable to make sgie \n")
    #IK: secondary network 2
    pgie2 = Gst.ElementFactory.make("nvinfer", "primary-inference-2")
    if not pgie2: sys.stderr.write(" Unable to make pgie2 \n")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler: sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv: sys.stderr.write(" Unable to create nvvidconv \n")
    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd: sys.stderr.write(" Unable to create nvosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd: sys.stderr.write(" Unable to create nvvidconv_postosd \n")
    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))
    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder: sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)
    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay: sys.stderr.write(" Unable to create rtppay")
    #IK: RTSP sink
    # Make the UDP sink
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink: sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)
    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    #IK: set config file for each network
    # Set properties of pgie and sgie
    pgie1.set_property('config-file-path', "dstest2_pgie1_config.txt")
    sgie.set_property('config-file-path', "dstest2_sgie_config.txt")
    pgie2.set_property('config-file-path', "dstest2_pgie2_config.txt")
    tiler.set_property("rows", 1)
    tiler.set_property("columns", 1)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
    #IK: populate pipeline
    print("Adding elements to Pipeline \n")
    # (streammux was already added to the pipeline above)
    pipeline.add(pgie1)
    pipeline.add(tracker)
    pipeline.add(sgie)
    pipeline.add(pgie2)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)
    #IK: elements linked together
    # we link the elements together:
    # streammux -> pgie1 -> tracker -> sgie -> pgie2 -> tiler ->
    # nvvidconv -> nvosd -> nvvidconv_postosd -> caps -> encoder ->
    # rtppay -> udpsink
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie1)
    pgie1.link(tracker)
    tracker.link(sgie)
    sgie.link(pgie2)
    pgie2.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Start streaming
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n" % rtsp_port_num)
    # Let's add a probe to get informed of the generated metadata; we add the
    # probe to the sink pad of the osd element, since by that time the buffer
    # will have all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad: sys.stderr.write(" Unable to get sink pad of nvosd \n")
    #IK: Custom method call to display data on screen
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline: sys.stderr.write(" Unable to create Pipeline \n")
    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source: sys.stderr.write(" Unable to create Source \n")
    # Since the data format in the input file is elementary h264 stream,
    # we need a h264parser
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser: sys.stderr.write(" Unable to create h264 parser \n")
    # Use nvdec_h264 for hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder: sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n")
    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie: sys.stderr.write(" Unable to create pgie \n")
    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv: sys.stderr.write(" Unable to create nvvidconv \n")
    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd: sys.stderr.write(" Unable to create nvosd \n")
    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink: sys.stderr.write(" Unable to create egl sink \n")
    print("Playing file %s " % args[1])
    source.set_property('location', args[1])
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")
    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)
    # we link the elements together:
    # file-source -> h264-parser -> nvh264-decoder ->
    # nvinfer -> nvvidconv -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad: sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Let's add a probe to get informed of the generated metadata; we add the
    # probe to the sink pad of the osd element, since by that time the buffer
    # will have all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad: sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):
    parser = OptionParser()
    parser.add_option("--width", dest="width", help="Width", metavar="STR")
    parser.add_option("--height", dest="height", help="Height", metavar="STR")
    (options, args) = parser.parse_args()
    width = int(options.width)
    height = int(options.height)
    device = args[0]
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # create pipeline
    pipeline = Gst.Pipeline()
    source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
    caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
    vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
    nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    # properties
    caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
    caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
    source.set_property('device', device)
    streammux.set_property('width', width)
    streammux.set_property('height', height)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "ds_jet.txt")
    sink.set_property('sync', False)
    # add elements to pipeline
    pipeline.add(source)
    pipeline.add(caps_v4l2src)
    pipeline.add(vidconvsrc)
    pipeline.add(nvvidconvsrc)
    pipeline.add(caps_vidconvsrc)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)
    # link pipeline
    source.link(caps_v4l2src)
    caps_v4l2src.link(vidconvsrc)
    vidconvsrc.link(nvvidconvsrc)
    nvvidconvsrc.link(caps_vidconvsrc)
    # source pipeline
    sinkpad = streammux.get_request_pad("sink_0")
    srcpad = caps_vidconvsrc.get_static_pad("src")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    # nvegltransform only exists (and is only added) on Jetson, so the final
    # link must branch on the platform as the other samples do
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # this starts the video capture playing
    pipeline.set_state(Gst.State.PLAYING)
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    loop.run()
    pipeline.set_state(Gst.State.NULL)
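
# Note that OptionParser.parse_args() reads sys.argv[1:] directly, so the
# `args` parameter of the main() above is effectively ignored and the device
# comes from the leftover positional argument. A hypothetical entry point
# (the script name is assumed):
#
#   python3 usb_cam_app.py --width 1280 --height 720 /dev/video0
#
if __name__ == '__main__':
    sys.exit(main(sys.argv))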
def main(args):
    # Check input arguments (at least one URI plus the output folder)
    if len(args) < 3:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" % args[0])
        sys.exit(1)
    for i in range(0, len(args) - 2):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 2
    global folder_name
    folder_name = args[-1]
    if path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
        sys.exit(1)
    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline: sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin: sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad: sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie: sys.stderr.write(" Unable to create pgie \n")
    # Add nvvidconv1 and filter1 to convert the frames to RGBA,
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1: sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1: sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler: sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv: sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd: sys.stderr.write(" Unable to create nvosd \n")
    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform: sys.stderr.write(" Unable to create transform \n")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink: sys.stderr.write(" Unable to create egl sink \n")
    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest_imagedata_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              "with number of sources", number_sources, "\n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("sync", 0)
    sink.set_property("qos", 0)
    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)
    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get sink pad of tiler \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)
    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if i != 0:
            print(i, ": ", source)
    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
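
# The frame-saving probe referenced above (tiler_sink_pad_buffer_probe) is
# not reproduced in this collection. The sketch below is modeled on the
# deepstream-imagedata-multistream sample; it depends on the
# NVBUF_MEM_CUDA_UNIFIED settings applied above (on dGPU) so the frame is
# CPU-accessible, and on the RGBA capsfilter upstream. The global
# `folder_name` and the JPEG naming scheme are assumptions taken from the
# app's setup code.
import numpy as np
import cv2
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
import pyds

def tiler_sink_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        # Returns a numpy view of the RGBA frame; copy it before mutating
        n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
        frame_copy = np.array(n_frame, copy=True, order='C')
        frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)
        cv2.imwrite("%s/stream_%d/frame_%d.jpg"
                    % (folder_name, frame_meta.pad_index, frame_meta.frame_num),
                    frame_copy)
        l_frame = l_frame.next
    return Gst.PadProbeReturn.OK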
def main(args):
    # Check input arguments (a config file and an encoder name are required)
    if len(args) != 3:
        sys.stderr.write("usage: %s <config> <encoder>\n" % args[0])
        sys.exit(1)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Set the model output resolution
    num_srcs = 2
    if args[1] == 'prisma_config.txt':
        op_size = 256
    if args[1] == 'deeplab_config.txt':
        op_size = 513
    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline: sys.stderr.write(" Unable to create Pipeline \n")
    # Source elements for reading from camera1
    print("Creating Source: Cam1... \n ")
    source_cam1 = Gst.ElementFactory.make("v4l2src", "camera-source1")
    source_cam1.set_property("device", "/dev/video1")
    vidconv_src1 = Gst.ElementFactory.make("videoconvert", "vidconv_src1")
    nvvidconv_src1 = Gst.ElementFactory.make("nvvideoconvert", "nvvidconv_src1")
    filter_src1 = Gst.ElementFactory.make("capsfilter", "filter_src1")
    nvvidconv_src1.set_property("nvbuf-memory-type", 0)
    caps_filter_src1 = Gst.Caps.from_string(
        "video/x-raw(memory:NVMM), format=NV12, width=1280, height=720, framerate=20/1"
    )  # Set max webcam resolution
    filter_src1.set_property("caps", caps_filter_src1)
    if not source_cam1: sys.stderr.write(" Unable to create source: cam1 \n")
    # Source elements for reading from camera2
    print("Creating Source: Cam2... \n ")
    source_cam2 = Gst.ElementFactory.make("v4l2src", "camera-source2")
    source_cam2.set_property("device", "/dev/video0")
    vidconv_src2 = Gst.ElementFactory.make("videoconvert", "vidconv_src2")
    nvvidconv_src2 = Gst.ElementFactory.make("nvvideoconvert", "nvvidconv_src2")
    filter_src2 = Gst.ElementFactory.make("capsfilter", "filter_src2")
    nvvidconv_src2.set_property("nvbuf-memory-type", 0)
    caps_filter_src2 = Gst.Caps.from_string(
        "video/x-raw(memory:NVMM), format=NV12, width=640, height=480, framerate=20/1"
    )  # Set max webcam resolution
    filter_src2.set_property("caps", caps_filter_src2)
    if not source_cam2: sys.stderr.write(" Unable to create source: cam2 \n")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n")
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    # Use nvinfer to run inferencing on the muxer's output;
    # behaviour of inferencing is set through the config file
    seg = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not seg: sys.stderr.write(" Unable to create seg \n")
    seg.set_property('config-file-path', args[1])
    seg.set_property('batch-size', 1)
    # Use nvsegvisual to visualize segmentation results
    nvsegvisual = Gst.ElementFactory.make("nvsegvisual", "nvsegvisual")
    if not nvsegvisual: sys.stderr.write(" Unable to create nvsegvisual \n")
    nvsegvisual.set_property('batch-size', 1)
    nvsegvisual.set_property('width', op_size)
    nvsegvisual.set_property('height', op_size)
    # Use nvtiler to composite the batched frames into a 2D tiled array
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler: sys.stderr.write(" Unable to create tiler \n")
    tiler_rows = int(math.sqrt(num_srcs))
    tiler_columns = int(math.ceil((1.0 * num_srcs) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", op_size * 2)
    tiler.set_property("height", op_size)
    # Convert the output video
    nvvidconv_posttile = Gst.ElementFactory.make("nvvideoconvert", "convertor_posttile")
    if not nvvidconv_posttile: sys.stderr.write(" Unable to create nvvidconv_posttile \n")
    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))
    # Make the encoder
    codec = args[2]
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder: sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', 4000000)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)
    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay: sys.stderr.write(" Unable to create rtppay")
    print("Creating UDP Sink... \n")
    # Make the UDP sink
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink: sys.stderr.write(" Unable to create udpsink")
    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('sync', 0)
    # Set up the pipeline
    print("Adding elements to Pipeline...\n")
    # Add all elements into the pipeline
    pipeline.add(source_cam1)
    pipeline.add(vidconv_src1)
    pipeline.add(nvvidconv_src1)
    pipeline.add(filter_src1)
    pipeline.add(source_cam2)
    pipeline.add(vidconv_src2)
    pipeline.add(nvvidconv_src2)
    pipeline.add(filter_src2)
    pipeline.add(streammux)
    pipeline.add(seg)
    pipeline.add(nvsegvisual)
    pipeline.add(tiler)
    pipeline.add(nvvidconv_posttile)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)
    # Connect the pipeline elements together
    print("Linking elements in the Pipeline...\n")
    # Link the elements together for the first camera source:
    # camera-source -> videoconvert -> nvvideoconvert ->
    # capsfilter -> nvstreammux
    source_cam1.link(vidconv_src1)
    vidconv_src1.link(nvvidconv_src1)
    nvvidconv_src1.link(filter_src1)
    sinkpad1 = streammux.get_request_pad("sink_0")
    if not sinkpad1: sys.stderr.write(" Unable to get the sink pad of streammux for src1\n")
    srcpad1 = filter_src1.get_static_pad("src")
    if not srcpad1: sys.stderr.write(" Unable to get source pad of capsfilter for src1\n")
    srcpad1.link(sinkpad1)
    # Link the elements together for the second camera source:
    # camera-source -> videoconvert -> nvvideoconvert ->
    # capsfilter -> nvstreammux
    source_cam2.link(vidconv_src2)
    vidconv_src2.link(nvvidconv_src2)
    nvvidconv_src2.link(filter_src2)
    sinkpad2 = streammux.get_request_pad("sink_1")
    if not sinkpad2: sys.stderr.write(" Unable to get the sink pad of streammux for src2\n")
    srcpad2 = filter_src2.get_static_pad("src")
    if not srcpad2: sys.stderr.write(" Unable to get source pad of capsfilter for src2\n")
    srcpad2.link(sinkpad2)
    # Link the elements together for encoding and streaming:
    # nvstreammux -> nvinfer -> nvsegvisual -> nvtiler ->
    # nvvideoconvert -> capsfilter -> nvv4l2h264enc -> rtph264pay -> udpsink
    streammux.link(seg)
    seg.link(nvsegvisual)
    nvsegvisual.link(tiler)
    tiler.link(nvvidconv_posttile)
    nvvidconv_posttile.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Start streaming
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-seg", factory)
    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-seg ***\n\n" % rtsp_port_num)
    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):
    # Check input arguments
    for i in range(0, len(args)):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline: sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux: sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin: sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad: sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    if gie == "nvinfer":
        pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    else:
        pgie = Gst.ElementFactory.make("nvinferserver", "primary-inference")
    if not pgie: sys.stderr.write(" Unable to create pgie \n")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler: sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv: sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd: sys.stderr.write(" Unable to create nvosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", "convertor_postosd")
    if not nvvidconv_postosd: sys.stderr.write(" Unable to create nvvidconv_postosd \n")
    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))
    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder: sys.stderr.write(" Unable to create encoder")
    encoder.set_property("bitrate", bitrate)
    if is_aarch64():
        encoder.set_property("preset-level", 1)
        encoder.set_property("insert-sps-pps", 1)
        encoder.set_property("bufapi-version", 1)
    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay: sys.stderr.write(" Unable to create rtppay")
    # Make the UDP sink
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink: sys.stderr.write(" Unable to create udpsink")
    sink.set_property("host", "224.224.255.255")
    sink.set_property("port", updsink_port_num)
    sink.set_property("async", False)
    sink.set_property("sync", 1)
    streammux.set_property("width", 1920)
    streammux.set_property("height", 1080)
    streammux.set_property("batch-size", 1)
    streammux.set_property("batched-push-timeout", 4000000)
    if gie == "nvinfer":
        pgie.set_property("config-file-path", "dstest1_pgie_config.txt")
    else:
        pgie.set_property("config-file-path", "dstest1_pgie_inferserver_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              "with number of sources", number_sources, "\n")
        pgie.set_property("batch-size", number_sources)
    print("Adding elements to Pipeline \n")
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos", 0)
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(tiler)
    tiler.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Start streaming
    rtsp_port_num = 8554
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        '( udpsrc name=pay0 port=%d buffer-size=524288 caps="application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 " )'
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n" % rtsp_port_num)
    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except BaseException:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)