Example #1
def main():

    # Standard GStreamer initialization
    Gst.debug_set_active(True)
    Gst.debug_set_default_threshold(4)
    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    sink = create_element_or_error("nvoverlaysink", "overlay")

    source.set_property('sensor-id', 0)

    pipeline.add(source)
    pipeline.add(sink)

    source.link(sink)

    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
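All of the examples below call two shared helpers, create_element_or_error and bus_call, whose implementations are not shown on this page. A minimal sketch of what they could look like, together with the PyGObject imports the snippets assume (the exact originals may differ):

import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst


def create_element_or_error(factory_name, element_name):
    # Create a GStreamer element and bail out if the plugin is missing.
    element = Gst.ElementFactory.make(factory_name, element_name)
    if not element:
        sys.stderr.write("Unable to create element " + factory_name + "\n")
        sys.exit(1)
    return element


def bus_call(bus, message, loop):
    # Quit the main loop on end-of-stream or error bus messages.
    if message.type == Gst.MessageType.EOS:
        print("End of stream")
        loop.quit()
    elif message.type == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s\n" % (err, debug))
        loop.quit()
    return True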
Example #2
def main():
    
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        print("Unable to create Pipeline")
        return False
    
    # Create GST Elements
    source = create_element_or_error("filesrc", "file-source")
    
    demuxer = create_element_or_error("matroskademux", "demuxer")
    parser = create_element_or_error("h264parse", "parser")
    muxer = create_element_or_error("flvmux", "muxer")
    sink = create_element_or_error("rtmpsink", "sink")

    if not (source and demuxer and parser and muxer and sink):
        return

    # Set Element Properties
    source.set_property('location', '../streamit-virtual-edge-appliance/storage/tests/concourse/1.MKV')
    sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/stream-test')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(demuxer)
    pipeline.add(parser)
    pipeline.add(muxer)
    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(demuxer)
    demuxer.link(parser)
    parser.link(muxer)
    muxer.link(sink)
    
    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Example #3
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write("Unable to create Pipeline")

    print("Creating Elements")
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    caps = create_element_or_error("capsfilter", "source-caps-source")
    caps.set_property(
        "caps",
        Gst.Caps.from_string(
            "video/x-raw(memory:NVMM),width=1280,height=720,framerate=30/1,format=NV12"
        ))
    convertor = create_element_or_error("nvvidconv", "converter-1")
    s_encoder = create_element_or_error("jpegenc", "snapshot-encoder")
    s_sink = create_element_or_error("filesink", "snapshot-sink")

    print("Set element properties")
    source.set_property('sensor-id', 1)
    source.set_property('num-buffers', 1)
    s_sink.set_property('location', 'python-test.jpeg')

    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(caps)
    pipeline.add(convertor)
    pipeline.add(s_encoder)
    pipeline.add(s_sink)

    print("Linking elements in the Pipeline")
    source.link(caps)
    caps.link(convertor)
    convertor.link(s_encoder)
    s_encoder.link(s_sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Example #4
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        print("Unable to create Pipeline")
        return False

    # Create GST Elements
    source = create_element_or_error("nvarguscamerasrc", "camera-source")

    encoder = create_element_or_error("nvv4l2h264enc", "encoder")
    parser = create_element_or_error("h264parse", "parser")
    muxer = create_element_or_error("flvmux", "muxer")
    sink = create_element_or_error("rtmpsink", "sink")

    # Set Element Properties
    source.set_property('sensor-id', 0)
    sink.set_property('location',
                      'rtmp://media.streamit.live/LiveApp/stream-test')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(encoder)
    pipeline.add(parser)
    pipeline.add(muxer)
    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(encoder)
    encoder.link(parser)
    parser.link(muxer)
    muxer.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Example #5
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")

    # Create Elements
    source = create_element_or_error("filesink", "camera-source")
    convertor = create_element_or_error("nvvidconv", "converter-1")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "egl-overlay")

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(convertor)
    pipeline.add(sink)
    pipeline.add(transform)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(convertor)
    convertor.link(transform)
    transform.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Example #6
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")

    # Create Source Element
    source = create_element_or_error('nvarguscamerasrc', 'camera-source')
    encoder = create_element_or_error('nvv4l2h265enc', 'encoder')
    parser = create_element_or_error('h265parse', 'parser')
    sink = create_element_or_error('filesink', 'sink')

    # Set Element Properties
    source.set_property('sensor-id', 0)
    encoder.set_property('bitrate', 8000000)
    sink.set_property('location', 'prueba.mp4')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(encoder)
    pipeline.add(parser)
    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(encoder)
    encoder.link(parser)
    parser.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
def create_source_bin(uri):
    print("Creating source bin")
    source_bin = Gst.Bin.new('source-bin')
    if not source_bin:
        print("Unable to create source bin")
        exit(0)

    uri_decode_bin = create_element_or_error("uridecodebin", "uri-decode-bin")
    uri_decode_bin.set_property("uri", uri)
    uri_decode_bin.connect("pad-added", cb_newpad, source_bin)
    uri_decode_bin.connect("child-added", decodebin_child_added, source_bin)
    Gst.Bin.add(source_bin, uri_decode_bin)
    bin_pad = source_bin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))

    if not bin_pad:
        print("Failed to add ghost pad in source bin")
        exit(0)

    return source_bin
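create_source_bin above connects uridecodebin to two callbacks, cb_newpad and decodebin_child_added, that are not included here. A hedged sketch of their expected shape, assuming the bin's ghost "src" pad is retargeted once a decoded video pad appears:

def cb_newpad(decodebin, decoder_src_pad, source_bin):
    # When uridecodebin exposes a decoded pad, point the bin's ghost pad at it.
    caps = decoder_src_pad.get_current_caps()
    if not caps:
        caps = decoder_src_pad.query_caps(None)
    name = caps.get_structure(0).get_name()
    if name.startswith("video"):
        bin_ghost_pad = source_bin.get_static_pad("src")
        if not bin_ghost_pad.set_target(decoder_src_pad):
            print("Failed to set ghost pad target")


def decodebin_child_added(child_proxy, obj, name, source_bin):
    # Recurse into nested decodebins so their pads are reported as well.
    if "decodebin" in name:
        obj.connect("child-added", decodebin_child_added, source_bin)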
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        print("Unable to create Pipeline")
        return False

    # Create GST Elements
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    caps = create_element_or_error("capsfilter", "source-caps")
    caps.set_property(
        "caps",
        Gst.Caps.from_string(
            "video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, framerate=30/1, format=(string)NV12"
        ))
    converter = create_element_or_error('nvvidconv', 'converter')
    capsConverter = create_element_or_error("capsfilter", "converter-caps")
    capsConverter.set_property(
        "caps",
        Gst.Caps.from_string(
            "video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, framerate=30/1, format=(string)NV12"
        ))
    encoder = create_element_or_error("nvv4l2h264enc", "encoder")
    parser = create_element_or_error("h264parse", "parser")
    muxer = create_element_or_error("flvmux", "muxer")
    queue = create_element_or_error("queue", "queue")
    sink = create_element_or_error("rtmpsink", "sink")

    # Set Element Properties
    # converter.set_property('flip-method', 1)
    encoder.set_property('bitrate', 8000000)
    # encoder.set_property('maxperf-enable', True)
    source.set_property('sensor-id', 0)
    # source.set_property('do-timestamp', True)
    # muxer.set_property('streamable', True)
    sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/test')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(caps)
    pipeline.add(converter)
    pipeline.add(capsConverter)
    pipeline.add(encoder)
    pipeline.add(parser)
    pipeline.add(muxer)
    pipeline.add(queue)
    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(caps)
    caps.link(converter)
    converter.link(capsConverter)
    capsConverter.link(encoder)
    encoder.link(parser)
    parser.link(muxer)
    muxer.link(queue)
    queue.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
def main(args):

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")
    print("Creating streamux")

    # Muxer
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pipeline.add(streammux)

    # Source
    source_bin = create_source_bin("file:/deepstream-examples/Analitycs/traffic.mp4")

    if not source_bin:
        sys.stderr.write("Unable to create source bin")
    pipeline.add(source_bin)

    sinkpad = streammux.get_request_pad('sink_0') 
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin")
    srcpad = source_bin.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin")
    srcpad.link(sinkpad)

    # Primary Inference
    pgie = create_element_or_error("nvinfer", "primary-inference")
    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt")
    pipeline.add(pgie)
    streammux.link(pgie)

    # Tracker
    tracker = create_element_or_error("nvtracker", "tracker")
    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvdcf.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('enable-batch-process', 1)
    pipeline.add(tracker)
    pgie.link(tracker)

    # Secondary Inference
    sgie = create_element_or_error("nvinfer", "secondary-inference")
    sgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_secondary_carmake.txt")
    sgie.set_property('unique-id', 12345)
    pipeline.add(sgie)
    tracker.link(sgie)

    # Analytics
    analytics = create_element_or_error("nvdsanalytics", "analytics")
    analytics.set_property("config-file", "./analitycs.txt")
    pipeline.add(analytics)
    sgie.link(analytics)

    # Converter
    converter = create_element_or_error("nvvideoconvert", "convertor")
    pipeline.add(converter)
    analytics.link(converter)
    
    # Nvosd
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    # nvosd.set_property('process-mode', 2)
    nvosd.set_property('display-text', False)
    nvosd.set_property('display-mask', False)
    nvosd.set_property('display-bbox', True)
    pipeline.add(nvosd)
    converter.link(nvosd)

    # Transform
    transform=create_element_or_error("nvegltransform", "nvegl-transform")
    pipeline.add(transform)
    nvosd.link(transform)

    # Sink
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")
    pipeline.add(sink)
    transform.link(sink)

    # Probe
    # tracker_prove_src_pad = tracker.get_static_pad("src")
    # if not tracker_prove_src_pad:
    #     sys.stderr.write("Unable to get src pad")
    # else:
    #     tracker_prove_src_pad.add_probe(Gst.PadProbeType.BUFFER, handle_src_pad_buffer_probe, 0)

    analytics_probe_src_pad = analytics.get_static_pad("src")
    if not analytics_probe_src_pad:
        sys.stderr.write("Unable to get src pad")
    else:
        analytics_probe_src_pad.add_probe(Gst.PadProbeType.BUFFER, handle_src_pad_buffer_probe, 0)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    pipeline.set_state(Gst.State.NULL)
Example #10
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    print(Gst)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write("Unable to create Pipeline")

    # Create GST Source
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    caps = Gst.ElementFactory.make("capsfilter", "source-caps")
    caps.set_property(
        "caps",
        Gst.Caps.from_string(
            "video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, format=(string)NV12, framerate=(fraction)30/1"
        ))

    # Create Gst Threads
    tee = create_element_or_error("tee", "tee")
    streaming_queue = create_element_or_error("queue", "streaming_queue")
    recording_queue = create_element_or_error("queue", "recording_queue")
    display_queue = create_element_or_error("queue", "display_queue")

    # Create Gst Elements for Streaming Branch
    s_encoder = create_element_or_error("nvv4l2h264enc", "streaming-encoder")
    s_parser = create_element_or_error("h264parse", "streaming-parser")
    s_muxer = create_element_or_error("flvmux", "streaming-muxer")
    s_sink = create_element_or_error("rtmpsink", "streaming-sink")

    # Create Gst Elements for Recording Branch
    r_encoder = create_element_or_error('nvv4l2h264enc', 'recording-encoder')
    r_parser = create_element_or_error('h264parse', 'recording-parser')
    r_sink = create_element_or_error('splitmuxsink', 'recording-sink')

    # Create Gst Elements for Display Branch
    d_sink = create_element_or_error("nvoverlaysink", "display-sink")

    # Set Source Properties
    source.set_property('sensor-id', 0)
    source.set_property('saturation', 1.2)
    source.set_property('exposurecompensation', 1.2)
    source.set_property('wbmode', 0)

    # Set Streaming Properties
    s_sink.set_property('location',
                        'rtmp://media.streamit.link/LiveApp/streaming-test')

    # Set Display Properties
    d_sink.set_property('overlay', 1)
    d_sink.set_property('overlay-x', 0)
    d_sink.set_property('overlay-y', 0)
    d_sink.set_property('overlay-w', 640)
    d_sink.set_property('overlay-h', 360)

    # Set Recording Properties (30-second segments)
    r_sink.set_property('max-size-time', 30000000000)
    r_sink.connect('format-location', __location)

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(caps)
    pipeline.add(tee)
    pipeline.add(streaming_queue)
    pipeline.add(s_encoder)
    pipeline.add(s_parser)
    pipeline.add(s_muxer)
    pipeline.add(s_sink)
    pipeline.add(recording_queue)
    pipeline.add(r_encoder)
    pipeline.add(r_parser)
    pipeline.add(r_sink)
    pipeline.add(display_queue)
    pipeline.add(d_sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(caps)
    caps.link(tee)

    # Streaming Queue
    streaming_queue.link(s_encoder)
    s_encoder.link(s_parser)
    s_parser.link(s_muxer)
    s_muxer.link(s_sink)

    # Recording Queue
    recording_queue.link(r_encoder)
    r_encoder.link(r_parser)
    r_parser.link(r_sink)

    # Display Queue
    display_queue.link(d_sink)

    # Get pad templates from source
    tee_src_pad_template = tee.get_pad_template("src_%u")

    # Get source to Streaming Queue
    tee_streaming_pad = tee.request_pad(tee_src_pad_template, None, None)
    streaming_queue_pad = streaming_queue.get_static_pad("sink")

    # Get source to Recording Queue
    tee_recording_pad = tee.request_pad(tee_src_pad_template, None, None)
    recording_queue_pad = recording_queue.get_static_pad("sink")

    # Get source to Display Queue
    tee_display_pad = tee.request_pad(tee_src_pad_template, None, None)
    display_queue_pad = display_queue.get_static_pad("sink")

    # Link sources
    if (tee_streaming_pad.link(streaming_queue_pad) != Gst.PadLinkReturn.OK or
            tee_recording_pad.link(recording_queue_pad) != Gst.PadLinkReturn.OK
            or
            tee_display_pad.link(display_queue_pad) != Gst.PadLinkReturn.OK):
        print("ERROR: Tee streaming could not be linked")
        sys.exit(1)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
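The splitmuxsink in the example above connects its format-location signal to a __location handler that is not shown. A minimal sketch, assuming timestamped segment file names (the naming scheme here is illustrative):

import datetime


def __location(splitmux, fragment_id):
    # Return the file name splitmuxsink should use for the next segment.
    timestamp = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')
    return 'recording_%s_%04d.mp4' % (timestamp, fragment_id)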
Example #11
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")

    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    convertor = Gst.ElementFactory.make("nvvideoconvert", "convertor-1")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    convertor2 = Gst.ElementFactory.make("nvvidconv", "converter-2")
    sink = Gst.ElementFactory.make("nvoverlaysink", "egl-overlay")

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt"
    )
    pgie.set_property('batch-size', 1)
    pgie.set_property('unique-id', 1)

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Lets add probe to get informed of the meta data generated, we add probe to
    # the sink pad of the osd element, since by that time, the buffer would have
    # had got all the metadata.
    print('Create OSD Sink Pad')
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
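The OSD probe above relies on osd_sink_pad_buffer_probe, which is not shown; later examples assume similar buffer-probe callbacks (sink_pad_buffer_probe, handle_src_pad_buffer_probe, nvanalytics_src_pad_buffer_probe). A minimal sketch of the expected shape; a real DeepStream probe would walk the batch metadata via pyds before returning:

def osd_sink_pad_buffer_probe(pad, info, user_data):
    # Pass every buffer through; metadata inspection would happen here,
    # e.g. pyds.gst_buffer_get_nvds_batch_meta(hash(info.get_buffer())).
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer from probe info")
    return Gst.PadProbeReturn.OK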
Example #12
def main():
    print('Tracker Example')

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")
        return

    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "converter-2")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "egl-overlay")

    # Set Element Properties
    source.set_property('sensor-id', 1)
    source.set_property('bufapi-version', True)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('nvbuf-memory-type', 4)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt"
    )

    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvdcf.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('enable-batch-process', 1)
    # tracker.set_property('ll-config-file', '/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/tracker_config.yml')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    # pipeline.add(caps)
    pipeline.add(transform)
    pipeline.add(sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    # convertor2.link(caps)
    convertor2.link(transform)
    transform.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    print('Create OSD Sink Pad')
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write("Unable to get sink pad of nvosd")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, sink_pad_buffer_probe, 0)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
def main():

    cameras_list = [
        {"source": "/dev/video0", "name": "Camera 1"},
        {"source": "/dev/video1", "name": "Camera 2"},
    ]

    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    if not pipeline:
        print("Unable to create Pipeline")
        exit(0)

    streammux = create_element_or_error("nvstreammux", "stream-muxer")
    pipeline.add(streammux)

    for index, camera in enumerate(cameras_list):
        source = create_element_or_error("v4l2src", "source-" + camera['name'])
        source.set_property('device', camera["source"])
        caps = create_element_or_error("capsfilter", "source-caps-" + camera['name'])
        caps.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))

        pipeline.add(source)
        pipeline.add(caps)
        source.link(caps)

        sinkpad = streammux.get_request_pad('sink_' + str(index))
        srcpad = caps.get_static_pad("src")

        if not sinkpad:
            print("Unable to get streammux sink pad")
            exit(0)
        if not srcpad:
            print("Unable to get caps src pad")
            exit(0)
        srcpad.link(sinkpad)

    pgie = create_element_or_error("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    convertor = create_element_or_error("nvvideoconvert", "converter-1")
    tiler = create_element_or_error("nvmultistreamtiler", "nvtiler")
    convertor2 = create_element_or_error("nvvideoconvert", "converter-2")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")

    queue1=create_element_or_error("queue","queue1")
    queue2=create_element_or_error("queue","queue2")
    queue3=create_element_or_error("queue","queue3")
    queue4=create_element_or_error("queue","queue4")
    queue5=create_element_or_error("queue","queue5")
    queue6=create_element_or_error("queue","queue6")

    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)

    # Set Element Properties
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt")

    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so')
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 480)

    tiler.set_property("rows", 1)
    tiler.set_property("columns", 1)
    tiler.set_property("width", 1280)
    tiler.set_property("height", 720)
    sink.set_property("qos", 0)

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(transform)
    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")

    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(tiler)
    tiler.link(queue4)
    queue4.link(convertor)
    convertor.link(queue5)
    queue5.link(nvosd)
    nvosd.link(queue6)
    queue6.link(transform)
    transform.link(sink)
    
    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Example #14
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")
        return

    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    # src_caps = create_element_or_error("capsfilter", "source-caps-definition")
    # src_caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, framerate=30/1, format=(string)NV12"))

    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    convertor = Gst.ElementFactory.make("nvvideoconvert", "convertor-1")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    convertor2 = Gst.ElementFactory.make("nvvidconv", "converter-2")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "egl-overlay")

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "./nv-inferance-config-files/config_infer_primary_trafficcamnet.txt")

    #Set properties of tracker
    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 384)
    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvdcf.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('ll-config-file', './tracker_config.yml')

    # Add Elements to Pipeline
    pipeline.add(source)
    # pipeline.add(src_caps)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(transform)
    pipeline.add(sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    source.link(streammux)
    # src_caps.link(streammux)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(transform)
    transform.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Attach a probe to the tracker sink pad
    tracker_sinkpad = tracker.get_static_pad("sink")
    if not tracker_sinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd")

    tracker_sinkpad.add_probe(Gst.PadProbeType.BUFFER,
                              osd_sink_pad_buffer_probe, 0)

    # Start play back and listen to events
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")

    # Create GST Source
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "convertor-2")

    # Create Gst Threads
    tee = create_element_or_error("tee", "tee")
    streaming_queue = create_element_or_error("queue", "streaming_queue")
    recording_queue = create_element_or_error("queue", "recording_queue")

    # Create Gst Elements for Streaming Branch
    s_encoder = create_element_or_error("nvv4l2h264enc", "streaming-encoder")
    s_parser = create_element_or_error("h264parse", "streaming-parser")
    s_muxer = create_element_or_error("flvmux", "streaming-muxer")
    s_sink = create_element_or_error("rtmpsink", "streaming-sink")

    # Create Gst Elements for Recording Branch
    r_encoder = create_element_or_error('nvv4l2h265enc', 'encoder')
    r_parser = create_element_or_error('h265parse', 'parser')
    r_sink = create_element_or_error('filesink', 'sink')

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt"
    )
    s_sink.set_property('location',
                        'rtmp://media.streamit.live/LiveApp/streaming-test')
    r_encoder.set_property('bitrate', 8000000)
    r_sink.set_property(
        'location', 'video_' + str(datetime.datetime.utcnow().date()) + '.mp4')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(tee)
    pipeline.add(streaming_queue)
    pipeline.add(s_encoder)
    pipeline.add(s_parser)
    pipeline.add(s_muxer)
    pipeline.add(s_sink)
    pipeline.add(recording_queue)
    pipeline.add(r_encoder)
    pipeline.add(r_parser)
    pipeline.add(r_sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(tee)

    # Streaming Queue
    streaming_queue.link(s_encoder)
    s_encoder.link(s_parser)
    s_parser.link(s_muxer)
    s_muxer.link(s_sink)

    # Recording Queue
    recording_queue.link(r_encoder)
    r_encoder.link(r_parser)
    r_parser.link(r_sink)

    # Get pad templates from source
    tee_src_pad_template = tee.get_pad_template("src_%u")

    # Get source to Streaming Queue
    tee_streaming_pad = tee.request_pad(tee_src_pad_template, None, None)
    streaming_queue_pad = streaming_queue.get_static_pad("sink")

    # Get source to recording Queue
    tee_recording_pad = tee.request_pad(tee_src_pad_template, None, None)
    recording_queue_pad = recording_queue.get_static_pad("sink")

    # Link sources
    if (tee_streaming_pad.link(streaming_queue_pad) != Gst.PadLinkReturn.OK
            or tee_recording_pad.link(recording_queue_pad) !=
            Gst.PadLinkReturn.OK):
        print("ERROR: Tees could not be linked")
        sys.exit(1)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Example #16
def main(args):

    # Standard GStreamer initialization
    cameras_list = [
        {
            "source": 0,
            "name": "camera1"
        },
        {
            "source": 1,
            "name": "camera2"
        },
    ]

    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    # Muxer
    muxer = create_element_or_error("nvstreammux", "muxer")
    muxer.set_property('live-source', True)
    muxer.set_property('sync-inputs', True)
    muxer.set_property('width', 720)
    muxer.set_property('height', 480)
    muxer.set_property('batch-size', 3)
    muxer.set_property('batched-push-timeout', 4000000)
    pipeline.add(muxer)

    # Primary Inference
    pgie = create_element_or_error("nvinfer", "primary-inference")
    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt"
    )
    pipeline.add(pgie)
    muxer.link(pgie)

    #Tracker
    tracker = create_element_or_error("nvtracker", "tracker")
    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream/lib/libnvds_mot_klt.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('enable-batch-process', 1)
    pipeline.add(tracker)
    pgie.link(tracker)

    # Analytics
    analytics = create_element_or_error("nvdsanalytics", "analytics")
    analytics.set_property("config-file", "./../Analitycs/analitycs.txt")
    pipeline.add(analytics)
    tracker.link(analytics)

    # Converter
    converterOsd = create_element_or_error("nvvideoconvert",
                                           "to-osd-convertor")
    pipeline.add(converterOsd)
    analytics.link(converterOsd)

    # Demuxer
    demux = create_element_or_error("nvstreamdemux", "demuxer")
    pipeline.add(demux)
    converterOsd.link(demux)

    # Sources
    for camera in cameras_list:
        source = create_element_or_error("nvarguscamerasrc",
                                         "source-" + camera['name'])
        source.set_property('sensor-id', camera['source'])

        caps = create_element_or_error("capsfilter",
                                       "source-caps-" + camera['name'])
        caps.set_property(
            "caps",
            Gst.Caps.from_string(
                "video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, framerate=(fraction)30/1, format=(string)NV12"
            ))

        source.set_property('do-timestamp', True)
        source.set_property('bufapi-version', True)
        source.set_property('tnr-mode', 2)
        source.set_property('ee-mode', 2)
        source.set_property('aeantibanding', 0)

        pipeline.add(source)
        pipeline.add(caps)

        source.link(caps)

        srcpad = caps.get_static_pad("src")
        sinkpad = muxer.get_request_pad('sink_' + str(camera['source']))

        if not sinkpad:
            print("Unable to create source sink pad")
            exit(0)
        if not srcpad:
            print("Unable to create source src pad")
            exit(0)
        srcpad.link(sinkpad)

    # Outputs
    for camera in cameras_list:

        queue = create_element_or_error("queue", "queue-" + camera['name'])
        pipeline.add(queue)

        _srcpad = demux.get_request_pad("src_" + str(camera['source']))
        if not _srcpad:
            print("Unable to create output src pad")
            exit(0)

        _sinkpad = queue.get_static_pad('sink')
        if not _sinkpad:
            print("Unable to create output sink pad")
            exit(0)

        _srcpad.link(_sinkpad)

        # Converter
        converter = create_element_or_error("nvvideoconvert",
                                            "converter-" + camera['name'])
        pipeline.add(converter)
        queue.link(converter)

        # Nvosd
        nvosd = create_element_or_error("nvdsosd",
                                        "on-screen-display" + camera['name'])
        pipeline.add(nvosd)
        converter.link(nvosd)

        # Transform
        transform = create_element_or_error(
            "nvegltransform", "nvegl-transform-" + camera['name'])
        pipeline.add(transform)
        nvosd.link(transform)

        # Sink
        sink = create_element_or_error("nveglglessink",
                                       "sink-" + camera['name'])
        sink.set_property('sync', False)
        pipeline.add(sink)
        transform.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # cleanup
    pipeline.set_state(Gst.State.NULL)

    print("Exiting app")
Example #17
def main():

    cameras_list = [
        {"source": 0, "name": "Camera 1",},
        {"source": 1, "name": "Camera 2"},
    ]
    
    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    if not pipeline:
        print("Unable to create Pipeline")
        exit(0)

    # Muxer
    streammux = create_element_or_error("nvstreammux", "stream-muxer")
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pipeline.add(streammux)

    # Sources
    for camera in cameras_list:
        source = create_element_or_error("nvarguscamerasrc", "source-" + camera['name'])
        source.set_property('sensor-id', camera['source'])
        source.set_property('bufapi-version', True)
        caps = create_element_or_error("capsfilter", "source-caps-source-" + camera['name'])
        caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM),width=1920,height=1080,framerate=60/1,format=NV12"))
        pipeline.add(source)
        pipeline.add(caps)

        sinkpad = streammux.get_request_pad('sink_' + str(camera['source']))
        srcpad = source.get_static_pad("src")

        if not sinkpad:
            print("Unable to create source sink pad")
            exit(0)
        if not srcpad:
            print("Unable to create source src pad")
            exit(0)
        srcpad.link(sinkpad)

    # Primary Inference
    pgie = create_element_or_error("nvinfer", "primary-inference")
    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt")
    pipeline.add(pgie)
    streammux.link(pgie)

    # Tracker
    tracker = create_element_or_error("nvtracker", "tracker")
    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvdcf.so')
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 480)
    pipeline.add(tracker)
    pgie.link(tracker)

    # Analytics
    analytics = create_element_or_error("nvdsanalytics", "analytics")
    analytics.set_property("config-file", "/deepstream-examples/Analitycs/analitycs.txt")
    pipeline.add(analytics)
    tracker.link(analytics)

    # Tee
    tee = create_element_or_error("tee", "tee")
    pipeline.add(tee)
    analytics.link(tee)

    # Get Main Tee Sink Pad for the Queues
    tee_src_pad_template = tee.get_pad_template("src_%u")
    tee_display_src_pad = tee.request_pad(tee_src_pad_template, None, None)

    # Display Queue
    queue = create_element_or_error("queue", "queue")
    pipeline.add(queue)
    queue_sink_pad = queue.get_static_pad("sink")

    # Link Main Tee to Display Queue
    if (tee_display_src_pad.link(queue_sink_pad) != Gst.PadLinkReturn.OK):
        print("Could not link main tee to display queue")
        return

    # Tiler
    tiler = create_element_or_error("nvmultistreamtiler", "nvtiler")
    tiler.set_property("rows", 2)
    tiler.set_property("columns", 2)
    tiler.set_property("width", 1920)
    tiler.set_property("height", 1080)
    pipeline.add(tiler)
    queue.link(tiler)

    # Converter
    convertor = create_element_or_error("nvvideoconvert", "converter-1")
    pipeline.add(convertor)
    tiler.link(convertor)

    # Nvosd
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    pipeline.add(nvosd)
    convertor.link(nvosd)

    # Transform
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    pipeline.add(transform)
    nvosd.link(transform)

    # Sink
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")
    sink.set_property("qos", 0)
    pipeline.add(sink)
    transform.link(sink)

    # Play Pipeline
    loop = GObject.MainLoop()
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    pipeline.set_state(Gst.State.NULL)
Example #18
def main(args):

    # Standard GStreamer initialization
    cameras_list = [
        {
            "source": 0,
            "name": "Camera 1",
        },
        {
            "source": 1,
            "name": "Camera 2"
        },
    ]

    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    if not pipeline:
        print("Unable to create Pipeline")
        exit(0)

    streammux = create_element_or_error("nvstreammux", "stream-muxer")
    pipeline.add(streammux)

    for camera in cameras_list:
        source = create_element_or_error("nvarguscamerasrc",
                                         "source-" + camera['name'])
        source.set_property('sensor-id', camera['source'])
        source.set_property('bufapi-version', True)
        caps = create_element_or_error("capsfilter",
                                       "source-caps-source-" + camera['name'])
        caps.set_property(
            "caps",
            Gst.Caps.from_string(
                "video/x-raw(memory:NVMM),width=1920,height=1080,framerate=60/1,format=NV12"
            ))
        pipeline.add(source)
        pipeline.add(caps)

        sinkpad = streammux.get_request_pad('sink_' + str(camera['source']))
        srcpad = source.get_static_pad("src")

        if not sinkpad:
            print("Unable to create source sink pad")
            exit(0)
        if not srcpad:
            print("Unable to create source src pad")
            exit(0)
        srcpad.link(sinkpad)

    queue1 = create_element_or_error("queue", "queue1")
    queue2 = create_element_or_error("queue", "queue2")
    queue3 = create_element_or_error("queue", "queue3")
    queue4 = create_element_or_error("queue", "queue4")
    queue5 = create_element_or_error("queue", "queue5")
    queue6 = create_element_or_error("queue", "queue6")
    queue7 = create_element_or_error("queue", "queue7")

    pgie = create_element_or_error("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    analytics = create_element_or_error("nvdsanalytics", "analytics")
    tiler = create_element_or_error("nvmultistreamtiler", "nvtiler")
    convertor = create_element_or_error("nvvideoconvert", "convertor")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt"
    )

    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_mot_klt.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('enable-batch-process', 1)

    analytics.set_property("config-file", "./nvdsanalytics/live.txt")

    nvosd.set_property('process-mode', 0)
    nvosd.set_property('display-text', 0)

    sink.set_property('sync', False)

    print("Adding elements to Pipeline")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(analytics)
    pipeline.add(tiler)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(analytics)
    analytics.link(queue4)
    queue4.link(tiler)
    tiler.link(queue5)
    queue5.link(convertor)
    convertor.link(queue6)
    queue6.link(nvosd)
    nvosd.link(queue7)
    queue7.link(transform)
    transform.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    analytics_src_pad = analytics.get_static_pad("src")
    if not analytics_src_pad:
        sys.stderr.write("Unable to get src pad")
    else:
        analytics_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                    nvanalytics_src_pad_buffer_probe, 0)

    # Start playback and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app")
    pipeline.set_state(Gst.State.NULL)
Example #19
def main():

    cameras_list = [
        {
            "source": 0,
            "name": "Camera 1"
        },
        {
            "source": 1,
            "name": "Camera 2"
        },
    ]

    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    if not pipeline:
        print("Unable to create Pipeline")
        exit(0)

    streammux = create_element_or_error("nvstreammux", "stream-muxer")
    pipeline.add(streammux)

    for camera in cameras_list:
        source = create_element_or_error("nvarguscamerasrc",
                                         "source-" + camera['name'])
        source.set_property('sensor-id', camera['source'])
        source.set_property('bufapi-version', True)
        caps = create_element_or_error("capsfilter",
                                       "source-caps-source-" + camera['name'])
        caps.set_property(
            "caps",
            Gst.Caps.from_string(
                "video/x-raw(memory:NVMM),width=1920,height=1080,framerate=60/1,format=NV12"
            ))
        pipeline.add(source)
        pipeline.add(caps)

        sinkpad = streammux.get_request_pad('sink_' + str(camera['source']))
        srcpad = source.get_static_pad("src")

        if not sinkpad:
            print("Unable to create source sink pad")
            exit(0)
        if not srcpad:
            print("Unable to create source src pad")
            exit(0)
        srcpad.link(sinkpad)

    tiler = create_element_or_error("nvmultistreamtiler", "nvtiler")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")

    queue1 = create_element_or_error("queue", "queue1")
    queue2 = create_element_or_error("queue", "queue2")

    pipeline.add(queue1)
    pipeline.add(queue2)

    # Set Element Properties
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    tiler.set_property("rows", 2)
    tiler.set_property("columns", 2)
    tiler.set_property("width", 1920)
    tiler.set_property("height", 1080)

    sink.set_property("qos", 0)

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(tiler)
    pipeline.add(transform)
    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")

    streammux.link(queue1)
    queue1.link(tiler)
    tiler.link(queue2)
    queue2.link(transform)
    transform.link(sink)

    # Create an event loop and feed GStreamer bus messages to it
    loop = GObject.MainLoop()

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Example #20
def main(args):

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")
    print("Creating streamux")

    streammux = create_element_or_error("nvstreammux", "Stream-muxer")

    pipeline.add(streammux)

    source_bin = create_source_bin(
        "file:/home/socieboy/edge/deepstream-examples/videos/front.mp4")

    if not source_bin:
        sys.stderr.write("Unable to create source bin")

    pipeline.add(source_bin)

    sinkpad = streammux.get_request_pad('sink_0')
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin")

    srcpad = source_bin.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin")

    srcpad.link(sinkpad)

    queue1 = create_element_or_error("queue", "queue1")
    # queue2 = create_element_or_error("queue","queue2")
    # queue3 = create_element_or_error("queue","queue3")
    # queue4 = create_element_or_error("queue","queue4")
    # queue5 = create_element_or_error("queue","queue5")
    queue6 = create_element_or_error("queue", "queue6")
    queue7 = create_element_or_error("queue", "queue7")

    pipeline.add(queue1)
    # pipeline.add(queue2)
    # pipeline.add(queue3)
    # pipeline.add(queue4)
    # pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)

    pgie = create_element_or_error("nvinfer", "primary-inference")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    converter = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd.set_property('process-mode', 2)
    # nvosd.set_property('display-text', 0)

    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    sink.set_property('sync', False)
    sink.set_property('window-width', 1080)
    sink.set_property('window-height', 720)

    # pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt")
    pgie.set_property(
        'config-file-path',
        "models/yolov3-nurawash-80/config_infer_primary_yoloV3.txt")

    print("Adding elements to Pipeline")
    pipeline.add(pgie)
    pipeline.add(converter)
    pipeline.add(nvosd)
    pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline")
    streammux.link(pgie)
    pgie.link(converter)
    converter.link(nvosd)
    nvosd.link(transform)
    transform.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # analytics_src_pad = analytics.get_static_pad("src")
    # if not analytics_src_pad:
    #     sys.stderr.write("Unable to get src pad")
    # else:
    #     analytics_src_pad.add_probe(Gst.PadProbeType.BUFFER, nvanalytics_src_pad_buffer_probe, 0)

    # List the sources
    print("Starting pipeline")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app")
    pipeline.set_state(Gst.State.NULL)
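
# create_source_bin is referenced above but not defined in this listing; the
# sketch below assumes the uridecodebin pattern from the DeepStream Python
# sample apps.

def cb_newpad(decodebin, decoder_src_pad, source_bin):
    # Point the bin's ghost pad at the decoder's NVMM video pad.
    caps = decoder_src_pad.get_current_caps()
    gstname = caps.get_structure(0).get_name()
    features = caps.get_features(0)
    if gstname.find("video") != -1:
        if features.contains("memory:NVMM"):
            ghost_pad = source_bin.get_static_pad("src")
            if not ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to set ghost pad target\n")
        else:
            sys.stderr.write("Decodebin did not pick an NVMM-capable decoder\n")


def create_source_bin(uri, index=0):
    # Wrap uridecodebin in a bin that exposes a single "src" ghost pad.
    nbin = Gst.Bin.new("source-bin-%02d" % index)
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not nbin or not uri_decode_bin:
        return None
    uri_decode_bin.set_property("uri", uri)
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    nbin.add(uri_decode_bin)
    if not nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC)):
        sys.stderr.write("Failed to add ghost pad in source bin\n")
        return None
    return nbin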
Ejemplo n.º 21
0
def main():

    cameras_list = [
        {
            "index": 0,
            "source": "/dev/video0",
            "name": "Camera-1"
        },
        # {"index" : 1, "source": "/dev/video1", "name": "Camera-2"},
    ]

    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    if not pipeline:
        print("Unable to create Pipeline")
        exit(0)

    # Muxer
    muxer = create_element_or_error("nvstreammux", "stream-muxer")
    muxer.set_property('live-source', True)
    muxer.set_property('width', 1280)
    muxer.set_property('height', 720)
    muxer.set_property('num-surfaces-per-frame', 1)
    muxer.set_property('batch-size', 1)
    muxer.set_property('batched-push-timeout', 4000000)
    pipeline.add(muxer)

    # Sources
    for camera in cameras_list:

        # Source
        source = create_element_or_error("nvv4l2camerasrc",
                                         "source-" + camera['name'])
        source.set_property('device', camera["source"])
        source.set_property('do-timestamp', True)
        source.set_property('bufapi-version', True)
        pipeline.add(source)

        # Caps
        caps = create_element_or_error("capsfilter", "source-caps-source-1")
        caps.set_property(
            "caps",
            Gst.Caps.from_string(
                "video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, framerate=(fraction)30/1, format=(string)UYVY"
            ))
        pipeline.add(caps)
        source.link(caps)

        convertor = create_element_or_error("nvvideoconvert", "converter-1")
        pipeline.add(convertor)
        caps.link(convertor)

        srcpad = convertor.get_static_pad("src")
        sinkpad = muxer.get_request_pad('sink_' + str(camera['index']))

        if not sinkpad:
            print("Unable to create source sink pad")
            exit(0)
        if not srcpad:
            print("Unable to create source src pad")
            exit(0)
        srcpad.link(sinkpad)

    pgie = create_element_or_error("nvinfer", "primary-inference")
    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt"
    )
    pipeline.add(pgie)
    muxer.link(pgie)

    tracker = create_element_or_error("nvtracker", "tracker")
    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvdcf.so')
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('tracker-width', 640)
    tracker.set_property('tracker-height', 480)
    pipeline.add(tracker)
    pgie.link(tracker)

    tiler = create_element_or_error("nvmultistreamtiler", "nvtiler")
    tiler.set_property("rows", 1)
    tiler.set_property("columns", 1)
    tiler.set_property("width", 1280)
    tiler.set_property("height", 720)
    pipeline.add(tiler)
    tracker.link(tiler)

    convertor2 = create_element_or_error("nvvideoconvert", "converter-2")
    pipeline.add(convertor2)
    tiler.link(convertor2)

    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    pipeline.add(nvosd)
    convertor2.link(nvosd)

    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    pipeline.add(transform)
    nvosd.link(transform)

    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")
    pipeline.add(sink)
    transform.link(sink)

    loop = GObject.MainLoop()
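    # Attach a bus watch (same bus_call helper as the other examples) so the
    # loop exits on end-of-stream or errors.
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)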

    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    pipeline.set_state(Gst.State.NULL)
Ejemplo n.º 22
0
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write("Unable to create Pipeline")

    # Source
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    source.set_property('sensor-id', 1)
    pipeline.add(source)

    # Convertor
    convertor = create_element_or_error("nvvidconv", "converter-1")
    pipeline.add(convertor)
    source.link(convertor)

    # Video Rate
    videorate = create_element_or_error("videorate", "videorate")
    pipeline.add(videorate)
    convertor.link(videorate)

    # Video Rate Caps
    videoRateCaps = create_element_or_error("capsfilter",
                                            "videorate-caps-source")
    videoRateCaps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw,framerate=1/5"))
    pipeline.add(videoRateCaps)
    videorate.link(videoRateCaps)

    # Encoder
    encoder = create_element_or_error("nvjpegenc", "snapshot-encoder")
    pipeline.add(encoder)
    videoRateCaps.link(encoder)

    # File Sink
    sink = create_element_or_error("multifilesink", "snapshot-sink")
    sink.set_property('location', 'snapshot-%05d.jpg')
    pipeline.add(sink)
    encoder.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Ejemplo n.º 23
0
def main():
    
    print('Tracker Example')

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        print("Unable to create Pipeline")
        return False
    
    # Create GST Elements
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    srcCaps = create_element_or_error("capsfilter", "source-caps")
    srcCaps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, framerate=30/1, format=(string)NV12"))
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "converter-2")
    encoder = create_element_or_error("nvv4l2h264enc", "encoder")
    parser = create_element_or_error("h264parse", "parser")
    muxer = create_element_or_error("flvmux", "muxer")
    queue = create_element_or_error("queue", "queue-sink")
    sink = create_element_or_error("rtmpsink", "sink")

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    encoder.set_property('insert-sps-pps', True)
    encoder.set_property('bitrate', 4000000)
    
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1080)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('nvbuf-memory-type', 4)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt")

    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream/lib/libnvds_nvdcf.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('ll-config-file', '/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/tracker_config.yml')

    muxer.set_property('streamable', True)
    sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/csi-camera')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(srcCaps)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(encoder)
    pipeline.add(parser)
    pipeline.add(muxer)
    pipeline.add(queue)
    pipeline.add(sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(srcCaps)
    srcCaps.link(streammux)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(encoder)
    encoder.link(parser)
    parser.link(muxer)
    muxer.link(queue)
    queue.link(sink)
    
    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    print('Create OSD Sink Pad')
    nvosd_sinkpad = nvosd.get_static_pad("sink")
    if not nvosd_sinkpad:
        sys.stderr.write("Unable to get sink pad of nvosd")

    nvosd_sinkpad.add_probe(Gst.PadProbeType.BUFFER, sink_pad_buffer_probe, 0)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
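
def sink_pad_buffer_probe(pad, info, u_data):
    # Hypothetical minimal sketch of the probe attached to the nvdsosd sink
    # pad above; its real definition is not part of this listing. A full
    # version would walk the DeepStream batch metadata via pyds; this stub
    # just lets buffers pass through.
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    # e.g. batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    return Gst.PadProbeReturn.OK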
Ejemplo n.º 24
0
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")

    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    encoder = create_element_or_error("nvv4l2h265enc", "encoder")
    parser = create_element_or_error("h265parse", "h265-parser")
    rtppay = create_element_or_error("rtph265pay", "rtppay")
    sink = create_element_or_error("udpsink", "udpsink")

    # Set Element Properties
    source.set_property('sensor-id', 0)

    encoder.set_property('insert-sps-pps', True)
    encoder.set_property('bitrate', 4000000)

    rtppay.set_property('pt', 96)
    updsink_port_num = 5400

    sink.set_property('host', '127.0.0.1')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(parser)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(encoder)
    encoder.link(parser)
    parser.link(rtppay)
    rtppay.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start streaming
    rtsp_port_num = 8554

    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)

    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, 'H265'))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/streaming", factory)

    print(
        "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/streaming ***\n\n"
        % rtsp_port_num)
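    # To check the stream from another machine (hypothetical client command,
    # assuming port 8554 is reachable):
    #   gst-launch-1.0 playbin uri=rtsp://<jetson-ip>:8554/streaming
    # or open the same URI in VLC.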

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Ejemplo n.º 25
0
def main():

    cameras_list = [
        {
            "source": 0,
            "name": "Camera 1",
        },
        {
            "source": 1,
            "name": "Camera 2"
        },
    ]

    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    if not pipeline:
        print("Unable to create Pipeline")
        exit(0)

    streammux = create_element_or_error("nvstreammux", "stream-muxer")
    pipeline.add(streammux)

    for camera in cameras_list:
        source = create_element_or_error("nvarguscamerasrc",
                                         "source-" + camera['name'])
        source.set_property('sensor-id', camera['source'])
        source.set_property('bufapi-version', True)
        caps = create_element_or_error("capsfilter",
                                       "source-caps-source-" + camera['name'])
        caps.set_property(
            "caps",
            Gst.Caps.from_string(
                "video/x-raw(memory:NVMM),width=1920,height=1080,framerate=60/1,format=NV12"
            ))
        pipeline.add(source)
        pipeline.add(caps)

        srcpad = source.get_static_pad("src")

        sinkpad = streammux.get_request_pad('sink_' + str(camera['source']))

        if not sinkpad:
            print("Unable to create source sink pad")
            exit(0)
        if not srcpad:
            print("Unable to create source src pad")
            exit(0)
        srcpad.link(sinkpad)

    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "converter-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    tee = create_element_or_error("tee", "tee")
    queue = create_element_or_error("queue", "queue1")
    queue2 = create_element_or_error("queue", "queue2")

    # Set Element Properties
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt"
    )

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(pgie)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(transform)

    # Output branch: nvstreammux exposes a single batched "src" pad (there is
    # no per-stream "src_%u" pad to request), so the batch is tiled with
    # nvmultistreamtiler and rendered through one sink instead of one
    # renderer per camera.
    tiler = create_element_or_error("nvmultistreamtiler", "nvtiler")
    tiler.set_property("rows", 1)
    tiler.set_property("columns", len(cameras_list))
    tiler.set_property("width", 1920)
    tiler.set_property("height", 1080)

    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")
    # sink.set_property("qos", 0)

    pipeline.add(tiler)
    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    streammux.link(pgie)
    pgie.link(tiler)
    tiler.link(convertor)
    convertor.link(nvosd)
    nvosd.link(transform)
    transform.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
Ejemplo n.º 26
0
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        print("Unable to create Pipeline")
        return False

    # Create GST Elements
    source = create_element_or_error("filesrc", "file-source")
    # An elementary .h264 file has to be parsed and decoded before it can
    # reach nvegltransform/nveglglessink.
    parser = create_element_or_error("h264parse", "parse")
    decoder = create_element_or_error("nvv4l2decoder", "decoder")
    # streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    # pgie = create_element_or_error("nvinfer", "primary-inference")
    # tracker = create_element_or_error("nvtracker", "tracker")
    # convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    # nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "egl-overlay")

    # Set Element Properties
    source.set_property('location', './videos/sample_qHD.h264')
    # sink.set_property('location', 'rtmp://media.streamit.live/LiveApp/stream-test')

    # streammux.set_property('width', 1280)
    # streammux.set_property('height', 720)
    # streammux.set_property('batch-size', 1)
    # streammux.set_property('batched-push-timeout', 4000000)

    # pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt")

    # tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_nvdcf.so')
    # tracker.set_property('gpu-id', 0)
    # tracker.set_property('enable-past-frame', 1)
    # tracker.set_property('enable-batch-process', 1)
    # tracker.set_property('ll-config-file', '/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/tracker_config.yml')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(parser)
    pipeline.add(decoder)
    # pipeline.add(streammux)
    # pipeline.add(pgie)
    # pipeline.add(tracker)
    # pipeline.add(convertor)
    # pipeline.add(nvosd)
    pipeline.add(transform)
    pipeline.add(sink)

    # source.link(parser)
    # parser.link(decoder)

    # sinkpad = streammux.get_request_pad("sink_0")
    # if not sinkpad:
    #     sys.stderr.write(" Unable to get the sink pad of streammux \n")
    # srcpad = decoder.get_static_pad("src")
    # if not srcpad:
    #     sys.stderr.write(" Unable to get source pad of decoder \n")
    # srcpad.link(sinkpad)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(parser)
    parser.link(decoder)
    # decoder.link(streammux)
    # streammux.link(pgie)
    # pgie.link(tracker)
    # tracker.link(convertor)
    # convertor.link(nvosd)
    # nvosd.link(transform)
    decoder.link(transform)
    transform.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
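    # Roughly equivalent gst-launch sketch for the decode-and-display path
    # above (assuming the same elementary H.264 file):
    #   gst-launch-1.0 filesrc location=./videos/sample_qHD.h264 ! h264parse \
    #       ! nvv4l2decoder ! nvegltransform ! nveglglessink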
Ejemplo n.º 27
0
def main(args):

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")
    print("Creating streamux")

    streammux = create_element_or_error("nvstreammux", "Stream-muxer")

    pipeline.add(streammux)

    source_bin = create_source_bin(
        "file:/deepstream-examples/videos/traffic2.mp4")

    if not source_bin:
        sys.stderr.write("Unable to create source bin")

    pipeline.add(source_bin)

    sinkpad = streammux.get_request_pad('sink_0')
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin")

    srcpad = source_bin.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin")

    srcpad.link(sinkpad)

    queue1 = create_element_or_error("queue", "queue1")
    queue2 = create_element_or_error("queue", "queue2")
    queue3 = create_element_or_error("queue", "queue3")
    queue4 = create_element_or_error("queue", "queue4")
    queue5 = create_element_or_error("queue", "queue5")
    queue6 = create_element_or_error("queue", "queue6")

    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
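    # A queue between consecutive elements puts each stage on its own
    # streaming thread, the same pattern the DeepStream reference apps use so
    # inference, tracking, analytics and rendering do not stall each other.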

    pgie = create_element_or_error("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    analytics = create_element_or_error("nvdsanalytics", "analytics")
    converter = create_element_or_error("nvvideoconvert", "convertor")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")

    nvosd.set_property('process-mode', 2)
    # nvosd.set_property('display-text', 0)

    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt"
    )

    tracker.set_property(
        'll-lib-file',
        '/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_mot_klt.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('enable-batch-process', 1)

    analytics.set_property("config-file", "./nvdsanalytics/traffic2.txt")

    print("Adding elements to Pipeline")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(analytics)
    pipeline.add(converter)
    pipeline.add(nvosd)
    pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(analytics)
    analytics.link(queue4)
    queue4.link(converter)
    converter.link(queue5)
    queue5.link(nvosd)
    nvosd.link(queue6)
    queue6.link(transform)
    transform.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    analytics_src_pad = analytics.get_static_pad("src")
    if not analytics_src_pad:
        sys.stderr.write("Unable to get src pad")
    else:
        analytics_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                    nvanalytics_src_pad_buffer_probe, 0)

    # List the sources
    print("Starting pipeline")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app")
    pipeline.set_state(Gst.State.NULL)
Ejemplo n.º 28
0
def main(args):

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write("Unable to create Pipeline")
    
    print("Create elements")

    muxer = create_element_or_error("nvstreammux", "Stream-muxer")
    muxer.set_property('live-source', True)
    muxer.set_property('sync-inputs', True)
    muxer.set_property('width', 1920)
    muxer.set_property('height', 1080)
    muxer.set_property('batch-size', 3)
    muxer.set_property('batched-push-timeout', 4000000)
    pipeline.add(muxer)
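    # Note: batch-size is 3 while only one source bin is attached below;
    # nvstreammux's batch-size is normally set to match the number of
    # connected sources.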

    source_bin = create_source_bin("file:/deepstream-examples/Analitycs/traffic.mp4")
    pipeline.add(source_bin)

    sinkpad = muxer.get_request_pad('sink_0') 
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin")

    srcpad = source_bin.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin")

    srcpad.link(sinkpad)

    queue1 = create_element_or_error("queue","queue1")
    queue2 = create_element_or_error("queue","queue2")
    queue3 = create_element_or_error("queue","queue3")
    queue4 = create_element_or_error("queue","queue4")
    queue5 = create_element_or_error("queue","queue5")
    queue6 = create_element_or_error("queue","queue6")
    queue7 = create_element_or_error("queue", "queue7")

    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)

    pgie = create_element_or_error("nvinfer", "primary-inference")
    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt")

    tracker = create_element_or_error("nvtracker", "tracker")
    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream/lib/libnvds_mot_klt.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-past-frame', 1)
    tracker.set_property('enable-batch-process', 1)

    analytics = create_element_or_error("nvdsanalytics", "analytics")
    analytics.set_property("config-file", "./analitycs.txt")

    converter = create_element_or_error("nvvideoconvert", "convertor")

    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    nvosd.set_property('process-mode', 2)
    nvosd.set_property('display-text', 0)

    encoder = create_element_or_error("nvv4l2h264enc", "encoder")
    encoder.set_property('maxperf-enable', True)
    encoder.set_property('insert-sps-pps', True)
    encoder.set_property('bitrate', 8000000)

    parser = create_element_or_error("h264parse", "parser")

    flmuxer = create_element_or_error("flvmux", "flmuxer")
    flmuxer.set_property('streamable', True)

    sink = create_element_or_error("rtmpsink", "sink")
    sink.set_property('location', "rtmp://media.streamit.live/LiveApp/analitycs")
    sink.set_property('sync', False)

    print("Adding elements to Pipeline")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(analytics)
    pipeline.add(converter)
    pipeline.add(nvosd)
    pipeline.add(encoder)
    pipeline.add(parser)
    pipeline.add(flmuxer)
    pipeline.add(sink)

    print("Linking elements in the Pipeline")
    muxer.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(analytics)
    analytics.link(queue4)
    queue4.link(converter)
    converter.link(queue5)
    queue5.link(nvosd)
    nvosd.link(queue6)
    queue6.link(encoder)
    encoder.link(parser)
    parser.link(flmuxer)
    flmuxer.link(queue7)
    queue7.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)

    analytics_src_pad = analytics.get_static_pad("src")
    if not analytics_src_pad:
        sys.stderr.write("Unable to get src pad")
    else:
        analytics_src_pad.add_probe(Gst.PadProbeType.BUFFER, nvanalytics_src_pad_buffer_probe, 0)

    # List the sources
    print("Starting pipeline")

    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app")
    pipeline.set_state(Gst.State.NULL)
Ejemplo n.º 29
0
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Gst.debug_set_active(False)
    # Gst.debug_set_default_threshold(3)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")

    # Create Gst Threads
    tee = create_element_or_error("tee", "tee")
    recording_queue = create_element_or_error("queue", "recording_queue")

    # Create Source Element
    source = create_element_or_error('nvarguscamerasrc', 'source')
    caps = create_element_or_error('capsfilter', 'source-capsfilter')
    encoder = create_element_or_error('nvv4l2h265enc', 'encoder')
    parser = create_element_or_error('h265parse', 'parser')
    # demuxer = create_element_or_error('matroskamux', 'matroxdemux')
    sink = create_element_or_error('splitmuxsink', 'sink')

    # Set Element Properties
    source.set_property('sensor-id', 0)
    caps.set_property(
        "caps",
        Gst.Caps.from_string(
            "video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, format=(string)NV12, framerate=(fraction)30/1"
        ))
    sink.set_property('max-size-time', 30000000000)
    # splitmuxsink's 'muxer' property takes a GstElement, not a factory name
    sink.set_property('muxer', Gst.ElementFactory.make("matroskamux", None))
    sink.connect('format-location', __location)

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(caps)
    pipeline.add(tee)
    pipeline.add(recording_queue)
    pipeline.add(encoder)
    pipeline.add(parser)
    # pipeline.add(demuxer)
    pipeline.add(sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(caps)
    caps.link(tee)
    recording_queue.link(encoder)
    encoder.link(parser)
    # parser.link(demuxer)
    parser.link(sink)

    # Get pad templates from source
    tee_src_pad_template = tee.get_pad_template("src_%u")

    # Get source to Recording Queue
    tee_pad = tee.request_pad(tee_src_pad_template, None, None)
    queue_pad = recording_queue.get_static_pad("sink")

    if (tee_pad.link(queue_pad) != Gst.PadLinkReturn.OK):
        print("ERROR: Tee streaming could not be linked")
        sys.exit(1)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
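
def __location(splitmux, fragment_id):
    # Hypothetical sketch of the 'format-location' callback wired up above;
    # its real definition is not part of this listing. splitmuxsink calls it
    # once per segment and uses the returned path for the new file.
    return "recording-%05d.mkv" % fragment_id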
Ejemplo n.º 30
0
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write("Unable to create Pipeline")

    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "convertor-2")
    caps = create_element_or_error("capsfilter", "filter-convertor-2")
    encoder = create_element_or_error("nvv4l2h265enc", "encoder")
    parser = create_element_or_error("h265parse", "h265-parser")
    rtppay = create_element_or_error("rtph265pay", "rtppay")
    sink = create_element_or_error("udpsink", "udpsink")

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)
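    # bufapi-version=True makes nvarguscamerasrc output DeepStream-style
    # NvBufSurface buffers, which is what nvstreammux expects when the camera
    # is linked to it directly.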

    encoder.set_property('insert-sps-pps', True)
    encoder.set_property('bitrate', 4000000)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt"
    )

    rtppay.set_property('pt', 96)

    updsink_port_num = 5400

    sink.set_property('host', '127.0.0.1')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(encoder)
    pipeline.add(parser)
    pipeline.add(rtppay)
    pipeline.add(sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(encoder)
    encoder.link(parser)
    parser.link(rtppay)
    rtppay.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start streaming
    rtsp_port_num = 8554

    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)

    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, 'H265'))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/streaming", factory)

    print(
        "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/streaming ***\n\n"
        % rtsp_port_num)

    print('Create OSD Sink Pad')
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)