def _make_audio_pipeline(self):
        # Build two pipelines: one for playing URLs (playbin) and one for
        # content we already have in memory (appsrc)
        
        self.audio_player = Gst.ElementFactory.make("playbin", "player")

        # now fit an equalizer into that playbin
        
        equalizer = Gst.ElementFactory.make("equalizer-3bands", "equalizer")
        convert = Gst.ElementFactory.make("audioconvert", "convert")
        
        asink = Gst.ElementFactory.make("autoaudiosink", "audio_sink")

        audiobin = Gst.Bin.new("audio_sink_bin")
        audiobin.add(equalizer)
        audiobin.add(convert)
        audiobin.add(asink)

        equalizer.link(convert)
        convert.link(asink)

        ghost_pad = Gst.GhostPad.new("sink",
                                     equalizer.get_static_pad("sink"))
        ghost_pad.set_active(True)
        audiobin.add_pad(ghost_pad)
        
        self.audio_player.set_property('audio-sink', audiobin)

        bus = self.audio_player.get_bus()
        bus.enable_sync_message_emission()
        bus.add_signal_watch()
        bus.connect('message::tag', self.on_tag)
        bus.connect('message::error', self.on_error)
        bus.connect('message::eos', self.on_eos, self.audio_player)
        bus.connect('message::buffering', self.on_buffering)
        bus.connect('message::state-changed', self.on_state_changed)

        pipeline = Gst.Pipeline.new("audio_pipeline")
        src = Gst.ElementFactory.make("appsrc")
        mad = Gst.ElementFactory.make("mad")
        convert = Gst.ElementFactory.make("audioconvert")
        volume = Gst.ElementFactory.make("volume")
        sink = Gst.ElementFactory.make("alsasink")

        pipeline.add(src)
        pipeline.add(mad)
        pipeline.add(convert)
        pipeline.add(volume)
        pipeline.add(sink)

        src.link(mad)
        mad.link(convert)
        convert.link(volume)
        volume.link(sink)

        bus = pipeline.get_bus()
        bus.enable_sync_message_emission()
        bus.add_signal_watch()
        bus.connect('message::tag', self.on_tag)
        bus.connect('message::error', self.on_error)
        bus.connect('message::eos', self.on_eos, pipeline)
        bus.connect('message::buffering', self.on_buffering)
        bus.connect('message::state-changed', self.on_state_changed)

        pipeline.token = ''
        
        self.audio_source = src
        self.audio_pipeline = pipeline
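
A minimal usage sketch (an addition, not part of the original class): once _make_audio_pipeline() has run, raw MP3 bytes can be fed into the appsrc branch through the attributes it stores. The play_mp3_bytes name and the bytes-in-memory scenario are assumptions.

from gi.repository import Gst

def play_mp3_bytes(player, data):
    # Start the appsrc-based pipeline built in _make_audio_pipeline()
    player.audio_pipeline.set_state(Gst.State.PLAYING)
    # Wrap the raw MP3 bytes in a Gst.Buffer and hand them to appsrc
    buf = Gst.Buffer.new_wrapped(data)
    player.audio_source.emit("push-buffer", buf)
    # Signal that no more data will follow
    player.audio_source.emit("end-of-stream")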
Example 2
def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    print("Creating tracker \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    codec = "H264"
    bitrate = 4000000
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # Make the UDP sink
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")

    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)
    #print("Creating EGLSink \n")
    #sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    #if not sink:
    #    sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 640)
    streammux.set_property('height', 480)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path',
                      "facedetectir/config_infer_primary_facedetectir.txt")
    pgie.set_property(
        'model-engine-file',
        "facedetectir/resnet18_facedetectir_pruned.etlt_b1_gpu0_int8.engine")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    # tracker properties
    tracker.set_property("tracker-width", 640)
    tracker.set_property("tracker-height", 384)
    tracker.set_property(
        "ll-lib-file",
        "/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_mot_klt.so")
    tracker.set_property(
        "ll-config-file",
        "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/tracker_config.yml"
    )
    tracker.set_property("enable-batch-process", 1)

    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)
    # NOTE: leftover from the on-screen-display variant of this app; nvosd and
    # sink are already linked through the encoder/rtppay branch above, so these
    # links would simply fail at runtime. Kept commented out for reference:
    #if is_aarch64():
    #    nvosd.link(transform)
    #    transform.link(sink)
    #else:
    #    nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start streaming
    rtsp_port_num = 8554

    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)

    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if (i != 0):
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
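
Several of these examples rely on a create_source_bin helper that is never shown. The DeepStream Python sample apps implement it roughly as below; this is a sketch of that pattern (uridecodebin wrapped in a bin with an initially untargeted ghost pad), not necessarily the exact code behind this snippet.

def create_source_bin(index, uri):
    # Wrap uridecodebin in a bin exposing a single "src" ghost pad.
    nbin = Gst.Bin.new("source-bin-%02d" % index)
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    uri_decode_bin.set_property("uri", uri)

    def cb_newpad(decodebin, decoder_src_pad, source_bin):
        # Target the ghost pad at the first NVMM-backed video pad.
        caps = decoder_src_pad.get_current_caps()
        structure = caps.get_structure(0)
        if structure.get_name().startswith("video") and \
                caps.get_features(0).contains("memory:NVMM"):
            source_bin.get_static_pad("src").set_target(decoder_src_pad)

    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    nbin.add(uri_decode_bin)
    # The ghost pad has no target yet; cb_newpad sets it at runtime.
    nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    return nbin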
Example 3
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        print("Unable to create Pipeline")
        return False

    # Create GST Elements
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    src_caps = create_element_or_error("capsfilter", "source-caps-definition")
    src_caps.set_property(
        "caps",
        Gst.Caps.from_string(
            "video/x-raw(memory:NVMM), width=(int)3264, height=(int)2464, framerate=30/1, format=(string)NV12"
        ))

    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "convertor-2")

    encoder = create_element_or_error("nvv4l2h264enc", "encoder")
    parser = create_element_or_error("h264parse", "parser")
    muxer = create_element_or_error("flvmux", "muxer")
    sink = create_element_or_error("rtmpsink", "sink")

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    encoder.set_property('insert-sps-pps', True)
    encoder.set_property('bitrate', 4000000)

    streammux.set_property('live-source', 1)
    streammux.set_property('width', 720)
    streammux.set_property('height', 480)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt"
    )
    sink.set_property('location',
                      'rtmp://media.streamit.live/LiveApp/streaming-test')

    # Add elements to the pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(src_caps)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(encoder)
    pipeline.add(parser)
    pipeline.add(muxer)
    pipeline.add(sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(src_caps)
    src_caps.link(streammux)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(encoder)
    encoder.link(parser)
    parser.link(muxer)
    muxer.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    print('Create OSD Sink Pad')
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
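
The create_element_or_error helper used above (and again in a later example) is also not shown; a minimal version consistent with its call sites might look like this:

def create_element_or_error(factory_name, element_name):
    # Like Gst.ElementFactory.make, but report a missing factory on stderr.
    element = Gst.ElementFactory.make(factory_name, element_name)
    if not element:
        sys.stderr.write("Unable to create %s (%s)\n" % (element_name, factory_name))
    return element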
Example 4
    def construct_pipeline(self):
        """
        Add and link elements in a Gstreamer pipeline.
        """
        # Create the pipeline instance

        self.player = Gst.Pipeline()

        # Define pipeline elements
        self.filesrc = Gst.ElementFactory.make("filesrc", "filesrc")
        self.filesrc.set_property("location", self.inFileLocation)

        self.decodebin = Gst.ElementFactory.make("decodebin", "decodebin")

        # audioconvert for audio processing pipeline
        self.audioconvert = Gst.ElementFactory.make("audioconvert",
                                                    "audioconvert")

        # Autoconvert element for video processing
        self.autoconvert = Gst.ElementFactory.make("autoconvert",
                                                   "autoconvert")

        self.audiosink = Gst.ElementFactory.make("autoaudiosink",
                                                 "autoaudiosink")
        self.videosink = Gst.ElementFactory.make("autovideosink",
                                                 "autovideosink")

        # As a precaution add a video capability filter
        # in the video processing pipeline.

        videocap = Gst.Caps.from_string("video/x-raw")
        self.filter = Gst.ElementFactory.make("capsfilter", "filter")
        self.filter.set_property("caps", videocap)

        # Converts the video from one colorspace to another.
        # "ffmpegcolorspace" is the GStreamer 0.10 name; in 1.0 the element is
        # called "videoconvert" (the old name was crashing here...)
        self.colorSpace = Gst.ElementFactory.make("videoconvert")

        self.queue1 = Gst.ElementFactory.make("queue")
        self.queue2 = Gst.ElementFactory.make("queue")

        factory = self.player.get_factory()
        self.gtksink = factory.make('gtksink')

        # print(
        #       self.filesrc,
        #       self.decodebin,
        #       self.autoconvert,
        #       self.audioconvert,
        #       self.queue1,
        #       self.queue2,
        #       self.filter,
        #       self.colorspace,
        #       self.audiosink,
        #       self.videosink)

        # Add elements to the pipeline
        self.player.add(self.filesrc)
        self.player.add(self.decodebin)
        self.player.add(self.autoconvert)
        self.player.add(self.audioconvert)
        self.player.add(self.queue1)
        self.player.add(self.queue2)
        self.player.add(self.filter)
        self.player.add(self.audiosink)
        self.player.add(self.videosink)
        self.player.add(self.gtksink)

        # Link elements in the pipeline.
        self.filesrc.link(self.decodebin)

        self.queue1.link(self.autoconvert)
        self.autoconvert.link(self.filter)
        self.filter.link(self.gtksink)

        self.queue2.link(self.audioconvert)
        self.audioconvert.link(self.audiosink)
        """
Example 5
def main(
    input_filename: str,
    output_filename: str = None,
):
    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        print("Unable to create Pipeline", error=True)

    source_bin = create_source_bin(0, input_filename)

    # Finally encode and save the osd output
    queue = make_elm_or_print_err("queue", "queue", "Queue")

    # Video capabilities: check format and GPU/CPU location
    capsfilter = make_elm_or_print_err("capsfilter", "capsfilter",
                                       "capsfilter")
    caps = Gst.Caps.from_string("video/x-raw, format=I420")
    capsfilter.set_property("caps", caps)

    print("Creating MPEG-4 stream")
    encoder = make_elm_or_print_err("avenc_mpeg4", "encoder", "Encoder")
    codeparser = make_elm_or_print_err("mpeg4videoparse", "mpeg4-parser",
                                       "Code Parser")

    encoder.set_property("insert-sps-pps", 1)
    encoder.set_property("bitrate", 4e6)

    queue_file = make_elm_or_print_err("queue", "queue_file",
                                       "File save queue")
    # codeparser already created above depending on codec
    container = make_elm_or_print_err("qtmux", "qtmux", "Container")
    filesink = make_elm_or_print_err("filesink", "filesink", "File Sink")
    filesink.set_property("location", output_filename)

    # streammux must be created before it is added and linked below
    # (resolution values here are placeholder assumptions); an nvvideoconvert
    # may also be needed between streammux's NVMM output and the raw-video
    # capsfilter.
    streammux = make_elm_or_print_err("nvstreammux", "Stream-muxer", "streammux")
    streammux.set_property("width", 1280)
    streammux.set_property("height", 720)
    streammux.set_property("batch-size", 1)

    pipeline.add(source_bin)
    pipeline.add(streammux)

    pipeline.add(queue)
    pipeline.add(capsfilter)
    pipeline.add(encoder)

    pipeline.add(queue_file)
    pipeline.add(codeparser)
    pipeline.add(container)
    pipeline.add(filesink)

    print("Linking elements in the Pipeline \n")

    # Pipeline Links
    srcpad = source_bin.get_static_pad("src")
    demux_sink = streammux.get_request_pad("sink_0")
    demux_sink.add_probe(Gst.PadProbeType.BUFFER, cb_buffer_probe, None)

    if not srcpad or not demux_sink:
        print("Unable to get file source or mux sink pads", error=True)
    srcpad.link(demux_sink)
    streammux.link(queue)
    queue.link(capsfilter)
    capsfilter.link(encoder)
    encoder.link(queue_file.get_static_pad("sink"))

    # Output to File or fake sinks
    queue_file.link(codeparser)
    codeparser.link(container)
    container.link(filesink)

    # GLib loop required for RTSP server
    g_loop = GLib.MainLoop()
    g_context = g_loop.get_context()

    # GStreamer message bus
    bus = pipeline.get_bus()

    # start play back and listen to events
    pipeline.set_state(Gst.State.PLAYING)

    # After setting pipeline to PLAYING, stop it even on exceptions
    try:
        # Custom event loop
        running = True
        while running:
            g_context.iteration(may_block=True)

            message = bus.pop()
            if message is not None:
                t = message.type

                if t == Gst.MessageType.EOS:
                    print("End-of-stream\n")
                    running = False
                elif t == Gst.MessageType.WARNING:
                    err, debug = message.parse_warning()
                    print(f"{err}: {debug}", warning=True)
                elif t == Gst.MessageType.ERROR:
                    err, debug = message.parse_error()
                    print(f"{err}: {debug}", error=True)
                    running = False

        print("Inference main loop ending.")
        pipeline.set_state(Gst.State.NULL)

        print(f"Output file saved: [green bold]{output_filename}[/green bold]")
    except:
        console.print_exception()
        pipeline.set_state(Gst.State.NULL)
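
make_elm_or_print_err is another sample-app helper that never appears in these snippets; judging by its call sites, it is roughly:

def make_elm_or_print_err(factory_name, name, printed_name):
    # Create the element and complain on stderr using a human-readable name.
    print("Creating", printed_name)
    elm = Gst.ElementFactory.make(factory_name, name)
    if not elm:
        sys.stderr.write("Unable to create " + printed_name + "\n")
    return elm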
Example 6
from time import sleep
import gi

gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst

Gst.init(None)

pipeline = Gst.Pipeline()
# bin = Gst.parse_bin_from_description("v4l2src device=/dev/video0 ! image/jpeg,framerate=30/1,width=1280,height=720 ! jpegdec ! autovideosink", False)
bin = Gst.parse_bin_from_description(
    "v4l2src device=/dev/video0 ! image/jpeg, width=1280, height=720, framerate=60/1 ! rtpjpegpay ! multiudpsink clients=10.0.1.54:1234,10.0.1.54:5678 sync=false",
    False)
# bin = Gst.parse_bin_from_description("v4l2src device=/dev/video1 ! image/jpeg,framerate=30/1,width=1280,height=720 ! rtpjpegpay ! application/x-rtp,encoding-name=JPEG,payload=26 ! rtpjpegdepay ! jpegdec ! autovideosink", False)
pipeline.add(bin)
pipeline.set_state(Gst.State.PLAYING)
loop = GObject.MainLoop()
loop.run()
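
For completeness, a matching receiver for the sender above, built the same way with Gst.parse_launch (the port matches one of the multiudpsink clients; this block is an addition, not part of the original snippet):

receiver = Gst.parse_launch(
    'udpsrc port=1234 caps="application/x-rtp, encoding-name=JPEG, payload=26"'
    ' ! rtpjpegdepay ! jpegdec ! autovideosink')
receiver.set_state(Gst.State.PLAYING)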
Example 7
def main(args):
    # Start a fresh results file (erasing previous contents) and write the header.
    with open('optimal_frame_extraction.txt', 'w') as the_file:
        the_file.write(str('f-no v-id wid hei x-top y-left lane date time'))
        the_file.write('\n')

    # Check input arguments
    if len(args) < 2:
        sys.stderr.write(
            "usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" %
            args[0])
        sys.exit(1)

    for i in range(0, len(args) - 2):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 2

    global folder_name
    folder_name = args[-1]
    if path.exists(folder_name):
        sys.stderr.write("The output folder %s already exists. Removing...\n" %
                         folder_name)
        shutil.rmtree(folder_name, ignore_errors=True)
        #sys.exit(1)

    os.mkdir(folder_name)
    print("Frames will be saved in ", folder_name)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    tracker = Gst.ElementFactory.make(
        "nvtracker", "tracker")  # tracker to assign unique ids to objects
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest_imagedata_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    sink.set_property("sync", 0)

    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER,
                                 tiler_sink_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if (i != 0):
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass

    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
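
fps_streams and GETFPS come from the DeepStream sample apps; a minimal equivalent of GETFPS, for reference (the real helper also tracks a first-frame flag):

import time

class GETFPS:
    def __init__(self, stream_id):
        self.stream_id = stream_id
        self.start_time = time.time()
        self.frame_count = 0

    def get_fps(self):
        # Count frames and report the average rate every five seconds.
        self.frame_count += 1
        elapsed = time.time() - self.start_time
        if elapsed >= 5.0:
            print("FPS of stream", self.stream_id, "is",
                  round(self.frame_count / elapsed, 2))
            self.start_time = time.time()
            self.frame_count = 0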
Example 8
    def Prepare(self):
        '''
        Build the gstreamer pipeline and all necessary objects and bindings.
        '''
        GObject.threads_init()

        self.ready = threading.Event()
        self.ready.set()

        self.active = True
        self.finished = False
        frameRate = self.GetProfile().GetFrameRate()
        # one frame's duration in Gst clock units (nanoseconds): 1000 ms / fps
        self.imgDuration = int(round(1000 * Gst.MSECOND / frameRate.AsFloat()))
        self._Log(logging.DEBUG, "set imgDuration=%s", self.imgDuration)

        self.pipeline = Gst.Pipeline()

        caps = Gst.caps_from_string("image/jpeg,framerate={0}".format(
            frameRate.AsStr()))
        videoSrc = Gst.ElementFactory.make("appsrc")
        videoSrc.set_property("block", True)
        videoSrc.set_property("caps", caps)
        videoSrc.connect("need-data", self._GstNeedData)
        self.pipeline.add(videoSrc)

        queueVideo = Gst.ElementFactory.make("queue")
        self.pipeline.add(queueVideo)

        jpegDecoder = Gst.ElementFactory.make("jpegdec")
        self.pipeline.add(jpegDecoder)

        colorConverter = Gst.ElementFactory.make("videoconvert")
        self.pipeline.add(colorConverter)

        videoEnc = self._GetVideoEncoder()
        self.pipeline.add(videoEnc)

        # Initialise to None so the link step below can test it even when
        # subtitles are disabled.
        self.textoverlay = None
        if self.GetTypedProperty(
                "RenderSubtitle",
                bool) and Gst.ElementFactory.find("textoverlay"):
            self.textoverlay = Gst.ElementFactory.make("textoverlay")
            self.textoverlay.set_property("text", "")
            self._SetupTextOverlay()
            self.pipeline.add(self.textoverlay)

        # link elements for video stream
        videoSrc.link(jpegDecoder)
        jpegDecoder.link(colorConverter)
        if self.textoverlay:
            colorConverter.link(self.textoverlay)
            self.textoverlay.link(queueVideo)
        else:
            colorConverter.link(queueVideo)
        queueVideo.link(videoEnc)

        audioEnc = None
        if self.GetAudioFiles():
            self.concat = Gst.ElementFactory.make("concat")
            self.pipeline.add(self.concat)

            srcpad = self.concat.get_static_pad("src")
            srcpad.add_probe(
                Gst.PadProbeType.BUFFER,  # | Gst.PadProbeType.EVENT_DOWNSTREAM,
                self._GstProbeBuffer)

            self._GstAddAudioFile(self.GetAudioFiles()[self.idxAudioFile])

            audioConv = Gst.ElementFactory.make("audioconvert")
            self.pipeline.add(audioConv)

            audiorate = Gst.ElementFactory.make("audioresample")
            self.pipeline.add(audiorate)

            audioQueue = Gst.ElementFactory.make("queue")
            self.pipeline.add(audioQueue)

            audioEnc = self._GetAudioEncoder()
            self.pipeline.add(audioEnc)

            self.concat.link(audioConv)
            audioConv.link(audiorate)
            audiorate.link(audioQueue)
            audioQueue.link(audioEnc)

        if self.GetProfile().IsMPEGProfile():
            vp = Gst.ElementFactory.make("mpegvideoparse")
            self.pipeline.add(vp)
            videoEnc.link(vp)
            videoEnc = vp

            if audioEnc:
                ap = Gst.ElementFactory.make("mpegaudioparse")
                self.pipeline.add(ap)
                audioEnc.link(ap)
                audioEnc = ap
        elif isinstance(self, MkvX265AC3):
            vp = Gst.ElementFactory.make("h265parse")
            self.pipeline.add(vp)
            videoEnc.link(vp)
            videoEnc = vp

        mux = self._GetMux()
        self.pipeline.add(mux)

        videoQueue2 = Gst.ElementFactory.make("queue")
        self.pipeline.add(videoQueue2)

        videoEncCaps = self._GetVideoEncoderCaps()
        if videoEncCaps:
            videoEnc.link_filtered(videoQueue2, videoEncCaps)
        else:
            videoEnc.link(videoQueue2)
        videoQueue2.link(mux)

        if audioEnc:
            audioQueue2 = Gst.ElementFactory.make("queue")
            self.pipeline.add(audioQueue2)
            audioEnc.link(audioQueue2)
            audioQueue2.link(mux)

        sink = Gst.ElementFactory.make("filesink")
        sink.set_property("location", self.GetOutputFile())
        self.pipeline.add(sink)

        mux.link(sink)

        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self._GstOnMessage)

        self.pipeline.set_state(Gst.State.PLAYING)

        self.gtkMainloop = GObject.MainLoop()
        gtkMainloopThread = threading.Thread(name="gtkMainLoop",
                                             target=self._GtkMainloop)
        gtkMainloopThread.start()

        self.ready.clear()
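
A sketch of the need-data handler wired up in Prepare(): each callback pushes one JPEG frame stamped with the imgDuration computed above. The _NextImagePath helper is hypothetical; the original implementation is not shown.

def _GstNeedData(self, src, length):
    path = self._NextImagePath()  # hypothetical helper yielding the next JPEG
    if path is None:
        src.emit("end-of-stream")
        return
    with open(path, "rb") as f:
        buf = Gst.Buffer.new_wrapped(f.read())
    buf.duration = self.imgDuration  # per-frame duration in ns, from Prepare()
    src.emit("push-buffer", buf)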
Example 9
def main():
    # Check input arguments
    # Allows any number of sources; in our case, streams from the Meraki cameras
    reading_server_config()

    number_sources = len(get_sources())

    if number_sources < 1:
        log_error(
            "No source to analyze, or no service associated with the source; check the configuration file"
        )

    # Flag to check whether at least one source is live
    is_live = False

    for i in range(0, number_sources):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
        #print(fps_streams["stream{0}".format(i)])

    global folder_name
    #folder_name=args[-1]
    folder_name = "frames"
    folder_name = "placas_encontrada"  # overrides the line above; only this value is used
    if not path.exists(folder_name):
        os.mkdir(folder_name)
    #    sys.stderr.write("The output folder %s already exists. Please remove it first.\n" % folder_name)
    #    sys.exit(1)

    print("Frames will be saved in ", folder_name)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")

    # Create nvstreammux instance to form batches from one or more sources.

    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")

    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    # Create a source element that accepts any type of video or RTSP stream
    i = 0
    for source in get_sources():

        if not path.exists(folder_name + "/stream_" + str(i)):
            os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0

        print("Creating source_bin...........", i, '.-', source, " \n ")
        uri_name = source

        if uri_name.find("rtsp://") == 0:
            print('is_alive_TRUE')
            is_live = True

        source_bin = create_source_bin(i, uri_name)

        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")

        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)

        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")

        srcpad = source_bin.get_static_pad("src")

        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")

        srcpad.link(sinkpad)
        i += 1

    # The Meraki RTSP video is already H264-optimized, so creating an
    # h264parse element should not be necessary.
    # print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file

    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    #
    #  version 2.1 will not run secondary inference,
    #  so sgie1, sgie2 and sgie3 are not enabled
    #

    #sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    #if not sgie1:
    #    sys.stderr.write(" Unable to make sgie1 \n")

    #sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    #if not sgie1:
    #    sys.stderr.write(" Unable to make sgie2 \n")

    #sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    #if not sgie3:
    #    sys.stderr.write(" Unable to make sgie3 \n")

    #
    #   Version 2.1 should also make on-screen output optional
    #

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")

    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    sink.set_property('sync', 0)

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    # streammux size; if the video comes in at 720p, it adjusts automatically

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    #
    # Model configuration
    # dstest2_pgie_config contains the standard model, for yoloV3, yoloV3_tiny and fasterRCNN
    #

    #pgie.set_property('config-file-path', CURRENT_DIR + "/configs/dstest2_pgie_config.txt")
    #pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_nano.txt")
    #pgie.set_property('config-file-path', CURRENT_DIR + "/configs/deepstream_app_source1_video_masknet_gpu.txt")
    #pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_yoloV3.txt")
    #pgie.set_property('config-file-path', CURRENT_DIR + "/configs/kairos_peoplenet_pgie_config.txt")
    # pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_yoloV3_tiny.txt")
    # pgie.set_property('config-file-path', CURRENT_DIR + "/configs/config_infer_primary_fasterRCNN.txt")
    # TODO: add the full path of the configuration file

    pgie.set_property('config-file-path',
                      CURRENT_DIR + "/configs/pgie_config_fd_lpd.txt"
                      )  # model for faces, plates, vehicle model and make
    pgie_batch_size = pgie.get_property("batch-size")
    print(pgie_batch_size)
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    # Set properties of pgie and sgie
    # version 2.1 does not configure secondary inference
    #

    #sgie1.set_property('config-file-path', CURRENT_DIR + "/configs/dstest2_sgie1_config.txt")
    #sgie2.set_property('config-file-path', CURRENT_DIR + "/configs/dstest2_sgie2_config.txt")
    #sgie3.set_property('config-file-path', CURRENT_DIR + "/configs/dstest2_sgie3_config.txt")

    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('configs/Plate_tracker_config.txt')
    #config.read('configs/kairos_peoplenet_tracker_config.txt')
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            print(tracker_width)
            tracker.set_property('tracker-width', tracker_width)
        elif key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            print(tracker_height)
            tracker.set_property('tracker-height', tracker_height)
        elif key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        elif key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        elif key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        elif key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    # Build the tiler grid
    tiler_rows = int(math.sqrt(number_sources))  # e.g. 3 sources -> 1 row
    tiler_columns = int(math.ceil(
        (1.0 * number_sources) / tiler_rows))  # e.g. 3 sources -> 3 columns
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    print("Adding elements to Pipeline \n")

    #
    #  version 2.1 does not require secondary inference
    #
    pipeline.add(h264parser)  # added h264 parser
    pipeline.add(decoder)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv1)  # added for easier image handling
    pipeline.add(filter1)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    # we link the elements together:
    # source_bin -> h264parse -> nvv4l2decoder -> streammux -> pgie ->
    # nvvidconv1 -> filter1 -> tracker -> tiler -> nvvidconv -> nvosd -> sink
    print("Linking elements in the Pipeline \n")

    # These pads were already linked inside the loop above, and the source
    # bins already feed streammux directly, so the legacy h264parse/decoder
    # links below would fail at runtime; kept commented out for reference.
    #sinkpad = streammux.get_request_pad("sink_0")
    #if not sinkpad:
    #    sys.stderr.write(" Unable to get the sink pad of streammux \n")
    #srcpad = decoder.get_static_pad("src")
    #if not srcpad:
    #    sys.stderr.write(" Unable to get source pad of decoder \n")

    #srcpad.link(sinkpad)
    #source_bin.link(h264parser)
    #h264parser.link(decoder)
    #decoder.link(streammux)
    # -------
    streammux.link(pgie)
    pgie.link(nvvidconv1)
    nvvidconv1.link(filter1)
    filter1.link(tracker)
    tracker.link(tiler)
    #filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    #pgie.link(tracker)
    '''
    srcpad.link(sinkpad)
    source_bin.link(h264parser)
    h264parser.link(decoder)     
    #source_bin.link(decoder)     Se agregaron las dos lineas anteriores
    decoder.link(streammux)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    '''

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Let's add a probe to get informed of the generated metadata. Here it is
    # attached to the tracker's src pad, where the buffer already carries the
    # inference and tracking metadata.

    tiler_src_pad = tracker.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)

    # start playback and listen to events
    try:
        loop.run()
    except Exception as e:
        print("Exception in main loop: " + str(e))

    # cleanup
    pipeline.set_state(Gst.State.NULL)
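
Every main() here hands bus messages to a bus_call callback that never appears in the snippets; the stock version from the DeepStream samples is:

def bus_call(bus, message, loop):
    t = message.type
    if t == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream\n")
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        sys.stderr.write("Warning: %s: %s\n" % (err, debug))
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s\n" % (err, debug))
        loop.quit()
    return True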
Example 10
def main():
    
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write("Unable to create Pipeline")
    
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "convertor-2")
    caps = create_element_or_error("capsfilter", "filter-convertor-2")
    encoder = create_element_or_error("nvv4l2h265enc", "encoder")
    parser = create_element_or_error("h265parse", "h265-parser")
    rtppay = create_element_or_error("rtph265pay", "rtppay")
    sink = create_element_or_error("udpsink", "udpsink")


    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)

    encoder.set_property('insert-sps-pps', True)
    encoder.set_property('bitrate', 4000000)
    
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt")

    rtppay.set_property('pt', 96)
    
    updsink_port_num = 5400

    sink.set_property('host', '127.0.0.1')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)
    
    # Add elements to the pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(encoder)
    pipeline.add(parser)
    pipeline.add(rtppay)
    pipeline.add(sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(encoder)
    encoder.link(parser)
    parser.link(rtppay)
    rtppay.link(sink)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect ("message", bus_call, loop)


    # Start streaming
    rtsp_port_num = 8554
    
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch( "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (updsink_port_num, 'H265'))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/streaming", factory)
    
    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/streaming ***\n\n" % rtsp_port_num)

    print('Create OSD Sink Pad')
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass


    # Cleanup
    pipeline.set_state(Gst.State.NULL)
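
The osd_sink_pad_buffer_probe attached above is where per-frame metadata is read through the pyds bindings. A minimal pass-through version that only counts objects, as a sketch of the pattern (not the original probe):

import pyds

def osd_sink_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        print("Frame", frame_meta.frame_num, "objects:", frame_meta.num_obj_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK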
Example 11
def main():
    
    args = parse_args()
    global g_args
    g_args = args

    num_sources = args.num_sources
    sgie_batch_size = args.sgie_batch_size
    path = os.path.abspath(os.getcwd())
    
    if (args.prof):
        INPUT_VIDEO = 'file://' + path +'/../source_code/dataset/sample_720p_prof.mp4'
    else :
        INPUT_VIDEO = 'file:///opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.h264'
    print("Creating pipeline with "+str(num_sources)+" streams")
    # Initialise FPS
    for i in range(0,num_sources):
            fps_streams_new["stream{0}".format(i)]=GETFPS(i)

    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    ########### Create Elements required for the Pipeline ########### 

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = make_elm_or_print_err("nvstreammux", "Stream-muxer","Stream-muxer") 

    pipeline.add(streammux)

    for i in range(num_sources):
        print("Creating source_bin ",i," \n ")
        uri_name=INPUT_VIDEO
        if uri_name.find("rtsp://") == 0 :
            is_live = True
        source_bin=create_source_bin(args, i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname="sink_%u" %i
        sinkpad = streammux.get_request_pad(padname) 
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)


    # Use nvinfer to run inferencing on decoder's output, behaviour of inferencing is set through config file
    pgie = make_elm_or_print_err("nvinfer", "primary-inference" ,"pgie")
    # Use nvtracker to give objects unique-ids
    tracker = make_elm_or_print_err("nvtracker", "tracker",'tracker')
    # Secondary inference for finding the car color
    sgie1 = make_elm_or_print_err("nvinfer", "secondary1-nvinference-engine",'sgie1')
    # Secondary inference for finding the car make
    sgie2 = make_elm_or_print_err("nvinfer", "secondary2-nvinference-engine",'sgie2')
    # Secondary inference for finding the car type
    sgie3 = make_elm_or_print_err("nvinfer", "secondary3-nvinference-engine",'sgie3')
    # Create Sink for storing the output 
    fakesink = make_elm_or_print_err("fakesink", "fakesink", "Sink")

    # Queues to enable buffering
    queue1=make_elm_or_print_err("queue","queue1","queue1")
    queue2=make_elm_or_print_err("queue","queue2","queue2")
    queue3=make_elm_or_print_err("queue","queue3","queue3")
    queue4=make_elm_or_print_err("queue","queue4","queue4")
    queue5=make_elm_or_print_err("queue","queue5","queue5")
    queue6=make_elm_or_print_err("queue","queue6","queue6")

    ############ Set properties for the Elements ############
    # Set Input Width , Height and Batch Size 
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', num_sources)
    # Timeout in microseconds to wait after the first buffer is available 
    # to push the batch even if a complete batch is not formed.
    streammux.set_property('batched-push-timeout', 4000000)
    # Set configuration file for nvinfer 
    pgie.set_property('config-file-path', "../source_code/N1/dstest4_pgie_config.txt")
    sgie1.set_property('config-file-path', "../source_code/N1/dstest4_sgie1_config.txt")
    sgie2.set_property('config-file-path', "../source_code/N1/dstest4_sgie2_config.txt")
    sgie3.set_property('config-file-path', "../source_code/N1/dstest4_sgie3_config.txt")
    # Setting batch_size for pgie
    pgie_batch_size=pgie.get_property("batch-size")
    if(pgie_batch_size != num_sources):
        print("WARNING: Overriding infer-config batch-size",pgie_batch_size," with number of sources ", num_sources," \n")
        pgie.set_property("batch-size",num_sources)
        
    #################### Secondary Batch size ######################
    # Setting batch_size for sgie1
    sgie1_batch_size=sgie1.get_property("batch-size")
    if(sgie1_batch_size != sgie_batch_size):
        print("WARNING: Overriding infer-config batch-size",sgie1_batch_size," with sgie batch size ", sgie_batch_size," \n")
        sgie1.set_property("batch-size",sgie_batch_size)
    # Setting batch_size for sgie2
    sgie2_batch_size=sgie2.get_property("batch-size")
    if(sgie2_batch_size != sgie_batch_size):
        print("WARNING: Overriding infer-config batch-size",sgie2_batch_size," with sgie batch size ", sgie_batch_size," \n")
        sgie2.set_property("batch-size",sgie_batch_size)
    # Setting batch_size for sgie3
    sgie3_batch_size=sgie3.get_property("batch-size")
    if(sgie3_batch_size != sgie_batch_size):
        print("WARNING: Overriding infer-config batch-size",sgie3_batch_size," with sgie batch size ", sgie_batch_size," \n")
        sgie3.set_property("batch-size",sgie_batch_size)
    # Set properties of tracker from tracker_config
    config = configparser.ConfigParser()
    config.read('../source_code/N1/dstest4_tracker_config.txt')
    config.sections()
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
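
    # For reference, the '[tracker]' section of such a config file typically
    # looks like this (values illustrative, not taken from the course files):
    #
    #   [tracker]
    #   tracker-width=640
    #   tracker-height=384
    #   gpu-id=0
    #   ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_mot_klt.so
    #   enable-batch-process=1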

    # Fake sink properties
    fakesink.set_property("sync", 0)
    fakesink.set_property("async", 0)

    ########## Add and Link Elements in the Pipeline ##########

    print("Adding elements to Pipeline \n")
    pipeline.add(queue1)
    pipeline.add(pgie)
    pipeline.add(queue2)
    pipeline.add(tracker)
    pipeline.add(queue3)
    pipeline.add(sgie1)
    pipeline.add(queue4)
    pipeline.add(sgie2)
    pipeline.add(queue5)
    pipeline.add(sgie3)
    pipeline.add(queue6)
    pipeline.add(fakesink)

    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(sgie1)
    sgie1.link(queue4)
    queue4.link(sgie2)
    sgie2.link(queue5)
    queue5.link(sgie3)
    sgie3.link(queue6)
    queue6.link(fakesink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    print("Added and Linked elements to pipeline")

    src_pad=sgie3.get_static_pad("src")
    if not src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        src_pad.add_probe(Gst.PadProbeType.BUFFER, src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    start_time = time.time()
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
    print("--- %s seconds ---" % (time.time() - start_time))
Example no. 12
    def __init__(self, path):
        print "Init"
        self.path = path
        self.caps = Gst.caps_from_string(
            'audio/x-raw,channels=2,rate=48000,format=F32LE')

        self.src1 = Gst.ElementFactory.make('filesrc', None)
        self.src1_decode = Gst.ElementFactory.make('mad', None)
        self.src1_queue = Gst.ElementFactory.make('queue', None)
        self.src1_audioconvert = Gst.ElementFactory.make('audioconvert', None)
        self.src1_audioresample = Gst.ElementFactory.make(
            'audioresample', None)
        self.src1_volume = Gst.ElementFactory.make('volume', None)

        # queue on this tee branch so it cannot block the audiosink branch
        self.src1_volume_queue = Gst.ElementFactory.make('queue', None)

        self.filesink_queue = Gst.ElementFactory.make('queue', None)
        self.filesink_audioconvert = Gst.ElementFactory.make(
            'audioconvert', None)

        self.tee1 = Gst.ElementFactory.make('tee', None)
        self.audiomixer = Gst.ElementFactory.make('audiomixer', None)

        self.audiosink_queue = Gst.ElementFactory.make('queue', None)
        self.audiosink_audioconvert = Gst.ElementFactory.make(
            'audioconvert', None)
        self.audiosink = Gst.ElementFactory.make('autoaudiosink', None)
        self.filesink_encode = Gst.ElementFactory.make('lamemp3enc', None)
        self.filesink = Gst.ElementFactory.make('filesink', None)

        self.src2 = Gst.ElementFactory.make('autoaudiosrc', None)
        self.src2_level = Gst.ElementFactory.make('level', None)
        self.src2_volume = Gst.ElementFactory.make('volume', None)

        self.src2_queue = Gst.ElementFactory.make('queue', None)
        self.src2_audioconvert = Gst.ElementFactory.make('audioconvert', None)
        self.src2_audioresample = Gst.ElementFactory.make(
            'audioresample', None)

        self.pipeline = Gst.Pipeline()
        self.pipeline.add(self.src1)
        self.pipeline.add(self.src1_decode)
        self.pipeline.add(self.src1_volume)
        self.pipeline.add(self.tee1)
        self.pipeline.add(self.src1_volume_queue)
        self.pipeline.add(self.src1_queue)
        self.pipeline.add(self.src1_audioconvert)
        self.pipeline.add(self.src1_audioresample)
        self.pipeline.add(self.filesink_queue)
        self.pipeline.add(self.filesink_audioconvert)
        self.pipeline.add(self.filesink_encode)
        self.pipeline.add(self.audiomixer)
        self.pipeline.add(self.filesink)

        self.pipeline.add(self.audiosink_queue)
        self.pipeline.add(self.audiosink_audioconvert)
        self.pipeline.add(self.audiosink)
        self.pipeline.add(self.src2)
        self.pipeline.add(self.src2_level)
        self.pipeline.add(self.src2_volume)
        self.pipeline.add(self.src2_queue)
        self.pipeline.add(self.src2_audioconvert)
        self.pipeline.add(self.src2_audioresample)

        self.src1.link(self.src1_decode)
        self.src1_decode.link(self.src1_queue)
        self.src1_queue.link(self.src1_audioconvert)
        self.src1_audioconvert.link(self.src1_audioresample)
        self.src1_audioresample.link_filtered(self.tee1, self.caps)
        self.tee1.link_filtered(self.src1_volume_queue, self.caps)
        self.src1_volume_queue.link(self.src1_volume)
        self.src1_volume.link(self.audiomixer)
        self.audiomixer.link(self.filesink_queue)
        self.filesink_queue.link(self.filesink_audioconvert)
        self.filesink_audioconvert.link(self.filesink_encode)
        self.filesink_encode.link(self.filesink)

        self.tee1.link(self.audiosink_queue)
        self.audiosink_queue.link(self.audiosink_audioconvert)
        self.audiosink_audioconvert.link_filtered(self.audiosink, self.caps)

        self.src2.link(self.src2_level)
        self.src2_level.link(self.src2_volume)
        self.src2_volume.link(self.src2_queue)
        self.src2_queue.link(self.src2_audioconvert)
        self.src2_audioconvert.link(self.src2_audioresample)
        self.src2_audioresample.link_filtered(self.audiomixer, self.caps)

        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.on_message)
def main(args):

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")
    print("Creating streamux")

    streammux = create_element_or_error("nvstreammux", "Stream-muxer")

    pipeline.add(streammux)

    source_bin = create_source_bin(
        "file:/deepstream-examples/videos/front.mp4")

    if not source_bin:
        sys.stderr.write("Unable to create source bin")

    pipeline.add(source_bin)

    sinkpad = streammux.get_request_pad('sink_0')
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin")

    srcpad = source_bin.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin")

    srcpad.link(sinkpad)

    queue1 = create_element_or_error("queue", "queue1")
    # queue2 = create_element_or_error("queue","queue2")
    # queue3 = create_element_or_error("queue","queue3")
    # queue4 = create_element_or_error("queue","queue4")
    # queue5 = create_element_or_error("queue","queue5")
    queue6 = create_element_or_error("queue", "queue6")
    queue7 = create_element_or_error("queue", "queue7")

    pipeline.add(queue1)
    # pipeline.add(queue2)
    # pipeline.add(queue3)
    # pipeline.add(queue4)
    # pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)

    pgie = create_element_or_error("nvinfer", "primary-inference")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    converter = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd.set_property('process-mode', 2)
    # nvosd.set_property('display-text', 0)

    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "nvvideo-renderer")

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    # pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.1/samples/configs/deepstream-app/config_infer_primary.txt")
    pgie.set_property('config-file-path', "models/ssd-nurawash/config.txt")

    print("Adding elements to Pipeline")
    pipeline.add(pgie)
    pipeline.add(converter)
    pipeline.add(nvosd)
    pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline")
    streammux.link(pgie)
    pgie.link(converter)
    converter.link(nvosd)
    nvosd.link(transform)
    transform.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # analytics_src_pad = analytics.get_static_pad("src")
    # if not analytics_src_pad:
    #     sys.stderr.write("Unable to get src pad")
    # else:
    #     analytics_src_pad.add_probe(Gst.PadProbeType.BUFFER, nvanalytics_src_pad_buffer_probe, 0)

    # List the sources
    print("Starting pipeline")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app")
    pipeline.set_state(Gst.State.NULL)
    def create_pipeline(self, conf):

        self.appsrc = Gst.ElementFactory.make("appsrc", "appsrc")
        self.decodebin = Gst.ElementFactory.make("decodebin", "decodebin")
        self.audioconvert = Gst.ElementFactory.make("audioconvert",
                                                    "audioconvert")
        self.audioresample = Gst.ElementFactory.make("audioresample",
                                                     "audioresample")
        self.tee = Gst.ElementFactory.make("tee", "tee")
        self.queue1 = Gst.ElementFactory.make("queue", "queue1")
        self.filesink = Gst.ElementFactory.make("filesink", "filesink")
        self.queue2 = Gst.ElementFactory.make("queue", "queue2")
        self.asr = Gst.ElementFactory.make("kaldinnet2onlinedecoder", "asr")
        self.fakesink = Gst.ElementFactory.make("fakesink", "fakesink")

        if not self.asr:
            print("ERROR: Couldn't create the kaldinnet2onlinedecoder element!", file=sys.stderr)
            gst_plugin_path = os.environ.get("GST_PLUGIN_PATH")
            if gst_plugin_path:
                print("Couldn't find kaldinnet2onlinedecoder element at %s. "
                      "If it's not the right path, try to set GST_PLUGIN_PATH to the right one, and retry. "
                      "You can also try to run the following command: "
                      "'GST_PLUGIN_PATH=%s gst-inspect-1.0 kaldinnet2onlinedecoder'."
                      % (gst_plugin_path, gst_plugin_path), file=sys.stderr)
            else:
                print("The environment variable GST_PLUGIN_PATH wasn't set or it's empty. "
                      "Try to set the GST_PLUGIN_PATH environment variable, and retry.",
                      file=sys.stderr)
            sys.exit(-1)

        # This needs to be set first
        if "use-threaded-decoder" in conf["decoder"]:
            self.asr.set_property("use-threaded-decoder",
                                  conf["decoder"]["use-threaded-decoder"])

        decoder_config = conf.get("decoder", {})
        if 'nnet-mode' in decoder_config:
            logger.info("Setting decoder property: %s = %s" %
                        ('nnet-mode', decoder_config['nnet-mode']))
            self.asr.set_property('nnet-mode', decoder_config['nnet-mode'])
            del decoder_config['nnet-mode']

        decoder_config = OrderedDict(decoder_config)

        # Re-inserting 'fst' and 'model' moves them to the end of the
        # OrderedDict, so they are applied after the other properties
        if "fst" in decoder_config:
            decoder_config["fst"] = decoder_config.pop("fst")
        if "model" in decoder_config:
            decoder_config["model"] = decoder_config.pop("model")

        for (key, val) in decoder_config.items():
            if key != "use-threaded-decoder":
                logger.info("Setting decoder property: %s = %s" % (key, val))
                self.asr.set_property(key, val)

        self.appsrc.set_property("is-live", True)
        self.filesink.set_property("location", "/dev/null")
        logger.info('Created GStreamer elements')

        self.pipeline = Gst.Pipeline()
        for element in [
                self.appsrc, self.decodebin, self.audioconvert,
                self.audioresample, self.tee, self.queue1, self.filesink,
                self.queue2, self.asr, self.fakesink
        ]:
            logger.debug("Adding %s to the pipeline" % element)
            self.pipeline.add(element)

        logger.info('Linking GStreamer elements')

        self.appsrc.link(self.decodebin)
        #self.appsrc.link(self.audioconvert)
        self.decodebin.connect('pad-added', self._connect_decoder)
        self.audioconvert.link(self.audioresample)

        self.audioresample.link(self.tee)

        self.tee.link(self.queue1)
        self.queue1.link(self.filesink)

        self.tee.link(self.queue2)
        self.queue2.link(self.asr)

        self.asr.link(self.fakesink)

        # Create bus and connect several handlers
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.enable_sync_message_emission()
        self.bus.connect('message::eos', self._on_eos)
        self.bus.connect('message::error', self._on_error)
        #self.bus.connect('message::cutter', self._on_cutter)

        self.asr.connect('partial-result', self._on_partial_result)
        self.asr.connect('final-result', self._on_final_result)
        self.asr.connect('full-final-result', self._on_full_final_result)

        logger.info("Setting pipeline to READY")
        self.pipeline.set_state(Gst.State.READY)
        logger.info("Set pipeline to READY")
Example no. 15
def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)

    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # READ RTSP/MP4 STREAM
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    # NOTE: number_sources is important!!!
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    sgie = Gst.ElementFactory.make("nvinfer", "secondary-nvinference-engine")
    if not sgie:
        sys.stderr.write(" Unable to make sgie \n")

    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")

    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    nvosd.set_property('process-mode', OSD_PROCESS_MODE)
    nvosd.set_property('display-text', OSD_DISPLAY_TEXT)

    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    print("Playing file %s " % args[1])
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', PGIE_CONFIG_FILE)
    sgie.set_property('config-file-path', SGIE_CONFIG_FILE)
    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read(TRACKER_KLT_CONFIG)
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)

    # we link the elements together
    # rtsp stream -> nvinfer -> nvinfer2 -> nvvidconv -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie)
    sgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Let's add a probe to get informed of the generated metadata; we add the
    # probe to the sink pad of the osd element, since by that time the buffer
    # will have all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    print("Starting pipeline \n")

    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass

    # cleanup
    pipeline.set_state(Gst.State.NULL)
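
# Almost every example here hands bus messages to a bus_call helper that is
# not shown in these listings. A minimal sketch, following the conventional
# shape of the DeepStream Python sample apps:
def bus_call(bus, message, loop):
    t = message.type
    if t == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream\n")
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        sys.stderr.write("Warning: %s: %s\n" % (err, debug))
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s\n" % (err, debug))
        loop.quit()
    return True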
def main(args):
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")

    # Since the data format in the input file is elementary h264 stream,
    # we need a h264parser
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")

    # Use nvdec_h264 for hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_postosd")
    if not nvvidconv_postosd:
        sys.stderr.write(" Unable to create nvvidconv_postosd \n")

    # Create a caps filter
    caps = Gst.ElementFactory.make("capsfilter", "filter")
    caps.set_property(
        "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420"))

    # Make the encoder
    if codec == "H264":
        encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
        print("Creating H264 Encoder")
    elif codec == "H265":
        encoder = Gst.ElementFactory.make("nvv4l2h265enc", "encoder")
        print("Creating H265 Encoder")
    if not encoder:
        sys.stderr.write(" Unable to create encoder")
    encoder.set_property('bitrate', bitrate)
    if is_aarch64():
        encoder.set_property('preset-level', 1)
        encoder.set_property('insert-sps-pps', 1)
        encoder.set_property('bufapi-version', 1)

    # Make the payload-encode video into RTP packets
    if codec == "H264":
        rtppay = Gst.ElementFactory.make("rtph264pay", "rtppay")
        print("Creating H264 rtppay")
    elif codec == "H265":
        rtppay = Gst.ElementFactory.make("rtph265pay", "rtppay")
        print("Creating H265 rtppay")
    if not rtppay:
        sys.stderr.write(" Unable to create rtppay")

    # Make the UDP sink
    updsink_port_num = 5400
    sink = Gst.ElementFactory.make("udpsink", "udpsink")
    if not sink:
        sys.stderr.write(" Unable to create udpsink")

    sink.set_property('host', '224.224.255.255')
    sink.set_property('port', updsink_port_num)
    sink.set_property('async', False)
    sink.set_property('sync', 1)

    print("Playing file %s " % stream_path)
    source.set_property('location', stream_path)
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(nvvidconv_postosd)
    pipeline.add(caps)
    pipeline.add(encoder)
    pipeline.add(rtppay)
    pipeline.add(sink)

    # Link the elements together:
    # file-source -> h264-parser -> nvh264-decoder ->
    # nvinfer -> nvvidconv -> nvosd -> nvvidconv_postosd ->
    # caps -> encoder -> rtppay -> udpsink

    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")

    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")

    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(nvvidconv_postosd)
    nvvidconv_postosd.link(caps)
    caps.link(encoder)
    encoder.link(rtppay)
    rtppay.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start streaming
    rtsp_port_num = 8554

    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)

    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch(
        "( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )"
        % (updsink_port_num, codec))
    factory.set_shared(True)
    server.get_mount_points().add_factory("/ds-test", factory)

    print(
        "\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n"
        % rtsp_port_num)
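
    # To verify the stream from a client machine, a receiver pipeline along
    # these lines should work (illustrative, H264 shown):
    #   gst-launch-1.0 rtspsrc location=rtsp://<host>:8554/ds-test ! \
    #       rtph264depay ! h264parse ! avdec_h264 ! autovideosink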

    # Let's add a probe to get informed of the generated metadata; we add the
    # probe to the sink pad of the osd element, since by that time the buffer
    # will have all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
    def __init__(self):
        self.baseDir = "/run/media/peter/Elements/peter/data/tmp-20130801/"
        self.inFirstMinute = True

        import logging, logging.handlers
        # Make a global logging object.
        self.log = logging.getLogger("log")
        self.log.setLevel(logging.DEBUG)
        h = logging.StreamHandler()
        f = logging.Formatter(
            "%(levelname)s %(asctime)s <%(funcName)s> [%(lineno)d] %(message)s"
        )
        h.setFormatter(f)
        for handler in self.log.handlers:
            self.log.removeHandler(handler)

        self.log.addHandler(h)

        self.mainloop = GObject.MainLoop()

        # create gtk interface
        self.log.debug("create Gtk interface")
        self.window = Gtk.Window()
        self.window.connect('destroy', self.quit)
        self.window.set_default_size(800, 450)

        self.box = Gtk.Box(homogeneous=False, spacing=6)
        self.window.add(self.box)

        self.drawingarea = Gtk.DrawingArea()
        self.drawingarea.set_size_request(800, 350)
        self.box.pack_start(self.drawingarea, True, True, 0)

        self.button = Gtk.Button(label="Click Here")
        self.button.connect("clicked", self.on_button_clicked)
        self.box.pack_start(self.button, True, True, 0)

        self.log.debug("create self.pipelines")
        self.pipelines = dict()
        self.pipelines["main"] = Gst.Pipeline()
        self.pipelines["catch"] = Gst.Pipeline()

        self.log.debug("link message bus")
        # Create bus to get events from GStreamer pipeline
        self.bus = self.pipelines["main"].get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::error', self.on_error)

        # This is needed to make the video output in our DrawingArea:
        self.bus.enable_sync_message_emission()
        self.bus.connect('sync-message::element', self.on_sync_message)

        # create all self.elements that we will need later
        self.log.debug("create gst elements")

        # gst-launch-1.0 -e filesrc location="/run/media/peter/home/tmp/webcam/Video 66.mp4" ! qtdemux ! queue ! tee name=t ! queue ! h264parse ! avdec_h264 ! autovideosink t. ! queue ! h264parse ! mp4mux ! filesink location=/run/media/peter/home/tmp/webcam/test.mp4
        self.elements = dict()
        self.pads = dict()
        self.elements["src"] = Gst.ElementFactory.make("uvch264src", "src")

        self.elements["prevQueue"] = Gst.ElementFactory.make(
            "queue", "prevQueue")
        self.elements["vfcaps"] = Gst.ElementFactory.make(
            "capsfilter", "vfcaps")
        self.elements["preview_sink"] = Gst.ElementFactory.make(
            "autovideosink", "previewsink")

        self.elements["vidQueue"] = Gst.ElementFactory.make(
            "queue", "vidQueue")
        self.elements["vidcaps"] = Gst.ElementFactory.make(
            "capsfilter", "vidcaps")
        self.elements["t"] = Gst.ElementFactory.make("tee", "t")

        self.elements["srcQueue"] = Gst.ElementFactory.make(
            "queue", "srcQueue")
        self.elements["recBin1"] = Gst.Bin.new("recoding bin 1")
        self.elements["fileQueue1"] = Gst.ElementFactory.make(
            "queue", "fileQueue1")
        self.elements["ph264_1"] = Gst.ElementFactory.make(
            "h264parse", "ph264_1")
        self.elements["mux_1"] = Gst.ElementFactory.make("mp4mux", "mux1")
        self.elements["filesink1"] = Gst.ElementFactory.make(
            "filesink", "filesink1")

        self.elements["recBin2"] = Gst.Bin.new("recoding bin 2")
        self.elements["fileQueue2"] = Gst.ElementFactory.make(
            "queue", "fileQueue2")
        self.elements["ph264_2"] = Gst.ElementFactory.make(
            "h264parse", "ph264_2")
        self.elements["mux_2"] = Gst.ElementFactory.make("mp4mux", "mux2")
        self.elements["filesink2"] = Gst.ElementFactory.make(
            "filesink", "filesink2")

        self.elements["recBin3"] = Gst.Bin.new("recoding bin 3")
        self.elements["fileQueue3"] = Gst.ElementFactory.make(
            "queue", "fileQueue3")
        self.elements["ph264_3"] = Gst.ElementFactory.make(
            "h264parse", "ph264_3")
        self.elements["mux_3"] = Gst.ElementFactory.make("mp4mux", "mux3")
        self.elements["filesink3"] = Gst.ElementFactory.make(
            "filesink", "filesink3")

        if any(self.elements[k] is None for k in self.elements.keys()):
            raise RuntimeError(
                "one or more self.elements could not be created")

        self.log.debug("populate main pipeline")
        self.pipelines["main"].add(self.elements["src"])

        self.pipelines["main"].add(self.elements["prevQueue"])
        self.pipelines["main"].add(self.elements["vfcaps"])
        self.pipelines["main"].add(self.elements["preview_sink"])

        self.pipelines["main"].add(self.elements["vidQueue"])
        self.pipelines["main"].add(self.elements["vidcaps"])
        self.pipelines["main"].add(self.elements["t"])

        self.pipelines["main"].add(self.elements["srcQueue"])

        self.log.debug("link self.elements in main pipeline")
        self.log.debug("1. linking preview branch...")
        srcP2 = self.elements["src"].get_static_pad('vfsrc')
        tP2 = self.elements["prevQueue"].get_static_pad("sink")
        assert (srcP2.link(tP2) == Gst.PadLinkReturn.OK)
        assert (self.elements["prevQueue"].link(self.elements["vfcaps"]))
        assert (self.elements["vfcaps"].link(self.elements["preview_sink"]))

        self.log.debug("2. linking H264 branch until tee...")
        srcP = self.elements["src"].get_static_pad('vidsrc')
        tP = self.elements["vidQueue"].get_static_pad("sink")
        assert (srcP.link(tP) == Gst.PadLinkReturn.OK)
        assert (self.elements["vidQueue"].link(self.elements["vidcaps"]))
        assert (self.elements["vidcaps"].link(self.elements["t"]))  #

        self.log.debug("populate recBin1")
        self.elements["recBin1"].add(self.elements["fileQueue1"])
        self.elements["recBin1"].add(self.elements["ph264_1"])
        self.elements["recBin1"].add(self.elements["mux_1"])
        self.elements["recBin1"].add(self.elements["filesink1"])

        self.log.debug("link elements in recBin1")
        assert (self.elements["fileQueue1"].link(self.elements["ph264_1"]))
        assert (self.elements["ph264_1"].link(self.elements["mux_1"]))
        assert (self.elements["mux_1"].link(self.elements["filesink1"]))

        self.log.debug("create ghost pad for recBin1")
        self.elements["recBin1"].add_pad(
            Gst.GhostPad.new(
                "sink", self.elements["fileQueue1"].get_static_pad("sink")))

        self.log.debug("populate recBin2")
        self.elements["recBin2"].add(self.elements["fileQueue2"])
        self.elements["recBin2"].add(self.elements["ph264_2"])
        self.elements["recBin2"].add(self.elements["mux_2"])
        self.elements["recBin2"].add(self.elements["filesink2"])

        self.log.debug("link elements in recBin2")
        assert (self.elements["fileQueue2"].link(self.elements["ph264_2"]))
        assert (self.elements["ph264_2"].link(self.elements["mux_2"]))
        assert (self.elements["mux_2"].link(self.elements["filesink2"]))

        self.log.debug("create ghost pad for recBin2")
        self.elements["recBin2"].add_pad(
            Gst.GhostPad.new(
                "sink", self.elements["fileQueue2"].get_static_pad("sink")))

        self.log.debug("add recBin1 to main pipeline")
        self.pipelines["main"].add(self.elements["recBin1"])

        self.log.debug("link srcQueue --> recBin to tee")
        self.pads["tPad2"] = Gst.Element.get_request_pad(
            self.elements["t"], 'src_%u')
        self.pads["tPad2"].link(
            self.elements["srcQueue"].get_static_pad("sink"))
        self.elements["srcQueue"].link(self.elements["recBin1"])

        self.log.debug("set filesink1 location")
        self.updateFilesinkLocation(self.elements["filesink1"],
                                    self.elements["mux_1"])

        self.log.debug("set uvch264 properties")
        self.elements["src"].set_property("auto-start", True)
        self.elements["src"].set_property("fixed-framerate", True)
        self.elements["src"].set_property("async-handling", False)
        self.elements["src"].set_property("iframe-period", 30)
        #         self.elements["src"].set_property("num-clock-samples", -1)
        self.elements["src"].set_property("device", "/dev/video1")

        self.log.debug("set caps")
        caps = Gst.Caps.from_string(
            "video/x-h264,width=1920,height=1080,framerate=30/1,profile=constrained-baseline"
        )
        self.elements["vidcaps"].props.caps = caps
        caps2 = Gst.Caps.from_string(
            'video/x-raw,width=320,height=240,framerate=15/1')
        self.elements["vfcaps"].props.caps = caps2

        #         self.elements["mux_1"].set_property("dts-method", 2)

        self.debugBuffer = None

        self.log.debug("done")
        self.elementRefcounting()

        # register a function that initiates the filename swap in the Gtk
        # mainloop; make sure that it will be called at the beginning of the
        # next minute
        self.timeoutSec = 60
        GLib.timeout_add_seconds(self.timeoutSec - localtime().tm_sec,
                                 self.blockFirstFrame)
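
    # The sync-message handler connected above is what embeds the video in the
    # Gtk DrawingArea; a minimal sketch, assuming an X11 session (GdkX11 and
    # GstVideo must have been imported for get_xid()/set_window_handle()):
    def on_sync_message(self, bus, msg):
        struct = msg.get_structure()
        if struct and struct.get_name() == 'prepare-window-handle':
            xid = self.drawingarea.get_property('window').get_xid()
            msg.src.set_window_handle(xid)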
Example no. 18
def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    bin_name = "source-bin"
    source_bin = Gst.Bin.new(bin_name)
    if not source_bin:
        sys.stderr.write(" Unable to create source bin \n")
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    uri_decode_bin.set_property("uri", args[1])
    uri_decode_bin.connect("pad-added", cb_newpad, source_bin)
    uri_decode_bin.connect("child-added", decodebin_child_added, source_bin)
    Gst.Bin.add(source_bin, uri_decode_bin)
    source_bin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    pipeline.add(source_bin)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write("Unable to create sink pad bin \n")
    srcpad = source_bin.get_static_pad("src")
    if not srcpad:
        sys.stderr.write("Unable to create src pad bin \n")
    srcpad.link(sinkpad)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    # Use nvinfer to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    sink.set_property("qos", 0)

    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != 1:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              "with 1 (single source) \n")
        pgie.set_property("batch-size", 1)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Let's add a probe to get informed of the generated metadata; we add the
    # probe to the sink pad of the osd element, since by that time the buffer
    # will have all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
    cv2.destroyAllWindows()
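
# The uridecodebin callbacks used above (cb_newpad, decodebin_child_added) come
# from the DeepStream sample helpers; a sketch of cb_newpad, which retargets
# the source bin's ghost pad once the decoder exposes its video pad:
def cb_newpad(decodebin, decoder_src_pad, data):
    caps = decoder_src_pad.get_current_caps()
    gstname = caps.get_structure(0).get_name()
    features = caps.get_features(0)
    source_bin = data
    if gstname.find("video") != -1:
        # Only NVMM (GPU) buffers can feed nvstreammux
        if features.contains("memory:NVMM"):
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write("Error: Decodebin did not pick nvidia decoder plugin.\n")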
Example no. 19
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    print(Gst)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write("Unable to create Pipeline")

    # Create GST Source
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    caps = Gst.ElementFactory.make("capsfilter", "source-caps")
    caps.set_property(
        "caps",
        Gst.Caps.from_string(
            "video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, format=(string)NV12, framerate=(fraction)30/1"
        ))

    # Create Gst Threads
    tee = create_element_or_error("tee", "tee")
    streaming_queue = create_element_or_error("queue", "streaming_queue")
    recording_queue = create_element_or_error("queue", "recording_queue")
    display_queue = create_element_or_error("queue", "display_queue")

    # Create Gst Elements for Streaming Branch
    s_encoder = create_element_or_error("nvv4l2h264enc", "streaming-encoder")
    s_parser = create_element_or_error("h264parse", "streaming-parser")
    s_muxer = create_element_or_error("flvmux", "streaming-muxer")
    s_sink = create_element_or_error("rtmpsink", "streaming-sink")

    # Create Gst Elements for Recording Branch
    r_encoder = create_element_or_error('nvv4l2h264enc', 'recording-encoder')
    r_parser = create_element_or_error('h264parse', 'recording-parser')
    r_sink = create_element_or_error('splitmuxsink', 'recording-sink')

    # Create Gst Elements for Display Branch
    d_sink = create_element_or_error("nvoverlaysink", "display-sink")

    # Set Source Properties
    source.set_property('sensor-id', 0)
    source.set_property('saturation', 1.2)
    source.set_property('exposurecompensation', 1.2)
    source.set_property('wbmode', 0)

    # Set Streaming Properties
    s_sink.set_property('location',
                        'rtmp://media.streamit.link/LiveApp/streaming-test')

    # Set Display Properties
    d_sink.set_property('overlay', 1)
    d_sink.set_property('overlay-x', 0)
    d_sink.set_property('overlay-y', 0)
    d_sink.set_property('overlay-w', 640)
    d_sink.set_property('overlay-h', 360)

    # Set Recording Properties (max-size-time is in nanoseconds)
    r_sink.set_property('max-size-time', 30000000000)  # 30 seconds per file
    r_sink.connect('format-location', __location)

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(caps)
    pipeline.add(tee)
    pipeline.add(streaming_queue)
    pipeline.add(s_encoder)
    pipeline.add(s_parser)
    pipeline.add(s_muxer)
    pipeline.add(s_sink)
    pipeline.add(recording_queue)
    pipeline.add(r_encoder)
    pipeline.add(r_parser)
    pipeline.add(r_sink)
    pipeline.add(display_queue)
    pipeline.add(d_sink)

    # Link the elements together:
    print("Linking elements in the Pipeline")
    source.link(caps)
    caps.link(tee)

    # Streaming Queue
    streaming_queue.link(s_encoder)
    s_encoder.link(s_parser)
    s_parser.link(s_muxer)
    s_muxer.link(s_sink)

    # Recording Queue
    recording_queue.link(r_encoder)
    r_encoder.link(r_parser)
    r_parser.link(r_sink)

    # Display Queue
    display_queue.link(d_sink)

    # Get pad templates from source
    tee_src_pad_template = tee.get_pad_template("src_%u")

    # Get source to Streaming Queue
    tee_streaming_pad = tee.request_pad(tee_src_pad_template, None, None)
    streaming_queue_pad = streaming_queue.get_static_pad("sink")

    # Get source to Recording Queue
    tee_recording_pad = tee.request_pad(tee_src_pad_template, None, None)
    recording_queue_pad = recording_queue.get_static_pad("sink")

    # Get source to Display Queue
    tee_display_pad = tee.request_pad(tee_src_pad_template, None, None)
    display_queue_pad = display_queue.get_static_pad("sink")

    # Link sources
    if (tee_streaming_pad.link(streaming_queue_pad) != Gst.PadLinkReturn.OK or
            tee_recording_pad.link(recording_queue_pad) != Gst.PadLinkReturn.OK
            or
            tee_display_pad.link(display_queue_pad) != Gst.PadLinkReturn.OK):
        print("ERROR: Tee streaming could not be linked")
        sys.exit(1)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
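
# splitmuxsink's 'format-location' signal (connected to __location above) must
# return the file name for each new fragment; a minimal sketch:
def __location(splitmux, fragment_id):
    return 'recording_%04d.mp4' % fragment_id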
Example no. 20
def main(configuration):
    cam_configuration, udp_high_configuration, udp_low_configuration = configuration

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from USB camera
    source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")
    source.set_property('device', cam_configuration.device)
    source.set_property('do-timestamp', 1)

    caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
    if not caps_v4l2src:
        sys.stderr.write(" Unable to create v4l2src capsfilter \n")
    caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, width={}, height={}, format={}, framerate={}/1".format( \
                cam_configuration.width, cam_configuration.height , cam_configuration.format, cam_configuration.frame_rate)))

    # videoconvert to make sure a superset of raw formats are supported
    vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
    if not vidconvsrc:
        sys.stderr.write(" Unable to create videoconvert \n")

    # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
    nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
    if not nvvidconvsrc:
        sys.stderr.write(" Unable to create Nvvideoconvert \n")

    caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
    if not caps_vidconvsrc:
        sys.stderr.write(" Unable to create capsfilter \n")
    caps_vidconvsrc.set_property(
        'caps',
        Gst.Caps.from_string("video/x-raw(memory:NVMM), framerate={}/1".format(
            cam_configuration.frame_rate)))

    tee = Gst.ElementFactory.make("tee", "tee")
    if not tee:
        sys.stderr.write("Unable to create tee \n")

    # nvvideoconvert to resize
    resize_high = Gst.ElementFactory.make("nvvideoconvert", "tee-resize-high")
    if not resize_high:
        sys.stderr.write(" Unable to create Resize-high \n")

    caps_resize_high = Gst.ElementFactory.make("capsfilter",
                                               "resize-high-caps")
    if not caps_resize_high:
        sys.stderr.write(" Unable to create caps_resize_high \n")
    caps_resize_high.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM), width={}, height={}, framerate={}/1".format( \
                                            udp_high_configuration.width, udp_high_configuration.height, cam_configuration.frame_rate)))

    # nvvideoconvert to resize
    resize_low = Gst.ElementFactory.make("nvvideoconvert", "tee-resize-low")
    if not resize_low:
        sys.stderr.write(" Unable to create Resize-low \n")

    caps_resize_low = Gst.ElementFactory.make("capsfilter", "resize-low-caps")
    if not caps_resize_low:
        sys.stderr.write(" Unable to create caps_resize_low \n")
    caps_resize_low.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM), width={}, height={}, framerate={}/1".format( \
                                            udp_low_configuration.width, udp_low_configuration.height, cam_configuration.frame_rate)))

    # Make the encoder_high
    encoder_high = Gst.ElementFactory.make("nvv4l2h265enc", "tee-encoder-high")
    if not encoder_high:
        sys.stderr.write(" Unable to create encoder-high")
    encoder_high.set_property('bitrate', udp_high_configuration.bitrate)
    encoder_high.set_property('maxperf-enable', 1)
    encoder_high.set_property('preset-level', 1)
    encoder_high.set_property('insert-sps-pps', 1)
    encoder_high.set_property('bufapi-version', 1)
    encoder_high.set_property('profile', 1)
    encoder_high.set_property('iframeinterval', 1)

    # Make the encoder_low
    encoder_low = Gst.ElementFactory.make("nvv4l2h265enc", "tee-encoder-low")
    if not encoder_low:
        sys.stderr.write(" Unable to create encoder-low")
    encoder_low.set_property('bitrate', udp_low_configuration.bitrate)
    encoder_low.set_property('maxperf-enable', 1)
    encoder_low.set_property('preset-level', 3)
    encoder_low.set_property('insert-sps-pps', 1)
    encoder_low.set_property('bufapi-version', 1)
    encoder_low.set_property('profile', 1)

    # Make parser_high
    parser_high = Gst.ElementFactory.make("h265parse", "tee-parser-high")
    if not parser_high:
        sys.stderr.write("Unable to create parser-high")

    # Make parser_low
    parser_low = Gst.ElementFactory.make("h265parse", "tee-parser-low")
    if not parser_low:
        sys.stderr.write("Unable to create parser-low")

    # Make the payload-encode video into RTP packets
    rtppay_high = Gst.ElementFactory.make("rtph265pay", "tee-rtppay-high")
    if not rtppay_high:
        sys.stderr.write(" Unable to create rtppay-high")

    # Make the payload-encode video into RTP packets
    rtppay_low = Gst.ElementFactory.make("rtph265pay", "tee-rtppay-low")
    if not rtppay_low:
        sys.stderr.write(" Unable to create rtppay-low")

    # Make the UDP sink-high
    sink_high = Gst.ElementFactory.make("udpsink", "udpsink-high")
    if not sink_high:
        sys.stderr.write(" Unable to create udpsink-high")
    sink_high.set_property('host', '127.0.0.1')
    sink_high.set_property('port', udp_high_configuration.port)
    sink_high.set_property('async', False)
    sink_high.set_property('sync', 0)

    # Make the UDP sink-low
    sink_low = Gst.ElementFactory.make("udpsink", "udpsink-low")
    if not sink_low:
        sys.stderr.write(" Unable to create udpsink-low")
    sink_low.set_property('host', '127.0.0.1')
    sink_low.set_property('port', udp_low_configuration.port)
    sink_low.set_property('async', False)
    sink_low.set_property('sync', 0)

    pipeline.add(source)
    pipeline.add(caps_v4l2src)
    pipeline.add(vidconvsrc)
    pipeline.add(nvvidconvsrc)
    pipeline.add(caps_vidconvsrc)
    pipeline.add(tee)
    pipeline.add(resize_high)
    pipeline.add(resize_low)
    pipeline.add(caps_resize_high)
    pipeline.add(caps_resize_low)
    pipeline.add(encoder_high)
    pipeline.add(encoder_low)
    pipeline.add(parser_high)
    pipeline.add(parser_low)
    pipeline.add(rtppay_high)
    pipeline.add(rtppay_low)
    pipeline.add(sink_high)
    pipeline.add(sink_low)

    source.link(caps_v4l2src)
    caps_v4l2src.link(vidconvsrc)
    vidconvsrc.link(nvvidconvsrc)
    nvvidconvsrc.link(caps_vidconvsrc)

    caps_vidconvsrc.link(tee)

    resize_high.link(caps_resize_high)
    caps_resize_high.link(encoder_high)
    encoder_high.link(parser_high)
    parser_high.link(rtppay_high)
    rtppay_high.link(sink_high)

    resize_low.link(caps_resize_low)
    caps_resize_low.link(encoder_low)
    encoder_low.link(parser_low)
    parser_low.link(rtppay_low)
    rtppay_low.link(sink_low)

    tee_low_pad = tee.get_request_pad('src_0')
    tee_high_pad = tee.get_request_pad('src_1')
    if not tee_high_pad or not tee_low_pad:
        sys.stderr.write('Unable to get src pads of tee \n')
    sink_high_pad = resize_high.get_static_pad("sink")
    tee_high_pad.link(sink_high_pad)
    sink_low_pad = resize_low.get_static_pad("sink")
    tee_low_pad.link(sink_low_pad)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print('Stopped')
    pipeline.set_state(Gst.State.NULL)
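
# The two UDP branches above can be checked with receiver pipelines along these
# lines (illustrative; substitute the configured ports):
#   gst-launch-1.0 udpsrc port=<port> \
#       caps="application/x-rtp,media=video,encoding-name=H265,payload=96" ! \
#       rtph265depay ! h265parse ! avdec_h265 ! autovideosink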
def main():

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create Pipeline Element
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")

    # Create GST Source
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "convertor-2")

    # Create Gst Threads
    tee = create_element_or_error("tee", "tee")
    streaming_queue = create_element_or_error("queue", "streaming_queue")
    recording_queue = create_element_or_error("queue", "recording_queue")

    # Create Gst Elements for Streaming Branch
    s_encoder = create_element_or_error("nvv4l2h264enc", "streaming-encoder")
    s_parser = create_element_or_error("h264parse", "streaming-parser")
    s_muxer = create_element_or_error("flvmux", "streaming-muxer")
    s_sink = create_element_or_error("rtmpsink", "streaming-sink")

    # Create Gst Elements for Recording Branch
    r_encoder = create_element_or_error('nvv4l2h265enc', 'encoder')
    r_parser = create_element_or_error('h265parse', 'parser')
    r_sink = create_element_or_error('filesink', 'sink')

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
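    # batched-push-timeout is in microseconds: push a (possibly incomplete)
    # batch after 4 s even if batch-size buffers have not arrived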
    pgie.set_property(
        'config-file-path',
        "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt"
    )
    s_sink.set_property('location',
                        'rtmp://media.streamit.live/LiveApp/streaming-test')
    r_encoder.set_property('bitrate', 8000000)
    # Note: there is no muxer on the recording branch, so the output is a raw
    # H.265 elementary stream; name the file accordingly.
    r_sink.set_property(
        'location', 'video_' + str(datetime.datetime.utcnow().date()) + '.h265')

    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(tee)
    pipeline.add(streaming_queue)
    pipeline.add(s_encoder)
    pipeline.add(s_parser)
    pipeline.add(s_muxer)
    pipeline.add(s_sink)
    pipeline.add(recording_queue)
    pipeline.add(r_encoder)
    pipeline.add(r_parser)
    pipeline.add(r_sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together:
    print("Linking elements in the Pipeline")
    # Link the camera source's static src pad to the requested sink pad
    source.get_static_pad('src').link(sinkpad)
    streammux.link(pgie)
    pgie.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(tee)

    # Streaming Queue
    streaming_queue.link(s_encoder)
    s_encoder.link(s_parser)
    s_parser.link(s_muxer)
    s_muxer.link(s_sink)

    # Recording Queue
    recording_queue.link(r_encoder)
    r_encoder.link(r_parser)
    r_parser.link(r_sink)

    # Get pad templates from source
    tee_src_pad_template = tee.get_pad_template("src_%u")

    # Get source to Streaming Queue
    tee_streaming_pad = tee.request_pad(tee_src_pad_template, None, None)
    streaming_queue_pad = streaming_queue.get_static_pad("sink")

    # Get source to recording Queue
    tee_recording_pad = tee.request_pad(tee_src_pad_template, None, None)
    recording_queue_pad = recording_queue.get_static_pad("sink")

    # Link sources
    if (tee_streaming_pad.link(streaming_queue_pad) != Gst.PadLinkReturn.OK
            or tee_recording_pad.link(recording_queue_pad) !=
            Gst.PadLinkReturn.OK):
        print("ERROR: Tees could not be linked")
        sys.exit(1)

    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
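
# create_element_or_error() is used above but not reproduced in this
# collection. A plausible sketch, assuming it aborts when a plugin is
# missing (Gst and sys are imported as in the surrounding examples):
def create_element_or_error(factory_name, element_name):
    element = Gst.ElementFactory.make(factory_name, element_name)
    if not element:
        sys.stderr.write(" Unable to create " + factory_name + "\n")
        sys.exit(1)
    return element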
Example 22
    def update(self):
        self.pipeline.get_state(Gst.CLOCK_TIME_NONE)
        try:
            pos = self.pipeline.query_position(Gst.Format.TIME)[1] / Gst.MSECOND * self.speed + self.offset
        except Exception:
            # Position query failed: treat as end of stream; without a valid
            # position there is nothing to display this tick.
            self.end()
            return True
        self.set_pos(pos)  # if self.speed <= 1 else pos / self.speed
        return True
    def playing(self):
        return self.pipeline.get_state(Gst.CLOCK_TIME_NONE)[1] == Gst.State.PLAYING
    def get_pos(self):
        if self.timeout is not None:
            if not self.playing():
                return -1000 * (self.countdown_end - time.time())
        self.pipeline.get_state(Gst.CLOCK_TIME_NONE)
        try:
            pos = self.pipeline.query_position(Gst.Format.TIME)[1] / Gst.MSECOND * self.speed + self.offset
        except Exception:
            return 0
        return pos  # if self.speed <= 1 else pos / self.speed

Media.prober = Gst.ElementFactory.make('playbin')
Media.probe_pipeline = Gst.Pipeline()
Media.probe_pipeline.add(Media.prober)
Media.probesink = Gst.ElementFactory.make('gdkpixbufsink')
Media.prober.set_property('video-sink', Media.probesink)
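
# Hypothetical use of the probe pipeline built above: preroll a file and
# pull a thumbnail out of the gdkpixbufsink (the URI is a placeholder):
#
#   Media.prober.set_property('uri', 'file:///path/to/some/video.ogv')
#   Media.probe_pipeline.set_state(Gst.State.PAUSED)
#   Media.probe_pipeline.get_state(Gst.CLOCK_TIME_NONE)  # wait for preroll
#   pixbuf = Media.probesink.get_property('last-pixbuf')
#   Media.probe_pipeline.set_state(Gst.State.NULL)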

# vim: set expandtab tabstop=4 shiftwidth=4 :
 
def main(args):
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write("usage: %s <uri1> [uri2] ... [uriN]\n" % args[0])
        sys.exit(1)

    for i in range(0, len(args) - 1):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 1

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    pipeline.add(streammux)
    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to get the sink pad of streammux \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to get the src pad of source bin \n")
        srcpad.link(sinkpad)
    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    nvosd.set_property('process-mode', OSD_PROCESS_MODE)
    nvosd.set_property('display-text', OSD_DISPLAY_TEXT)
    if is_aarch64():
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "config_infer_primary_yoloV3.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              "with number of sources", number_sources, "\n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)
    sink.set_property("qos",0)

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tiler)
    tiler.link(queue3)
    queue3.link(nvvidconv)
    nvvidconv.link(queue4)
    queue4.link(nvosd)
    if is_aarch64():
        nvosd.link(queue5)
        queue5.link(transform)
        transform.link(sink)
    else:
        nvosd.link(queue5)
        queue5.link(sink)   

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    tiler_src_pad = pgie.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        if i != 0:
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
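
# create_source_bin() is called above but not reproduced here. A condensed
# sketch of the stock DeepStream helper (the original additionally filters
# for video caps with NVMM memory features before retargeting the ghost pad):
def create_source_bin(index, uri):
    nbin = Gst.Bin.new("source-bin-%02d" % index)
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    uri_decode_bin.set_property("uri", uri)
    Gst.Bin.add(nbin, uri_decode_bin)
    # Expose a targetless ghost pad so the bin can be linked to nvstreammux
    # before any decoded pad exists.
    nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))

    def on_pad_added(decodebin, pad):
        caps = pad.get_current_caps() or pad.query_caps(None)
        if caps.to_string().startswith("video"):
            nbin.get_static_pad("src").set_target(pad)

    uri_decode_bin.connect("pad-added", on_pad_added)
    return nbin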
Example 24
    def play(self, filename=None, data=None):
        if self.is_playing():
            self.stop()

        if filename:

            pipeline = Gst.Pipeline()
            self._pipeline = pipeline

            self._pipeline.set_name("audio-player")
            source = Gst.ElementFactory.make("filesrc", "file-source")
            demuxer = Gst.ElementFactory.make("oggdemux", "ogg-demuxer")
            decoder = Gst.ElementFactory.make("vorbisdec", "vorbis-decoder")
            conv = Gst.ElementFactory.make("audioconvert", "converter")
            sink = Gst.ElementFactory.make("alsasink", "audio-output")

            source.set_property('location', filename)

            demuxer.connect('pad-added', self._on_pad_added, decoder)

            pipeline.add(source)
            pipeline.add(demuxer)
            pipeline.add(decoder)
            pipeline.add(conv)
            pipeline.add(sink)

            source.link(demuxer)
            decoder.link(conv)
            conv.link(sink)

            self._pipeline.set_state(Gst.State.PLAYING)

        elif data:
            pipeline = Gst.Pipeline()
            self._pipeline = pipeline

            self._pipeline.set_name("audio-player")
            source = Gst.ElementFactory.make("appsrc", "data-source")
            demuxer = Gst.ElementFactory.make("oggdemux", "ogg-demuxer")
            decoder = Gst.ElementFactory.make("vorbisdec", "vorbis-decoder")
            conv = Gst.ElementFactory.make("audioconvert", "converter")
            sink = Gst.ElementFactory.make("alsasink", "audio-output")

            # appsrc has no 'location' property; hand it the in-memory Ogg
            # data instead (appsrc queues buffers pushed before PLAYING)
            source.emit('push-buffer', Gst.Buffer.new_wrapped(data))
            source.emit('end-of-stream')

            demuxer.connect('pad-added', self._on_pad_added, decoder)

            pipeline.add(source)
            pipeline.add(demuxer)
            pipeline.add(decoder)
            pipeline.add(conv)
            pipeline.add(sink)

            source.link(demuxer)
            decoder.link(conv)
            conv.link(sink)

            self._pipeline.set_state(Gst.State.PLAYING)

        else:
            raise Exception("filename or data must be supplied")

        return
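
    # The play() method above wires oggdemux's 'pad-added' signal to
    # self._on_pad_added, which this example does not show. A minimal sketch
    # consistent with the connect() call:
    def _on_pad_added(self, demuxer, pad, decoder):
        # oggdemux pads appear only once the stream type is known; link each
        # new pad to the vorbis decoder if it is still unconnected.
        sink_pad = decoder.get_static_pad('sink')
        if not sink_pad.is_linked():
            pad.link(sink_pad)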
def deepstream_main(config):

    global image_timer
    global number_sources
    global id_dict
    global fps_streams

    source_type = config["source_type"]
    sources = config["source"]
    if config["source_type"] == "rtsp":
        number_sources = len(list(sources))
    elif config["source_type"] == "mipi" or config["source_type"] == "usb":
        number_sources = 1

    display = config["display"]
    MUXER_OUTPUT_WIDTH = config["processing_width"]
    MUXER_OUTPUT_HEIGHT = config["processing_height"]
    TILED_OUTPUT_WIDTH = config["tiler_width"]
    TILED_OUTPUT_HEIGHT = config["tiler_height"]
    image_timer = config["image_timer"]
    for i in range(number_sources):
        # initialise id dictionary to keep track of object_id streamwise
        id_dict[i] = Queue(maxsize=config["queue_size"])
        fps_streams["stream{0}".format(i)] = GETFPS(i)
        # create image directories for separate streams
        if not os.path.exists(os.path.join(path1, "stream_" + str(i))):
            os.mkdir(os.path.join(path1, "stream_" + str(i)))
        if not os.path.exists(os.path.join(path2, "stream_" + str(i))):
            os.mkdir(os.path.join(path2, "stream_" + str(i)))

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streamux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    if source_type == "rtsp":
        is_live = False
        for i in range(number_sources):
            print("Creating source_bin ", i, " \n ")
            uri_name = sources["stream_" + str(i)]
            if uri_name.find("rtsp://") == 0:
                is_live = True
            source_bin = create_source_bin(i, uri_name)
            if not source_bin:
                sys.stderr.write("Unable to create source bin \n")
            pipeline.add(source_bin)
            padname = "sink_%u" % i
            sinkpad = streammux.get_request_pad(padname)
            if not sinkpad:
                sys.stderr.write("Unable to create sink pad bin \n")
            srcpad = source_bin.get_static_pad("src")
            if not srcpad:
                sys.stderr.write("Unable to create src pad bin \n")
            srcpad.link(sinkpad)

        if is_live:
            print("Atleast one of the sources is live")
            streammux.set_property('live-source', 1)

    elif source_type == "mipi":
        # Source element for the MIPI CSI camera
        print("Creating Source \n ")
        source = Gst.ElementFactory.make("nvarguscamerasrc", "src_elem")
        if not source:
            sys.stderr.write(" Unable to create Source \n")
        source.set_property('bufapi-version', True)

        # Converter to scale the image
        nvvidconv_src = Gst.ElementFactory.make("nvvideoconvert",
                                                "convertor_src")
        if not nvvidconv_src:
            sys.stderr.write(" Unable to create nvvidconv_src \n")

        # Caps for NVMM and resolution scaling
        caps_nvvidconv_src = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
        if not caps_nvvidconv_src:
            sys.stderr.write(" Unable to create capsfilter \n")
        caps_nvvidconv_src.set_property(
            'caps',
            Gst.Caps.from_string(
                'video/x-raw(memory:NVMM), width=1280, height=720'))

    elif source_type == "usb":
        # Source element for the USB camera
        print("Creating Source \n ")
        source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
        if not source:
            sys.stderr.write(" Unable to create Source \n")
        source.set_property('device', "/dev/video0")

        caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
        if not caps_v4l2src:
            sys.stderr.write(" Unable to create v4l2src capsfilter \n")
        caps_v4l2src.set_property(
            'caps', Gst.Caps.from_string("video/x-raw, format=YUY2, framerate=30/1"))

        print("Creating Video Converter \n")
        # videoconvert to make sure a superset of raw formats are supported
        vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
        if not vidconvsrc:
            sys.stderr.write(" Unable to create videoconvert \n")

        # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
        nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert",
                                               "convertor_src2")
        if not nvvidconvsrc:
            sys.stderr.write(" Unable to create Nvvideoconvert \n")

        caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
        if not caps_vidconvsrc:
            sys.stderr.write(" Unable to create capsfilter \n")
        caps_vidconvsrc.set_property(
            'caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")

    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")

    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)

    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")

    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")

    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    if display:
        print("Creating EGLSink \n")
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
        if not sink:
            sys.stderr.write(" Unable to create egl sink \n")
    else:
        print("Creating FakeSink \n")
        sink = Gst.ElementFactory.make("fakesink", "fakesink")
        if not sink:
            sys.stderr.write(" Unable to create fake sink \n")

    # Set properties of tracker
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()
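
    # The [tracker] section of dstest2_tracker_config.txt is expected to look
    # roughly like this (values are illustrative, not taken from this repo):
    #
    #   [tracker]
    #   tracker-width=640
    #   tracker-height=384
    #   gpu-id=0
    #   ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvdcf.so
    #   ll-config-file=tracker_config.yml
    #   enable-batch-process=1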

    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    streammux.set_property('width', MUXER_OUTPUT_WIDTH)
    streammux.set_property('height', MUXER_OUTPUT_HEIGHT)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "primary_config.txt")
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    sink.set_property("qos", 0)
    sink.set_property("sync", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    queue6 = Gst.ElementFactory.make("queue", "queue6")
    queue7 = Gst.ElementFactory.make("queue", "queue7")
    queue8 = Gst.ElementFactory.make("queue", "queue8")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)
    pipeline.add(queue8)

    if source_type == "mipi":
        print("Adding elements to Pipeline \n")
        pipeline.add(source)
        pipeline.add(nvvidconv_src)
        pipeline.add(caps_nvvidconv_src)

        print("Linking elements in the Pipeline \n")
        source.link(nvvidconv_src)
        nvvidconv_src.link(caps_nvvidconv_src)

        sinkpad = streammux.get_request_pad("sink_0")
        if not sinkpad:
            sys.stderr.write(" Unable to get the sink pad of streammux \n")
        srcpad = caps_nvvidconv_src.get_static_pad("src")

        if not srcpad:
            sys.stderr.write(" Unable to get source pad of source \n")
        srcpad.link(sinkpad)

    elif source_type == "usb":
        print("Adding elements to Pipeline \n")
        pipeline.add(source)
        pipeline.add(caps_v4l2src)
        pipeline.add(vidconvsrc)
        pipeline.add(nvvidconvsrc)
        pipeline.add(caps_vidconvsrc)

        print("Linking elements in the Pipeline \n")
        source.link(caps_v4l2src)
        caps_v4l2src.link(vidconvsrc)
        vidconvsrc.link(nvvidconvsrc)
        nvvidconvsrc.link(caps_vidconvsrc)

        sinkpad = streammux.get_request_pad("sink_0")
        if not sinkpad:
            sys.stderr.write(" Unable to get the sink pad of streammux \n")
        srcpad = caps_vidconvsrc.get_static_pad("src")
        if not srcpad:
            sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
        srcpad.link(sinkpad)

    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(nvvidconv1)
    nvvidconv1.link(queue4)
    queue4.link(filter1)
    filter1.link(queue5)
    queue5.link(tiler)
    tiler.link(queue6)
    queue6.link(nvvidconv)
    nvvidconv.link(queue7)
    queue7.link(nvosd)
    if is_aarch64() and display:
        nvosd.link(queue8)
        queue8.link(transform)
        transform.link(sink)
    else:
        nvosd.link(queue8)
        queue8.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Attach the probe to the tiler's sink pad (the callback keeps its
    # historical tiler_src_pad_buffer_probe name)
    tiler_sink_pad = tiler.get_static_pad("sink")
    if not tiler_sink_pad:
        sys.stderr.write(" Unable to get the sink pad of tiler \n")
    else:
        tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER,
                                 tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, src in sources.items():
        print(i, ": ", src)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup

    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    # Source element for reading from the file
    source = make_elm_or_print_err("filesrc", "file-source", "Source")

    # Since the data format in the input file is an elementary h264 stream,
    # we need an h264 parser
    h264parser = make_elm_or_print_err("h264parse", "h264-parser",
                                       "H264Parser")

    # Use nvv4l2decoder for hardware accelerated decode on GPU
    decoder = make_elm_or_print_err("nvv4l2decoder", "nvv4l2-decoder",
                                    "Decoder")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = make_elm_or_print_err("nvstreammux", "Stream-muxer",
                                      "NvStreamMux")

    # Use nvinferserver to run inferencing on decoder's output,
    # behaviour of inferencing is set through config file
    pgie = make_elm_or_print_err("nvinferserver", "primary-inference",
                                 "Nvinferserver")

    # Use convertor to convert from NV12 to RGBA as required by nvosd
    nvvidconv = make_elm_or_print_err("nvvideoconvert", "convertor",
                                      "Nvvidconv")

    # Create OSD to draw on the converted RGBA buffer
    nvosd = make_elm_or_print_err("nvdsosd", "onscreendisplay", "OSD (nvosd)")

    # Finally encode and save the osd output
    queue = make_elm_or_print_err("queue", "queue", "Queue")

    nvvidconv2 = make_elm_or_print_err("nvvideoconvert", "convertor2",
                                       "Converter 2 (nvvidconv2)")

    capsfilter = make_elm_or_print_err("capsfilter", "capsfilter",
                                       "capsfilter")

    caps = Gst.Caps.from_string("video/x-raw, format=I420")
    capsfilter.set_property("caps", caps)

    # On Jetson, there is a problem with the encoder failing to initialize
    # due to limitation on TLS usage. To work around this, preload libgomp.
    # Add a reminder here in case the user forgets.
    preload_reminder = "If the following error is encountered:\n" + \
                       "/usr/lib/aarch64-linux-gnu/libgomp.so.1: cannot allocate memory in static TLS block\n" + \
                       "Preload the offending library:\n" + \
                       "export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1\n"
    encoder = make_elm_or_print_err("avenc_mpeg4", "encoder", "Encoder",
                                    preload_reminder)

    encoder.set_property("bitrate", 2000000)

    codeparser = make_elm_or_print_err("mpeg4videoparse", "mpeg4-parser",
                                       'Code Parser')

    container = make_elm_or_print_err("qtmux", "qtmux", "Container")

    sink = make_elm_or_print_err("filesink", "filesink", "Sink")

    sink.set_property("location", OUTPUT_VIDEO_NAME)
    sink.set_property("sync", 0)
    sink.set_property("async", 0)

    print("Playing file %s " % args[1])
    source.set_property("location", args[1])
    streammux.set_property("width", IMAGE_WIDTH)
    streammux.set_property("height", IMAGE_HEIGHT)
    streammux.set_property("batch-size", 1)
    streammux.set_property("batched-push-timeout", 4000000)
    pgie.set_property("config-file-path", "dstest_ssd_nopostprocess.txt")

    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(queue)
    pipeline.add(nvvidconv2)
    pipeline.add(capsfilter)
    pipeline.add(encoder)
    pipeline.add(codeparser)
    pipeline.add(container)
    pipeline.add(sink)

    # we link the elements together:
    # file-source -> h264-parser -> nvv4l2-decoder -> streammux -> nvinfer ->
    # nvvidconv -> nvosd -> queue -> nvvidconv2 -> capsfilter -> encoder ->
    # mpeg4-parser -> qtmux -> filesink
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(queue)
    queue.link(nvvidconv2)
    nvvidconv2.link(capsfilter)
    capsfilter.link(encoder)
    encoder.link(codeparser)
    codeparser.link(container)
    container.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Add a probe on the primary-infer source pad to get inference output tensors
    pgiesrcpad = pgie.get_static_pad("src")
    if not pgiesrcpad:
        sys.stderr.write(" Unable to get src pad of primary infer \n")

    pgiesrcpad.add_probe(Gst.PadProbeType.BUFFER, pgie_src_pad_buffer_probe, 0)

    # Add a probe to be informed of the generated metadata; we attach it to
    # the sink pad of the osd element, since by that point the buffer will
    # have acquired all of the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    # start play back and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)
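
# make_elm_or_print_err() is not reproduced in this collection. A plausible
# sketch matching the call sites above, where the optional fourth argument
# carries extra troubleshooting detail:
def make_elm_or_print_err(factoryname, name, printedname, detail=""):
    print("Creating", printedname)
    elm = Gst.ElementFactory.make(factoryname, name)
    if not elm:
        sys.stderr.write("Unable to create " + printedname + " \n")
        if detail:
            sys.stderr.write(detail)
    return elm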
Example 27
def main():
    print('Tracker Example')
    
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)


    # Create Pipeline Element
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline")
        return
    
    source = create_element_or_error("nvarguscamerasrc", "camera-source")
    streammux = create_element_or_error("nvstreammux", "Stream-muxer")
    pgie = create_element_or_error("nvinfer", "primary-inference")
    tracker = create_element_or_error("nvtracker", "tracker")
    convertor = create_element_or_error("nvvideoconvert", "convertor-1")
    nvosd = create_element_or_error("nvdsosd", "onscreendisplay")
    convertor2 = create_element_or_error("nvvideoconvert", "converter-2")
    transform = create_element_or_error("nvegltransform", "nvegl-transform")
    sink = create_element_or_error("nveglglessink", "egl-overlay")

    # Set Element Properties
    source.set_property('sensor-id', 0)
    source.set_property('bufapi-version', True)
    
    streammux.set_property('live-source', 1)
    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('num-surfaces-per-frame', 1)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)

    pgie.set_property('config-file-path', "/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/config_infer_primary.txt")

    tracker.set_property('ll-lib-file', '/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so')
    tracker.set_property('gpu-id', 0)
    tracker.set_property('enable-batch-process', 1)
    tracker.set_property('ll-config-file', '/opt/nvidia/deepstream/deepstream-5.0/samples/configs/deepstream-app/tracker_config.yml')


    # Add Elements to Pipeline
    print("Adding elements to Pipeline")
    pipeline.add(source)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(convertor)
    pipeline.add(nvosd)
    pipeline.add(convertor2)
    pipeline.add(transform)
    pipeline.add(sink)

    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux")

    # Link the elements together
    print("Linking elements in the Pipeline")
    source.link(streammux)
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(convertor)
    convertor.link(nvosd)
    nvosd.link(convertor2)
    convertor2.link(transform)
    transform.link(sink)
    
    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    print('Create OSD Sink Pad')
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write("Unable to get sink pad of nvosd")

    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, sink_pad_buffer_probe, 0)

    # Start play back and listen to events
    print("Starting pipeline")
    pipeline.set_state(Gst.State.PLAYING)

    try:
        loop.run()
    except:
        pass

    # Cleanup
    pipeline.set_state(Gst.State.NULL)
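
# sink_pad_buffer_probe is referenced above but not included. A minimal
# skeleton that walks the DeepStream batch metadata (assumes `import pyds`):
def sink_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        # per-frame handling goes here, e.g. counting detected objects
        print("frame", frame_meta.frame_num, "objects", frame_meta.num_obj_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK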
Example 28
    def __init__(self, sampling_rate):
        GObject.GObject.__init__(self)

        self.is_playing = False
        self.rate = sampling_rate
        self.max_spectrum_freq = 20000  # Hz
        self.spectrum_nbands = 1600
        self.spectrum_freqs = []
        self.spectrum_x_axis = np.array([])
        self.spectrum_n_points = 250  # number of freqs displayed
        self.spectrum_nfreqs = 0
        self.spectrum_threshold = -120  # dB

        self.log = logging.getLogger('PulseEffects')

        self.calc_spectrum_freqs()

        self.pipeline = Gst.Pipeline()

        # Create bus to get events from GStreamer pipeline
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message::error', self.on_message_error)
        bus.connect('message::info', self.on_message_info)
        bus.connect('message::warning', self.on_message_warning)
        bus.connect('message::latency', self.on_message_latency)
        # on_message_element is implemented by child class
        bus.connect('message::element', self.on_message_element)

        self.audio_src = Gst.ElementFactory.make('pulsesrc', 'audio_src')

        self.queue = Gst.ElementFactory.make('queue', None)

        self.source_caps = Gst.ElementFactory.make("capsfilter", None)

        self.effects_bin = GstInsertBin.InsertBin.new('effects_bin')

        self.spectrum = Gst.ElementFactory.make('spectrum', 'spectrum')

        self.audio_sink = Gst.ElementFactory.make('pulsesink', 'audio_sink')

        self.audio_src.set_property('volume', 1.0)
        self.audio_src.set_property('mute', False)
        self.audio_src.set_property('provide-clock', False)
        self.audio_src.set_property('slave-method', 're-timestamp')

        caps = [
            'audio/x-raw', 'format=F32LE', 'rate=' + str(self.rate),
            'channels=2'
        ]

        src_caps = Gst.Caps.from_string(",".join(caps))
        self.source_caps.set_property("caps", src_caps)

        self.audio_sink.set_property('volume', 1.0)
        self.audio_sink.set_property('mute', False)

        self.spectrum.set_property('bands', self.spectrum_nbands)
        self.spectrum.set_property('threshold', self.spectrum_threshold)

        self.pipeline.add(self.audio_src)
        self.pipeline.add(self.queue)
        self.pipeline.add(self.source_caps)
        self.pipeline.add(self.effects_bin)
        self.pipeline.add(self.audio_sink)

        self.audio_src.link(self.queue)
        self.queue.link(self.source_caps)
        self.source_caps.link(self.effects_bin)
        self.effects_bin.link(self.audio_sink)
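
        # GstInsertBin inserts elements asynchronously; a hypothetical way to
        # append an effect later, with a callback reporting the result:
        #
        #   def on_added(insertbin, element, success, user_data):
        #       print('effect inserted:', success)
        #
        #   self.effects_bin.append(
        #       Gst.ElementFactory.make('equalizer-nbands', None),
        #       on_added, None)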
Example 29
    def createPipeLine(self):
        # Create GStreamer pipeline
        self.player = Gst.Pipeline()
        # Create bus to get events from GStreamer pipeline
        self.bus = self.player.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::eos', self.on_eos)
        self.bus.connect('message::error', self.on_error)

        # This is needed to make the video output in our DrawingArea:
        self.bus.enable_sync_message_emission()
        self.bus.connect('sync-message::element', self.on_sync_message)

        self.filesrc = Gst.ElementFactory.make("filesrc", "filesource")
        self.decodebin = Gst.ElementFactory.make("decodebin", "decodebin")
        self.decodebin.connect("pad_added", self.decodebin_pad_added)

        #--------------Audio Session -------------------#

        self.audioQueue = Gst.ElementFactory.make("queue", "audioqueue")
        self.audioconvert = Gst.ElementFactory.make("audioconvert",
                                                    "audioconvert")
        self.audiocapfilter = Gst.ElementFactory.make("capsfilter",
                                                      "audiocapfilter")
        audiocaps = Gst.Caps.from_string("audio/x-raw,channels=1")
        self.audiocapfilter.set_property("caps", audiocaps)
        self.audiosink = Gst.ElementFactory.make("autoaudiosink", "alsasink")

        #--------------Video Session -------------------#
        self.videoQueue = Gst.ElementFactory.make("queue", "videoqueue")
        self.videoconvert = Gst.ElementFactory.make("videoconvert",
                                                    "videoconvert")
        self.scale = Gst.ElementFactory.make("videoscale", "videoscale")
        self.videosink = Gst.ElementFactory.make("xvimagesink", "videosink")

        #---------------- Text Overlay ----------------#
        self.OSD_Text = Gst.ElementFactory.make("textoverlay")
        self.OSD_Text.set_property("text", "Triple111")
        self.OSD_Text.set_property("halignment", "left")
        self.OSD_Text.set_property("valignment", "top")
        # self.OSD_Text.set_property("shaded-background",True)

        self.player.add(self.filesrc)
        self.player.add(self.decodebin)

        self.player.add(self.audioQueue)
        self.player.add(self.audioconvert)
        self.player.add(self.audiocapfilter)
        self.player.add(self.audiosink)

        self.player.add(self.videoQueue)
        self.player.add(self.videoconvert)
        self.player.add(self.OSD_Text)
        self.player.add(self.scale)
        self.player.add(self.videosink)

        self.filesrc.link(self.decodebin)
        #---------- For Audio -----------------#
        self.audioQueue.link(self.audioconvert)
        self.audioconvert.link(self.audiocapfilter)
        self.audiocapfilter.link(self.audiosink)
        #---------- For Video -----------------#
        self.videoQueue.link(self.videoconvert)
        self.videoconvert.link(self.OSD_Text)
        self.OSD_Text.link(self.scale)
        self.scale.link(self.videosink)

        self.player.set_state(Gst.State.READY)
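
    # decodebin_pad_added is connected above but not shown. A minimal sketch
    # that routes decoded pads to the audio and video queues:
    def decodebin_pad_added(self, decodebin, pad):
        caps = pad.query_caps(None).to_string()
        if caps.startswith('audio/'):
            pad.link(self.audioQueue.get_static_pad('sink'))
        elif caps.startswith('video/'):
            pad.link(self.videoQueue.get_static_pad('sink'))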
def main():
    number_sources = 1
    GObject.threads_init()
    Gst.init(None)
    pipeline = Gst.Pipeline()
    is_live = False
    uri_name = "rtsp://192.168.1.10:554/user=admin_password=tlJwpbo6_channel=1_stream=0.sdp"
    
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    pipeline.add(streammux)
    
    source_bin = create_source_bin(1, uri_name)
    pipeline.add(source_bin)
    sinkpad = streammux.get_request_pad("sink_1")
    srcpad = source_bin.get_static_pad("src")
    srcpad.link(sinkpad)
    
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
    sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
    sgie3 = Gst.ElementFactory.make("nvinfer", "secondary3-nvinference-engine")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    filter1.set_property("caps", caps1)
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if is_aarch64():
        # The sink below is a fakesink, so nothing is rendered on screen; a
        # plain queue stands in where nvegltransform would normally go.
        transform = Gst.ElementFactory.make("queue", "queue")

    sink = Gst.ElementFactory.make("fakesink", "nvvideo-renderer")
    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)
    streammux.set_property('width', 640)
    streammux.set_property('height', 480)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    ds_pgie_config = 'dstest2_pgie_config.txt'
    pgie.set_property('config-file-path', ds_pgie_config)
    sink.set_property('sync', False)
    pgie_batch_size = pgie.get_property("batch-size")
    if pgie_batch_size != number_sources:
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              "with number of sources", number_sources, "\n")
        pgie.set_property("batch-size", number_sources)
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", 640)
    tiler.set_property("height", 480)
    
    sgie1.set_property('config-file-path', "dstest2_sgie1_config.txt")
    sgie2.set_property('config-file-path', "dstest2_sgie2_config.txt")
    sgie3.set_property('config-file-path', "dstest2_sgie3_config.txt")

    #Set properties of tracker
    config = configparser.ConfigParser()
    config.read('dstest2_tracker_config.txt')
    config.sections()

    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process', tracker_enable_batch_process)
        if key == 'enable-past-frame':
            tracker_enable_past_frame = config.getint('tracker', key)
            tracker.set_property('enable_past_frame', tracker_enable_past_frame)

    print("Adding elements to Pipeline \n")
    
    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    queue5 = Gst.ElementFactory.make("queue", "queue5")
    queue6 = Gst.ElementFactory.make("queue", "queue6")
    queue7 = Gst.ElementFactory.make("queue", "queue7")
    queue8 = Gst.ElementFactory.make("queue", "queue8")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)
    pipeline.add(queue5)
    pipeline.add(queue6)
    pipeline.add(queue7)
    pipeline.add(queue8)
    
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(sgie1)
    pipeline.add(sgie2)
    pipeline.add(sgie3)

    # pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)
    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(tracker)
    tracker.link(queue3)
    queue3.link(sgie1)
    sgie1.link(queue4)
    queue4.link(sgie2)
    sgie2.link(queue5)
    queue5.link(sgie3)
    sgie3.link(queue6)
    queue6.link(nvvidconv)
    nvvidconv.link(queue7)
    queue7.link(nvosd)
    if is_aarch64():
        nvosd.link(queue8)
        queue8.link(transform)
        transform.link(sink)
    else:
        nvosd.link(queue8)
        queue8.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, tiler_src_pad_buffer_probe, 0)

    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)