Example #1
def osd_sink_pad_buffer_probe(pad,info,u_data):
    frame_number=0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_ROADSIGN:0
    }
    num_rects=0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number=frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj=frame_meta.obj_meta_list

        # Send frame-level counts over OSC
        client.send_message("/frame_number", frame_number)
        client.send_message("/num_rects", num_rects)

        lcounter = 0

        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1

            # Send this object's bounding box and class over OSC

            class_id = obj_meta.class_id
            # agent = class_id + long_to_int(obj_meta.object_id)
            top =  obj_meta.rect_params.top
            left =  obj_meta.rect_params.left
            width = obj_meta.rect_params.width
            height = obj_meta.rect_params.height
            # confidence = obj_meta.confidence
            
            client.send_message("/oxywhc", [lcounter, left, top, width, height, class_id])

            try: 
                l_obj=l_obj.next
                lcounter = lcounter + 1
            except StopIteration:
                break
        # Send aggregated class counts over OSC

        client.send_message("/num_vehicles", obj_counter[PGIE_CLASS_ID_VEHICLE]) 
        client.send_message("/num_people", obj_counter[PGIE_CLASS_ID_PERSON])


        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame=l_frame.next
        except StopIteration:
            break
			
    return Gst.PadProbeReturn.OK	
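
The probe above sends its counts through a module-level client object. A minimal sketch of one plausible setup, assuming python-osc is used for the OSC messages; the nvosd element variable and the receiver address/port are assumptions, not taken from the original:

from pythonosc.udp_client import SimpleUDPClient

# Assumed OSC receiver address and port (placeholders).
client = SimpleUDPClient("127.0.0.1", 9000)

# Attach the probe to the OSD sink pad, as in the standard DeepStream Python apps.
osdsinkpad = nvosd.get_static_pad("sink")
if osdsinkpad:
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
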
def osd_sink_pad_buffer_probe(pad,info,u_data):
    frame_number=0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_ROADSIGN:0
    }
    num_rects=0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number=frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj=frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try: 
                l_obj=l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame=l_frame.next
        except StopIteration:
            break
    # past tracking metadata
    if(past_tracking_meta[0]==1):
        l_user=batch_meta.batch_user_meta_list
        while l_user is not None:
            try:
                # Note that l_user.data needs a cast to pyds.NvDsUserMeta
                # The casting is done by pyds.NvDsUserMeta.cast()
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone
                user_meta=pyds.NvDsUserMeta.cast(l_user.data)
            except StopIteration:
                break
            if(user_meta and user_meta.base_meta.meta_type==pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META):
                try:
                    # Note that user_meta.user_meta_data needs a cast to pyds.NvDsPastFrameObjBatch
                    # The casting is done by pyds.NvDsPastFrameObjBatch.cast()
                    # The casting also keeps ownership of the underlying memory
                    # in the C code, so the Python garbage collector will leave
                    # it alone
                    pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast(user_meta.user_meta_data)
                except StopIteration:
                    break
                for trackobj in pyds.NvDsPastFrameObjBatch.list(pPastFrameObjBatch):
                    print("streamId=",trackobj.streamID)
                    print("surfaceStreamID=",trackobj.surfaceStreamID)
                    for pastframeobj in pyds.NvDsPastFrameObjStream.list(trackobj):
                        print("numobj=",pastframeobj.numObj)
                        print("uniqueId=",pastframeobj.uniqueId)
                        print("classId=",pastframeobj.classId)
                        print("objLabel=",pastframeobj.objLabel)
                        for objlist in pyds.NvDsPastFrameObjList.list(pastframeobj):
                            print('frameNum:', objlist.frameNum)
                            print('tBbox.left:', objlist.tBbox.left)
                            print('tBbox.width:', objlist.tBbox.width)
                            print('tBbox.top:', objlist.tBbox.top)
                            print('tBbox.height:', objlist.tBbox.height)
                            print('confidence:', objlist.confidence)
                            print('age:', objlist.age)
            try:
                l_user=l_user.next
            except StopIteration:
                break
    return Gst.PadProbeReturn.OK	
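
The past-frame branch above reads a module-level past_tracking_meta flag. A sketch of how that flag is commonly populated from the tracker configuration, mirroring the pattern in the DeepStream tracker sample; the tracker element variable and config path are assumptions:

import configparser

past_tracking_meta = [0]

config = configparser.ConfigParser()
config.read('dstest2_tracker_config.txt')  # assumed tracker config file
for key in config['tracker']:
    if key == 'enable-past-frame':
        # Ask the tracker to emit past-frame metadata and remember whether it is on.
        tracker.set_property('enable_past_frame', config.getint('tracker', key))
        past_tracking_meta[0] = config.getint('tracker', key)
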
def tiler_src_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        '''
        print("Frame Number is ", frame_meta.frame_num)
        print("Source id is ", frame_meta.source_id)
        print("Batch id is ", frame_meta.batch_id)
        print("Source Frame Width ", frame_meta.source_frame_width)
        print("Source Frame Height ", frame_meta.source_frame_height)
        print("Num object meta ", frame_meta.num_obj_meta)
        '''
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        """display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, vehicle_count, person)
        py_nvosd_text_params.x_offset = 10;
        py_nvosd_text_params.y_offset = 12;
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        py_nvosd_text_params.font_params.font_color.red = 1.0
        py_nvosd_text_params.font_params.font_color.green = 1.0
        py_nvosd_text_params.font_params.font_color.blue = 1.0
        py_nvosd_text_params.font_params.font_color.alpha = 1.0
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.red = 0.0
        py_nvosd_text_params.text_bg_clr.green = 0.0
        py_nvosd_text_params.text_bg_clr.blue = 0.0
        py_nvosd_text_params.text_bg_clr.alpha = 1.0
        #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",vehicle_count,
        "Person_count=",person)
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)"""
        print("Frame Number=", frame_number, "Number of Objects=", num_rects,
              "Vehicle_count=", obj_counter[PGIE_CLASS_ID_VEHICLE],
              "Person_count=", obj_counter[PGIE_CLASS_ID_PERSON])

        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
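
fps_streams is assumed to be a module-level dict of per-stream FPS helpers. A sketch of the usual initialization with the GETFPS helper shipped with the DeepStream Python apps; number_sources is an assumption:

from common.FPS import GETFPS

fps_streams = {}
for i in range(number_sources):  # number_sources assumed to be defined elsewhere
    fps_streams["stream{0}".format(i)] = GETFPS(i)
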
def sgie_sink_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:

            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta

        l_obj = frame_meta.obj_meta_list

        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)

            except StopIteration:
                break

            l_user = obj_meta.obj_user_meta_list
            print(f'obj_meta.obj_user_meta_list {l_user}')
            # while l_user is not None:
            #     print('Inside l_user = obj_meta.obj_user_meta_list Loop')
            #     try:
            #         # Casting l_obj.data to pyds.NvDsObjectMeta
            #         user_meta=pyds.NvDsUserMeta.cast(l_user.data)
            #     except StopIteration:
            #         break

            #     if (
            #         user_meta.base_meta.meta_type
            #         != pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META
            #     ):
            #         continue

            #     tensor_meta = pyds.NvDsInferTensorMeta.cast(user_meta.user_meta_data)

            #     # Boxes in the tensor meta should be in network resolution which is
            #     # found in tensor_meta.network_info. Use this info to scale boxes to
            #     # the input frame resolution.
            #     layers_info = []

            #     for i in range(tensor_meta.num_output_layers):
            #         layer = pyds.get_nvds_LayerInfo(tensor_meta, i)
            #         layers_info.append(layer)
            #         print(f'Layer: {i}, Layer name: {layer.layerName}')

            #     try:
            #         l_user = l_user.next
            #     except StopIteration:
            #         break

            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
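
For obj_user_meta_list to carry tensor output metadata at all, the secondary inference element normally has to be asked to attach it. A hedged sketch of the relevant properties; the sgie variable and config file name are assumptions:

# Request raw tensor output metadata from the secondary GIE.
sgie.set_property('config-file-path', 'sgie_config.txt')  # assumed config file
sgie.set_property('output-tensor-meta', True)
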
Example #5
    def overlay_display_type(self, gst_buffer, client_data):

        # cast the C void* client_data back to a py_object pointer and deref
        meta_data = cast(client_data, POINTER(py_object)).contents.value
        meta_data.mutex.acquire()

        frame_number = 0

        # Retrieve batch metadata from the gst_buffer
        # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
        # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
        l_frame = batch_meta.frame_meta_list
        while l_frame is not None:
            try:
                # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
                # The casting is done by pyds.NvDsFrameMeta.cast()
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone.
                frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            except StopIteration:
                break

            if meta_data.active_display_type is not None:
                display_meta = pyds.nvds_acquire_display_meta_from_pool(
                    batch_meta)

                display_meta.num_lines = 0
                display_meta.num_rects = 0

                if 'Line' in meta_data.active_display_type:
                    line = meta_data.active_display_type['Line']
                    py_nvosd_line_params = display_meta.line_params[
                        display_meta.num_lines]
                    display_meta.num_lines += 1

                    py_nvosd_line_params = line.copy(py_nvosd_line_params)

                elif 'Polygon' in meta_data.active_display_type:

                    lines = meta_data.active_display_type['Polygon']

                    for line in lines:
                        if line:
                            py_nvosd_line_params = display_meta.line_params[
                                display_meta.num_lines]
                            display_meta.num_lines += 1

                            py_nvosd_line_params = line.copy(
                                py_nvosd_line_params)

                pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

            try:
                l_frame = l_frame.next
            except StopIteration:
                break

        meta_data.mutex.release()
        return True
Example #6
def sink_pad_buffer_probe(pad, info, u_data):

    gst_buffer = info.get_buffer()

    if not gst_buffer:
        sys.stderr.write("Unable to get GstBuffer")
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    frame_list = batch_meta.frame_meta_list

    while frame_list is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(frame_list.data)
        except StopIteration:
            break

        list_of_objects = frame_meta.obj_meta_list

        while list_of_objects is not None:

            try:
                object_meta = pyds.NvDsObjectMeta.cast(list_of_objects.data)
                # https://docs.nvidia.com/metropolis/deepstream/5.0DP/python-api/NvDsMeta/NvDsObjectMeta.html

                if object_meta.object_id not in detectedObjectsIds:
                    t = time.localtime()
                    current_time = time.strftime("%H:%M:%S", t)

                    detectedObjectsIds.append(object_meta.object_id)
                    detectedObjects.append({
                        'id': str(object_meta.object_id),
                        'label': str(object_meta.obj_label),
                        'time': current_time,
                        'confidence': str(object_meta.confidence)
                    })

            except StopIteration:
                break
            # obj_counter[object_meta.class_id] += 1
            try:
                list_of_objects = list_of_objects.next
            except StopIteration:
                break
        try:
            frame_list = frame_list.next
        except StopIteration:
            break

        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]

        textDisplay = "DETECTED OBJECTS:\n\n"
        if len(detectedObjects) > 10:
            # Keep only the 10 most recent detections
            detectedObjectsList = detectedObjects[-10:]
        else:
            detectedObjectsList = detectedObjects

        for _object in detectedObjectsList:
            textDisplay += "{}: Detected: \"{}\", ID: {}, Confidence: {}\n".format(
                _object["time"], _object["label"], _object["id"], _object["confidence"])
            print(textDisplay)

        py_nvosd_text_params.display_text = textDisplay
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

    return Gst.PadProbeReturn.OK
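
The probe above appends to two module-level collections that are not shown here. A minimal sketch of how they might be declared; the empty initial values are an assumption:

detectedObjectsIds = []  # object_id values that have already been reported
detectedObjects = []     # one dict per new object: id, label, time, confidence
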
def sgie_sink_pad_buffer_probe(pad, info, u_data):

    frame_number = 0

    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta

        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            l_user = obj_meta.obj_user_meta_list
            # if obj_meta.class_id == SGIE_CLASS_ID_FACE:
            #     print(f'obj_meta.obj_user_meta_list {l_user}')
            while l_user is not None:

                try:
                    # Casting l_user.data to pyds.NvDsUserMeta
                    user_meta = pyds.NvDsUserMeta.cast(l_user.data)
                except StopIteration:
                    break

                if (user_meta.base_meta.meta_type !=
                        pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META):
                    # Advance before skipping, otherwise the loop never terminates
                    try:
                        l_user = l_user.next
                    except StopIteration:
                        break
                    continue

                # Converting to tensor metadata
                # Casting user_meta.user_meta_data to NvDsInferTensorMeta
                tensor_meta = pyds.NvDsInferTensorMeta.cast(
                    user_meta.user_meta_data)

                # Get output layer as NvDsInferLayerInfo
                layer = pyds.get_nvds_LayerInfo(tensor_meta, 0)

                # Convert NvDsInferLayerInfo buffer to numpy array
                ptr = ctypes.cast(pyds.get_ptr(layer.buffer),
                                  ctypes.POINTER(ctypes.c_float))
                v = np.ctypeslib.as_array(ptr, shape=(128, ))

                # Predict face name
                yhat = v.reshape((1, -1))
                face_to_predict_embedding = normalize_vectors(yhat)
                result = predict_using_classifier(faces_embeddings, labels,
                                                  face_to_predict_embedding)
                result = (str(result).title())
                # print('Predicted name: %s' % result)

                # Generate classifier metadata and attach to obj_meta

                # Get NvDsClassifierMeta object
                classifier_meta = pyds.nvds_acquire_classifier_meta_from_pool(
                    batch_meta)

                # Populate classifier_meta data with prediction result
                classifier_meta.unique_component_id = tensor_meta.unique_id

                label_info = pyds.nvds_acquire_label_info_meta_from_pool(
                    batch_meta)

                label_info.result_prob = 0
                label_info.result_class_id = 0

                pyds.nvds_add_label_info_meta_to_classifier(
                    classifier_meta, label_info)
                pyds.nvds_add_classifier_meta_to_object(
                    obj_meta, classifier_meta)

                display_text = pyds.get_string(
                    obj_meta.text_params.display_text)
                obj_meta.text_params.display_text = f'{display_text} {result}'

                try:
                    l_user = l_user.next
                except StopIteration:
                    break

            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
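
normalize_vectors and predict_using_classifier are not included in this snippet. A hypothetical sketch of what they could look like with scikit-learn, assuming faces_embeddings and labels hold precomputed reference embeddings and their names:

from sklearn.preprocessing import normalize
from sklearn.svm import SVC

def normalize_vectors(vectors):
    # L2-normalize each embedding row (hypothetical implementation).
    return normalize(vectors, norm='l2')

def predict_using_classifier(faces_embeddings, labels, embedding):
    # Hypothetical implementation: fit a linear SVM on the reference embeddings
    # and predict the name for the query embedding. A real pipeline would fit
    # the classifier once offline instead of on every call.
    model = SVC(kernel='linear')
    model.fit(normalize_vectors(faces_embeddings), labels)
    return model.predict(embedding)[0]
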
def tiler_src_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        '''
        print("Frame Number is ", frame_meta.frame_num)
        print("Source id is ", frame_meta.source_id)
        print("Batch id is ", frame_meta.batch_id)
        print("Source Frame Width ", frame_meta.source_frame_width)
        print("Source Frame Height ", frame_meta.source_frame_height)
        print("Num object meta ", frame_meta.num_obj_meta)
        '''
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        bboxes = []
        classids = []
        num_rects = frame_meta.num_obj_meta
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1

            bos = np.array([
                int(obj_meta.rect_params.left),
                int(obj_meta.rect_params.top),
                int(obj_meta.rect_params.left + obj_meta.rect_params.width),
                int(obj_meta.rect_params.top + obj_meta.rect_params.height)
            ])
            bboxes.append(bos.astype("int"))

            classids.append(obj_meta.class_id)

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        boxes_dic[frame_meta.source_id] = bboxes
        frameTime = int(datetime.now().timestamp())

        if tracker_status == 1:
            counter, counts, trackers = trackers_list[
                frame_meta.source_id].detectandkalmtrack(
                    boxes_dic[frame_meta.source_id],
                    classids,
                    frameTime=frameTime)
            for index, values in counts.items():
                counters = list(values.keys())
                #print(f' This {frame_meta.source_id + 1} Tracker {classes[index]} -- {counters[0]} ----{str(values["inCount"])} ------ {counters[1]}---- {str(values["outCount"])}')
        """display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, vehicle_count, person)
        py_nvosd_text_params.x_offset = 10;
        py_nvosd_text_params.y_offset = 12;
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        py_nvosd_text_params.font_params.font_color.red = 1.0
        py_nvosd_text_params.font_params.font_color.green = 1.0
        py_nvosd_text_params.font_params.font_color.blue = 1.0
        py_nvosd_text_params.font_params.font_color.alpha = 1.0
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.red = 0.0
        py_nvosd_text_params.text_bg_clr.green = 0.0
        py_nvosd_text_params.text_bg_clr.blue = 0.0
        py_nvosd_text_params.text_bg_clr.alpha = 1.0
        #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",vehicle_count,"Person_count=",person)
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)"""
        #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",obj_counter[PGIE_CLASS_ID_VEHICLE],"Person_count=",obj_counter[PGIE_CLASS_ID_PERSON])

        # Draw the ROI polygon for this stream as four connected line segments
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        x1, y1, x2, y2, x3, y3, x4, y4 = Roi_points_list[frame_meta.source_id]
        roi_points = [(x1, y1), (x2, y2), (x3, y3), (x4, y4)]
        for line_index in range(4):
            start_x, start_y = roi_points[line_index]
            end_x, end_y = roi_points[(line_index + 1) % 4]
            line_params = display_meta.line_params[line_index]
            line_params.x1 = start_x
            line_params.y1 = start_y
            line_params.x2 = end_x
            line_params.y2 = end_y
            line_params.line_width = 4
            line_params.line_color.red = 1.0
            line_params.line_color.green = 1.0
            line_params.line_color.blue = 0.0
            line_params.line_color.alpha = 0.7
            display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BAG: 0,
            PGIE_CLASS_ID_FACE: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1

#            osd_rect_params =  pyds.NvOSD_RectParams.cast(obj_meta.rect_params)
            # Draw black patch to cover faces (class_id = 2), can change to other colors
            if (obj_meta.class_id == PGIE_CLASS_ID_FACE):
                obj_meta.rect_params.border_width = 0
                obj_meta.rect_params.has_bg_color = 1
                obj_meta.rect_params.bg_color.red = 0.0
                obj_meta.rect_params.bg_color.green = 0.0
                obj_meta.rect_params.bg_color.blue = 0.0
                obj_meta.rect_params.bg_color.alpha = 1.0
            elif (obj_meta.class_id == PGIE_CLASS_ID_PERSON ) :
                obj_meta.rect_params.border_width = 0
                obj_meta.rect_params.has_bg_color = 1
                obj_meta.rect_params.bg_color.red = 0.0
                obj_meta.rect_params.bg_color.green = 0.0
                obj_meta.rect_params.bg_color.blue = 0.0
                obj_meta.rect_params.bg_color.alpha = 0.5

            # Periodically check for objects and save the annotated object to file.
            if saved_count["stream_{}".format(frame_meta.pad_index)] % 10 == 0 and obj_meta.class_id == PGIE_CLASS_ID_FACE :
                if is_first_obj:
                    is_first_obj = False
                    # Getting Image data using nvbufsurface
                    # the input should be address of buffer and batch_id
                    n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
                    n_frame = crop_object(n_frame, obj_meta)
                    # convert python array into numpy array format in the copy mode.
                    frame_copy = np.array(n_frame, copy=True, order='C')
                    # convert the array into cv2 default color format
                    frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)


                save_image = True

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        print("Frame Number=", frame_number, "Number of Objects=", num_rects, "Face_count=",
              obj_counter[PGIE_CLASS_ID_FACE], "Person_count=", obj_counter[PGIE_CLASS_ID_PERSON])
        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        if save_image:
            img_path = "{}/stream_{}/frame_{}.jpg".format(folder_name, frame_meta.pad_index, frame_number)
            cv2.imwrite(img_path, frame_copy)
        saved_count["stream_{}".format(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
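
crop_object is referenced but not defined in this snippet. A hypothetical version that cuts the detected object out of the RGBA frame using its rect_params:

def crop_object(frame, obj_meta):
    # Hypothetical helper, not the original implementation.
    rect = obj_meta.rect_params
    top = int(rect.top)
    left = int(rect.left)
    width = int(rect.width)
    height = int(rect.height)
    # Return a view into the frame; the probe copies it afterwards.
    return frame[top:top + height, left:left + width]
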
Example #10
def src_pad_buffer_probe(pad, info, u_data):
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    # Set frame_number & rectangles to draw as 0
    frame_number = 0
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        # Get frame number, number of rectangles to draw and object metadata
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list

        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            # Increment Object class by 1
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        print(
            "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}"
            .format(frame_number, num_rects,
                    obj_counter[PGIE_CLASS_ID_VEHICLE],
                    obj_counter[PGIE_CLASS_ID_PERSON]))

        # FPS Probe
        fps_streams_new["stream{0}".format(frame_meta.pad_index)].get_fps()

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
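
These probes index obj_counter with the PGIE_CLASS_ID_* constants. In the standard DeepStream 4-class detector samples they are defined as below; treat the values as an assumption if a different model is used:

# Class IDs of the default 4-class detector used in the DeepStream samples (assumed).
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
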
Example #11
def tiler_src_pad_buffer_probe(pad, info, u_data):
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    previous = service.get_previous()
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        service.set_frame_counter(frame_number)
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta

        ids = []
        boxes = []
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            obj_counter[obj_meta.class_id] += 1
            x = obj_meta.rect_params.left
            y = obj_meta.rect_params.top
            obj_id = obj_meta.object_id

            # Service Aforo (in and out)
            ids.append(obj_id)
            boxes.append((x, y))
            # service.counting_in_and_out_first_detection((x, y), obj_id) In and out counting when the object finally disappears
            service.counting_in_and_out_when_changing_area((x, y), obj_id, ids,
                                                           previous)

            # Service People counting
            if previous:
                service.people_counting_last_time_detected(ids)
                service.people_counting_storing_fist_time(obj_id)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Service Social Distance
        if previous:
            service.get_distances_between_detected_elements_from_centroid(
                boxes, ids)

        if not service.get_previous():
            service.set_previous(True)
            previous = service.get_previous()

        # Service Aforo (in and out)
        # service.count_in_and_out_when_object_leaves_the_frame(ids) In and out counting when the object finally disappears

        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]

        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
Example #12
def osd_sink_pad_buffer_probe(pad, info, u_data):
    # Initializing object counter with 0.
    obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
            }
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    previous = pc.get_previous()
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)

        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        pc.set_frame_counter(frame_number)
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta

        ids = []
        boxes = []
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            obj_counter[obj_meta.class_id] += 1
            x = obj_meta.rect_params.left
            y = obj_meta.rect_params.top
            obj_id = obj_meta.object_id

            # Service Aforo (in and out)
            pc.counting_in_and_out_first_detection((x, y), obj_id)
            ids.append(obj_id)
            boxes.append((x, y))

            # Service People counting
            if previous:
                pc.people_counting_last_time_detected(ids)
                pc.people_counting_storing_fist_time(obj_id)

            try: 
                l_obj = l_obj.next
            except StopIteration:
                break

        # Service Social Distance
        if previous:
            pc.get_distances_between_detected_elements_from_centroid(boxes, ids)

        pc.set_previous(True)
        previous = pc.get_previous()
        
        # Service Aforo (in and out)
        pc.count_in_and_out_when_object_leaves_the_frame(ids)

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        
        py_nvosd_text_params.display_text = \
            "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}"\
                .format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        # Attach the display meta so the configured text is actually rendered
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK	


Example #13
def seg_src_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        l_user = frame_meta.frame_user_meta_list
        while l_user is not None:
            try:
                # Note that l_user.data needs a cast to pyds.NvDsUserMeta
                # The casting is done by pyds.NvDsUserMeta.cast()
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone.
                seg_user_meta = pyds.NvDsUserMeta.cast(l_user.data)
            except StopIteration:
                break
            if seg_user_meta and seg_user_meta.base_meta.meta_type == \
                    pyds.NVDSINFER_SEGMENTATION_META:
                try:
                    # Note that seg_user_meta.user_meta_data needs a cast to
                    # pyds.NvDsInferSegmentationMeta
                    # The casting is done by pyds.NvDsInferSegmentationMeta.cast()
                    # The casting also keeps ownership of the underlying memory
                    # in the C code, so the Python garbage collector will leave
                    # it alone.
                    segmeta = pyds.NvDsInferSegmentationMeta.cast(seg_user_meta.user_meta_data)
                except StopIteration:
                    break
                # Retrieve mask data in the numpy format from segmeta
                # Note that pyds.get_segmentation_masks() expects object of
                # type NvDsInferSegmentationMeta
                masks = pyds.get_segmentation_masks(segmeta)
                masks = np.array(masks, copy=True, order='C')
                # map the obtained masks to colors of 2 classes.
                frame_image = map_mask_as_display_bgr(masks)
                cv2.imwrite(folder_name + "/" + str(frame_number) + ".jpg", frame_image)
            try:
                l_user = l_user.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
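
map_mask_as_display_bgr is not included in this snippet. A hypothetical two-class version that colors the segmentation mask for saving with OpenCV:

import numpy as np

def map_mask_as_display_bgr(mask):
    # Hypothetical helper: one class index per pixel, class 0 drawn red, class 1 green.
    bgr = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)
    bgr[mask == 0] = (0, 0, 255)
    bgr[mask == 1] = (0, 255, 0)
    return bgr
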
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            # Periodically check for objects with borderline confidence value that may be false positive detections.
            # If such detections are found, annotate the frame with bboxes and confidence value.
            # Save the annotated frame to file.
            if ((saved_count["stream_" + str(frame_meta.pad_index)] % 30 == 0)
                    and
                (obj_meta.confidence > 0.3 and obj_meta.confidence < 0.31)):
                if is_first_obj:
                    is_first_obj = False
                    # Getting Image data using nvbufsurface
                    # the input should be address of buffer and batch_id
                    n_frame = pyds.get_nvds_buf_surface(
                        hash(gst_buffer), frame_meta.batch_id)
                    #convert python array into numpy array format.
                    frame_image = np.array(n_frame, copy=True, order='C')
                    #convert the array into cv2 default color format
                    frame_image = cv2.cvtColor(frame_image,
                                               cv2.COLOR_RGBA2BGRA)

                save_image = True
                frame_image = draw_bounding_boxes(frame_image, obj_meta,
                                                  obj_meta.confidence)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        print("Frame Number=", frame_number, "Number of Objects=", num_rects,
              "Vehicle_count=", obj_counter[PGIE_CLASS_ID_VEHICLE],
              "Person_count=", obj_counter[PGIE_CLASS_ID_PERSON])
        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        if save_image:
            cv2.imwrite(
                folder_name + "/stream_" + str(frame_meta.pad_index) +
                "/frame_" + str(frame_number) + ".jpg", frame_image)
        saved_count["stream_" + str(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
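
draw_bounding_boxes is not defined in this snippet. A hypothetical version that annotates the copied frame with the object's box and confidence using OpenCV:

import cv2

def draw_bounding_boxes(image, obj_meta, confidence):
    # Hypothetical helper, not the original implementation.
    rect = obj_meta.rect_params
    top = int(rect.top)
    left = int(rect.left)
    width = int(rect.width)
    height = int(rect.height)
    cv2.rectangle(image, (left, top), (left + width, top + height), (0, 0, 255, 0), 2)
    label = "{} {:.2f}".format(obj_meta.obj_label, confidence)
    cv2.putText(image, label, (left, max(top - 10, 0)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255, 0), 2)
    return image
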
def pgie_src_pad_buffer_probe(pad, info, u_data, label_path):

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    detection_params = DetectionParam(CLASS_NB, ACCURACY_ALL_CLASS)
    box_size_param = BoxSizeParam(IMAGE_HEIGHT, IMAGE_WIDTH, MIN_BOX_WIDTH,
                                  MIN_BOX_HEIGHT)
    nms_param = NmsParam(TOP_K, IOU_THRESHOLD)

    label_names = get_label_names_from_file(label_path)

    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        l_user = frame_meta.frame_user_meta_list
        while l_user is not None:
            try:
                # Note that l_user.data needs a cast to pyds.NvDsUserMeta
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone.
                user_meta = pyds.NvDsUserMeta.cast(l_user.data)
            except StopIteration:
                break

            if (user_meta.base_meta.meta_type !=
                    pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META):
                # Advance before skipping, otherwise the loop never terminates
                try:
                    l_user = l_user.next
                except StopIteration:
                    break
                continue

            tensor_meta = pyds.NvDsInferTensorMeta.cast(
                user_meta.user_meta_data)

            # Boxes in the tensor meta should be in network resolution which is
            # found in tensor_meta.network_info. Use this info to scale boxes to
            # the input frame resolution.
            layers_info = []

            for i in range(tensor_meta.num_output_layers):
                layer = pyds.get_nvds_LayerInfo(tensor_meta, i)
                layers_info.append(layer)

            frame_object_list = nvds_infer_parse_custom_tf_ssd(
                layers_info, detection_params, box_size_param, nms_param)
            try:
                l_user = l_user.next
            except StopIteration:
                break

            for frame_object in frame_object_list:
                add_obj_meta_to_frame(frame_object, batch_meta, frame_meta,
                                      label_names)

        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
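pgie_src_pad_buffer_probe reads class names through get_label_names_from_file(label_path), which is not defined in this excerpt. A minimal sketch, assuming a plain text file with one label per line:

def get_label_names_from_file(label_path):
    # Return the class labels in file order, one label per line.
    with open(label_path, "r") as label_file:
        return [line.rstrip("\n") for line in label_file]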
Example No. 16
def tiler_src_pad_buffer_probe(pad, info, u_data):
    # Initializing object counter with 0.
    # mask detection version: only recognizes mask / no-mask
    obj_counter = {
        PGIE_CLASS_ID_FACE: 0,
        PGIE_CLASS_ID_PLATES: 0,
    }

    frame_number = 0
    num_rects = 0  # number of objects in the frame
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    # ====================== Definition of on-screen message values
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    current_pad_index = pyds.NvDsFrameMeta.cast(l_frame.data).pad_index

    camera_id = get_camera_id(current_pad_index)

    #aforo_info = get_aforo(camera_id)
    #is_aforo_enabled = aforo_info['enabled']

    #social_distance_info = get_social_distance(camera_id)
    #is_social_distance_enabled = social_distance_info['enabled']

    #people_counting_info = get_people_counting(camera_id)
    #is_people_counting_enabled = people_counting_info['enabled']

    # The Plates Detection service is still missing
    #
    #

    #print("entered tiler_src_pad_buffer_probe")
    # All services require on-screen text; only the occupancy (Aforo) service
    # also needs a line and a rectangle
    display_meta.num_labels = 1  # number of text labels
    py_nvosd_text_params = display_meta.text_params[0]

    # Set up the on-screen text label
    py_nvosd_text_params.x_offset = 100
    py_nvosd_text_params.y_offset = 120
    py_nvosd_text_params.font_params.font_name = "Arial"
    py_nvosd_text_params.font_params.font_size = 10
    py_nvosd_text_params.font_params.font_color.red = 1.0
    py_nvosd_text_params.font_params.font_color.green = 1.0
    py_nvosd_text_params.font_params.font_color.blue = 1.0
    py_nvosd_text_params.font_params.font_color.alpha = 1.0
    py_nvosd_text_params.set_bg_clr = 1
    py_nvosd_text_params.text_bg_clr.red = 0.0
    py_nvosd_text_params.text_bg_clr.green = 0.0
    py_nvosd_text_params.text_bg_clr.blue = 0.0
    py_nvosd_text_params.text_bg_clr.alpha = 1.0

    #no_mask_ids = get_no_mask_ids_dict(camera_id)
    # why initialize it to 1 ????
    #frame_number = 1  # to avoid an undefined-name issue

    client = boto3.client('rekognition')

    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        #print("first loop")
        frame_number = frame_meta.frame_num
        #print(" fps:",frame_meta.num_surface_per_frame)
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False

        #print(num_rects)  # stream ID number
        ids = set()

        # Inner loop where the objects within the frame are evaluated
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            obj_counter[obj_meta.class_id] += 1
            #print(obj_meta.confidence,"   ",obj_meta.object_id)
            #print(obj_counter[obj_meta.class_id],"   ",obj_counter[obj_meta.class_id]%5)
            # and (obj_meta.confidence > 0.9 )
            print(frame_number)
            if ((obj_meta.class_id == 1) and (frame_number % 8 == 0)):
                if is_first_obj:
                    is_first_obj = False
                    # Getting Image data using nvbufsurface
                    # the input should be address of buffer and batch_id
                    n_frame = pyds.get_nvds_buf_surface(
                        hash(gst_buffer), frame_meta.batch_id)
                    #convert python array into numpy array format.
                    frame_image = np.array(n_frame, copy=True, order='C')
                    #convert the array into cv2 default color format
                    frame_image = cv2.cvtColor(frame_image,
                                               cv2.COLOR_RGBA2BGRA)

                save_image = True
                frame_image = draw_bounding_boxes(frame_image, obj_meta,
                                                  obj_meta.confidence)

                # Rekognition expects encoded image bytes, not a raw numpy
                # array, so encode the frame as JPEG before the call.
                _, jpeg_buf = cv2.imencode(
                    '.jpg', cv2.cvtColor(frame_image, cv2.COLOR_BGRA2BGR))
                response = client.detect_labels(
                    Image={'Bytes': jpeg_buf.tobytes()})
                print('Detected labels:')
                for label in response['Labels']:
                    print(label['Name'] + ' : ' + str(label['Confidence']))

            #py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Mask={} NoMaks={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_FACE], obj_counter[PGIE_CLASS_ID_PLATES])

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        #pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        #print(save_image)
        #print(folder_name)
        if save_image:
            print("Saving annotated frame")
            print(obj_meta.class_id)

            cv2.imwrite(
                folder_name + "/stream_" + str(frame_meta.pad_index) +
                "/frame_" + str(frame_number) + ".jpg", frame_image)
        saved_count["stream_" + str(frame_meta.pad_index)] += 1

        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    '''
    if frame_number % 43 == 0:
        new_dict = {}
        no_mask_ids = get_no_mask_ids_dict(camera_id)

        for item in ids:
            if item in no_mask_ids:
                value = no_mask_ids[item]
                new_dict.update({item: value})

        set_no_mask_ids_dict(camera_id, new_dict)

        # Send it straight to the live stream
    '''

    return Gst.PadProbeReturn.OK
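For context, a probe like the one above has to be registered on the tiler's src pad when the pipeline is built. A minimal sketch of that wiring, assuming a `tiler` element already exists:

import sys

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

def attach_tiler_probe(tiler):
    # Hang the buffer probe on the tiler's src pad so it sees batched frames.
    tiler_src_pad = tiler.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write("Unable to get src pad of tiler\n")
        return
    tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                            tiler_src_pad_buffer_probe, 0)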
def osd_sink_pad_buffer_probe(pad, info, u_data, label_path):
    frame_number = 0
    # Initializing object counter with 0.
    obj_counter = dict(enumerate([0] * CLASS_NB))
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        id_dict = {
            val: index
            for index, val in enumerate(get_label_names_from_file(label_path))
        }
        disp_string = "Frame Number={} Number of Objects={} Person_count={}"
        py_nvosd_text_params.display_text = disp_string.format(
            frame_number, num_rects, obj_counter[id_dict["person"]])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
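Both pgie_src_pad_buffer_probe and this osd_sink_pad_buffer_probe take a fourth label_path argument that GStreamer's probe callback does not supply by itself. One way to bind it is with functools.partial; a minimal sketch, where the labels path is a hypothetical placeholder:

from functools import partial

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

def attach_osd_probe(nvosd, label_path="labels.txt"):
    # Bind label_path in advance; GStreamer then calls the partial with
    # (pad, info, u_data) and the extra keyword is forwarded to the probe.
    osd_sink_pad = nvosd.get_static_pad("sink")
    osd_sink_pad.add_probe(
        Gst.PadProbeType.BUFFER,
        partial(osd_sink_pad_buffer_probe, label_path=label_path), 0)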
Example No. 18
	def get_lva_MediaStreamMessage(self, buffer, gst_lva_message, ih, iw):

		msg = extension_pb2.MediaStreamMessage()		
		msg.ack_sequence_number = gst_lva_message.sequence_number
		msg.media_sample.timestamp = gst_lva_message.timestamp
			
		# # Retrieve batch metadata from the gst_buffer
		# # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
		# # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
		batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buffer))

		frame = batch_meta.frame_meta_list
		
		while frame is not None:
			try:
				# Note that frame.data needs a cast to pyds.NvDsFrameMeta
				# The casting is done by pyds.NvDsFrameMeta.cast()
				# The casting also keeps ownership of the underlying memory
				# in the C code, so the Python garbage collector will leave
				# it alone.
				frame_meta = pyds.NvDsFrameMeta.cast(frame.data)
				objInference = frame_meta.obj_meta_list
				frameWidth = frame_meta.source_frame_width
				frameHeight = frame_meta.source_frame_height
				# iterate through objects 
				while objInference is not None:
					try: 
						# Casting objInference.data to pyds.NvDsObjectMeta
						obj_meta=pyds.NvDsObjectMeta.cast(objInference.data)
					except StopIteration:
						break

					inference = msg.media_sample.inferences.add()	

					attributes = []
					obj_label = None
					obj_confidence = 0
					obj_left = 0
					obj_width = 0
					obj_top = 0
					obj_height = 0

					color = ''
					# Classification 
					attribute = None
					if(obj_meta.class_id == 0 and obj_meta.classifier_meta_list is not None):
						classifier_meta = obj_meta.classifier_meta_list
						while classifier_meta is not None:
							classifierItem = pyds.NvDsClassifierMeta.cast(classifier_meta.data)
							if(classifierItem is not None):
								label_meta = classifierItem.label_info_list
								while label_meta is not None:
									labelItem = pyds.NvDsLabelInfo.cast(label_meta.data)
									prob = round(labelItem.result_prob, 2)
									attrValue = labelItem.result_label

									attrName = 'unknown'
									if(classifierItem.unique_component_id == PGIE_CLASS_ID_VEHICLE_COLOR):
										attrName = 'color'
									else:
										attrName = 'type'

									attributes.append([attrName, attrValue, prob])
									
									try: 
										label_meta=label_meta.next
									except StopIteration:
										break

							try: 
								classifier_meta=classifier_meta.next
							except StopIteration:
								break
							
					rect_params=obj_meta.rect_params
					top=int(rect_params.top)
					left=int(rect_params.left)
					width=int(rect_params.width)
					height=int(rect_params.height)
					obj_confidence = obj_meta.confidence
					obj_label = obj_meta.obj_label
					
					obj_left = left / iw
					obj_top = top / ih
					obj_width = width/ iw
					obj_height = height / ih
					obj_id = None

					# Tracking: Active tracking bbox information
					if(self.trackinEnabled):
						obj_id = obj_meta.object_id
						obj_active_tracking = obj_meta.tracker_bbox_info
						tracking_coord = obj_active_tracking.org_bbox_coords
						if(tracking_coord is not None and tracking_coord.left > 0 and tracking_coord.width > 0 and tracking_coord.top > 0 and tracking_coord.height > 0):
							obj_left = tracking_coord.left / iw
							obj_top = tracking_coord.top / ih
							obj_width = tracking_coord.width/ iw
							obj_height = tracking_coord.height / ih

					inference.type = inferencing_pb2.Inference.InferenceType.ENTITY

					if obj_label is not None:
						try:
							entity = inferencing_pb2.Entity(
													tag = inferencing_pb2.Tag(
														value = obj_label,
														confidence = obj_confidence
													),
													box = inferencing_pb2.Rectangle(
														l = obj_left,
														t = obj_top,
														w = obj_width,
														h = obj_height
													)												
												)

							if(self.trackinEnabled and obj_id is not None):
								entity.id = str(obj_id)

							for attr in attributes:
								attribute = inferencing_pb2.Attribute(
									name = attr[0],
									value = attr[1],
									confidence = attr[2]
								)

								entity.attributes.append(attribute)
						except:
							PrintGetExceptionDetails()
										
						inference.entity.CopyFrom(entity)

					try: 
						objInference=objInference.next
					except StopIteration:
						break

			except StopIteration:
				break

			try:
				frame = frame.next
			except StopIteration:
				break

		return msg		
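The except branch in the method above calls PrintGetExceptionDetails(), which is not shown here. A minimal sketch, assuming the helper simply prints the traceback of the exception currently being handled:

import sys
import traceback

def PrintGetExceptionDetails():
    # Dump type, value and traceback of the active exception to stderr.
    exc_type, exc_value, exc_tb = sys.exc_info()
    traceback.print_exception(exc_type, exc_value, exc_tb)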
Example No. 19
def tiler_src_pad_buffer_probe(pad, info, u_data):

    # Initializing object counter with 0.
    # version 2.1: persons only

    servicios_habilitados = service.emulate_reading_from_server()
    #print("Occupancy (Aforo) values:", servicios_habilitados[AFORO_ENT_SAL_SERVICE],servicios_habilitados[PEOPLE_COUNTING_SERVICE],servicios_habilitados[SOCIAL_DISTANCE_SERVICE])

    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }

    frame_number = 0
    num_rects = 0  # number of objects in the frame
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    previous = service.get_previous()

    # ====================== Definition of on-screen message values
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    display_meta.num_labels = 1  # number of text labels
    display_meta.num_lines = 1  # number of lines
    display_meta.num_rects = 1  # number of rectangles

    py_nvosd_text_params = display_meta.text_params[0]
    py_nvosd_line_params = display_meta.line_params[0]
    py_nvosd_rect_params = display_meta.rect_params[0]

    # Set up the on-screen text label
    py_nvosd_text_params.x_offset = 100
    py_nvosd_text_params.y_offset = 120
    py_nvosd_text_params.font_params.font_name = "Arial"
    py_nvosd_text_params.font_params.font_size = 10
    py_nvosd_text_params.font_params.font_color.red = 1.0
    py_nvosd_text_params.font_params.font_color.green = 1.0
    py_nvosd_text_params.font_params.font_color.blue = 1.0
    py_nvosd_text_params.font_params.font_color.alpha = 1.0
    py_nvosd_text_params.set_bg_clr = 1
    py_nvosd_text_params.text_bg_clr.red = 0.0
    py_nvosd_text_params.text_bg_clr.green = 0.0
    py_nvosd_text_params.text_bg_clr.blue = 0.0
    py_nvosd_text_params.text_bg_clr.alpha = 1.0

    # Set up the entry/exit (Ent/Sal) line.
    # The coordinate values should come from the configuration file;
    # at the moment they are hard-coded.

    py_nvosd_line_params.x1 = 510
    py_nvosd_line_params.y1 = 740
    py_nvosd_line_params.x2 = 1050
    py_nvosd_line_params.y2 = 740
    py_nvosd_line_params.line_width = 5
    py_nvosd_line_params.line_color.red = 1.0
    py_nvosd_line_params.line_color.green = 1.0
    py_nvosd_line_params.line_color.blue = 1.0
    py_nvosd_line_params.line_color.alpha = 1.0

    # Set up the entry/exit (Ent/Sal) rectangle.
    # As with the line parameters, the rectangle values should be
    # computed from the configuration file values.

    py_nvosd_rect_params.left = 500
    py_nvosd_rect_params.height = 120
    py_nvosd_rect_params.top = 680
    py_nvosd_rect_params.width = 560
    py_nvosd_rect_params.border_width = 4
    py_nvosd_rect_params.border_color.red = 0.0
    py_nvosd_rect_params.border_color.green = 0.0
    py_nvosd_rect_params.border_color.blue = 1.0
    py_nvosd_rect_params.border_color.alpha = 1.0

    #======================

    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        # what does this function do ????
        #if get_counter() == 60:
        #    set_counter()

        #    if get_current_time() > get_offset_time():
        #        print('aca...............')
        #        service.emulate_reading_from_server()
        #        set_offset_time()
        #    else:
        #        set_current_time()
        #else:
        #    increment()

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta

        #
        #print("stream_" + str(frame_meta.pad_index))  # the source number comes in pad_index;
        # it should be used to decide which service to run in the inner loop
        #

        ids = []
        boxes = []
        # Default entry/exit counts so the display text below stays valid even
        # when the occupancy service is disabled or no person is detected.
        entrada, salida = 0, 0

        #
        # Inner loop where the objects within the frame are evaluated
        #
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            # persons-only validation, kept for debugging
            # print(" Class ID ", pgie_classes_str[obj_meta.class_id])

            obj_counter[obj_meta.class_id] += 1
            x = obj_meta.rect_params.left
            y = obj_meta.rect_params.top

            # Service Aforo (in and out)
            ids.append(obj_meta.object_id)
            boxes.append((x, y))

            #print(servicios_habilitados[AFORO_ENT_SAL_SERVICE])
            if servicios_habilitados[AFORO_ENT_SAL_SERVICE]:
                #print("Occupancy (Aforo) service enabled")
                entrada, salida = service.aforo((x, y), obj_meta.object_id,
                                                ids, previous)
                #print("Direction value ", entrada, salida)
                #if direction == 1:
                #    contador_entrada += 1
                #    print("Entry", contador_entrada)
                #elif direction == 0:
                #    print("Exit", contador_salida)
                #    contador_salida += 1

            # Service People counting
            #if previous:
            #    service.people_counting_last_time_detected(ids)
            #    service.people_counting_storing_fist_time(obj_meta.object_id)

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Note, 18-Aug-2020:
        # The Social Distance code currently runs outside the loop that
        # identifies the objects in the frame; I don't think it should.

        # Service Social Distance
        if servicios_habilitados[SOCIAL_DISTANCE_SERVICE]:
            boxes_length = len(boxes)
            if boxes_length > 1:
                service.set_frame_counter(frame_number)
                service.tracked_on_time_social_distance(
                    boxes, ids, boxes_length)

        if not previous:
            previous = service.set_previous()

        # Draw the values of interest onto the video,
        # plus the entry/exit (Ent/Sal) line
        #

        #py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],obj_counter[PGIE_CLASS_ID_PERSON])
        py_nvosd_text_params.display_text = "Source ID={} Source Number={} Person_count={} Entries={} Exits={}".format(
            frame_meta.source_id, frame_meta.pad_index,
            obj_counter[PGIE_CLASS_ID_PERSON], entrada, salida)

        # Send it to the live stream
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        # Send it to the terminal; the next 2 lines do the same thing with different functions

        #print(pyds.get_string(py_nvosd_text_params.display_text))
        #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",vehicle_count,"Person_count=",person)

        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
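All of these probes call fps_streams["stream{0}".format(pad_index)].get_fps(), but the dictionary itself is never built in the snippets. A minimal sketch of the usual setup, assuming the GETFPS helper from the DeepStream Python apps' common.FPS module and a hypothetical source count:

from common.FPS import GETFPS  # assumption: DeepStream Python apps helper

fps_streams = {}
number_sources = 2  # hypothetical number of input streams

for i in range(number_sources):
    # One FPS tracker per stream, keyed exactly as the probes expect.
    fps_streams["stream{0}".format(i)] = GETFPS(i)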
Example No. 20
def analytics_meta_buffer_probe(pad, info, u_data):

    # Get the buffer from the pipeline
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer")
        return Gst.PadProbeReturn.OK

    # With the pyds wrapper get the batch of metadata from the buffer
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    # From the batch of metadata get the list of frames
    list_of_frames = batch_meta.frame_meta_list

    # Iterate thru the list of frames
    while list_of_frames is not None:
        try:

            # Get the metadata on the current frame
            # The next frame is set at the end of the while loop
            frame_meta = pyds.NvDsFrameMeta.cast(list_of_frames.data)

        except StopIteration:
            break

        # INFORMATION THAT IS PRESENT THE FRAME
        #
        # - frame_meta.frame_num
        # - frame_meta.source_id
        # - frame_meta.batch_id
        # - frame_meta.source_frame_width
        # - frame_meta.source_frame_height
        # - frame_meta.num_obj_meta

        # Print the frame width and height to see at what positions the bounding boxes can be drawn
        # print('Frame Width: ' + str(frame_meta.source_frame_width)) = 1920
        # print('Frame Height: ' + str(frame_meta.source_frame_height)) = 1080

        # In the information of the frame we can get a list of objects detected on the frame.
        list_of_objects = frame_meta.obj_meta_list

        # Iterate thru the list of objects
        while list_of_objects is not None:
            try:
                # Get the metadata for each object in the frame
                object_meta = pyds.NvDsObjectMeta.cast(list_of_objects.data)

            except StopIteration:
                break

            # Get the object's user metadata list (e.g. nvdsanalytics info)
            l_user_meta = object_meta.obj_user_meta_list

            while l_user_meta:
                try:
                    pass
                    # user_meta = pyds.NvDsUserMeta.cast(l_user_meta.data)
                    # print(user_meta.base_meta.meta_type)
                    # if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type("NVIDIA.DSANALYTICSOBJ.USER_META"):
                    #     user_meta_data = pyds.NvDsAnalyticsObjInfo.cast(user_meta.user_meta_data)
                    #     if user_meta_data.dirStatus: print("Object {0} moving in direction: {1}".format(object_meta.object_id, user_meta_data.dirStatus))
                    #     if user_meta_data.lcStatus: print("Object {0} line crossing status: {1}".format(object_meta.object_id, user_meta_data.lcStatus))
                    #     if user_meta_data.ocStatus: print("Object {0} overcrowding status: {1}".format(object_meta.object_id, user_meta_data.ocStatus))
                    #     if user_meta_data.roiStatus: print("Object {0} roi status: {1}".format(object_meta.object_id, user_meta_data.roiStatus))
                except StopIteration:
                    break

                # Advance to the next user meta entry; without this the loop
                # would never terminate when user metadata is present.
                try:
                    l_user_meta = l_user_meta.next
                except StopIteration:
                    break

            try:
                list_of_objects = list_of_objects.next
            except StopIteration:
                break
        # When there is no more object in the list of objects
        # we continue here

        # INFORMATION OF THE OBJECT METADATA
        #
        # - object_meta.class_id
        # - object_meta.confidence
        # - object_meta.obj_label
        # - object_meta.object_id (If not tracker present on the pipeline, the ID is the same for all objects)
        # - object_meta.rect_params

        # Get the display meta from the batch meta; this is separate metadata,
        # different from the frame meta collected before
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)

        # display_meta.num_rects would be set here to draw rectangles; this
        # example attaches the display meta without adding any shapes

        # Attach the (empty) display meta to the frame
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        try:
            # Go to the next frame in the list
            list_of_frames = list_of_frames.next
        except StopIteration:
            break
        # When there are not frames in the buffer we end here, and the function returns ok

    return Gst.PadProbeReturn.OK
def osd_sink_pad_buffer_probe(pad, info, u_data):
    global fps_stream, face_counter
    frame_number = 0
    #Initializing object counter with 0.
    vehicle_count = 0
    person_count = 0
    face_count = 0
    lp_count = 0
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            if obj_meta.unique_component_id == PRIMARY_DETECTOR_UID:
                if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE:
                    vehicle_count += 1
                if obj_meta.class_id == PGIE_CLASS_ID_PERSON:
                    person_count += 1

            if obj_meta.unique_component_id == SECONDARY_DETECTOR_UID:
                if obj_meta.class_id == SGIE_CLASS_ID_FACE:
                    face_count += 1
                if obj_meta.class_id == SGIE_CLASS_ID_LP:
                    lp_count += 1

            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        fps_stream.get_fps()
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={}  Person_count={} Face Count={}".format(
            frame_number, num_rects, person_count, face_count)
        face_counter.append(face_count)

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
Example No. 22
    def tiler_sink_pad_buffer_probe(self, pad, info, u_data):
        frame_number = 0
        num_rects = 0
        gst_buffer = info.get_buffer()
        if not gst_buffer:
            print("Unable to get GstBuffer ")
            return

        # Retrieve batch metadata from the gst_buffer
        # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
        # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

        l_frame = batch_meta.frame_meta_list
        while l_frame is not None:
            try:
                # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
                # The casting is done by pyds.NvDsFrameMeta.cast()
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone.
                frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            except StopIteration:
                break

            frame_number = frame_meta.frame_num
            l_obj = frame_meta.obj_meta_list
            num_rects = frame_meta.num_obj_meta
            is_first_obj = True
            save_image = False
            obj_counter = {
                PGIE_CLASS_ID_VEHICLE: 0,
                PGIE_CLASS_ID_BICYCLE: 0,
                PGIE_CLASS_ID_PERSON: 0,
                PGIE_CLASS_ID_ROADSIGN: 0
            }

            # Message for output of detection inference
            msg = Detection2DArray()
            while l_obj is not None:
                try:
                    # Casting l_obj.data to pyds.NvDsObjectMeta
                    obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                    l_classifier = obj_meta.classifier_meta_list
                    # If object is a car (class ID 0), perform attribute classification
                    if obj_meta.class_id == 0 and l_classifier is not None:
                        # Creating and publishing message with output of classification inference
                        msg2 = Classification2D()

                        while l_classifier is not None:
                            result = ObjectHypothesis()
                            try:
                                classifier_meta = pyds.glist_get_nvds_classifier_meta(
                                    l_classifier.data)
                            except StopIteration:
                                print('Could not parse MetaData: ')
                                break

                            classifier_id = classifier_meta.unique_component_id
                            l_label = classifier_meta.label_info_list
                            label_info = pyds.glist_get_nvds_label_info(
                                l_label.data)
                            classifier_class = label_info.result_class_id

                            if classifier_id == 2:
                                result.id = class_color[classifier_class]
                            elif classifier_id == 3:
                                result.id = class_make[classifier_class]
                            else:
                                result.id = class_type[classifier_class]

                            result.score = label_info.result_prob
                            msg2.results.append(result)
                            l_classifier = l_classifier.next

                        self.publisher_classification.publish(msg2)

                except StopIteration:
                    break

                obj_counter[obj_meta.class_id] += 1

                # Creating message for output of detection inference
                result = ObjectHypothesisWithPose()
                result.id = str(class_obj[obj_meta.class_id])
                result.score = obj_meta.confidence

                left = obj_meta.rect_params.left
                top = obj_meta.rect_params.top
                width = obj_meta.rect_params.width
                height = obj_meta.rect_params.height
                bounding_box = BoundingBox2D()
                bounding_box.center.x = float(left + (width / 2))
                # In image coordinates the box center lies below the top edge,
                # so the y center is top + height / 2.
                bounding_box.center.y = float(top + (height / 2))
                bounding_box.size_x = width
                bounding_box.size_y = height

                detection = Detection2D()
                detection.results.append(result)
                detection.bbox = bounding_box
                msg.detections.append(detection)

                # Periodically check for objects with borderline confidence value that may be false positive detections.
                # If such detections are found, annotate the frame with bboxes and confidence value.
                # Save the annotated frame to file.
                if ((saved_count["stream_" + str(frame_meta.pad_index)] % 30
                     == 0) and (obj_meta.confidence > 0.3
                                and obj_meta.confidence < 0.31)):
                    if is_first_obj:
                        is_first_obj = False
                        # Getting Image data using nvbufsurface
                        # the input should be address of buffer and batch_id
                        n_frame = pyds.get_nvds_buf_surface(
                            hash(gst_buffer), frame_meta.batch_id)
                        #convert python array into numpy array format.
                        frame_image = np.array(n_frame, copy=True, order='C')
                        #convert the array into cv2 default color format
                        frame_image = cv2.cvtColor(frame_image,
                                                   cv2.COLOR_RGBA2BGRA)

                    save_image = True
                    frame_image = draw_bounding_boxes(frame_image, obj_meta,
                                                      obj_meta.confidence)
                try:
                    l_obj = l_obj.next
                except StopIteration:
                    break

            # Get frame rate through this probe
            fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()

            # Publishing message with output of detection inference
            self.publisher_detection.publish(msg)

            if save_image:
                cv2.imwrite(
                    folder_name + "/stream_" + str(frame_meta.pad_index) +
                    "/frame_" + str(frame_number) + ".jpg", frame_image)
            saved_count["stream_" + str(frame_meta.pad_index)] += 1
            try:
                l_frame = l_frame.next
            except StopIteration:
                break

        return Gst.PadProbeReturn.OK
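This ROS 2 probe builds Detection2DArray, Detection2D, ObjectHypothesis, ObjectHypothesisWithPose, BoundingBox2D and Classification2D messages without showing their imports (the class_obj/class_color/class_make/class_type dictionaries and the publishers are also defined elsewhere). A minimal sketch of the imports it appears to assume; the exact message names depend on the vision_msgs version:

# Assumed ROS 2 message imports for the probe above (vision_msgs package).
from vision_msgs.msg import (
    BoundingBox2D,
    Classification2D,
    Detection2D,
    Detection2DArray,
    ObjectHypothesis,
    ObjectHypothesisWithPose,
)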
def osd_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    #Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_FACE: 0,
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta

        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1

            l_user = obj_meta.obj_user_meta_list
            # print(l_user)
            while l_user is not None:
                print('Inside l_user = obj_meta.obj_user_meta_list Loop')
                try:
                    # Casting l_user.data to pyds.NvDsUserMeta
                    user_meta = pyds.NvDsUserMeta.cast(l_user.data)
                except StopIteration:
                    break

                if (user_meta.base_meta.meta_type !=
                        pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META):
                    # Advance before skipping; a bare `continue` here would
                    # loop forever on a non-tensor user meta.
                    try:
                        l_user = l_user.next
                    except StopIteration:
                        break
                    continue

                tensor_meta = pyds.NvDsInferTensorMeta.cast(
                    user_meta.user_meta_data)

                # Boxes in the tensor meta should be in network resolution which is
                # found in tensor_meta.network_info. Use this info to scale boxes to
                # the input frame resolution.
                layers_info = []

                for i in range(tensor_meta.num_output_layers):
                    layer = pyds.get_nvds_LayerInfo(tensor_meta, i)
                    layers_info.append(layer)
                    print(f'Layer: {i}, Layer name: {layer.layerName}')

                try:
                    l_user = l_user.next
                except StopIteration:
                    break

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Person_count={} Face_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_PERSON],
            obj_counter[PGIE_CLASS_ID_FACE])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
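The probe above only prints the output layer names. To read the raw values as well, each layer's buffer can be indexed element by element; a minimal sketch, assuming pyds.get_detections() as used in the DeepStream SSD parser sample and a caller-supplied element count:

import pyds

def layer_to_list(layer, num_elements):
    # Read num_elements float values from an NvDsInferLayerInfo buffer.
    # The caller derives num_elements from the layer's dimensions.
    return [pyds.get_detections(layer.buffer, i) for i in range(num_elements)]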
def osd_sink_pad_buffer_probe(pad,info,u_data):
    frame_number=0
    #Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_ROADSIGN:0
    }
    is_first_object=True
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    if not batch_meta:
        return Gst.PadProbeReturn.OK
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        is_first_object = True

        '''
        print("Frame Number is ", frame_meta.frame_num)
        print("Source id is ", frame_meta.source_id)
        print("Batch id is ", frame_meta.batch_id)
        print("Source Frame Width ", frame_meta.source_frame_width)
        print("Source Frame Height ", frame_meta.source_frame_height)
        print("Num object meta ", frame_meta.num_obj_meta)
        '''
        frame_number=frame_meta.frame_num
        l_obj=frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            # Update the object text display
            txt_params=obj_meta.text_params

            # Set display_text. Any existing display_text string will be
            # freed by the bindings module.
            txt_params.display_text = pgie_classes_str[obj_meta.class_id]

            obj_counter[obj_meta.class_id] += 1

            # Font , font-color and font-size
            txt_params.font_params.font_name = "Serif"
            txt_params.font_params.font_size = 10
            # set(red, green, blue, alpha); set to White
            txt_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

            # Text background color
            txt_params.set_bg_clr = 1
            # set(red, green, blue, alpha); set to Black
            txt_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)

            # Ideally NVDS_EVENT_MSG_META should be attached to buffer by the
            # component implementing detection / recognition logic.
            # Here it demonstrates how to use / attach that meta data.
            if(is_first_object and not (frame_number%30)):
                # Frequency of messages to be send will be based on use case.
                # Here message is being sent for first object every 30 frames.

                # Allocating an NvDsEventMsgMeta instance and getting reference
                # to it. The underlying memory is not managed by Python so that
                # downstream plugins can access it. Otherwise the garbage collector
                # will free it when this probe exits.
                msg_meta=pyds.alloc_nvds_event_msg_meta()
                msg_meta.bbox.top =  obj_meta.rect_params.top
                msg_meta.bbox.left =  obj_meta.rect_params.left
                msg_meta.bbox.width = obj_meta.rect_params.width
                msg_meta.bbox.height = obj_meta.rect_params.height
                msg_meta.frameId = frame_number
                msg_meta.trackingId = long_to_int(obj_meta.object_id)
                msg_meta.confidence = obj_meta.confidence
                msg_meta = generate_event_msg_meta(msg_meta, obj_meta.class_id)
                user_event_meta = pyds.nvds_acquire_user_meta_from_pool(batch_meta)
                if(user_event_meta):
                    user_event_meta.user_meta_data = msg_meta
                    user_event_meta.base_meta.meta_type = pyds.NvDsMetaType.NVDS_EVENT_MSG_META
                    # Setting callbacks in the event msg meta. The bindings layer
                    # will wrap these callables in C functions. Currently only one
                    # set of callbacks is supported.
                    pyds.user_copyfunc(user_event_meta, meta_copy_func)
                    pyds.user_releasefunc(user_event_meta, meta_free_func)
                    pyds.nvds_add_user_meta_to_frame(frame_meta, user_event_meta)
                else:
                    print("Error in attaching event meta to buffer\n")

                is_first_object = False
            try:
                l_obj=l_obj.next
            except StopIteration:
                break
        try:
            l_frame=l_frame.next
        except StopIteration:
            break

    print("Frame Number =",frame_number,"Vehicle Count =",obj_counter[PGIE_CLASS_ID_VEHICLE],"Person Count =",obj_counter[PGIE_CLASS_ID_PERSON])
    return Gst.PadProbeReturn.OK
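The event-message probes store long_to_int(obj_meta.object_id) in msg_meta.trackingId, but the helper itself is not included here. A minimal sketch, assuming the usual DeepStream Python utility that truncates the 64-bit tracker id to a signed 32-bit value:

import ctypes

def long_to_int(value):
    # Truncate a 64-bit object/tracking id to the signed 32-bit range
    # expected by NvDsEventMsgMeta.trackingId.
    return ctypes.c_int32(value & 0xffffffff).value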
def osd_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GSTBuffer")
        return

    # Retrieve batch metadata from the GST Buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects
    # the C address of gst_buffer as input,
    # which is obtained with hash(gst_buffer)

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    if not batch_meta:
        return Gst.PadProbeReturn.OK
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collection
            # will leave it alone
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            obj_counter[obj_meta.class_id] += 1

            if ((frame_number % 30) == 0):
                # Allocating an NvDsEventMsgMeta instance and getting reference
                # to it. The underlying memory is not managed by Python so that
                # downstream plugins can access it. Otherwise the garbage collector
                # will free it when this probe exits.
                # See the "MetaData Access" section at this link:
                # https://docs.nvidia.com/metropolis/deepstream/dev-guide/index.html#page/DeepStream_Development_Guide/deepstream_Python_sample_apps.html

                # See also the struct fields here (C++ API reference):
                # https://docs.nvidia.com/metropolis/deepstream/5.0/dev-guide/DeepStream_Development_Guide/baggage/structNvDsEventMsgMeta.html
                msg_meta = pyds.alloc_nvds_event_msg_meta()
                msg_meta.bbox.top = obj_meta.rect_params.top
                msg_meta.bbox.left = obj_meta.rect_params.left
                msg_meta.bbox.width = obj_meta.rect_params.width
                msg_meta.bbox.height = obj_meta.rect_params.height
                msg_meta.frameId = frame_number
                msg_meta.trackingId = long_to_int(obj_meta.object_id)
                msg_meta.confidence = obj_meta.confidence
                # NvDsEventMsgMeta carries the event payload that the message
                # converter (nvmsgconv) serializes and the message broker
                # (nvmsgbroker) sends out, so it cannot simply be removed.
                # https://docs.nvidia.com/metropolis/deepstream/python-api/NvDsMeta_Schema/NvDsEventMsgMeta.html
                msg_meta = generate_event_msg_meta(msg_meta, obj_meta.class_id)
                user_event_meta = pyds.nvds_acquire_user_meta_from_pool(
                    batch_meta)
                if (user_event_meta):
                    user_event_meta.user_meta_data = msg_meta
                    user_event_meta.base_meta.meta_type = pyds.NvDsMetaType.NVDS_EVENT_MSG_META
                    # Custom MetaData added to NvDsUserMeta require
                    # custom copy and release functions.
                    # The MetaData library relies on these custom functions to perform deep-copy of the custom structure,
                    # and free allocated resources.
                    # These functions are registered as callback function pointers in the NvDsUserMeta structure.

                    # Setting callbacks in the event msg meta. The bindings layer
                    # will wrap these callables in C functions. Currently only one
                    # set of callbacks is supported.
                    # pyds.set_user_copyfunc(user_event_meta, meta_copy_func)
                    # pyds.set_user_releasefunc(user_event_meta, meta_free_func)
                    pyds.nvds_add_user_meta_to_frame(frame_meta,
                                                     user_event_meta)
                else:
                    print("Error in attaching event meta to buffer\n")
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    print(
        f"Frame Number = {frame_number}, Person Count = {obj_counter[PGIE_CLASS_ID_PERSON]}"
    )
    return Gst.PadProbeReturn.OK
def set_event_message_meta_probe(pad, info, u_data):
    logging.info("set_event_message_meta_probe: BEGIN")
    add_message_when_no_objects_found = True
    gst_buffer = info.get_buffer()
    if gst_buffer:
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
        if batch_meta:
            for frame_meta_raw in glist_iterator(batch_meta.frame_meta_list):
                frame_meta = pyds.NvDsFrameMeta.cast(frame_meta_raw)
                logging.info(
                    "set_event_message_meta_probe: %20s:%-8s: pts=%23s, dts=%23s, duration=%23s, size=%8d"
                    % (pad.get_parent_element().name, pad.name,
                       format_clock_time(
                           gst_buffer.pts), format_clock_time(gst_buffer.dts),
                       format_clock_time(
                           gst_buffer.duration), gst_buffer.get_size()))
                pravega_timestamp = PravegaTimestamp.from_nanoseconds(
                    frame_meta.buf_pts)
                logging.info(
                    "set_event_message_meta_probe: %20s:%-8s: buf_pts=%s, pravega_timestamp=%s, ntp_timestamp=%s"
                    % (pad.get_parent_element().name, pad.name,
                       format_clock_time(frame_meta.buf_pts),
                       pravega_timestamp, str(frame_meta.ntp_timestamp)))
                if not pravega_timestamp.is_valid():
                    logging.info(
                        "set_event_message_meta_probe: Timestamp %s is invalid."
                        % pravega_timestamp)
                else:
                    added_message = False
                    for obj_meta_raw in glist_iterator(
                            frame_meta.obj_meta_list):
                        obj_meta = pyds.NvDsObjectMeta.cast(obj_meta_raw)
                        logging.info(
                            "set_event_message_meta_probe: obj_meta.class_id=%d"
                            % (obj_meta.class_id, ))
                        # We can only identify a single object in an NvDsEventMsgMeta.
                        # For now, we identify the first object in the frame.
                        # TODO: Create multiple NvDsEventMsgMeta instances per frame or use a custom user metadata class to identify multiple objects.
                        if not added_message:
                            # Allocating an NvDsEventMsgMeta instance and getting reference
                            # to it. The underlying memory is not managed by Python so that
                            # downstream plugins can access it. Otherwise the garbage collector
                            # will free it when this probe exits.
                            msg_meta = pyds.alloc_nvds_event_msg_meta()
                            msg_meta.bbox.top = obj_meta.rect_params.top
                            msg_meta.bbox.left = obj_meta.rect_params.left
                            msg_meta.bbox.width = obj_meta.rect_params.width
                            msg_meta.bbox.height = obj_meta.rect_params.height
                            msg_meta.frameId = frame_meta.frame_num
                            msg_meta.trackingId = long_to_int(
                                obj_meta.object_id)
                            msg_meta.confidence = obj_meta.confidence
                            msg_meta = generate_event_msg_meta(
                                msg_meta, obj_meta.class_id, pravega_timestamp)
                            user_event_meta = pyds.nvds_acquire_user_meta_from_pool(
                                batch_meta)
                            if user_event_meta is None:
                                raise Exception(
                                    "Error in attaching event meta to buffer")
                            user_event_meta.user_meta_data = msg_meta
                            user_event_meta.base_meta.meta_type = pyds.NvDsMetaType.NVDS_EVENT_MSG_META
                            # Setting callbacks in the event msg meta. The bindings layer
                            # will wrap these callables in C functions. Currently only one
                            # set of callbacks is supported.
                            pyds.set_user_copyfunc(user_event_meta,
                                                   meta_copy_func)
                            pyds.set_user_releasefunc(user_event_meta,
                                                      meta_free_func)
                            pyds.nvds_add_user_meta_to_frame(
                                frame_meta, user_event_meta)
                            added_message = True
                    if add_message_when_no_objects_found and not added_message:
                        msg_meta = pyds.alloc_nvds_event_msg_meta()
                        msg_meta.frameId = frame_meta.frame_num
                        msg_meta = generate_event_msg_meta(
                            msg_meta, PGIE_CLASS_ID_NONE, pravega_timestamp)
                        user_event_meta = pyds.nvds_acquire_user_meta_from_pool(
                            batch_meta)
                        if user_event_meta is None:
                            raise Exception(
                                "Error in attaching event meta to buffer")
                        user_event_meta.user_meta_data = msg_meta
                        user_event_meta.base_meta.meta_type = pyds.NvDsMetaType.NVDS_EVENT_MSG_META
                        pyds.set_user_copyfunc(user_event_meta, meta_copy_func)
                        pyds.set_user_releasefunc(user_event_meta,
                                                  meta_free_func)
                        pyds.nvds_add_user_meta_to_frame(
                            frame_meta, user_event_meta)
                        added_message = True

    logging.info("set_event_message_meta_probe: END")
    return Gst.PadProbeReturn.OK
Exemplo n.º 27
0
def nvanalytics_src_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    while l_frame:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        l_obj = frame_meta.obj_meta_list

        while l_obj:

            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            l_user_meta = obj_meta.obj_user_meta_list
            while l_user_meta:
                try:
                    user_meta = pyds.NvDsUserMeta.cast(l_user_meta.data)
                    if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type(
                            "NVIDIA.DSANALYTICSOBJ.USER_META"):
                        user_meta_data = pyds.NvDsAnalyticsObjInfo.cast(
                            user_meta.user_meta_data)
                        # if user_meta_data.dirStatus: print("Object {0} moving in direction: {1}".format(obj_meta.object_id, user_meta_data.dirStatus))
                        # if user_meta_data.lcStatus: print("Object {0} line crossing status: {1}".format(obj_meta.object_id, user_meta_data.lcStatus))
                except StopIteration:
                    break

                try:
                    l_user_meta = l_user_meta.next
                except StopIteration:
                    break
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        l_user = frame_meta.frame_user_meta_list
        while l_user:
            try:
                user_meta = pyds.NvDsUserMeta.cast(l_user.data)
                if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type(
                        "NVIDIA.DSANALYTICSFRAME.USER_META"):
                    user_meta_data = pyds.NvDsAnalyticsFrameMeta.cast(
                        user_meta.user_meta_data)
                    if user_meta_data.objLCCumCnt:
                        print("Linecrossing Cumulative: {0}".format(
                            user_meta_data.objLCCumCnt))
                    # if user_meta_data.objLCCurrCnt: print("Linecrossing Current Frame: {0}".format(user_meta_data.objLCCurrCnt))
            except StopIteration:
                break
            try:
                l_user = l_user.next
            except StopIteration:
                break

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
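
For context, a probe like this is typically registered on the src pad of the nvdsanalytics element after the pipeline is built. A minimal sketch, assuming sys and Gst are already imported and the element variable is named nvanalytics (both names are assumptions, not part of the snippet above):

# Attach the probe so every batched buffer leaving nvdsanalytics is inspected.
nvanalytics_src_pad = nvanalytics.get_static_pad("src")
if not nvanalytics_src_pad:
    sys.stderr.write("Unable to get src pad of nvdsanalytics\n")
else:
    nvanalytics_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                  nvanalytics_src_pad_buffer_probe, 0)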
Exemplo n.º 28
0
    def get_lva_MediaStreamMessage(self, buffer, gst_lva_message, ih, iw):

        msg = extension_pb2.MediaStreamMessage()
        msg.ack_sequence_number = gst_lva_message.sequence_number
        msg.media_sample.timestamp = gst_lva_message.timestamp

        # Retrieve batch metadata from the gst_buffer.
        # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
        # C address of gst_buffer as input, which is obtained with hash(gst_buffer).
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buffer))
        frame = batch_meta.frame_meta_list
        while frame is not None:
            try:
                # Note that frame.data needs a cast to pyds.NvDsFrameMeta
                # The casting is done by pyds.NvDsFrameMeta.cast()
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone.
                frame_meta = pyds.NvDsFrameMeta.cast(frame.data)
                objInference = frame_meta.obj_meta_list
                frameWidth = frame_meta.source_frame_width
                frameHeight = frame_meta.source_frame_height

                inference = msg.media_sample.inferences.add()

                attributes = []
                obj_label = None
                obj_confidence = 0
                obj_left = 0
                obj_top = 0
                obj_width = 0
                obj_height = 0

                # iterate through objects
                while objInference is not None:
                    try:
                        # Casting objInference.data to pyds.NvDsObjectMeta
                        obj_meta = pyds.NvDsObjectMeta.cast(objInference.data)
                    except StopIteration:
                        break

                    rect_params = obj_meta.rect_params
                    top = int(rect_params.top)
                    left = int(rect_params.left)
                    width = int(rect_params.width)
                    height = int(rect_params.height)
                    obj_confidence = obj_meta.confidence
                    obj_label = obj_meta.obj_label

                    obj_left = left / iw
                    obj_top = top / ih
                    obj_width = width / iw
                    obj_height = height / ih

                    inference.type = inferencing_pb2.Inference.InferenceType.ENTITY
                    try:
                        objInference = objInference.next
                    except StopIteration:
                        break

                if obj_label is not None:
                    try:
                        entity = inferencing_pb2.Entity(
                            tag=inferencing_pb2.Tag(value=obj_label,
                                                    confidence=obj_confidence),
                            box=inferencing_pb2.Rectangle(l=obj_left,
                                                          t=obj_top,
                                                          w=obj_width,
                                                          h=obj_height))

                        for attr in attributes:
                            attribute = inferencing_pb2.Attribute(
                                name=attr[0],
                                value=attr[1],
                                confidence=attr[2])

                            entity.attributes.append(attribute)
                    except Exception:
                        PrintGetExceptionDetails()

                    inference.entity.CopyFrom(entity)
            except StopIteration:
                break

            try:
                frame = frame.next
            except StopIteration:
                break

        return msg
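
Note that obj_label, obj_confidence, and the box fields are overwritten on each pass through the object loop, so the message built above only ever describes the last object in the frame. Below is a sketch of a per-object helper that reuses the same inferencing_pb2 message layout; it is an illustrative rewrite under that assumption, not the project's code.

def add_entities_for_frame(msg, frame_meta, iw, ih):
    # Append one ENTITY inference per detected object in the frame.
    l_obj = frame_meta.obj_meta_list
    while l_obj is not None:
        try:
            obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
        except StopIteration:
            break
        rect = obj_meta.rect_params
        inference = msg.media_sample.inferences.add()
        inference.type = inferencing_pb2.Inference.InferenceType.ENTITY
        inference.entity.CopyFrom(inferencing_pb2.Entity(
            tag=inferencing_pb2.Tag(value=obj_meta.obj_label,
                                    confidence=obj_meta.confidence),
            box=inferencing_pb2.Rectangle(l=rect.left / iw,
                                          t=rect.top / ih,
                                          w=rect.width / iw,
                                          h=rect.height / ih)))
        try:
            l_obj = l_obj.next
        except StopIteration:
            break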
Exemplo n.º 29
0
    def osd_sink_pad_buffer_probe(pad, info, u_data):
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }

        gst_buffer = info.get_buffer()
        if not gst_buffer:
            print("Unable to get GstBuffer ")
            return Gst.PadProbeReturn.OK

        # Retrieve batch metadata from the gst_buffer
        # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
        # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
        l_frame = batch_meta.frame_meta_list
        while l_frame is not None:
            try:
                # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
                # The casting is done by pyds.NvDsFrameMeta.cast()
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone.
                # frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
                frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            except StopIteration:
                break

            frame_number = frame_meta.frame_num
            num_rects = frame_meta.num_obj_meta
            l_obj = frame_meta.obj_meta_list
            while l_obj is not None:
                try:
                    # Casting l_obj.data to pyds.NvDsObjectMeta
                    # obj_meta=pyds.glist_get_nvds_object_meta(l_obj.data)
                    obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                except StopIteration:
                    break
                obj_counter[obj_meta.class_id] += 1
                obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)
                try:
                    l_obj = l_obj.next
                except StopIteration:
                    break

            # Acquiring a display meta object. The memory ownership remains in
            # the C code so downstream plugins can still access it. Otherwise
            # the garbage collector will claim it when this probe function exits.
            display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
            display_meta.num_labels = 1
            py_nvosd_text_params = display_meta.text_params[0]
            # Setting display text to be shown on screen
            # Note that the pyds module allocates a buffer for the string, and the
            # memory will not be claimed by the garbage collector.
            # Reading the display_text field here will return the C address of the
            # allocated string. Use pyds.get_string() to get the string content.

            fps_stream.get_fps()

            py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}" \
                .format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

            # Now set the offsets where the string should appear
            py_nvosd_text_params.x_offset = 10
            py_nvosd_text_params.y_offset = 12

            # Font , font-color and font-size
            py_nvosd_text_params.font_params.font_name = "Serif"
            py_nvosd_text_params.font_params.font_size = 10
            # set(red, green, blue, alpha); set to White
            py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

            # Text background color
            py_nvosd_text_params.set_bg_clr = 1
            # set(red, green, blue, alpha); set to Black
            py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
            # Using pyds.get_string() to get display_text as string
            print(pyds.get_string(py_nvosd_text_params.display_text))
            pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

            # if WRITE_FRAMES:
            #     n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            #     # convert python array into numpy array format.
            #     frame_image = np.array(n_frame, copy=True, order='C')
            #     # convert the array into cv2 default color format
            #     frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)
            #     cv2.imwrite("./frame_" + str(frame_number) + ".jpg",
            #                 frame_image)
            #     # print('saved to')

            try:
                l_frame = l_frame.next
            except StopIteration:
                break

        return Gst.PadProbeReturn.OK
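
fps_stream.get_fps() is called above, but the object itself is not defined in this excerpt. Below is a minimal sketch of a plausible stand-in, assuming get_fps() is meant to be called once per frame and to print an average rate periodically; the DeepStream Python sample apps ship a similar GETFPS helper that the real code likely uses instead.

import time

class SimpleFPS:
    # Hypothetical stand-in for the fps_stream object referenced above.
    def __init__(self, report_interval_sec=5.0):
        self.report_interval_sec = report_interval_sec
        self.start_time = time.time()
        self.frame_count = 0

    def get_fps(self):
        # Count one frame; print the average rate once per interval.
        self.frame_count += 1
        elapsed = time.time() - self.start_time
        if elapsed >= self.report_interval_sec:
            print("FPS: {:.2f}".format(self.frame_count / elapsed))
            self.start_time = time.time()
            self.frame_count = 0

fps_stream = SimpleFPS()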
def set_event_message_meta_probe(pad, info, u_data):
    logging.info("set_event_message_meta_probe: BEGIN")
    gst_buffer = info.get_buffer()
    if gst_buffer:
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
        if batch_meta:
            for frame_meta_raw in glist_iterator(batch_meta.frame_meta_list):
                frame_meta = pyds.NvDsFrameMeta.cast(frame_meta_raw)
                # TODO: It appears that the timestamp may be incorrect by up to 1 second.
                pravega_video_timestamp = pts_to_pravega_video_timestamp(
                    frame_meta.buf_pts)
                logging.info(
                    "set_event_message_meta_probe: pad=%s:%s, pts=%s, buf_pts=%s, pravega_video_timestamp=%d, ntp_timestamp=%s"
                    % (pad.get_parent_element().name, pad.name,
                       format_clock_time(gst_buffer.pts),
                       format_clock_time(frame_meta.buf_pts),
                       pravega_video_timestamp, str(frame_meta.ntp_timestamp)))
                if pravega_video_timestamp <= 0:
                    logging.info(
                        "set_event_message_meta_probe: Timestamp is invalid. It may take a few seconds for RTSP timestamps to be valid."
                    )
                else:
                    is_first_object = True
                    for obj_meta_raw in glist_iterator(
                            frame_meta.obj_meta_list):
                        obj_meta = pyds.NvDsObjectMeta.cast(obj_meta_raw)
                        logging.info(
                            "set_event_message_meta_probe: obj_meta.class_id=%d"
                            % (obj_meta.class_id, ))
                        # We can only identify a single object in an NvDsEventMsgMeta.
                        # For now, we identify the first object in the frame.
                        # TODO: Create multiple NvDsEventMsgMeta instances per frame or use a custom user metadata class to identify multiple objects.
                        if is_first_object:
                            # Allocating an NvDsEventMsgMeta instance and getting a reference
                            # to it. The underlying memory is not managed by Python, so that
                            # downstream plugins can access it. Otherwise the garbage collector
                            # would free it when this probe exits.
                            msg_meta = pyds.alloc_nvds_event_msg_meta()
                            msg_meta.bbox.top = obj_meta.rect_params.top
                            msg_meta.bbox.left = obj_meta.rect_params.left
                            msg_meta.bbox.width = obj_meta.rect_params.width
                            msg_meta.bbox.height = obj_meta.rect_params.height
                            msg_meta.frameId = frame_meta.frame_num
                            msg_meta.trackingId = long_to_int(
                                obj_meta.object_id)
                            msg_meta.confidence = obj_meta.confidence
                            msg_meta = generate_event_msg_meta(
                                msg_meta, obj_meta.class_id,
                                pravega_video_timestamp)
                            user_event_meta = pyds.nvds_acquire_user_meta_from_pool(
                                batch_meta)
                            if user_event_meta:
                                user_event_meta.user_meta_data = msg_meta
                                user_event_meta.base_meta.meta_type = pyds.NvDsMetaType.NVDS_EVENT_MSG_META
                                # Setting callbacks in the event msg meta. The bindings layer
                                # will wrap these callables in C functions. Currently only one
                                # set of callbacks is supported.
                                pyds.set_user_copyfunc(user_event_meta,
                                                       meta_copy_func)
                                pyds.set_user_releasefunc(
                                    user_event_meta, meta_free_func)
                                pyds.nvds_add_user_meta_to_frame(
                                    frame_meta, user_event_meta)
                            else:
                                raise Exception(
                                    "Error in attaching event meta to buffer")
                            is_first_object = False
    logging.info("set_event_message_meta_probe: END")
    return Gst.PadProbeReturn.OK
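
generate_event_msg_meta, meta_copy_func, and meta_free_func are defined elsewhere in this file. For orientation, here is a hedged sketch of what generate_event_msg_meta might look like, modeled on NVIDIA's deepstream-test4 sample; the real helper presumably encodes the pravega_video_timestamp argument rather than the host clock, and the sensor identity and class-id-to-object-type mapping shown here are assumptions.

MAX_TIME_STAMP_LEN = 32

def generate_event_msg_meta(meta, class_id, timestamp):
    # Placeholder sensor/place identity (assumed values).
    meta.sensorId = 0
    meta.placeId = 0
    meta.moduleId = 0
    meta.sensorStr = "sensor-0"
    # deepstream-test4 pattern: allocate a C string and write an RFC3339
    # wall-clock timestamp into it. The real helper would presumably
    # derive this string from `timestamp` instead of the host clock.
    meta.ts = pyds.alloc_buffer(MAX_TIME_STAMP_LEN + 1)
    pyds.generate_ts_rfc3339(meta.ts, MAX_TIME_STAMP_LEN)
    meta.type = pyds.NvDsEventType.NVDS_EVENT_MOVING
    meta.objClassId = class_id
    if class_id == PGIE_CLASS_ID_VEHICLE:
        meta.objType = pyds.NvDsObjectType.NVDS_OBJECT_TYPE_VEHICLE
    elif class_id == PGIE_CLASS_ID_PERSON:
        meta.objType = pyds.NvDsObjectType.NVDS_OBJECT_TYPE_PERSON
    else:
        meta.objType = pyds.NvDsObjectType.NVDS_OBJECT_TYPE_UNKNOWN
    return meta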