Exemplo n.º 1
0
def analyze_meta(frame_number, obj_meta):
    """Print the primary-detection fields of one object and, for vehicles,
    walk the secondary (classifier) metadata and print each label result.

    Args:
        frame_number: frame index used only for the header line.
        obj_meta: a pyds.NvDsObjectMeta for one detected object.
    """
    print(' === obj_meta Primary Detection [%d]===' % (frame_number))
    print('object ={%s}' % (PGIE_CLASS[obj_meta.class_id]))
    print('object_id={}'.format(obj_meta.object_id))
    rect = obj_meta.rect_params
    print('object height={}'.format(rect.height))
    print('object left={}'.format(rect.left))
    print('object top={}'.format(rect.top))
    print('object width={}'.format(rect.width))

    # Only the vehicle class carries secondary (classifier) inference output.
    if obj_meta.class_id != PGIE_CLASS_ID_VEHICLE:
        return

    print(' === obj_meta Secondary Detection ===')
    cls_node = obj_meta.classifier_meta_list
    while cls_node is not None:
        classifier = pyds.NvDsClassifierMeta.cast(cls_node.data)

        # unique_component_id: Primary Detector:1, Secondary_CarColor:2,
        # Secondary_CarMake:3, Secondary_VehicleTypes:4
        label_node = classifier.label_info_list  # pyds.GList
        while label_node is not None:
            label = pyds.glist_get_nvds_label_info(label_node.data)
            comp_id = classifier.unique_component_id
            if comp_id == 2:
                print('\tCarColor ={}'.format(
                    CAR_COLOR[label.result_class_id]))
            elif comp_id == 3:
                print('\tCarMake ={}'.format(
                    CAR_MAKE[label.result_class_id]))
            elif comp_id == 4:
                print('\tVehicleTypes ={}'.format(
                    VEHICLE_TYPES[label.result_class_id]))
            print('\tlabel_meta result_prob ={}'.format(
                label.result_prob))

            # pyds GList access raises StopIteration at the end of the list.
            try:
                label_node = label_node.next
            except StopIteration:
                break

        try:
            cls_node = cls_node.next
        except StopIteration:
            break
Exemplo n.º 2
0
def _crop_object(img, rect_params):
    """Return the sub-image of *img* covered by *rect_params*.

    rect_params provides float top/left/height/width; they are truncated to
    int pixel coordinates before slicing.
    """
    top = int(rect_params.top)
    left = int(rect_params.left)
    return img[top:top + int(rect_params.height),
               left:left + int(rect_params.width)]


def analyze_meta(img, frame_number, obj_meta, save):
    """Print detection/classification info for one object and optionally
    save its cropped image.

    The crop filename encodes frame number, object id, primary class and —
    for vehicles — the secondary classifier attributes and probabilities.

    Args:
        img: full frame as a numpy/cv2 image (row-major, indexed [y, x]).
        frame_number: frame index used in the filename and header line.
        obj_meta: a pyds.NvDsObjectMeta for one detected object.
        save: when True, write the cropped object image with cv2.imwrite.
    """
    filename = '/home/spypiggy/src/test_images/result/%d_%d_' % (
        frame_number, obj_meta.object_id)
    print(' === obj_meta Primary Detection [%d]===' % (frame_number))
    print('object ={%s}' % (PGIE_CLASS[obj_meta.class_id]))
    print('object_id={}'.format(obj_meta.object_id))
    print('object height={}'.format(obj_meta.rect_params.height))
    print('object left={}'.format(obj_meta.rect_params.left))
    print('object top={}'.format(obj_meta.rect_params.top))
    print('object width={}'.format(obj_meta.rect_params.width))
    filename += (PGIE_CLASS[obj_meta.class_id])

    filename_ex = filename
    if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE:  # Only vehicle supports secondary inference
        print(' === obj_meta Secondary Detection ===')
        cls_meta = obj_meta.classifier_meta_list
        while cls_meta is not None:
            cls = pyds.NvDsClassifierMeta.cast(cls_meta.data)

            # unique_component_id : Primary Detector:1, Secondary_CarColor: 2,
            # Secondary_CarMake : 3, Secondary_VehicleTypes:4
            info = cls.label_info_list  # type of pyds.GList
            while info is not None:
                label_meta = pyds.glist_get_nvds_label_info(info.data)
                if cls.unique_component_id == 2:
                    print('\tCarColor ={}'.format(
                        CAR_COLOR[label_meta.result_class_id]))
                    filename_ex += ('_' +
                                    CAR_COLOR[label_meta.result_class_id])
                elif cls.unique_component_id == 3:
                    print('\tCarMake ={}'.format(
                        CAR_MAKE[label_meta.result_class_id]))
                    filename_ex += ('_' + CAR_MAKE[label_meta.result_class_id])
                elif cls.unique_component_id == 4:
                    print('\tVehicleTypes ={}'.format(
                        VEHICLE_TYPES[label_meta.result_class_id]))
                    filename_ex += ('_' +
                                    VEHICLE_TYPES[label_meta.result_class_id])
                print('\tlabel_meta result_prob ={}'.format(
                    label_meta.result_prob))
                filename_ex += ('_%4.2f' % (label_meta.result_prob))

                # pyds GList access raises StopIteration at the end of the list.
                try:
                    info = info.next
                except StopIteration:
                    break

            try:
                cls_meta = cls_meta.next
            except StopIteration:
                break

    # Single save path for every class ('VEHICLE', 'BICYCLE', 'PERSON',
    # 'ROADSIGN') — previously this crop/imwrite code was duplicated
    # verbatim in both branches.
    filename_ex += '.jpg'
    if save:
        cv2.imwrite(filename_ex, _crop_object(img, obj_meta.rect_params))
Exemplo n.º 3
0
    def osd_sink_pad_buffer_probe(self, pad, info, u_data):
        """OSD sink-pad probe: walks the batch metadata of each buffer,
        publishes a ROS Detection2DArray (all objects) and a
        Classification2D per car (class 0) with classifier attributes, and
        overlays a per-frame summary string via NvDsDisplayMeta.

        Returns Gst.PadProbeReturn.OK so the buffer keeps flowing.
        """
        frame_number = 0
        # Initializing object counter with 0.
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }

        num_rects = 0

        gst_buffer = info.get_buffer()
        if not gst_buffer:
            print("Unable to get GstBuffer ")
            return

        # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
        # gst_buffer as input, which is obtained with hash(gst_buffer).
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
        l_frame = batch_meta.frame_meta_list
        while l_frame is not None:
            try:
                # l_frame.data needs a cast to pyds.NvDsFrameMeta; the cast
                # keeps ownership of the underlying memory in the C code, so
                # the Python garbage collector will leave it alone.
                frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            except StopIteration:
                break

            frame_number = frame_meta.frame_num
            num_rects = frame_meta.num_obj_meta
            l_obj = frame_meta.obj_meta_list

            # Message for output of detection inference
            msg = Detection2DArray()
            while l_obj is not None:
                try:
                    # Casting l_obj.data to pyds.NvDsObjectMeta
                    obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                except StopIteration:
                    break

                l_classifier = obj_meta.classifier_meta_list
                # If object is a car (class ID 0), collect attribute
                # classification results and publish them.
                if obj_meta.class_id == 0 and l_classifier is not None:
                    msg2 = Classification2D()

                    while l_classifier is not None:
                        result = ObjectHypothesis()
                        try:
                            classifier_meta = pyds.glist_get_nvds_classifier_meta(
                                l_classifier.data)
                        except StopIteration:
                            print('Could not parse MetaData: ')
                            break

                        classifier_id = classifier_meta.unique_component_id
                        l_label = classifier_meta.label_info_list
                        label_info = pyds.glist_get_nvds_label_info(l_label.data)
                        classifier_class = label_info.result_class_id

                        # unique_component_id: 2 = car color, 3 = car make,
                        # anything else = vehicle type.
                        if classifier_id == 2:
                            result.id = class_color[classifier_class]
                        elif classifier_id == 3:
                            result.id = class_make[classifier_class]
                        else:
                            result.id = class_type[classifier_class]

                        result.score = label_info.result_prob
                        msg2.results.append(result)
                        # BUGFIX: pyds raises StopIteration at the end of the
                        # list. Previously this line was unguarded inside the
                        # object-loop try-block, so reaching the end of the
                        # classifier list aborted the whole object loop and
                        # skipped the classification publish below.
                        try:
                            l_classifier = l_classifier.next
                        except StopIteration:
                            break

                    self.publisher_classification.publish(msg2)

                # NOTE(review): assumes class_id is one of the four PGIE
                # classes initialized above — any other id raises KeyError.
                obj_counter[obj_meta.class_id] += 1

                # Creating message for output of detection inference
                result = ObjectHypothesisWithPose()
                result.id = str(class_obj[obj_meta.class_id])
                result.score = obj_meta.confidence

                left = obj_meta.rect_params.left
                top = obj_meta.rect_params.top
                width = obj_meta.rect_params.width
                height = obj_meta.rect_params.height
                bounding_box = BoundingBox2D()
                bounding_box.center.x = float(left + (width / 2))
                # NOTE(review): with a top-left image origin the box center
                # is usually top + height/2; kept as-is to preserve the
                # published values — confirm the intended convention.
                bounding_box.center.y = float(top - (height / 2))
                bounding_box.size_x = width
                bounding_box.size_y = height

                detection = Detection2D()
                detection.results.append(result)
                detection.bbox = bounding_box
                msg.detections.append(detection)

                try:
                    l_obj = l_obj.next
                except StopIteration:
                    break

            # Publishing message with output of detection inference
            self.publisher_detection.publish(msg)

            # Acquiring a display meta object. The memory ownership remains
            # in the C code so downstream plugins can still access it.
            # Otherwise the garbage collector will claim it when this probe
            # function exits.
            display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
            display_meta.num_labels = 1
            py_nvosd_text_params = display_meta.text_params[0]
            # The pyds module allocates a buffer for the string and the
            # memory will not be claimed by the garbage collector; use
            # pyds.get_string() to read display_text back as a str.
            py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

            # Now set the offsets where the string should appear
            py_nvosd_text_params.x_offset = 10
            py_nvosd_text_params.y_offset = 12

            # Font, font-color and font-size; set(r, g, b, a) = white
            py_nvosd_text_params.font_params.font_name = "Serif"
            py_nvosd_text_params.font_params.font_size = 10
            py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

            # Text background color; set(r, g, b, a) = black
            py_nvosd_text_params.set_bg_clr = 1
            py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
            pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
            try:
                l_frame = l_frame.next
            except StopIteration:
                break

        return Gst.PadProbeReturn.OK
Exemplo n.º 4
0
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """OSD sink-pad probe: logs primary/secondary metadata per object,
    extracts the frame image once per frame for license-plate recognition,
    and overlays a frame/object-count summary via NvDsDisplayMeta.

    Returns Gst.PadProbeReturn.OK so the buffer keeps flowing.
    """
    frame_number = 0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # l_frame.data needs a cast to pyds.NvDsFrameMeta; the cast keeps
            # ownership of the underlying memory in the C code, so the Python
            # garbage collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        is_first_obj = True

        print("===")
        print("frame_meta.frame_num={0}".format(frame_meta.frame_num))

        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            print("---")
            print("obj_meta.object_id={0}".format(obj_meta.object_id))
            print("obj_meta.class_id={0}".format(obj_meta.class_id))

            # Get secondary classifier data (first classifier, first label).
            l_classifier = obj_meta.classifier_meta_list
            if l_classifier is not None:  # and class_id==XXX #apply classifier for a specific class
                classifier_meta = pyds.glist_get_nvds_classifier_meta(
                    l_classifier.data)
                l_label = classifier_meta.label_info_list
                label_info = pyds.glist_get_nvds_label_info(l_label.data)
                classifier_class = label_info.result_class_id
                # BUGFIX: the format string was passed to print() as a plain
                # positional argument, printing the literal "sgie class={0}"
                # instead of the class id.
                print("sgie class={0}".format(classifier_class))

            obj_counter[obj_meta.class_id] += 1

            # Extract the frame image once per frame (cv2 stuff).
            if is_first_obj:
                is_first_obj = False
                # Getting image data using nvbufsurface; the input should be
                # the address of the buffer and the batch_id.
                n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                                    frame_meta.batch_id)
                # Convert python array into numpy array format, then into
                # cv2's default color format.
                frame_image = np.array(n_frame, copy=True, order='C')
                frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)

            # Recognize license plate data for this object.
            recognize_license_plate(frame_image, obj_meta, obj_meta.confidence)

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # The pyds module allocates a buffer for the string and the memory
        # will not be claimed by the garbage collector; use pyds.get_string()
        # to read display_text back as a str.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={}".format(
            frame_number, num_rects)

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font, font-color and font-size; set(r, g, b, a) = white
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color; set(r, g, b, a) = black
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)

        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))

        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()

        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
Exemplo n.º 5
0
    def tiler_sink_pad_buffer_probe(self, pad, info, u_data):
        """Tiler sink-pad probe: publishes a ROS Detection2DArray per frame
        and a Classification2D per car (class 0), and periodically saves
        annotated frames containing borderline-confidence detections.

        Returns Gst.PadProbeReturn.OK so the buffer keeps flowing.
        """
        frame_number = 0
        num_rects = 0
        gst_buffer = info.get_buffer()
        if not gst_buffer:
            print("Unable to get GstBuffer ")
            return

        # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
        # gst_buffer as input, which is obtained with hash(gst_buffer).
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

        l_frame = batch_meta.frame_meta_list
        while l_frame is not None:
            try:
                # l_frame.data needs a cast to pyds.NvDsFrameMeta; the cast
                # keeps ownership of the underlying memory in the C code, so
                # the Python garbage collector will leave it alone.
                frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            except StopIteration:
                break

            frame_number = frame_meta.frame_num
            l_obj = frame_meta.obj_meta_list
            num_rects = frame_meta.num_obj_meta
            is_first_obj = True
            save_image = False
            obj_counter = {
                PGIE_CLASS_ID_VEHICLE: 0,
                PGIE_CLASS_ID_BICYCLE: 0,
                PGIE_CLASS_ID_PERSON: 0,
                PGIE_CLASS_ID_ROADSIGN: 0
            }

            # Message for output of detection inference
            msg = Detection2DArray()
            while l_obj is not None:
                try:
                    # Casting l_obj.data to pyds.NvDsObjectMeta
                    obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                except StopIteration:
                    break

                l_classifier = obj_meta.classifier_meta_list
                # If object is a car (class ID 0), collect attribute
                # classification results and publish them.
                if obj_meta.class_id == 0 and l_classifier is not None:
                    msg2 = Classification2D()

                    while l_classifier is not None:
                        result = ObjectHypothesis()
                        try:
                            classifier_meta = pyds.glist_get_nvds_classifier_meta(
                                l_classifier.data)
                        except StopIteration:
                            print('Could not parse MetaData: ')
                            break

                        classifier_id = classifier_meta.unique_component_id
                        l_label = classifier_meta.label_info_list
                        label_info = pyds.glist_get_nvds_label_info(
                            l_label.data)
                        classifier_class = label_info.result_class_id

                        # unique_component_id: 2 = car color, 3 = car make,
                        # anything else = vehicle type.
                        if classifier_id == 2:
                            result.id = class_color[classifier_class]
                        elif classifier_id == 3:
                            result.id = class_make[classifier_class]
                        else:
                            result.id = class_type[classifier_class]

                        result.score = label_info.result_prob
                        msg2.results.append(result)
                        # BUGFIX: pyds raises StopIteration at the end of the
                        # list. Previously this line was unguarded inside the
                        # object-loop try-block, so reaching the end of the
                        # classifier list aborted the whole object loop and
                        # skipped the classification publish below.
                        try:
                            l_classifier = l_classifier.next
                        except StopIteration:
                            break

                    self.publisher_classification.publish(msg2)

                # NOTE(review): assumes class_id is one of the four PGIE
                # classes initialized above — any other id raises KeyError.
                obj_counter[obj_meta.class_id] += 1

                # Creating message for output of detection inference
                result = ObjectHypothesisWithPose()
                result.id = str(class_obj[obj_meta.class_id])
                result.score = obj_meta.confidence

                left = obj_meta.rect_params.left
                top = obj_meta.rect_params.top
                width = obj_meta.rect_params.width
                height = obj_meta.rect_params.height
                bounding_box = BoundingBox2D()
                bounding_box.center.x = float(left + (width / 2))
                # NOTE(review): with a top-left image origin the box center
                # is usually top + height/2; kept as-is to preserve the
                # published values — confirm the intended convention.
                bounding_box.center.y = float(top - (height / 2))
                bounding_box.size_x = width
                bounding_box.size_y = height

                detection = Detection2D()
                detection.results.append(result)
                detection.bbox = bounding_box
                msg.detections.append(detection)

                # Periodically check for objects with borderline confidence
                # value that may be false positive detections. If such
                # detections are found, annotate the frame with bboxes and
                # confidence value and save the annotated frame to file.
                if ((saved_count["stream_" + str(frame_meta.pad_index)] % 30
                     == 0) and (obj_meta.confidence > 0.3
                                and obj_meta.confidence < 0.31)):
                    if is_first_obj:
                        is_first_obj = False
                        # Getting image data using nvbufsurface; the input
                        # should be the address of the buffer and batch_id.
                        n_frame = pyds.get_nvds_buf_surface(
                            hash(gst_buffer), frame_meta.batch_id)
                        # Convert python array into numpy array format, then
                        # into cv2's default color format.
                        frame_image = np.array(n_frame, copy=True, order='C')
                        frame_image = cv2.cvtColor(frame_image,
                                                   cv2.COLOR_RGBA2BGRA)

                    save_image = True
                    frame_image = draw_bounding_boxes(frame_image, obj_meta,
                                                      obj_meta.confidence)
                try:
                    l_obj = l_obj.next
                except StopIteration:
                    break

            # Get frame rate through this probe
            fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()

            # Publishing message with output of detection inference
            self.publisher_detection.publish(msg)

            if save_image:
                cv2.imwrite(
                    folder_name + "/stream_" + str(frame_meta.pad_index) +
                    "/frame_" + str(frame_number) + ".jpg", frame_image)
            saved_count["stream_" + str(frame_meta.pad_index)] += 1
            try:
                l_frame = l_frame.next
            except StopIteration:
                break

        return Gst.PadProbeReturn.OK
Exemplo n.º 6
0
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """OSD sink-pad probe: counts vehicles detected by PGIE1, records their
    box centers and secondary vehicle-type classifications, and every
    DUMPINTERVAL frames dumps the accumulated data to the on-screen display
    text and stdout, then resets the accumulators.

    Returns Gst.PadProbeReturn.OK so the buffer keeps flowing.
    """
    frame_number = 0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    # Vehicle-type labels accumulated across the frames of this batch until
    # the next dump interval.
    vehicles_types = []
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # l_frame.data needs a cast to pyds.NvDsFrameMeta; the cast keeps
            # ownership of the underlying memory in the C code, so the Python
            # garbage collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        # Box centers of classified vehicles in this frame.
        center_list = []
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            ## Vehicle type detection ##
            # Filter detections by the PGIE1 network and exclude the
            # RoadSign, Bicycle and Person classes.
            if (obj_meta.unique_component_id == PGIE_UNIQUE_ID
                    and obj_meta.class_id != PGIE_CLASS_ID_ROADSIGN
                    and obj_meta.class_id != PGIE_CLASS_ID_BICYCLE
                    and obj_meta.class_id != PGIE_CLASS_ID_PERSON):
                #####################################
                ## vehicle type classification ##
                #####################################
                l_classifier = obj_meta.classifier_meta_list
                sgie_class = -1
                if l_classifier is not None:  # and class_id==XXX #apply classifier for a specific class
                    classifier_meta = pyds.glist_get_nvds_classifier_meta(
                        l_classifier.data)
                    l_label = classifier_meta.label_info_list
                    label_info = pyds.glist_get_nvds_label_info(l_label.data)
                    sgie_class = label_info.result_class_id
                    # NOTE(review): this is the center of a box of size
                    # (w, h), not its position in the frame — top/left are
                    # not added. Confirm that this is intentional.
                    rect_params = obj_meta.rect_params
                    w = int(rect_params.width)
                    h = int(rect_params.height)
                    center = (w // 2, h // 2)
                    center_list.append(center)
                    if frame_number > 0 and frame_number % DUMPINTERVAL == 0:
                        vehicles_types.append(SGIE_LABELS_DICT[sgie_class])
                    # License-plate recognition stage intentionally disabled
                    # (previously commented-out lpdetector.alpr_frame call).

        # NOTE(review): assumes class_id is one of the four PGIE classes
        # initialized above — any other id raises KeyError.
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # The pyds module allocates a buffer for the string and the memory
        # will not be claimed by the garbage collector; use pyds.get_string()
        # to read display_text back as a str.

        vcoords.vec_coords.append(center_list)
        if frame_number > 0 and frame_number % DUMPINTERVAL == 0:
            py_nvosd_text_params.display_text = "Time Stamp={} Vehicle_types={} Vehicles_coords_across dump interval {} ".format(
                datetime.datetime.now(), vehicles_types, vcoords.vec_coords)

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font, font-color and font-size; set(r, g, b, a) = white
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color; set(r, g, b, a) = black
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)

        # Dump the accumulated data and reset the accumulators.
        if frame_number > 0 and frame_number % DUMPINTERVAL == 0:
            print(pyds.get_string(py_nvosd_text_params.display_text))
            # BUGFIX: this previously assigned a misspelled name
            # ('vehicle_types'), so the accumulator was never cleared and
            # grew for the lifetime of the call.
            vehicles_types = []
            vcoords.vec_coords = []
            center_list = []
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK