def osd_sink_pad_buffer_probe(pad,info,u_data):
    frame_number=0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_ROADSIGN:0
    }
    num_rects=0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return


    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number=frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj=frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try: 
                l_obj=l_obj.next
            except StopIteration:
                break

        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        py_nvosd_text_params.display_text = "Frames: {} | Objects: {} | Vehicles: {} | Persons: {}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 12
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame=l_frame.next
        except StopIteration:
            break
			
    return Gst.PadProbeReturn.OK
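
# A minimal sketch of how a probe like osd_sink_pad_buffer_probe is typically
# attached, assuming the pipeline already contains an nvdsosd element created
# elsewhere (e.g. Gst.ElementFactory.make("nvdsosd", "onscreendisplay")).
def attach_osd_probe(nvosd):
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        print("Unable to get sink pad of nvosd")
        return
    # The last argument (0) is forwarded to the probe as u_data; the probe ignores it.
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
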
def frame_function(batch_meta, frame_meta, dict_data):
    obj_counter = dict_data["obj_counter"]
    pgie_class_id = dict_data["pgie_class_id"]
    frame_number = frame_meta.frame_num
    num_rects = frame_meta.num_obj_meta
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    make_text_display(display_meta, frame_number, num_rects, obj_counter)
    # Using pyds.get_string() to get display_text as string
    py_nvosd_text_params = display_meta.text_params[0]
    print(pyds.get_string(py_nvosd_text_params.display_text))
    pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
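
# make_text_display() is not defined in this snippet; a hypothetical sketch,
# modeled on the text-label setup used by the other probes in this collection.
def make_text_display(display_meta, frame_number, num_rects, obj_counter):
    display_meta.num_labels = 1
    text_params = display_meta.text_params[0]
    text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(
        frame_number, num_rects,
        obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])
    text_params.x_offset = 10
    text_params.y_offset = 12
    text_params.font_params.font_name = "Serif"
    text_params.font_params.font_size = 10
    # set(red, green, blue, alpha); white text on a black background
    text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
    text_params.set_bg_clr = 1
    text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
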
Example #3
def tiler_src_pad_buffer_probe(pad, info, u_data):
    # Initializing object counter with 0.
    # mask detection version: only recognizes mask and no-mask
    obj_counter = {
        PGIE_CLASS_ID_FACE: 0,
        PGIE_CLASS_ID_PLATES: 0,
    }

    frame_number = 0
    num_rects = 0  # number of objects in the frame
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    #====================== Definition of the on-screen message values
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    current_pad_index = pyds.NvDsFrameMeta.cast(l_frame.data).pad_index

    camera_id = get_camera_id(current_pad_index)

    plates_info = get_plates_info(camera_id)
    #print('information edgar')
    #print(plates_info)
    #quit()
    #is_aforo_enabled = aforo_info['enabled']

    #social_distance_info = get_social_distance(camera_id)
    #is_social_distance_enabled = social_distance_info['enabled']

    #people_counting_info = get_people_counting(camera_id)
    #is_people_counting_enabled = people_counting_info['enabled']

    #print( "entro al  tiler_src_pad_buffer_probe")
    # Todos los servicios requieren impresion de texto solo para Aforo se requiere una linea y un rectangulo
    display_meta.num_labels = 1  # numero de textos
    py_nvosd_text_params = display_meta.text_params[0]

    # Setup of the on-screen text label
    py_nvosd_text_params.x_offset = 100
    py_nvosd_text_params.y_offset = 120
    py_nvosd_text_params.font_params.font_name = "Arial"
    py_nvosd_text_params.font_params.font_size = 10
    py_nvosd_text_params.font_params.font_color.red = 1.0
    py_nvosd_text_params.font_params.font_color.green = 1.0
    py_nvosd_text_params.font_params.font_color.blue = 1.0
    py_nvosd_text_params.font_params.font_color.alpha = 1.0
    py_nvosd_text_params.set_bg_clr = 1
    py_nvosd_text_params.text_bg_clr.red = 0.0
    py_nvosd_text_params.text_bg_clr.green = 0.0
    py_nvosd_text_params.text_bg_clr.blue = 0.0
    py_nvosd_text_params.text_bg_clr.alpha = 1.0

    plate_ids = get_plate_ids_dict(camera_id)
    # why set it to 1 ????
    #frame_number = 1 # to avoid an undefined-variable issue

    #client=boto3.client('rekognition')

    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num

        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta

        save_image = False

        #print(num_rects)  # stream ID number
        #ids = set()

        # fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()

        # Inner loop where the objects in the frame are evaluated
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            obj_counter[obj_meta.class_id] += 1

            # if class is 1 (plate) and only every other frame
            # TODO: use the information in plates_info to determine whether the object is inside the area of interest and whether it is entering or leaving, and only capture images when it is entering
            print(plates_info)
            #if obj_meta.class_id == 1 and frame_number % 2 == 0:
            if obj_meta.class_id == 1:
                #save_image = True

                if obj_meta.object_id not in plate_ids:
                    counter = 1
                    items = []
                else:
                    counter = plate_ids[obj_meta.object_id]['counter']
                    items = plate_ids[obj_meta.object_id]['items']
                    counter += 1

                print(
                    'X..............',
                    int(obj_meta.rect_params.width +
                        obj_meta.rect_params.left / 2))
                print(
                    'Y..............',
                    int(obj_meta.rect_params.height +
                        obj_meta.rect_params.top))

                # Getting Image data using nvbufsurface
                # the input should be address of buffer and batch_id
                n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                                    frame_meta.batch_id)

                # convert python array into numpy array format.
                frame_image = np.array(n_frame, copy=True, order='C')

                # convert the array into cv2 default color format
                frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)

                # crop image
                frame_image = draw_bounding_boxes(frame_image, obj_meta,
                                                  obj_meta.confidence)
                items.append(frame_image)

                plate_ids.update(
                    {obj_meta.object_id: {
                        'counter': counter,
                        'items': items
                    }})
                #print('edgar...', plate_ids)
                set_plate_ids_dict(camera_id, plate_ids)
                for elemento in plate_ids.keys():
                    #print('11111111111', elemento)
                    #print('22222222222', type(elemento))
                    #print('33333333333', plate_ids)
                    #print('44444444444', plate_ids[elemento])
                    #print('55555555555', plate_ids[obj_meta.object_id]['counter'])
                    if plate_ids[obj_meta.object_id]['counter'] > 1:
                        print('................', frame_number, elemento,
                              'photo:',
                              len(plate_ids[obj_meta.object_id]['items']))
                        cv2.imwrite(
                            folder_name + "/stream_" +
                            str(frame_meta.pad_index) + "/" +
                            str(service.get_timestamp()) + "_" +
                            str(obj_meta.object_id) + ".jpg", frame_image)

            #py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Mask={} NoMaks={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_FACE], obj_counter[PGIE_CLASS_ID_PLATES])

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        #pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        #fps_streams["stream{0}".format(frame_meta.pad_index)].print_data()
        #print("stream{0}".format(frame_meta.pad_index))

        if save_image:
            #print("Entre a guardar imagen")
            #print(obj_meta.class_id)

            # The file name should be made up of date + id

            #cv2.imwrite(folder_name+"/stream_"+str(frame_meta.pad_index)+"/frame_"+str(frame_number)+".jpg",frame_image)
            #print(str(service.get_timestamp()))
            #print(str(service.get_timestamp()/1000))
            a = 1
            cv2.imwrite(
                folder_name + "/stream_" + str(frame_meta.pad_index) + "/" +
                str(service.get_timestamp()) + "_" + str(obj_meta.object_id) +
                ".jpg", frame_image)

        saved_count["stream_" + str(frame_meta.pad_index)] += 1

        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    '''
    if frame_number % 43 == 0:
        new_dict = {}
        no_mask_ids = get_plate_ids_dict(camera_id)

        for item in ids:
            if item in no_mask_ids:
                value = no_mask_ids[item]
                new_dict.update({item: value})

        set_plates_dict(camera_id, new_dict)

        # Sends it to the live stream
    '''

    return Gst.PadProbeReturn.OK
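
# draw_bounding_boxes() is not defined in this snippet. Given the "crop image"
# comment at its call site, a hypothetical sketch that simply crops the detected
# object out of the full frame (confidence is accepted only to match the call).
def draw_bounding_boxes(image, obj_meta, confidence):
    rect = obj_meta.rect_params
    left, top = max(0, int(rect.left)), max(0, int(rect.top))
    right = min(image.shape[1], int(rect.left + rect.width))
    bottom = min(image.shape[0], int(rect.top + rect.height))
    # Return only the pixels inside the bounding box.
    return image[top:bottom, left:right]
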
def osd_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    # Initializing object counter with 0.
    obj_counter = {PGIE_CLASS_ID_PICKLE: 0}
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            #frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        # pyds.nvds_clear_obj_meta_list(frame_meta, frame_meta.obj_meta_list)
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                #obj_meta=pyds.glist_get_nvds_object_meta(l_obj.data)
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            currPickle = PickleBox(obj_meta.rect_params.left,
                                   obj_meta.rect_params.top,
                                   obj_meta.rect_params.width,
                                   obj_meta.rect_params.height)
            if currPickle.left > frame_meta.source_frame_width * minRange:
                obj_counter[obj_meta.class_id] += 1
                knownPickle = False
                for foundPickle in PickleBoxArr:
                    if (foundPickle.left * minRange <= currPickle.left <=
                            foundPickle.left * maxRange
                            and foundPickle.top * minRange <= currPickle.top <=
                            foundPickle.top * maxRange
                            and foundPickle.width * minRange <=
                            currPickle.width <= foundPickle.width * maxRange
                            and
                            foundPickle.height * minRange <= currPickle.height
                            <= foundPickle.height * maxRange):
                        knownPickle = True
                        break
                if knownPickle:
                    #BLUE
                    obj_meta.rect_params.border_color.set(
                        0.0, 254.0, 10.0, 0.0)

                    #BLACK
                    # obj_meta.rect_params.border_color.set(0.0, 0.0, 0.0, 255.0)
                else:
                    PickleBoxArr.append(currPickle)
            else:
                obj_meta.rect_params.border_color.set(0.0, 0.0, 0.0, 255.0)

            # print("Left = {}, Top={}, width={}, heigth={}".format(obj_meta.rect_params.left,obj_meta.rect_params.top,obj_meta.rect_params.width,obj_meta.rect_params.height))
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Pickles (current Frame)={} Number of total Pickles={}".format(
            frame_number, obj_counter[PGIE_CLASS_ID_PICKLE], len(PickleBoxArr))
        objectsAtFrame.append(obj_counter[PGIE_CLASS_ID_PICKLE])
        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        # print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
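
# PickleBox, PickleBoxArr, objectsAtFrame, minRange and maxRange are defined
# outside this snippet. A hypothetical sketch of those globals; the tolerance
# band (0.9-1.1) is an assumption, not a value taken from the original code.
from dataclasses import dataclass

@dataclass
class PickleBox:
    left: float
    top: float
    width: float
    height: float

PickleBoxArr = []        # boxes of pickles already matched in earlier frames
objectsAtFrame = []      # per-frame pickle counts collected by the probe
minRange, maxRange = 0.9, 1.1   # assumed tolerance when comparing box coordinates
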
Example #5
def tiler_src_pad_buffer_probe(pad, info, u_data):
    # Initializing object counter with 0.
    # version 2: persons only

    obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
            }

    #obj_counter = {
    #        PGIE_CLASS_ID_PERSON: 0
    #        }

    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    #previous = service.get_previous()
    previous =  True
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        
        #service.set_frame_counter(frame_number)   # The frame count is being evaluated here ???????
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta

        ids = []
        boxes = []
         
        while l_obj is not None: 
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)         
            except StopIteration:
                break           

            obj_counter[obj_meta.class_id] += 1
            x = obj_meta.rect_params.left
            y = obj_meta.rect_params.top
            obj_id = obj_meta.object_id

            # Service Aforo (in and out)
            #if obj_meta.class_id == 2:
            #ids.append(obj_id)
            #boxes.append((x, y))
            # service.counting_in_and_out_first_detection((x, y), obj_id) In and out counting when the object finally disappears
            #service.counting_in_and_out_when_changing_area((x, y), obj_id, ids, previous)

            # Service People counting
            if previous:
                #service.people_counting_last_time_detected(ids)
                #service.people_counting_storing_fist_time(obj_id)
                print("Cancelados momentaneamente")
            try: 
                l_obj = l_obj.next
            except StopIteration:
                break

        # Service Social Distance
        #if previous:
        #    service.get_distances_between_detected_elements_from_centroid(boxes, ids)

        #if not service.get_previous():
        #    service.set_previous(True)
        #    previous = service.get_previous()
        
        # Service Aforo (in and out)
        # service.count_in_and_out_when_object_leaves_the_frame(ids) In and out counting when the object finally disappears

        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        display_meta.num_lines = 1
        #display_meta.num_circles = 1
        display_meta.num_rects = 1
        py_nvosd_text_params = display_meta.text_params[0]
        py_nvosd_line_params = display_meta.line_params[0]        
        #py_nvosd_circle_params = display_meta.circle_params[0]        
        py_nvosd_rect_params = display_meta.rect_params[0]        
        
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],obj_counter[PGIE_CLASS_ID_PERSON])
        py_nvosd_text_params.x_offset = 100
        py_nvosd_text_params.y_offset = 120
        py_nvosd_text_params.font_params.font_name = "Arial"
        py_nvosd_text_params.font_params.font_size = 10
        py_nvosd_text_params.font_params.font_color.red = 1.0
        py_nvosd_text_params.font_params.font_color.green = 1.0
        py_nvosd_text_params.font_params.font_color.blue = 1.0
        py_nvosd_text_params.font_params.font_color.alpha = 1.0
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.red = 0.0
        py_nvosd_text_params.text_bg_clr.green = 0.0
        py_nvosd_text_params.text_bg_clr.blue = 0.0
        py_nvosd_text_params.text_bg_clr.alpha = 1.0
        
        py_nvosd_line_params.x1 = 510
        py_nvosd_line_params.y1 = 740
        py_nvosd_line_params.x2 = 1050
        py_nvosd_line_params.y2 = 740
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.red = 1.0
        py_nvosd_line_params.line_color.green = 1.0
        py_nvosd_line_params.line_color.blue = 1.0
        py_nvosd_line_params.line_color.alpha = 1.0

        py_nvosd_rect_params.left = 500
        py_nvosd_rect_params.height = 120
        py_nvosd_rect_params.top = 680
        py_nvosd_rect_params.width = 560
        py_nvosd_rect_params.border_width = 4
        py_nvosd_rect_params.border_color.red = 0.0
        py_nvosd_rect_params.border_color.green = 0.0
        py_nvosd_rect_params.border_color.blue = 1.0
        py_nvosd_rect_params.border_color.alpha = 1.0

        #py_nvosd_circle_params.xc = 110
        #py_nvosd_circle_params.yc = 110
        #py_nvosd_circle_params.radius = 60
        #py_nvosd_circle_params.circle_color.red = 1.0
        #py_nvosd_circle_params.circle_color.green = 1.0
        #py_nvosd_circle_params.circle_color.blue = 1.0
        #py_nvosd_circle_params.circle_color.alpha = 1.0
        
        #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",vehicle_count,"Person_count=",person)
        #Linea = pyds.nvds_line_params.set(1,6,20,20,100,100)

        # Sends it to the live stream
        pyds.nvds_add_display_meta_to_frame(frame_meta,display_meta)
        #print(pyds.get_string(py_nvosd_text_params.display_text))        # Sends it to the terminal
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()       
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK	
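
# fps_streams is assumed to be a module-level dict mapping "stream<N>" to an FPS
# helper exposing get_fps(); a minimal sketch of such a helper and its setup
# (the DeepStream Python samples ship a similar GETFPS class).
import time

class StreamFPS:
    def __init__(self, stream_id):
        self.stream_id = stream_id
        self.start = time.time()
        self.frames = 0

    def get_fps(self):
        self.frames += 1
        elapsed = time.time() - self.start
        if elapsed >= 5.0:  # report roughly every 5 seconds
            print("Fps of stream {} is {:.2f}".format(self.stream_id, self.frames / elapsed))
            self.start = time.time()
            self.frames = 0

# For example, for two input sources:
fps_streams = {"stream{0}".format(i): StreamFPS(i) for i in range(2)}
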
Example #6
def tiler_src_pad_buffer_probe(pad, info, u_data):
    # Initializing object counter with 0.
    # version 2: persons only

    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }

    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    previous = service.get_previous()

    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        if get_counter() == 60:
            set_counter()

            if get_current_time() > get_offset_time():
                print('here...............')
                service.emulate_reading_from_server()
                set_offset_time()
            else:
                set_current_time()
        else:
            increment()

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta

        ids = []
        boxes = []

        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            # print(" Class ID ", pgie_classes_str[obj_meta.class_id])

            obj_counter[obj_meta.class_id] += 1
            x = obj_meta.rect_params.left
            y = obj_meta.rect_params.top

            # Service Aforo (in and out)
            ids.append(obj_meta.object_id)
            boxes.append((x, y))
            direction = service.aforo((x, y), obj_meta.object_id, ids,
                                      previous)
            if direction:
                print(direction)

            # Service People counting
            if previous:
                service.people_counting_last_time_detected(ids)
                service.people_counting_storing_fist_time(obj_meta.object_id)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Service Social Distance
        if len(boxes) > 0:
            service.set_frame_counter(frame_number)
            service.tracked_on_time_social_distance(boxes, ids)

        if not previous:
            previous = service.set_previous()

        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        display_meta.num_lines = 1
        display_meta.num_rects = 1
        py_nvosd_text_params = display_meta.text_params[0]
        py_nvosd_line_params = display_meta.line_params[0]
        py_nvosd_rect_params = display_meta.rect_params[0]

        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
            obj_counter[PGIE_CLASS_ID_PERSON])
        py_nvosd_text_params.x_offset = 100
        py_nvosd_text_params.y_offset = 120
        py_nvosd_text_params.font_params.font_name = "Arial"
        py_nvosd_text_params.font_params.font_size = 10
        py_nvosd_text_params.font_params.font_color.red = 1.0
        py_nvosd_text_params.font_params.font_color.green = 1.0
        py_nvosd_text_params.font_params.font_color.blue = 1.0
        py_nvosd_text_params.font_params.font_color.alpha = 1.0
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.red = 0.0
        py_nvosd_text_params.text_bg_clr.green = 0.0
        py_nvosd_text_params.text_bg_clr.blue = 0.0
        py_nvosd_text_params.text_bg_clr.alpha = 1.0

        #py_nvosd_line_params.x1 = 500
        #py_nvosd_line_params.y1 = 800
        #py_nvosd_line_params.x2 = 1200
        #py_nvosd_line_params.y2 = 800
        py_nvosd_line_params.x1 = 880
        py_nvosd_line_params.y1 = 850
        py_nvosd_line_params.x2 = 880
        py_nvosd_line_params.y2 = 250
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.red = 1.0
        py_nvosd_line_params.line_color.green = 1.0
        py_nvosd_line_params.line_color.blue = 1.0
        py_nvosd_line_params.line_color.alpha = 1.0

        py_nvosd_rect_params.left = 500
        py_nvosd_rect_params.height = 120
        py_nvosd_rect_params.top = 680
        py_nvosd_rect_params.width = 560
        py_nvosd_rect_params.border_width = 4
        py_nvosd_rect_params.border_color.red = 0.0
        py_nvosd_rect_params.border_color.green = 0.0
        py_nvosd_rect_params.border_color.blue = 1.0
        py_nvosd_rect_params.border_color.alpha = 1.0

        # Sends it to the live stream
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def osd_sink_pad_buffer_probe(pad,info,u_data):
    frame_number=0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_ROADSIGN:0
    }
    num_rects=0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number=frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj=frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try: 
                l_obj=l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame=l_frame.next
        except StopIteration:
            break
    # past tracking metadata
    if(past_tracking_meta[0]==1):
        l_user=batch_meta.batch_user_meta_list
        while l_user is not None:
            try:
                # Note that l_user.data needs a cast to pyds.NvDsUserMeta
                # The casting is done by pyds.NvDsUserMeta.cast()
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone
                user_meta=pyds.NvDsUserMeta.cast(l_user.data)
            except StopIteration:
                break
            if(user_meta and user_meta.base_meta.meta_type==pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META):
                try:
                    # Note that user_meta.user_meta_data needs a cast to pyds.NvDsPastFrameObjBatch
                    # The casting is done by pyds.NvDsPastFrameObjBatch.cast()
                    # The casting also keeps ownership of the underlying memory
                    # in the C code, so the Python garbage collector will leave
                    # it alone
                    pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast(user_meta.user_meta_data)
                except StopIteration:
                    break
                for trackobj in pyds.NvDsPastFrameObjBatch.list(pPastFrameObjBatch):
                    print("streamId=",trackobj.streamID)
                    print("surfaceStreamID=",trackobj.surfaceStreamID)
                    for pastframeobj in pyds.NvDsPastFrameObjStream.list(trackobj):
                        print("numobj=",pastframeobj.numObj)
                        print("uniqueId=",pastframeobj.uniqueId)
                        print("classId=",pastframeobj.classId)
                        print("objLabel=",pastframeobj.objLabel)
                        for objlist in pyds.NvDsPastFrameObjList.list(pastframeobj):
                            print('frameNum:', objlist.frameNum)
                            print('tBbox.left:', objlist.tBbox.left)
                            print('tBbox.width:', objlist.tBbox.width)
                            print('tBbox.top:', objlist.tBbox.top)
                            print('tBbox.height:', objlist.tBbox.height)
                            print('confidence:', objlist.confidence)
                            print('age:', objlist.age)
            try:
                l_user=l_user.next
            except StopIteration:
                break
    return Gst.PadProbeReturn.OK	
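
# past_tracking_meta is assumed to be a one-element list used as a mutable flag,
# as in the deepstream-test2 sample; a minimal sketch of its setup.
past_tracking_meta = [0]
# Set past_tracking_meta[0] = 1 (e.g. from a command-line option) to print the
# tracker's past-frame metadata in the probe above.
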
def osd_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        is_first_obj = True

        print("===")
        print("frame_meta.frame_num={0}".format(frame_meta.frame_num))

        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            print("---")
            print("obj_meta.object_id={0}".format(obj_meta.object_id))
            print("obj_meta.class_id={0}".format(obj_meta.class_id))

            #get secondary classifier data
            l_classifier = obj_meta.classifier_meta_list
            if l_classifier is not None:  # and class_id==XXX #apply classifier for a specific class
                classifier_meta = pyds.glist_get_nvds_classifier_meta(
                    l_classifier.data)
                l_label = classifier_meta.label_info_list
                label_info = pyds.glist_get_nvds_label_info(l_label.data)
                classifier_class = label_info.result_class_id
                print("sgie class={0}", classifier_class)

            obj_counter[obj_meta.class_id] += 1

            # Cv2 stuff
            if is_first_obj:
                is_first_obj = False
                # Getting Image data using nvbufsurface
                # the input should be address of buffer and batch_id
                n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                                    frame_meta.batch_id)
                #convert python array into numpy array format.
                frame_image = np.array(n_frame, copy=True, order='C')
                #convert the array into cv2 default color format
                frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)

            #recognize license plate data
            recognize_license_plate(frame_image, obj_meta, obj_meta.confidence)

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={}".format(
            frame_number, num_rects)

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)

        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))

        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()

        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_src_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        '''
        print("Frame Number is ", frame_meta.frame_num)
        print("Source id is ", frame_meta.source_id)
        print("Batch id is ", frame_meta.batch_id)
        print("Source Frame Width ", frame_meta.source_frame_width)
        print("Source Frame Height ", frame_meta.source_frame_height)
        print("Num object meta ", frame_meta.num_obj_meta)
        '''
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        bboxes = []
        classids = []
        num_rects = frame_meta.num_obj_meta
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1

            bos = np.array([
                int(obj_meta.rect_params.left),
                int(obj_meta.rect_params.top),
                int(obj_meta.rect_params.left + obj_meta.rect_params.width),
                int(obj_meta.rect_params.top + obj_meta.rect_params.height)
            ])
            bboxes.append(bos.astype("int"))

            classids.append(obj_meta.class_id)

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        boxes_dic[frame_meta.source_id] = bboxes
        frameTime = int(datetime.now().timestamp())

        if tracker_status == 1:
            counter, counts, trackers = trackers_list[
                frame_meta.source_id].detectandkalmtrack(
                    boxes_dic[frame_meta.source_id],
                    classids,
                    frameTime=frameTime)
            for index, values in counts.items():
                counters = list(values.keys())
                #print(f' This {frame_meta.source_id + 1} Tracker {classes[index]} -- {counters[0]} ----{str(values["inCount"])} ------ {counters[1]}---- {str(values["outCount"])}')
        """display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, vehicle_count, person)
        py_nvosd_text_params.x_offset = 10;
        py_nvosd_text_params.y_offset = 12;
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        py_nvosd_text_params.font_params.font_color.red = 1.0
        py_nvosd_text_params.font_params.font_color.green = 1.0
        py_nvosd_text_params.font_params.font_color.blue = 1.0
        py_nvosd_text_params.font_params.font_color.alpha = 1.0
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.red = 0.0
        py_nvosd_text_params.text_bg_clr.green = 0.0
        py_nvosd_text_params.text_bg_clr.blue = 0.0
        py_nvosd_text_params.text_bg_clr.alpha = 1.0
        #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",vehicle_count,"Person_count=",person)
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)"""
        #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",obj_counter[PGIE_CLASS_ID_VEHICLE],"Person_count=",obj_counter[PGIE_CLASS_ID_PERSON])

        framenumber = 0  # index into display_meta.line_params (despite the name, not a frame number)
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        x1, y1, x2, y2, x3, y3, x4, y4 = Roi_points_list[frame_meta.source_id]
        line_params = display_meta.line_params[framenumber]
        framenumber += 1
        line_params.x1 = x1
        line_params.y1 = y1
        line_params.x2 = x2
        line_params.y2 = y2
        line_params.line_width = 4
        line_params.line_color.red = 1.0
        line_params.line_color.green = 1.0
        line_params.line_color.blue = 0.0
        line_params.line_color.alpha = 0.7
        display_meta.num_lines = display_meta.num_lines + 1

        line_params = display_meta.line_params[framenumber]
        framenumber += 1
        line_params.x1 = x2
        line_params.y1 = y2
        line_params.x2 = x3
        line_params.y2 = y3
        line_params.line_width = 4
        line_params.line_color.red = 1.0
        line_params.line_color.green = 1.0
        line_params.line_color.blue = 0.0
        line_params.line_color.alpha = 0.7
        display_meta.num_lines = display_meta.num_lines + 1

        line_params = display_meta.line_params[framenumber]
        framenumber += 1
        line_params.x1 = x3
        line_params.y1 = y3
        line_params.x2 = x4
        line_params.y2 = y4
        line_params.line_width = 4
        line_params.line_color.red = 1.0
        line_params.line_color.green = 1.0
        line_params.line_color.blue = 0.0
        line_params.line_color.alpha = 0.7
        display_meta.num_lines = display_meta.num_lines + 1

        line_params = display_meta.line_params[framenumber]
        framenumber += 1
        line_params.x1 = x4
        line_params.y1 = y4
        line_params.x2 = x1
        line_params.y2 = y1
        line_params.line_width = 4
        line_params.line_color.red = 1.0
        line_params.line_color.green = 1.0
        line_params.line_color.blue = 0.0
        line_params.line_color.alpha = 0.7
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
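
# Roi_points_list, boxes_dic, trackers_list and tracker_status are defined
# outside this snippet. A hypothetical sketch of the ROI data the probe above
# draws: one 8-tuple of corner coordinates (x1, y1, ..., x4, y4) per source.
Roi_points_list = {
    0: (100, 100, 1180, 100, 1180, 620, 100, 620),  # assumed clockwise rectangle for source 0
}
boxes_dic = {}  # per-source list of detected boxes, filled by the probe
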
def tiler_src_pad_buffer_probe(pad, info, u_data):
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    previous = service.get_previous()
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        service.set_frame_counter(frame_number)
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta

        ids = []
        boxes = []
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            obj_counter[obj_meta.class_id] += 1
            x = obj_meta.rect_params.left
            y = obj_meta.rect_params.top
            obj_id = obj_meta.object_id

            # Service Aforo (in and out)
            ids.append(obj_id)
            boxes.append((x, y))
            # service.counting_in_and_out_first_detection((x, y), obj_id) In and out counting when the object finally disappears
            service.counting_in_and_out_when_changing_area((x, y), obj_id, ids,
                                                           previous)

            # Service People counting
            if previous:
                service.people_counting_last_time_detected(ids)
                service.people_counting_storing_fist_time(obj_id)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Service Social Distance
        if previous:
            service.get_distances_between_detected_elements_from_centroid(
                boxes, ids)

        if not service.get_previous():
            service.set_previous(True)
            previous = service.get_previous()

        # Service Aforo (in and out)
        # service.count_in_and_out_when_object_leaves_the_frame(ids) In and out counting when the object finally disappears

        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]

        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
Example #11
def osd_sink_pad_buffer_probe(pad, info, u_data):
    global start, prt
    frame_number = 0

    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        now = time.time()
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            #frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            '''
            img = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGR)
            red = (0, 0, 255)
            location = (20, 50)
            font = cv2.FONT_ITALIC  # italic font
            cv2.putText(img, 'OpenCV Cooking', location, font, fontScale = 2, color = red, thickness = 3)
            #cv2.imshow('Hello', img)
            #cv2.waitKey(0)
            '''

        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                #obj_meta=pyds.glist_get_nvds_object_meta(l_obj.data)
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                '''
                print('class_id={}'.format(type(obj_meta.class_id)))
                print('confidence={}'.format(type(obj_meta.confidence)))
                print('detector_bbox_info={}'.format(type(obj_meta.detector_bbox_info)))
                print('obj_label={}'.format(type(obj_meta.obj_label)))
                print('object_id={}'.format(type(obj_meta.object_id)))
                print('rect_params={}'.format(type(obj_meta.rect_params)))
                '''
                #print('mask_params={}'.format(type(obj_meta.mask_params)))  #Not bound
                print('        rect_params bg_color alpha={}'.format(
                    type(obj_meta.rect_params.bg_color)))
                print('    rect_params border_width={}'.format(
                    type(obj_meta.rect_params.border_width)))
                print('    rect_params border_width={}'.format(
                    obj_meta.rect_params.border_width))
                print('    rect_params  color_id={}'.format(
                    type(obj_meta.rect_params.color_id)))
                print('    rect_params  color_id={}'.format(
                    obj_meta.rect_params.color_id))
                print('    rect_params has_color_info={}'.format(
                    type(obj_meta.rect_params.has_color_info)))
                '''
                if True:
                    print(' === obj_meta ===')
                    print('class_id={}'.format(obj_meta.class_id))
                    print('confidence={}'.format(obj_meta.confidence))
                    print('detector_bbox_info={}'.format(obj_meta.detector_bbox_info))
                    #print('mask_params={}'.format(obj_meta.mask_params))
                    print('obj_label={}'.format(obj_meta.obj_label))
                    print('object_id={}'.format(obj_meta.object_id))
                    print('rect_params={}'.format(obj_meta.rect_params))
                    print('        rect_params bg_color alpha={}'.format(obj_meta.rect_params.bg_color.alpha))
                    print('        rect_params bg_color blue={}'.format(obj_meta.rect_params.bg_color.blue))
                    print('        rect_params bg_color green={}'.format(obj_meta.rect_params.bg_color.green))
                    print('        rect_params bg_color red={}'.format(obj_meta.rect_params.bg_color.red))
                   
                    
                    print('        rect_params border_color alpha={}'.format(obj_meta.rect_params.border_color.alpha))
                    print('        rect_params border_color blue={}'.format(obj_meta.rect_params.border_color.blue))
                    print('        rect_params border_color green={}'.format(obj_meta.rect_params.border_color.green))
                    print('        rect_params border_color red={}'.format(obj_meta.rect_params.border_color.red))
                    print('    rect_params border_width={}'.format(obj_meta.rect_params.border_width))
                    print('    rect_params  color_id={}'.format(obj_meta.rect_params.color_id))
                    print('    rect_params has_bg_color={}'.format(obj_meta.rect_params.has_bg_color))
                    print('    rect_params has_color_info={}'.format(obj_meta.rect_params.has_color_info))
                    print('    rect_params height={}'.format(obj_meta.rect_params.height))
                    print('    rect_params left={}'.format(obj_meta.rect_params.left))
                    print('    rect_params top={}'.format(obj_meta.rect_params.top))
                    print('    rect_params width={}'.format(obj_meta.rect_params.width))
                    print('    rect_params reserved={}'.format(obj_meta.rect_params.reserved))


                    print('tracker_bbox_info={}'.format(obj_meta.tracker_bbox_info))
                    print('tracker_confidence={}'.format(obj_meta.tracker_confidence))
                '''

            except StopIteration:
                break

            obj_meta.rect_params.has_bg_color = 1
            obj_meta.rect_params.bg_color.set(
                0.0, 0.0, 1.0, 0.2
            )  #It seems that only the alpha channel is working. RGB value is reflected.
            obj_counter[obj_meta.class_id] += 1
            obj_meta.rect_params.border_color.set(
                0.0, 1.0, 1.0, 0.0
            )  # It seems that only the alpha channel is not working. (red, green, blue , alpha)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={} FPS={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
            obj_counter[PGIE_CLASS_ID_PERSON], (1 / (now - start)))

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
        py_nvosd_text_params.font_params.font_size = 20
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(
            0.2, 0.2, 1.0, 1)  # (red, green, blue , alpha)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.2, 0.2, 0.2, 0.3)
        # Using pyds.get_string() to get display_text as string
        if prt:
            print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
        prt = False
        start = now

    return Gst.PadProbeReturn.OK  #DROP, HANDLED, OK, PASS, REMOVE
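
# --- Illustrative sketch (not part of the original examples) ---------------
# A probe like the one above only does something once it is attached to a pad.
# Below is a minimal, hedged sketch of that attachment; the helper name is an
# assumption, and the Gst import is assumed to exist at the top of this file
# (every probe here already returns Gst.PadProbeReturn.OK).
def attach_buffer_probe(element, probe_func):
    # Attach a buffer probe (such as the ones defined in this file) to the
    # element's sink pad.
    sinkpad = element.get_static_pad("sink")
    if not sinkpad:
        raise RuntimeError("Unable to get sink pad")
    # The last argument is delivered to the probe as u_data; the probes here ignore it.
    sinkpad.add_probe(Gst.PadProbeType.BUFFER, probe_func, 0)
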
def tiler_src_pad_buffer_probe(pad, info, u_data):
    # Initializing object counter with 0.
    # Mask-detection version: only recognizes mask and no-mask classes.
    obj_counter = {
        PGIE_CLASS_ID_MASK: 0,
        PGIE_CLASS_ID_NOMASK: 0,
    }

    frame_number = 0
    num_rects = 0  # number of objects in the frame
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    #====================== Definition of the on-screen message values
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    current_pad_index = pyds.NvDsFrameMeta.cast(l_frame.data).pad_index

    camera_id = get_camera_id(current_pad_index)

    # The mask-detection service is still missing here
    #
    #

    # All services require on-screen text; only the occupancy (Aforo) service requires a line and a rectangle
    display_meta.num_labels = 1  # number of text labels
    py_nvosd_text_params = display_meta.text_params[0]

    # Setup of the on-screen text label
    py_nvosd_text_params.x_offset = 100
    py_nvosd_text_params.y_offset = 120
    py_nvosd_text_params.font_params.font_name = "Arial"
    py_nvosd_text_params.font_params.font_size = 10
    py_nvosd_text_params.font_params.font_color.red = 1.0
    py_nvosd_text_params.font_params.font_color.green = 1.0
    py_nvosd_text_params.font_params.font_color.blue = 1.0
    py_nvosd_text_params.font_params.font_color.alpha = 1.0
    py_nvosd_text_params.set_bg_clr = 1
    py_nvosd_text_params.text_bg_clr.red = 0.0
    py_nvosd_text_params.text_bg_clr.green = 0.0
    py_nvosd_text_params.text_bg_clr.blue = 0.0
    py_nvosd_text_params.text_bg_clr.alpha = 1.0

    no_mask_ids = get_no_mask_ids_dict(camera_id)

    frame_number = 1  # to avoid an undefined-variable issue if no frame metadata is present
    clean_at_every = 43
    use_ids = 1
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        #print(" fps:",frame_meta.num_surface_per_frame)
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta

        #print(num_rects)  # stream ID number
        use_ids = frame_number % clean_at_every
        if use_ids == 0:
            ids = set()

        # Inner loop where the objects within the frame are evaluated
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            #x = obj_meta.rect_params.width
            #y = obj_meta.rect_params.height

            obj_counter[obj_meta.class_id] += 1

            # evaluating only if class_id is 1 (no mask)
            if obj_meta.class_id == 1:

                # collecting all the ids on this frame
                if use_ids == 0:
                    ids.add(obj_meta.object_id)

                if obj_meta.object_id not in no_mask_ids:
                    counter = 1
                else:
                    counter = no_mask_ids[obj_meta.object_id]
                    counter += 1

                # save the counter value via set_no_mask_ids_dict only while it is below 4
                if counter < 4:
                    no_mask_ids.update({obj_meta.object_id: counter})
                    set_no_mask_ids_dict(camera_id, no_mask_ids)

                # report the no-mask event to the server only when the counter reaches 4
                if counter == 4:
                    service.mask_detection(obj_meta.object_id, no_mask_ids,
                                           camera_id)

            py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Mask={} NoMaks={}".format(
                frame_number, num_rects, obj_counter[PGIE_CLASS_ID_MASK],
                obj_counter[PGIE_CLASS_ID_NOMASK])
            #if obj_meta.class_id == 1:
            #    print("Clase No Mask : ",obj_meta.class_id," ID :", obj_meta.object_id)   # si object_id = 1 es NOMASK

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

            #py_nvosd_text_params.display_text = "SOCIAL DISTANCE Source ID={} Source Number={} Person_count={}.format(frame_meta.source_id, frame_meta.pad_index , obj_counter[PGIE_CLASS_ID_PERSON])

        # Here we check for repeated IDs and send only the unique ones,
        # and decide whether the ID array has to be cleaned after n frames.

        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    if use_ids == 0:
        new_dict = {}
        no_mask_ids = get_no_mask_ids_dict(camera_id)

        for item in ids:
            if item in no_mask_ids:
                value = no_mask_ids[item]
                new_dict.update({item: value})

        set_no_mask_ids_dict(camera_id, new_dict)

        # Send it to the live stream

    return Gst.PadProbeReturn.OK
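
# --- Illustrative sketch (not part of the original examples) ---------------
# The no-mask reporting above is essentially a per-ID debounce: an object_id
# is reported only once its counter reaches a threshold, and stale IDs are
# pruned periodically. A plain-Python sketch of that logic (no pyds; the
# helper names, the threshold default and the report() callback are
# assumptions). Unlike the probe above, the counter is also stored at the
# threshold so the report fires exactly once per ID.
def update_no_mask_counts(no_mask_ids, seen_ids, report, threshold=4):
    # Increment the per-ID counter and report an ID when it reaches the threshold.
    for obj_id in seen_ids:
        count = no_mask_ids.get(obj_id, 0) + 1
        if count <= threshold:
            no_mask_ids[obj_id] = count
        if count == threshold:
            report(obj_id)
    return no_mask_ids

def prune_stale_ids(no_mask_ids, ids_still_present):
    # Keep counters only for IDs that were seen in the most recent frames.
    return {i: c for i, c in no_mask_ids.items() if i in ids_still_present}
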
Exemple #13
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)

        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE:  # vehicle detected
                    if obj_meta.rect_params.top > (
                            0.25 * 1080
                    ):  # discard detection instances for vehicles too far from the camera
                        car_found = 0  # vehicle id flag
                        for x in vehicle_list:
                            if x.vehicle_id == obj_meta.object_id:  # vehicle id found in the list of vehicle metadata
                                x.frames_list.append(frame_number)
                                x.x_list.append(int(obj_meta.rect_params.left))
                                x.y_list.append(int(obj_meta.rect_params.top))
                                x.xc_list.append(
                                    int(obj_meta.rect_params.left +
                                        (obj_meta.rect_params.width / 2)))
                                x.yc_list.append(
                                    int(obj_meta.rect_params.top +
                                        (obj_meta.rect_params.height / 2)))
                                car_found = 1  # vehicle metadata was already initialized
                                break

                        if car_found == 0:  # vehicle metadata was not initialized in the list
                            frames_temp_list = []
                            frames_temp_list.append(frame_number)
                            x_temp_list = []
                            x_temp_list.append(int(obj_meta.rect_params.left))
                            y_temp_list = []
                            y_temp_list.append(int(obj_meta.rect_params.top))
                            xc_temp_list = []
                            xc_temp_list.append(
                                int(obj_meta.rect_params.left +
                                    (obj_meta.rect_params.width / 2)))
                            yc_temp_list = []
                            yc_temp_list.append(
                                int(obj_meta.rect_params.top +
                                    (obj_meta.rect_params.height / 2)))
                            vehicle_list.append(
                                Vehicle(obj_meta.object_id, frames_temp_list,
                                        x_temp_list, y_temp_list, xc_temp_list,
                                        yc_temp_list))

                        print('Vehicle ID = ', obj_meta.object_id,
                              ', Frame Number = ', frame_number, ', Top X = ',
                              obj_meta.rect_params.left, ', Top Y = ',
                              obj_meta.rect_params.top, ', Width = ',
                              obj_meta.rect_params.width, ', Height = ',
                              obj_meta.rect_params.height
                              )  # initialize vehicle metadata

            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        print("Frame Number =", frame_number, "Number of Objects in frame =",
              num_rects, "Vehicles in frame =",
              obj_counter[PGIE_CLASS_ID_VEHICLE]
              )  # object bounding box metadata overlay
        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        if frame_number == 500:  # when the stream should stop; increase this value to extend the life of video stream
            y_min_list = []
            y_max_list = []
            for car_object in vehicle_list:
                if len(
                        car_object.frames_list
                ) > 10:  # ignore tracking instances with a life of less than ten frames
                    print(car_object.vehicle_id,
                          car_object.frames_list,
                          car_object.y_list,
                          len(car_object.frames_list),
                          '\n',
                          sep=' ')
                    y_min_list.append(min(car_object.y_list))
                    y_max_list.append(max(car_object.y_list))
            y_min_list.sort()
            y_max_list.sort()
            print('y_min:', y_min_list, len(y_min_list), '\n')
            print('y_max:', y_max_list, len(y_max_list), '\n')
            print('Optimal Frame Range:')
            print('y:', min(y_max_list) - 100, max(y_min_list))

            with open(
                    '/opt/nvidia/deepstream/deepstream-5.0/sources/deepstream_python_apps/apps/optimal_frame_extractor/road.txt',
                    'r'
            ) as file:  # input file of the frame extractor application
                data = file.readlines()
            data[10] = str('y1 ') + str(min(y_max_list) - 100) + str(
                ' #opt_frm_inf_start') + str('\n')
            data[11] = str('y2 ') + str(
                max(y_min_list)) + str(' #opt_frm_inf_end')
            with open(
                    '/opt/nvidia/deepstream/deepstream-5.0/sources/deepstream_python_apps/apps/optimal_frame_extractor/road.txt',
                    'w') as file:
                file.writelines(data)

            sys.exit()

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
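
# --- Illustrative sketch (not part of the original examples) ---------------
# The shutdown block above derives an "optimal" vertical band from the tracked
# vehicles: for every sufficiently long track it takes the min and max of its
# y positions, then uses min(of the maxima) - 100 and max(of the minima) as the
# band limits written to road.txt. A stand-alone sketch of that computation
# (the function name is an assumption; the 10-frame minimum track length and
# the 100 px margin follow the example):
def optimal_y_range(vehicles, min_track_len=10, margin=100):
    # Return (y1, y2) or None when no track is long enough.
    y_min_list = [min(v.y_list) for v in vehicles if len(v.frames_list) > min_track_len]
    y_max_list = [max(v.y_list) for v in vehicles if len(v.frames_list) > min_track_len]
    if not y_min_list:
        return None
    return min(y_max_list) - margin, max(y_min_list)
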
def tiler_src_pad_buffer_probe(pad, info, u_data):
    #Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    # Set frame_number & rectangles to draw as 0
    frame_number = 0
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        # Get frame number, number of rectangles to draw, and object metadata
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list

        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            # Increment the object class count by 1 and set the box border color (red, green, blue, alpha)
            obj_counter[obj_meta.class_id] += 1
            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        ################## Setting Metadata Display configuration ###############
        # Acquiring a display meta object.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
            obj_counter[PGIE_CLASS_ID_PERSON])
        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # Set(red, green, blue, alpha); Set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # Set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string to print in notebook
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        ############################################################################
        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
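
# --- Illustrative sketch (not part of the original examples) ---------------
# Several probes in this file call fps_streams["stream{N}"].get_fps(). That
# dictionary is assumed to map stream names to small per-stream FPS counters
# created at start-up. A minimal stand-in with the same interface (this is a
# sketch, not the exact helper shipped with the DeepStream Python samples):
import time

class StreamFPS:
    def __init__(self, stream_id, window_sec=5.0):
        self.stream_id = stream_id
        self.window_sec = window_sec
        self.start_time = time.time()
        self.frame_count = 0

    def get_fps(self):
        # Count one frame; print the rate once per reporting window and reset.
        self.frame_count += 1
        elapsed = time.time() - self.start_time
        if elapsed >= self.window_sec:
            print("FPS of stream {}: {:.2f}".format(self.stream_id,
                                                    self.frame_count / elapsed))
            self.frame_count = 0
            self.start_time = time.time()

# Example wiring (the number of sources is an assumption):
# fps_streams = {"stream{0}".format(i): StreamFPS(i) for i in range(number_sources)}
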
Exemple #15
def osd_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    is_first_obj = True
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    vehicles_types = []
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        center_list = []
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            ## vehicle type detection ##
            # Filter detections by PGIE1 network and don't include RoadSign class
            if (obj_meta.unique_component_id == PGIE_UNIQUE_ID
                    and obj_meta.class_id !=
                    PGIE_CLASS_ID_ROADSIGN  # Exclude RoadSign
                    and obj_meta.class_id !=
                    PGIE_CLASS_ID_BICYCLE  # Exclude Bicycle
                    and
                    obj_meta.class_id != PGIE_CLASS_ID_PERSON  # Exclude Person
                ):
                #####################################
                ## vehicle type classification ##
                #####################################
                l_classifier = obj_meta.classifier_meta_list
                sgie_class = -1
                if l_classifier is not None:  # and class_id==XXX #apply classifier for a specific class
                    classifier_meta = pyds.glist_get_nvds_classifier_meta(
                        l_classifier.data)
                    l_label = classifier_meta.label_info_list
                    label_info = pyds.glist_get_nvds_label_info(l_label.data)
                    sgie_class = label_info.result_class_id
                    rect_params = obj_meta.rect_params
                    w = int(rect_params.width)
                    h = int(rect_params.height)
                    center = (w // 2, h // 2)
                    center_list.append(center)
                    # vehicles_coords.append(center)
                    if frame_number > 0 and frame_number % DUMPINTERVAL == 0:
                        vehicles_types.append(SGIE_LABELS_DICT[sgie_class])
                    #####################################
                    ## licence plate recognition stage ##
                    #####################################

                    # # Cv2 stuff
                    # if is_first_obj:
                    #     is_first_obj = False
                    #     # Getting Image data using nvbufsurface
                    #     # the input should be address of buffer and batch_id
                    #     n_frame = pyds.get_nvds_buf_surface(
                    #         hash(gst_buffer), frame_meta.batch_id)
                    #     # convert python array into numpy array format.
                    #     frame_image = np.array(n_frame, copy=True, order='C')
                    #     # convert the array into cv2 default color format
                    #     frame_image = cv2.cvtColor(
                    #         frame_image, cv2.COLOR_RGBA2BGRA)

                    # # recognize license plate data
                    # alrp_output = lpdetector.alpr_frame(
                    #     frame_image, obj_meta, obj_meta.confidence, frame_number)
                    # print("alrp out >>> ", alrp_output)

            obj_counter[obj_meta.class_id] += 1
            #print("obj_meta: gie_id={0}; object_id={1}; class_id={2}; classifier_class={3}".format(obj_meta.unique_component_id,obj_meta.object_id,obj_meta.class_id,sgie_class))
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.

        # py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(
        #     frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        vcoords.vec_coords.append(center_list)
        if frame_number > 0 and frame_number % DUMPINTERVAL == 0:
            # return degree estimate for every vehicle
            # degrees = estimate_entery_exit_degrees(vehicles_coords)
            py_nvosd_text_params.display_text = "Time Stamp={} Vehicle_types={} Vehicles_coords_across dump interval {} ".format(
                datetime.datetime.now(), vehicles_types, vcoords.vec_coords)

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        # where data is dumped
        if frame_number > 0 and frame_number % DUMPINTERVAL == 0:
            print(pyds.get_string(py_nvosd_text_params.display_text))
            vehicles_types = []
            vcoords.vec_coords = []
            center_list = []
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
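
# --- Illustrative sketch (not part of the original examples) ---------------
# The classifier lookup above only reads the first label of the first
# classifier attached to an object. A small helper doing the same walk with
# the same pyds calls used in the example (the helper name is an assumption;
# it returns None when no classifier metadata is attached):
def first_classifier_class_id(obj_meta):
    l_classifier = obj_meta.classifier_meta_list
    if l_classifier is None:
        return None
    classifier_meta = pyds.glist_get_nvds_classifier_meta(l_classifier.data)
    l_label = classifier_meta.label_info_list
    if l_label is None:
        return None
    label_info = pyds.glist_get_nvds_label_info(l_label.data)
    return label_info.result_class_id
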
def osd_sink_pad_buffer_probe(pad, info, u_data):
    global frame_numberx
    global num_rectsx
    global Object1
    global Object2
    global Object3
    global Object4
    global Object5
    global Object6
    global Object7
    global Object8
    global Object9
    global Object10

    #Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_TOOTHBRUSH: 0,
        PGIE_CLASS_ID_HAIR_DRYER: 0,
        PGIE_CLASS_ID_TEDDY_BEAR: 0,
        PGIE_CLASS_ID_SCISSORS: 0,
        PGIE_CLASS_ID_VASE: 0,
        PGIE_CLASS_ID_CLOCK: 0,
        PGIE_CLASS_ID_BOOK: 0,
        PGIE_CLASS_ID_REFRIGERATOR: 0,
        PGIE_CLASS_ID_SINK: 0,
        PGIE_CLASS_ID_TOASTER: 0,
        PGIE_CLASS_ID_OVEN: 0,
        PGIE_CLASS_ID_MICROWAVE: 0,
        PGIE_CLASS_ID_CELL_PHONE: 0,
        PGIE_CLASS_ID_KEYBOARD: 0,
        PGIE_CLASS_ID_REMOTE: 0,
        PGIE_CLASS_ID_MOUSE: 0,
        PGIE_CLASS_ID_LAPTOP: 0,
        PGIE_CLASS_ID_TVMONITOR: 0,
        PGIE_CLASS_ID_TOILET: 0,
        PGIE_CLASS_ID_DININGTABLE: 0,
        PGIE_CLASS_ID_BED: 0,
        PGIE_CLASS_ID_POTTEDPLANT: 0,
        PGIE_CLASS_ID_SOFA: 0,
        PGIE_CLASS_ID_CHAIR: 0,
        PGIE_CLASS_ID_CAKE: 0,
        PGIE_CLASS_ID_DONUT: 0,
        PGIE_CLASS_ID_PIZZA: 0,
        PGIE_CLASS_ID_HOT_DOG: 0,
        PGIE_CLASS_ID_CARROT: 0,
        PGIE_CLASS_ID_BROCCOLI: 0,
        PGIE_CLASS_ID_ORANGE: 0,
        PGIE_CLASS_ID_SANDWICH: 0,
        PGIE_CLASS_ID_APPLE: 0,
        PGIE_CLASS_ID_BANANA: 0,
        PGIE_CLASS_ID_BOWL: 0,
        PGIE_CLASS_ID_SPOON: 0,
        PGIE_CLASS_ID_KNIFE: 0,
        PGIE_CLASS_ID_FORK: 0,
        PGIE_CLASS_ID_CUP: 0,
        PGIE_CLASS_ID_WINE_GLASS: 0,
        PGIE_CLASS_ID_BOTTLE: 0,
        PGIE_CLASS_ID_TENNIS_RACKET: 0,
        PGIE_CLASS_ID_SURFBOARD: 0,
        PGIE_CLASS_ID_SKATEBOARD: 0,
        PGIE_CLASS_ID_BASEBALL_GLOVE: 0,
        PGIE_CLASS_ID_BASEBALL_BAT: 0,
        PGIE_CLASS_ID_KITE: 0,
        PGIE_CLASS_ID_SPORTS_BALL: 0,
        PGIE_CLASS_ID_SNOWBOARD: 0,
        PGIE_CLASS_ID_SKIS: 0,
        PGIE_CLASS_ID_FRISBEE: 0,
        PGIE_CLASS_ID_SUITCASE: 0,
        PGIE_CLASS_ID_TIE: 0,
        PGIE_CLASS_ID_HANDBAG: 0,
        PGIE_CLASS_ID_UMBRELLA: 0,
        PGIE_CLASS_ID_BACKPACK: 0,
        PGIE_CLASS_ID_GIRAFFE: 0,
        PGIE_CLASS_ID_ZEBRA: 0,
        PGIE_CLASS_ID_BEAR: 0,
        PGIE_CLASS_ID_ELEPHANT: 0,
        PGIE_CLASS_ID_COW: 0,
        PGIE_CLASS_ID_SHEEP: 0,
        PGIE_CLASS_ID_HORSE: 0,
        PGIE_CLASS_ID_DOG: 0,
        PGIE_CLASS_ID_CAT: 0,
        PGIE_CLASS_ID_BIRD: 0,
        PGIE_CLASS_ID_BENCH: 0,
        PGIE_CLASS_ID_PARKING_METER: 0,
        PGIE_CLASS_ID_STOP_SIGN: 0,
        PGIE_CLASS_ID_FIRE_HYDRANT: 0,
        PGIE_CLASS_ID_TRAFFIC_LIGHT: 0,
        PGIE_CLASS_ID_BOAT: 0,
        PGIE_CLASS_ID_TRUCK: 0,
        PGIE_CLASS_ID_TRAIN: 0,
        PGIE_CLASS_ID_BUS: 0,
        PGIE_CLASS_ID_AEROPLANE: 0,
        PGIE_CLASS_ID_MOTORBIKE: 0,
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_PERSON: 0
    }
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        frame_numberx = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        num_rectsx = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        #py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Bird_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_CUP], obj_counter[PGIE_CLASS_ID_BOTTLE])

        Object1 = obj_counter[newValue1]
        Object2 = obj_counter[newValue2]
        Object3 = obj_counter[newValue3]
        Object4 = obj_counter[newValue4]
        Object5 = obj_counter[newValue5]
        Object6 = obj_counter[newValue6]
        Object7 = obj_counter[newValue7]
        Object8 = obj_counter[newValue8]
        Object9 = obj_counter[newValue9]
        Object10 = obj_counter[newValue10]

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        #print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
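
# --- Illustrative sketch (not part of the original examples) ---------------
# The probe above copies ten selected class counts (newValue1..newValue10,
# defined elsewhere) into ten globals. The same selection can be expressed as
# a single helper that also builds an overlay string; selected_class_ids and
# class_names are assumed stand-ins for the globals used in the example:
def format_selected_counts(obj_counter, selected_class_ids, class_names):
    # Build "name=count" overlay text for the selected class ids.
    return " ".join(
        "{}={}".format(class_names[cid], obj_counter.get(cid, 0))
        for cid in selected_class_ids
    )
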
Exemple #17
def tiler_sink_pad_buffer_probe(pad,info,u_data):
    global x11, x12, x13, x14, x21, x22, x23, x24   # lanes
    global vehicle_count
    frame_number=0
    num_rects=0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
        
    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            
        except StopIteration:
            break

        frame_number=frame_meta.frame_num
        l_obj=frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_ROADSIGN:0
        }
        while l_obj is not None:
            try: 
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
                if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE:  # vehicle detected
                    if obj_meta.rect_params.top >= y1 and obj_meta.rect_params.top <= y2:
                        car_found = 0
                        for x in vehicle_list:
                            if x.vehicle_id == obj_meta.object_id:
                                x.frames_list.append(frame_number)
                                x.x_list.append(obj_meta.rect_params.left)
                                x.y_list.append(obj_meta.rect_params.top)
                                x.xc_list.append(int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2)))
                                x.yc_list.append(int(obj_meta.rect_params.top + (obj_meta.rect_params.height / 2)))
                                x.width_list.append(obj_meta.rect_params.width)
                                x.height_list.append(obj_meta.rect_params.height)
                                x_center = int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2))
                                if x_center > min(x13, x23):
                                    x.lane_list.append('shoulder')
                                elif x_center > min(x12, x22):
                                    x.lane_list.append('slow')
                                elif x_center > min(x11, x21):
                                    x.lane_list.append('medium')
                                else:
                                    x.lane_list.append('fast')
                                car_found = 1
                                break
                            
                        if car_found == 0:
                            frames_temp_list = []
                            frames_temp_list.append(frame_number)
                            x_temp_list = []
                            x_temp_list.append(obj_meta.rect_params.left)
                            y_temp_list = []
                            y_temp_list.append(obj_meta.rect_params.top)
                            xc_temp_list = []
                            xc_temp_list.append(int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2)))
                            yc_temp_list = []
                            yc_temp_list.append(int(obj_meta.rect_params.top + (obj_meta.rect_params.height / 2)))
                            width_temp_list = []
                            width_temp_list.append(obj_meta.rect_params.width)
                            height_temp_list = []
                            height_temp_list.append(obj_meta.rect_params.height)
                            x_center = int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2))
                            lane_temp_list = []
                            if x_center > min(x13, x23):
                                lane_temp_list.append('shoulder')
                            elif x_center > min(x12, x22):
                                lane_temp_list.append('slow')
                            elif x_center > min(x11, x21):
                                lane_temp_list.append('medium')
                            else:
                                lane_temp_list.append('fast')
                                
                            vehicle_list.append(Vehicle(obj_meta.object_id, frames_temp_list, x_temp_list, y_temp_list, xc_temp_list, yc_temp_list, width_temp_list, height_temp_list, lane_temp_list))
                            vehicle_count += 1
                        
                    print('Vehicle ID = ', obj_meta.object_id, ', Frame Number = ', frame_number, ', Top X = ', obj_meta.rect_params.left,', Top Y = ', obj_meta.rect_params.top, ', Width = ', obj_meta.rect_params.width, ', Height = ', obj_meta.rect_params.height)

                    for i, o in enumerate(vehicle_list):        
                        frame_lag = abs(int(o.frames_list[-1]) - int(frame_number))
                        if (frame_lag > 20) and int(len(o.frames_list)) <= 6:   # vehicle count rectifier; eliminates false tracking instances
                            print('inadequate number of frames in train, deleting...', '\n')
                            del vehicle_list[i]
                            vehicle_count -= 1
                            break
                        
                        if frame_lag > 20 and frame_lag < 100:      # optimal frame extractor
                            midpoint = int((y1 + y2) / 2)
                            my_array = np.array(o.yc_list)
                            pos = (np.abs(my_array - midpoint)).argmin()
                            temp_frame_number = o.frames_list[pos]
                            temp_id = o.vehicle_id
                            with open('optimal_frame_extraction.txt', 'a') as the_file:
                                the_file.write(str(o.frames_list[pos]))
                                the_file.write(' ')
                                the_file.write(str(o.vehicle_id))
                                the_file.write(' ')
                                the_file.write(str(o.width_list[pos]))
                                the_file.write(' ')
                                the_file.write(str(o.height_list[pos]))
                                the_file.write(' ')
                                the_file.write(str(o.x_list[pos]))
                                the_file.write(' ')
                                the_file.write(str(o.y_list[pos]))
                                the_file.write('\n')
                            xx1 = int(o.x_list[pos])
                            xx2 = int(o.x_list[pos]) + int(o.width_list[pos])
                            yy1 = int(o.y_list[pos])
                            yy2 = int(o.y_list[pos]) + int(o.height_list[pos])
                            del vehicle_list[i]
                            finder = 0
                            for f in rgb_frames_list:
                                if f.frame_iterator == temp_frame_number:
                                    break
                                else:
                                    finder += 1
                            crop = (rgb_frames_list[finder].rgb_image)[yy1:yy2, xx1:xx2]
                            cv2.imwrite(folder_name+"/stream_"+str(0)+"/frame_id="+str(temp_frame_number)+'_'+str(temp_id)+".jpg", crop)
                            break
                            
                        if frame_lag > 100:     # vehicle buffer cleaner; eliminates expired tracking instances
                            print('train expired, deleting...', '\n')
                            del vehicle_list[i]
                            break
                            
                        
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            
            try: 
                l_obj=l_obj.next
            except StopIteration:
                break

        print("Frame Number =", frame_number, "Number of Objects in frame =",num_rects,"Vehicles in frame =",obj_counter[PGIE_CLASS_ID_VEHICLE],"Total Vehicles Detected =",vehicle_count)
        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        #if save_image:
        #    cv2.imwrite(folder_name+"/stream_"+str(frame_meta.pad_index)+"/frame_"+str(frame_number)+".jpg",frame_image)
        #saved_count["stream_"+str(frame_meta.pad_index)]+=1 
        
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Total Vehicles Detected={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], vehicle_count)

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        
        # Draw x11_x21
        py_nvosd_line_params = display_meta.line_params[0]
        py_nvosd_line_params.x1 = x11
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x21
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x11 = py_nvosd_line_params.x1
        x21 = py_nvosd_line_params.x2

        # Draw x12_x22
        py_nvosd_line_params = display_meta.line_params[1]
        py_nvosd_line_params.x1 = x12
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x22
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x12 = py_nvosd_line_params.x1
        x22 = py_nvosd_line_params.x2

        # Draw x13_x23
        py_nvosd_line_params = display_meta.line_params[2]
        py_nvosd_line_params.x1 = x13
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x23
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x13 = py_nvosd_line_params.x1
        x23 = py_nvosd_line_params.x2

        # Draw x14_x24
        py_nvosd_line_params = display_meta.line_params[3]
        py_nvosd_line_params.x1 = x14
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x24
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x14 = py_nvosd_line_params.x1
        x24 = py_nvosd_line_params.x2
        
        # save current frame to rgb_frames_list
        n_frame=pyds.get_nvds_buf_surface(hash(gst_buffer),frame_meta.batch_id)
        frame_image=np.array(n_frame,copy=True,order='C')
        frame_image=cv2.cvtColor(frame_image,cv2.COLOR_RGBA2BGRA)
        rgb_frames_list.append(RGB_Frame(frame_number, frame_image))
        if len(rgb_frames_list) > 120:
            # drop the 20 oldest frames to bound memory use
            del rgb_frames_list[:20]
        
        try:
            l_frame=l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
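
# --- Illustrative sketch (not part of the original examples) ---------------
# The lane assignment above repeats the same x-centre threshold test in two
# places. A small helper expressing it once (the lane names and the min() over
# the two calibration lines follow the example; the helper name is an
# assumption):
def classify_lane(x_center, x11, x12, x13, x21, x22, x23):
    if x_center > min(x13, x23):
        return 'shoulder'
    if x_center > min(x12, x22):
        return 'slow'
    if x_center > min(x11, x21):
        return 'medium'
    return 'fast'
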
Exemple #18
def tiler_src_pad_buffer_probe(pad, info, u_data):
    # Initializing object counter with 0.
    # Mask-detection version: only recognizes mask and no-mask classes.
    obj_counter = {
        PGIE_CLASS_ID_FACE: 0,
        PGIE_CLASS_ID_PLATES: 0,
    }

    frame_number = 0
    num_rects = 0  # number of objects in the frame
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    #====================== Definition of the on-screen message values
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    current_pad_index = pyds.NvDsFrameMeta.cast(l_frame.data).pad_index

    camera_id = get_camera_id(current_pad_index)

    #aforo_info = get_aforo(camera_id)
    #is_aforo_enabled = aforo_info['enabled']

    #social_distance_info = get_social_distance(camera_id)
    #is_social_distance_enabled = social_distance_info['enabled']

    #people_counting_info = get_people_counting(camera_id)
    #is_people_counting_enabled = people_counting_info['enabled']

    # The plate-detection service is still missing here
    #
    #

    #print( "entro al  tiler_src_pad_buffer_probe")
    # All services require on-screen text; only the occupancy (Aforo) service requires a line and a rectangle
    display_meta.num_labels = 1  # number of text labels
    py_nvosd_text_params = display_meta.text_params[0]

    # Setup of the on-screen text label
    py_nvosd_text_params.x_offset = 100
    py_nvosd_text_params.y_offset = 120
    py_nvosd_text_params.font_params.font_name = "Arial"
    py_nvosd_text_params.font_params.font_size = 10
    py_nvosd_text_params.font_params.font_color.red = 1.0
    py_nvosd_text_params.font_params.font_color.green = 1.0
    py_nvosd_text_params.font_params.font_color.blue = 1.0
    py_nvosd_text_params.font_params.font_color.alpha = 1.0
    py_nvosd_text_params.set_bg_clr = 1
    py_nvosd_text_params.text_bg_clr.red = 0.0
    py_nvosd_text_params.text_bg_clr.green = 0.0
    py_nvosd_text_params.text_bg_clr.blue = 0.0
    py_nvosd_text_params.text_bg_clr.alpha = 1.0

    #no_mask_ids = get_no_mask_ids_dict(camera_id)
    # why set it to 1?
    #frame_number = 1 # to avoid an undefined-variable issue

    client = boto3.client('rekognition')

    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        #print( "primer ciclo")
        frame_number = frame_meta.frame_num
        #print(" fps:",frame_meta.num_surface_per_frame)
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False

        #print(num_rects)  # stream ID number
        ids = set()

        # Inner loop where the objects within the frame are evaluated
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            obj_counter[obj_meta.class_id] += 1
            #print(obj_meta.confidence,"   ",obj_meta.object_id)
            #print(obj_counter[obj_meta.class_id],"   ",obj_counter[obj_meta.class_id]%5)
            # and (obj_meta.confidence > 0.9 )
            print(frame_number)
            if ((obj_meta.class_id == 1) and (frame_number % 8 == 0)):
                if is_first_obj:
                    is_first_obj = False
                    # Getting Image data using nvbufsurface
                    # the input should be address of buffer and batch_id
                    n_frame = pyds.get_nvds_buf_surface(
                        hash(gst_buffer), frame_meta.batch_id)
                    #convert python array into numpy array format.
                    frame_image = np.array(n_frame, copy=True, order='C')
                    #convert the array into cv2 default color format
                    frame_image = cv2.cvtColor(frame_image,
                                               cv2.COLOR_RGBA2BGRA)

                save_image = True
                frame_image = draw_bounding_boxes(frame_image, obj_meta,
                                                  obj_meta.confidence)

                # Rekognition expects encoded image bytes, not a raw numpy
                # array: drop the alpha channel and encode the frame as JPEG.
                _, jpeg = cv2.imencode(
                    '.jpg', cv2.cvtColor(frame_image, cv2.COLOR_BGRA2BGR))
                response = client.detect_labels(Image={'Bytes': jpeg.tobytes()})
                print('Detected labels:')
                for label in response['Labels']:
                    print(label['Name'] + ' : ' + str(label['Confidence']))

            #py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Mask={} NoMaks={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_FACE], obj_counter[PGIE_CLASS_ID_PLATES])

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        #pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        #print(save_image)
        #print(folder_name)
        if save_image:
            print("Entre a guardar imagen")
            print(obj_meta.class_id)

            cv2.imwrite(
                folder_name + "/stream_" + str(frame_meta.pad_index) +
                "/frame_" + str(frame_number) + ".jpg", frame_image)
        saved_count["stream_" + str(frame_meta.pad_index)] += 1

        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    '''
    if frame_number % 43 == 0:
        new_dict = {}
        no_mask_ids = get_no_mask_ids_dict(camera_id)

        for item in ids:
            if item in no_mask_ids:
                value = no_mask_ids[item]
                new_dict.update({item: value})

        set_no_mask_ids_dict(camera_id, new_dict)

        # Send it to the live stream
    '''

    return Gst.PadProbeReturn.OK
    def osd_sink_pad_buffer_probe(self,pad,info,u_data):
        frame_number=0
        #Initializing object counter with 0.
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE:0,
            PGIE_CLASS_ID_BICYCLE:0,
            PGIE_CLASS_ID_PERSON:0,
            PGIE_CLASS_ID_ROADSIGN:0
        }


        num_rects=0

        gst_buffer = info.get_buffer()
        if not gst_buffer:
            print("Unable to get GstBuffer ")
            return

        # Retrieve batch metadata from the gst_buffer
        # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
        # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
        l_frame = batch_meta.frame_meta_list
        while l_frame is not None:
            try:
                # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
                # The casting is done by pyds.NvDsFrameMeta.cast()
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone.
                frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            except StopIteration:
                break

            frame_number=frame_meta.frame_num
            num_rects = frame_meta.num_obj_meta
            l_obj=frame_meta.obj_meta_list

            # Message for output of detection inference
            msg = Detection2DArray()
            while l_obj is not None:
                try:
                    # Casting l_obj.data to pyds.NvDsObjectMeta
                    obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
                    l_classifier = obj_meta.classifier_meta_list

                    # If object is a car (class ID 0), perform attribute classification
                    if obj_meta.class_id == 0 and l_classifier is not None:
                        # Creating and publishing message with output of classification inference
                        msg2 = Classification2D()

                        while l_classifier is not None:
                            result = ObjectHypothesis()
                            try:
                                classifier_meta = pyds.glist_get_nvds_classifier_meta(l_classifier.data)
                                
                            except StopIteration:
                                print('Could not parse MetaData: ')
                                break

                            classifier_id = classifier_meta.unique_component_id
                            l_label = classifier_meta.label_info_list
                            label_info = pyds.glist_get_nvds_label_info(l_label.data)
                            classifier_class = label_info.result_class_id

                            if classifier_id == 2:
                                result.id = class_color[classifier_class]
                            elif classifier_id == 3:
                                result.id = class_make[classifier_class]
                            else:
                                result.id = class_type[classifier_class]

                            result.score = label_info.result_prob                            
                            msg2.results.append(result)
                            l_classifier = l_classifier.next
                    
                        self.publisher_classification.publish(msg2)
                except StopIteration:
                    break
    
                obj_counter[obj_meta.class_id] += 1

                # Creating message for output of detection inference
                result = ObjectHypothesisWithPose()
                result.id = str(class_obj[obj_meta.class_id])
                result.score = obj_meta.confidence
                
                left = obj_meta.rect_params.left
                top = obj_meta.rect_params.top
                width = obj_meta.rect_params.width
                height = obj_meta.rect_params.height
                bounding_box = BoundingBox2D()
                bounding_box.center.x = float(left + (width / 2))
                # In image coordinates y grows downwards, so the centre is top + height/2.
                bounding_box.center.y = float(top + (height / 2))
                bounding_box.size_x = float(width)
                bounding_box.size_y = float(height)
                
                detection = Detection2D()
                detection.results.append(result)
                detection.bbox = bounding_box
                msg.detections.append(detection)

                try: 
                    l_obj=l_obj.next
                except StopIteration:
                    break

            # Publishing message with output of detection inference
            self.publisher_detection.publish(msg)
        

            # Acquiring a display meta object. The memory ownership remains in
            # the C code so downstream plugins can still access it. Otherwise
            # the garbage collector will claim it when this probe function exits.
            display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
            display_meta.num_labels = 1
            py_nvosd_text_params = display_meta.text_params[0]
            # Setting display text to be shown on screen
            # Note that the pyds module allocates a buffer for the string, and the
            # memory will not be claimed by the garbage collector.
            # Reading the display_text field here will return the C address of the
            # allocated string. Use pyds.get_string() to get the string content.
            py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

            # Now set the offsets where the string should appear
            py_nvosd_text_params.x_offset = 10
            py_nvosd_text_params.y_offset = 12

            # Font , font-color and font-size
            py_nvosd_text_params.font_params.font_name = "Serif"
            py_nvosd_text_params.font_params.font_size = 10
            # set(red, green, blue, alpha); set to White
            py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

            # Text background color
            py_nvosd_text_params.set_bg_clr = 1
            # set(red, green, blue, alpha); set to Black
            py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
            # Using pyds.get_string() to get display_text as string
            pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
            try:
                l_frame=l_frame.next
            except StopIteration:
                break
			
        return Gst.PadProbeReturn.OK 
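
# --- Usage sketch (not part of the original example) ---
# A probe like the one above only runs once it is registered on a pad. This is a
# minimal, hedged sketch; the element name `nvosd` and the callback name are
# illustrative only, and it assumes the usual `import sys` and
# `from gi.repository import Gst` imports that these examples rely on.
def attach_osd_probe(nvosd, probe_callback):
    # Get the sink pad of the on-screen-display element and attach a buffer probe
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write("Unable to get sink pad of nvosd\n")
        return
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, probe_callback, 0)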
Exemple #20
def analytics_meta_buffer_probe(pad, info, u_data):

    # Get the buffer from the pipeline
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer")
        return Gst.PadProbeReturn.OK

    # With the pyds wrapper get the batch of metadata from the buffer
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    # From the batch of metadata get the list of frames
    list_of_frames = batch_meta.frame_meta_list

    # Iterate through the list of frames
    while list_of_frames is not None:
        try:

            # Get the metadata on the current frame
            # The next frame is set at the end of the while loop
            frame_meta = pyds.NvDsFrameMeta.cast(list_of_frames.data)

        except StopIteration:
            break

        # INFORMATION PRESENT IN THE FRAME
        #
        # - frame_meta.frame_num
        # - frame_meta.source_id
        # - frame_meta.batch_id
        # - frame_meta.source_frame_width
        # - frame_meta.source_frame_height
        # - frame_meta.num_obj_meta

        # Print the frame width and height to see at what positions the bounding boxes can be drawn
        # print('Frame Width: ' + str(frame_meta.source_frame_width)) = 1920
        # print('Frame Height: ' + str(frame_meta.source_frame_height)) = 1080

        # In the information of the frame we can get a list of objects detected on the frame.
        list_of_objects = frame_meta.obj_meta_list

        # Iterate through the list of objects
        while list_of_objects is not None:
            try:
                # Get the metadata for each object in the frame
                object_meta = pyds.NvDsObjectMeta.cast(list_of_objects.data)

            except StopIteration:
                break

            # Get the list of user metadata attached to this object
            l_user_meta = object_meta.obj_user_meta_list

            while l_user_meta:
                try:
                    pass
                    # user_meta = pyds.NvDsUserMeta.cast(l_user_meta.data)
                    # print(user_meta.base_meta.meta_type)
                    # if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type("NVIDIA.DSANALYTICSOBJ.USER_META"):
                    #     user_meta_data = pyds.NvDsAnalyticsObjInfo.cast(user_meta.user_meta_data)
                    #     if user_meta_data.dirStatus: print("Object {0} moving in direction: {1}".format(object_meta.object_id, user_meta_data.dirStatus))
                    #     if user_meta_data.lcStatus: print("Object {0} line crossing status: {1}".format(object_meta.object_id, user_meta_data.lcStatus))
                    #     if user_meta_data.ocStatus: print("Object {0} overcrowding status: {1}".format(object_meta.object_id, user_meta_data.ocStatus))
                    #     if user_meta_data.roiStatus: print("Object {0} roi status: {1}".format(object_meta.object_id, user_meta_data.roiStatus))
                except StopIteration:
                    break
                # Advance to the next user meta entry; without this the loop would never terminate
                try:
                    l_user_meta = l_user_meta.next
                except StopIteration:
                    break

            try:
                list_of_objects = list_of_objects.next
            except StopIteration:
                break
        # When there are no more objects in the list of objects
        # we continue here

        # INFORMATION OF THE OBJECT METADATA
        #
        # - object_meta.class_id
        # - object_meta.confidence
        # - object_meta.obj_label
        # - object_meta.object_id (if no tracker is present in the pipeline, the ID is the same for all objects)
        # - object_meta.rect_params

        # Get the display meta from the batch meta; this is a separate piece of metadata,
        # different from the frame meta collected before
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)

        # Define here the number of rects to draw (none are configured in this version)

        # Attach the (empty) display meta to the frame
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        try:
            # Go to the next frame in the list
            list_of_frames = list_of_frames.next
        except StopIteration:
            break
        # When there are no more frames in the buffer we end here and the function returns OK

    return Gst.PadProbeReturn.OK
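
# --- Sketch: reading NvDsAnalytics per-object metadata ---
# This is a hedged helper built from the commented-out code in the probe above;
# it is not part of the original example, but every pyds call it uses appears in
# those commented lines.
def read_analytics_obj_info(object_meta):
    # Walk the object's user metadata and print any nvdsanalytics results
    l_user_meta = object_meta.obj_user_meta_list
    while l_user_meta is not None:
        try:
            user_meta = pyds.NvDsUserMeta.cast(l_user_meta.data)
        except StopIteration:
            break
        if user_meta.base_meta.meta_type == pyds.nvds_get_user_meta_type("NVIDIA.DSANALYTICSOBJ.USER_META"):
            info = pyds.NvDsAnalyticsObjInfo.cast(user_meta.user_meta_data)
            if info.dirStatus:
                print("Object {0} moving in direction: {1}".format(object_meta.object_id, info.dirStatus))
            if info.lcStatus:
                print("Object {0} line crossing status: {1}".format(object_meta.object_id, info.lcStatus))
            if info.ocStatus:
                print("Object {0} overcrowding status: {1}".format(object_meta.object_id, info.ocStatus))
            if info.roiStatus:
                print("Object {0} roi status: {1}".format(object_meta.object_id, info.roiStatus))
        try:
            l_user_meta = l_user_meta.next
        except StopIteration:
            break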
Exemple #21
def cb_buffer_probe(pad, info, cb_args):
    global frame_number
    global start_time

    face_processor, e_ready = cb_args
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer", error=True)
        return

    # Set e_ready event to notify the pipeline is working (e.g: for orchestrator)
    if e_ready is not None and not e_ready.is_set():
        print("Inference pipeline setting [green]e_ready[/green]")
        e_ready.set()

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.glist_get_nvds_frame_meta()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            # frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        # num_detections = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        detections = []
        obj_meta_list = []
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                # obj_meta=pyds.glist_get_nvds_object_meta(l_obj.data)
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_meta_list.append(obj_meta)
            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)
            box = obj_meta.rect_params
            # print(f"{obj_meta.obj_label} | {obj_meta.confidence}")

            box_points = (
                (box.left, box.top),
                (box.left + box.width, box.top + box.height),
            )
            box_p = obj_meta.confidence
            box_label = obj_meta.obj_label
            if face_processor.validate_detection(box_points, box_p, box_label):
                det_data = {"label": box_label, "p": box_p}
                detections.append(
                    Detection(
                        np.array(box_points),
                        data=det_data,
                    ))
                # print(f"Added detection: {det_data}")
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Remove all object meta to avoid drawing. Do this outside while since we're modifying list
        for obj_meta in obj_meta_list:
            # Remove this to avoid drawing label texts
            pyds.nvds_remove_obj_meta_from_frame(frame_meta, obj_meta)
        obj_meta_list = None

        # Each meta object carries max 16 rects/labels/etc.
        max_drawings_per_meta = 16  # This is hardcoded, not documented

        if face_processor.tracker is not None:
            # Track, count and draw tracked people
            tracked_people = face_processor.tracker.update(
                detections, period=face_processor.tracker_period)
            # Filter out people with no live points (don't draw)
            drawn_people = [
                person for person in tracked_people
                if person.live_points.any()
            ]

            if face_processor.draw_tracked_people:
                for n_person, person in enumerate(drawn_people):
                    points = person.estimate
                    box_points = points.clip(0).astype(int)

                    # Update mask votes
                    face_processor.add_detection(
                        person.id,
                        person.last_detection.data["label"],
                        person.last_detection.data["p"],
                    )
                    label, color = face_processor.get_person_label(person.id)

                    # Index of this person's drawing in the current meta
                    n_draw = n_person % max_drawings_per_meta

                    if n_draw == 0:  # Initialize meta
                        # Acquiring a display meta object. The memory ownership remains in
                        # the C code so downstream plugins can still access it. Otherwise
                        # the garbage collector will claim it when this probe function exits.
                        display_meta = pyds.nvds_acquire_display_meta_from_pool(
                            batch_meta)
                        pyds.nvds_add_display_meta_to_frame(
                            frame_meta, display_meta)

                    draw_detection(display_meta, n_draw, box_points, label,
                                   color)

        # Raw detections
        if face_processor.draw_raw_detections:
            for n_detection, detection in enumerate(detections):
                points = detection.points
                box_points = points.clip(0).astype(int)
                label = detection.data["label"]
                if label == LABEL_MASK:
                    color = face_processor.color_mask
                elif label == LABEL_NO_MASK or label == LABEL_MISPLACED:
                    color = face_processor.color_no_mask
                else:
                    color = face_processor.color_unknown
                label = f"{label} | {detection.data['p']:.2f}"
                n_draw = n_detection % max_drawings_per_meta

                if n_draw == 0:  # Initialize meta
                    # Acquiring a display meta object. The memory ownership remains in
                    # the C code so downstream plugins can still access it. Otherwise
                    # the garbage collector will claim it when this probe function exits.
                    display_meta = pyds.nvds_acquire_display_meta_from_pool(
                        batch_meta)
                    pyds.nvds_add_display_meta_to_frame(
                        frame_meta, display_meta)
                draw_detection(display_meta, n_draw, box_points, label, color)

            # Using pyds.get_string() to get display_text as string
            # print(pyds.get_string(py_nvosd_text_params.display_text))
            # print(".", end="", flush=True)
        # print("")
        if not frame_number % FRAMES_LOG_INTERVAL:
            print(f"Processed {frame_number} frames...")

        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    # Start timer at the end of first frame processing
    if start_time is None:
        start_time = time.time()
    return Gst.PadProbeReturn.OK
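
# --- Sketch: one possible draw_detection helper ---
# The real draw_detection used by the probe above is not shown in this excerpt.
# This is a hedged guess at what it could look like, reusing only display-meta
# fields that appear elsewhere in these examples. The assumptions: box_points is
# ((x_min, y_min), (x_max, y_max)) and color is an (r, g, b) tuple in the 0..1 range.
def draw_detection(display_meta, n_draw, box_points, label, color):
    (x_min, y_min), (x_max, y_max) = box_points
    # Fill one rect slot of the display meta with the bounding box
    rect = display_meta.rect_params[n_draw]
    rect.left = int(x_min)
    rect.top = int(y_min)
    rect.width = int(x_max - x_min)
    rect.height = int(y_max - y_min)
    rect.border_width = 2
    rect.border_color.set(color[0], color[1], color[2], 1.0)
    # Fill the matching text slot with the label just above the box
    text = display_meta.text_params[n_draw]
    text.display_text = label
    text.x_offset = int(x_min)
    text.y_offset = max(0, int(y_min) - 15)
    text.font_params.font_name = "Serif"
    text.font_params.font_size = 10
    text.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
    text.set_bg_clr = 1
    text.text_bg_clr.set(color[0], color[1], color[2], 0.5)
    # Each display meta holds at most 16 of each element type, so keep the counters in sync
    display_meta.num_rects = max(display_meta.num_rects, n_draw + 1)
    display_meta.num_labels = max(display_meta.num_labels, n_draw + 1)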
Exemple #22
def sink_pad_buffer_probe(pad, info, u_data):

    gst_buffer = info.get_buffer()

    if not gst_buffer:
        sys.stderr.write("Unable to get GstBuffer\n")
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    frame_list = batch_meta.frame_meta_list

    while frame_list is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(frame_list.data)
        except StopIteration:
            break

        list_of_objects = frame_meta.obj_meta_list

        while list_of_objects is not None:

            try:
                object_meta = pyds.NvDsObjectMeta.cast(list_of_objects.data)
                # https://docs.nvidia.com/metropolis/deepstream/5.0DP/python-api/NvDsMeta/NvDsObjectMeta.html

                if object_meta.object_id not in detectedObjectsIds:
                    t = time.localtime()
                    current_time = time.strftime("%H:%M:%S", t)

                    detectedObjectsIds.append(object_meta.object_id)
                    detectedObjects.append({
                        'id':
                        str(object_meta.object_id),
                        'label':
                        str(object_meta.obj_label),
                        'time':
                        current_time,
                        'confidence':
                        str(object_meta.confidence)
                    })

            except StopIteration:
                break
            # obj_counter[object_meta.class_id] += 1
            try:
                list_of_objects = list_of_objects.next
            except StopIteration:
                break
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]

        textDisplay = "DETECTED OBJECTS:\n\n"
        # Keep only the 10 most recent detections on screen
        if len(detectedObjects) > 10:
            detectedObjectsList = detectedObjects[-10:]
        else:
            detectedObjectsList = detectedObjects

        for _object in detectedObjectsList:
            textDisplay = textDisplay + _object[
                "time"] + ": Detected: \"" + _object[
                    "label"] + "\", ID: " + _object[
                        "id"] + ", Confidence: " + _object["confidence"] + "\n"
        # Print the full overlay text once per frame instead of once per object
        print(textDisplay)

        py_nvosd_text_params.display_text = textDisplay
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        # Advance to the next frame only after the display meta has been attached,
        # so the overlay is not skipped on the last frame of the batch
        try:
            frame_list = frame_list.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
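
# --- Sketch: module-level state assumed by sink_pad_buffer_probe above ---
# The original initialization is not shown in this excerpt; a minimal sketch is
# two module-level lists, one for tracker IDs already seen and one summary dict
# per newly detected object.
detectedObjectsIds = []
detectedObjects = []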
def tiler_src_pad_buffer_probe(pad, info, u_data):
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    #previous = pc.get_previous()
    previous = False
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)

        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        #pc.set_frame_counter(frame_number)
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta

        ids = []
        boxes = []
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            obj_counter[obj_meta.class_id] += 1
            x = obj_meta.rect_params.left
            y = obj_meta.rect_params.top
            obj_id = obj_meta.object_id

            # Service Aforo (in and out)
            ids.append(obj_id)
            boxes.append((x, y))
            #pc.counting_in_and_out_first_detection((x, y), obj_id)
            #pc.counting_in_and_out_when_changing_area((x, y), obj_id, ids, previous)

            # Service People counting
            if previous:
                pc.people_counting_last_time_detected(ids)
                pc.people_counting_storing_fist_time(obj_id)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Service Social Distance
        if previous:
            i = 0
            #pc.get_distances_between_detected_elements_from_centroid(boxes, ids)

        #if not pc.get_previous():
        #    pc.set_previous(True)
        #    previous = pc.get_previous()

        # Service Aforo (in and out)
        #pc.count_in_and_out_when_object_leaves_the_frame(ids)

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.

        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]

        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.

        py_nvosd_text_params.display_text = \
            "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}"\
                .format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)

        # Attach the display meta to the frame so the text above is actually rendered
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
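
# --- Sketch: fps_streams as used by the tiler probes above ---
# A hedged sketch only: in the DeepStream Python reference apps this is typically a
# dict of per-stream FPS helpers (GETFPS from common/FPS.py), keyed by the pad index
# of each source. The helper class is an assumption here; only the dict/key shape is
# taken from the probe above.
#
#     fps_streams = {}
#     for i in range(number_sources):
#         fps_streams["stream{0}".format(i)] = GETFPS(i)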
def tiler_src_pad_buffer_probe(pad, info, u_data):

    # Initializing object counter with 0.
    # version 2.1: persons only

    servicios_habilitados = service.emulate_reading_from_server()
    #print("Valor Aforo :", servicios_habilitados[AFORO_ENT_SAL_SERVICE],servicios_habilitados[PEOPLE_COUNTING_SERVICE],servicios_habilitados[SOCIAL_DISTANCE_SERVICE])

    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }

    frame_number = 0
    num_rects = 0  # number of objects in the frame
    gst_buffer = info.get_buffer()

    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    previous = service.get_previous()

    #====================== Definition of the on-screen message values
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    display_meta.num_labels = 1  # number of text labels
    display_meta.num_lines = 1  # number of lines
    display_meta.num_rects = 1  # number of rectangles

    py_nvosd_text_params = display_meta.text_params[0]
    py_nvosd_line_params = display_meta.line_params[0]
    py_nvosd_rect_params = display_meta.rect_params[0]

    # Setup of the on-screen text label
    py_nvosd_text_params.x_offset = 100
    py_nvosd_text_params.y_offset = 120
    py_nvosd_text_params.font_params.font_name = "Arial"
    py_nvosd_text_params.font_params.font_size = 10
    py_nvosd_text_params.font_params.font_color.red = 1.0
    py_nvosd_text_params.font_params.font_color.green = 1.0
    py_nvosd_text_params.font_params.font_color.blue = 1.0
    py_nvosd_text_params.font_params.font_color.alpha = 1.0
    py_nvosd_text_params.set_bg_clr = 1
    py_nvosd_text_params.text_bg_clr.red = 0.0
    py_nvosd_text_params.text_bg_clr.green = 0.0
    py_nvosd_text_params.text_bg_clr.blue = 0.0
    py_nvosd_text_params.text_bg_clr.alpha = 1.0

    # Setup of the entry/exit (Ent/Sal) line
    # The coordinate values should be read from the configuration file;
    # they are hardcoded at the moment

    py_nvosd_line_params.x1 = 510
    py_nvosd_line_params.y1 = 740
    py_nvosd_line_params.x2 = 1050
    py_nvosd_line_params.y2 = 740
    py_nvosd_line_params.line_width = 5
    py_nvosd_line_params.line_color.red = 1.0
    py_nvosd_line_params.line_color.green = 1.0
    py_nvosd_line_params.line_color.blue = 1.0
    py_nvosd_line_params.line_color.alpha = 1.0

    # Setup of the entry/exit (Ent/Sal) rectangle
    # As with the line parameters, the rectangle values
    # should be computed from the values in the configuration file

    py_nvosd_rect_params.left = 500
    py_nvosd_rect_params.height = 120
    py_nvosd_rect_params.top = 680
    py_nvosd_rect_params.width = 560
    py_nvosd_rect_params.border_width = 4
    py_nvosd_rect_params.border_color.red = 0.0
    py_nvosd_rect_params.border_color.green = 0.0
    py_nvosd_rect_params.border_color.blue = 1.0
    py_nvosd_rect_params.border_color.alpha = 1.0

    #======================

    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        # What does this function do?
        #if get_counter() == 60:
        #    set_counter()

        #    if get_current_time() > get_offset_time():
        #        print('aca...............')
        #        service.emulate_reading_from_server()
        #        set_offset_time()
        #    else:
        #        set_current_time()
        #else:
        #    increment()

        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta

        #
        #print("stream_"+str(frame_meta.pad_index))     El numero de fuente viene en el pad_index
        # este valor debe usarse para identificar que servicio se debe ejecutar en el ciclo interno
        #

        ids = []
        boxes = []
        # Default counts so the display text below is valid even when the
        # Aforo service is disabled or no objects are present in the frame
        entrada = 0
        salida = 0

        #
        # Inner loop where the objects within the frame are evaluated
        #
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            # Persons-only validation, for debugging purposes only
            # print(" Class ID ", pgie_classes_str[obj_meta.class_id])

            obj_counter[obj_meta.class_id] += 1
            x = obj_meta.rect_params.left
            y = obj_meta.rect_params.top

            # Service Aforo (in and out)
            ids.append(obj_meta.object_id)
            boxes.append((x, y))

            #print(servicios_habilitados[AFORO_ENT_SAL_SERVICE])
            if servicios_habilitados[AFORO_ENT_SAL_SERVICE]:
                #print("Servicio de Aforo habilitado")
                entrada, salida = service.aforo((x, y), obj_meta.object_id,
                                                ids, previous)
                #print("Valor Direccion ", entrada, salida)
                #if direction == 1:
                #    contador_entrada += 1
                #    print("Entrada", contador_entrada)
                #elif direction == 0:
                #    print("Salida", contador_salida)
                #    contador_salida += 1

            # Service People counting
            #if previous:
            #    service.people_counting_last_time_detected(ids)
            #    service.people_counting_storing_fist_time(obj_meta.object_id)

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Note 18-Aug-2020
        # The Social Distance code is currently running outside the loop that identifies
        # the objects within the frame; I don't think it should be that way

        # Service Social Distance
        if servicios_habilitados[SOCIAL_DISTANCE_SERVICE]:
            boxes_length = len(boxes)
            if boxes_length > 1:
                service.set_frame_counter(frame_number)
                service.tracked_on_time_social_distance(
                    boxes, ids, boxes_length)

        if not previous:
            previous = service.set_previous()

        # Print the values of interest onto the video
        # Drawing of the entry/exit (Ent/Sal) line
        #

        #py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],obj_counter[PGIE_CLASS_ID_PERSON])
        py_nvosd_text_params.display_text = "Source ID={} Source Number={} Person_count={} Entradas=={} Salidas=={}".format(
            frame_meta.source_id, frame_meta.pad_index,
            obj_counter[PGIE_CLASS_ID_PERSON], entrada, salida)

        # Sends it directly to the video stream
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        # Sends it to the terminal; the following 2 lines do the same thing with different functions

        #print(pyds.get_string(py_nvosd_text_params.display_text))
        #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Vehicle_count=",vehicle_count,"Person_count=",person)

        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
Exemple #25
def osd_sink_pad_buffer_probe(pad, info, u_data):
    global start
    frame_number = 0

    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        now = time.time()
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                print('class_id={}'.format(obj_meta.class_id))
                print('object_id={}'.format(obj_meta.object_id))
                print('obj_label={}'.format(obj_meta.obj_label))

                print('    rect_params height={}'.format(
                    obj_meta.rect_params.height))
                print('    rect_params left={}'.format(
                    obj_meta.rect_params.left))
                print('    rect_params top={}'.format(
                    obj_meta.rect_params.top))
                print('    rect_params width={}'.format(
                    obj_meta.rect_params.width))

            except StopIteration:
                break

            obj_meta.rect_params.border_color.set(
                0.0, 1.0, 1.0, 0.0
            )  # (red, green, blue, alpha); note: the alpha channel does not seem to have any effect here
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} FPS={}".format(
            frame_number, num_rects, (1 / (now - start)))

        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        py_nvosd_text_params.font_params.font_name = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
        py_nvosd_text_params.font_params.font_size = 20
        py_nvosd_text_params.font_params.font_color.set(
            0.2, 0.2, 1.0, 1)  # (red, green, blue , alpha)
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.2, 0.2, 0.2, 0.3)

        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        # Update the timestamp before advancing so the last frame in the batch also refreshes it
        start = now
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK  #DROP, HANDLED, OK, PASS, REMOVE
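
# --- Sketch: globals assumed by the FPS probe above ---
# `start` must hold a timestamp before the first buffer arrives, otherwise the first
# `now - start` computation fails. A minimal sketch, assuming the `import time` that
# the probe above already relies on:
start = time.time()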
def osd_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    #Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_FACE: 0,
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta

        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1

            l_user = obj_meta.obj_user_meta_list
            # print(l_user)
            while l_user is not None:
                print('Inside l_user = obj_meta.obj_user_meta_list Loop')
                try:
                    # Casting l_user.data to pyds.NvDsUserMeta
                    user_meta = pyds.NvDsUserMeta.cast(l_user.data)
                except StopIteration:
                    break

                if (user_meta.base_meta.meta_type !=
                        pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META):
                    # Advance before skipping this entry, otherwise the loop never terminates
                    try:
                        l_user = l_user.next
                    except StopIteration:
                        break
                    continue

                tensor_meta = pyds.NvDsInferTensorMeta.cast(
                    user_meta.user_meta_data)

                # Boxes in the tensor meta should be in network resolution which is
                # found in tensor_meta.network_info. Use this info to scale boxes to
                # the input frame resolution.
                layers_info = []

                for i in range(tensor_meta.num_output_layers):
                    layer = pyds.get_nvds_LayerInfo(tensor_meta, i)
                    layers_info.append(layer)
                    print(f'Layer: {i}, Layer name: {layer.layerName}')

                try:
                    l_user = l_user.next
                except StopIteration:
                    break

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Person_count={} Face_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_PERSON],
            obj_counter[PGIE_CLASS_ID_FACE])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
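
# --- Sketch: locating a specific output layer in the tensor meta ---
# The probe above only prints the layer names; a common next step is to pick a layer
# by name from the collected layers_info list. This helper is a hedged addition, and
# the layer name "output_bbox" in the usage comment is purely illustrative.
def layer_finder(output_layer_info, name):
    # Return the first layer whose layerName matches `name`, or None if absent
    for layer in output_layer_info:
        if layer.layerName == name:
            return layer
    return None

# Example: bbox_layer = layer_finder(layers_info, "output_bbox")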
Exemple #27
def osd_sink_pad_buffer_probe(pad, info, u_data):

    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }

    num_rects = 0
    global total_cars  # explicit mention of the global variable inside the function
    global x11, x12, x13, x14, x21, x22, x23, x24  # lanes
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.glist_get_nvds_frame_meta()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.glist_get_nvds_object_meta(l_obj.data)
                if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE:
                    if obj_meta.rect_params.top >= y1 and obj_meta.rect_params.top <= y2:
                        car_found = 0
                        for x in gate_list:
                            if x.vehicle_id == obj_meta.object_id:
                                x.frames_list.append(frame_number)
                                x.x_list.append(obj_meta.rect_params.left)
                                x.y_list.append(obj_meta.rect_params.top)
                                x.xc_list.append(
                                    int(obj_meta.rect_params.left +
                                        (obj_meta.rect_params.width / 2)))
                                x.yc_list.append(
                                    int(obj_meta.rect_params.top +
                                        (obj_meta.rect_params.height / 2)))
                                x.x_smallest = min(x.x_list)
                                x.x_largest = max(x.x_list)
                                x.y_smallest = min(x.y_list)
                                x.y_largest = max(x.y_list)
                                x_center = int(obj_meta.rect_params.left +
                                               (obj_meta.rect_params.width /
                                                2))
                                y_center = int(obj_meta.rect_params.top +
                                               (obj_meta.rect_params.height /
                                                2))
                                if x_center > min(x13, x23):
                                    x.lane.append('fast')
                                elif x_center > min(x12, x22):
                                    x.lane.append('medium')
                                elif x_center > min(x11, x21):
                                    x.lane.append('slow')
                                else:
                                    x.lane.append('shoulder')
                                car_found = 1
                                break

                        if car_found == 0:
                            frame_temp_list = []
                            frame_temp_list.append(frame_number)
                            x_temp_list = []
                            x_temp_list.append(obj_meta.rect_params.left)
                            y_temp_list = []
                            y_temp_list.append(obj_meta.rect_params.top)
                            xc_temp_list = []
                            xc_temp_list.append(
                                int(obj_meta.rect_params.left +
                                    (obj_meta.rect_params.width / 2)))
                            yc_temp_list = []
                            yc_temp_list.append(
                                int(obj_meta.rect_params.top +
                                    (obj_meta.rect_params.height / 2)))
                            x_center = int(obj_meta.rect_params.left +
                                           (obj_meta.rect_params.width / 2))
                            y_center = int(obj_meta.rect_params.top +
                                           (obj_meta.rect_params.height / 2))
                            lane_temp_list = []
                            if x_center > min(x13, x23):
                                lane_temp_list.append('fast')
                            elif x_center > min(x12, x22):
                                lane_temp_list.append('medium')
                            elif x_center > min(x11, x21):
                                lane_temp_list.append('slow')
                            else:
                                lane_temp_list.append('shoulder')
                            gate_list.append(
                                Gate(obj_meta.object_id, min(x_temp_list),
                                     max(x_temp_list), min(y_temp_list),
                                     max(y_temp_list), frame_temp_list,
                                     x_temp_list, y_temp_list, xc_temp_list,
                                     yc_temp_list, lane_temp_list))

                    if obj_meta.object_id > total_cars:
                        total_cars = obj_meta.object_id  # total cars assigned unique tracing IDs

                    print('Vehicle ID = ', obj_meta.object_id,
                          ', Frame Number = ', frame_number, ', Top X = ',
                          obj_meta.rect_params.left, ', Top Y = ',
                          obj_meta.rect_params.top, ', Width = ',
                          obj_meta.rect_params.width, ', Height = ',
                          obj_meta.rect_params.height)

            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]

        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicles in Frame={} Total Objects in Stream={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
            total_cars)

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        # Draw x11_x21
        py_nvosd_line_params = display_meta.line_params[0]
        py_nvosd_line_params.x1 = x11
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x21
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x11 = py_nvosd_line_params.x1
        x21 = py_nvosd_line_params.x2

        # Draw x12_x22
        py_nvosd_line_params = display_meta.line_params[1]
        py_nvosd_line_params.x1 = x12
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x22
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x12 = py_nvosd_line_params.x1
        x22 = py_nvosd_line_params.x2

        # Draw x13_x23
        py_nvosd_line_params = display_meta.line_params[2]
        py_nvosd_line_params.x1 = x13
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x23
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x13 = py_nvosd_line_params.x1
        x23 = py_nvosd_line_params.x2

        # Draw x14_x24
        py_nvosd_line_params = display_meta.line_params[3]
        py_nvosd_line_params.x1 = x14
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x24
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x14 = py_nvosd_line_params.x1
        x24 = py_nvosd_line_params.x2

        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
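
# --- Sketch: a Gate container compatible with the lane-counting probe above ---
# The original class definition is not shown in this excerpt; this hedged sketch only
# mirrors the constructor arguments and attributes that the probe actually uses.
class Gate:
    def __init__(self, vehicle_id, x_smallest, x_largest, y_smallest, y_largest,
                 frames_list, x_list, y_list, xc_list, yc_list, lane):
        # Per-vehicle track history used by osd_sink_pad_buffer_probe
        self.vehicle_id = vehicle_id
        self.x_smallest = x_smallest
        self.x_largest = x_largest
        self.y_smallest = y_smallest
        self.y_largest = y_largest
        self.frames_list = frames_list
        self.x_list = x_list
        self.y_list = y_list
        self.xc_list = xc_list
        self.yc_list = yc_list
        self.lane = lane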
Exemple #28
    def overlay_display_type(self, gst_buffer, client_data):

        # cast the C void* client_data back to a py_object pointer and deref
        meta_data = cast(client_data, POINTER(py_object)).contents.value
        meta_data.mutex.acquire()

        frame_number = 0

        # Retrieve batch metadata from the gst_buffer
        # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
        # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
        l_frame = batch_meta.frame_meta_list
        while l_frame is not None:
            try:
                # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
                # The casting is done by pyds.NvDsFrameMeta.cast()
                # The casting also keeps ownership of the underlying memory
                # in the C code, so the Python garbage collector will leave
                # it alone.
                frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            except StopIteration:
                break

            if meta_data.active_display_type is not None:
                display_meta = pyds.nvds_acquire_display_meta_from_pool(
                    batch_meta)

                display_meta.num_lines = 0
                display_meta.num_rects = 0

                if 'Line' in meta_data.active_display_type:
                    line = meta_data.active_display_type['Line']
                    py_nvosd_line_params = display_meta.line_params[
                        display_meta.num_lines]
                    display_meta.num_lines += 1

                    py_nvosd_line_params = line.copy(py_nvosd_line_params)

                elif 'Polygon' in meta_data.active_display_type:

                    lines = meta_data.active_display_type['Polygon']

                    for line in lines:
                        if line:
                            py_nvosd_line_params = display_meta.line_params[
                                display_meta.num_lines]
                            display_meta.num_lines += 1

                            py_nvosd_line_params = line.copy(
                                py_nvosd_line_params)

                pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

            try:
                l_frame = l_frame.next
            except StopIteration:
                break

        meta_data.mutex.release()
        return True
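
# --- Sketch: producing the client_data pointer consumed by overlay_display_type ---
# How the callback is registered is not shown in this excerpt. Since the method
# dereferences a ctypes void* back into a Python object, the caller has to box the
# object in a py_object and keep a reference alive for the callback's lifetime.
# A hedged sketch:
#
#     from ctypes import py_object, pointer, cast, c_void_p
#
#     self._meta_data_box = py_object(meta_data)        # keep a reference alive
#     client_data = cast(pointer(self._meta_data_box), c_void_p)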
def osd_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    #Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.glist_get_nvds_frame_meta()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            #frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                #obj_meta=pyds.glist_get_nvds_object_meta(l_obj.data)
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
            obj_counter[PGIE_CLASS_ID_PERSON])

        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12

        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
Exemple #30
def analytics_meta_buffer_probe(pad,info,u_data):

    # Get the buffer from the pipeline
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer")
        return Gst.PadProbeReturn.OK

    # With the pyds wrapper get the batch of metadata from the buffer
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    # From the batch of metadata get the list of frames
    list_of_frames = batch_meta.frame_meta_list

    # Iterate through the list of frames
    while list_of_frames is not None:
        try:
            
            # Get the metadata on the current frame
            # The next frame is set at the end of the while loop
            frame_meta = pyds.NvDsFrameMeta.cast(list_of_frames.data)

        except StopIteration:
            break

        # INFORMATION PRESENT IN THE FRAME
        #
        # - frame_meta.frame_num
        # - frame_meta.source_id
        # - frame_meta.batch_id
        # - frame_meta.source_frame_width
        # - frame_meta.source_frame_height
        # - frame_meta.num_obj_meta

        # Print the frame width and height to see at what positions the bounding boxes can be drawn
        # print('Frame Width: ' + str(frame_meta.source_frame_width)) = 1920
        # print('Frame Height: ' + str(frame_meta.source_frame_height)) = 1080

        # In the information of the frame we can get a list of objects detected on the frame.
        list_of_objects = frame_meta.obj_meta_list

        # Iterate through the list of objects
        while list_of_objects is not None:
            try: 
                # Get the metadata for each object in the frame
                object_meta = pyds.NvDsObjectMeta.cast(list_of_objects.data)

            except StopIteration:
                break

            # INFORMATION IN THE OBJECT METADATA
            #
            # - object_meta.class_id
            # - object_meta.confidence
            # - object_meta.obj_label
            # - object_meta.object_id (if no tracker is present in the pipeline, the ID is the same for all objects)
            # - object_meta.rect_params

            # The object bounding-box info is in object_meta.rect_params
            # Calculate the center of the object in the frame
            # (done inside the loop so it applies to every object, not just the last one)
            box = object_meta.rect_params
            x = box.left + (box.width / 2)
            y = box.top + (box.height / 2)
            # center_of_object = (x, y)
            # center_of_object = np.array([(x,y)], np.int32)

            # Check if the center of the object crosses one of the boxes
            # for i in range(len(boxes_per_line)):

            #     left = boxes_per_line[i][0]
            #     top = boxes_per_line[i][1]
            #     width = boxes_per_line[i][2]
            #     height = boxes_per_line[i][3]

            #     # Create the numpy box
            #     numpyBox = np.array([[left, top], [left, (top + height)], [(left + width), top], [(left + width), (top + height)]])
            #     result = cv2.pointPolygonTest(numpyBox, center_of_object, False)
            #     print('Cross Box: ' + str(i + 1))

            # print(result)

            try:
                # Go to the next object in the list
                list_of_objects = list_of_objects.next
            except StopIteration:
                break
        # When there are no more objects in the list of objects
        # we continue here


        # Get the display meta from the batch meta; this is a separate piece of metadata,
        # different from the frame meta collected before
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)

        # Define the number of rects that we are going to draw
        display_meta.num_rects = len(boxes_per_line)

        ## Draw each box in the list of boxes
        for i in range(len(boxes_per_line)):

            ## Get the rect params slot for this box
            py_nvosd_rect_params = display_meta.rect_params[i]

            # Fill the rectangle with a semi-transparent background color
            # (NvOSD color components are in the 0.0-1.0 range)
            py_nvosd_rect_params.has_bg_color = True
            py_nvosd_rect_params.bg_color.set(0.5, 1.0, 1.0, 0.5)
            py_nvosd_rect_params.left   = boxes_per_line[i]["boundaries"][0]
            py_nvosd_rect_params.top    = boxes_per_line[i]["boundaries"][1]
            py_nvosd_rect_params.width  = boxes_per_line[i]["boundaries"][2]
            py_nvosd_rect_params.height = boxes_per_line[i]["boundaries"][3]

        # Draw the boxes on the frame
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        try:
            # Go to the next frame in the list
            list_of_frames = list_of_frames.next
        except StopIteration:
            break
        # When there are no more frames in the buffer we end here and the function returns OK


    return Gst.PadProbeReturn.OK
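
# --- Sketch: the shape of boxes_per_line assumed by the drawing loop above ---
# A hedged example only: each entry provides a "boundaries" sequence of
# (left, top, width, height) in pixels; the values below are illustrative.
boxes_per_line = [
    {"boundaries": (100, 600, 400, 120)},
    {"boundaries": (700, 600, 400, 120)},
]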