def osd_sink_pad_buffer_probe(pad, info, u_data):
    """OSD probe: count objects per class, paint a translucent blue
    background on every detection, and overlay a stats + FPS text line.

    Relies on module-level globals ``start`` (timestamp of the previous
    buffer, used for the FPS estimate) and ``prt`` (one-shot flag that
    enables printing the overlay text once).
    Returns Gst.PadProbeReturn.OK so the buffer continues downstream.
    """
    global start, prt
    frame_number = 0
    # Per-class detection counters for this buffer.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer, obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    # Fallback timestamp so `start = now` at the end is well-defined even
    # when the batch carries no frame metadata (was a NameError before).
    now = time.time()
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        now = time.time()
        try:
            # l_frame.data needs a cast to pyds.NvDsFrameMeta; the cast
            # keeps ownership of the underlying memory in the C code, so
            # the Python garbage collector leaves it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            # Translucent blue fill behind each detection box.
            # (Debug prints of rect_params attribute types that ran here on
            # every object have been removed.)
            obj_meta.rect_params.has_bg_color = 1
            obj_meta.rect_params.bg_color.set(0.0, 0.0, 1.0, 0.2)
            obj_counter[obj_meta.class_id] += 1
            # (red, green, blue, alpha) — alpha 0.0 keeps the border invisible,
            # preserving the original's on-screen appearance.
            obj_meta.rect_params.border_color.set(0.0, 1.0, 1.0, 0.0)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Guard against a zero interval (first buffer after `start` was
        # seeded, or two buffers within clock resolution) — was a
        # ZeroDivisionError before.
        elapsed = now - start
        fps = (1 / elapsed) if elapsed > 0 else 0.0
        # The pyds module allocates a C buffer for this string; reading the
        # field back returns its C address — use pyds.get_string() to decode.
        py_nvosd_text_params.display_text = \
            "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={} FPS={}".format(
                frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
                obj_counter[PGIE_CLASS_ID_PERSON], fps)
        # Offsets where the string should appear.
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # NOTE(review): font_name usually takes a font family name (e.g.
        # "Serif"); a .ttf file path may silently fall back to the default
        # font — confirm intended rendering.
        py_nvosd_text_params.font_params.font_name = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
        py_nvosd_text_params.font_params.font_size = 20
        py_nvosd_text_params.font_params.font_color.set(
            0.2, 0.2, 1.0, 1)  # (red, green, blue, alpha)
        # Text background color.
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.2, 0.2, 0.2, 0.3)
        if prt:
            print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    prt = False
    start = now
    return Gst.PadProbeReturn.OK  # DROP, HANDLED, OK, PASS, REMOVE
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """OSD probe: count objects per class, run analyze_meta() on every
    object once per 30 frames, and overlay a stats + FPS text line.

    Relies on module-level globals ``start`` (timestamp of the previous
    buffer, used for the FPS estimate) and ``prt`` (one-shot flag that
    enables printing the overlay text once).
    Returns Gst.PadProbeReturn.OK so the buffer continues downstream.
    """
    global start, prt
    frame_number = 0
    # Per-class detection counters for this buffer.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer, obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    # Fallback timestamp so `start = now` at the end is well-defined even
    # when the batch carries no frame metadata (was a NameError before).
    now = time.time()
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        now = time.time()
        try:
            # l_frame.data needs a cast to pyds.NvDsFrameMeta; the cast
            # keeps ownership of the underlying memory in the C code.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                # Sample the object metadata once every 30 frames.
                # (An unused `obj_user = obj_meta.obj_user_meta_list`
                # assignment was removed here.)
                if (frame_number % 30 == 0):
                    analyze_meta(frame_number, obj_meta)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Guard against a zero interval — was a ZeroDivisionError before.
        elapsed = now - start
        fps = (1 / elapsed) if elapsed > 0 else 0.0
        # The pyds module allocates a C buffer for this string; use
        # pyds.get_string() to read it back.
        py_nvosd_text_params.display_text = \
            "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={} FPS={}".format(
                frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
                obj_counter[PGIE_CLASS_ID_PERSON], fps)
        # Offsets where the string should appear.
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size.
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White.
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color: set(red, green, blue, alpha); set to Black.
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        if (prt):
            print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    start = now
    prt = False
    return Gst.PadProbeReturn.OK
def analytics_meta_buffer_probe(pad, info, u_data):
    """Draw the configured counting-line boxes on every frame.

    Iterates the batch's frames, walks each frame's object list, and
    attaches one display-meta rectangle per entry of the module-level
    ``boxes_per_line`` list (each entry carries a ``"boundaries"`` sequence
    of left/top/width/height).
    Returns Gst.PadProbeReturn.OK in all cases.
    """
    # Get the buffer from the pipeline.
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer")
        return Gst.PadProbeReturn.OK
    # With the pyds wrapper get the batch of metadata from the buffer.
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    # From the batch of metadata get the list of frames.
    list_of_frames = batch_meta.frame_meta_list
    while list_of_frames is not None:
        try:
            # Metadata of the current frame; the next frame is taken at the
            # end of the loop. Useful fields: frame_num, source_id,
            # batch_id, source_frame_width/height, num_obj_meta.
            frame_meta = pyds.NvDsFrameMeta.cast(list_of_frames.data)
        except StopIteration:
            break
        # Walk the detections of this frame. The per-object center
        # computation and the commented-out cv2.pointPolygonTest crossing
        # check that used to follow this loop were removed: the center was
        # computed from `object_meta` AFTER the loop (NameError on a frame
        # with zero objects) and its result was never used.
        list_of_objects = frame_meta.obj_meta_list
        while list_of_objects is not None:
            try:
                # Object fields available here: class_id, confidence,
                # obj_label, object_id, rect_params.
                object_meta = pyds.NvDsObjectMeta.cast(list_of_objects.data)
            except StopIteration:
                break
            try:
                list_of_objects = list_of_objects.next
            except StopIteration:
                break
        # Acquire a display meta from the pool (separate from frame meta)
        # and declare how many rectangles we are going to draw.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_rects = len(boxes_per_line)
        for i in range(len(boxes_per_line)):
            py_nvosd_rect_params = display_meta.rect_params[i]
            py_nvosd_rect_params.has_bg_color = True
            # NvOSD color channels are normalized doubles in [0, 1]; the
            # original passed raw 8-bit values (128.0, 255.0, ...).
            py_nvosd_rect_params.bg_color.set(128.0 / 255.0, 1.0, 1.0 / 255.0, 0.5)
            py_nvosd_rect_params.left = boxes_per_line[i]["boundaries"][0]
            py_nvosd_rect_params.top = boxes_per_line[i]["boundaries"][1]
            py_nvosd_rect_params.width = boxes_per_line[i]["boundaries"][2]
            py_nvosd_rect_params.height = boxes_per_line[i]["boundaries"][3]
        # Draw the boxes on the frame.
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            # Go to the next frame in the list.
            list_of_frames = list_of_frames.next
        except StopIteration:
            break
    # No more frames in the buffer.
    return Gst.PadProbeReturn.OK
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """OSD probe that runs license-plate recognition on vehicle detections.

    Per buffer: counts objects per class, keeps only detections from the
    primary detector (PGIE_UNIQUE_ID) that are not RoadSign/Bicycle/Person,
    reads the secondary-classifier result when present, fetches the decoded
    frame once per buffer via pyds.get_nvds_buf_surface() for the external
    ``lpdetector``, and overlays a summary text line.
    """
    frame_number = 0
    # True until the first qualifying object of this buffer is processed;
    # ensures the frame pixels are fetched/converted only once per buffer.
    is_first_obj = True
    # Intiallizing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer.
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    print(l_frame)
    while l_frame is not None:
        try:
            # l_frame.data needs a cast to pyds.NvDsFrameMeta; the cast keeps
            # ownership of the underlying memory in the C code, so the Python
            # garbage collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta.
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            ## vehicle type detection ##
            # Filter detections by PGIE1 network and don't include RoadSign class.
            if (obj_meta.unique_component_id == PGIE_UNIQUE_ID
                    and obj_meta.class_id != PGIE_CLASS_ID_ROADSIGN  # Exclude RoadSign
                    and obj_meta.class_id != PGIE_CLASS_ID_BICYCLE  # Exclude Bicycle
                    and obj_meta.class_id != PGIE_CLASS_ID_PERSON  # Exclude Person
                    ):
                # Get secondary classifier data.
                l_classifier = obj_meta.classifier_meta_list
                sgie_class = -1
                if l_classifier is not None:  # and class_id==XXX
                    # Apply classifier for a specific class.
                    classifier_meta = pyds.glist_get_nvds_classifier_meta(
                        l_classifier.data)
                    l_label = classifier_meta.label_info_list
                    label_info = pyds.glist_get_nvds_label_info(l_label.data)
                    sgie_class = label_info.result_class_id
                    print("sgie_class >>>", sgie_class)
                # Cv2 stuff.
                if is_first_obj:
                    is_first_obj = False
                    # Getting Image data using nvbufsurface;
                    # the input should be address of buffer and batch_id.
                    n_frame = pyds.get_nvds_buf_surface(
                        hash(gst_buffer), frame_meta.batch_id)
                    # Convert python array into numpy array format.
                    frame_image = np.array(n_frame, copy=True, order='C')
                    # Convert the array into cv2 default color format.
                    frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)
                # Recognize license plate data.
                # NOTE(review): relies on frame_image having been populated by
                # the is_first_obj branch above on this buffer — confirm.
                alrp_output = lpdetector.alpr_frame(
                    frame_image, obj_meta, obj_meta.confidence, frame_number)
                print("alrp out >>> ", alrp_output)
            obj_counter[obj_meta.class_id] += 1
            #print("obj_meta: gie_id={0}; object_id={1}; class_id={2}; classifier_class={3}".format(obj_meta.unique_component_id,obj_meta.object_id,obj_meta.class_id,sgie_class))
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen.
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
            obj_counter[PGIE_CLASS_ID_PERSON])
        # Now set the offsets where the string should appear.
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size.
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White.
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color.
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black.
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string.
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    """Track per-vehicle bounding-box trajectories and derive an optimal
    frame band.

    For every vehicle detection below 25% of the frame height, appends the
    box position (and center) to the matching entry of the module-level
    ``vehicle_list`` (creating a new ``Vehicle`` record on first sight).
    At frame 500 it computes the y-range covered by long-lived tracks,
    rewrites lines 10-11 of the frame-extractor config file ``road.txt``,
    and terminates the process via sys.exit().
    """
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer.
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta.
            # The casting is done by pyds.NvDsFrameMeta.cast().
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta.
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE:  # vehicle detected
                    # NOTE(review): 1080 appears to assume a fixed source
                    # height — confirm against the stream resolution.
                    if obj_meta.rect_params.top > (
                            0.25 * 1080
                    ):  # discard detection instances for vehicles too far from the camera
                        car_found = 0  # vehicle id flag
                        for x in vehicle_list:
                            if x.vehicle_id == obj_meta.object_id:
                                # vehicle id found in the list of vehicle metadata
                                x.frames_list.append(frame_number)
                                x.x_list.append(int(obj_meta.rect_params.left))
                                x.y_list.append(int(obj_meta.rect_params.top))
                                x.xc_list.append(
                                    int(obj_meta.rect_params.left +
                                        (obj_meta.rect_params.width / 2)))
                                x.yc_list.append(
                                    int(obj_meta.rect_params.top +
                                        (obj_meta.rect_params.height / 2)))
                                car_found = 1  # vehicle metadata was already initialized
                                break
                        if car_found == 0:
                            # vehicle metadata was not initialized in the list
                            frames_temp_list = []
                            frames_temp_list.append(frame_number)
                            x_temp_list = []
                            x_temp_list.append(int(obj_meta.rect_params.left))
                            y_temp_list = []
                            y_temp_list.append(int(obj_meta.rect_params.top))
                            xc_temp_list = []
                            xc_temp_list.append(
                                int(obj_meta.rect_params.left +
                                    (obj_meta.rect_params.width / 2)))
                            yc_temp_list = []
                            yc_temp_list.append(
                                int(obj_meta.rect_params.top +
                                    (obj_meta.rect_params.height / 2)))
                            vehicle_list.append(
                                Vehicle(obj_meta.object_id, frames_temp_list,
                                        x_temp_list, y_temp_list,
                                        xc_temp_list, yc_temp_list))
                            print('Vehicle ID = ', obj_meta.object_id,
                                  ', Frame Number = ', frame_number,
                                  ', Top X = ', obj_meta.rect_params.left,
                                  ', Top Y = ', obj_meta.rect_params.top,
                                  ', Width = ', obj_meta.rect_params.width,
                                  ', Height = ', obj_meta.rect_params.height
                                  )  # initialize vehicle metadata
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        print("Frame Number =", frame_number,
              "Number of Objects in frame =", num_rects,
              "Vehicles in frame =", obj_counter[PGIE_CLASS_ID_VEHICLE]
              )  # object bounding box metadata overlay
        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen.
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE])
        # Now set the offsets where the string should appear.
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size.
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White.
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color.
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black.
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string.
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        if frame_number == 500:
            # when the stream should stop; increase this value to extend the
            # life of video stream
            y_min_list = []
            y_max_list = []
            for car_object in vehicle_list:
                if len(
                        car_object.frames_list
                ) > 10:  # ignore tracking instances with a life of less than ten frames
                    print(car_object.vehicle_id, car_object.frames_list,
                          car_object.y_list, len(car_object.frames_list),
                          '\n', sep=' ')
                    y_min_list.append(min(car_object.y_list))
                    y_max_list.append(max(car_object.y_list))
            y_min_list.sort()
            y_max_list.sort()
            print('y_min:', y_min_list, len(y_min_list), '\n')
            print('y_max:', y_max_list, len(y_max_list), '\n')
            print('Optimal Frame Range:')
            print('y:', min(y_max_list) - 100, max(y_min_list))
            with open(
                    '/opt/nvidia/deepstream/deepstream-5.0/sources/deepstream_python_apps/apps/optimal_frame_extractor/road.txt',
                    'r'
            ) as file:  # input file of the frame extractor application
                data = file.readlines()
                # Rewrite the y1/y2 lines of the config in place.
                data[10] = str('y1 ') + str(min(y_max_list) - 100) + str(
                    ' #opt_frm_inf_start') + str('\n')
                data[11] = str('y2 ') + str(
                    max(y_min_list)) + str(' #opt_frm_inf_end')
            with open(
                    '/opt/nvidia/deepstream/deepstream-5.0/sources/deepstream_python_apps/apps/optimal_frame_extractor/road.txt',
                    'w') as file:
                file.writelines(data)
            # Terminates the whole process once the analysis window is done.
            sys.exit()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_src_pad_buffer_probe(pad, info, u_data):
    """Mask-detection probe: counts mask / no-mask detections, tracks how
    many frames each "no mask" object id has been seen (reporting to the
    backend service on the 4th sighting), and overlays a summary text line.
    Every ``clean_at_every`` frames the per-camera id dictionary is pruned
    to the ids still visible.
    """
    # Initializing object counter with 0.
    # This mask-detection version only recognizes mask / no-mask classes.
    obj_counter = {
        PGIE_CLASS_ID_MASK: 0,
        PGIE_CLASS_ID_NOMASK: 0,
    }
    frame_number = 0
    num_rects = 0  # number of objects in the frame
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    # ====================== Definition of the on-screen message values.
    # NOTE(review): l_frame.data is cast here before the empty-batch check —
    # an empty frame_meta_list would fail; confirm upstream always batches
    # at least one frame.
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    current_pad_index = pyds.NvDsFrameMeta.cast(l_frame.data).pad_index
    camera_id = get_camera_id(current_pad_index)
    # Mask-detection service is still pending.
    # All services require text overlay; only the capacity (Aforo) service
    # also needs a line and a rectangle.
    display_meta.num_labels = 1  # number of text labels
    py_nvosd_text_params = display_meta.text_params[0]
    # Setup of the on-screen label.
    py_nvosd_text_params.x_offset = 100
    py_nvosd_text_params.y_offset = 120
    py_nvosd_text_params.font_params.font_name = "Arial"
    py_nvosd_text_params.font_params.font_size = 10
    py_nvosd_text_params.font_params.font_color.red = 1.0
    py_nvosd_text_params.font_params.font_color.green = 1.0
    py_nvosd_text_params.font_params.font_color.blue = 1.0
    py_nvosd_text_params.font_params.font_color.alpha = 1.0
    py_nvosd_text_params.set_bg_clr = 1
    py_nvosd_text_params.text_bg_clr.red = 0.0
    py_nvosd_text_params.text_bg_clr.green = 0.0
    py_nvosd_text_params.text_bg_clr.blue = 0.0
    py_nvosd_text_params.text_bg_clr.alpha = 1.0
    # Per-camera dict of {object_id: sighting counter} for "no mask" ids.
    no_mask_ids = get_no_mask_ids_dict(camera_id)
    frame_number = 1  # to avoid not definition issue
    clean_at_every = 43  # prune the id dict every 43 frames
    use_ids = 1
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        #print(" fps:",frame_meta.num_surface_per_frame)
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        #print(num_rects)  stream ID number
        # use_ids == 0 marks a "cleanup" frame: collect the ids seen now.
        use_ids = frame_number % clean_at_every
        if use_ids == 0:
            ids = set()
        # Inner loop evaluating the objects within the frame.
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta.
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            #x = obj_meta.rect_params.width
            #y = obj_meta.rect_params.height
            obj_counter[obj_meta.class_id] += 1
            # Evaluating only if class_id is 1 (no mask).
            if obj_meta.class_id == 1:
                # Collecting all the ids on this frame.
                if use_ids == 0:
                    ids.add(obj_meta.object_id)
                if obj_meta.object_id not in no_mask_ids:
                    counter = 1
                else:
                    counter = no_mask_ids[obj_meta.object_id]
                    counter += 1
                # Only if counter is lower than 4 we save the counter value
                # in set_no_mask_ids_dict.
                if counter < 4:
                    no_mask_ids.update({obj_meta.object_id: counter})
                    set_no_mask_ids_dict(camera_id, no_mask_ids)
                # Only if the value is 4 we report the no mask to the server.
                if counter == 4:
                    service.mask_detection(obj_meta.object_id, no_mask_ids,
                                           camera_id)
            # NOTE(review): display_text is set per object, so on a frame
            # with no detections the previous buffer's text persists.
            py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Mask={} NoMaks={}".format(
                frame_number, num_rects, obj_counter[PGIE_CLASS_ID_MASK],
                obj_counter[PGIE_CLASS_ID_NOMASK])
            #if obj_meta.class_id == 1:
            #    print("Clase No Mask : ",obj_meta.class_id," ID :", obj_meta.object_id)
            # si object_id = 1 es NOMASK
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        #py_nvosd_text_params.display_text = "SOCIAL DISTANCE Source ID={} Source Number={} Person_count={}.format(frame_meta.source_id, frame_meta.pad_index , obj_counter[PGIE_CLASS_ID_PERSON])
        # Here we check for repeated ids and send only the unique ones, and
        # decide whether the dict must be cleaned after n frames.
        # ----->
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    # On a cleanup frame, keep only the ids that are still visible.
    if use_ids == 0:
        new_dict = {}
        no_mask_ids = get_no_mask_ids_dict(camera_id)
        for item in ids:
            if item in no_mask_ids:
                value = no_mask_ids[item]
                new_dict.update({item: value})
        set_no_mask_ids_dict(camera_id, new_dict)
    # Send it on to the live stream.
    return Gst.PadProbeReturn.OK
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """Walk the batch metadata attached to the buffer, tally detections per
    class, and attach a summary text overlay to every frame.

    Returns Gst.PadProbeReturn.OK so the buffer keeps flowing downstream.
    """
    counts = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0,
    }
    frame_num = 0
    rect_count = 0
    buf = info.get_buffer()
    if not buf:
        print("Unable to get GstBuffer ")
        return
    # pyds expects the C address of the buffer, obtained via hash().
    batch = pyds.gst_buffer_get_nvds_batch_meta(hash(buf))
    frame_node = batch.frame_meta_list
    while frame_node is not None:
        try:
            # Cast keeps ownership of the memory on the C side.
            fmeta = pyds.NvDsFrameMeta.cast(frame_node.data)
        except StopIteration:
            break
        frame_num = fmeta.frame_num
        rect_count = fmeta.num_obj_meta
        obj_node = fmeta.obj_meta_list
        while obj_node is not None:
            try:
                ometa = pyds.NvDsObjectMeta.cast(obj_node.data)
            except StopIteration:
                break
            counts[ometa.class_id] += 1
            try:
                obj_node = obj_node.next
            except StopIteration:
                break
        # Build the overlay text for this frame.
        overlay = pyds.nvds_acquire_display_meta_from_pool(batch)
        overlay.num_labels = 1
        text = overlay.text_params[0]
        text.display_text = "Frames: {} | Objects: {} | Vehicles: {} | Persons: {}".format(
            frame_num, rect_count, counts[PGIE_CLASS_ID_VEHICLE],
            counts[PGIE_CLASS_ID_PERSON])
        text.x_offset = 10
        text.y_offset = 12
        text.font_params.font_name = "Serif"
        text.font_params.font_size = 12
        # White text on a solid black background.
        text.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        text.set_bg_clr = 1
        text.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        pyds.nvds_add_display_meta_to_frame(fmeta, overlay)
        try:
            frame_node = frame_node.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_src_pad_buffer_probe(pad, info, u_data):
    """Count detections per class, color each bounding-box border red, and
    overlay a per-frame summary text line; also ticks the per-stream FPS
    counter. Returns Gst.PadProbeReturn.OK.
    """
    # Intiallizing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    # Set frame_number & rectangles to draw as 0.
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer.
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        # Get frame number, number of rectangles to draw and object metadata.
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta.
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            # Increment the object's class counter and set the box border to
            # red. set() takes (red, green, blue, alpha); the original passed
            # (0.0, 0.0, 1.0, 0.0) — blue with alpha 0, i.e. an invisible
            # border — despite the stated "Red color" intent.
            obj_counter[obj_meta.class_id] += 1
            obj_meta.rect_params.border_color.set(1.0, 0.0, 0.0, 1.0)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        ################## Setting Metadata Display configruation ###############
        # Acquiring a display meta object.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
            obj_counter[PGIE_CLASS_ID_PERSON])
        # Now set the offsets where the string should appear.
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size.
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # Set(red, green, blue, alpha); Set to White.
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color: Set(red, green, blue, alpha); set to Black.
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string to print in notebook.
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        ############################################################################
        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_src_pad_buffer_probe(pad, info, u_data):
    """Buffer probe attached to the tiler's src pad (COCO-class model).

    Walks the NvDs batch metadata on each GstBuffer, counts detected objects
    per COCO class id, and overlays a per-frame summary line (frame number,
    object count, bird count, person count) via an OSD display-meta text
    element. Also updates the per-stream FPS counter.

    NOTE(review): this file appears to define tiler_src_pad_buffer_probe more
    than once; a later definition shadows an earlier one — confirm which is
    actually registered.

    Args:
        pad: the Gst.Pad the probe is attached to (unused).
        info: Gst.PadProbeInfo carrying the GstBuffer.
        u_data: opaque user data (unused).

    Returns:
        Gst.PadProbeReturn.OK so the buffer continues downstream, or None if
        the buffer could not be retrieved.
    """
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer.
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # l_frame.data needs a cast to pyds.NvDsFrameMeta. The cast keeps
            # ownership of the underlying memory in the C code, so the Python
            # garbage collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        # Fresh per-frame counter over all COCO class ids the PGIE can emit.
        # FIX: the original dict literal listed PGIE_CLASS_ID_UMBRELLA twice
        # (between HANDBAG and BACKPACK, and again after BACKPACK); a
        # duplicate key in a dict literal is silently collapsed, so the
        # second entry was redundant and has been removed.
        obj_counter = {
            PGIE_CLASS_ID_TOOTHBRUSH: 0,
            PGIE_CLASS_ID_HAIR_DRYER: 0,
            PGIE_CLASS_ID_TEDDY_BEAR: 0,
            PGIE_CLASS_ID_SCISSORS: 0,
            PGIE_CLASS_ID_VASE: 0,
            PGIE_CLASS_ID_CLOCK: 0,
            PGIE_CLASS_ID_BOOK: 0,
            PGIE_CLASS_ID_REFRIGERATOR: 0,
            PGIE_CLASS_ID_SINK: 0,
            PGIE_CLASS_ID_TOASTER: 0,
            PGIE_CLASS_ID_OVEN: 0,
            PGIE_CLASS_ID_MICROWAVE: 0,
            PGIE_CLASS_ID_CELL_PHONE: 0,
            PGIE_CLASS_ID_KEYBOARD: 0,
            PGIE_CLASS_ID_REMOTE: 0,
            PGIE_CLASS_ID_MOUSE: 0,
            PGIE_CLASS_ID_LAPTOP: 0,
            PGIE_CLASS_ID_TVMONITOR: 0,
            PGIE_CLASS_ID_TOILET: 0,
            PGIE_CLASS_ID_DININGTABLE: 0,
            PGIE_CLASS_ID_BED: 0,
            PGIE_CLASS_ID_POTTEDPLANT: 0,
            PGIE_CLASS_ID_SOFA: 0,
            PGIE_CLASS_ID_CHAIR: 0,
            PGIE_CLASS_ID_CAKE: 0,
            PGIE_CLASS_ID_DONUT: 0,
            PGIE_CLASS_ID_PIZZA: 0,
            PGIE_CLASS_ID_HOT_DOG: 0,
            PGIE_CLASS_ID_CARROT: 0,
            PGIE_CLASS_ID_BROCCOLI: 0,
            PGIE_CLASS_ID_ORANGE: 0,
            PGIE_CLASS_ID_SANDWICH: 0,
            PGIE_CLASS_ID_APPLE: 0,
            PGIE_CLASS_ID_BANANA: 0,
            PGIE_CLASS_ID_BOWL: 0,
            PGIE_CLASS_ID_SPOON: 0,
            PGIE_CLASS_ID_KNIFE: 0,
            PGIE_CLASS_ID_FORK: 0,
            PGIE_CLASS_ID_CUP: 0,
            PGIE_CLASS_ID_WINE_GLASS: 0,
            PGIE_CLASS_ID_BOTTLE: 0,
            PGIE_CLASS_ID_TENNIS_RACKET: 0,
            PGIE_CLASS_ID_SURFBOARD: 0,
            PGIE_CLASS_ID_SKATEBOARD: 0,
            PGIE_CLASS_ID_BASEBALL_GLOVE: 0,
            PGIE_CLASS_ID_BASEBALL_BAT: 0,
            PGIE_CLASS_ID_KITE: 0,
            PGIE_CLASS_ID_SPORTS_BALL: 0,
            PGIE_CLASS_ID_SNOWBOARD: 0,
            PGIE_CLASS_ID_SKIS: 0,
            PGIE_CLASS_ID_FRISBEE: 0,
            PGIE_CLASS_ID_SUITCASE: 0,
            PGIE_CLASS_ID_TIE: 0,
            PGIE_CLASS_ID_HANDBAG: 0,
            PGIE_CLASS_ID_UMBRELLA: 0,
            PGIE_CLASS_ID_BACKPACK: 0,
            PGIE_CLASS_ID_GIRAFFE: 0,
            PGIE_CLASS_ID_ZEBRA: 0,
            PGIE_CLASS_ID_BEAR: 0,
            PGIE_CLASS_ID_ELEPHANT: 0,
            PGIE_CLASS_ID_COW: 0,
            PGIE_CLASS_ID_SHEEP: 0,
            PGIE_CLASS_ID_HORSE: 0,
            PGIE_CLASS_ID_DOG: 0,
            PGIE_CLASS_ID_CAT: 0,
            PGIE_CLASS_ID_BIRD: 0,
            PGIE_CLASS_ID_BENCH: 0,
            PGIE_CLASS_ID_PARKING_METER: 0,
            PGIE_CLASS_ID_STOP_SIGN: 0,
            PGIE_CLASS_ID_FIRE_HYDRANT: 0,
            PGIE_CLASS_ID_TRAFFIC_LIGHT: 0,
            PGIE_CLASS_ID_BOAT: 0,
            PGIE_CLASS_ID_TRUCK: 0,
            PGIE_CLASS_ID_TRAIN: 0,
            PGIE_CLASS_ID_BUS: 0,
            PGIE_CLASS_ID_AEROPLANE: 0,
            PGIE_CLASS_ID_MOTORBIKE: 0,
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_PERSON: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen. The pyds module
        # allocates a buffer for the string; reading display_text back
        # returns a C address — use pyds.get_string() to get the content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Bird_count={} Person_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_BIRD],
            obj_counter[PGIE_CLASS_ID_PERSON])
        # Now set the offsets where the string should appear.
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size.
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White.
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color: set to Black, fully opaque.
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    """Buffer probe attached to the tiler's sink pad: vehicle tracking core.

    For every frame in the batch this probe:
      1. Tracks each vehicle detection whose top edge lies inside the
         "optimal range" band [y1, y2]: appends its geometry to an existing
         Vehicle entry in the global vehicle_list, or creates a new entry.
      2. Classifies the vehicle's lane from its x-center against the global
         lane boundary columns (x11..x14 / x21..x24).
      3. Sweeps vehicle_list for tracks that have stopped updating
         (frame_lag > 20): short tracks are discarded, mature ones produce an
         "optimal frame" — posted to a REST endpoint, logged to a text file,
         and cropped out of a buffered RGB frame. Tracks lagging > 100 frames
         are expired.
      4. Draws the four lane-boundary lines and a summary text overlay, and
         buffers the current frame (BGRA copy) into rgb_frames_list.

    Args:
        pad: the Gst.Pad the probe is attached to (unused).
        info: Gst.PadProbeInfo carrying the GstBuffer.
        u_data: opaque user data (unused).

    Returns:
        Gst.PadProbeReturn.OK, or None if the buffer could not be retrieved.
    """
    # Lane boundary columns and the optimal-range band are module-level
    # state shared with other parts of the file.
    global x11, x12, x13, x14, x21, x22, x23, x24
    global y11, y22, y1, y2
    global vehicle_count
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer.
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # l_frame.data needs a cast to pyds.NvDsFrameMeta. The cast keeps
            # ownership of the underlying memory in the C code, so the Python
            # garbage collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE:  # vehicle detected
                    # Optimal range filter: only track vehicles whose top edge
                    # lies inside the [y1, y2] band.
                    if obj_meta.rect_params.top >= y1 and obj_meta.rect_params.top <= y2:
                        car_found = 0  # flag: tracker id already known?
                        for x in vehicle_list:
                            if x.vehicle_id == obj_meta.object_id:
                                # Known track: append this frame's geometry to
                                # the already-initialized metadata lists.
                                x.frames_list.append(frame_number)
                                x.x_list.append(obj_meta.rect_params.left)
                                x.y_list.append(obj_meta.rect_params.top)
                                x.xc_list.append(
                                    int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2)))
                                x.yc_list.append(
                                    int(obj_meta.rect_params.top + (obj_meta.rect_params.height / 2)))
                                x.width_list.append(obj_meta.rect_params.width)
                                x.height_list.append(obj_meta.rect_params.height)
                                # Lane classification by x-center against the
                                # lane boundary columns (rightmost first).
                                x_center = int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2))
                                if x_center > min(x13, x23):
                                    x.lane_list.append('shoulder')
                                elif x_center > min(x12, x22):
                                    x.lane_list.append('slow')
                                elif x_center > min(x11, x21):
                                    x.lane_list.append('medium')
                                else:
                                    x.lane_list.append('fast')
                                car_found = 1
                                break
                        if car_found == 0:
                            # New track: initialize the per-vehicle metadata
                            # lists with this first observation.
                            frames_temp_list = []
                            frames_temp_list.append(frame_number)
                            x_temp_list = []
                            x_temp_list.append(obj_meta.rect_params.left)
                            y_temp_list = []
                            y_temp_list.append(obj_meta.rect_params.top)
                            xc_temp_list = []
                            xc_temp_list.append(
                                int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2)))
                            yc_temp_list = []
                            yc_temp_list.append(
                                int(obj_meta.rect_params.top + (obj_meta.rect_params.height / 2)))
                            width_temp_list = []
                            width_temp_list.append(obj_meta.rect_params.width)
                            height_temp_list = []
                            height_temp_list.append(obj_meta.rect_params.height)
                            x_center = int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2))
                            lane_temp_list = []
                            if x_center > min(x13, x23):
                                lane_temp_list.append('shoulder')
                            elif x_center > min(x12, x22):
                                lane_temp_list.append('slow')
                            elif x_center > min(x11, x21):
                                lane_temp_list.append('medium')
                            else:
                                lane_temp_list.append('fast')
                            vehicle_list.append(
                                Vehicle(obj_meta.object_id, frames_temp_list,
                                        x_temp_list, y_temp_list, xc_temp_list,
                                        yc_temp_list, width_temp_list,
                                        height_temp_list, lane_temp_list)
                            )
                            # Show metadata of this vehicle detection instance.
                            print('Vehicle ID = ', obj_meta.object_id,
                                  ', Frame Number = ', frame_number,
                                  ', Top X = ', obj_meta.rect_params.left,
                                  ', Top Y = ', obj_meta.rect_params.top,
                                  ', Width = ', obj_meta.rect_params.width,
                                  ', Height = ', obj_meta.rect_params.height
                                  )
                        # Sweep all known tracks for stale/complete ones.
                        # NOTE(review): each branch below that deletes from
                        # vehicle_list also breaks out of the loop, so the
                        # enumerate index never goes stale — but at most one
                        # track is processed per sweep.
                        for i, o in enumerate(vehicle_list):
                            # How far behind is this track? A lag of 2+ frames
                            # usually signifies a stop or break in tracking.
                            frame_lag = abs(int(o.frames_list[-1]) - int(frame_number))
                            if (frame_lag > 20) and int(len(o.frames_list)) <= 6:
                                # Vehicle count rectifier: eliminates false
                                # tracking instances, i.e. ones not tracked
                                # long enough for conclusive resolution.
                                print('inadequate number of frames in train, deleting...', '\n')
                                del vehicle_list[i]
                                break
                            if frame_lag > 20 and frame_lag < 100:
                                # Optimal frame extractor... the business end.
                                vehicle_count += 1
                                midpoint = int((y1 + y2) / 2)  # reference point of optimality
                                my_array = np.array(o.yc_list)
                                # Index of the observation whose y-center is
                                # closest to the midpoint of the optimal range.
                                pos = (np.abs(my_array - midpoint)).argmin()
                                temp_frame_number = o.frames_list[pos]
                                temp_id = o.vehicle_id
                                now = datetime.now()
                                dt_string = now.strftime('%d/%m/%Y %H:%M:%S')
                                image_path = folder_name + "/stream_" + str(
                                    0) + "/numb_frno_trid=" + str(
                                        vehicle_count) + '_' + str(
                                            temp_frame_number) + '_' + str(
                                                temp_id) + ".jpg"
                                # Report the extraction to the server database.
                                res = requests.post(BASE + 'camera/', json={
                                    "tracking_id": str(o.vehicle_id),
                                    "frame_number": str(o.frames_list[pos]),
                                    "lane": str(o.lane_list[pos]),
                                    "datetime": str(dt_string),
                                    "image_path": str(image_path)
                                })
                                if res.ok:
                                    print(res.json())
                                # Append the extraction record to a local log.
                                with open('optimal_frame_extraction.txt', 'a') as the_file:
                                    the_file.write(str(o.frames_list[pos]))
                                    the_file.write(' ')
                                    the_file.write(str(o.vehicle_id))
                                    the_file.write(' ')
                                    the_file.write(str(o.width_list[pos]))
                                    the_file.write(' ')
                                    the_file.write(str(o.height_list[pos]))
                                    the_file.write(' ')
                                    the_file.write(str(o.x_list[pos]))
                                    the_file.write(' ')
                                    the_file.write(str(o.y_list[pos]))
                                    the_file.write(' ')
                                    the_file.write(str(o.lane_list[pos]))
                                    the_file.write(' ')
                                    the_file.write(str(dt_string))
                                    the_file.write('\n')
                                # Bounding box of the optimal observation.
                                xx1 = int(o.x_list[pos])
                                xx2 = int(o.x_list[pos]) + int(o.width_list[pos])
                                yy1 = int(o.y_list[pos])
                                yy2 = int(o.y_list[pos]) + int(o.height_list[pos])
                                del vehicle_list[i]
                                # Locate the buffered RGB frame that matches the
                                # optimal frame number.
                                # NOTE(review): if no buffered frame matches,
                                # finder ends up == len(rgb_frames_list) and the
                                # indexing below raises IndexError — confirm the
                                # 120-frame buffer always covers the lag window.
                                finder = 0
                                for f in rgb_frames_list:
                                    if f.frame_iterator == temp_frame_number:
                                        break
                                    else:
                                        finder += 1
                                # Crop the part of the frame bounding the vehicle.
                                crop = (rgb_frames_list[finder].rgb_image)[yy1:yy2, xx1:xx2]
                                cv2.imwrite(
                                    folder_name + "/stream_" + str(0) +
                                    "/numb_frno_trid=" + str(vehicle_count) +
                                    '_' + str(temp_frame_number) + '_' +
                                    str(temp_id) + ".jpg", crop)
                                break
                            if frame_lag > 100:
                                # Vehicle buffer cleaner: eliminates expired
                                # tracking instances.
                                print('train expired, deleting...', '\n')
                                del vehicle_list[i]
                                break
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Metadata overlay summary for this frame.
        print("Frame Number =", frame_number, "Number of Objects in frame =",
              num_rects, "Vehicles in frame =",
              obj_counter[PGIE_CLASS_ID_VEHICLE], "Total Vehicles Detected =",
              vehicle_count)
        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen. The pyds module
        # allocates a buffer for the string; reading display_text back
        # returns a C address — use pyds.get_string() to get the content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Total Vehicles Detected={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
            vehicle_count)
        # Now set the offsets where the string should appear.
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size.
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White.
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color: set to Black, fully opaque.
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        # Draw the four green lane-boundary lines (x11..x14 at y11 down to
        # x21..x24 at y22). NOTE(review): the same display_meta is re-added
        # to the frame after every line, and the x-coordinates are copied
        # back out of the line params into the globals unchanged — both look
        # redundant but are preserved as-is.
        # Draw x11_x21
        py_nvosd_line_params = display_meta.line_params[0]
        py_nvosd_line_params.x1 = x11
        py_nvosd_line_params.y1 = y11
        py_nvosd_line_params.x2 = x21
        py_nvosd_line_params.y2 = y22
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x11 = py_nvosd_line_params.x1
        x21 = py_nvosd_line_params.x2
        # Draw x12_x22
        py_nvosd_line_params = display_meta.line_params[1]
        py_nvosd_line_params.x1 = x12
        py_nvosd_line_params.y1 = y11
        py_nvosd_line_params.x2 = x22
        py_nvosd_line_params.y2 = y22
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x12 = py_nvosd_line_params.x1
        x22 = py_nvosd_line_params.x2
        # Draw x13_x23
        py_nvosd_line_params = display_meta.line_params[2]
        py_nvosd_line_params.x1 = x13
        py_nvosd_line_params.y1 = y11
        py_nvosd_line_params.x2 = x23
        py_nvosd_line_params.y2 = y22
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x13 = py_nvosd_line_params.x1
        x23 = py_nvosd_line_params.x2
        # Draw x14_x24
        py_nvosd_line_params = display_meta.line_params[3]
        py_nvosd_line_params.x1 = x14
        py_nvosd_line_params.y1 = y11
        py_nvosd_line_params.x2 = x24
        py_nvosd_line_params.y2 = y22
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x14 = py_nvosd_line_params.x1
        x24 = py_nvosd_line_params.x2
        # Save current frame to rgb_frames_list (deep BGRA copy so the crop
        # above can read it after the GStreamer buffer is recycled).
        n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
        frame_image = np.array(n_frame, copy=True, order='C')
        frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)
        rgb_frames_list.append(RGB_Frame(frame_number, frame_image))
        # Drop expired frames; extend frame life by increasing this value
        # (may cause Jetson to shut down).
        # NOTE(review): `del rgb_frames_list[x]` with a rising index on a
        # shrinking list removes every OTHER element (original indices
        # 0, 2, 4, ...), not the oldest 20 — likely intended
        # `del rgb_frames_list[:20]`. Confirm before changing behavior.
        if len(rgb_frames_list) > 120:
            for x in range(20):
                del rgb_frames_list[x]
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """Buffer probe on the OSD sink pad with past-frame tracker dump.

    Counts detected objects per class (vehicle / person / bicycle / roadsign),
    overlays a per-frame summary line, and — when past-tracking metadata is
    enabled (past_tracking_meta[0] == 1) — dumps the tracker's past-frame
    object batch to stdout.

    NOTE(review): this file appears to define osd_sink_pad_buffer_probe more
    than once; a later definition shadows an earlier one — confirm which is
    actually registered.

    Args:
        pad: the Gst.Pad the probe is attached to (unused).
        info: Gst.PadProbeInfo carrying the GstBuffer.
        u_data: opaque user data (unused).

    Returns:
        Gst.PadProbeReturn.OK, or None if the buffer could not be retrieved.
    """
    frame_number = 0
    # Initializing object counter with 0 for each known class id.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer.
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # l_frame.data needs a cast to pyds.NvDsFrameMeta. The cast keeps
            # ownership of the underlying memory in the C code, so the Python
            # garbage collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            try:
                obj_counter[obj_meta.class_id] += 1
            except KeyError:
                # Class id outside the four known classes — log and continue.
                print(f' >>>> {obj_meta.class_id}')
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen. The pyds module
        # allocates a buffer for the string; reading display_text back
        # returns a C address — use pyds.get_string() to get the content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
            obj_counter[PGIE_CLASS_ID_PERSON])
        # Now set the offsets where the string should appear.
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size.
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White.
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color: set to Black, fully opaque.
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    # Past tracking metadata dump.
    if(past_tracking_meta[0]==1):
        l_user = batch_meta.batch_user_meta_list
        while l_user is not None:
            try:
                # l_user.data needs a cast to pyds.NvDsUserMeta; ownership
                # stays in the C code.
                user_meta = pyds.NvDsUserMeta.cast(l_user.data)
            except StopIteration:
                break
            if(user_meta and user_meta.base_meta.meta_type==pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META):
                try:
                    # user_meta.user_meta_data needs a cast to
                    # pyds.NvDsPastFrameObjBatch; ownership stays in C code.
                    pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast(user_meta.user_meta_data)
                except StopIteration:
                    break
                for trackobj in pyds.NvDsPastFrameObjBatch.list(pPastFrameObjBatch):
                    print("streamId=", trackobj.streamID)
                    print("surfaceStreamID=", trackobj.surfaceStreamID)
                    for pastframeobj in pyds.NvDsPastFrameObjStream.list(trackobj):
                        print("numobj=", pastframeobj.numObj)
                        print("uniqueId=", pastframeobj.uniqueId)
                        print("classId=", pastframeobj.classId)
                        print("objLabel=", pastframeobj.objLabel)
                        for objlist in pyds.NvDsPastFrameObjList.list(pastframeobj):
                            print('frameNum:', objlist.frameNum)
                            print('tBbox.left:', objlist.tBbox.left)
                            print('tBbox.width:', objlist.tBbox.width)
                            print('tBbox.top:', objlist.tBbox.top)
                            # FIX: this line printed tBbox.height under the
                            # label 'tBbox.right:' — the label now matches
                            # the value actually printed.
                            print('tBbox.height:', objlist.tBbox.height)
                            print('confidence:', objlist.confidence)
                            print('age:', objlist.age)
            try:
                l_user = l_user.next
            except StopIteration:
                break
    return Gst.PadProbeReturn.OK
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """Buffer probe on the OSD sink pad.

    Counts detected objects per class (vehicle / person / bicycle / roadsign),
    prints each object's id and center coordinates, and overlays a per-frame
    summary text line on the on-screen display.

    Args:
        pad: the Gst.Pad the probe is attached to (unused).
        info: Gst.PadProbeInfo carrying the GstBuffer.
        u_data: opaque user data (unused).

    Returns:
        Gst.PadProbeReturn.OK, or None if the buffer could not be retrieved.
    """
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0,
    }
    frame_number = 0
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # The batch metadata lives at the buffer's C address, hence hash().
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    frame_link = batch_meta.frame_meta_list
    while frame_link is not None:
        try:
            # Cast keeps memory ownership in the C code, so the Python
            # garbage collector leaves the underlying struct alone.
            frame_meta = pyds.NvDsFrameMeta.cast(frame_link.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta

        obj_link = frame_meta.obj_meta_list
        while obj_link is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(obj_link.data)
            except StopIteration:
                break

            obj_counter[obj_meta.class_id] += 1
            # Trace each object's tracker id and bounding-box center.
            print('frame number', frame_number,
                  "Id: ", obj_meta.object_id,
                  'x', obj_meta.rect_params.left + (obj_meta.rect_params.width/2),
                  'y', obj_meta.rect_params.top + (obj_meta.rect_params.height/2))

            try:
                obj_link = obj_link.next
            except StopIteration:
                break

        # Display meta is pool-owned by the C side so downstream plugins can
        # still read it after this probe returns.
        overlay = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        overlay.num_labels = 1
        text = overlay.text_params[0]
        # display_text reads back as a C address; pyds.get_string() decodes it.
        text.display_text = \
            "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}"\
            .format(frame_number, num_rects,
                    obj_counter[PGIE_CLASS_ID_VEHICLE],
                    obj_counter[PGIE_CLASS_ID_PERSON])
        # Placement and styling: white serif text on an opaque black strip.
        text.x_offset = 10
        text.y_offset = 12
        text.font_params.font_name = "Serif"
        text.font_params.font_size = 10
        text.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        text.set_bg_clr = 1
        text.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        pyds.nvds_add_display_meta_to_frame(frame_meta, overlay)

        try:
            frame_link = frame_link.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """Buffer probe on the OSD sink pad: pickle counting / de-duplication.

    For each detected object past a horizontal threshold, builds a PickleBox
    from its bounding box and compares it (within the global
    [minRange, maxRange] tolerance band) against the global PickleBoxArr of
    previously seen pickles: matches are recolored, new pickles are appended
    (so len(PickleBoxArr) is the running total). A per-frame summary line is
    overlaid and the per-frame count is appended to objectsAtFrame.

    Args:
        pad: the Gst.Pad the probe is attached to (unused).
        info: Gst.PadProbeInfo carrying the GstBuffer.
        u_data: opaque user data (unused).

    Returns:
        Gst.PadProbeReturn.OK, or None if the buffer could not be retrieved.
    """
    frame_number = 0
    # Initializing object counter with 0 (single-class model).
    # NOTE(review): the counter is created once per buffer, not per frame, so
    # with batch sizes > 1 the "current Frame" count accumulates across the
    # frames of the batch — confirm the pipeline uses batch size 1.
    obj_counter = {PGIE_CLASS_ID_PICKLE: 0}
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer.
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # l_frame.data needs a cast to pyds.NvDsFrameMeta. The cast keeps
            # ownership of the underlying memory in the C code, so the Python
            # garbage collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            # Snapshot of this detection's bounding box.
            currPickle = PickleBox(obj_meta.rect_params.left,
                                   obj_meta.rect_params.top,
                                   obj_meta.rect_params.width,
                                   obj_meta.rect_params.height)
            # Only count objects past the horizontal threshold
            # (fraction minRange of the source frame width).
            if currPickle.left > frame_meta.source_frame_width * minRange:
                obj_counter[obj_meta.class_id] += 1
                # Has a box with matching geometry (within the
                # minRange..maxRange tolerance) been seen before?
                knownPickle = False
                for foundPickle in PickleBoxArr:
                    if (foundPickle.left * minRange <= currPickle.left <=
                            foundPickle.left * maxRange and
                            foundPickle.top * minRange <= currPickle.top <=
                            foundPickle.top * maxRange and
                            foundPickle.width * minRange <= currPickle.width <=
                            foundPickle.width * maxRange and
                            foundPickle.height * minRange <= currPickle.height <=
                            foundPickle.height * maxRange):
                        knownPickle = True
                        break
                if knownPickle:
                    # Recolor already-known pickles.
                    # NOTE(review): the original comment labeled this "#BLUE",
                    # but set(red, green, blue, alpha) with (0, 254, 10, 0)
                    # is green-dominant with alpha 0, and color components
                    # elsewhere in this file are in 0.0–1.0 — confirm the
                    # intended color.
                    obj_meta.rect_params.border_color.set(
                        0.0, 254.0, 10.0, 0.0)
                    # Alternative kept from the original (BLACK):
                    # obj_meta.rect_params.border_color.set(0.0, 0.0, 0.0, 255.0)
                else:
                    # First sighting: remember it; len(PickleBoxArr) is the
                    # running total shown in the overlay.
                    PickleBoxArr.append(currPickle)
            else:
                # Before the threshold: black border, not counted.
                obj_meta.rect_params.border_color.set(0.0, 0.0, 0.0, 255.0)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen. The pyds module
        # allocates a buffer for the string; reading display_text back
        # returns a C address — use pyds.get_string() to get the content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Pickles (current Frame)={} Number of total Pickles={}".format(
            frame_number, obj_counter[PGIE_CLASS_ID_PICKLE], len(PickleBoxArr))
        # Record this frame's pickle count for later analysis.
        objectsAtFrame.append(obj_counter[PGIE_CLASS_ID_PICKLE])
        # Now set the offsets where the string should appear.
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size.
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White.
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color: set to Black, fully opaque.
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK