def get_frame(gst_buffer, batch_id):
    """Return a BGRA copy of the decoded frame for the given batch slot.

    pyds.get_nvds_buf_surface() expects the C address of the Gst buffer,
    which is obtained with hash(gst_buffer).
    """
    surface = pyds.get_nvds_buf_surface(hash(gst_buffer), batch_id)
    # Copy out of the NvBufSurface-backed array so the result outlives the
    # buffer, then move to OpenCV's default BGRA channel order.
    return cv2.cvtColor(np.array(surface, copy=True, order='C'),
                        cv2.COLOR_RGBA2BGRA)
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """OSD sink-pad probe: save every batched frame as a JPEG under a
    per-classification subfolder.

    The classification label is looked up in the module-level
    ``classifications`` sequence, indexed by frame number.
    Returns Gst.PadProbeReturn.OK (or None when no buffer is available).
    """
    frame_number = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer, obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Cast keeps ownership of the underlying memory in C code, so
            # the Python garbage collector leaves it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        classification = classifications[frame_number]
        n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
        # Copy the surface into a numpy array, then convert to OpenCV's
        # default BGRA channel order.
        frame_image = np.array(n_frame, copy=True, order='C')
        frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)
        print(classification)
        # BUG FIX: replaced `if path.exists(...) == False: os.mkdir(...)`
        # with makedirs(exist_ok=True) — avoids the check-then-create race
        # and the non-idiomatic `== False` comparison.
        os.makedirs(f'{folder_name}/{classification}', exist_ok=True)
        cv2.imwrite(f'{folder_name}/{classification}/frame_{frame_number}.jpg',
                    frame_image)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    """Tiler sink-pad probe: run face recognition on confident face
    detections and keep the pickled metadata/encodings databases updated.

    For every face (class_id == 0, confidence > 0.85) the crop is encoded
    with face_recognition; known faces update their metadata entry, unknown
    faces are registered as new visitors when the program action is 'read'.
    Returns Gst.PadProbeReturn.OK (or None when no buffer is available).
    """
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer, obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    # Load the face databases from disk for this invocation.
    global_metadata = biblio.read_pickle(get_metadata_db())
    global_encondings = biblio.read_pickle(get_encodings_db())
    total = len(global_metadata)
    global actions
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Cast keeps ownership of the underlying memory in C code.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_FACE: 0,
            PGIE_CLASS_ID_PLATE: 0,
            PGIE_CLASS_ID_MAKE: 0,
            PGIE_CLASS_ID_MODEL: 0
        }
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            if obj_meta.class_id == 0 and obj_meta.confidence > 0.85:
                # Getting image data using nvbufsurface; the input is the
                # buffer address and batch_id.
                n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                                    frame_meta.batch_id)
                frame_image = crop_and_get_faces_locations(
                    n_frame, obj_meta, obj_meta.confidence)
                index = None
                # Try to encode the cropped face.
                encodings = face_recognition.face_encodings(frame_image)
                if encodings:
                    index, face_encoding = search_encoding_in_encodings(
                        encodings, global_encondings, global_metadata)
                    if index:
                        today_now = datetime.now()
                        global_metadata[index]['last_seen'] = today_now
                        # BUG FIX: 'seen_count' was incremented twice.
                        global_metadata[index]['seen_count'] += 1
                        global_metadata[index]['seen_in_frames'].append(frame_number)
                        # BUG FIX: time_at_door was read before it was ever
                        # assigned; compute it before building the label.
                        time_at_door = today_now - \
                            global_metadata[index]['first_seen_this_interaction']
                        global_metadata[index]['time_at_door'] = time_at_door
                        # BUG FIX: was `metadata[index]` (undefined name).
                        # NOTE(review): face_label is currently unused —
                        # presumably meant for on-screen display; confirm.
                        face_label = (f"{global_metadata[index]['name']} "
                                      f"{int(time_at_door.total_seconds())}s")
                        set_global_metadata(global_metadata)
                        global_encondings.append(face_encoding)
                        # BUG FIX: was set_global_encodings(global_metadata).
                        set_global_encodings(global_encondings)
                    else:
                        program_action = get_action()
                        if program_action == actions['read']:
                            total += 1
                            name = 'visitor_' + str(total)
                            # BUG FIX: `image` and `confidence` were
                            # undefined names.
                            register_new_face(face_encoding, frame_image,
                                              name, obj_meta.confidence,
                                              frame_number)
                            save_image = True
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        if save_image:
            # BUG FIX: `total_visitors` was undefined; use the running
            # visitor total for the file name.
            cv2.imwrite(folder_name + "/stream_" + str(frame_meta.pad_index) +
                        "/frame_" + str(total) + ".jpg", frame_image)
            saved_count["stream_" + str(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    """Tiler sink-pad probe: count objects per class and periodically save
    annotated frames whose detections fall in a borderline confidence band.

    Every 30th save-cycle per stream, detections with
    MIN_CONFIDENCE < confidence < MAX_CONFIDENCE are drawn onto a copy of
    the frame, which is written to <folder_name>/stream_<pad>/frame_<n>.jpg.
    Returns Gst.PadProbeReturn.OK (or None when no buffer is available).
    """
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer.
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the C address
    # of gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # l_frame.data needs a cast to pyds.NvDsFrameMeta. The casting
            # keeps ownership of the underlying memory in the C code, so
            # the Python garbage collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta.
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            # Periodically check for objects with borderline confidence
            # that may be false positives; annotate and save those frames.
            if saved_count["stream_{}".format(
                    frame_meta.pad_index)] % 30 == 0 and (
                    MIN_CONFIDENCE < obj_meta.confidence < MAX_CONFIDENCE):
                if is_first_obj:
                    is_first_obj = False
                    # Getting image data using nvbufsurface; the input is
                    # the buffer address and batch_id. The frame is only
                    # fetched once per frame-meta (first qualifying object).
                    n_frame = pyds.get_nvds_buf_surface(
                        hash(gst_buffer), frame_meta.batch_id)
                    n_frame = draw_bounding_boxes(n_frame, obj_meta,
                                                  obj_meta.confidence)
                    # Convert python array into numpy array format in copy
                    # mode, then into cv2's default color format.
                    frame_copy = np.array(n_frame, copy=True, order='C')
                    frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)
                save_image = True
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        print("Frame Number=", frame_number, "Number of Objects=", num_rects,
              "Vehicle_count=", obj_counter[PGIE_CLASS_ID_VEHICLE],
              "Person_count=", obj_counter[PGIE_CLASS_ID_PERSON])
        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        if save_image:
            img_path = "{}/stream_{}/frame_{}.jpg".format(
                folder_name, frame_meta.pad_index, frame_number)
            cv2.imwrite(img_path, frame_copy)
            saved_count["stream_{}".format(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def osd_sink_pad_buffer_probe(pad, info, u_data): frame_number = 0 #Intiallizing object counter with 0. obj_counter = { PGIE_CLASS_ID_VEHICLE: 0, PGIE_CLASS_ID_PERSON: 0, PGIE_CLASS_ID_BICYCLE: 0, PGIE_CLASS_ID_ROADSIGN: 0 } num_rects = 0 gst_buffer = info.get_buffer() if not gst_buffer: print("Unable to get GstBuffer ") return # Retrieve batch metadata from the gst_buffer # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the # C address of gst_buffer as input, which is obtained with hash(gst_buffer) batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer)) l_frame = batch_meta.frame_meta_list while l_frame is not None: try: # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta # The casting is done by pyds.NvDsFrameMeta.cast() # The casting also keeps ownership of the underlying memory # in the C code, so the Python garbage collector will leave # it alone. frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data) except StopIteration: break frame_number = frame_meta.frame_num num_rects = frame_meta.num_obj_meta l_obj = frame_meta.obj_meta_list is_first_obj = True while l_obj is not None: try: # Casting l_obj.data to pyds.NvDsObjectMeta obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data) except StopIteration: break obj_counter[obj_meta.class_id] += 1 # Cv2 stuff if is_first_obj: is_first_obj = False # Getting Image data using nvbufsurface # the input should be address of buffer and batch_id n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id) #convert python array into numy array format. frame_image = np.array(n_frame, copy=True, order='C') #covert the array into cv2 default color format frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA) #recognize license plate data recognize_license_plate(frame_image, obj_meta, obj_meta.confidence) try: l_obj = l_obj.next except StopIteration: break # Acquiring a display meta object. The memory ownership remains in # the C code so downstream plugins can still access it. 
Otherwise # the garbage collector will claim it when this probe function exits. display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta) display_meta.num_labels = 1 py_nvosd_text_params = display_meta.text_params[0] # Setting display text to be shown on screen # Note that the pyds module allocates a buffer for the string, and the # memory will not be claimed by the garbage collector. # Reading the display_text field here will return the C address of the # allocated string. Use pyds.get_string() to get the string content. py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format( frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON]) # Now set the offsets where the string should appear py_nvosd_text_params.x_offset = 10 py_nvosd_text_params.y_offset = 12 # Font , font-color and font-size py_nvosd_text_params.font_params.font_name = "Serif" py_nvosd_text_params.font_params.font_size = 10 # set(red, green, blue, alpha); set to White py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0) # Text background color py_nvosd_text_params.set_bg_clr = 1 # set(red, green, blue, alpha); set to Black py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0) # Using pyds.get_string() to get display_text as string # print(pyds.get_string(py_nvosd_text_params.display_text)) pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta) fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps() try: l_frame = l_frame.next except StopIteration: break return Gst.PadProbeReturn.OK
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """OSD sink-pad probe: every 30th frame, save the original image and run
    analyze_meta() on each object; overlay counts and instantaneous FPS.

    Uses module globals ``start`` (timestamp of the previous frame) and
    ``prt`` (print the OSD text only once).
    Returns Gst.PadProbeReturn.OK (or None when no buffer is available).
    """
    global start, prt
    frame_number = 0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer, obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        now = time.time()
        try:
            # Cast keeps ownership of the underlying memory in C code.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            img = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                            frame_meta.batch_id)
            img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGR)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        if frame_number % 30 == 0:
            # Save the full (unannotated) frame.
            cv2.imwrite(
                '/home/spypiggy/src/test_images/result/%d_original.jpg'
                % frame_number, img)
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta.
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                if frame_number % 30 == 0:
                    # Removed unused local `obj_user` (was
                    # obj_meta.obj_user_meta_list, never read).
                    analyze_meta(img, frame_number, obj_meta, True)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # BUG FIX: 1 / (now - start) raised ZeroDivisionError whenever two
        # frames arrived within the timer's resolution; guard the division.
        elapsed = now - start
        fps = (1 / elapsed) if elapsed > 0 else 0.0
        # pyds allocates a buffer for the string; use pyds.get_string() to
        # read the content back.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={} FPS={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
            obj_counter[PGIE_CLASS_ID_PERSON], fps)
        # Offsets where the string should appear.
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size.
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); white text on black background.
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        if prt:
            print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    start = now
    prt = False
    return Gst.PadProbeReturn.OK
def tiler_src_pad_buffer_probe(pad, info, u_data):
    """Tiler src-pad probe: copy each frame into the module globals
    ``frameSync`` / ``image1copy`` (BGR), count objects per class, and show
    the frame in an OpenCV window named 'test'.

    Returns Gst.PadProbeReturn.OK (or None when no buffer is available).
    """
    global image1copy, frameSync
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        l_obj = frame_meta.obj_meta_list
        n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                            frame_meta.batch_id)
        frame_image = np.array(n_frame, copy=True, order='C')
        # Drop the alpha channel and publish the frame via the globals.
        frameSync = copy.copy(frame_image[:, :, 0:3])
        image1 = np.uint8(frameSync)
        image1copy = cv2.cvtColor(image1, cv2.COLOR_RGB2BGR)
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        # Removed unused locals from the original: frame_number, num_rects,
        # frameNumber, BufferNumber, Capturing, ObjAvailable, counter and
        # the per-object rect/confidence temporaries — none were read.
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                obj_counter[obj_meta.class_id] += 1
            except StopIteration:
                break
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        cv2.imshow('test', image1copy)
        # BUG FIX: cv2.imshow() never refreshes the window unless the
        # HighGUI event loop is pumped; waitKey(1) is required.
        cv2.waitKey(1)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    """Tiler sink-pad probe: save frames containing confident face
    detections (class_id == 0, confidence > 0.85), annotated with bboxes.

    Frames are written to <folder_name>/stream_<pad>/frame_<n>.jpg.
    Returns Gst.PadProbeReturn.OK (or None when no buffer is available).
    """
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Cast keeps ownership of the underlying memory in the C code,
            # so the Python garbage collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_FACE: 0,
            PGIE_CLASS_ID_PLATE: 0,
            PGIE_CLASS_ID_MAKE: 0,
            PGIE_CLASS_ID_MODEL: 0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta.
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            print(obj_meta.confidence)
            if (obj_meta.class_id == 0 and obj_meta.confidence > 0.85):
                if is_first_obj:
                    is_first_obj = False
                    # Getting image data using nvbufsurface; the input is
                    # the buffer address and batch_id. Fetched once per
                    # frame, on the first qualifying detection.
                    n_frame = pyds.get_nvds_buf_surface(
                        hash(gst_buffer), frame_meta.batch_id)
                    # Convert python array into numpy array format, then
                    # into cv2's default color format.
                    frame_image = np.array(n_frame, copy=True, order='C')
                    frame_image = cv2.cvtColor(frame_image,
                                               cv2.COLOR_RGBA2BGRA)
                save_image = True
                print("Carita : ", obj_counter[obj_meta.class_id])
                # Each qualifying face is drawn onto the same frame copy.
                frame_image = draw_bounding_boxes(frame_image, obj_meta,
                                                  obj_meta.confidence)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        if save_image:
            cv2.imwrite(
                folder_name + "/stream_" + str(frame_meta.pad_index) +
                "/frame_" + str(frame_number) + ".jpg", frame_image)
            saved_count["stream_" + str(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_src_pad_buffer_probe(pad, info, u_data):
    """Tiler src-pad probe: classify detected faces as known/unknown for
    the camera feeding this pad, maintain tracking-absence bookkeeping, and
    persist results to the camera's output database.

    Uses the module global ``fake_frame_number`` as a monotonically
    increasing frame counter across buffers.
    Returns Gst.PadProbeReturn.OK (or None when no buffer is available).
    """
    global fake_frame_number
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    save_image = False
    l_frame = batch_meta.frame_meta_list
    # Per-camera configuration, resolved once from the first frame's pad
    # index. NOTE(review): assumes the batch is non-empty and single-camera
    # per pad — confirm.
    current_pad_index = pyds.NvDsFrameMeta.cast(l_frame.data).pad_index
    camera_id = get_camera_id(current_pad_index)
    program_action = get_action(camera_id)
    delta = get_delta(camera_id)
    default_similarity = get_similarity(camera_id)
    total_visitors, known_face_metadata, known_face_encodings = get_known_faces_db(
        camera_id)
    tracking_absence_dict = get_tracking_absence_dict(camera_id)
    # Tracker object ids seen in this buffer.
    id_set = set()
    while l_frame is not None:
        try:
            # Cast keeps ownership of the underlying memory in the C code,
            # so the Python garbage collector will leave it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        fake_frame_number += 1
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        obj_counter = {
            PGIE_CLASS_ID_FACE: 0,
            PGIE_CLASS_ID_PLATE: 0,
            PGIE_CLASS_ID_MAKE: 0,
            PGIE_CLASS_ID_MODEL: 0
        }
        contador = 0
        while l_obj is not None:
            contador += 1
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta.
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            # Only confident face detections are classified.
            if obj_meta.class_id == 0 and obj_meta.confidence > 0.81:
                # Getting image data using nvbufsurface; the input is the
                # buffer address and batch_id.
                n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                                    frame_meta.batch_id)
                frame_image = crop_and_get_faces_locations(
                    n_frame, obj_meta, obj_meta.confidence)
                if frame_image.size > 0:
                    name = None
                    id_set.add(obj_meta.object_id)
                    # NOTE: known_faces_indexes is only bound here, inside
                    # the detection branch; every later use is guarded by
                    # `id_set and known_faces_indexes`, and id_set is only
                    # populated on this same path.
                    known_faces_indexes = get_known_faces_indexes(camera_id)
                    if classify_to_known_and_unknown(
                            camera_id, frame_image, obj_meta.object_id,
                            name, program_action, obj_meta.confidence,
                            fake_frame_number, delta, default_similarity,
                            known_faces_indexes, known_face_metadata,
                            known_face_encodings):
                        save_image = True
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        saved_count["stream_" + str(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    # Count one absence tick for every tracked id not seen in this buffer.
    if id_set and known_faces_indexes:
        missing_ids = [
            id_item for id_item in known_faces_indexes
            if id_item not in id_set
        ]
        for item in missing_ids:
            if item not in tracking_absence_dict:
                tracking_absence_dict.update({item: 1})
            else:
                tracking_absence_dict[item] += 1
    if save_image:
        if program_action == action_types['read']:
            # 'read' mode persists the full metadata + encodings.
            write_to_db(known_face_metadata, known_face_encodings,
                        get_output_db_name(camera_id))
        else:
            # Otherwise only the found-face metadata is written, with
            # empty placeholder encodings.
            target_metadata = get_found_faces(camera_id)
            empty_encodings = []
            for _x_ in range(len(target_metadata)):
                empty_encodings.append('')
            write_to_db(target_metadata, empty_encodings,
                        get_output_db_name(camera_id))
    if id_set and known_faces_indexes:
        # Drop ids that have been absent for more than 80 ticks.
        known_faces_indexes, tracking_absence_dict = biblio.cleanup_tracking_list(
            known_faces_indexes, tracking_absence_dict, 80)
        set_tracking_absence_dict(camera_id, tracking_absence_dict)
        set_known_faces_indexes(camera_id, known_faces_indexes)
    return Gst.PadProbeReturn.OK
def tiler_sink_pad_buffer_probe(self, pad, info, u_data):
    """Tiler sink-pad probe (ROS2): publish Detection2DArray for all
    detections, Classification2D for car attribute classifiers, and
    periodically save borderline-confidence frames to disk.

    Returns Gst.PadProbeReturn.OK (or None when no buffer is available).
    """
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer, obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Cast keeps ownership of the underlying memory in C code.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }
        # Message for output of detection inference.
        msg = Detection2DArray()
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta.
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
                l_classifier = obj_meta.classifier_meta_list
                # If the object is a car (class 0), publish its attribute
                # classification results (color/make/type by component id).
                if obj_meta.class_id == 0 and l_classifier is not None:
                    msg2 = Classification2D()
                    while l_classifier is not None:
                        result = ObjectHypothesis()
                        try:
                            classifier_meta = pyds.glist_get_nvds_classifier_meta(
                                l_classifier.data)
                        except StopIteration:
                            print('Could not parse MetaData: ')
                            break
                        classifier_id = classifier_meta.unique_component_id
                        l_label = classifier_meta.label_info_list
                        label_info = pyds.glist_get_nvds_label_info(
                            l_label.data)
                        classifier_class = label_info.result_class_id
                        if classifier_id == 2:
                            result.id = class_color[classifier_class]
                        elif classifier_id == 3:
                            result.id = class_make[classifier_class]
                        else:
                            result.id = class_type[classifier_class]
                        result.score = label_info.result_prob
                        msg2.results.append(result)
                        l_classifier = l_classifier.next
                    self.publisher_classification.publish(msg2)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            # Build the detection entry for this object.
            result = ObjectHypothesisWithPose()
            result.id = str(class_obj[obj_meta.class_id])
            result.score = obj_meta.confidence
            left = obj_meta.rect_params.left
            top = obj_meta.rect_params.top
            width = obj_meta.rect_params.width
            height = obj_meta.rect_params.height
            bounding_box = BoundingBox2D()
            bounding_box.center.x = float(left + (width / 2))
            # BUG FIX: center.y was computed as top - height/2; in image
            # coordinates the box center lies BELOW the top edge.
            bounding_box.center.y = float(top + (height / 2))
            bounding_box.size_x = width
            bounding_box.size_y = height
            detection = Detection2D()
            detection.results.append(result)
            detection.bbox = bounding_box
            msg.detections.append(detection)
            # Periodically save frames with borderline-confidence objects
            # that may be false positives, annotated with bboxes.
            if ((saved_count["stream_" + str(frame_meta.pad_index)] % 30 == 0)
                    and (obj_meta.confidence > 0.3
                         and obj_meta.confidence < 0.31)):
                if is_first_obj:
                    is_first_obj = False
                    # Getting image data using nvbufsurface; fetched once
                    # per frame on the first qualifying object.
                    n_frame = pyds.get_nvds_buf_surface(
                        hash(gst_buffer), frame_meta.batch_id)
                    frame_image = np.array(n_frame, copy=True, order='C')
                    frame_image = cv2.cvtColor(frame_image,
                                               cv2.COLOR_RGBA2BGRA)
                save_image = True
                frame_image = draw_bounding_boxes(frame_image, obj_meta,
                                                  obj_meta.confidence)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        # Publish the accumulated detections for this frame.
        self.publisher_detection.publish(msg)
        if save_image:
            cv2.imwrite(
                folder_name + "/stream_" + str(frame_meta.pad_index) +
                "/frame_" + str(frame_number) + ".jpg", frame_image)
            saved_count["stream_" + str(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_src_pad_buffer_probe(pad, info, u_data):
    """Tiler src-pad probe: every 8th frame, send plate detections
    (class_id == 1) to AWS Rekognition's detect_labels and save the
    annotated frame to disk.

    NOTE(review): display_meta is configured but never attached to a frame
    (the nvds_add_display_meta_to_frame call was commented out upstream),
    so no on-screen text is produced — confirm whether that is intended.
    Returns Gst.PadProbeReturn.OK (or None when no buffer is available).
    """
    # Object counter per class (this model detects faces and plates).
    obj_counter = {
        PGIE_CLASS_ID_FACE: 0,
        PGIE_CLASS_ID_PLATES: 0,
    }
    frame_number = 0
    num_rects = 0  # number of objects in the frame
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    # On-screen text configuration (see NOTE above: currently unattached).
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    current_pad_index = pyds.NvDsFrameMeta.cast(l_frame.data).pad_index
    camera_id = get_camera_id(current_pad_index)
    display_meta.num_labels = 1  # number of text labels
    py_nvosd_text_params = display_meta.text_params[0]
    py_nvosd_text_params.x_offset = 100
    py_nvosd_text_params.y_offset = 120
    py_nvosd_text_params.font_params.font_name = "Arial"
    py_nvosd_text_params.font_params.font_size = 10
    py_nvosd_text_params.font_params.font_color.red = 1.0
    py_nvosd_text_params.font_params.font_color.green = 1.0
    py_nvosd_text_params.font_params.font_color.blue = 1.0
    py_nvosd_text_params.font_params.font_color.alpha = 1.0
    py_nvosd_text_params.set_bg_clr = 1
    py_nvosd_text_params.text_bg_clr.red = 0.0
    py_nvosd_text_params.text_bg_clr.green = 0.0
    py_nvosd_text_params.text_bg_clr.blue = 0.0
    py_nvosd_text_params.text_bg_clr.alpha = 1.0
    client = boto3.client('rekognition')
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        ids = set()
        # Inner loop: evaluate each object in the frame.
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta.
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            print(frame_number)
            if ((obj_meta.class_id == 1) and (frame_number % 8 == 0)):
                if is_first_obj:
                    is_first_obj = False
                    # Getting image data using nvbufsurface; fetched once
                    # per frame, converted to cv2's default color format.
                    n_frame = pyds.get_nvds_buf_surface(
                        hash(gst_buffer), frame_meta.batch_id)
                    frame_image = np.array(n_frame, copy=True, order='C')
                    frame_image = cv2.cvtColor(frame_image,
                                               cv2.COLOR_RGBA2BGRA)
                save_image = True
                frame_image = draw_bounding_boxes(frame_image, obj_meta,
                                                  obj_meta.confidence)
                # BUG FIX: Rekognition's Image.Bytes requires an encoded
                # image (JPEG/PNG bytes), not a raw numpy array.
                ok, jpg = cv2.imencode('.jpg', frame_image)
                if ok:
                    response = client.detect_labels(
                        Image={'Bytes': jpg.tobytes()})
                    print('Detected labels in ')
                    for label in response['Labels']:
                        print(label['Name'] + ' : ' +
                              str(label['Confidence']))
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        if save_image:
            print("Entre a guardar imagen")
            print(obj_meta.class_id)
            cv2.imwrite(
                folder_name + "/stream_" + str(frame_meta.pad_index) +
                "/frame_" + str(frame_number) + ".jpg", frame_image)
            saved_count["stream_" + str(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    """Tiler sink-pad probe: redacts detections and samples face crops.

    Faces get an opaque black OSD patch, persons a semi-transparent one.
    Every 10th saved frame per stream, the first face in the frame is
    cropped out of the NvBufSurface and written to disk.
    """
    frame_number = 0
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    # pyds expects the C address of the buffer, obtained via hash().
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    frame_link = batch_meta.frame_meta_list
    while frame_link is not None:
        try:
            # cast() keeps ownership of the underlying memory on the C side.
            frame_meta = pyds.NvDsFrameMeta.cast(frame_link.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        need_first_crop = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BAG: 0,
            PGIE_CLASS_ID_FACE: 0,
        }

        obj_link = frame_meta.obj_meta_list
        while obj_link is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(obj_link.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1

            rect = obj_meta.rect_params
            if obj_meta.class_id == PGIE_CLASS_ID_FACE:
                # Opaque black patch to cover faces.
                rect.border_width = 0
                rect.has_bg_color = 1
                rect.bg_color.red = 0.0
                rect.bg_color.green = 0.0
                rect.bg_color.blue = 0.0
                rect.bg_color.alpha = 1.0
            elif obj_meta.class_id == PGIE_CLASS_ID_PERSON:
                # Semi-transparent black patch over persons.
                rect.border_width = 0
                rect.has_bg_color = 1
                rect.bg_color.red = 0.0
                rect.bg_color.green = 0.0
                rect.bg_color.blue = 0.0
                rect.bg_color.alpha = 0.5

            # Periodic face sampling: only the first face per qualifying
            # frame is actually cropped; later faces just keep save_image set.
            if (saved_count["stream_{}".format(frame_meta.pad_index)] % 10 == 0
                    and obj_meta.class_id == PGIE_CLASS_ID_FACE):
                if need_first_crop:
                    need_first_crop = False
                    # Surface access needs the buffer address and batch_id.
                    surface = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                                        frame_meta.batch_id)
                    surface = crop_object(surface, obj_meta)
                    # Deep-copy out of the NvBufSurface, then RGBA -> BGRA for cv2.
                    frame_copy = np.array(surface, copy=True, order='C')
                    frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA)
                save_image = True

            try:
                obj_link = obj_link.next
            except StopIteration:
                break

        print("Frame Number=", frame_number, "Number of Objects=", num_rects,
              "Face_count=", obj_counter[PGIE_CLASS_ID_FACE],
              "Person_count=", obj_counter[PGIE_CLASS_ID_PERSON])

        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()

        if save_image:
            img_path = "{}/stream_{}/frame_{}.jpg".format(
                folder_name, frame_meta.pad_index, frame_number)
            cv2.imwrite(img_path, frame_copy)
            saved_count["stream_{}".format(frame_meta.pad_index)] += 1

        try:
            frame_link = frame_link.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
def osd_sink_pad_buffer_probe(pad, info, u_data):
    # OSD sink-pad probe: runs license-plate recognition on vehicle
    # detections from the primary GIE (that also carry secondary-classifier
    # results), then overlays a per-frame vehicle/person summary label via
    # NvDsDisplayMeta.
    #
    # Args:
    #     pad: the Gst.Pad the probe is attached to (unused).
    #     info: Gst.PadProbeInfo carrying the Gst.Buffer.
    #     u_data: opaque user data (unused).
    # Returns:
    #     Gst.PadProbeReturn.OK, or bare None when the buffer is missing.
    frame_number = 0
    # NOTE(review): is_first_obj is initialized once per *buffer*, not per
    # frame, so frame_image below is captured only for the first qualifying
    # object in the whole batch and reused for later frames — confirm this
    # is intended.
    is_first_obj = True
    # Intiallizing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    print(l_frame)
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            ## vehicle type detection ##
            # Filter detections by PGIE1 network and don't include RoadSign class
            if (obj_meta.unique_component_id == PGIE_UNIQUE_ID
                    and obj_meta.class_id != PGIE_CLASS_ID_ROADSIGN  # Exclude RoadSign
                    and obj_meta.class_id != PGIE_CLASS_ID_BICYCLE  # Exclude Bicycle
                    and obj_meta.class_id != PGIE_CLASS_ID_PERSON  # Exclude Person
                    ):
                # get secondary classifier data
                l_classifier = obj_meta.classifier_meta_list
                sgie_class = -1  # -1 means "no secondary classifier result"
                if l_classifier is not None:  # and class_id==XXX
                    #apply classifier for a specific class
                    # Only the first label of the first classifier meta is read.
                    classifier_meta = pyds.glist_get_nvds_classifier_meta(
                        l_classifier.data)
                    l_label = classifier_meta.label_info_list
                    label_info = pyds.glist_get_nvds_label_info(l_label.data)
                    sgie_class = label_info.result_class_id
                    print("sgie_class >>>", sgie_class)
                # Cv2 stuff
                if is_first_obj:
                    is_first_obj = False
                    # Getting Image data using nvbufsurface
                    # the input should be address of buffer and batch_id
                    n_frame = pyds.get_nvds_buf_surface(
                        hash(gst_buffer), frame_meta.batch_id)
                    # convert python array into numy array format.
                    frame_image = np.array(n_frame, copy=True, order='C')
                    # covert the array into cv2 default color format
                    frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)
                # recognize license plate data
                alrp_output = lpdetector.alpr_frame(
                    frame_image, obj_meta, obj_meta.confidence, frame_number)
                print("alrp out >>> ", alrp_output)
            obj_counter[obj_meta.class_id] += 1
            #print("obj_meta: gie_id={0}; object_id={1}; class_id={2}; classifier_class={3}".format(obj_meta.unique_component_id,obj_meta.object_id,obj_meta.class_id,sgie_class))
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(
            frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE],
            obj_counter[PGIE_CLASS_ID_PERSON])
        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_src_pad_buffer_probe(pad, info, u_data):
    """Tiler src-pad probe for license-plate capture.

    Tracks how many frames each plate object_id has appeared in
    (persisted via get/set_plate_ids_dict) and, once a plate has been
    seen more than once, writes an annotated full-frame snapshot named
    <timestamp>_<object_id>.jpg into the per-stream folder.
    """
    # Object counter; this detector only reports faces and plates.
    obj_counter = {
        PGIE_CLASS_ID_FACE: 0,
        PGIE_CLASS_ID_PLATES: 0,
    }
    frame_number = 0
    num_rects = 0  # number of objects in the frame

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        # FIX: a pad probe must return a Gst.PadProbeReturn value, not None.
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list

    # ====== On-screen display text setup ======
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    current_pad_index = pyds.NvDsFrameMeta.cast(l_frame.data).pad_index
    camera_id = get_camera_id(current_pad_index)
    plates_info = get_plates_info(camera_id)

    display_meta.num_labels = 1  # number of text labels
    py_nvosd_text_params = display_meta.text_params[0]
    py_nvosd_text_params.x_offset = 100
    py_nvosd_text_params.y_offset = 120
    py_nvosd_text_params.font_params.font_name = "Arial"
    py_nvosd_text_params.font_params.font_size = 10
    py_nvosd_text_params.font_params.font_color.red = 1.0
    py_nvosd_text_params.font_params.font_color.green = 1.0
    py_nvosd_text_params.font_params.font_color.blue = 1.0
    py_nvosd_text_params.font_params.font_color.alpha = 1.0
    py_nvosd_text_params.set_bg_clr = 1
    py_nvosd_text_params.text_bg_clr.red = 0.0
    py_nvosd_text_params.text_bg_clr.green = 0.0
    py_nvosd_text_params.text_bg_clr.blue = 0.0
    py_nvosd_text_params.text_bg_clr.alpha = 1.0

    # Per-camera persistent map: object_id -> {'counter': n, 'items': [frames]}.
    plate_ids = get_plate_ids_dict(camera_id)

    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        # NOTE(review): save_image is never set True below; its branch is dead.
        save_image = False

        # Inner loop over the objects detected in this frame.
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1

            # TODO: use plates_info to only capture plates that are inside the
            # area of interest and entering (not leaving).
            print(plates_info)
            if obj_meta.class_id == 1:  # class 1 == plate
                if obj_meta.object_id not in plate_ids:
                    counter = 1
                    items = []
                else:
                    counter = plate_ids[obj_meta.object_id]['counter'] + 1
                    items = plate_ids[obj_meta.object_id]['items']

                # FIX: operator precedence — the intent is the bbox center,
                # left + width/2, not width + left/2.
                print('X..............',
                      int(obj_meta.rect_params.left +
                          obj_meta.rect_params.width / 2))
                print('Y..............',
                      int(obj_meta.rect_params.top +
                          obj_meta.rect_params.height))

                # Getting Image data using nvbufsurface;
                # the input should be address of buffer and batch_id.
                n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                                    frame_meta.batch_id)
                # Deep-copy into a numpy array, convert to cv2's BGRA,
                # then annotate with the detection bbox/confidence.
                frame_image = np.array(n_frame, copy=True, order='C')
                frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)
                frame_image = draw_bounding_boxes(frame_image, obj_meta,
                                                  obj_meta.confidence)

                items.append(frame_image)
                plate_ids[obj_meta.object_id] = {'counter': counter,
                                                 'items': items}
                set_plate_ids_dict(camera_id, plate_ids)

                # FIX: the original iterated over every tracked plate id but
                # always indexed plate_ids with the *current* object_id, so it
                # wrote the same snapshot once per tracked plate (duplicate
                # timestamped files every frame). Save one snapshot for the
                # current plate instead, once it has been seen more than once.
                if counter > 1:
                    print('................', frame_number, obj_meta.object_id,
                          'photo:', len(items))
                    cv2.imwrite(
                        folder_name + "/stream_" + str(frame_meta.pad_index) +
                        "/" + str(service.get_timestamp()) + "_" +
                        str(obj_meta.object_id) + ".jpg", frame_image)

            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # Update the per-stream FPS counter.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()

        if save_image:  # dead code kept for reference (save_image never set)
            cv2.imwrite(
                folder_name + "/stream_" + str(frame_meta.pad_index) + "/" +
                str(service.get_timestamp()) + "_" +
                str(obj_meta.object_id) + ".jpg", frame_image)
            saved_count["stream_" + str(frame_meta.pad_index)] += 1

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
def tiler_sink_pad_buffer_probe(pad,info,u_data):
    # Tiler sink-pad probe: per-lane vehicle tracking and "optimal frame"
    # extraction.
    #
    # For every vehicle detection whose bbox top lies in the y1..y2 band it
    # updates (or creates) a Vehicle track in the global vehicle_list and
    # assigns a lane name by comparing the bbox x-center against the lane
    # boundary globals (x11..x24). Once a track goes quiet
    # (20 < frame_lag < 100) it picks the buffered frame whose y-center is
    # closest to the band midpoint, crops the vehicle out of
    # rgb_frames_list, logs it to optimal_frame_extraction.txt and writes
    # the crop to disk. It also draws the four lane lines plus a summary
    # label, and appends the current frame to the rgb_frames_list buffer.
    global x11, x12, x13, x14, x21, x22, x23, x24 # lanes
    global vehicle_count
    frame_number=0
    num_rects=0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number=frame_meta.frame_num
        l_obj=frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        is_first_obj = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE:0,
            PGIE_CLASS_ID_PERSON:0,
            PGIE_CLASS_ID_BICYCLE:0,
            PGIE_CLASS_ID_ROADSIGN:0
        }
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                # NOTE(review): the entire tracking body below lives inside
                # this try, so a StopIteration raised anywhere in it silently
                # breaks out of the object loop.
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
                if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE: # vehicle detected
                    # Only track vehicles whose bbox top is inside the band.
                    if obj_meta.rect_params.top >= y1 and obj_meta.rect_params.top <= y2:
                        car_found = 0
                        # Update an existing track matching this object_id.
                        for x in vehicle_list:
                            if x.vehicle_id == obj_meta.object_id:
                                x.frames_list.append(frame_number)
                                x.x_list.append(obj_meta.rect_params.left)
                                x.y_list.append(obj_meta.rect_params.top)
                                x.xc_list.append(int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2)))
                                x.yc_list.append(int(obj_meta.rect_params.top + (obj_meta.rect_params.height / 2)))
                                x.width_list.append(obj_meta.rect_params.width)
                                x.height_list.append(obj_meta.rect_params.height)
                                # Lane from bbox x-center, rightmost boundary first.
                                x_center = int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2))
                                if x_center > min(x13, x23):
                                    x.lane_list.append('shoulder')
                                elif x_center > min(x12, x22):
                                    x.lane_list.append('slow')
                                elif x_center > min(x11, x21):
                                    x.lane_list.append('medium')
                                else:
                                    x.lane_list.append('fast')
                                car_found = 1
                                break
                        if car_found == 0:
                            # First sighting of this object_id: start a new track.
                            frames_temp_list = []
                            frames_temp_list.append(frame_number)
                            x_temp_list = []
                            x_temp_list.append(obj_meta.rect_params.left)
                            y_temp_list = []
                            y_temp_list.append(obj_meta.rect_params.top)
                            xc_temp_list = []
                            xc_temp_list.append(int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2)))
                            yc_temp_list = []
                            yc_temp_list.append(int(obj_meta.rect_params.top + (obj_meta.rect_params.height / 2)))
                            width_temp_list = []
                            width_temp_list.append(obj_meta.rect_params.width)
                            height_temp_list = []
                            height_temp_list.append(obj_meta.rect_params.height)
                            x_center = int(obj_meta.rect_params.left + (obj_meta.rect_params.width / 2))
                            lane_temp_list = []
                            if x_center > min(x13, x23):
                                lane_temp_list.append('shoulder')
                            elif x_center > min(x12, x22):
                                lane_temp_list.append('slow')
                            elif x_center > min(x11, x21):
                                lane_temp_list.append('medium')
                            else:
                                lane_temp_list.append('fast')
                            vehicle_list.append(Vehicle(obj_meta.object_id, frames_temp_list, x_temp_list, y_temp_list, xc_temp_list, yc_temp_list, width_temp_list, height_temp_list, lane_temp_list))
                            vehicle_count += 1
                            print('Vehicle ID = ', obj_meta.object_id, ', Frame Number = ', frame_number, ', Top X = ', obj_meta.rect_params.left,', Top Y = ', obj_meta.rect_params.top, ', Width = ', obj_meta.rect_params.width, ', Height = ', obj_meta.rect_params.height)
                        # Housekeeping pass over all tracks; at most one track
                        # is finalized/deleted per detection (each branch breaks).
                        for i, o in enumerate(vehicle_list):
                            frame_lag = abs(int(o.frames_list[-1]) - int(frame_number))
                            if (frame_lag > 20) and int(len(o.frames_list)) <= 6: # vehicle count rectifier; eliminates false tracking instances
                                print('inadequate number of frames in train, deleting...', '\n')
                                del vehicle_list[i]
                                vehicle_count -= 1
                                break
                            if frame_lag > 20 and frame_lag < 100: # optimal frame extractor
                                # Choose the sample whose y-center is closest
                                # to the middle of the detection band.
                                midpoint = int((y1 + y2) / 2)
                                my_array = np.array(o.yc_list)
                                pos = (np.abs(my_array - midpoint)).argmin()
                                temp_frame_number = o.frames_list[pos]
                                temp_id = o.vehicle_id
                                # Append chosen frame/bbox to a plain-text log.
                                with open('optimal_frame_extraction.txt', 'a') as the_file:
                                    the_file.write(str(o.frames_list[pos]))
                                    the_file.write(' ')
                                    the_file.write(str(o.vehicle_id))
                                    the_file.write(' ')
                                    the_file.write(str(o.width_list[pos]))
                                    the_file.write(' ')
                                    the_file.write(str(o.height_list[pos]))
                                    the_file.write(' ')
                                    the_file.write(str(o.x_list[pos]))
                                    the_file.write(' ')
                                    the_file.write(str(o.y_list[pos]))
                                    the_file.write('\n')
                                # Crop rectangle of the chosen detection.
                                xx1 = int(o.x_list[pos])
                                xx2 = int(o.x_list[pos]) + int(o.width_list[pos])
                                yy1 = int(o.y_list[pos])
                                yy2 = int(o.y_list[pos]) + int(o.height_list[pos])
                                del vehicle_list[i]
                                # Linear scan for the buffered RGB frame with
                                # that frame number.
                                # NOTE(review): if the frame has already been
                                # evicted from rgb_frames_list, finder ends up
                                # == len(rgb_frames_list) and the indexing
                                # below raises IndexError — confirm the
                                # 120-frame buffer always covers the <100-frame
                                # lag window.
                                finder = 0
                                for f in rgb_frames_list:
                                    if f.frame_iterator == temp_frame_number:
                                        break
                                    else:
                                        finder += 1
                                crop = (rgb_frames_list[finder].rgb_image)[yy1:yy2, xx1:xx2]
                                cv2.imwrite(folder_name+"/stream_"+str(0)+"/frame_id="+str(temp_frame_number)+'_'+str(temp_id)+".jpg", crop)
                                break
                            if frame_lag > 100: # vehicle buffer cleaner; eliminates expired tracking instances
                                print('train expired, deleting...', '\n')
                                del vehicle_list[i]
                                break
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj=l_obj.next
            except StopIteration:
                break
        print("Frame Number =", frame_number, "Number of Objects in frame =",num_rects,"Vehicles in frame =",obj_counter[PGIE_CLASS_ID_VEHICLE],"Total Vehicles Detected =",vehicle_count)
        # Get frame rate through this probe
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        #if save_image:
        #    cv2.imwrite(folder_name+"/stream_"+str(frame_meta.pad_index)+"/frame_"+str(frame_number)+".jpg",frame_image)
        #saved_count["stream_"+str(frame_meta.pad_index)]+=1
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Total Vehicles Detected={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], vehicle_count)
        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        # Draw the four green lane-boundary lines. The same display_meta is
        # re-added to the frame after each line, and the trailing assignments
        # copy the endpoint values straight back into the globals (no-ops as
        # written).
        # Draw x11_x21
        py_nvosd_line_params = display_meta.line_params[0]
        py_nvosd_line_params.x1 = x11
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x21
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x11 = py_nvosd_line_params.x1
        x21 = py_nvosd_line_params.x2
        # Draw x12_x22
        py_nvosd_line_params = display_meta.line_params[1]
        py_nvosd_line_params.x1 = x12
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x22
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x12 = py_nvosd_line_params.x1
        x22 = py_nvosd_line_params.x2
        # Draw x13_x23
        py_nvosd_line_params = display_meta.line_params[2]
        py_nvosd_line_params.x1 = x13
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x23
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x13 = py_nvosd_line_params.x1
        x23 = py_nvosd_line_params.x2
        # Draw x14_x24
        py_nvosd_line_params = display_meta.line_params[3]
        py_nvosd_line_params.x1 = x14
        py_nvosd_line_params.y1 = y1
        py_nvosd_line_params.x2 = x24
        py_nvosd_line_params.y2 = y2
        py_nvosd_line_params.line_width = 5
        py_nvosd_line_params.line_color.set(0.0, 1.0, 0.0, 1.0)
        display_meta.num_lines = display_meta.num_lines + 1
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        x14 = py_nvosd_line_params.x1
        x24 = py_nvosd_line_params.x2
        # save current frame to rgb_frames_list
        n_frame=pyds.get_nvds_buf_surface(hash(gst_buffer),frame_meta.batch_id)
        frame_image=np.array(n_frame,copy=True,order='C')
        frame_image=cv2.cvtColor(frame_image,cv2.COLOR_RGBA2BGRA)
        rgb_frames_list.append(RGB_Frame(frame_number, frame_image))
        # Keep the frame buffer bounded.
        # NOTE(review): `del rgb_frames_list[x]` for x in range(20) deletes at
        # shifting indices, so it removes every other element of the first 40
        # entries rather than the 20 oldest frames — confirm this is intended.
        if len(rgb_frames_list) > 120:
            for x in range(20):
                del rgb_frames_list[x]
        try:
            l_frame=l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def tiler_src_pad_buffer_probe(pad, info, u_data):
    """Tiler src-pad probe: saves confident face crops to /tmp/found_elements.

    For every detection of class 0 (face) with confidence > 0.70, crops the
    face region out of the NvBufSurface via crop_and_get_faces_locations and
    writes it as found_multiple_<fake_frame_number>.jpg. Also updates the
    per-stream FPS and saved-frame counters.
    """
    global fake_frame_number
    global total_visitors  # kept for parity with the (commented-out) DB path

    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        # FIX: a pad probe must return a Gst.PadProbeReturn value, not None.
        return Gst.PadProbeReturn.OK

    # pyds expects the C address of the buffer, obtained with hash().
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    save_image = False
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # cast() keeps ownership of the underlying memory on the C side.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        fake_frame_number += 1
        frame_number = frame_meta.frame_num
        l_obj = frame_meta.obj_meta_list
        num_rects = frame_meta.num_obj_meta
        obj_counter = {
            PGIE_CLASS_ID_FACE: 0,
            PGIE_CLASS_ID_PLATE: 0,
            PGIE_CLASS_ID_MAKE: 0,
            PGIE_CLASS_ID_MODEL: 0
        }

        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1

            # class 0 == face; only keep confident detections.
            if obj_meta.class_id == 0 and obj_meta.confidence > 0.70:
                n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                                    frame_meta.batch_id)
                frame_image = crop_and_get_faces_locations(
                    n_frame, obj_meta, obj_meta.confidence)
                if frame_image.size > 0:
                    try:
                        cv2.imwrite(
                            '/tmp/found_elements/found_multiple_' +
                            str(fake_frame_number) + ".jpg", frame_image)
                    except (cv2.error, OSError) as e:
                        # FIX: the old handler swallowed every exception and
                        # then called quit(), killing the whole pipeline on a
                        # single failed write (it also re-checked
                        # frame_image.size == 0, which the guard above makes
                        # unreachable). Log and keep streaming instead.
                        print('failed to write face crop:', e)
                #if classify_to_known_and_unknown(frame_image, obj_meta.confidence, obj_meta.object_id, fake_frame_number):
                #    save_image = True
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        #print("Frame Number=", frame_number, "Number of Objects=",num_rects,"Face_count=",obj_counter[PGIE_CLASS_ID_FACE],"Person_count=",obj_counter[PGIE_CLASS_ID_PERSON])
        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        saved_count["stream_" + str(frame_meta.pad_index)] += 1
        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    # save_image is only ever set by the commented-out classification path.
    if save_image:
        write_to_db()
    return Gst.PadProbeReturn.OK
def tiler_sink_pad_buffer_probe(pad, info, u_data):
    """Tiler sink-pad probe sampling borderline-confidence detections.

    Detections with confidence in (0.3, 0.31) are annotated with a
    confidence box and the frame is written to disk once per sampling
    interval; every sampling_rate-th frame additionally gets detection
    bboxes drawn. Per-frame statistics are logged at info/debug level.
    """
    frame_number = 0
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        log.critical('Error: Unable to get GstBuffer')
        return

    # pyds expects the C address of the buffer, obtained via hash().
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))

    frame_link = batch_meta.frame_meta_list
    while frame_link is not None:
        try:
            # cast() keeps ownership of the underlying memory on the C side.
            frame_meta = pyds.NvDsFrameMeta.cast(frame_link.data)
        except StopIteration:
            break

        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        stream_key = "stream_" + str(frame_meta.pad_index)
        first_grab_pending = True
        save_image = False
        obj_counter = {
            PGIE_CLASS_ID_VEHICLE: 0,
            PGIE_CLASS_ID_PERSON: 0,
            PGIE_CLASS_ID_BICYCLE: 0,
            PGIE_CLASS_ID_ROADSIGN: 0
        }

        obj_link = frame_meta.obj_meta_list
        while obj_link is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(obj_link.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1

            # Borderline confidence window that may indicate false positives.
            borderline = 0.3 < obj_meta.confidence < 0.31
            if saved_count[stream_key] % sampling_rate == 0 and borderline:
                if first_grab_pending:
                    # Grab the frame pixels once per frame, then convert to
                    # cv2's BGRA layout.
                    first_grab_pending = False
                    surface = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                                        frame_meta.batch_id)
                    frame_image = np.array(surface, copy=True, order='C')
                    frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)
                save_image = True
                frame_image = add_confidence_box(frame_image, obj_meta,
                                                 obj_meta.confidence)

            # Periodically draw plain detection boxes on a fresh frame copy.
            if frame_number % sampling_rate == 0:
                surface = pyds.get_nvds_buf_surface(hash(gst_buffer),
                                                    frame_meta.batch_id)
                frame_image = np.array(surface, copy=True, order='C')
                frame_image = draw_bounding_boxes(frame_image, obj_meta)

            try:
                obj_link = obj_link.next
            except StopIteration:
                break

        # Log statistics: info when there were objects or every 100th frame,
        # debug otherwise.
        message = (
            f'- Frame Number: {frame_number}, Number of Objects: {num_rects}, '
            f'Vehicle_count: {obj_counter[PGIE_CLASS_ID_VEHICLE]}, '
            f'Person_count={obj_counter[PGIE_CLASS_ID_PERSON]}'
        )
        if frame_number % 100 == 0 or num_rects:
            log.info(message)
        else:
            log.debug(message)

        # Get frame rate through this probe.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()

        if save_image:
            cv2.imwrite(
                f'{folder_name}/stream_{frame_meta.pad_index}/frame_{frame_number}.jpg',
                frame_image)
            saved_count[stream_key] += 1

        try:
            frame_link = frame_link.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK