def detect(q1, q2):
    """Worker loop: pull frames from q1 and run SSD object detection.

    q1 yields ``(frame, frame_time)`` tuples; frames older than 0.4 s are
    dropped so detection keeps pace with the producer.  q2 is accepted for
    interface compatibility but unused in this worker.  Runs forever;
    per-frame exceptions are logged and the loop continues.
    """
    model_config = parser.parse_args()
    print("Loading detector")
    model = "ssd_mobilenet_v2_coco"
    conf_th = 0.3
    INPUT_HW = (300, 300)  # SSD network input size (width, height)
    cls_dict = get_cls_dict("coco")
    vis = BBoxVisualization(cls_dict)
    trt_ssd = TrtSSD(model, INPUT_HW)
    print("Loading detector complete")
    if model_config.ui == 1:
        cv2.startWindowThread()
        cv2.namedWindow("window")
    while True:
        try:
            frame, frame_time = q1.get()
            delay = time.time() - frame_time
            if delay > 0.4:
                print("Skipping frame")
                continue
            boxes, confs, clss = trt_ssd.detect(frame, conf_th)
            # Reuse the class dict built once above; the original rebuilt it
            # with get_cls_dict("coco") on every single frame.
            print([cls_dict[c] for c in clss])
            if model_config.ui == 1:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                img = vis.draw_bboxes(frame, boxes, confs, clss)
                # draw_bboxes works in RGB; flip channels back to BGR for imshow
                cv2.imshow('window', img[..., ::-1])
        except Exception:
            traceback.print_exc()
def main():
    """Entry point: grab RTSP frames in a child thread and detect with TrtThread."""
    global THREAD_RUNNING
    cuda.init()  # init pycuda driver
    args = parse_args()
    cap = open_cam_rtsp(args.cam_name, args.cam_password, args.cam_ip)
    if not cap.isOpened():
        sys.exit('Failed to open camera!')
    # Start the frame-grabbing child thread (original comment in Chinese)
    THREAD_RUNNING = True
    th = threading.Thread(target=grab_img, args=(cap, ))
    th.start()
    # Object detection (original comment in Chinese)
    cls_dict = get_cls_dict(args.model.split('_')[-1])
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT SSD Demo for Jetson Nano')
    vis = BBoxVisualization(cls_dict)
    condition = threading.Condition()
    global IMG_HANDLE
    # NOTE(review): IMG_HANDLE is presumably populated asynchronously by
    # grab_img(); if the grabber has not produced a frame yet this may still
    # be its initial value (possibly None) -- confirm against grab_img().
    trt_thread = TrtThread(condition, IMG_HANDLE, args.model, conf_th=0.3)
    trt_thread.start()  # start the child thread
    loop_and_display(condition, vis)
    trt_thread.stop()  # stop the child thread
    # Shut down the frame-grabbing child thread (original comment in Chinese)
    THREAD_RUNNING = False
    th.join()
    cap.release()
    cv2.destroyAllWindows()
def main():
    """Open the camera, build the TensorRT SSD engine and run detection.

    Chooses between a console-only loop and a windowed visualization loop
    based on ``args.use_console``.
    """
    args = parse_args()
    camera = Camera(args)
    camera.open()
    if not camera.is_opened:
        sys.exit('Failed to open camera!')
    detector = TrtSSD(args.model, INPUT_HW)
    labels = get_cls_dict(args.model)
    camera.start()
    if not args.use_console:
        # Windowed mode: draw bounding boxes into an on-screen preview.
        open_window(WINDOW_NAME, args.image_width, args.image_height,
                    'Camera TensorRT SSD Demo for Jetson Nano')
        visualizer = BBoxVisualization(labels)
        loop_and_detect(camera, detector, conf_th=0.3, vis=visualizer)
    else:
        # Headless mode: report detections on stdout only.
        loop_and_detect_console(camera, detector, conf_th=0.3,
                                loop=args.loop, cls_dict=labels)
    camera.stop()
    camera.release()
    cv2.destroyAllWindows()
def main():
    """Capture camera frames, run SSD + MTCNN detection, and drive a robot.

    Each frame is written to the raw recording (``out``), run through the
    neural networks, used to steer the robot over ``serial_port``, then shown
    and written to the annotated recording (``outNN``).  Press 'q' to quit.
    """
    # Parse arguments and get input
    args = parse_args()
    cam = Camera(args)
    if not cam.isOpened():
        raise SystemExit('ERROR: failed to open camera!')
    # Create NN1 and NN2 models and load into memory
    cls_dict = get_cls_dict(args.model.split('_')[-1])
    trt_ssd = TrtSSD(args.model, INPUT_HW)
    mtcnn = TrtMtcnn()
    # Create preview window
    open_window(WINDOW_NAME, 'Camera Preview', cam.img_width, cam.img_height)
    vis = BBoxVisualization(cls_dict)
    # Enter detection mode
    while True:
        img = cam.read()
        out.write(img)  # record the raw frame (module-level video writer)
        # Run neural networks (dead `nn1_results = []` pre-init removed;
        # it was immediately overwritten by this call)
        img, nn1_results, nn2_results, nn3_results = loop_and_detect(
            img, mtcnn, args.minsize, trt_ssd, conf_th=0.3, vis=vis)
        # Communicate to Arduino: steer toward detections, else send "N" (none)
        if nn1_results:  # idiomatic truthiness test instead of `!= []`
            img = robot_drive(img, nn1_results)
        else:
            serial_port.write("N".encode())
            print("N")
        # Display and save annotated output
        cv2.imshow(WINDOW_NAME, img)
        outNN.write(img)
        # User/keyboard input: 'q' quits and closes the recordings
        key = cv2.waitKey(1)
        if key == ord('q'):
            out.release()
            outNN.release()
            break
    # Clean up and exit
    cam.release()
    cv2.destroyAllWindows()
    serial_port.close()
def main():
    """Set up the camera and SSD engine, then run the detect-and-display loop."""
    args = parse_args()
    camera = Camera(args)
    if not camera.get_is_opened():
        raise SystemExit('ERROR: failed to open camera!')
    model_name = args.model
    # The class dictionary is keyed by the model's trailing name component.
    labels = get_cls_dict(model_name.split('_')[-1])
    detector = TrtSSD(model_name, INPUT_HW)
    open_window(WINDOW_NAME, 'Camera TensorRT SSD Demo',
                camera.img_width, camera.img_height)
    visualizer = BBoxVisualization(labels)
    loop_and_detect(camera, detector, conf_th=0.3, vis=visualizer)
    camera.release()
    cv2.destroyAllWindows()
def main():
    """Run SSD + MTCNN detection on a still image file, showing results in a window.

    Repeatedly reads the image, runs both networks, and displays the annotated
    result until the user presses ESC.
    """
    # Parse arguments and get input
    args = parse_args()
    # Create NN1 and NN2 models
    cls_dict = get_cls_dict(args.model.split('_')[-1])
    trt_ssd = TrtSSD(args.model, INPUT_HW)
    mtcnn = TrtMtcnn()
    vis = BBoxVisualization(cls_dict)
    imageNum = 10
    # Enter detection mode
    while True:
        imageName = "/home/nvidia/Pictures/test13.jpg"
        imageNum = imageNum + 1  # kept for parity with numbered-file variants
        img = cv2.imread(imageName)
        # cv2.imread returns None for a missing/unreadable file; the original
        # would then crash inside cv2.imshow with an opaque error.
        if img is None:
            raise SystemExit('ERROR: failed to read image %s' % imageName)
        cv2.imshow(WINDOW_NAME, img)
        # Run neural networks
        img, nn1_results, nn2_results, nn3_results = loop_and_detect(
            img, mtcnn, args.minsize, trt_ssd, conf_th=0.3, vis=vis)
        # Display results
        cv2.imshow(WINDOW_NAME, img)
        # User/keyboard input
        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
    # Clean up and exit
    cv2.destroyAllWindows()
    serial_port.close()
def main(args, cam):
    """Power up the rig, wait for it to settle, then run a high-confidence loop.

    # Arguments
      args: parsed command-line options (model name etc.).
      cam: camera object supplying frames.
    """
    powered = up()
    time.sleep(60)  # allow the hardware a full minute to come up
    if powered:
        cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')
    model_name = args.model
    labels = get_cls_dict(model_name.split('_')[-1])
    detector = TrtSSD(model_name, INPUT_HW)
    cam.start()
    visualizer = BBoxVisualization(labels)
    # Note the stricter 0.9 confidence threshold used by this variant.
    loop_and_detect(cam, detector, conf_th=0.9, vis=visualizer)
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
def main():
    """Spawn a TrtThread detector and display its results in a preview window."""
    args = parse_args()
    camera = Camera(args)
    if not camera.get_is_opened():
        raise SystemExit('ERROR: failed to open camera!')
    cuda.init()  # init pycuda driver
    labels = get_cls_dict(args.model.split('_')[-1])
    open_window(WINDOW_NAME, 'Camera TensorRT SSD Demo',
                camera.img_width, camera.img_height)
    visualizer = BBoxVisualization(labels)
    # Condition variable hands frames/detections between the worker and the
    # display loop.
    cond = threading.Condition()
    worker = TrtThread(cond, camera, args.model, conf_th=0.3)
    worker.start()  # start the child thread
    loop_and_display(cond, visualizer)
    worker.stop()  # stop the child thread
    camera.release()
    cv2.destroyAllWindows()
def main(config):
    """Read frames from a local video file, run SSD detection, publish via MQTT.

    # Arguments
      config: dict with at least 'Mqtt_pub', 'Zone', 'Channel', 'device_id'
              and 'Polygon' keys (schema inferred from usage -- confirm).
    """
    args = parse_args()
    data_sys = Data_sys()
    pub = Publish(host=config['Mqtt_pub'])
    cls_dict = get_cls_dict(args.model.split('_')[-1])
    trt_ssd = TrtSSD(args.model, INPUT_HW)
    img_list = []
    # video
    cap = cv2.VideoCapture('xiasha.avi')
    i = 0
    while cap.isOpened():
        # NOTE(review): cv2.VideoCapture.read() returns (ret, frame); here the
        # boolean flag is bound to `frame` and the image to `img`.  The names
        # are misleading, but the `img is not None` check below keeps it correct.
        frame, img = cap.read()
        if img is not None:
            img_list.append([img])
            # NOTE(review): detect() receives img_list[0], i.e. the [img]
            # wrapper list -- presumably TrtSSD.detect accepts a batch list
            # here; confirm against its implementation.
            result = trt_ssd.detect(img_list[0], conf_th=0.3)
            # print(result)
            data_sys.dataSynchronization(result, img_list, args.model,
                                         ['boundary_intrude', None],
                                         config['Zone'], config['Channel'][0],
                                         config['device_id'], pub,
                                         config['Polygon'])
            img_list = []
            i = i + 1
            print(i)  # frame counter for progress visibility
        else:
            # Out of frames: notify downstream consumers over MQTT.
            msg = json.dumps({
                "nvr_id": config['Zone'],
                "device_id": config['device_id'],
                "channel_id": config['Channel'][0]
            })
            pub.send_msg(topic="zs/ai_spxwfx/rtsp/" + config['Zone'] + "/" +
                         config['Channel'][0],
                         msg=msg,
                         Zone=config['Zone'],
                         device_id=config['device_id'])
'''
def main():
    """Power up, open the camera, and run the windowed detection loop."""
    args = parse_args()
    camera = Camera(args)
    powered = up()
    # time.sleep(60)
    if powered:
        camera.open()
    if not camera.is_opened:
        sys.exit('Failed to open camera!')
    model_name = args.model
    labels = get_cls_dict(model_name.split('_')[-1])
    detector = TrtSSD(model_name, INPUT_HW)
    camera.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT SSD Demo for Jetson Nano')
    visualizer = BBoxVisualization(labels)
    # This variant uses a stricter 0.9 confidence threshold.
    loop_and_detect(camera, detector, conf_th=0.9, vis=visualizer)
    camera.stop()
    camera.release()
    cv2.destroyAllWindows()
def main():
    """Start camera capture plus a TensorRT SSD worker thread, then display results."""
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')
    cls_dict = get_cls_dict(args.model.split('_')[-1])
    cuda.init()  # init pycuda driver
    cam.start()  # let camera start grabbing frames
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT SSD Demo for Jetson Nano')
    vis = BBoxVisualization(cls_dict)
    # The condition variable synchronizes frame/detection hand-off between
    # the TRT worker thread and the display loop below.
    condition = threading.Condition()
    trt_thread = TrtThread(condition, cam, args.model, conf_th=0.3)
    trt_thread.start()  # start the child thread
    loop_and_display(condition, vis)
    trt_thread.stop()  # stop the child thread
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
# Benchmark script: run TensorRT SSD on a single image, save an annotated
# copy, then time repeated detections.
import cv2
import pycuda.autoinit  # This is needed for initializing CUDA driver
from utils.ssd_classes import get_cls_dict
from utils.ssd import TrtSSD
from utils.visualization import BBoxVisualization
from utils.display import open_window, set_display, show_fps
import time
import numpy as np

model = "ssd_mobilenet_v2_coco"
filename = "./dogs.jpg"
conf_th = 0.3
INPUT_HW = (300, 300)  # SSD network input size (width, height)
cls_dict = get_cls_dict("coco")
vis = BBoxVisualization(cls_dict)
img = cv2.imread(filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # model works on RGB ordering
trt_ssd = TrtSSD(model, INPUT_HW)
# Kick start the model.
for _ in range(20):
    boxes, confs, clss = trt_ssd.detect(img, conf_th)
print([get_cls_dict("coco")[c] for c in clss])
img = vis.draw_bboxes(img, boxes, confs, clss)
cv2.imwrite("result.jpg", img[..., ::-1])  # flip back to BGR for imwrite
times = []
# NOTE(review): this timing loop appears truncated in the visible source --
# nothing is ever appended to `times` here; confirm against the full file.
for _ in range(20):
    start_time = time.time()
    boxes, confs, clss = trt_ssd.detect(img, conf_th)
def loop_and_detect(cam, trt_ssd, conf_th, robot, model):
    """Loop, grab images from camera, do object detection, and steer the robot.

    Also polls an RTIMULib IMU and periodically logs FPS together with fused
    orientation and acceleration readings.

    # Arguments
        cam: the camera object (video source).
        trt_ssd: TensorRT SSD detector used for object detection.
        conf_th: confidence/score threshold for object detection.
        robot: motor controller with right()/left()/stop() methods
               (semantics inferred from usage -- confirm).
        model: model name; its last '_'-separated token selects the class dict.
    """
    settings_path = "../dataset/RTIMULib"
    logger.info("Using settings file %s", settings_path + ".ini")
    if not os.path.exists(settings_path + ".ini"):
        logger.error("Settings file does not exist, will be created")
    s = RTIMU.Settings(settings_path)
    imu = RTIMU.RTIMU(s)
    logger.info("IMU Name: %s", imu.IMUName())
    if not imu.IMUInit():
        logger.error("IMU Init Failed")
    else:
        logger.info("IMU Init Succeeded")
    # Standard RTIMULib setup: fusion slerp power plus sensor enables.
    imu.setSlerpPower(0.02)
    imu.setGyroEnable(True)
    imu.setAccelEnable(True)
    imu.setCompassEnable(True)
    poll_interval = imu.IMUGetPollInterval()
    logger.info("Recommended Poll Interval: %f", poll_interval)
    cls_dict = get_cls_dict(model.split('_')[-1])
    fps = 0.0
    counter = 58  # starts near the log threshold so the first report is early
    tic = time.time()
    while True:
        # Snapshot the latest IMU data if a fresh sample is available.
        if imu.IMURead():
            gyro = imu.getIMUData().copy()
        else:
            gyro = []
        img = cam.read()
        if img is not None:
            boxes, confs, clss = trt_ssd.detect(img, conf_th)
            toc = time.time()
            curr_fps = 1.0 / (toc - tic)
            # calculate an exponentially decaying average of fps number
            fps = curr_fps if fps == 0.0 else (fps * 0.9 + curr_fps * 0.1)
            tic = toc
            counter += 1
            # Log FPS (and IMU state when present) roughly once per second.
            if counter > fps:
                logger.info("fps: %f", fps)
                if len(gyro) > 0:
                    accel = gyro["accel"]
                    fusion = gyro["fusionPose"]
                    # fusionPose is roll/pitch/yaw in radians; log in degrees.
                    logger.info(
                        "r: %f p: %f y: %f" % (math.degrees(fusion[0]), math.degrees(
                            fusion[1]), math.degrees(fusion[2])))
                    logger.info("x: %.2f y: %.2f z: %.2f" %
                                (accel[0], accel[1], accel[2]))
                counter = 0
            # compute all detected objects
            detections = []
            for i, (bb, cf, cl) in enumerate(zip(boxes, confs, clss)):
                detections.append({
                    'bbox': bb,
                    'confidence': cf,
                    'label': int(cl)
                })
            if logger.isEnabledFor(logging.DEBUG) and (len(detections)) > 0:
                logger.debug(detections)
            # select detections that match selected class label
            matching_detections = [
                d for d in detections
                if d['label'] in v2_coco_labels_to_capture
                and d['confidence'] > 0.50
            ]
            if len(matching_detections) > 0:
                logger.debug(matching_detections)
            # get detection closest to center of field of view and center bot
            det = closest_detection(matching_detections,
                                    width=cam.img_width,
                                    height=cam.img_height)
            if det is not None:
                center = detection_center(det, cam.img_width, cam.img_height)
                logger.info("center: %s, on object: %s", center,
                            cls_dict[det['label']])
                # center[0] is presumably a normalized horizontal offset;
                # scale it into a turn speed -- confirm detection_center().
                move_speed = 2.0 * center[0]
                if abs(move_speed) > 0.3:
                    if move_speed > 0.0:
                        robot.right(move_speed)
                    else:
                        robot.left(abs(move_speed))
            else:
                robot.stop()