def infer():
    # Image source
    cam = Camera()
    stream = cam.get_stream()
    floorNet = FloorNet()
    # Prediction
    while True:
        start = time.time()
        print("======================================")
        # Infer
        raw_img = stream.get()
        img, mask = floorNet.predict(raw_img)
        # Visualize
        mask = cv.cvtColor(mask, cv.COLOR_GRAY2BGR)
        cv.addWeighted(mask, 0.5, img, 0.5, 0, img)
        cv.imshow('Camera', img)
        # Calculate frames per second (FPS)
        end = time.time()
        print('Total estimated time: {:.4f}'.format(end - start))
        fps = 1 / (end - start)
        print("FPS: {:.1f}".format(fps))
        if cv.waitKey(10) & 0xFF == ord('q'):
            break
    # Clear resources
    cv.destroyAllWindows()
    cam.terminate()

def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')
    trt_ssd = TrtSSD(args.model)
    cam.start()
    # initialize bot
    logger.info('initialize robot')
    robot = Robot()
    logger.info('starting to loop and detect')
    loop_and_detect(cam=cam, trt_ssd=trt_ssd, conf_th=0.3, robot=robot,
                    model=args.model)
    logger.info('cleaning up')
    robot.stop()
    cam.stop()
    cam.release()

def main():
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit('Bad category_num: %d!' % args.category_num)
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')
    cls_dict = get_cls_dict(args.category_num)
    yolo_dim = int(args.model.split('-')[-1])
    if yolo_dim not in (288, 416, 608):
        raise SystemExit('Bad yolo_dim: %d!\n'
                         'Please make sure the model file name contains the '
                         'correct dimension...' % yolo_dim)
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim), args.category_num)
    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT YOLOv3 Demo')
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, trt_yolov3, conf_th=0.3, vis=vis)
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()

def __init__(self, width: int, height: int, title: str, camera: bool = False):
    self.__Daytime = True
    self.__enableCamera = camera
    glut.glutInit()
    glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGBA)
    glut.glutInitWindowSize(width, height)
    glut.glutInitWindowPosition(0, 0)
    glut.glutCreateWindow(title)
    glut.glutReshapeFunc(self.__window_resize)
    glut.glutDisplayFunc(self.__display)
    glut.glutKeyboardFunc(self.__keyboardDown)
    glut.glutKeyboardUpFunc(self.__keyboardUp)
    glut.glutMotionFunc(self.__mouse_look_clb)
    self._create_shader()
    self.setSky(True)
    glEnable(GL_DEPTH_TEST)
    glEnable(GL_BLEND)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    self.__ElementsList = []
    self.__camera = Camera()
    self.__lastX = width / 2
    self.__lastY = height / 2
    self.__first_mouse = True
    self.__left = False
    self.__right = False
    self.__forward = False
    self.__backward = False

def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    # import pdb
    # pdb.set_trace()
    if not cam.is_opened:
        sys.exit('[INFO] Failed to open camera!')
    cls_dict = get_cls_dict('coco')
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))
    print('[INFO] Camera: starting')
    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'TensorRT YOLOv3 Detector')
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, args.runtime, trt_yolov3, conf_th=0.3, vis=vis,
                    window_name=WINDOW_NAME)
    print('[INFO] Program: stopped')
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()

def main():
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit('ERROR: bad category_num (%d)!' % args.category_num)
    if not os.path.isfile('yolo/%s.trt' % args.model):
        raise SystemExit('ERROR: file (yolo/%s.trt) not found!' % args.model)
    cam = Camera(args)
    if not cam.isOpened():
        raise SystemExit('ERROR: failed to open camera!')
    cls_dict = get_cls_dict(args.category_num)
    vis = BBoxVisualization(cls_dict)
    h, w = get_input_shape(args.model)
    trt_yolo = TrtYOLO(args.model, (h, w), args.category_num, args.letter_box)
    open_window(WINDOW_NAME, 'Camera TensorRT YOLO Demo',
                cam.img_width, cam.img_height)
    msg_queue = Queue(maxsize=100)
    # msg_queue.put("0,0,0,-1".encode())
    Thread(target=serArd, args=(msg_queue,)).start()
    loop_and_detect(cam, trt_yolo, msg_queue, conf_th=0.7, vis=vis)
    while True:
        # NOTE: this busy-wait never exits, so the cleanup below is unreachable.
        pass
    cam.release()
    cv2.destroyAllWindows()

def main():
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit('ERROR: bad category_num (%d)!' % args.category_num)
    if not os.path.isfile('yolo/%s.trt' % args.model):
        raise SystemExit('ERROR: file (yolo/%s.trt) not found!' % args.model)
    cam = Camera(args)
    if not cam.isOpened():
        raise SystemExit('ERROR: failed to open camera!')
    cls_dict = get_cls_dict(args.category_num)
    yolo_dim = args.model.split('-')[-1]
    if 'x' in yolo_dim:
        dim_split = yolo_dim.split('x')
        if len(dim_split) != 2:
            raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)
        w, h = int(dim_split[0]), int(dim_split[1])
    else:
        h = w = int(yolo_dim)
    if h % 32 != 0 or w % 32 != 0:
        raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)
    trt_yolo = TrtYOLO(args.model, (h, w), args.category_num)
    open_window(WINDOW_NAME, 'Camera TensorRT YOLO Demo', 640, 480)
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, trt_yolo, conf_th=0.3, vis=vis)
    cam.release()
    cv2.destroyAllWindows()

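# Several of the demos above and below recover the network input size from the
# trailing '-<dim>' or '-<w>x<h>' suffix of the model name. A standalone sketch
# of that parsing logic; the example model names below are assumptions used for
# illustration only.
def parse_yolo_dim(model_name):
    """Return (h, w) parsed from names like 'yolov3-416' or 'yolov4-608x352'."""
    dim = model_name.split('-')[-1]
    if 'x' in dim:
        w, h = (int(v) for v in dim.split('x'))
    else:
        h = w = int(dim)
    if h % 32 != 0 or w % 32 != 0:
        raise ValueError('bad yolo_dim (%s): dimensions must be multiples of 32' % dim)
    return h, w

assert parse_yolo_dim('yolov3-416') == (416, 416)
assert parse_yolo_dim('yolov4-608x352') == (352, 608)
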
def main(): print(f"{datetime.datetime.now().isoformat()} start!", flush=True) args = parse_args() if args.category_num <= 0: raise SystemExit('ERROR: bad category_num (%d)!' % args.category_num) if not os.path.isfile('yolo/%s.trt' % args.model): raise SystemExit('ERROR: file (yolo/%s.trt) not found!' % args.model) cam = Camera(args) if not cam.isOpened(): raise SystemExit('ERROR: failed to open camera!') cls_dict = get_cls_dict(args.category_num) yolo_dim = args.model.split('-')[-1] if 'x' in yolo_dim: dim_split = yolo_dim.split('x') if len(dim_split) != 2: raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim) w, h = int(dim_split[0]), int(dim_split[1]) else: h = w = int(yolo_dim) if h % 32 != 0 or w % 32 != 0: raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim) trt_yolo = TrtYOLO(args.model, (h, w), args.category_num) # open_window( # WINDOW_NAME, 'Camera TensorRT YOLO Demo', # cam.img_width, cam.img_height) vis = BBoxVisualization(cls_dict) loop_and_detect(cam, trt_yolo, conf_th=0.3, vis=vis) cam.release()
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')
    cls_dict = get_cls_dict('coco')
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))
    cam.start()
    # open_window(WINDOW_NAME, args.image_width, args.image_height,
    #             'Camera TensorRT YOLOv3 Demo')
    vis = BBoxVisualization(cls_dict)
    # for video
    # loop_and_detect(cam, trt_yolov3, conf_th=0.3, vis=vis)
    # for single file
    detect_demo(cam, trt_yolov3, conf_th=0.3, vis=vis)
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()

def main():
    args = parse_args()
    # YOLO INIT
    # cls_dict = get_cls_dict('coco')
    cls_dict = get_cls_dict('deepfamily')
    print("classes count : ", len(cls_dict))
    yolo_dim = int(args.model.split('-')[-1])  # 416 or 608
    print("yolo_dim : ", yolo_dim)
    trt_yolov3 = TrtYOLOv3(args.model, (yolo_dim, yolo_dim))
    # CAMERA
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')
    cam.start()
    # CAM-WINDOW
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'DEEPFAMILY PROJECT - TensorRT YOLOv3')
    vis = BBoxVisualization(cls_dict)
    # DETECT-LOOP
    loop_and_detect(cam, trt_yolov3, conf_th=0.95, vis=vis)
    # loop_and_detect(cam, trt_yolov3, conf_th=0.95)
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()

def infer():
    # Image source
    cam = Camera()
    stream = cam.get_stream()
    # Load edge model
    EDGE_MODEL = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '../models/tpu/ohmnilabs_floornet_224_quant_postprocess_edgetpu.tflite')
    inference = Inference(EDGE_MODEL)
    # Prediction
    while True:
        start = time.time()
        print("======================================")
        # Infer
        raw_img = stream.get()
        img, mask = inference.predict(raw_img)
        # Visualize
        mask = cv.cvtColor(mask, cv.COLOR_GRAY2BGR)
        cv.addWeighted(mask, 0.5, img, 0.5, 0, img)
        cv.imshow('Camera', img)
        # Calculate frames per second (FPS)
        end = time.time()
        print('Total estimated time: {:.4f}'.format(end - start))
        fps = 1 / (end - start)
        print("FPS: {:.1f}".format(fps))
        if cv.waitKey(10) & 0xFF == ord('q'):
            break
    # Clear resources
    cv.destroyAllWindows()
    cam.terminate()

def test_prosac(self):
    """PROSAC (progressive sample consensus)."""
    Camera.reset()
    self.__last_frame = None
    print("PROSAC")
    for i in range(1, 30):
        start = time.clock()
        _, frame = Camera.get_frame()
        if self.__last_frame is None:
            self.__last_frame = frame
            continue
        img = cv2.absdiff(frame, self.__last_frame)
        self.__last_frame = frame
        img = cv2.GaussianBlur(img, (5, 5), 2.5)
        img = ImageUtils.morphology(img, cv2.MORPH_DILATE, 16)
        _, img = ImageUtils.binary(img, threshold_type=cv2.THRESH_OTSU)
        # Compute key points and descriptors
        key_points, descriptors = ImageUtils.get_key_points(frame, img)
        matches = ImageUtils.knn_match(self.__target_descriptors, descriptors)
        if len(matches) > 0:
            # Suitable feature matches were found
            points = ImageUtils.get_matches_points(key_points, matches)
            src_key_points = ImageUtils.get_matches_points(
                self.__target_key_points, matches, 1)
            # Reject outliers with PROSAC (cv2.RHO)
            _, mask = cv2.findHomography(src_key_points, points, cv2.RHO)
            if mask is not None:
                points_after = points[mask.ravel() == 1]
                end = time.clock()
                print("%d\t%d\t%d\t%f" % (i, len(points), len(points_after),
                                          end - start))

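# A self-contained sketch of the outlier-rejection step in test_prosac above:
# cv2.findHomography with cv2.RHO (the PROSAC-style robust estimator) returns a
# homography plus an inlier mask, and the mask keeps only the matches that fit.
# The point coordinates below are synthetic and purely illustrative.
import cv2
import numpy as np

src = np.float32([[0, 0], [100, 0], [100, 100], [0, 100], [50, 50], [10, 90]])
dst = src + np.float32([5, 5])   # mostly a clean translation...
dst[-1] = [400, -300]            # ...plus one gross outlier

H, mask = cv2.findHomography(src, dst, cv2.RHO)
if mask is not None:
    inliers = dst[mask.ravel() == 1]
    print("%d of %d matches kept" % (len(inliers), len(dst)))
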
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')
    cls_dict = get_cls_dict(args.model)
    trt_ssd = TrtSSD(args.model, INPUT_HW)
    cam.start()
    if args.use_console:
        loop_and_detect_console(cam, trt_ssd, conf_th=0.3, loop=args.loop,
                                cls_dict=cls_dict)
    else:
        open_window(WINDOW_NAME, args.image_width, args.image_height,
                    'Camera TensorRT SSD Demo for Jetson Nano')
        vis = BBoxVisualization(cls_dict)
        loop_and_detect(cam, trt_ssd, conf_th=0.3, vis=vis)
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()

def main():
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit('ERROR: bad category_num (%d)!' % args.category_num)
    if not os.path.isfile('yolo/%s.trt' % args.model):
        raise SystemExit('ERROR: file (yolo/%s.trt) not found!' % args.model)
    cam = Camera(args)
    if not cam.isOpened():
        raise SystemExit('ERROR: failed to open camera!')
    cls_dict = get_cls_dict(args.category_num)
    vis = BBoxVisualization(cls_dict)
    h, w = get_input_shape(args.model)
    trt_yolo = TrtYOLO(args.model, (h, w), args.category_num, args.letter_box)
    mjpeg_server = MjpegServer(port=args.mjpeg_port)
    print('MJPEG server started...')
    try:
        loop_and_detect(cam, trt_yolo, conf_th=0.3, vis=vis,
                        mjpeg_server=mjpeg_server)
    except Exception as e:
        print(e)
    finally:
        mjpeg_server.shutdown()
        cam.release()

def main():
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    # Ask the TensorFlow logger not to propagate logs to its parent
    # (which causes duplicated logging)
    logging.getLogger('tensorflow').propagate = False

    global args
    args = parse_args()
    logger.info('called with args: %s' % args)

    # build the class (index/name) dictionary from labelmap file
    logger.info('reading label map')
    cls_dict = read_label_map(args.labelmap_file)

    pb_path = './data/{}_trt.pb'.format(args.model)
    log_path = './logs/{}_trt'.format(args.model)
    if args.do_build:
        logger.info('building TRT graph and saving to pb: %s' % pb_path)
        build_trt_pb(args.model, pb_path)

    logger.info('opening camera device/file')
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')

    logger.info('loading TRT graph from pb: %s' % pb_path)
    trt_graph = load_trt_pb(pb_path)

    logger.info('starting up TensorFlow session')
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    # tf_sess = tf.Session(config=tf_config, graph=trt_graph)  -- Vincent
    # Solves: "unable to satisfy explicit device /dev/CPU:0"  -- Vincent
    tf_sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                               log_device_placement=True),
                         graph=trt_graph)

    if args.do_tensorboard:
        logger.info('writing graph summary to TensorBoard')
        write_graph_tensorboard(tf_sess, log_path)

    logger.info('warming up the TRT graph with a dummy image')
    od_type = 'faster_rcnn' if 'faster_rcnn' in args.model else 'ssd'
    dummy_img = np.zeros((720, 1280, 3), dtype=np.uint8)
    _, _, _ = detect(dummy_img, tf_sess, conf_th=.3, od_type=od_type)

    cam.start()  # ask the camera to start grabbing images
    # grab image and do object detection (until stopped by user)
    logger.info('starting to loop and detect')
    vis = BBoxVisualization(cls_dict)
    open_display_window(cam.img_height, cam.img_width)
    result = loop_and_detect(cam, tf_sess, args.conf_th, vis, od_type=od_type)

    logger.info('cleaning up')
    cam.stop()  # terminate the sub-thread in camera
    tf_sess.close()
    cam.release()
    cv2.destroyAllWindows()

def main():
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit(f'ERROR: bad category_num ({args.category_num})!')
    if not os.path.isfile(args.model):
        raise SystemExit(f'ERROR: file {args.model} not found!')

    # Process valid coco json file
    process_valid_json(args.valid_coco)

    if args.write_images:
        if not os.path.exists(args.image_output):
            os.mkdir(args.image_output)

    # Create camera for video/image input
    cam = Camera(args)
    if not cam.get_is_opened():
        raise SystemExit('ERROR: failed to open camera!')

    class_dict = get_cls_dict(args.category_num)
    yolo_dim = (args.model.replace(".trt", "")).split('-')[-1]
    if 'x' in yolo_dim:
        dim_split = yolo_dim.split('x')
        if len(dim_split) != 2:
            raise SystemExit(f'ERROR: bad yolo_dim ({yolo_dim})!')
        w, h = int(dim_split[0]), int(dim_split[1])
    else:
        h = w = int(yolo_dim)
    if h % 32 != 0 or w % 32 != 0:
        raise SystemExit(f'ERROR: bad yolo_dim ({yolo_dim})!')

    # Create yolo
    trt_yolo = TrtYOLO(args.model, (h, w), args.category_num)

    if args.activate_display:
        open_window(WINDOW_NAME, 'Camera TensorRT YOLO Demo',
                    cam.img_width, cam.img_height)
    visual = BBoxVisualization(class_dict)

    # Run detection
    loop_and_detect(cam, trt_yolo, args,
                    confidence_thresh=args.confidence_threshold,
                    visual=visual)

    # Clean up
    cam.release()
    if args.activate_display:
        cv2.destroyAllWindows()

def yolo_detection():
    # dev = cuda.Device(0)
    # ctx = dev.make_context()
    args = parse_args()
    print(args)

    """config assert"""
    if args.category_num <= 0:
        raise SystemExit('ERROR: bad category_num (%d)!' % args.category_num)
    if not os.path.isfile('yolo/darknet/%s.trt' % args.model):
        raise SystemExit('ERROR: file (yolo/darknet/%s.trt) not found!' % args.model)
    cls_dict = get_cls_dict(args.category_num)
    yolo_dim = args.model.split('-')[-1]
    if 'x' in yolo_dim:
        dim_split = yolo_dim.split('x')
        if len(dim_split) != 2:
            raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)
        w, h = int(dim_split[0]), int(dim_split[1])
    else:
        h = w = int(yolo_dim)
    if h % 32 != 0 or w % 32 != 0:
        raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)

    """capture the image"""
    cam = Camera(args)
    if not cam.isOpened():
        raise SystemExit('ERROR: failed to open camera!')

    """deploy the yolo model"""
    trt_yolo = TrtYOLO(args.model, (h, w), args.category_num)
    # open_window(
    #     WINDOW_NAME, 'Camera TensorRT YOLO Demo',
    #     cam.img_width, cam.img_height)

    """detect the insulator using model"""
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, trt_yolo, conf_th=0.3, vis=vis)

    """release the image"""
    cam.release()

def __init__(self):
    self.camera = Camera(camera_suffixes=Config.camera_suffixes)
    self.history = EpisodeHistory()
    self.gripper = Gripper('172.16.0.2', Config.gripper_speed, Config.gripper_force)
    self.robot = Robot('panda_arm', Config.general_dynamics_rel)
    self.saver = Saver(Config.database_url, Config.collection)

    self.current_bin = Config.start_bin
    self.md = MotionData().with_dynamics(1.0)

    self.overall_start = 0
    self.last_after_images: Optional[List[OrthographicImage]] = None

def main():
    camera = Camera(1, True)
    while True:
        blocks, frame = camera.get_block_coords()
        position, angle, frame2 = camera.get_robot_position()
        nav = Navigate()
        if blocks and position:
            block_data = nav.calculate_distances_angles(blocks, position, angle)
            print(nav.add_block_to_rejects(block_data, position, angle))
        cv2.imshow('frame', frame)
        cv2.imshow('frame2', frame2)
        k = cv2.waitKey(5) & 0xFF
        if k == 27:
            break

def main():
    args = parse_args()
    cam = Camera(args)
    if not cam.get_is_opened():
        raise SystemExit('ERROR: failed to open camera!')
    mtcnn = TrtMtcnn()
    open_window(WINDOW_NAME, 'Camera TensorRT MTCNN Demo for Jetson Nano',
                cam.img_width, cam.img_height)
    loop_and_detect(cam, mtcnn, args.minsize)
    cam.release()
    cv2.destroyAllWindows()

def main() -> None:
    aspect_ratio = 16 / 9
    image_width = 256
    image_height = int(image_width / aspect_ratio)
    samples_per_pixel = 20
    max_depth = 10

    world: HittableList = three_ball_scene()

    lookfrom = Point3(13, 2, 3)
    lookat = Point3(0, 0, 0)
    vup = Vec3(0, 1, 0)
    vfov = 20
    dist_to_focus: float = 10
    aperture: float = 0.1
    cam = Camera(lookfrom, lookat, vup, vfov, aspect_ratio, aperture,
                 dist_to_focus)

    print("Start rendering.")
    start_time = time.time()
    n_processer = multiprocessing.cpu_count()
    img_list: List[Img] = Parallel(n_jobs=n_processer)(
        delayed(scan_line)(j, world, cam, image_width, image_height,
                           samples_per_pixel, max_depth)
        for j in range(image_height - 1, -1, -1))
    final_img = Img(image_width, image_height)
    final_img.set_array(np.concatenate([img.frame for img in img_list]))
    end_time = time.time()
    print(f"\nDone. Total time: {round(end_time - start_time, 1)} s.")
    final_img.save("./output.png", True)

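# The renderers above fan out one joblib task per scan line and then stitch the
# per-line buffers back together in dispatch order. A minimal sketch of that
# pattern; render_line is a hypothetical stand-in for scan_line.
import numpy as np
from joblib import Parallel, delayed

def render_line(j, width):
    # Pretend "rendering": one RGB row whose brightness encodes the row index.
    return np.full((1, width, 3), j, dtype=np.uint8)

height, width = 4, 8
rows = Parallel(n_jobs=2)(
    delayed(render_line)(j, width) for j in range(height - 1, -1, -1))
image = np.concatenate(rows)   # Parallel preserves the order of submission
print(image.shape)             # (4, 8, 3)
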
def start(self):
    self._config = self._prepare_config()
    self._camera = Camera(self._config)
    self._camera.start()
    self._game = CatanomousGame(self._config)

    while (True):
        print '1 to init hexagons, 2 for resources/numbers, 3 for pieces, 4 to use bluetooth, 5 to fix numbers'
        token = raw_input("Input: ")
        if token == '1':
            reset = raw_input("Reset?") == 'Y'
            self._handle_hexagon_init(reset, debug=True)
        elif token == '2':
            self._handle_resource_init(debug=True)
        elif token == '3':
            num = raw_input("Num? ")
            self._handle_dice_roll(int(num), debug=True)
        elif token == '5':
            self._hardcode_numbers()
        elif token == '4':
            self._bt_server.start()
            sock = self._bt_server.accept()
            self._listen_for_dice(sock, debug=True)
        elif token == 'X':
            break
    return

def three_ball_scene(aspect_ratio: float, time0: float, time1: float) \
        -> Tuple[BVHNode, Camera]:
    world = HittableList()
    world.add(
        Sphere(Point3(0, 0, -1), 0.5, Lambertian(SolidColor(0.1, 0.2, 0.5))))
    world.add(
        Sphere(Point3(0, -100.5, -1), 100, Lambertian(SolidColor(0.8, 0.8, 0))))
    world.add(Sphere(Point3(1, 0, -1), 0.5, Metal(Color(0.8, 0.6, 0.2), 0.3)))
    world.add(Sphere(Point3(-1, 0, -1), 0.5, Dielectric(1.5)))
    world.add(Sphere(Point3(-1, 0, -1), -0.45, Dielectric(1.5)))
    world_bvh = BVHNode(world.objects, time0, time1)

    lookfrom = Point3(3, 3, 2)
    lookat = Point3(0, 0, -1)
    vup = Vec3(0, 1, 0)
    vfov = 20
    dist_to_focus: float = (lookfrom - lookat).length()
    aperture: float = 0
    cam = Camera(lookfrom, lookat, vup, vfov, aspect_ratio, aperture,
                 dist_to_focus, time0, time1)

    # lookfrom = Point3(13, 2, 3)
    # lookat = Point3(0, 0, 0)
    # vup = Vec3(0, 1, 0)
    # vfov = 20
    # dist_to_focus: float = 10
    # aperture: float = 0.1
    # cam = Camera(
    #     lookfrom, lookat, vup, vfov, aspect_ratio, aperture, dist_to_focus,
    #     time0, time1
    # )

    return world_bvh, cam

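# Going by the signature above, the scene builder returns both the BVH and the
# camera, so a caller unpacks the pair instead of constructing the Camera
# itself. A usage sketch; render() here is a hypothetical placeholder.
# world_bvh, cam = three_ball_scene(16 / 9, time0=0.0, time1=1.0)
# image = render(world_bvh, cam, image_width, image_height)
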
def main() -> None:
    aspect_ratio = 16 / 9
    image_width = 720
    image_height = int(image_width / aspect_ratio)
    samples_per_pixel = 48
    max_depth = 5

    world = random_scene()

    lookfrom = Point3(13, 2, 3)
    lookat = Point3(0, 0, 0)
    vup = Vec3(0, 1, 0)
    vfov = 20
    dist_to_focus = 10
    aperture = 0.1
    cam = Camera(lookfrom, lookat, vup, vfov, aspect_ratio, aperture,
                 dist_to_focus)

    print("Start rendering.")
    start_time = time.time()
    img_list = Parallel(n_jobs=2, verbose=20)(
        delayed(scan_frame)(world, cam, image_width, image_height, max_depth)
        for s in range(samples_per_pixel))
    end_time = time.time()
    print(f"\nDone. Total time: {round(end_time - start_time, 1)} s.")

    final_img = Img(image_width, image_height)
    for img in img_list:
        final_img.write_frame(img)
    final_img.average(samples_per_pixel).gamma(2)
    final_img.save("./output.png", True)

def main():
    args = parse_args()
    labels = np.loadtxt('googlenet/synset_words.txt', str, delimiter='\t')
    cam = Camera(args)
    if not cam.get_is_opened():
        raise SystemExit('ERROR: failed to open camera!')

    # initialize the tensorrt googlenet engine
    net = PyTrtGooglenet(DEPLOY_ENGINE, ENGINE_SHAPE0, ENGINE_SHAPE1)

    open_window(WINDOW_NAME, 'Camera TensorRT GoogLeNet Demo',
                cam.img_width, cam.img_height)
    loop_and_classify(cam, net, labels, args.crop_center)

    cam.release()
    cv2.destroyAllWindows()

class Window(QDialog):
    def __init__(self):
        super().__init__()
        self.ui = Ui_Form()
        self.ui.setupUi(self)

        db = Database(host='localhost', database='eardoor',
                      user='******', password='******', table='records')
        slm = QStringListModel()
        self.ui.records.setModel(slm)
        self.updater = Updater(self.ui, db, slm)

        self.camera = Camera(0, self.ui.camera.width(), self.ui.camera.height())
        self.recognizer = Recognizer()

        self.fps = 50
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(1000 // self.fps)

    def update(self):
        frame = self.camera.capture()
        name, frame = self.recognizer.detect(frame)
        self.updater.update(name, frame)

def main():
    args = parse_args()
    cam = Camera(args)
    if not cam.get_is_opened():
        raise SystemExit('ERROR: failed to open camera!')

    cls_dict = get_cls_dict(args.model.split('_')[-1])
    trt_ssd = TrtSSD(args.model, INPUT_HW)

    open_window(WINDOW_NAME, 'Camera TensorRT SSD Demo',
                cam.img_width, cam.img_height)
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, trt_ssd, conf_th=0.3, vis=vis)

    cam.release()
    cv2.destroyAllWindows()

def main() -> None: aspect_ratio = 16 / 9 image_width = 256 image_height = int(image_width / aspect_ratio) samples_per_pixel = 20 max_depth = 10 world: HittableList = three_ball_scene() lookfrom = Point3(13, 2, 3) lookat = Point3(0, 0, 0) vup = Vec3(0, 1, 0) vfov = 20 dist_to_focus: float = 10 aperture: float = 0.1 cam = Camera(lookfrom, lookat, vup, vfov, aspect_ratio, aperture, dist_to_focus) print("Start rendering.") start_time = time.time() n_processer = multiprocessing.cpu_count() img_list: List[Img] = Parallel(n_jobs=n_processer, verbose=10)( delayed(scan_line)(j, world, cam, image_width, image_height, samples_per_pixel, max_depth) for j in range(image_height - 1, -1, -1)) # # Profile prologue # import cProfile # import pstats # import io # from pstats import SortKey # pr = cProfile.Profile() # pr.enable() # img_list: List[Img] = list() # for j in range(image_height-1, -1, -1): # img_list.append( # scan_line( # j, world, cam, # image_width, image_height, # samples_per_pixel, max_depth # ) # ) # # Profile epilogue # pr.disable() # s = io.StringIO() # sortby = SortKey.CUMULATIVE # ps = pstats.Stats(pr, stream=s).sort_stats(sortby) # ps.print_stats() # print(s.getvalue()) final_img = Img(image_width, image_height) final_img.set_array(np.concatenate([img.frame for img in img_list])) end_time = time.time() print(f"\nDone. Total time: {round(end_time - start_time, 1)} s.") final_img.save("./output.png", True)
def main():
    args = parse_args()
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        sys.exit('Failed to open camera!')
    mtcnn = TrtMtcnn()
    cam.start()
    open_window(WINDOW_NAME, width=640, height=480, title='MTCNN Window')
    detect_faces(cam, mtcnn)
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
    del mtcnn

def start_test(self):
    self._config = self._prepare_config(True)
    self._camera = Camera(self._config)
    self._camera.start()
    self._dice_detector = DiceDetector(self._config)
    self._handle_detect_dice()
    self._config.save_cv_config(self._CONFIG_FILE)

def main():
    args = parse_args()
    labels = np.loadtxt('googlenet/synset_words.txt', str, delimiter='\t')
    cam = Camera(args)
    if not cam.isOpened():
        raise SystemExit('ERROR: failed to open camera!')

    open_window(WINDOW_NAME, 'Camera TensorRT GoogLeNet Demo',
                cam.img_width, cam.img_height)
    condition = threading.Condition()
    trt_thread = TrtGooglenetThread(condition, cam, labels, args.crop_center)
    trt_thread.start()  # start the child thread
    loop_and_display(condition)
    trt_thread.stop()   # stop the child thread

    cam.release()
    cv2.destroyAllWindows()

def shot(w=224, h=224):
    cam = Camera()
    cam.resolution = (w, h)
    cam.vflip, cam.hflip = True, True
    try:
        cam.capture('./test.png', resize=(w, h))
        # cam.capture('./test.png')
    finally:
        cam.close()
    return send_file('./test.png')

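# shot() above returns flask.send_file, so it is presumably registered as a
# Flask view. A sketch of how that wiring might look; the route and app object
# are assumptions, only send_file is implied by the snippet itself.
from flask import Flask

app = Flask(__name__)

@app.route('/shot')
def shot_view():
    # Capture a 224x224 frame and serve the resulting PNG.
    return shot(224, 224)
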
def main(): cam_config = "config/camera.json" config = CVConfig("config/config.json") config.load_cam_config(cam_config) camera = Camera(config) config_json = None try: camera.start() while (True): print "Enter a camera setting to change. (or 'P' to preview, 'X' to quit, 'V' to see current settings, 'S' to save image, 'L' to load config, 'SC' to save config)" print "1 to save hexagon pic, 2 to save resource pic, 3 to save num pic" token = raw_input("Input: ") if token == 'P': get_picture(camera) elif token == 'X': print "Saving config as:", cam_config config.save_cam_config(cam_config) break elif token == 'SC': print "Saving config as:", cam_config config.save_cam_config(cam_config) elif token == 'V': settings = config.get_cam_all() print "Settings for: ", cam_config for key in settings: print key, ": ", settings[key] elif token == 'S': img = get_picture(camera) path = raw_input("Path: ") CVUtils.save_img(img, path) elif token == 'L': cam_config = raw_input('Config path:') config_json = CVConfig.load_json(cam_config) camera._set_config(config_json) get_picture(camera) elif token =='1': img = get_picture(camera, CVConfig.load_json("config/camera_hex.json")) CVUtils.save_img(img, "images/test_hex.png") elif token =='2': img = get_picture(camera) CVUtils.save_img(img, "images/test_resource.png") elif token =='3': img = get_picture(camera, CVConfig.load_json("config/camera_nums.json")) CVUtils.save_img(img, "images/test_nums.png") else: process_token(token, camera) get_picture(camera) finally: camera.stop()
def start_test(self, reset_hexes=False, skip_resources=False, dont_reset=False):
    self._config = self._prepare_config(not dont_reset)
    self._camera = Camera(self._config)
    self._camera.start()
    self._game = CatanomousGame(self._config)

    self._handle_hexagon_init(reset_hexes, debug=True)
    self._config.save_cv_config(self._CONFIG_FILE)
    if not skip_resources:
        self._handle_resource_init(debug=True)
        self._config.save_cv_config(self._CONFIG_FILE)
    self._handle_dice_roll(1, debug=True)
    self._config.save_cv_config(self._CONFIG_FILE)

def start(self):
    self._config = self._prepare_config()
    self._camera = Camera(self._config)
    self._camera.start()
    self._dice_detector = DiceDetector(self._config)
    self._bt_client = None

    while (True):
        print '1 to detect dice roll, 2 to enable bluetooth'
        token = raw_input("Input: ")
        if token == '1':
            self._handle_detect_dice()
        elif token == '2':
            self._bt_client = BluetoothClient()
            self._bt_client.connect(self._SERVER_ADDR)
        elif token == 'X':
            if self._bt_client is not None:
                self._bt_client.send('\n')
            break
    return

def start_auto(self):
    # Wait for button PRESS to connect to server, or HOLD to exit
    self._gpio.led_on()
    if self._gpio.wait_for_press_or_hold(self._BUTTON_PIN) == 'HOLD':
        self._gpio.led_restore()
        return

    self._config = self._prepare_config()
    self._camera = Camera(self._config)
    self._camera.start()
    self._dice_detector = DiceDetector(self._config)
    self._bt_client = BluetoothClient()

    # Connect to BT server
    self._gpio.led_off()
    ret = False
    try:
        ret = self._bt_client.connect(self._SERVER_ADDR)
    except Exception:
        self._gpio.led_blink(3)
        return

    # Green LED on if connected, blink 3 times if failed
    if ret:
        self._gpio.led_on()
    else:
        self._gpio.led_blink(3)
        return

    while (True):
        res = self._gpio.wait_for_press_or_hold(self._BUTTON_PIN)
        if res == 'PRESS':
            self._handle_detect_dice()
        else:
            break

    self._bt_client.send('\n')

class MainController(object):
    _IMAGE_WIDTH = 1200
    _HEX_FILE = "config/hexagons.npy"
    _CONFIG_FILE = "config/config.json"
    _BUTTON_PIN = 17

    def __init__(self):
        self._camera_hex_config = CVConfig.load_json("config/camera_hex.json")
        self._camera_nums_config = CVConfig.load_json("config/camera_nums.json")
        self._bt_server = BluetoothServer()
        self._debugger = Debugger(self._bt_server)
        self._gpio = GPIOController()
        self._card_dealer = None
        return

    def _prepare_config(self, reset=False):
        hex_config = self._HEX_FILE
        camera_config = "config/camera.json"
        cv_config = self._CONFIG_FILE
        config = CVConfig(cv_config, reset)
        config.load_cv_config(cv_config)
        config.load_cam_config("config/camera.json")
        config.load_hex_config(hex_config)
        return config

    def _get_image(self, config=None):
        if self._camera is None:
            print "Camera uninitialized"
        img = self._camera.capture(config)
        return imutils.resize(img, width=self._IMAGE_WIDTH)

    # Called to detect and save hexagons
    def _handle_hexagon_init(self, reset=True, debug=False):
        img = self._get_image(self._camera_hex_config)
        if reset:
            self._config.set_hexagons(None)
        initial = time.time()
        hexes = self._game.init_game(img)
        if reset:
            self._game.save_hexagons(self._HEX_FILE)
        if debug:
            print "Hexagons detected, moving on to resources, time: ", time.time() - initial
            Debugger.show_hexagons(img, hexes, 250)

    # Called to detect resources and numbers
    def _handle_resource_init(self, debug=False):
        num_img = self._get_image(self._camera_nums_config)
        res_img = self._get_image()
        initial = time.time()
        tiles = self._game.new_game(res_img, num_img)
        if debug:
            print "Resources/numbers detected, moving on to pieces, time: ", time.time() - initial
            Debugger.show_resources(num_img, tiles, 250)
        self._debugger.log("Resource/number detection finished.", "RESOURCES")
        self._debugger.log_tiles(tiles)

    def _hardcode_numbers(self):
        img = self._get_image()
        hexagons = self._game._board_detector._hexagons
        mask = np.zeros(img.shape)
        for h in hexagons:
            # NOTE: 'c' is undefined here; presumably the hexagon's contour was intended.
            cv2.drawContours(mask, [c], -1, (255, 255, 255), 2)
            GUIUtils.update_image(mask)
            cv2.waitKey(250)
            num = int(raw_input("Number? "))
            h._number = num
            mask.fill(0)
        self._debugger.log_tiles(hexagons)

    # Called to detect new properties and deal cards based on roll
    def _handle_dice_roll(self, num, debug=False):
        img = self._get_image()
        initial = time.time()
        (detected, instructions) = self._game.dice_rolled(num, img)
        if debug:
            print "Pieces detected, exiting..., time: ", time.time() - initial
            Debugger.show_properties(img, detected, 250)
        self._debugger.log("Finished processing dice roll.", "DICE")
        self._debugger.log_pieces(detected)
        self._debugger.log_instructions(instructions)
        if self._card_dealer is not None:
            self._card_dealer.process_round(instructions)

    def _listen_for_dice(self, sock, debug=False):
        while True:
            num = self._bt_server.receive(sock)
            if num == '\n':
                break
            self._gpio.led_off()
            self._debugger.log("Dice roll received: " + num, "DICE")
            self._handle_dice_roll(int(num), debug)
            self._gpio.led_on()
        self._bt_server.close(sock)
        self._bt_server.close_server()

    def start_auto(self, visual_debug=False):
        self._gpio.init_button(self._BUTTON_PIN)
        # Wait for button PRESS to continue, or HOLD to exit
        self._gpio.led_on()
        if self._gpio.wait_for_press_or_hold(self._BUTTON_PIN) == 'HOLD':
            self._gpio.led_restore()
            return
        self._gpio.led_off()

        try:
            self._card_dealer = CardDealer()
            self._config = self._prepare_config()
            self._camera = Camera(self._config)
            self._camera.start()
            self._game = CatanomousGame(self._config)
            self._bt_server.start()
            self._gpio.led_on()

            # LED - ON = waiting for something, OFF = processing something
            # PRESS to connect debugger, HOLD to skip
            if self._gpio.wait_for_press_or_hold(self._BUTTON_PIN) == 'PRESS':
                self._gpio.led_off()
                self._debugger.accept()
                self._gpio.led_on()
                self._debugger.log("Connected to bluetooth debugger.", "CONNECT")
            else:
                self._debugger.log("No debugger chosen.", "CONNECT")
                self._gpio.led_blink(3)
                time.sleep(1.5)

            try:
                # Wait for dice detector to connect
                self._gpio.led_off()
                self._debugger.log("Waiting for dicebox to connect...", "CONNECT")
                dice_sock = self._bt_server.accept()
                self._gpio.led_on()
                self._debugger.log("Dice box connected.", "CONNECT")

                # Wait for HOLD to indicate reset hexagons, PRESS means load saved
                self._debugger.log("HOLD to reset hexes, PRESS to load.", "INPUT")
                reset_hexagons = self._gpio.wait_for_press_or_hold(self._BUTTON_PIN) == 'HOLD'
                self._debugger.log("Reset hexagons: " + str(reset_hexagons), "HEXAGONS")
                self._gpio.led_off()
                self._handle_hexagon_init(reset_hexagons, debug=visual_debug)
                self._gpio.led_on()
                self._debugger.log("Hexagons detected.", "HEXAGONS")

                # PRESS to initialize resources and numbers
                self._debugger.log("PRESS after setting up resources/numbers", "INPUT")
                self._gpio.wait_for_press(self._BUTTON_PIN)
                self._gpio.led_off()
                self._debugger.log("Starting resource/number detection.", "RESOURCES")
                self._handle_resource_init(debug=visual_debug)
                self._gpio.led_on()

                # Wait for signals from dice detector
                self._debugger.log("Waiting for dice rolls...", "INPUT")
                self._listen_for_dice(dice_sock, debug=visual_debug)
            except Exception as e:
                # Send to debugger and reraise to blink LED
                self._debugger.log(str(e), 'ERROR')
                raise e
        except Exception as e:
            # Blink to indicate an error occurred
            import subprocess
            subprocess.call('sudo sh -c "echo ' + str(e) + ' > /home/pi/logs/pylog.txt"', shell=True)
            while True:
                self._gpio.led_on()
                time.sleep(1)
                self._gpio.led_off()
                time.sleep(1)
        return

    def start(self):
        self._config = self._prepare_config()
        self._camera = Camera(self._config)
        self._camera.start()
        self._game = CatanomousGame(self._config)

        while (True):
            print '1 to init hexagons, 2 for resources/numbers, 3 for pieces, 4 to use bluetooth, 5 to fix numbers'
            token = raw_input("Input: ")
            if token == '1':
                reset = raw_input("Reset?") == 'Y'
                self._handle_hexagon_init(reset, debug=True)
            elif token == '2':
                self._handle_resource_init(debug=True)
            elif token == '3':
                num = raw_input("Num? ")
                self._handle_dice_roll(int(num), debug=True)
            elif token == '5':
                self._hardcode_numbers()
            elif token == '4':
                self._bt_server.start()
                sock = self._bt_server.accept()
                self._listen_for_dice(sock, debug=True)
            elif token == 'X':
                break
        return

    def start_test(self, reset_hexes=False, skip_resources=False, dont_reset=False):
        self._config = self._prepare_config(not dont_reset)
        self._camera = Camera(self._config)
        self._camera.start()
        self._game = CatanomousGame(self._config)

        self._handle_hexagon_init(reset_hexes, debug=True)
        self._config.save_cv_config(self._CONFIG_FILE)
        if not skip_resources:
            self._handle_resource_init(debug=True)
            self._config.save_cv_config(self._CONFIG_FILE)
        self._handle_dice_roll(1, debug=True)
        self._config.save_cv_config(self._CONFIG_FILE)

class DiceController(object):
    _IMAGE_WIDTH = 1200
    _CONFIG_FILE = "config/config.json"
    _SERVER_ADDR = "B8:27:EB:A6:25:50"
    _BUTTON_PIN = 17

    def __init__(self):
        self._bt_client = None
        self._gpio = GPIOController()
        self._gpio.init_button(self._BUTTON_PIN)
        return

    def _prepare_config(self, reset=False):
        camera_config = "config/camera_dice.json"
        cv_config = self._CONFIG_FILE
        config = CVConfig(cv_config, reset)
        config.load_cv_config(cv_config)
        config.load_cam_config(camera_config)
        return config

    def _get_image(self, config=None):
        if self._camera is None:
            print "Camera uninitialized"
        img = self._camera.capture(config)
        return imutils.resize(img, width=self._IMAGE_WIDTH)

    # Called to detect the dice roll and report it over Bluetooth
    def _handle_detect_dice(self):
        img = self._get_image()
        initial = time.time()
        dice_roll = self._dice_detector.detect_roll(img)
        print "Rolled: ", dice_roll
        # print "Time: ", time.time() - initial
        if self._bt_client is not None:
            self._bt_client.send(str(dice_roll))

    def start_auto(self):
        # Wait for button PRESS to connect to server, or HOLD to exit
        self._gpio.led_on()
        if self._gpio.wait_for_press_or_hold(self._BUTTON_PIN) == 'HOLD':
            self._gpio.led_restore()
            return

        self._config = self._prepare_config()
        self._camera = Camera(self._config)
        self._camera.start()
        self._dice_detector = DiceDetector(self._config)
        self._bt_client = BluetoothClient()

        # Connect to BT server
        self._gpio.led_off()
        ret = False
        try:
            ret = self._bt_client.connect(self._SERVER_ADDR)
        except Exception:
            self._gpio.led_blink(3)
            return

        # Green LED on if connected, blink 3 times if failed
        if ret:
            self._gpio.led_on()
        else:
            self._gpio.led_blink(3)
            return

        while (True):
            res = self._gpio.wait_for_press_or_hold(self._BUTTON_PIN)
            if res == 'PRESS':
                self._handle_detect_dice()
            else:
                break

        self._bt_client.send('\n')

    def start(self):
        self._config = self._prepare_config()
        self._camera = Camera(self._config)
        self._camera.start()
        self._dice_detector = DiceDetector(self._config)
        self._bt_client = None

        while (True):
            print '1 to detect dice roll, 2 to enable bluetooth'
            token = raw_input("Input: ")
            if token == '1':
                self._handle_detect_dice()
            elif token == '2':
                self._bt_client = BluetoothClient()
                self._bt_client.connect(self._SERVER_ADDR)
            elif token == 'X':
                if self._bt_client is not None:
                    self._bt_client.send('\n')
                break
        return

    def start_test(self):
        self._config = self._prepare_config(True)
        self._camera = Camera(self._config)
        self._camera.start()
        self._dice_detector = DiceDetector(self._config)
        self._handle_detect_dice()
        self._config.save_cv_config(self._CONFIG_FILE)

def main():
    global camera

    # Game initialization
    pygame.init()
    screen = pygame.display.set_mode(map(int, SCREEN.size))

    # Game's objects initialization
    camera = Camera()
    # Game object. This will handle all the game world and its components
    game = Game()
    menu = Menu(screen, game)  # Game's menu

    while menu.update():
        menu.render()
        for event in pygame.event.get():
            if event.type == QUIT:
                return
            if event.type == KEYUP and event.key == K_ESCAPE:
                return
        pygame.display.flip()

    screen = pygame.display.set_mode(map(int, SCREEN.size),
                                     HWSURFACE | OPENGL | DOUBLEBUF)
    OGLManager.resize(*(map(int, SCREEN.size)))
    OGLManager.init(*(map(int, SCREEN.size)))
    pygame.display.set_caption("¡A ti te va a caer el Axl!")

    # Game!
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                return
            if event.type == KEYUP and event.key == K_ESCAPE:
                return

        # Set FPS
        game.clock.tick(FPS)

        # Updates camera position, if asked.
        camera.handle_keys()

        # Draw the camera
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        gluLookAt(*camera.to_args())
        # print camera

        # Draw the game
        try:
            game.render()  # Renders all game's elements
            game.behave()  # Follows behaviors in game.characters
            game.extra()   # Loads extra content in the game, if needed
        except GameOverException:
            import sys, time
            # wait a few seconds to show the result of the game.
            time.sleep(4)
            # Continue? maybe later
            sys.exit()

        # Flip and display view.
        pygame.display.flip()

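# gluLookAt(*camera.to_args()) above implies Camera.to_args() yields the nine
# floats gluLookAt expects: eye, center and up. A hypothetical sketch of such a
# method (the real Camera class is not shown in this snippet):
class FreeCamera(object):
    def __init__(self):
        self.eye = [0.0, 5.0, 10.0]      # camera position
        self.center = [0.0, 0.0, 0.0]    # point being looked at
        self.up = [0.0, 1.0, 0.0]        # world-up vector

    def to_args(self):
        # eyeX, eyeY, eyeZ, centerX, centerY, centerZ, upX, upY, upZ
        return self.eye + self.center + self.up
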
def start_auto(self, visual_debug=False):
    self._gpio.init_button(self._BUTTON_PIN)
    # Wait for button PRESS to continue, or HOLD to exit
    self._gpio.led_on()
    if self._gpio.wait_for_press_or_hold(self._BUTTON_PIN) == 'HOLD':
        self._gpio.led_restore()
        return
    self._gpio.led_off()

    try:
        self._card_dealer = CardDealer()
        self._config = self._prepare_config()
        self._camera = Camera(self._config)
        self._camera.start()
        self._game = CatanomousGame(self._config)
        self._bt_server.start()
        self._gpio.led_on()

        # LED - ON = waiting for something, OFF = processing something
        # PRESS to connect debugger, HOLD to skip
        if self._gpio.wait_for_press_or_hold(self._BUTTON_PIN) == 'PRESS':
            self._gpio.led_off()
            self._debugger.accept()
            self._gpio.led_on()
            self._debugger.log("Connected to bluetooth debugger.", "CONNECT")
        else:
            self._debugger.log("No debugger chosen.", "CONNECT")
            self._gpio.led_blink(3)
            time.sleep(1.5)

        try:
            # Wait for dice detector to connect
            self._gpio.led_off()
            self._debugger.log("Waiting for dicebox to connect...", "CONNECT")
            dice_sock = self._bt_server.accept()
            self._gpio.led_on()
            self._debugger.log("Dice box connected.", "CONNECT")

            # Wait for HOLD to indicate reset hexagons, PRESS means load saved
            self._debugger.log("HOLD to reset hexes, PRESS to load.", "INPUT")
            reset_hexagons = self._gpio.wait_for_press_or_hold(self._BUTTON_PIN) == 'HOLD'
            self._debugger.log("Reset hexagons: " + str(reset_hexagons), "HEXAGONS")
            self._gpio.led_off()
            self._handle_hexagon_init(reset_hexagons, debug=visual_debug)
            self._gpio.led_on()
            self._debugger.log("Hexagons detected.", "HEXAGONS")

            # PRESS to initialize resources and numbers
            self._debugger.log("PRESS after setting up resources/numbers", "INPUT")
            self._gpio.wait_for_press(self._BUTTON_PIN)
            self._gpio.led_off()
            self._debugger.log("Starting resource/number detection.", "RESOURCES")
            self._handle_resource_init(debug=visual_debug)
            self._gpio.led_on()

            # Wait for signals from dice detector
            self._debugger.log("Waiting for dice rolls...", "INPUT")
            self._listen_for_dice(dice_sock, debug=visual_debug)
        except Exception as e:
            # Send to debugger and reraise to blink LED
            self._debugger.log(str(e), 'ERROR')
            raise e
    except Exception as e:
        # Blink to indicate an error occurred
        import subprocess
        subprocess.call('sudo sh -c "echo ' + str(e) + ' > /home/pi/logs/pylog.txt"', shell=True)
        while True:
            self._gpio.led_on()
            time.sleep(1)
            self._gpio.led_off()
            time.sleep(1)
    return