def startCamera(config):
    """Start automatic capture for *config* and return a sink for it.

    Note: despite the name, the returned object is the CvSink obtained
    from getVideo(), not the camera itself.
    """
    inst = CameraServer.getInstance()
    cam = inst.startAutomaticCapture(name=config.name, path=config.path)
    cam.setConfigJson(json.dumps(config.config))
    #CameraServer.getInstance().setConfigJson(json.dumps(config.config))
    # Hand back a sink that grabs frames from the camera just started.
    return inst.getVideo()
def init(config):
    """Initialise the module-level camera, sink and output stream.

    Reads the module globals ``height`` and ``width`` (defined elsewhere
    in this file — not visible here).
    """
    global camera
    global cvsink
    global outputStreamStd
    camera = CameraServer.getInstance().startAutomaticCapture(name="Vision", path=config["path"])
    # NOTE(review): cscore's setResolution signature is (width, height);
    # the arguments here are passed as (height, width) — confirm the
    # globals are not simply named the other way around.
    camera.setResolution(height, width)
    cvsink = CameraServer.getInstance().getVideo()
    # putVideo(name, width, height) — same (height, width) ordering as above.
    outputStreamStd = CameraServer.getInstance().putVideo("Gray", height, width)
def valueChanged(table, key, value, isNew):
    """NetworkTables listener: switch the dual feed on "Camera State"."""
    print("valueChanged: key: '%s'; value: %s; isNew: %s" % (key, value, isNew))
    if key != "Camera State":
        return
    direction = value.lower()
    if direction == "forward" and len(cameras) >= 2:
        # Front-facing camera becomes the active source.
        cvSink = CameraServer.getInstance().getVideo(cameras[0])
        dualServer.setSource(cameras[0])
    elif direction == "backward" and len(cameras) >= 2:
        # Rear-facing camera becomes the active source.
        cvSink = CameraServer.getInstance().getVideo(cameras[1])
        dualServer.setSource(cameras[1])
def __init__(self, source=None, stream_name="Camera0", res=None, network_table=None):
    """Set up the output video stream and thread state for a camera display.

    res: optional (width, height) pair; when omitted, the dimensions are
    taken from the source object's own width/height attributes.
    """
    self.logger = logging.getLogger("CSDisplay")
    self.stream_name = stream_name
    self.source = source
    if res is None:
        # Fall back to the source's native dimensions.
        self.output_width = int(self.source.width)
        self.output_height = int(self.source.height)
    else:
        self.output_width, self.output_height = res[0], res[1]
    server = CameraServer.getInstance()
    self.outstream = server.putVideo(self.stream_name, self.output_width, self.output_height)
    # Frame handoff state shared with the run loop.
    self._frame = None
    self._new_frame = False
    self.net_table = network_table
    self.stopped = True
    threading.Thread.__init__(self)
def main():
    """Stream camera frames straight through to a CameraServer output."""
    width, height, fps = 544, 306, 30
    cs = CameraServer.getInstance()
    cs.enableLogging()
    camera = cs.startAutomaticCapture()
    camera.setResolution(width, height)
    camera.setFPS(fps)
    sink = cs.getVideo()
    stream = cs.putVideo("Name", width, height)
    # Preallocate the buffer once; grabFrame fills it in place.
    frame = np.zeros(shape=(height, width, 3), dtype=np.uint8)
    while True:
        timestamp, frame = sink.grabFrame(frame)
        if timestamp == 0:
            # Grab timed out or failed; report and retry.
            stream.notifyError(sink.getError())
            continue
        # If vision processing is ever needed, it goes here.
        stream.putFrame(frame)
def __init__(self, test_img=None, test_video=None, test_display=False):
    """Initialise either a test source (image/video) or the live cameras.

    test_img: a single frame to process instead of a camera (test mode).
    test_video: a video source to process instead of a camera (test mode).
    test_display: whether to display output while testing.

    Fix: the None checks used the anti-idiom ``not type(x) == type(None)``;
    replaced with the equivalent, idiomatic ``x is not None``.
    """
    self.testing = False
    self.frame = None
    self.video = None
    self.test_display = test_display
    if test_img is not None:
        self.testing = True
        self.frame = test_img
    elif test_video is not None:
        self.testing = True
        self.video = test_video
    else:
        # Only pull in cscore when actually talking to hardware.
        from cscore import CameraServer
        self.cs = CameraServer.getInstance()
        self.camera_configs = self.read_config_JSON()
        self.cameras = [
            self.start_camera(camera_config)
            for camera_config in self.camera_configs
        ]
        for prop in self.cameras[0].enumerateProperties():
            print(prop.getName())
        self.sinks = [
            self.cs.getVideo(camera=camera) for camera in self.cameras
        ]
        self.source = self.cs.putVideo("Driver_Stream", FRAME_WIDTH, FRAME_HEIGHT)
        # NOTE(review): numpy image buffers are conventionally
        # (height, width, 3); this allocates (FRAME_WIDTH, FRAME_HEIGHT, 3)
        # — confirm this ordering is intended.
        self.frame = np.zeros(shape=(FRAME_WIDTH, FRAME_HEIGHT, 3), dtype=np.uint8)
def run():
    """Grab frames and run the team's colour pipeline while in autonomous.

    Fix: the original tested the local ``state`` before it was ever
    assigned, raising NameError on the first loop iteration; the state is
    now fetched from NetworkTables at the top of each iteration before
    being tested.
    """
    n = nt.NetworkTables.getTable("SmartDashboard")
    cs = CameraServer.getInstance()
    camera = cs.startAutomaticCapture()
    camera.setResolution(320, 240)
    cvSink = cs.getVideo()
    # Preallocated frame buffer, filled in place by grabFrame.
    img = np.zeros(shape=(240, 320, 3), dtype=np.uint8)
    pipeline = None
    # team: 1 -> blue, 0 -> red; any other value leaves pipeline None.
    team = n.getNumber("team", 2)
    if team == 1:
        pipeline = BluePipeline()
    elif team == 0:
        pipeline = RedPipeline()
    while pipeline is not None:
        time, img = cvSink.grabFrame(img)
        if time == 0:
            # Frame grab failed; try again.
            continue
        state = n.getString("state")
        if state == "auto":
            if pipeline.process(img):
                n.putBoolean("dump", True)
        else:
            # Left autonomous: stop processing.
            pipeline = None
def startSwitchedCamera(config):
    """Start running the switched camera."""
    print("Starting switched camera '{}' on {}".format(config.name, config.key))
    server = CameraServer.getInstance().addSwitchedCamera(config.name)

    def listener(fromobj, key, value, isNew):
        # A numeric value selects a camera by index; a string selects by name.
        if isinstance(value, float):
            index = int(value)
            if 0 <= index < len(cameras):
                server.setSource(cameras[index])
        elif isinstance(value, str):
            for index, cam_config in enumerate(cameraConfigs):
                if value == cam_config.name:
                    server.setSource(cameras[index])
                    break

    # Fire immediately with the current value, then on every new/updated value.
    flags = (
        ntcore.constants.NT_NOTIFY_IMMEDIATE
        | ntcore.constants.NT_NOTIFY_NEW
        | ntcore.constants.NT_NOTIFY_UPDATE
    )
    NetworkTablesInstance.getDefault().getEntry(config.key).addListener(listener, flags)
    return server
def main():
    """Relay frames from the first USB camera to a dashboard stream."""
    cs = CameraServer.getInstance()
    cs.enableLogging()
    # First USB camera on the system; uses the module globals width/height.
    camera = cs.startAutomaticCapture()
    camera.setResolution(width, height)
    sink = cs.getVideo()
    # CvSource that pushes frames back to the dashboard.
    stream = cs.putVideo("Rectangle", width, height)
    # Allocating new images is expensive — preallocate once and reuse.
    frame = np.zeros(shape=(height, width, 3), dtype=np.uint8)
    while True:
        timestamp, frame = sink.grabFrame(frame)
        if timestamp == 0:
            # Frame grab failed: propagate the error and skip this frame.
            stream.notifyError(sink.getError())
            continue
        stream.putFrame(frame)
def main():
    """Run the GRIP shooter pipeline on camera frames and publish results."""
    cs = CameraServer.getInstance()
    cs.enableLogging()
    camera = cs.startAutomaticCapture()
    camera.setResolution(HORIZONTAL_RES, VERTICAL_RES)
    NetworkTables.initialize(server='10.56.54.2')
    pipeline = GripPipeline()
    sink = cs.getVideo()
    stream = cs.putVideo('Shooter', HORIZONTAL_RES, VERTICAL_RES)
    # Preallocated frame buffer, filled in place by grabFrame.
    frame = np.zeros(shape=(VERTICAL_RES, HORIZONTAL_RES, 3), dtype=np.uint8)
    while True:
        timestamp, frame = sink.grabFrame(frame)
        if timestamp == 0:
            stream.notifyError(sink.getError())
            continue
        pipeline.process(frame)
        contours = pipeline.find_contours_output
        if len(contours) > 0:
            # Only the largest contour is of interest.
            extra_process(max(contours, key=cv2.contourArea))
        stream.putFrame(pipeline.find_contours_image)
def startCamera(config):
    """Start running the camera."""
    # Announce which device is being brought up (useful in service logs).
    print("Starting camera '{}' on {}".format(config.name, config.path))
    inst = CameraServer.getInstance()
    camera = UsbCamera(config.name, config.path)
    # return_server=True gives back the MjpegServer so it can be configured below.
    server = inst.startAutomaticCapture(camera=camera, return_server=True)
    #server.setResolution(640, 360)
    camera.setConfigJson(json.dumps(config.config))
    # Special handling for the vision camera on /dev/video0.
    # NOTE(review): the source's original indentation is ambiguous — the
    # property sets below are assumed to belong inside this branch because
    # they target the same v4l2 controls as the commented-out v4l2-ctl
    # calls for /dev/video0; confirm against the original file.
    if config.path == "/dev/video0":
        # Keep the device open even when no clients are connected.
        camera.setConnectionStrategy(VideoSource.ConnectionStrategy.kKeepOpen)
        #camera.setExposureManual(1)
        #camera.setWhiteBalanceManual(50)
        #os.system("v4l2-ctl -d /dev/video0 -c auto_exposure=1")
        #os.system("v4l2-ctl -d /dev/video0 -c exposure_time_absolute=100")
        #os.system("")
        # Fixed manual exposure + high contrast for retroreflective targeting.
        camera.getProperty("auto_exposure").set(1)
        camera.getProperty("exposure_time_absolute").set(50)
        camera.getProperty("contrast").set(100)
        #camera.getProperty("white_balance_temperature_auto").set(0)
        #for prop in camera.enumerateProperties():
        #print(prop.getName())
    # Optional per-stream (MJPEG server) configuration.
    if config.streamConfig is not None:
        server.setConfigJson(json.dumps(config.streamConfig))
    return camera
def main():
    """Serve the first USB camera via the CameraServer and block forever."""
    server = CameraServer.getInstance()
    server.enableLogging()
    camera = server.startAutomaticCapture(dev=0)
    server.waitForever()
def main():
    """Capture from the first USB camera and republish it to the dashboard."""
    cs = CameraServer.getInstance()
    cs.enableLogging()
    camera = cs.startAutomaticCapture()
    camera.setResolution(320, 240)
    # CvSink: captures frames from the camera.
    sink = cs.getVideo()
    # CvSource: sends (optionally processed) frames back to the dashboard.
    stream = cs.putVideo("Name", 320, 240)
    # Allocating new images is expensive — preallocate once and reuse.
    frame = np.zeros(shape=(240, 320, 3), dtype=np.uint8)
    while True:
        timestamp, frame = sink.grabFrame(frame)
        if timestamp == 0:
            # Grab failed: forward the error and skip this iteration.
            stream.notifyError(sink.getError())
            continue
        # Image processing logic would go here.
        stream.putFrame(frame)
def startCamera(config):
    """Start a camera from *config*; return (camera, sink, vision source)."""
    print("Starting camera '{}' on {}".format(config.name, config.path))
    inst = CameraServer.getInstance()
    camera = UsbCamera(config.name, config.path)
    server = inst.startAutomaticCapture(camera=camera, return_server=True)
    camera.setConfigJson(json.dumps(config.config))
    # Keep the device open even when no clients are connected.
    camera.setConnectionStrategy(VideoSource.ConnectionStrategy.kKeepOpen)
    if config.streamConfig is not None:
        server.setConfigJson(json.dumps(config.streamConfig))
    sink = inst.getVideo()
    # TODO: pull the vision-stream dimensions from the config file (160x120).
    if config.name == "PS3 Eyecam":
        source = inst.putVideo(config.name + " Vision", 320, 240)
    else:
        source = inst.putVideo(config.name + " Vision", 160, 120)
    print(config.height)
    return camera, sink, source
def main():
    """Run the GRIP retroreflective pipeline on 640x480 camera frames."""
    print('Initializing NetworkTables')
    NetworkTables.initialize(server='10.5.55.2')
    cs = CameraServer.getInstance()
    cs.enableLogging()
    camera = cs.startAutomaticCapture()
    camera.setResolution(640, 480)
    # camera.setExposureManual(15)
    sink = cs.getVideo()
    stream = cs.putVideo("Port Targeting", 320, 240)
    # Preallocated frame buffer, filled in place by grabFrame.
    frame = np.zeros(shape=(480, 640, 3), dtype=np.uint8)
    print('Creating video capture')
    print('Creating pipeline')
    pipeline = GripRetroreflectivePipeline()
    print('Running pipeline')
    while True:
        timestamp, frame = sink.grabFrame(frame)
        if timestamp == 0:
            stream.notifyError(sink.getError())
            continue
        pipeline.process(frame)
        processed = extra_processing(pipeline, frame)
        stream.putFrame(processed)
    print('Capture closed')
def main():
    """Stream the camera with a fixed white rectangle drawn on each frame."""
    cs = CameraServer.getInstance()
    cs.enableLogging()
    camera = cs.startAutomaticCapture()
    camera.setResolution(640, 360)
    # CvSink: captures frames from the camera.
    sink = cs.getVideo()
    # CvSource: sends the annotated frames back to the dashboard.
    stream = cs.putVideo("Rectangle", 320, 240)
    # Allocating new images is expensive — preallocate once and reuse.
    frame = np.zeros(shape=(240, 320, 3), dtype=np.uint8)
    while True:
        timestamp, frame = sink.grabFrame(frame)
        if timestamp == 0:
            # Grab failed: forward the error and skip this iteration.
            stream.notifyError(sink.getError())
            continue
        # Draw a white rectangle overlay, then publish the frame.
        cv2.rectangle(frame, (100, 100), (300, 300), (255, 255, 255), 5)
        stream.putFrame(frame)
def start_camera():
    """Configure the hatch-panel camera; return (CameraServer, UsbCamera).

    Fix: the camera config JSON was assembled by hand with string
    concatenation, which is error-prone (quoting, commas); it is now built
    as a dict and serialised with json.dumps, producing equivalent JSON.
    """
    import json  # local import: only this function needs it here

    inst = CameraServer.getInstance()
    camera = UsbCamera('Hatch Panels', '/dev/video0')
    # with open("cam.json", encoding='utf-8') as cam_config:
    #     camera.setConfigJson(json.dumps(cam_config.read()))
    # NOTE(review): setResolution expects (width, height); the constants
    # are passed as (VERTICAL_RES, HORIZONTAL_RES) here and mapped the
    # same way in the JSON below — confirm the constants are not simply
    # named the other way around.
    camera.setResolution(VERTICAL_RES, HORIZONTAL_RES)
    camera.setFPS(FPS)
    camera.setBrightness(10)
    config = {
        "fps": FPS,
        "height": HORIZONTAL_RES,
        "pixel format": "mjpeg",
        "properties": [
            {"name": "brightness", "value": 0},
            {"name": "contrast", "value": 100},
            {"name": "saturation", "value": 40},
        ],
        "width": VERTICAL_RES,
    }
    camera.setConfigJson(json.dumps(config))
    # Keep the device open even when no clients are connected.
    camera.setConnectionStrategy(VideoSource.ConnectionStrategy.kKeepOpen)
    return inst, camera
def main():
    """Serve camera 0 via the CameraServer and block forever."""
    server = CameraServer.getInstance()
    camera = server.startAutomaticCapture(dev=0)
    # A second camera and a resolution override were tried and disabled:
    #camera2 = server.startAutomaticCapture(dev = 1)
    #camera.setResolution(320,240)
    server.waitForever()
def main():
    """Relay a low-res (160x120 @ 20fps) camera feed to the dashboard."""
    cs = CameraServer.getInstance()
    cs.enableLogging()
    camera = cs.startAutomaticCapture()
    camera.setResolution(160, 120)
    camera.setFPS(20)
    # CvSink: captures frames from the camera.
    sink = cs.getVideo()
    # CvSource: sends frames back to the dashboard.
    stream = cs.putVideo("Rectangle", 320, 240)
    # Allocating new images is expensive — preallocate once and reuse.
    frame = np.zeros(shape=(120, 160, 3), dtype=np.uint8)
    while True:
        timestamp, frame = sink.grabFrame(frame)
        if timestamp == 0:
            # Grab failed: forward the error and skip this iteration.
            stream.notifyError(sink.getError())
            continue
        stream.putFrame(frame)
def startCameras(self):
    """ Start all connected cameras. """
    if len(self.camera_configs) == 0:
        return
    # The first config is always the vision camera.
    vision_config = self.camera_configs[0]
    (self.parsed_width, self.parsed_height) = self.parseDimensions(vision_config)
    self.vision_camera = self.startVisionCamera(vision_config)
    time.sleep(1)  # give the camera hardware a moment to come up
    # Optional custom output stream for processed frames.
    if Constants.ENABLE_CUSTOM_STREAM:
        self.cv_source = self.startOutputSource(
            self.parsed_width, self.parsed_height)
    # Switchable stream for the drive team.
    inst = CameraServer.getInstance()
    self.drive_server = inst.addServer(name="Drive")
    # Remaining configs are drive cameras (front, then back).
    if len(self.camera_configs) >= 2:
        self.front_camera = self.startDriveCamera(self.camera_configs[1])
        self.drive_server.setSource(self.front_camera)
    if len(self.camera_configs) == 3:
        self.back_camera = self.startDriveCamera(self.camera_configs[2])
def main():
    """Relay camera frames to the dashboard.

    Fix: the original pushed the buffer to the output stream even when
    grabFrame() failed (returned time 0), republishing a stale frame and
    never surfacing the error.  A failed grab now notifies the stream and
    skips the frame, matching the other capture loops in this file.
    """
    cs = CameraServer.getInstance()
    cs.enableLogging()
    # Capture from the first USB Camera on the system
    camera = cs.startAutomaticCapture()
    camera.setResolution(320, 240)
    # camera.setResolution(640, 480)
    # Get a CvSink. This will capture images from the camera
    cvSink = cs.getVideo()
    # (optional) Setup a CvSource. This will send images back to the Dashboard
    outputStream = cs.putVideo("Name", 160, 120)
    # Allocating new images is very expensive, always try to preallocate
    img = np.zeros(shape=(120, 160, 3), dtype=np.uint8)
    while True:
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image. If there is an error notify the output.
        time, img = cvSink.grabFrame(img)
        if time == 0:
            # Grab failed — report it and skip this frame.
            outputStream.notifyError(cvSink.getError())
            continue
        # (optional) send some image back to the dashboard
        outputStream.putFrame(img)
def main():
    """Serve a front camera plus a NetworkTables-switched cargo/hatch feed."""
    cs = CameraServer.getInstance()
    cs.enableLogging()
    # TODO Use device addresses and sides here please
    cargo_camera = UsbCamera(name=CameraConfig.cargo_name,
                             path=CameraConfig.cargo_dev_address)
    hatch_camera = UsbCamera(name=CameraConfig.hatch_name,
                             path=CameraConfig.hatch_dev_address)
    front_camera = cs.startAutomaticCapture(
        name=CameraConfig.front_name, path=CameraConfig.front_dev_address)
    switched = cs.addSwitchedCamera("Switched")
    switched.setSource(hatch_camera)

    # NetworkTables drives which source the switched stream shows:
    # "0" -> hatch view, "1" -> cargo view.
    def _listener(source, key, value, isNew):
        selection = str(value)
        if selection == "0":
            switched.setSource(hatch_camera)
        elif selection == "1":
            switched.setSource(cargo_camera)

    table = networktables.NetworkTables.getTable("/CameraPublisher")
    table.putString("Selected Camera", "0")
    table.addEntryListener(_listener, key="Selected Camera")
    cs.waitForever()
def __init__(self, config_parser):
    """Load the TFLite model, labels, NetworkTables link and camera stream.

    config_parser: object exposing ``team`` (int) and ``cameras`` (a list
    of dicts with at least "width" and "height" keys) — shape assumed
    from usage here; confirm against the parser's definition.
    """
    model_path = "model.tflite"
    print("Initializing TFLite runtime interpreter")
    # Run inference on the Coral Edge TPU via its delegate library.
    self.interpreter = tflite.Interpreter(
        model_path,
        experimental_delegates=[tflite.load_delegate('libedgetpu.so.1')])
    self.interpreter.allocate_tensors()
    print("Getting labels")
    # Class-id -> label mapping parsed from the pbtxt label map.
    parser = PBTXTParser("map.pbtxt")
    parser.parse()
    self.labels = parser.get_labels()
    print("Connecting to Network Tables")
    ntinst = NetworkTablesInstance.getDefault()
    ntinst.startClientTeam(config_parser.team)
    # Detections are published under the "ML" table.
    self.entry = ntinst.getTable("ML").getEntry("detections")
    self.temp_entry = []
    print("Starting camera server")
    cs = CameraServer.getInstance()
    camera = cs.startAutomaticCapture()
    # print("Cameras:", config_parser.cameras)
    # Only the first configured camera is used.
    camera_config = config_parser.cameras[0]
    WIDTH, HEIGHT = camera_config["width"], camera_config["height"]
    # print(WIDTH, HEIGHT, "DIMS")
    camera.setResolution(WIDTH, HEIGHT)
    camera.setExposureManual(50)
    self.cvSink = cs.getVideo()
    # Preallocated frame buffer reused by grabFrame on every iteration.
    self.img = np.zeros(shape=(HEIGHT, WIDTH, 3), dtype=np.uint8)
    self.output = cs.putVideo("Axon", WIDTH, HEIGHT)
    # Frame counter (e.g. for FPS bookkeeping by the caller).
    self.frames = 0
def startCamera(config):
    """Begin automatic capture for *config* and apply its JSON settings."""
    print("Starting camera '{}' on {}".format(config.name, config.path))
    server = CameraServer.getInstance()
    camera = server.startAutomaticCapture(name=config.name, path=config.path)
    camera.setConfigJson(json.dumps(config.config))
    return camera
def main(): """Run this file to process vision code. Please go to README.md to learn how to use this properly By Grant Perkins, 2018 """ # Initialize pipeline, image, camera server pipe = GripPipeline() img = np.zeros(shape=(480, 640, 3), dtype=np.uint8) cs = CameraServer.getInstance() camera = cs.startAutomaticCapture() camera.setResolution(640, 480) camera.setFPS(20) camera.setExposureManual(40) # Get a CvSink. This will capture images from the camera cvsink = cs.getVideo() # Wait on vision processing until connected to Network Tables cond = threading.Condition() notified = [False] def listener(connected, info): print(info, '; Connected=%s' % connected) with cond: notified[0] = True cond.notify() NetworkTables.initialize(server='roborio-1100-frc.local') NetworkTables.addConnectionListener(listener, immediateNotify=True) with cond: print("Waiting") if not notified[0]: # Wait until connected. cond.wait() is exited once listener is called by ConnectionListener cond.wait() print("Connected!") table = NetworkTablesInstance.getDefault().getTable("Pi") imagetaken = False last = -1 while True: time.sleep(.4) # Tell the CvSink to grab a frame from the camera and put it # in the source image. If there is an error notify the output. t, img = cvsink.grabFrame(img) if t == 0: continue pipe.process(img) cx, cy = pipe.get_centeroid() area = pipe.get_area() print(cx) if cx != last and cx != -1: print(cx) last = cx table.getEntry("centerx").setDouble(cx) table.getEntry("centery").setDouble(cy) table.getEntry("area").setDouble(area)
def main():
    """Stream one of two Jetson cameras, switchable from the dashboard.

    Fix: the "Button 3" restart branch wrote True back to the button even
    though its comment said the button was being reset to Off; leaving it
    True makes the script restart immediately on every launch.  It now
    writes False, matching the stated intent.
    """
    # Get ready with default camera and enable cscore logging
    cs = CameraServer.getInstance()
    cs.enableLogging()
    # Find usable cameras (those which support YUYV mode — i.e. NOT the
    # RG10-only CSI sensors reported by v4l2-ctl).
    camera_ids = []
    for id in range(3):
        if system(
                'v4l2-ctl --all -d /dev/video%s | grep "Pixel Format" | grep RG10'
                % id) != 0:
            camera_ids.append(id)
    # Check that we found at least two usable cameras
    assert len(camera_ids) >= 2
    # Enable capturing (waiting for a streaming client to connect)
    camera0 = cs.startAutomaticCapture(dev=camera_ids[0], name='Jetson Camera 0')
    camera1 = cs.startAutomaticCapture(dev=camera_ids[1], name='Jetson Camera 1')
    # cs.startAutomaticCapture(dev=2, name='Jetson Camera 2')
    camera0.setResolution(160, 120)
    camera1.setResolution(160, 120)
    cvSink0 = cs.getVideo(camera=camera0)
    cvSink1 = cs.getVideo(camera=camera1)
    outputStream = cs.putVideo("Swappable", 160, 120)
    # Preallocated frame buffer, filled in place by grabFrame.
    img = np.zeros(shape=(120, 160, 3), dtype=np.uint8)
    sd = NetworkTables.getTable("SmartDashboard/DB")
    while True:
        # Allow Button 3 (from Dashboard) state change to exit,
        # thus forcing a restart
        if sd.getBoolean("Button 3", False):
            # wait a bit, then set button back to Off
            sleep(0.5)
            sd.putBoolean("Button 3", False)
            sleep(0.5)
            break
        # Allow Button 0 (from Dashboard) to select camera source
        if sd.getBoolean("Button 0", False):
            sink = cvSink0
        else:
            sink = cvSink1
        t, img = sink.grabFrame(img)
        if t == 0:
            outputStream.notifyError(sink.getError())
            continue
        outputStream.putFrame(img)
def startCamera(config):
    """Fetch a sink for the camera named in *config* and push its JSON config.

    NOTE(review): getVideo(name=...) returns a CvSink for an
    already-running camera, not the camera itself, and the camera is never
    started here (contrast the startCamera variants elsewhere in this file
    that call startAutomaticCapture and configure the returned camera).
    Calling setConfigJson() on the sink is unlikely to configure the
    camera — confirm which object was meant to be configured.
    """
    print("Starting camera '{}' on {}".format(config.name, config.path))
    camera = CameraServer.getInstance() \
        .getVideo(name=config.name)
    camera.setConfigJson(json.dumps(config.config))
    return camera
def main():
    """Serve USB cameras 0 and 1 via the CameraServer and block forever."""
    server = CameraServer.getInstance()
    server.enableLogging()
    usb1 = server.startAutomaticCapture(dev=0)
    usb2 = server.startAutomaticCapture(dev=1)
    # print("Camera bois good")
    server.waitForever()
def __init__(self):
    """Create the CameraServer output stream; no-op when cscore is absent."""
    self.logger = logging.getLogger(__name__)
    if not WORKING:
        # cscore isn't available on this machine — run without streaming.
        self.logger.warning('CameraServer not installed')
        return
    # putVideo(name, width, height); the arguments here are
    # (self.HEIGHT, self.WIDTH) — presumably intentional, but worth
    # confirming against the class constants.
    self.video_server = CameraServer.getInstance().putVideo(
        'nano_output', self.HEIGHT, self.WIDTH)
    self.output = ""
def main():
    """Serve two cameras identified by stable /dev/v4l paths; never return."""
    server = CameraServer.getInstance()
    server.enableLogging()
    # by-path device names survive reboots and re-enumeration, unlike /dev/videoN.
    usb1 = server.startAutomaticCapture(
        name='cam1',
        path='/dev/v4l/by-path/platform-ci_hdrc.0-usb-0:1.1:1.0-video-index0')
    usb2 = server.startAutomaticCapture(
        name='cam2',
        path='/dev/v4l/by-path/platform-ci_hdrc.0-usb-0:1.2:1.0-video-index0')
    server.waitForever()