def __init__(self, ntInstance: NetworkTablesInstance) -> None:
    """Bind the NetworkTables entries used to control match recording.

    :param ntInstance: NetworkTables instance to fetch entries/tables from.
    """
    # Entry that remotely starts/stops recording.
    self.recordingControlEntry = ntInstance.getEntry(
        RecordingController.kRecordingControlKey)
    # Entry holding the format string used to name recording files.
    self.recordingFileNameFormatEntry = ntInstance.getEntry(
        RecordingController.kRecordingFileNameFormatKey)
    # Table under which event markers are published.
    self.eventsTable = ntInstance.getTable(
        RecordingController.kEventMarkerTableName)
def robotInit(self):
    """Set up the joystick, drive motors, and characterization encoder."""
    self.joystick = wpilib.Joystick(0)
    self.motors = wpilib.SpeedControllerGroup(WPI_VictorSPX(2), WPI_VictorSPX(3))
    self.priorAutospeed = 0
    # TODO: Check if we need IdleMode.kBrake
    # self.motor.setIdleMode(IdleMode.kBrake);
    # Quadrature encoder on DIO channels 8/7, direction reversed, 1x decoding.
    self.encoder = wpilib.Encoder(
        8, 7, True, encodingType=wpilib.Encoder.EncodingType.k1X)
    # //
    # // Configure encoder related functions -- getDistance and getrate should
    # // return units and units/s
    # //
    # Template alternatives for other unit choices (kept for reference):
    # % if units == 'Degrees':
    #     double encoderConstant = (1 / GEARING) * 360
    # % elif units == 'Radians':
    #     double encoderConstant = (1 / GEARING) * 2. * Math.PI;
    # % elif units == 'Rotations':
    # Rotations: one distance unit per full encoder revolution.
    self.encoderConstant = (1 / (self.ENCODER_PULSES_PER_REV))
    self.encoder.setDistancePerPulse(self.encoderConstant)
    self.encoder.reset()
    # Push NetworkTables updates every 10 ms for responsive telemetry.
    NetworkTablesInstance.getDefault().setUpdateRate(0.010)
def startSwitchedCamera(config):
    """Start running the switched camera."""
    print("Starting switched camera '{}' on {}".format(config.name, config.key))
    server = CameraServer.getInstance().addSwitchedCamera(config.name)

    def listener(fromobj, key, value, isNew):
        # A numeric value selects a camera by index.
        if isinstance(value, float):
            index = int(value)
            if 0 <= index < len(cameras):
                server.setSource(cameras[index])
        # A string value selects a camera by its configured name.
        elif isinstance(value, str):
            for index, cfg in enumerate(cameraConfigs):
                if value == cfg.name:
                    server.setSource(cameras[index])
                    break

    notify_flags = (
        ntcore.constants.NT_NOTIFY_IMMEDIATE
        | ntcore.constants.NT_NOTIFY_NEW
        | ntcore.constants.NT_NOTIFY_UPDATE
    )
    NetworkTablesInstance.getDefault().getEntry(config.key).addListener(
        listener, notify_flags)
    return server
def __init__(self, inst: NetworkTablesInstance = NetworkTables) -> None:
    """Connect to the roboRIO and cache all vision-related NT entries."""
    inst.initialize(server=RIO_IP)
    self.inst = inst
    vision_table = inst.getTable("/vision")
    # Entries exchanged with the robot-side vision code.
    self.entry = vision_table.getEntry("data")
    self.ping = vision_table.getEntry("ping")
    self.raspi_pong = vision_table.getEntry("raspi_pong")
    self.rio_pong = vision_table.getEntry("rio_pong")
    # Ping bookkeeping state.
    self.last_ping_time = 0.0
    self.time_to_pong = 0.00000001
    self._get_time = time.monotonic
def extra_processing(showFrame):
    """Find the largest contour, publish targeting data, and annotate the frame.

    :param showFrame: BGR frame the contours came from; drawn on in place.
    """
    global pipeline
    m_table = NetworkTablesInstance.getDefault().getTable("JETSONNANO")
    # Get the biggest contour and process it.
    if pipeline.filter_contours_output:
        # BUG FIX: sorted(..., key=cv2.contourArea)[0] returned the SMALLEST
        # contour; use max() to select the largest by area as intended.
        m_MAXCnt = max(pipeline.filter_contours_output, key=cv2.contourArea)
        # Find the bounding box of the contour to get x, y, width, and height.
        x, y, w, h = cv2.boundingRect(m_MAXCnt)
        # Deliver data to the roboRIO via NetworkTables.
        m_table.putBoolean("IsHaveTarget", True)
        # Horizontal error of the target centre relative to the frame centre.
        m_table.putNumber("SpinError", (x + w / 2) - showFrame.shape[1]/2)
        # Show the image when you need it.
        cv2.drawContours(showFrame, m_MAXCnt, -1, 255, 3)
        cv2.circle(
            showFrame, (int(x + w / 2), int(y + h / 2)), 3, (0, 0, 255), -1)
        print("Target Get!!! ", "X:", int(x + w / 2), "Y:", int(y + h / 2),
              "Error:", (x + w / 2) - showFrame.shape[1]/2)
    else:
        m_table.putBoolean("IsHaveTarget", False)
        print("Target Lose QvQ")
def init():
    """Read config, start NetworkTables and cameras, then run the vision loop."""
    # BUG FIX: without `global`, this assignment created a function-local
    # `configFile` that readJSONConfig() (which reads the module global)
    # never saw, silently ignoring the command-line argument.
    global configFile
    if len(sys.argv) >= 2:
        configFile = sys.argv[1]

    # read configuration
    if not readJSONConfig():
        sys.exit(1)

    # start NetworkTables
    ntinst = NetworkTablesInstance.getDefault()
    if server:
        print("Setting up NetworkTables server")
        ntinst.startServer()
    else:
        print("Setting up NetworkTables client for team {}".format(team))
        ntinst.startClientTeam(team)

    # start cameras
    for config in cameraConfigs:
        serv, sink, src = startCamera(config)
        cameras.append(serv)
        sinks.append(sink)
        srcs.append(src)

    # Seed default HSV threshold values on the dashboard.
    sd.putNumber("h1", 0)
    sd.putNumber("s1", 0)
    sd.putNumber("v1", 0)
    sd.putNumber("h2", 180)
    sd.putNumber("v2", 255)
    sd.putNumber("s2", 255)

    # Process both camera feeds forever.
    while True:
        processVision(sinks[0], srcs[0])
        processVision(sinks[1], srcs[1])
def isConnectedToRobot(self, ntinst):
    """
    This ensures that the Pi is properly connected to the Network Tables.
    """
    connected = False
    # Read status from Network Tables
    # NOTE(review): the `ntinst` argument is immediately shadowed here, so
    # the caller's instance is never used — confirm this is intentional.
    ntinst = NetworkTablesInstance.getDefault()
    table = ntinst.getTable(SMART_DASHBOARD).getSubTable(VISION_TABLE)
    value = table.getString(CONNECTION_STATUS, "NOT_INIT")
    self.logger.logMessage("Connection Status: " + value)
    # Restart the script when the RoboRio is first connected to the Network Tables
    if (value == "PING"):
        # Answer the robot's handshake, then exit so the supervisor restarts us.
        table.putString(CONNECTION_STATUS, "PONG")
        ntinst.flush()
        time.sleep(1)
        sys.exit("Reconnecting to Network Tables...")
    # Wait for the connection to be re-established
    connected = self.waitForConnection(table)
    if (connected):
        table.putString(CONNECTION_STATUS, "RESET")
    return connected
def startNetworkTables(self):
    """
    Connect to the Network Tables as a client or start the server locally.
    """
    retry_attempts = 5
    ntinst = NetworkTablesInstance.getDefault()
    if self.server:
        self.logger.logMessage("Setting up NetworkTables server...")
        ntinst.startServer()
    else:
        self.logger.logMessage(
            "Setting up NetworkTables client for team {}".format(
                self.team))
        ntinst.startClientTeam(self.team)

    # Poll once per second until connected or out of attempts.
    connected = False
    for attempt in range(retry_attempts):
        time.sleep(1)
        connected = self.isConnectedToRobot(ntinst)
        self.logger.logMessage("NetworkTables Connected: " + str(connected) +
                               ", Attempt: " + str(attempt))
        if connected:
            break
    if not connected:
        sys.exit(
            "Connection to robot not established. Restarting vision processing..."
        )
def init():
    """Read config, start NetworkTables and all cameras, then loop on vision."""
    # BUG FIX: without `global`, this assignment created a function-local
    # `configFile` that readConfig() (which reads the module global) never
    # saw, silently ignoring the command-line argument.
    global configFile
    if len(sys.argv) >= 2:
        configFile = sys.argv[1]

    # read configuration
    if not readConfig():
        sys.exit(1)

    # start NetworkTables
    ntinst = NetworkTablesInstance.getDefault()
    if server:
        print("Setting up NetworkTables server")
        ntinst.startServer()
    else:
        print("Setting up NetworkTables client for team {}".format(team))
        ntinst.startClientTeam(team)

    cvSink = None
    cvSrc = None
    # start cameras — note only the LAST camera's sink/source are kept for
    # processing below.
    for config in cameraConfigs:
        serv, cvSink, cvSrc = startCamera(config)
        cameras.append(serv)

    # start switched cameras
    for config in switchedCameraConfigs:
        startSwitchedCamera(config)

    while True:
        processVision(cvSink, cvSrc)
        print("Outputting image")
def __init__(self, app):
    """Build the status GUI and start the NetworkTables client."""
    app.setSize(800, 480)
    app.setSize('fullscreen')
    app.setGuiPadding(0, 0)
    app.setBg(warn_color)
    app.setFont(size=64, family='Ubuntu', underline=False, slant='roman')
    app.addLabel('title', 'VISION SYSTEM')
    app.getLabelWidget('title').config(font='Ubuntu 64 underline')
    # One status line per monitored component, each starting red/"down".
    app.addLabel('radio', 'RADIO: {}'.format(down_text))
    app.addLabel('robot', 'ROBOT: {}'.format(down_text))
    app.addLabel('ntabl', 'NTABL: {}'.format(down_text))
    for label_name in ('radio', 'robot', 'ntabl'):
        app.getLabelWidget(label_name).config(font='Ubuntu\ Mono 64 bold', bg='red')
    self.app = app
    # Standard FRC addressing: 10.TE.AM.1 is the radio, 10.TE.AM.2 the robot.
    team_hi = int(team / 100)
    team_lo = int(team % 100)
    self.radio_address = '10.{}.{}.1'.format(team_hi, team_lo)
    self.robot_address = '10.{}.{}.2'.format(team_hi, team_lo)
    self.nt = NetworkTablesInstance.getDefault()
    self.nt.startClientTeam(team)
    self.nt.addConnectionListener(self._listener, immediateNotify=True)
def __init__(self, config_parser):
    """Load the TFLite model, connect to NetworkTables, and start the camera."""
    model_path = "model.tflite"
    print("Initializing TFLite runtime interpreter")
    # Run inference on the Coral Edge TPU via the edgetpu delegate.
    self.interpreter = tflite.Interpreter(
        model_path,
        experimental_delegates=[tflite.load_delegate('libedgetpu.so.1')])
    self.interpreter.allocate_tensors()
    print("Getting labels")
    parser = PBTXTParser("map.pbtxt")
    parser.parse()
    self.labels = parser.get_labels()
    print("Connecting to Network Tables")
    ntinst = NetworkTablesInstance.getDefault()
    ntinst.startClientTeam(config_parser.team)
    # Detections are published as a single entry under the "ML" table.
    self.entry = ntinst.getTable("ML").getEntry("detections")
    self.temp_entry = []
    print("Starting camera server")
    cs = CameraServer.getInstance()
    camera = cs.startAutomaticCapture()
    # print("Cameras:", config_parser.cameras)
    # Use the first configured camera's resolution.
    camera_config = config_parser.cameras[0]
    WIDTH, HEIGHT = camera_config["width"], camera_config["height"]
    # print(WIDTH, HEIGHT, "DIMS")
    camera.setResolution(WIDTH, HEIGHT)
    camera.setExposureManual(50)
    self.cvSink = cs.getVideo()
    # Preallocate the frame buffer; grabFrame fills it in place.
    self.img = np.zeros(shape=(HEIGHT, WIDTH, 3), dtype=np.uint8)
    self.output = cs.putVideo("Axon", WIDTH, HEIGHT)
    self.frames = 0
def recordingController(cls):
    """Return the shared RecordingController, creating it on first use."""
    controller = cls._recordingController
    if controller is None:
        controller = RecordingController(NetworkTablesInstance.getDefault())
        cls._recordingController = controller
    return controller
def init():
    """Read config, start NetworkTables/cameras, then run the goal-vision loop."""
    # BUG FIX: without `global`, this assignment created a function-local
    # `configFile` that readJSONConfig() (which reads the module global)
    # never saw, silently ignoring the command-line argument.
    global configFile
    if len(sys.argv) >= 2:
        configFile = sys.argv[1]

    # read configuration
    if not readJSONConfig():
        sys.exit(1)

    # start NetworkTables
    ntinst = NetworkTablesInstance.getDefault()
    if server:
        print("Setting up NetworkTables server")
        ntinst.startServer()
    else:
        print("Setting up NetworkTables client for team {}".format(team))
        ntinst.startClientTeam(team)

    # start cameras
    for config in cameraConfigs:
        serv, sink, src = startCamera(config)
        cameras.append(serv)
        sinks.append(sink)
        srcs.append(src)

    # Reference contour of the goal, matched against live frames below.
    goalContour = getGoalContours()
    # if goalContour == None:
    #     print("uh oh")
    while True:
        # time.sleep(10)
        processVision(sinks[0], srcs[0], 0, goalContour)
def main():
    """Entry point: read config, start cameras and NetworkTables, then stream."""
    global configFile
    if len(sys.argv) >= 2:
        configFile = sys.argv[1]
    if not readConfig():
        print("Unable to read config file!")
        sys.exit(1)
    # start cameras
    cameras = []
    streams = []
    image_width = 640
    image_height = 480
    grip = VisionPipeline()
    print("Initialized vision stuff")
    for cameraConfig in cameraConfigs:
        # cameras.append(startCamera(cameraConfig))
        cs, cameraCapture, _ = startCamera(cameraConfig)
        streams.append(cs)
        cameras.append(cameraCapture)
    # First camera is server
    cameraServer = streams[0]
    # Set up a CV Sink to capture video
    cvSink = cameraServer.getVideo()
    # CvSource
    outputStream = cameraServer.putVideo("stream", image_width, image_height)
    # Preallocate the frame buffer; grabFrame fills it in place.
    img = np.zeros(shape=(image_height, image_width, 3), dtype=np.uint8)
    # Networktables
    ninst = NetworkTablesInstance.getDefault()
    if server:
        print("Setting up NetworkTables server")
        ninst.startServer()
    else:
        print("Setting up NetworkTables client for team {}".format(team))
        ninst.startClientTeam(team)
    # Advertise readiness on the dashboard.
    network_table = ninst.getTable("Shuffleboard").getSubTable("Vision")
    network_table.getEntry("connected").setValue(True)
    time.sleep(0.1)
    while True:
        # Grab a frame; a zero timestamp signals a capture error.
        timestamp, img = cvSink.grabFrame(img)
        frame = img
        if timestamp == 0:
            outputStream.notifyError(cvSink.getError())
            continue
def main(): """Run this file to process vision code. Please go to README.md to learn how to use this properly By Grant Perkins, 2018 """ # Initialize pipeline, image, camera server pipe = GripPipeline() img = np.zeros(shape=(480, 640, 3), dtype=np.uint8) cs = CameraServer.getInstance() camera = cs.startAutomaticCapture() camera.setResolution(640, 480) camera.setFPS(20) camera.setExposureManual(40) # Get a CvSink. This will capture images from the camera cvsink = cs.getVideo() # Wait on vision processing until connected to Network Tables cond = threading.Condition() notified = [False] def listener(connected, info): print(info, '; Connected=%s' % connected) with cond: notified[0] = True cond.notify() NetworkTables.initialize(server='roborio-1100-frc.local') NetworkTables.addConnectionListener(listener, immediateNotify=True) with cond: print("Waiting") if not notified[0]: # Wait until connected. cond.wait() is exited once listener is called by ConnectionListener cond.wait() print("Connected!") table = NetworkTablesInstance.getDefault().getTable("Pi") imagetaken = False last = -1 while True: time.sleep(.4) # Tell the CvSink to grab a frame from the camera and put it # in the source image. If there is an error notify the output. t, img = cvsink.grabFrame(img) if t == 0: continue pipe.process(img) cx, cy = pipe.get_centeroid() area = pipe.get_area() print(cx) if cx != last and cx != -1: print(cx) last = cx table.getEntry("centerx").setDouble(cx) table.getEntry("centery").setDouble(cy) table.getEntry("area").setDouble(area)
def getCameraSelection(self):
    """
    Get the current camera selection from the Network Tables.
    """
    table = NetworkTablesInstance.getDefault().getTable(SMART_DASHBOARD)
    # Default to the front camera when no selection has been published.
    return table.getString(CAMERA_SELECTION, "Front")
def main():
    """Detect floor-tape lines on camera 2 and publish averages to the dashboard."""
    NetworkTables.initialize("10.68.51.2")
    LineDetect = DetectGreenTape()
    value = 0
    sd = NetworkTables.getTable("SmartDashboard")
    ntinst = NetworkTablesInstance.getDefault()
    ntinst.startServer()
    inst = CameraServer.getInstance()
    # Cameras are addressed by stable /dev/v4l by-path device names.
    camera1 = UsbCamera(
        "Camera1",
        "/dev/v4l/by-path/platform-3f980000.usb-usb-0:1.2:1.0-video-index0")
    camera2 = UsbCamera(
        "Camera2",
        "/dev/v4l/by-path/platform-3f980000.usb-usb-0:1.1.3:1.0-video-index0")
    camera1.setConnectionStrategy(VideoSource.ConnectionStrategy.kKeepOpen)
    camera2.setConnectionStrategy(VideoSource.ConnectionStrategy.kKeepOpen)
    usb1 = inst.startAutomaticCapture(camera=camera1)
    usb2 = inst.startAutomaticCapture(camera=camera2)
    usb1.setResolution(320, 240)
    usb2.setResolution(320, 240)
    # Only camera 2's frames are processed below.
    cvSink = inst.getVideo(camera=camera2)
    # Preallocate the frame buffer; grabFrame fills it in place.
    img = np.zeros(shape=(240, 320, 3), dtype=np.uint8)
    while True:
        timestamp, img = cvSink.grabFrame(img)
        LineDetect.process(img)
        sd.putNumber('NbLinesDetectedNew', len(LineDetect.filter_lines_output))
        # Average the endpoints and angles of every detected line.
        Xmean1 = 0
        Xmean2 = 0
        Ymean1 = 0
        Ymean2 = 0
        angle = 0
        nbLigne = len(LineDetect.filter_lines_output)
        for ligne in LineDetect.filter_lines_output:
            Xmean1 += ligne.x1
            Xmean2 += ligne.x2
            Ymean1 += ligne.y1
            Ymean2 += ligne.y2
            angle += ligne.angle()
        if nbLigne:
            Ymean1 /= nbLigne
            Ymean2 /= nbLigne
            Xmean1 /= nbLigne
            Xmean2 /= nbLigne
            angle /= nbLigne
            # Publish only when at least one line was detected.
            sd.putNumber('Xmean1', Xmean1)
            sd.putNumber('Xmean2', Xmean2)
            sd.putNumber('Ymean1', Ymean1)
            sd.putNumber('Ymean2', Ymean2)
            sd.putNumber('Angle', angle)
        time.sleep(0.1)
def main(): readConfig() # start cameras image_width = 640 image_height = 480 cameras = [] streams = [] for cameraConfig in cameraConfigs: cs, cameraCapture = startCamera(cameraConfig) streams.append(cs) cameras.append(cameraCapture) #Get the first camera cameraServer = streams[0] # Get a CvSink. This will capture images from the camera cvSink = cameraServer.getVideo() # (optional) Setup a CvSource. This will send images back to the Dashboard outputStream = cameraServer.putVideo("stream", image_width, image_height) # Allocating new images is very expensive, always try to preallocate img = np.zeros(shape=(image_height, image_width, 3), dtype=np.uint8) # initialize network tables # start NetworkTables ntinst = NetworkTablesInstance.getDefault() if server: print("Setting up NetworkTables server") ntinst.startServer() else: print("Setting up NetworkTables client for team {}".format(team)) ntinst.startClientTeam(team) nettable = ntinst.getTable("Shuffleboard").getSubTable('Vision') nettable.getEntry('connected').setValue('true') # allow the camera to warmup time.sleep(0.1) # loop forever while True: # Tell the CvSink to grab a frame from the camera and put it # in the source image. If there is an error notify the output. timestamp, img = cvSink.grabFrame(img) #frame = flipImage(img) if timestamp == 0: # Send the output the error. outputStream.notifyError(cvSink.getError()) # skip the rest of the current iteration continue frame = picamvidopencv(img, nettable) # (optional) send some image back to the dashboard outputStream.putFrame(frame)
def __init__(self, table: NetworkTable, field: str) -> None:
    """Initialize the NetworkButton.

    :param table: the :class:`.NetworkTable` instance to use, or the name of
                  the table to use.
    :param field: field to use.
    """
    # Resolve a table name to the actual table on the default instance.
    if not isinstance(table, NetworkTable):
        table = NetworkTablesInstance.getDefault().getTable(table)
    self.entry = table.getEntry(field)
def __init__(self) -> None: self.last_pong = time.monotonic() # 50Hz control loop for 2 seconds self.odometry: Deque[Odometry] = deque(maxlen=50 * 2) self.ntinst = NetworkTablesInstance() if hal.isSimulation(): self.ntinst.startTestMode(server=False) else: self.ntinst.startClient("10.47.74.6") # Raspberry pi's IP self.ntinst.setUpdateRate(1) # ensure our flush calls flush immediately self.fiducial_x_entry = self.ntinst.getEntry("/vision/fiducial_x") self.fiducial_y_entry = self.ntinst.getEntry("/vision/fiducial_y") self.fiducial_time_entry = self.ntinst.getEntry("/vision/fiducial_time") self.ping_time_entry = self.ntinst.getEntry("/vision/ping") self.raspi_pong_time_entry = self.ntinst.getEntry("/vision/raspi_pong") self.rio_pong_time_entry = self.ntinst.getEntry("/vision/rio_pong") self.latency_entry = self.ntinst.getEntry("/vision/clock_offset") self.processing_time_entry = self.ntinst.getEntry("/vision/processing_time") self.camera_entry = self.ntinst.getEntry("/vision/game_piece")
def __init__(self, config_parser):
    """Load the detection model (Coral TPU if present), NT client, and camera.

    Falls back to the unoptimized CPU model when the Edge TPU delegate
    cannot be created.
    """
    print("Initializing TFLite runtime interpreter")
    try:
        model_path = "model.tflite"
        self.interpreter = tflite.Interpreter(
            model_path,
            experimental_delegates=[
                tflite.load_delegate('libedgetpu.so.1')
            ])
        self.hardware_type = "Coral Edge TPU"
    # BUG FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception so the CPU fallback only handles
    # genuine delegate/model failures.
    except Exception:
        print(
            "Failed to create Interpreter with Coral, switching to unoptimized"
        )
        model_path = "unoptimized.tflite"
        self.interpreter = tflite.Interpreter(model_path)
        self.hardware_type = "Unoptimized"
    self.interpreter.allocate_tensors()

    print("Getting labels")
    parser = PBTXTParser("map.pbtxt")
    parser.parse()
    self.labels = parser.get_labels()

    print("Connecting to Network Tables")
    ntinst = NetworkTablesInstance.getDefault()
    ntinst.startClientTeam(config_parser.team)
    ntinst.startDSClient()
    # Output entries published under the "ML" table.
    self.entry = ntinst.getTable("ML").getEntry("detections")
    self.coral_entry = ntinst.getTable("ML").getEntry("coral")
    self.fps_entry = ntinst.getTable("ML").getEntry("fps")
    self.resolution_entry = ntinst.getTable("ML").getEntry("resolution")
    self.temp_entry = []

    print("Starting camera server")
    cs = CameraServer.getInstance()
    camera = cs.startAutomaticCapture()
    camera_config = config_parser.cameras[0]
    WIDTH, HEIGHT = camera_config["width"], camera_config["height"]
    camera.setResolution(WIDTH, HEIGHT)
    self.cvSink = cs.getVideo()
    # Preallocate the frame buffer; grabFrame fills it in place.
    self.img = np.zeros(shape=(HEIGHT, WIDTH, 3), dtype=np.uint8)
    self.output = cs.putVideo("Axon", WIDTH, HEIGHT)
    self.frames = 0

    # Report which backend and resolution we ended up with.
    self.coral_entry.setString(self.hardware_type)
    self.resolution_entry.setString(str(WIDTH) + ", " + str(HEIGHT))
def publishValues(self, center_x, center_y, angle_offset):
    """
    Publish coordinates/values to the 'vision' network table.
    """
    vision_table = (NetworkTablesInstance.getDefault()
                    .getTable(SMART_DASHBOARD)
                    .getSubTable(VISION_TABLE))
    vision_table.putValue("centerX", center_x)
    vision_table.putValue("centerY", center_y)
    vision_table.putValue("angleOffset", angle_offset)
    # Verbose log entries (second argument enables verbose mode).
    self.logger.logMessage(
        'Center: (' + str(center_x) + ', ' + str(center_y) + ')', True)
    self.logger.logMessage('Angle Offset: ' + str(angle_offset), True)
def main():
    """Grab frames from the local camera and run the tape pipeline on them."""
    global tableInstance
    pipeline = TapeRecognitionPipeline()
    # cs = CameraServer.getInstance()
    # Capture from the first USB Camera on the system
    # camera = UsbCamera(name="Camera rPi Camera 0",path="/dev/video0")
    # server = cs.startAutomaticCapture(camera=camera,return_server=True)
    # camera.setConnectionStrategy(VideoSource.ConnectionStrategy.kKeepOpen)
    # camera.setResolution(320, 240)
    capture = cv2.VideoCapture(0)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    # NOTE(review): this creates a private NT instance instead of
    # NetworkTablesInstance.getDefault() — confirm that is intended.
    tableInstance = NetworkTablesInstance()
    tableInstance.initialize(server="roboRIO-3319-FRC.local")
    print(tableInstance.isConnected())
    # Get a CvSink. This will capture images from the camera
    # cvSink = cs.getVideo()
    # cvSink.setEnabled(True)
    # Allocating new images is very expensive, always try to preallocate
    img = numpy.zeros(shape=(240, 320, 3), dtype=numpy.uint8)
    while True:
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image. If there is an error notify the output.
        retval, img = capture.read(img)
        #
        # Insert your image processing logic here!
        #
        pipeline.process(img)
def main():
    """Track the known object with the camera and publish distance/offset to NT."""
    # wait so the RPi is always powered up and ready
    time.sleep(2.0)
    # camera configuration
    cap = cv2.VideoCapture(0)
    cap.set(3, 640)  # set width
    cap.set(4, 480)  # set height
    cap.set(cv2.CAP_PROP_BRIGHTNESS, 105)
    # NetworkTables configuration
    ntinst = NetworkTablesInstance.getDefault()
    print("Setting up NetworkTables client for team {}".format(team))
    ntinst.startClientTeam(team)
    shuffle = ntinst.getTable('Shuffleboard')
    sd = shuffle.getSubTable('Vision')
    # infinite image-analysis loop
    while (True):
        ret, frame = cap.read()
        proc = process(frame)
        obj = find_object(proc)
        if obj:
            '''
            Comandos usados para calibrar a largura focal da câmera.
            focal_length = (obj[3] * KNOWN_DISTANCE) / KNOWN_WIDTH
            print("Distância focal: {0}".format(focal_length))
            '''
            # print(obj)
            # Horizontal offset of the object's centre from the frame centre.
            center = obj[0] + (obj[2] / 2)
            diff = (640 / 2) - center + STANDART_ERROR
            # Distance estimated from the known object width and focal length.
            dist = distance_to_object(KNOWN_WIDTH, F, obj[3])
            # print("Dist: {0} Diff: {1}".format(dist, diff))
            sd.putNumber('Difference', diff)
            sd.putNumber('Distance', dist)
        # Exit on ESC.
        if cv2.waitKey(1) == 27:
            break
    cap.release()
def extra_processing(showFrame):
    """Publish whether the largest contour target is visible plus its spin error.

    :param showFrame: BGR frame the contours came from (used for frame width).
    """
    global pipeline
    m_table = NetworkTablesInstance.getDefault().getTable("JETSONNANO")
    # Get the biggest contour and process it.
    if pipeline.filter_contours_output:
        # BUG FIX: sorted(..., key=cv2.contourArea)[0] returned the SMALLEST
        # contour; use max() to select the largest by area as intended.
        m_MAXCnt = max(pipeline.filter_contours_output, key=cv2.contourArea)
        # Find the bounding box of the contour to get x, y, width, and height.
        x, y, w, h = cv2.boundingRect(m_MAXCnt)
        # Deliver data to the roboRIO via NetworkTables.
        m_table.putBoolean("IsHaveTarget", True)
        m_table.putNumber("SpinError", (x + w / 2) - showFrame.shape[1] / 2)
    else:
        m_table.putBoolean("IsHaveTarget", False)
def __init__(self):
    """Start the NetworkTables server and wire up DriverStation/LiveWindow."""
    # TODO: StartCAPI()
    # TODO: See if the next line is necessary
    # Resource.RestartProgram()
    inst = NetworkTablesInstance.getDefault()
    inst.setNetworkIdentity("Robot")
    # On a real robot, persist NT values to the roboRIO filesystem.
    if not RobotBase.isSimulation():
        inst.startServer("/home/lvuser/networktables.ini")
    else:
        inst.startServer()
    # Imported here (not at module level) to avoid an import cycle.
    from .driverstation import DriverStation
    self.ds = DriverStation.getInstance()
    # Advertise that LiveWindow telemetry starts disabled.
    inst.getTable("LiveWindow").getSubTable(".status").getEntry("LW Enabled").setBoolean(False)
    from .livewindow import LiveWindow
    LiveWindow.setEnabled(False)
    # Guard flag checked elsewhere to prevent double initialization.
    self.__initialized = True
def main():
    """Stream one camera while detecting tape lines on a second capture device."""
    NetworkTables.initialize("10.68.51.2")
    # Processed capture uses a stable /dev/v4l by-path device name.
    capture = cv2.VideoCapture("/dev/v4l/by-path/platform-3f980000.usb-usb-0:1.1.3:1.0-video-index0")
    LineDetect = DetectGreenTape()
    value = 0
    sd = NetworkTables.getTable("SmartDashboard")
    ntinst = NetworkTablesInstance.getDefault()
    ntinst.startServer()
    inst = CameraServer.getInstance()
    # A second camera is streamed to the dashboard unprocessed.
    camera = UsbCamera("Camera1","/dev/v4l/by-path/platform-3f980000.usb-usb-0:1.2:1.0-video-index0" )
    server = inst.startAutomaticCapture(camera=camera, return_server=True)
    # Make sure camera/server resources are released on exit.
    atexit.register(FonctionSortie, capture, server, camera)
    while True:
        ret, frame = capture.read()
        if ret:
            LineDetect.process(frame)
            sd.putNumber('NbLinesDEtected',len(LineDetect.filter_lines_output))
def init():
    """Read configuration, bring up NetworkTables, and start every camera."""
    # read configuration
    if not readConfig():
        sys.exit(1)

    # start NetworkTables
    ntinst = NetworkTablesInstance.getDefault()
    if server:
        print("Setting up NetworkTables server")
        ntinst.startServer()
    else:
        print("Setting up NetworkTables client for team {}".format(team))
        ntinst.startClientTeam(team)

    # start cameras
    for camera_config in cameraConfigs:
        camera, inst = startCamera(camera_config)
        cameras.append(camera)
        insts.append(inst)

    # start switched cameras
    for switched_config in switchedCameraConfigs:
        startSwitchedCamera(switched_config)
def startCameraServer():
    """Read config, start NetworkTables, start cameras, and return them.

    :returns: list of started camera objects from :func:`startCamera`.
    """
    # BUG FIX: without `global`, this assignment created a function-local
    # `configFile` that readConfig() (which reads the module global) never
    # saw, silently ignoring the command-line argument.
    global configFile
    if len(sys.argv) >= 2:
        configFile = sys.argv[1]

    # read configuration
    if not readConfig():
        sys.exit(1)

    # start NetworkTables
    ntinst = NetworkTablesInstance.getDefault()
    if server:
        print("Setting up NetworkTables server")
        ntinst.startServer()
    else:
        print("Setting up NetworkTables client for team {}".format(team))
        ntinst.startClientTeam(team)

    # start cameras
    cameras = []
    for cameraConfig in cameraConfigs:
        cameras.append(startCamera(cameraConfig))
    return cameras
def liveWindowTable(cls) -> NetworkTable:
    """Return the cached "LiveWindow" table, fetching it on first use."""
    table = cls._liveWindowTable
    if table is None:
        table = NetworkTablesInstance.getDefault().getTable("LiveWindow")
        cls._liveWindowTable = table
    return table
def main(argv=None):
    """Ball-vision entry point: capture frames, detect the ball, publish to NT."""
    ap = argparse.ArgumentParser()
    #ap.add_argument("-v", "--video", help="path to the (optional) video file")
    ap.add_argument("-i", "--input", help="path to the input image file")
    ap.add_argument("-b", "--buffer", type=int, default=64, help="max buffer size")
    ap.add_argument("-p", "--picamera", type=int, default=1, help="whether or not the Raspi camera should be used")
    args = vars(ap.parse_args())
    #---------------
    # INIT ...move to proper fxn later
    #----------------
    # Alternative resolution/framerate configurations kept for reference:
    # vs = VideoStream(usePiCamera=args["picamera"] > 0, resolution=(320, 240),framerate=60).start()
    # vs = VideoStream(usePiCamera=args["picamera"] > 0, resolution=(640, 480),framerate=60).start()
    vs = VideoStream(usePiCamera=args["picamera"] > 0, resolution=(640, 480), framerate=90).start()
    # vs = VideoStream(usePiCamera=args["picamera"] > 0, resolution=(1280, 720),framerate=60).start()
    # vs = VideoStream(usePiCamera=args["picamera"] > 0, resolution=(1640, 922),framerate=40).start()
    # vs = VideoStream(usePiCamera=args["picamera"] > 0, resolution=(1920, 1080),framerate=30).start()
    # vs = VideoStream(usePiCamera=args["picamera"] > 0, resolution=(1640, 1232),framerate=40).start()
    # vs = VideoStream(usePiCamera=args["picamera"] > 0, resolution=(3280, 2464),framerate=15).start()
    # usbcam = VideoStream(src=0).start()
    time.sleep(2.0)  # give the camera time to warm up
    vs.camera.brightness = 50
    vs.camera.contrast = 0
    vs.camera.saturation = 0
    logging.basicConfig(level=logging.DEBUG)
    # Create NetworkTable Instance, initialize client, and get Table
    ntinst = NetworkTablesInstance.getDefault()
    NetworkTables.initialize(server='10.12.34.2')
    ballTable = NetworkTables.getTable('ballVision')
    #target_data = ntproperty('/fuVision/target_data', 6 * [0.0,], doc='Array of double/floats with target info: timestamp isFound, mode, distance, yaw')
    #-------------------
    # PERIODIC
    #--------------------
    timestamp = float(time.time())
    isThereABall = 0.0
    mode = 0.0
    distance = 0
    angle = 0.0
    while True:
        #update vars from networktables
        # NOTE(review): these NT reads pass no default value — confirm the
        # pynetworktables version in use permits that.
        toggle_ball_targeting = ballTable.getBoolean("toggle_ball_targeting")
        provide_vid_to_DS = ballTable.getBoolean("provide_vid_to_DS")
        switch_vid_src = ballTable.getNumber(
            "switch_vid_src")  ##Don't like this approach
        #Grab the frame
        frame = vs.read()
        #frame = imutils.resize(frame, width=320)
        #frame = imutils.rotate(frame, 90)
        #with picamera.array.PiRGBArray(camera) as stream:
        #camera.capture(stream, format="bgr")
        #frame = stream.array
        #img = np.zeros((600, 600, 3), np.uint8)
        img = frame.copy()
        scale = 1
        bp = BallPipeline()
        cnts = bp.process(img)
        mb = ManyTo1B(cnts)
        #mb.coord()
        isThereABall = 0.0
        if len(mb.candidateBalls) > 0:
            # Draw the chosen ball and compute distance/angle from its blob.
            cv2.circle(img, mb.theOneTrueBall.centroid,
                       int(mb.theOneTrueBall.radius), (0, 0, 255), 4)
            cv2.circle(img, mb.theOneTrueBall.centroid, 4, (0, 0, 255), -1)
            #cv2.drawContours(frame, cnts, 0, (0, 255, 0), 3)
            #print(mb.calcDistance(mb.theOneTrueBall.radius))
            #print(mb.calcAngle(mb.theOneTrueBall.cX, img.shape[1]))
            isThereABall = 1.0
            distance = mb.calcDistance(mb.theOneTrueBall.radius)
            angle = mb.calcAngle(mb.theOneTrueBall.cX, img.shape[1])
        timestamp = float(time.time())
        target_data = (timestamp, isThereABall, mode, distance, angle)
        # NEED TO SET target_data before putting into networkTables ballTable
        ballTable.putNumberArray("target_data", target_data)
        print("Data: ", target_data)
        cv2.imshow("BallTrack", img)
        #frame = VisionProcessing(frame)
        #cv.imshow("VP", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
        ntinst.flush()
    cv2.destroyAllWindows()
    vs.stop()
def root(cls):
    """Lazily create and return the shared ShuffleboardInstance."""
    instance = cls._root
    if instance is None:
        instance = ShuffleboardInstance(NetworkTablesInstance.getDefault())
        cls._root = instance
    return instance