Example #1
import time

from networktables import NetworkTables, NetworkTablesInstance

RIO_IP = "10.0.0.2"  # placeholder; the real roboRIO address is defined elsewhere in the original module


class VisionClient:  # class statement not shown in the snippet; this name is chosen here for illustration
    def __init__(self, inst: NetworkTablesInstance = NetworkTables) -> None:
        inst.initialize(server=RIO_IP)
        self.inst = inst

        # Entries used to publish vision data and to measure ping/pong round trips with the roboRIO
        nt = inst.getTable("/vision")
        self.entry = nt.getEntry("data")
        self.ping = nt.getEntry("ping")
        self.raspi_pong = nt.getEntry("raspi_pong")
        self.rio_pong = nt.getEntry("rio_pong")

        self.last_ping_time = 0.0
        self.time_to_pong = 0.00000001
        self._get_time = time.monotonic
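
A minimal usage sketch for the client above. It assumes the hypothetical VisionClient name introduced in the snippet and standard pynetworktables entry accessors; the timestamp ping and the 20 ms wait are illustrative choices, not taken from the original code.

import time

client = VisionClient()

# Send a ping timestamp, flush it to the server, then read back both pong entries
client.ping.setNumber(client._get_time())
client.inst.flush()

time.sleep(0.02)
print("raspi_pong:", client.raspi_pong.getNumber(0.0))
print("rio_pong:", client.rio_pong.getNumber(0.0))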
Example #2
import cv2
import numpy

from networktables import NetworkTablesInstance

# The GRIP-generated pipeline class; the module name here is an assumption
from grip import TapeRecognitionPipeline


def main():
    global tableInstance

    pipeline = TapeRecognitionPipeline()
    #cs = CameraServer.getInstance()

    # Capture from the first USB Camera on the system
    #camera = UsbCamera(name="Camera rPi Camera 0",path="/dev/video0")
    #server = cs.startAutomaticCapture(camera=camera,return_server=True)
    #camera.setConnectionStrategy(VideoSource.ConnectionStrategy.kKeepOpen)

    #camera.setResolution(320, 240)
    capture = cv2.VideoCapture(0)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

    tableInstance = NetworkTablesInstance.getDefault()
    tableInstance.initialize(server="roboRIO-3319-FRC.local")
    # The connection is established asynchronously, so this may still print False here
    print(tableInstance.isConnected())

    # Get a CvSink. This will capture images from the camera
    #cvSink = cs.getVideo()
    #cvSink.setEnabled(True)

    # Allocating new images is very expensive, always try to preallocate
    img = numpy.zeros(shape=(240, 320, 3), dtype=numpy.uint8)

    while True:
        # Grab a frame from the camera, reusing the preallocated buffer.
        # Skip this iteration if the read fails.
        retval, img = capture.read(img)
        if not retval:
            continue

        #
        # Insert your image processing logic here!
        #
        pipeline.process(img)
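
        # --- Sketch, not part of the original example ---
        # One way the results might be sent back to the roboRIO. This assumes the
        # GRIP-generated pipeline exposes a filter_contours_output list (a common
        # attribute name in GRIP code, not confirmed by this snippet) and that the
        # robot reads a /vision/data entry.
        contours = getattr(pipeline, "filter_contours_output", []) or []
        if contours:
            largest = max(contours, key=cv2.contourArea)
            x, y, w, h = cv2.boundingRect(largest)
            vision_table = tableInstance.getTable("/vision")
            vision_table.getEntry("data").setDoubleArray([x + w / 2.0, y + h / 2.0])
            tableInstance.flush()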