Beispiel #1
0
def loop():
    """Serve MJPEG streams for a front and back USB camera and, while vision
    mode is enabled, run find_target() on front-camera frames and publish the
    results to the /components/vision NetworkTable."""
    import cscore as cs
    from networktables import NetworkTables
    from time import sleep

    vision_table = NetworkTables.getTable("/components/vision")

    width, height, fps = 320, 240, 25
    video_mode = (cs.VideoMode.PixelFormat.kMJPEG, width, height, fps)

    front_cam = cs.UsbCamera("frontcam", 0)
    front_cam.setVideoMode(*video_mode)
    front_server = cs.MjpegServer("frontcamserver", 8082)
    front_server.setSource(front_cam)

    back_cam = cs.UsbCamera("backcam", 1)
    back_cam.setVideoMode(*video_mode)
    back_server = cs.MjpegServer("backcamserver", 8081)
    back_server.setSource(back_cam)

    # Sink grabs raw frames from the front camera; source publishes the
    # processed frames on its own MJPEG server.
    sink = cs.CvSink("cvsink")
    sink.setSource(front_cam)

    source = cs.CvSource("cvsource", *video_mode)
    source_server = cs.MjpegServer("cvhttpserver", 8083)
    source_server.setSource(source)

    # Set the initial exposure of the front camera; it may take a while for
    # the exposure setting to have an effect.
    front_cam.setExposureManual(0)

    # Images are big -- preallocate the buffer that grabFrame() fills.
    frame = np.zeros(shape=(height, width, 3), dtype=np.uint8)

    while True:
        if vision_table.getBoolean("vision_mode", True):
            front_cam.setExposureManual(0)
            time, frame = sink.grabFrame(frame)
            if time == 0:
                # Grab failed; report the error through the output stream.
                source.notifyError(sink.getError())
            else:
                x, img, num_targets = find_target(frame)
                if num_targets > 0:
                    vision_table.putNumber("x", x)
                    vision_table.putNumber("time", time)
                    vision_table.putNumber("num_targets", num_targets)
                source.putFrame(img)
        else:
            # Driver view: auto exposure makes the image usable for humans.
            front_cam.setExposureAuto()
            sleep(1)  # avoid pegging the CPU
Beispiel #2
0
    def __init__(self):
        """Set up the piston driver camera and, when a secondary camera is
        configured, the light-ring vision camera plus its processing pipeline."""
        self.nt = NetworkTable.getTable('/camera')

        # Driver camera watching the piston mechanism.
        self.piston_cam = cs.UsbCamera('Piston Cam', 0)
        self.piston_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG,
                                     160, 120, 35)
        self.piston_cam.setExposureAuto()
        self.piston_cam.getProperty('backlight_compensation').set(5)

        self.piston_server = cs.MjpegServer('httpserver', 1181)
        self.piston_server.setSource(self.piston_cam)

        if not self.secondary_cam:
            return

        # Vision camera with the LED light ring.
        self.light_ring_cam = cs.UsbCamera('Light Ring Cam', 0)
        self.light_ring_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG,
                                         320, 240, 20)

        # This only seems to affect automatic exposure mode -> a higher value
        # means it lets more light in when facing a big light.
        self.light_ring_cam.getProperty('backlight_compensation').set(5)

        # Image processing pipeline: sink grabs frames, source publishes them.
        self.cvsink = cs.CvSink('cvsink')
        self.cvsink.setSource(self.light_ring_cam)
        self.cvsource = cs.CvSource('cvsource',
                                    cs.VideoMode.PixelFormat.kMJPEG,
                                    320, 240, 20)

        if self.STREAM_CV:
            self.cv_stream = cs.MjpegServer('cv stream', 1183)
            self.cv_stream.setSource(self.cvsource)

        # Preallocated mat for frame grabbing.
        self.img = np.zeros(shape=(320, 240, 3), dtype=np.uint8)

        self.processor = ImageProcessor()
    def _init_usb_cameras(self):
        """Create a UsbCamera for every configured device and cache it by name."""
        for device_name, device in self.usb_cameras_info.items():
            self.usb_cameras[device_name] = cscore.UsbCamera(
                name=device_name, dev=device.dev)
def main():
    """Capture 640x480 frames from /dev/video0, draw a green rectangle on
    each, and publish the result to the dashboard as the "Rectangle" stream."""
    server = cscore.CameraServer.getInstance()
    server.enableLogging()

    # camera at /dev/video0
    cam = cscore.UsbCamera("main", 0)
    camera = server.startAutomaticCapture(camera=cam)
    camera.setResolution(640, 480)

    # Sink captures frames from the camera; the output stream sends the
    # annotated images back to the dashboard.
    sink = server.getVideo()
    output = server.putVideo("Rectangle", 640, 480)

    # Allocating new images is very expensive -- reuse one buffer.
    img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)

    while True:
        # Grab a frame into the preallocated buffer; on error, forward the
        # message to the output stream and skip this iteration.
        time, img = sink.grabFrame(img)
        if time == 0:
            output.notifyError(sink.getError())
            continue

        # Overlay the rectangle and publish the frame.
        cv2.rectangle(img, (100, 100), (400, 400), (0, 255, 0), 5)
        output.putFrame(img)
Beispiel #5
0
def main():
    """Serve the raw camera stream on :8081 and a vertically flipped copy of
    each frame on :8082."""
    camera = cs.UsbCamera("usbcam", 0)
    camera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)

    raw_server = cs.MjpegServer("httpserver", 8081)
    raw_server.setSource(camera)
    print("mjpg server listening at http://0.0.0.0:8081")

    # Sink grabs frames for processing; source publishes the flipped frames.
    sink = cs.CvSink("cvsink")
    sink.setSource(camera)

    source = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG,
                         320, 240, 30)
    processed_server = cs.MjpegServer("cvhttpserver", 8082)
    processed_server.setSource(source)
    print("OpenCV output mjpg server listening at http://0.0.0.0:8082")

    # Preallocated buffers: one for the grabbed frame, one for the flip.
    test = np.zeros(shape=(240, 320, 3), dtype=np.uint8)
    flip = np.zeros(shape=(240, 320, 3), dtype=np.uint8)

    while True:
        time, test = sink.grabFrame(test)
        if time == 0:
            print("error:", sink.getError())
            continue

        print("got frame at time", time, test.shape)

        # Flip top-to-bottom into the preallocated buffer and publish it.
        cv2.flip(test, flipCode=0, dst=flip)
        source.putFrame(flip)
Beispiel #6
0
def loop():  # pragma: no cover
    """Vision loop: stream the front camera, run find_target() on each frame,
    and publish the detection results to the /components/vision NetworkTable.

    NOTE(review): the exposure-setting sequence before the main loop is a
    deliberate workaround (see comments) -- do not reorder those statements.
    """
    import cscore as cs
    from networktables import NetworkTables
    from time import sleep, time as now

    nt = NetworkTables.getTable("/components/vision")

    width = 320
    height = 240
    fps = 25
    # Argument tuple shared by setVideoMode() and CvSource: (format, w, h, fps).
    videomode = cs.VideoMode.PixelFormat.kMJPEG, width, height, fps

    #front_cam_id = "/dev/v4l/by-id/usb-046d_C922_Pro_Stream_Webcam_5FC7BADF-video-index0"
    # Stable /dev/v4l/by-id path so the camera survives /dev/video* renumbering.
    front_cam_id = "/dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920_332E8C9F-video-index0"

    front_camera = cs.UsbCamera("frontcam", front_cam_id)
    front_camera.setVideoMode(*videomode)

    front_cam_server = cs.MjpegServer("frontcamserver", 5803)
    front_cam_server.setSource(front_camera)

    # Sink grabs raw frames; source publishes processed frames on port 5804.
    cvsink = cs.CvSink("cvsink")
    cvsink.setSource(front_camera)

    cvSource = cs.CvSource("cvsource", *videomode)
    cvmjpegServer = cs.MjpegServer("cvhttpserver", 5804)
    cvmjpegServer.setSource(cvSource)

    # Images are big. Preallocate an array to fill the image with.
    frame = np.zeros(shape=(height, width, 3), dtype=np.uint8)

    # Set the exposure to something bogus, in an attempt to work around a kernel bug when restarting the vision code?
    front_camera.setExposureAuto()
    sleep(1)
    cvsink.grabFrame(frame)

    # Set the exposure of the front camera to something usable.
    front_camera.setExposureManual(0)
    sleep(1)

    while True:
        time, frame = cvsink.grabFrame(frame)
        if time == 0:
            # We got an error; report it through the output stream.
            cvSource.notifyError(cvsink.getError())
        else:
            # Time the processing step so "dt" can be published with the result.
            t = now()
            x, img, num_targets, target_sep = find_target(frame)
            if num_targets > 0:
                nt.putNumber("x", x)
                nt.putNumber("time", t)
                nt.putNumber("num_targets", num_targets)
                nt.putNumber("target_sep", target_sep)
                nt.putNumber("dt", now() - t)
                # Flush immediately so the robot sees results with low latency.
                NetworkTables.flush()
            cvSource.putFrame(img)
Beispiel #7
0
def main():
    """Print device paths, properties, and supported video modes for every
    attached USB camera."""
    # Human-readable names for the pixel formats we recognize.
    format_names = {
        cs.VideoMode.PixelFormat.kMJPEG: "MJPEG",
        cs.VideoMode.PixelFormat.kYUYV: "YUYV",
        cs.VideoMode.PixelFormat.kRGB565: "RGB565",
    }
    kinds = cs.VideoProperty.Kind

    for caminfo in cs.UsbCamera.enumerateUsbCameras():
        print("%s: %s (%s)" % (caminfo.dev, caminfo.path, caminfo.name))
        if caminfo.otherPaths:
            print("Other device paths:")
            for path in caminfo.otherPaths:
                print(" ", path)

        camera = cs.UsbCamera("usbcam", caminfo.dev)

        print("Properties:")
        for prop in camera.enumerateProperties():
            kind = prop.getKind()
            if kind == kinds.kBoolean:
                print(
                    prop.getName(),
                    "(bool) value=%s default=%s" % (prop.get(), prop.getDefault()),
                )
            elif kind == kinds.kInteger:
                print(
                    prop.getName(),
                    "(int): value=%s min=%s max=%s step=%s default=%s"
                    % (
                        prop.get(),
                        prop.getMin(),
                        prop.getMax(),
                        prop.getStep(),
                        prop.getDefault(),
                    ),
                )
            elif kind == kinds.kString:
                print(prop.getName(), "(string):", prop.getString())
            elif kind == kinds.kEnum:
                print(prop.getName(), "(enum): value=%s" % prop.get())
                for i, choice in enumerate(prop.getChoices()):
                    if choice:
                        print("    %s: %s" % (i, choice))

        print("Video Modes")
        for mode in camera.enumerateVideoModes():
            print("  PixelFormat:", format_names.get(mode.pixelFormat, "Unknown"))
            print("   Width:", mode.width)
            print("   Height:", mode.height)
            print("   FPS: ", mode.fps)
Beispiel #8
0
def main():
    """Serve a single 320x240 USB camera as MJPEG on port 5806 until killed."""
    logger.info(
        "The camera server started! you made it this far i'm proud of you")
    try:
        camera = cscore.UsbCamera("usbcam0", 0)
        camera.setVideoMode(cscore.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)
        camera.setBrightness(40)
        server = cscore.MjpegServer(name="httpserver0", port=5806)
        server.setSource(camera)
        # cscore serves frames on background threads; keep the process alive.
        while True:
            time.sleep(0.1)
    except Exception as e:
        print("Camera server is dead :(", e)
Beispiel #9
0
def main():
    """Serve one of several USB cameras, switching whenever the
    'Selected Camera' preference on NetworkTables changes."""
    server_instance = cscore.CameraServer.getInstance()
    server_instance.enableLogging()

    prefs = NetworkTables.getTable("Preferences")

    width = int(prefs.getNumber('Camera Res Width', 320))
    height = int(prefs.getNumber('Camera Res Height', 200))
    frame_rate = int(prefs.getNumber('Camera FPS', 30))

    # Build every configured camera up front with the initial settings.
    cams = []
    for cam_name, cam_dev in cameras:
        cam = cscore.UsbCamera(name=cam_name, dev=cam_dev)
        cam.setResolution(width, height)
        cam.setFPS(frame_rate)
        cams.append(cam)

    server = server_instance.addServer(name='camera_server')

    active = int(prefs.getNumber('Selected Camera', 0))
    if active >= len(cams):
        active = 0
    server.setSource(cams[active])

    while True:
        requested = int(prefs.getNumber('Selected Camera', 0))

        if requested < len(cams) and requested != active:
            # Re-read the preferences so resolution/FPS changes take effect
            # on the newly selected camera.
            width = int(prefs.getNumber('Camera Res Width', 320))
            height = int(prefs.getNumber('Camera Res Height', 200))
            frame_rate = int(prefs.getNumber('Camera FPS', 30))

            cam = cams[requested]
            cam.setResolution(width, height)
            cam.setFPS(frame_rate)

            server.setSource(cam)
            active = requested

        time.sleep(0.02)
    def add_camera(self, name, device, active=True):
        '''Add a single camera and set it to active/disabled as indicated.

        Cameras are referenced by their name, so pick something unique.
        Creates the UsbCamera, starts automatic capture, configures
        resolution/FPS/focus, and wraps the feed in a ThreadedCamera reader.
        '''

        camera = cscore.UsbCamera(name, device)
        # remember the camera object, in case we need to adjust it (eg the exposure)
        self.cameras[name] = camera

        self.camera_server.startAutomaticCapture(camera=camera)

        # PS Eye camera needs to have its pixelformat set
        # camera.setPixelFormat(cscore.VideoMode.PixelFormat.kYUYV)

        camera.setResolution(int(self.image_width), int(self.image_height))
        camera.setFPS(int(self.camera_fps))

        # keep the camera open for faster switching
        camera.setConnectionStrategy(
            cscore.VideoSource.ConnectionStrategy.kKeepOpen)

        # set the camera for no auto focus, focus at infinity
        # TODO: different cameras have different properties
        # NOTE: order does matter
        VisionServer.set_camera_property(camera, 'focus_auto', 0)
        VisionServer.set_camera_property(camera, 'focus_absolute', 0)

        # Logitech does not like having exposure_auto_priority on when the light is poor
        #  slows down the frame rate
        # VisionServer.set_camera_property(camera, 'exposure_auto_priority', 0)

        mode = camera.getVideoMode()
        logging.info("camera '%s' pixel format = %s, %dx%d, %dFPS", name,
                     mode.pixelFormat, mode.width, mode.height, mode.fps)

        # Threaded reader keeps grabbing frames so camera switching is fast.
        reader = ThreadedCamera(
            self.camera_server.getVideo(camera=camera)).start()
        self.video_readers[name] = reader
        if active:
            self.current_reader = reader
            self.active_camera = name

        return
    def add_camera(self, name, device, active=True):
        """Register a USB camera under *name* and start capturing from it.

        Cameras are referenced by their name, so pick something unique.
        The sink of an inactive camera is disabled to save CPU.
        """
        camera = cscore.UsbCamera(name, device)
        self.camera_server.startAutomaticCapture(camera=camera)
        camera.setResolution(self.image_width, self.image_height)
        camera.setFPS(self.camera_fps)

        sink = self.camera_server.getVideo(camera=camera)
        self.camera_feeds[name] = sink

        if not active:
            # Not the active camera: disable its sink to save CPU.
            sink.setEnabled(False)
            return

        self.current_camera = sink
        self.active_camera = name
        self.nt_active_camera = name
Beispiel #12
0
    def add_camera(self, name, device, active=True):
        """Create a USB camera called *name* and register its video sink.

        Cameras are referenced by their name, so pick something unique.
        All sinks stay enabled so switching between cameras is fast.
        """
        cam = cscore.UsbCamera(name, device)
        # Remember the camera object in case we need to adjust it later
        # (e.g. the exposure).
        self.cameras[name] = cam

        self.camera_server.startAutomaticCapture(camera=cam)

        cam.setResolution(int(self.image_width), int(self.image_height))
        cam.setFPS(int(self.camera_fps))

        # Keep the connection open for faster camera switching.
        cam.setConnectionStrategy(
            cscore.VideoSource.ConnectionStrategy.kKeepOpen)

        # Disable auto focus, focus at infinity.
        # NOTE: the order of these two property writes matters.
        VisionServer.set_camera_property(cam, 'focus_auto', 0)
        VisionServer.set_camera_property(cam, 'focus_absolute', 0)

        current_mode = cam.getVideoMode()
        logging.info("camera '%s' pixel format = %s, %dx%d, %dFPS", name,
                     current_mode.pixelFormat, current_mode.width,
                     current_mode.height, current_mode.fps)

        video_sink = self.camera_server.getVideo(camera=cam)
        self.video_sinks[name] = video_sink
        if active:
            self.current_sink = video_sink
            self.active_camera = name
Beispiel #13
0
    def add_camera(self, name, device, active=True):
        """Create and register a USB camera under *name*.

        Cameras are referenced by their name, so pick something unique.
        """
        camera = cscore.UsbCamera(name, device)
        # Keep a handle so camera settings (e.g. exposure) can be adjusted later.
        self.cameras[name] = camera

        self.camera_server.startAutomaticCapture(camera=camera)
        camera.setResolution(int(self.image_width), int(self.image_height))
        camera.setFPS(int(self.camera_fps))

        sink = self.camera_server.getVideo(camera=camera)
        self.video_sinks[name] = sink

        if not active:
            # Not the active camera: disable its sink to save CPU.
            sink.setEnabled(False)
            return

        self.current_sink = sink
        self.active_camera = name
import cscore as cs
import time

# One camera/server pair per stream: gear view on :5800, shooter on :5801.
# Cameras are addressed by stable udev aliases rather than /dev/video*.
gear_cam = cs.UsbCamera("gear", "/dev/gear_camera")
gear_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
gear_server = cs.MjpegServer("gear_http_server", 5800)
gear_server.setSource(gear_cam)

shooter_cam = cs.UsbCamera("shooter", "/dev/shooter_camera")
shooter_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
shooter_server = cs.MjpegServer("shooter_http_server", 5801)
shooter_server.setSource(shooter_cam)

print("Server started on ports 5800 and 5801")

# cscore serves frames on background threads; just keep the process alive.
while True:
    time.sleep(0.1)
Beispiel #15
0
def main():

    if not hasattr(cs, 'UsbCamera'):
        print(
            "ERROR: This platform does not currently have cscore UsbCamera support"
        )
        exit(1)

    if len(sys.argv) < 3:
        _usage()
        exit(1)

    try:
        cid = int(sys.argv[1])
    except ValueError:
        print("ERROR: Expected first argument to be a number, got '%s'" %
              sys.argv[1])
        _usage()
        exit(2)

    camera = cs.UsbCamera("usbcam", cid)

    # Set prior to connect
    argc = 2
    propName = None

    for arg in sys.argv[argc:]:
        argc += 1
        if arg == '--':
            break

        if propName is None:
            propName = arg
        else:
            try:
                propVal = int(arg)
            except ValueError:
                camera.getProperty(propName).setString(arg)
            else:
                camera.getProperty(propName).set(propVal)

            propName = None

    # Wait to connect
    while not camera.isConnected():
        time.sleep(0.010)

    # Set rest
    for arg in sys.argv[argc:]:
        if propName is None:
            propName = arg
        else:
            try:
                propVal = int(arg)
            except ValueError:
                camera.getProperty(propName).setString(arg)
            else:
                camera.getProperty(propName).set(propVal)

            propName = None

    # Print settings
    print("Properties:")
    for prop in camera.enumerateProperties():
        kind = prop.getKind()
        if kind == cs.VideoProperty.Kind.kBoolean:
            print(
                prop.getName(),
                "(bool) value=%s default=%s" % (prop.get(), prop.getDefault()))
        elif kind == cs.VideoProperty.Kind.kInteger:
            print(prop.getName(), "(int): value=%s min=%s max=%s step=%s default=%s" % \
            (prop.get(), prop.getMin(), prop.getMax(), prop.getStep(), prop.getDefault()))
        elif kind == cs.VideoProperty.Kind.kString:
            print(prop.getName(), "(string):", prop.getString())
        elif kind == cs.VideoProperty.Kind.kEnum:
            print(prop.getName(), "(enum): value=%s" % prop.get())
            for i, choice in enumerate(prop.getChoices()):
                if choice:
                    print("    %s: %s" % (i, choice))
Beispiel #16
0
# HSV threshold tuples are (hue, saturation, value).
# NOTE(review): the matching hsvBallLower bound is not visible here --
# presumably defined earlier in the file; confirm.
hsvBallUpper = (15, 255, 255)

# hsvTapeLower = (0, 0, 245)
# hsvTapeUpper = (255, 20, 255)

# Thresholds for the retroreflective tape targets.
hsvTapeLower = (82, 75, 176)
hsvTapeUpper = (96, 255, 255)

# Thresholds for the ground/floor.
hsvGroundLower = (0, 0, 192)
hsvGroundUpper = (255, 70, 255)
# set the configFile
configFile = "/boot/frc.json"
cameraConfigs = []

# config the camera
# NOTE(review): width/height are not defined in this section -- they must
# come from elsewhere in the file; verify before reuse.
ballCamera = cs.UsbCamera("ballcam", "/dev/video0")
ballCamera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, width, height, 30)

groundCamera = cs.UsbCamera("groundcam", "/dev/video1")
groundCamera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, width, height, 30)

# set the mjpegServer
# Server initial: ball camera on :1181, ground camera on :8181.

ballServer = cs.MjpegServer("ballSource", 1181)
ballServer.setSource(ballCamera)

groundServer = cs.MjpegServer("groundSource", 8181)
groundServer.setSource(groundCamera)

print("ball server listening at http://0.0.0.0:1181")
# Best-effort camera setup: a missing/broken camera should not kill the
# program, so failures are reported and execution continues.
try:
    import cscore
    camera = cscore.UsbCamera("usbcam", 0)
    camera.setVideoMode(cscore.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)

    mjpegServer = cscore.MjpegServer("httpserver", 8081)
    mjpegServer.setSource(camera)
except Exception as e:
    # Catch Exception rather than BaseException so KeyboardInterrupt and
    # SystemExit still propagate instead of being swallowed here.
    print("cscore error")
    print(e)
def mainloop():
    """Main vision and telemetry loop for the VMX-pi coprocessor.

    Opens a timestamped run log, loads the VMX-pi HAL, streams raw video on
    port 8081 and contour-annotated video on port 8082, records annotated
    frames to an AVI file, and publishes IMU readings and cube-detection
    results to NetworkTables until ESC is pressed in the preview window.

    NOTE(review): depends on module-level names defined elsewhere in this
    file (cs, np, cv, NetworkTables, imp, sys, datetime, time, sleep,
    imgwidth, imgheight, frames_per_sec, writeVideo, detect_contours).
    """

    #Get current time as a string (year/month/day/hour/minute, no padding)
    currentTime = time.localtime(time.time())
    timeString = str(currentTime.tm_year) + str(currentTime.tm_mon) + str(currentTime.tm_mday) + str(currentTime.tm_hour) + str(currentTime.tm_min)

    #Open a log file
    logFilename = '/data/Logs/Run_Log_' + timeString + '.txt'
    log_file = open(logFilename, 'w')
    log_file.write('Run started on %s.\n' % datetime.datetime.now())
    log_file.write('')

    #Load VMX module from its installed location
    vmxpi = imp.load_source('vmxpi_hal_python', '/usr/local/lib/vmxpi/vmxpi_hal_python.py')
    vmx = vmxpi.VMXPi(False,50)
    if vmx.IsOpen() is False:
        log_file.write('Error:  Unable to open VMX Client.\n')
        log_file.write('\n')
        log_file.write('        - Is pigpio (or the system resources it requires) in use by another process?\n')
        log_file.write('        - Does this application have root privileges?')
        log_file.close()
        sys.exit(0)

    #Open connection to USB camera (video device 0)
    camera = cs.UsbCamera("usbcam", 0)
    camera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, imgwidth, imgheight, frames_per_sec)

    #Start raw Video Streaming Server
    mjpegServer = cs.MjpegServer("httpserver", 8081)
    mjpegServer.setSource(camera)

    #Define video sink (grabs frames for processing)
    cvsink = cs.CvSink("cvsink")
    cvsink.setSource(camera)

    #Define video source (publishes the processed frames)
    cvsource = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG, imgwidth, imgheight, frames_per_sec)

    #Start processed Video Streaming Server
    cvMjpegServer = cs.MjpegServer("cvhttpserver", 8082)
    cvMjpegServer.setSource(cvsource)

    #Create blank image (preallocated buffer reused by grabFrame)
    img = np.zeros(shape=(imgheight, imgwidth, 3), dtype=np.uint8)

    #Set video codec and create VideoWriter
    fourcc = cv.VideoWriter_fourcc(*'XVID')
    videoFilename = '/data/Match Videos/robotcam-' + timeString + '.avi'
    imgout = cv.VideoWriter(videoFilename,fourcc,20.0,(320,240))

    #Connect NetworkTables
    NetworkTables.initialize(server="10.41.21.2")
    navxTable = NetworkTables.getTable("navx")
    visionTable = NetworkTables.getTable("vision")
    smartDash = NetworkTables.getTable("SmartDashboard")

    #Reset yaw gyro
    # NOTE(review): IsCalibrating is referenced but not called -- a bound
    # method is always truthy, so ZeroYaw() never runs. This likely should
    # be vmx.getAHRS().IsCalibrating(); confirm against the vmxpi API.
    if not vmx.getAHRS().IsCalibrating:
        vmx.getAHRS().ZeroYaw()

    #Reset displacement
    vmx.getAHRS().ResetDisplacement()

    #Start main processing loop
    while (True):

        #Grab a frame from video feed
        video_timestamp, img = cvsink.grabFrame(img)

        #Check for frame error
        if video_timestamp == 0:
            log_file.write('Video error: \n')
            log_file.write(cvsink.getError())
            log_file.write('\n')
            sleep (float(frames_per_sec * 2) / 1000.0)
            continue

        #Find contours in image
        img_contours, cubeDistance, cubeAngle, cubeHeight = detect_contours(img)

        #Put contour info on image (distance of -1 means no cube detected)
        if cubeDistance != -1:
            distanceString = 'Distance: %d' % cubeDistance
            cv.putText(img_contours, distanceString, (10,15), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
            angleString = 'Offset angle: %d' % cubeAngle
            cv.putText(img_contours, angleString, (10,30), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)

        #Put IMU info on image
        angleString = 'Angle: %.2f' % vmx.getAHRS().GetAngle()
        cv.putText (img, angleString, (30,70), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        yVelocityString = 'Y Vel:  %.2f' % vmx.getAHRS().GetVelocityY()
        cv.putText (img, yVelocityString, (30,90), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        xVelocityString = 'X Vel: %.2f' % vmx.getAHRS().GetVelocityX()
        cv.putText (img, xVelocityString, (30,110), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        yDispString = 'Y Disp: %.2f' % vmx.getAHRS().GetDisplacementY()
        cv.putText (img, yDispString, (30,130), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)
        xDispString = 'X Disp: %.2f' % vmx.getAHRS().GetDisplacementX()
        cv.putText (img, xDispString, (30,150), cv.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1,cv.LINE_AA)

        #Update network tables
        navxTable.putNumber("SensorTimestamp", vmx.getAHRS().GetLastSensorTimestamp())
        navxTable.putNumber("DriveAngle", vmx.getAHRS().GetAngle())
        navxTable.putNumber("YVelocity", vmx.getAHRS().GetVelocityY())
        navxTable.putNumber("XVelocity", vmx.getAHRS().GetVelocityX())
        navxTable.putNumber("YDisplacement", vmx.getAHRS().GetDisplacementY())
        navxTable.putNumber("XDisplacement", vmx.getAHRS().GetDisplacementX())
        visionTable.putNumber("CubeAngle", cubeAngle)
        visionTable.putNumber("CubeDistance", cubeDistance)
        smartDash.putNumber("DriveAngle", vmx.getAHRS().GetAngle())
        smartDash.putNumber("CubeAngle", cubeAngle)
        smartDash.putNumber("CubeDistance", cubeDistance)

        #Show image
        cv.imshow('My Webcam', img_contours)

        #Put frame in video stream
        cvsource.putFrame(img_contours)

        #Write image to file
        if writeVideo:
            imgout.write(img_contours)

        #Check for stop code from robot (ESC key in the preview window)
        if cv.waitKey(1) == 27:
            break
        #robotStop = visionTable.getNumber("RobotStop", 0)
        #if robotStop == 1:
        #    break

    #Close all open windows
    cv.destroyAllWindows()

    #Close video file
    imgout.release()

    #Close the log file
    log_file.write('Run stopped on %s.' % datetime.datetime.now())
def main():
    """Locate the two retroreflective targets nearest the image center via
    HSV thresholding and publish the slider's offset from center (inches)
    to SmartDashboard.

    The H/S/V bounds are read from SmartDashboard every frame so they can be
    tuned live. Fix vs. original: the final cvSource.putFrame(mask) call was
    removed -- cvSource is never created (its setup is commented out below),
    so that line raised a NameError on the first processed frame.
    """
    # setup up logging
    logging.basicConfig(level=logging.DEBUG)

    # connect to the roborio network tables
    NetworkTables.initialize(server="192.168.1.106")

    # get the table
    sd = NetworkTables.getTable("SmartDashboard")

    # initialize some variables
    width = 640
    height = 480
    min_area = 100
    centerX = width // 2
    distance_between_targets = 11.5
    lX = lY = rX = rY = 0

    # set up the camera
    camServer = CameraServer.getInstance()
    camera = cs.UsbCamera("rPi Camera 0", 0)
    camera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, width, height, 30)

    # mjpegServer = cs.MjpegServer("httpserver", 8081)
    # mjpegServer.setSource(camera)

    cvsink = cs.CvSink("cvsink")
    cvsink.setSource(camera)

    # cvSource = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG, width, height, 30)
    # cvMjpegServer = cs.MjpegServer("cvhttpserver", 8082)
    # cvMjpegServer.setSource(cvSource)

    outputStream = camServer.putVideo("Mask", width, height)

    # initialize frame holders to save time
    frame = np.zeros(shape=(height, width, 3), dtype=np.uint8)
    blurred = np.zeros(shape=(height, width, 3), dtype=np.uint8)
    hsv = np.zeros(shape=(height, width, 3), dtype=np.uint8)
    mask = np.zeros(shape=(height, width, 3), dtype=np.uint8)

    while True:
        time, frame = cvsink.grabFrame(frame)
        if time == 0:
            # frame grab failed; just try again
            continue

        # blur frame and convert it to hsv color space
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

        # get the bounds from the dashboard
        lH = sd.getNumber("H-Lower", 0)
        uH = sd.getNumber("H-Upper", 180)

        lS = sd.getNumber("S-Lower", 0)
        uS = sd.getNumber("S-Upper", 255)

        lV = sd.getNumber("V-Lower", 0)
        uV = sd.getNumber("V-Upper", 255)

        # construct a mask for the bounds then erode and dilate it
        mask = cv2.inRange(hsv, (lH, lS, lV), (uH, uS, uV))
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        # find contours in the mask
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        # find closest contours to center on each side
        min_left = 0
        min_right = width
        left_contour = None
        right_contour = None

        # loop through contours
        for c in cnts:
            area = cv2.contourArea(c)

            # look only at contours larger than the min_area
            if area > min_area:
                M = cv2.moments(c)
                cX = int((M["m10"] / M["m00"]))
                cY = int((M["m01"] / M["m00"]))

                ((x, y), radius) = cv2.minEnclosingCircle(c)

                # draw a blue circle around the contour
                cv2.circle(frame, (int(x), int(y)), int(radius), (255, 255, 0),
                           1)

                # find the contours closest to the center x-value of the frame
                if cX > centerX:
                    if cX < min_right:
                        min_right = cX
                        right_contour = c
                else:
                    if cX > min_left:
                        min_left = cX
                        left_contour = c

        # draw a yellow circle around the nearest contours
        if right_contour is not None:
            ((rX, rY), radius) = cv2.minEnclosingCircle(right_contour)
            cv2.circle(frame, (int(rX), int(rY)), int(radius), (0, 255, 255),
                       1)

        if left_contour is not None:
            ((lX, lY), radius) = cv2.minEnclosingCircle(left_contour)
            cv2.circle(frame, (int(lX), int(lY)), int(radius), (0, 255, 255),
                       1)

        # interpolate the distance between the centers of the two nearest
        # contours, which are known to be 11.5 inches apart
        if lX > 0 and rX > 0:
            try:
                distance = interp1d([lX, rX], [0, distance_between_targets])
                distance_from_left = distance(centerX)
                offset_from_center = (distance_between_targets /
                                      2) - distance_from_left

                direction = "left" if offset_from_center > 0 else "right" if offset_from_center < 0 else "center"
                print("The slider is {:.2f} inches {} of center".format(
                    abs(offset_from_center), direction))

                sd.putNumber("Offset", offset_from_center)
            except (NameError, ValueError) as e:
                print("Error in interpolation", e)

        # draw a center line
        cv2.line(frame, (centerX, 0), (centerX, height), (255, 255, 255), 1)

        outputStream.putFrame(mask)
Beispiel #20
0
import numpy as np
import cv2
import time
import collections

from networktables import NetworkTables
import cscore as cs

from cvsink_thread import CvSinkThread, crop
from detect_target import detect_target

# Front-facing USB camera on device 0, streamed as MJPEG at 480x270 @ 30 FPS.
front_cam = cs.UsbCamera("front_cam", 0)
front_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 480, 270, 30)
front_cam.setBrightness(40)
# Low manual exposure -- presumably tuned for target detection; confirm on hardware.
front_cam.setExposureManual(2)

# OpenCV sink used to pull raw frames from the camera for processing.
front_cam_sink = cs.CvSink("front_cam_sink")
front_cam_sink.setSource(front_cam)

# CvSource + MjpegServer pair republishes frames over HTTP on port 8083,
# downscaled to 320x240 @ 8 FPS with 50% JPEG compression to save bandwidth.
mjpeg_source = cs.CvSource("mjpeg_source", cs.VideoMode.PixelFormat.kMJPEG,
                           320, 240, 8)
mjpeg_server = cs.MjpegServer("mjpeg_server", 8083)
mjpeg_server.setSource(mjpeg_source)
mjpeg_server.setDefaultCompression(50)
mjpeg_server.setCompression(50)

# Background frame-grabbing thread; (270, 480, 3) is the preallocated buffer
# shape (rows=height, cols=width, BGR channels) matching the video mode above.
thread = CvSinkThread(front_cam_sink, (270, 480, 3))

def main_loop():
    # grab frame
Beispiel #21
0
    def __init__(self, server, settings, size=(320, 240), fps=15):
        """Open and configure a USB camera described by a settings dict.

        The settings dict is expected to provide: "name", "path",
        "destinations" (with "streamVideo", "processVideo", "switchIndex"),
        "settings", "cameraMatrix", "distortionCoefficients", and
        "properties" (optionally "width"/"height", "flip", "fps").

        :param server: NOTE(review): not used in this constructor -- confirm
            whether callers rely on it being accepted.
        :param settings: camera configuration dict (see above).
        :param size: fallback (width, height) used when properties omit them.
        :param fps: fallback frame rate; overridden by properties["fps"].
        """
        self.name = settings["name"]
        self.path = settings["path"]

        self.logger = logging.getLogger("camera(" + self.name + ")")

        # Route the camera: streaming, vision processing, both, or neither.
        if (settings["destinations"]["streamVideo"]
                and settings["destinations"]["processVideo"]):
            self.logger.warning(
                "{0} is configured to stream and process".format(self.name))
            self.destination = "BOTH"
        elif (settings["destinations"]["streamVideo"]):
            self.destination = "STREAM"
        elif (settings["destinations"]["processVideo"]):
            self.destination = "VISION"
        else:
            self.destination = "NONE"

        self.switchIndex = settings["destinations"]["switchIndex"]

        self.settings = settings["settings"]
        self.logger.debug("settings " + str(self.settings))

        # Calibration data (OpenCV-style camera matrix / distortion vector).
        self.cameraMatrix = np.array(settings["cameraMatrix"])
        self.distortionCoeffients = np.array(
            settings["distortionCoefficients"])

        # Resolution: explicit properties win over the size default.
        self.properties = settings["properties"]
        if ("width" in self.properties and "height" in self.properties):
            self.width = self.properties["width"]
            self.height = self.properties["height"]
        else:
            self.width, self.height = size

        self.flip = False
        if ("flip" in self.properties and self.properties["flip"]):
            self.flip = True

        if ("fps" in self.properties):
            fps = self.properties["fps"]

        # initialize the camera
        self.camera = cscore.UsbCamera(self.name, self.path)
        # keep the camera open for faster switching
        self.camera.setConnectionStrategy(
            cscore.VideoSource.ConnectionStrategy.kKeepOpen)

        self.camera.setResolution(self.width, self.height)
        self.camera.setFPS(int(fps))

        # Dump the driver-reported V4L2 control list for debugging.
        self.logger.debug(
            "Settings:\n" +
            subprocess.run(shlex.split("v4l2-ctl -d " + self.path + " -L"),
                           capture_output=True).stdout.decode("utf-8").strip())

        self.configSettings()

        mode = self.camera.getVideoMode()
        self.logger.info(
            "{0} started with pixelFormat:{1}, at {2}x{3} {4}FPS".format(
                self.name, mode.pixelFormat, mode.width, mode.height,
                mode.fps))

        # Frame sink plus a preallocated BGR buffer for grabs.
        self.sink = cscore.CvSink("sink_" + self.name)
        self.sink.setSource(self.camera)
        self.cameraFrame = np.zeros((self.height, self.width, 3), np.uint8)
        self.frameTime = 0
        self.stopped = False
        self.wasConnected = self.camera.isConnected()
Beispiel #22
0
    def startAutomaticCapture(self,
                              *,
                              dev=None,
                              name=None,
                              path=None,
                              camera=None):
        """Start automatically capturing images to send to the dashboard.

        Call this method to see a camera feed on the dashboard.  If you also
        want to perform vision processing on the roboRIO, use
        :meth:`getVideo` to get access to the camera images.

        :param dev: If specified, the device number to use
        :param name: If specified, the name to use for the camera (dev must be specified)
        :param path: If specified, device path (e.g. "/dev/video0") of the camera
        :param camera: If specified, an existing camera object to use

        :returns: USB Camera object, or the camera argument
        :rtype: :class:`cscore.VideoSource` object

        All arguments must be passed as keywords; the accepted combinations
        are: no args / dev / dev, name / name, path / camera.

        With no arguments, the first call creates a USB Camera from device 0;
        subsequent calls increment the device number (e.g. 1, 2, etc).

        .. note:: USB Cameras are not available on all platforms. If it is not
                  available on your platform, :exc:`.VideoException` is thrown
        """
        if camera is None:
            # We must build the camera ourselves, which needs USB support.
            if not hasattr(cscore, 'UsbCamera'):
                raise VideoException(
                    "USB Camera support not available on this platform!")

            # Pick the UsbCamera constructor argument from whichever
            # selector was supplied.
            if dev is not None:
                assert path is None
                selector = dev
            elif path is not None:
                assert name is not None
                selector = path
            else:
                # No selector given: hand out the next default device
                # number.  Get-and-increment happens under the lock so it
                # is atomic.
                with self._mutex:
                    selector = self._defaultUsbDevice
                    self._defaultUsbDevice += 1

            if name is None:
                name = 'USB Camera %d' % selector

            camera = cscore.UsbCamera(name, selector)
        else:
            # An existing camera was handed in; no selector may be mixed in.
            assert dev is None and name is None and path is None

        # Register the camera and stand up an MJPEG server for it.
        self.addCamera(camera)
        server = self.addServer(name="serve_" + camera.getName())
        server.setSource(camera)

        return camera
def main():
    """Vision coprocessor main loop.

    Opens the VMX-pi (NavX gyro), NetworkTables, and the driver/vision
    cameras, then repeatedly: grabs frames, runs ball / floor-tape /
    vision-target detection on the vision image, publishes results and
    gyro data to NetworkTables, and optionally streams/records the
    annotated vision frame.  Exits the loop when the robot sets
    "RobotStop", or if the vision camera or NetworkTables never connected.

    Reads module-level globals: imgWidthDriver/imgHeightDriver,
    imgWidthVision/imgHeightVision, writeVideo, and the detect_* helpers.
    """
    #Define global variables (image dimensions shared with detect_* helpers)
    global imgWidthDriver
    global imgHeightDriver
    global imgWidthVision
    global imgHeightVision

    #Define local variables
    driverCameraBrightness = 50
    visionCameraBrightness = 0
    driverFramesPerSecond = 15
    visionFramesPerSecond = 30

    #Define local flags
    networkTablesConnected = False
    driverCameraConnected = False
    visionCameraConnected = False
    foundBall = False
    foundTape = False
    foundVisionTarget = False

    #Get current time as a string (used to name the log and video files)
    currentTime = time.localtime(time.time())
    timeString = str(currentTime.tm_year) + str(currentTime.tm_mon) + str(currentTime.tm_mday) + str(currentTime.tm_hour) + str(currentTime.tm_min)

    #Open a log file
    logFilename = '/data/Logs/Run_Log_' + timeString + '.txt'
    log_file = open(logFilename, 'w')
    log_file.write('run started on %s.\n' % datetime.datetime.now())
    log_file.write('')

    #Load VMX module (NavX AHRS access); exit early if the client won't open
    vmxpi = imp.load_source('vmxpi_hal_python', '/usr/local/lib/vmxpi/vmxpi_hal_python.py')
    vmx = vmxpi.VMXPi(False, 50)
    if vmx.IsOpen() is False:
        log_file.write('Error:  Unable to open VMX Client.\n')
        log_file.write('\n')
        log_file.write('        - Is pigpio (or the system resources it requires) in use by another process?\n')
        log_file.write('        - Does this application have root privileges?')
        log_file.close()
        sys.exit(0)

    #Connect NetworkTables
    #BUGFIX: the handlers below called sys.exec_info() (no such attribute --
    #sys.exc_info) and passed it as a second argument to write(), which itself
    #raises TypeError; log the caught exception in a single string instead.
    try:
        NetworkTables.initialize(server='10.41.21.2')
        visionTable = NetworkTables.getTable("vision")
        navxTable = NetworkTables.getTable("navx")
        smartDash = NetworkTables.getTable("SmartDashboard")
        networkTablesConnected = True
        log_file.write('Connected to Networktables on 10.41.21.2 \n')
    except Exception as e:
        log_file.write('Error:  Unable to connect to Network tables.\n')
        log_file.write('Error message: %s\n' % e)

    #Navx configuration
    #BUGFIX: navxTable is unbound if the connection above failed; guard it
    if networkTablesConnected:
        navxTable.putNumber("ZeroGyro", 0)
        #navxTable.putNumber("ZeroDisplace", 0)

    #Reset yaw gyro and displacement integrator
    vmx.getAHRS().Reset()
    vmx.getAHRS().ZeroYaw()
    vmx.getAHRS().ResetDisplacement()

    #Set up a camera server
    camserv = CameraServer.getInstance()
    #BUGFIX: enableLogging was referenced without calling it (a no-op)
    camserv.enableLogging()

    #Start capturing webcam videos
    try:
        driverCameraPath = '/dev/v4l/by-path/platform-3f980000.usb-usb-0:1.5:1.0-video-index0'
        driverCamera = camserv.startAutomaticCapture(name="DriverCamera", path=driverCameraPath)
        driverCamera.setResolution(imgWidthDriver, imgHeightDriver)
        driverCamera.setBrightness(driverCameraBrightness)
        driverCamera.setFPS(driverFramesPerSecond)
        driverCameraConnected = True
        log_file.write('Connected to driver camera on ID = 0.\n')
    except Exception as e:
        log_file.write('Error:  Unable to connect to driver camera.\n')
        log_file.write('Error message: %s\n' % e)

    try:
        visionCameraPath = '/dev/v4l/by-path/platform-3f980000.usb-usb-0:1.4:1.0-video-index0'
        visionCamera = cs.UsbCamera(name="VisionCamera", path=visionCameraPath)
        visionCamera.setResolution(imgWidthVision, imgHeightVision)
        visionCamera.setBrightness(visionCameraBrightness)
        visionCamera.setFPS(visionFramesPerSecond)
        visionCameraConnected = True
    except Exception as e:
        log_file.write('Error:  Unable to connect to vision camera.\n')
        log_file.write('Error message: %s\n' % e)

    #Define video sinks (frame grabbers)
    if driverCameraConnected:
        driverSink = camserv.getVideo(name='DriverCamera')
    if visionCameraConnected:
        visionSink = cs.CvSink(name='VisionCamera')
        visionSink.setSource(visionCamera)

    #Define output stream for driver camera images
    if driverCameraConnected:
        driverOutputStream = camserv.putVideo("DriveCamera", imgWidthDriver, imgHeightDriver)

    #Define output stream for processed vision images (for testing only!)
    if visionCameraConnected:
        visionOutputStream = camserv.putVideo("VisionCamera", imgWidthVision, imgHeightVision)

    #Set video codec and create VideoWriter
    if writeVideo:
        fourcc = cv.VideoWriter_fourcc(*'XVID')
        videoFilename = '/data/Match_Videos/RobotVisionCam-' + timeString + '.mp4'
        visionImageOut = cv.VideoWriter(videoFilename, fourcc, visionFramesPerSecond, (imgWidthVision, imgHeightVision))

    #Create blank images to grab into (rows=height, cols=width, BGR channels)
    #BUGFIX: shapes were (width, height, 3), transposing the buffers
    imgDriver = np.zeros(shape=(imgHeightDriver, imgWidthDriver, 3), dtype=np.uint8)
    imgVision = np.zeros(shape=(imgHeightVision, imgWidthVision, 3), dtype=np.uint8)

    #Start main processing loop
    while True:

        #Initialize video time stamp
        visionVideoTimestamp = 0

        #Grab frames from the web cameras
        if driverCameraConnected:
            driverVideoTimestamp, imgDriver = driverSink.grabFrame(imgDriver)
        if visionCameraConnected:
            visionVideoTimestamp, imgVision = visionSink.grabFrame(imgVision)

        #Check for frame errors (grabFrame returns a 0 timestamp on error)
        visionFrameGood = True
        if visionVideoTimestamp == 0 and visionCameraConnected:
            log_file.write('Vision video error: \n')
            log_file.write(visionSink.getError())
            log_file.write('\n')
            visionFrameGood = False
            #BUGFIX: use time.sleep -- a bare "sleep" name is not in scope here
            #NOTE(review): the delay formula (fps*2 milliseconds) looks odd; confirm intent
            time.sleep(float(visionFramesPerSecond * 2) / 1000.0)
            continue

        #Put driver frame in output stream
        if driverCameraConnected:
            driverOutputStream.putFrame(imgDriver)

        #Continue processing if we have no errors
        if visionFrameGood:

            #Call detection methods
            ballX, ballY, ballRadius, ballDistance, ballAngle, ballOffset, ballScreenPercent, foundBall = detect_ball_target(imgVision)
            #tapeX, tapeY, tapeW, tapeH, tapeOffset, foundTape = detect_floor_tape(imgVision)
            visionTargetX, visionTargetY, visionTargetW, visionTargetH, visionTargetDistance, visionTargetAngle, visionTargetOffset, foundVisionTarget = detect_vision_targets(imgVision)

            #Update networktables and log file
            if networkTablesConnected:

                visionTable.putNumber("RobotStop", 0)
                visionTable.putBoolean("WriteVideo", writeVideo)

                visionTable.putNumber("BallX", round(ballX, 2))
                visionTable.putNumber("BallY", round(ballY, 2))
                visionTable.putNumber("BallRadius", round(ballRadius, 2))
                visionTable.putNumber("BallDistance", round(ballDistance, 2))
                visionTable.putNumber("BallAngle", round(ballAngle, 2))
                visionTable.putNumber("BallOffset", round(ballOffset, 2))
                visionTable.putNumber("BallScreenPercent", round(ballScreenPercent, 2))
                visionTable.putBoolean("FoundBall", foundBall)

                if foundBall:
                    log_file.write('Cargo found at %s.\n' % datetime.datetime.now())
                    log_file.write('  Ball distance: %.2f \n' % round(ballDistance, 2))
                    log_file.write('  Ball angle: %.2f \n' % round(ballAngle, 2))
                    log_file.write('  Ball offset: %.2f \n' % round(ballOffset, 2))
                    log_file.write('\n')

                #NOTE(review): foundTape can only become True if the
                #commented-out detect_floor_tape call above is re-enabled
                if foundTape:
                    visionTable.putNumber("TapeX", round(tapeX, 2))
                    visionTable.putNumber("TapeY", round(tapeY, 2))
                    visionTable.putNumber("TapeW", round(tapeW, 2))
                    visionTable.putNumber("TapeH", round(tapeH, 2))
                    visionTable.putNumber("TapeOffset", round(tapeOffset, 2))
                    visionTable.putBoolean("FoundTape", foundTape)
                    log_file.write('Floor tape found at %s.\n' % datetime.datetime.now())
                    log_file.write('  Tape offset: %.2f \n' % round(tapeOffset, 2))
                    log_file.write('\n')

                visionTable.putNumber("VisionTargetX", round(visionTargetX, 2))
                visionTable.putNumber("VisionTargetY", round(visionTargetY, 2))
                visionTable.putNumber("VisionTargetW", round(visionTargetW, 2))
                visionTable.putNumber("VisionTargetH", round(visionTargetH, 2))
                visionTable.putNumber("VisionTargetDistance", round(visionTargetDistance, 2))
                visionTable.putNumber("VisionTargetAngle", round(visionTargetAngle, 2))
                visionTable.putNumber("VisionTargetOffset", round(visionTargetOffset, 2))
                visionTable.putBoolean("FoundVisionTarget", foundVisionTarget)

                if foundVisionTarget:
                    log_file.write('Vision target found at %s.\n' % datetime.datetime.now())
                    log_file.write('  Vision target distance: %.2f \n' % round(visionTargetDistance, 2))
                    log_file.write('  Vision target angle: %.2f \n' % round(visionTargetAngle, 2))
                    log_file.write('  Vision target offset: %.2f \n' % round(visionTargetOffset, 2))
                    log_file.write('\n')

            #Draw detection overlays on the image
            if foundBall:
                cv.circle(imgVision, (int(ballX), int(ballY)), int(ballRadius), (0, 255, 0), 2) #ball
                cv.putText(imgVision, 'Distance to Ball: %.2f' %ballDistance, (320, 400), cv.FONT_HERSHEY_SIMPLEX, .75,(0, 0, 255), 2)
                cv.putText(imgVision, 'Angle to Ball: %.2f' %ballAngle, (320, 440), cv.FONT_HERSHEY_SIMPLEX, .75,(0, 0, 255), 2)
            if foundTape:
                cv.rectangle(imgVision,(tapeX,tapeY),(tapeX+tapeW,tapeY+tapeH),(100,0,255),1) #floor tape
            if foundVisionTarget:
                cv.rectangle(imgVision,(visionTargetX,visionTargetY),(visionTargetX+visionTargetW,visionTargetY+visionTargetH),(0,255,0),2) #vision targets
                cv.putText(imgVision, 'Distance to Vision: %.2f' %visionTargetDistance, (10, 400), cv.FONT_HERSHEY_SIMPLEX, .75,(0, 255, 0), 2)
                cv.putText(imgVision, 'Angle to Vision: %.2f' %visionTargetAngle, (10, 440), cv.FONT_HERSHEY_SIMPLEX, .75,(0, 255, 0), 2)

            #Put timestamp on image
            cv.putText(imgVision, str(datetime.datetime.now()), (10, 30), cv.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255), 2)

        #Update navx network table
        #BUGFIX: removed duplicated YVelocity/XVelocity writes
        if networkTablesConnected:
            navxTable.putNumber("GyroAngle", round(vmx.getAHRS().GetAngle(), 2))
            navxTable.putNumber("GyroYaw", round(vmx.getAHRS().GetYaw(), 2))
            navxTable.putNumber("GyroPitch", round(vmx.getAHRS().GetPitch(), 2))
            navxTable.putNumber("YVelocity", round(vmx.getAHRS().GetVelocityY(), 4))
            navxTable.putNumber("XVelocity", round(vmx.getAHRS().GetVelocityX(), 4))
            navxTable.putNumber("YDisplacement", round(vmx.getAHRS().GetDisplacementY(), 4))
            navxTable.putNumber("XDisplacement", round(vmx.getAHRS().GetDisplacementX(), 4))
            navxTable.putNumber("YAccel", round(vmx.getAHRS().GetWorldLinearAccelY(), 4))
            navxTable.putNumber("XAccel", round(vmx.getAHRS().GetWorldLinearAccelX(), 4))

        #Check vision network table dashboard value
        #BUGFIX: visionTable is unbound when the connection failed; guard reads
        sendVisionToDashboard = visionTable.getNumber("SendVision", 0) if networkTablesConnected else 0

        #Send vision to dashboard (for testing)
        if visionCameraConnected and sendVisionToDashboard == 1:
            visionOutputStream.putFrame(imgVision)

        #Write processed image to file
        if writeVideo and visionCameraConnected:
            visionImageOut.write(imgVision)

        #Check for gyro re-zero
        if networkTablesConnected and navxTable.getNumber("ZeroGyro", 0) == 1:
            vmx.getAHRS().Reset()
            vmx.getAHRS().ZeroYaw()
            navxTable.putNumber("ZeroGyro", 0)

        #Check for stop code from robot; also stop if we never connected
        robotStop = visionTable.getNumber("RobotStop", 0) if networkTablesConnected else 0
        if robotStop == 1 or not visionCameraConnected or not networkTablesConnected:
            break

    #Close video file
    #BUGFIX: release() was unconditional, but visionImageOut only exists
    #when writeVideo is enabled
    if writeVideo:
        visionImageOut.release()

    #Close the log file
    log_file.write('Run stopped on %s.' % datetime.datetime.now())
    log_file.close()
Beispiel #24
0
def main():
    """Stream two camera feeds over MJPEG.

    A USB "conveyor" camera is served directly, and a Pi camera (read via
    the module-level ``vs`` frame source) is pushed through a CvSource
    served as the "ball cam".

    Relies on module-level globals: vs, cscore, CameraServer,
    NetworkTables, k_NETWORKTABLES_SERVER_IP, and the k_*_CAM_* constants.
    """
    NetworkTables.initialize(server=k_NETWORKTABLES_SERVER_IP)

    # Pi camera tuning -- presumably dialed in on the robot; confirm values.
    vs.camera.brightness = 50
    vs.camera.contrast = 0
    vs.camera.saturation = 0

    cs = CameraServer.getInstance()
    cs.enableLogging()
    # BUGFIX: removed the unused duplicate `ps = CameraServer.getInstance()`
    # binding; getInstance() returns a singleton, so it served no purpose.

    # Capture from the first USB Camera on the system
    #camera0 = cs.startAutomaticCapture()
    #camera1 = cs.startAutomaticCapture()
    #camera = cs.startAutomaticCapture(name="picam",path="/dev/v4l/by-path/platform-bcm2835-codec-video-index0")
    #camera = cs.startAutomaticCapture(name="usbcam",path="/dev/v4l/by-path/platform-fd500000.pcie-pci-0000:01:00.0-usb-0:1.2:1.0-video-index0")
    #camera0.setResolution(k_CONVEYOR_CAM_REZ_WIDTH, k_CONVEYOR_CAM_REZ_HEIGHT)
    camerausb = cscore.UsbCamera(
        name="usbcam",
        path=
        "/dev/v4l/by-path/platform-fd500000.pcie-pci-0000:01:00.0-usb-0:1.2:1.0-video-index0"
    )
    camerapi = cscore.CvSource("cvsource", cscore.VideoMode.PixelFormat.kMJPEG,
                               k_BALL_CAM_REZ_WIDTH, k_BALL_CAM_REZ_HEIGHT,
                               k_BALL_CAM_FPS)

    # setup MjpegServers
    usbMjpegServer = cscore.MjpegServer("ConveyorCam", 8081)
    piMjpegServer = cscore.MjpegServer("BallCam", 8082)

    # connect MjpegServers to their cameras
    usbMjpegServer.setSource(camerausb)
    piMjpegServer.setSource(camerapi)

    # Get a CvSink. This will capture images from the camera
    #cvSink = cs.getVideo()

    # (optional) Setup a CvSource. This will send images back to the Dashboard
    #outputStream = cs.putVideo("Name", k_BALL_CAM_REZ_WIDTH, k_BALL_CAM_REZ_HEIGHT)

    # Allocating new images is very expensive, always try to preallocate.
    # BUGFIX: numpy image shape is (rows=height, cols=width, channels);
    # the original swapped width and height, transposing the buffer.
    img = np.zeros(shape=(k_BALL_CAM_REZ_HEIGHT, k_BALL_CAM_REZ_WIDTH, 3),
                   dtype=np.uint8)

    while True:
        # Forward each Pi camera frame to the "ball cam" MJPEG stream.
        frame = vs.read()
        camerapi.putFrame(frame)
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image.  If there is an error notify the output.
        #time, img = cvSink.grabFrame(img)
        #if time == 0:
        #    # Send the output the error.
        #    outputStream.notifyError(cvSink.getError());
        #    # skip the rest of the current iteration
        #    continue

        #
        # Insert image processing logic here
        #

        # (optional) send some image back to the dashboard
        #outputStream.putFrame(frame)
        print("putting Frame")
Beispiel #25
0
import VisionConfig
from grip import GripPipeline
import numpy as np
import cscore
from networktables import NetworkTables
import logging
import threading

# set logging level
# this is needed to get NetworkTables information
logging.basicConfig(level=logging.DEBUG)

# create the camera objects (video devices 0 and 1)
picam = cscore.UsbCamera("picam", 0)
usbcam = cscore.UsbCamera("usbcam", 1)

# set video modes as determined in VisionConfig.py
# (resolution tuples are (width, height); framerate in FPS)
picam.setVideoMode(cscore.VideoMode.PixelFormat.kMJPEG,
                   VisionConfig.pi_resolution[0],
                   VisionConfig.pi_resolution[1], VisionConfig.pi_framerate)
usbcam.setVideoMode(cscore.VideoMode.PixelFormat.kMJPEG,
                    VisionConfig.usb_resolution[0],
                    VisionConfig.usb_resolution[1], VisionConfig.usb_framerate)

# create a cv sink, which will grab images from the camera
# (only the pi camera is wired into the sink; the usb camera is not processed here)
cvsink = cscore.CvSink("cvsink")
cvsink.setSource(picam)

# create Pipeline Object (GRIP-generated vision pipeline)
pipeline = GripPipeline()
Beispiel #26
0
#!/usr/bin/env python3
#
# WARNING: You should only use this approach for testing cscore on platforms that
#          it doesn't support using UsbCamera (Windows or OSX).
#

import cscore as cs

if hasattr(cs, "UsbCamera"):
    # Native USB camera support is available: open device 0 directly.
    camera = cs.UsbCamera("usbcam", 0)
    camera.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)
else:
    # Fallback for platforms without UsbCamera (Windows/OSX): capture with
    # OpenCV and feed frames into a CvSource that stands in for the camera.
    import cv2
    import threading

    camera = cs.CvSource("cvsource", cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)

    # tell OpenCV to capture video for us
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

    # Do it in another thread
    def _thread():
        # img is reused so OpenCV can write into the same buffer each read.
        img = None
        while True:
            retval, img = cap.read(img)
            if retval:
                camera.putFrame(img)

    th = threading.Thread(target=_thread, daemon=True)
    # NOTE(review): th is created but never started in this snippet --
    # confirm a th.start() call was not dropped.
Beispiel #27
0
# Per-camera brightness/exposure presets, switched between vision-processing
# ("VISION") and human-driver ("HUMAN") modes.
FRONT_LINE_BRIGHTNESS_HUMAN = 5
FRONT_LINE_EXPOSURE_VISION = 2
FRONT_LINE_EXPOSURE_HUMAN = 2

REAR_BRIGHTNESS_VISION = 70
REAR_BRIGHTNESS_HUMAN = 50
REAR_EXPOSURE_VISION = 10
REAR_EXPOSURE_HUMAN = 50

# General setup
NetworkTables.initialize(server=ROBORIO_IP)

# Globals
# TODO change resolution to widescreen on wider cameras

# Front driving camera (480x270 MJPEG @ 30 FPS) and its frame-grab sink.
front_cam = cs.UsbCamera("front_cam", FRONT_CAM)
front_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 480, 270, 30)
front_cam_sink = cs.CvSink("front_cam_sink")
front_cam_sink.setSource(front_cam)

# Front line-following camera (320x240 MJPEG @ 30 FPS) and its sink.
front_linecam = cs.UsbCamera("front_linecam", FRONT_LINECAM)
front_linecam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)
front_linecam_sink = cs.CvSink("front_linecam_sink")
front_linecam_sink.setSource(front_linecam)

# Rear camera (320x240 MJPEG @ 30 FPS) and its sink.
rear_cam = cs.UsbCamera("rear_cam", REAR_CAM)
rear_cam.setVideoMode(cs.VideoMode.PixelFormat.kMJPEG, 320, 240, 30)
rear_cam_sink = cs.CvSink("rear_cam_sink")
rear_cam_sink.setSource(rear_cam)

# NetworkTables table used to exchange vision results with the robot code.
table = NetworkTables.getTable("VisionTable")