Example no. 1
0
            # NOTE(review): fragment — the enclosing def and the loop header are outside this view.
            # Reject contours whose vertex count is outside the configured band.
            if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):
                continue
            # Aspect-ratio (width / height) filter; assumes h != 0 — TODO confirm an upstream guard.
            ratio = (float)(w) / h
            if (ratio < min_ratio or ratio > max_ratio):
                continue
            # Contour passed every filter; keep it.
            output.append(contour)
        return output



# NOTE(review): script fragment — the while loop continues past this view.
gLine = Example()  # vision pipeline instance; its contour-filter tail is visible above

camera = UsbCamera("CammyBoi", 0)
camera.setExposureManual(10)
#camera.setConfigJson(json.dumps(json.load(open(configFile, "rt", encoding="utf-8"))))
vidSink = CvSink("Camera")
vidSink.setSource(camera)

# Processed output is re-streamed as MJPEG on port 1181.
vidSource = CvSource("Processed", VideoMode.PixelFormat.kMJPEG, 640, 480, 30)
networkStream = MjpegServer("Stream", 1181)
networkStream.setSource(vidSource)
# Pre-allocated frame buffer: (rows=480, cols=640, 3 channels).
img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)

while(True):
    # NOTE(review): ret (the grab result/timestamp) is never checked before src is used.
    ret, src = vidSink.grabFrame(img)
    startTime = time.time()

    gLine.process(src)


    image = gLine.mask_output
Example no. 2
0
    # NOTE(review): fragment — the enclosing function and setup code are outside this view.
    # Join the robot's NetworkTables server as a client for team 7539.
    ntinst.startClientTeam(7539)

    SmartDashBoardValues = ntinst.getTable('SmartDashboard')

    #Start camera
    print("Connecting to camera")
    cs = CameraServer.getInstance()
    cs.enableLogging()
    Camera = UsbCamera('Cam 0', 0)
    Camera.setExposureManual(5)  # low fixed exposure — presumably for retroreflective-tape tracking; confirm
    Camera.setResolution(160, 120)
    cs.addCamera(Camera)

    print("connected")

    # NOTE(review): this local shadows the cscore CvSink class name if it is imported.
    CvSink = cs.getVideo()
    outputStream = cs.putVideo("Processed Frames", 160, 120)

    #buffer to store img data
    # NOTE(review): OpenCV images are (rows=height, cols=width, channels); for a
    # 160x120 camera the buffer shape (160, 120, 3) looks transposed — confirm.
    img = np.zeros(shape=(160, 120, 3), dtype=np.uint8)
    # loop forever
    while True:

        GotFrame, img = CvSink.grabFrame(img)
        # grabFrame returns 0 on error/timeout; report the error and retry.
        if GotFrame == 0:
            outputStream.notifyError(CvSink.getError())
            continue
        img = TrackTheTape(img, SmartDashBoardValues)

        outputStream.putFrame(img)
Example no. 3
0
    # NOTE(review): fragment — the matching `if` (NetworkTables server mode) is outside this view.
    else:
        print("Setting up NetworkTables client for team {}".format(team))
        ntinst.startClientTeam(team)

    # start cameras
    for config in cameraConfigs:
        cameras.append(startCamera(config))

    # start switched cameras
    for config in switchedCameraConfigs:
        startSwitchedCamera(config)

    #Init Grip
    grip_green = GripPipelineGreen()
    grip_yellow = GripPipelineYellow()
    sinkY = CvSink("vision Yellow")
    sinkF = CvSink("vision Front")
    # NOTE(review): assumes at least three configured cameras; cameras[2]
    # raises IndexError otherwise — confirm the deployment always has three.
    sinkY.setSource(cameras[0])  #Was 1, trying 0
    sinkF.setSource(cameras[2])  #Was 1, trying 0
    sinkG = CvSink("vision Green")
    sinkG.setSource(cameras[1])  #Was 1, trying 0
    # Pre-allocated per-camera frame buffers.
    image_G = numpy.ndarray((320, 240, 3), dtype=numpy.uint8)  #Mid val was 360
    image_F = numpy.ndarray((320, 240, 3), dtype=numpy.uint8)  #Mid val was 360
    image_Y = numpy.ndarray((320, 240, 3), dtype=numpy.uint8)  #Mid val was 360
    camservInst = CameraServer.getInstance()

    dashSource1 = camservInst.putVideo("UI Active Cam", 320, 240)
    #dashSource2 = camservInst.putVideo("UI Green Cam", 320, 240)
    # loop forever

    sd = ntinst.getTable('SmartDashboard')
Example no. 4
0
            # NOTE(review): fragment — this closes an addListener(...) call opened out of view.
            ntcore.constants.NT_NOTIFY_UPDATE)
        # Re-run the threshold setter whenever the dashboard entry is updated.
        smart_dashboard.getEntry(
            'Vision/Threshhold/Lower/saturation').addListener(
                set_threshhold_value('lower', 'saturation'),
                ntcore.constants.NT_NOTIFY_UPDATE)
        smart_dashboard.getEntry('Vision/Threshhold/Lower/value').addListener(
            set_threshhold_value('lower', 'value'),
            ntcore.constants.NT_NOTIFY_UPDATE)

    # Find primary and secondary cameras
    cameras = []
    for config in camera_configs:
        camera = UsbCamera(config.name, config.path)
        camera.setConfigJson(json.dumps(config.config))

        cvsink = CvSink('cvsink: ' + config.name)
        cvsink.setSource(camera)
        cvsink.setEnabled(False)  # disable frame fetching by default

        # Add 3 things to the list of cameras: the camera object itself,
        #  the cvsink object, and the camera configuration dictionary.
        #  The first two are the only ones actually needed. To stream
        #  the camera you can use server.setSource(cam), and the
        #  cvsink is used to enable fetching frames. config.config is
        #  only used to access configuration information like the name
        #  and resolution.
        cameras.append((camera, cvsink, config.config))

    vision_camera = None
    driver_cameras = []
    # NOTE(review): fragment — the loop body continues past this view.
    for camera in cameras:
Example no. 5
0
from pyimagesearch.shapedetector import ShapeDetector
import convenience

class Client(object):
	"""Holder for NetworkTables-backed properties published under /SmartDashboard."""

	# (x, y) target center location mirrored to the dashboard; defaults to (0, 0).
	centerLoc = ntproperty("/SmartDashboard/centerLoc", (0,0))

cl = Client()

# NOTE(review): script fragment — the while loop continues past this view.
if (__name__ == "__main__"):
	team = 801
	ntinst = NetworkTablesInstance.getDefault()
	ntinst.startClientTeam(team)
	# Raw USB camera stream served as MJPEG on port 1181.
	usbCamera = UsbCamera("USB Camera 0", 0)
	mjpegServer1 = MjpegServer("serve_USB Camera 0", 1181)
	mjpegServer1.setSource(usbCamera)
	# CvSink lets OpenCV grab frames from the same camera.
	cvSink = CvSink("opencv_USB Camera 0")
	cvSink.setSource(usbCamera)
	# Processed (blue-masked) stream republished on port 1182.
	outputStream = CvSource("Blur", VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
	mjpegServer2 = MjpegServer("serve_Blur Camera 1", 1182)
	mjpegServer2.setSource(outputStream)
	frame = np.zeros((320,240,3), 'uint8')
	iter = 0  # NOTE(review): shadows the builtin iter()
	while(True):
		# NOTE(review): grab status/timestamp is discarded; frame may be stale on error.
		_, frame = cvSink.grabFrame(frame)
		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
		# Keep only blue hues (H in [110, 130] with moderate saturation/value).
		lower_blue = np.array([110, 50, 50])
		upper_blue = np.array([130, 255, 255])
		mask = cv2.inRange(hsv, lower_blue, upper_blue)
		res = cv2.bitwise_and(frame,frame, mask=mask)

		res_gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
Example no. 6
0
        # NOTE(review): fragment — the `if` test pairing this branch is outside this view.
        ntinst.startServer()
    else:
        print("Setting up NetworkTables client for team {}".format(team))
        ntinst.startClientTeam(team)

    # start cameras
    # ~ these are VideoSources
    cameras = []
    for cameraConfig in cameraConfigs:
        cameras.append(startCamera(cameraConfig))

    # NOTE(review): names look swapped — a 320x240 camera has width=320, height=240,
    # and CameraServer.putVideo takes (name, width, height); confirm intent.
    height = 320
    width = 240
    inst = CameraServer.getInstance()
    videoOutput = inst.putVideo("Rasp PI Output", height, width)
    videoSink = CvSink("Rasp PI Converter")

    # Frame buffer; dtype defaults to float64 here and is converted to uint8 after each grab.
    img = np.ndarray((height, width, 3))
    videoSink.setSource(cameras[0])
    # loop forever
    while True:
        timestamp, img = videoSink.grabFrame(img)  # this outputs a CvImage
        # grabFrame reports an error with timestamp 0; skip such frames.
        if not timestamp:
            continue
        # hsl filter
        # convert from float to uint8
        img = img.astype(dtype="uint8")

        # get binary mask
        hsv_frame = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        lowerbh = wall_o.getNumber("lowerBoundH", 15)