Example #1
                continue
            ratio = float(w) / h
            if ratio < min_ratio or ratio > max_ratio:
                continue
            output.append(contour)
        return output



gLine = Example()

camera = UsbCamera("CammyBoi", 0)
camera.setExposureManual(10)
#camera.setConfigJson(json.dumps(json.load(open(configFile, "rt", encoding="utf-8"))))
vidSink = CvSink("Camera")
vidSink.setSource(camera)

vidSource = CvSource("Processed", VideoMode.PixelFormat.kMJPEG, 640, 480, 30)
networkStream = MjpegServer("Stream", 1181)
networkStream.setSource(vidSource)
img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)

while True:
    ret, src = vidSink.grabFrame(img)  # ret is the frame time; 0 means the grab failed
    if ret == 0:
        continue
    startTime = time.time()

    gLine.process(src)


    image = gLine.mask_output
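
The snippet is cut off here. A typical next step, not shown in the original, is to push the processed image back out through the CvSource so the MjpegServer on port 1181 can serve it. A minimal sketch of that assumed continuation of the loop body:

    # Assumed continuation: serve the pipeline output on the "Processed" stream.
    if image is not None:
        vidSource.putFrame(image)
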
Example #2
        ntinst.startClientTeam(team)

    # start cameras
    for config in cameraConfigs:
        cameras.append(startCamera(config))

    # start switched cameras
    for config in switchedCameraConfigs:
        startSwitchedCamera(config)

    #Init Grip
    grip_green = GripPipelineGreen()
    grip_yellow = GripPipelineYellow()
    sinkY = CvSink("vision Yellow")
    sinkF = CvSink("vision Front")
    sinkY.setSource(cameras[0])  #Was 1, trying 0
    sinkF.setSource(cameras[2])  #Was 1, trying 0
    sinkG = CvSink("vision Green")
    sinkG.setSource(cameras[1])  #Was 1, trying 0
    image_G = numpy.zeros((320, 240, 3), dtype=numpy.uint8)  # Mid val was 360
    image_F = numpy.zeros((320, 240, 3), dtype=numpy.uint8)  # Mid val was 360
    image_Y = numpy.zeros((320, 240, 3), dtype=numpy.uint8)  # Mid val was 360
    camservInst = CameraServer.getInstance()

    dashSource1 = camservInst.putVideo("UI Active Cam", 320, 240)
    #dashSource2 = camservInst.putVideo("UI Green Cam", 320, 240)
    # loop forever

    sd = ntinst.getTable('SmartDashboard')

    while True:
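
The loop body is truncated here. A minimal sketch of what such a loop typically does with the objects created above; the GRIP output attribute (filter_contours_output) and the NetworkTables key are assumptions, not taken from the original:

        # Assumed loop body: grab a frame from the green-target sink, run the
        # GRIP pipeline on it, and republish the frame to the dashboard stream.
        t, image_G = sinkG.grabFrame(image_G)
        if t == 0:
            continue  # grab failed or timed out; skip this iteration
        grip_green.process(image_G)
        dashSource1.putFrame(image_G)
        # Hypothetical NetworkTables key for the contour count.
        sd.putNumber("greenContours", len(grip_green.filter_contours_output))
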
Example #3
import convenience

class Client(object):
	centerLoc = ntproperty("/SmartDashboard/centerLoc", (0,0))

cl = Client()

if __name__ == "__main__":
	team = 801
	ntinst = NetworkTablesInstance.getDefault()
	ntinst.startClientTeam(team)
	usbCamera = UsbCamera("USB Camera 0", 0)
	mjpegServer1 = MjpegServer("serve_USB Camera 0", 1181)
	mjpegServer1.setSource(usbCamera)
	cvSink = CvSink("opencv_USB Camera 0")
	cvSink.setSource(usbCamera)
	outputStream = CvSource("Blur", VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
	mjpegServer2 = MjpegServer("serve_Blur Camera 1", 1182)
	mjpegServer2.setSource(outputStream)
	frame = np.zeros((320,240,3), 'uint8')
	iter = 0
	while True:
		_, frame = cvSink.grabFrame(frame)
		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
		lower_blue = np.array([110, 50, 50])
		upper_blue = np.array([130, 255, 255])
		mask = cv2.inRange(hsv, lower_blue, upper_blue)
		res = cv2.bitwise_and(frame,frame, mask=mask)

		res_gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
		res_blur = cv2.GaussianBlur(res_gray, (5,5), 0)
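
The example is truncated after the blur. A minimal sketch of a plausible continuation using the objects created above; the threshold value, the contour and centroid logic, and writing cl.centerLoc are assumptions (OpenCV 4 findContours signature):

		# Assumed continuation: threshold the blurred image, take the largest
		# contour, publish its centroid via the ntproperty, and stream the result.
		_, thresh = cv2.threshold(res_blur, 20, 255, cv2.THRESH_BINARY)
		contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
		if contours:
			c = max(contours, key=cv2.contourArea)
			m = cv2.moments(c)
			if m["m00"] != 0:
				cl.centerLoc = (m["m10"] / m["m00"], m["m01"] / m["m00"])
		outputStream.putFrame(res_blur)
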
Example #4
        smart_dashboard.getEntry(
            'Vision/Threshhold/Lower/saturation').addListener(
                set_threshhold_value('lower', 'saturation'),
                ntcore.constants.NT_NOTIFY_UPDATE)
        smart_dashboard.getEntry('Vision/Threshhold/Lower/value').addListener(
            set_threshhold_value('lower', 'value'),
            ntcore.constants.NT_NOTIFY_UPDATE)

    # Find primary and secondary cameras
    cameras = []
    for config in camera_configs:
        camera = UsbCamera(config.name, config.path)
        camera.setConfigJson(json.dumps(config.config))

        cvsink = CvSink('cvsink: ' + config.name)
        cvsink.setSource(camera)
        cvsink.setEnabled(False)  # disable frame fetching by default

        # Add 3 things to the list of cameras: the camera object itself,
        #  the cvsink object, and the camera configuration dictionary.
        #  The first two are the only ones actually needed. To stream the
        #  camera you can use server.setSource(camera), and the cvsink is
        #  used to enable fetching frames. config.config is only used to
        #  access configuration information like the name and resolution.
        cameras.append((camera, cvsink, config.config))

    vision_camera = None
    driver_cameras = []
    for camera in cameras:
        # The camera wanted for vision must be named 'Vision' exactly,
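
The fragment ends inside this comment. A minimal sketch of how the loop presumably continues, splitting the tuples built above into the vision camera and the driver cameras; the exact key lookup on the config dictionary is an assumption:

        # Assumed continuation: the third tuple element is the config dict,
        # whose name decides whether this is the vision camera or a driver camera.
        if camera[2].get('name') == 'Vision':
            vision_camera = camera
        else:
            driver_cameras.append(camera)
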
Example #5
        ntinst.startClientTeam(team)

    # start cameras
    # ~ these are VideoSources
    cameras = []
    for cameraConfig in cameraConfigs:
        cameras.append(startCamera(cameraConfig))

    # 320x240 output stream; the numpy buffer is (rows, cols, channels)
    width = 320
    height = 240
    inst = CameraServer.getInstance()
    videoOutput = inst.putVideo("Rasp PI Output", width, height)
    videoSink = CvSink("Rasp PI Converter")

    img = np.ndarray((height, width, 3))
    videoSink.setSource(cameras[0])
    # loop forever
    while True:
        timestamp, img = videoSink.grabFrame(img)  # this outputs a CvImage
        if not timestamp:
            continue
        # hsl filter
        # convert from float to uint8
        img = img.astype(dtype="uint8")

        # get binary mask
        hsv_frame = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        lowerbh = wall_o.getNumber("lowerBoundH", 15)
        lowerbs = wall_o.getNumber("lowerBoundS", 100)
        lowerbv = wall_o.getNumber("lowerBoundV", 130)
        upperbh = wall_o.getNumber("upperBoundH", 60)
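
The snippet ends while the HSV bounds are being read. A minimal sketch of the likely remainder of the loop; the remaining NetworkTables keys, their defaults, and the putFrame call follow the pattern already started but are assumptions:

        upperbs = wall_o.getNumber("upperBoundS", 255)
        upperbv = wall_o.getNumber("upperBoundV", 255)
        lower = np.array([lowerbh, lowerbs, lowerbv], dtype=np.uint8)
        upper = np.array([upperbh, upperbs, upperbv], dtype=np.uint8)
        mask = cv2.inRange(hsv_frame, lower, upper)
        # Publish the binary mask to the dashboard stream set up above.
        videoOutput.putFrame(mask)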