Example #1
0
    # Connect NetworkTables as a client for team 7539 and grab the
    # SmartDashboard table used to publish vision results.
    ntinst.startClientTeam(7539)

    SmartDashBoardValues = ntinst.getTable('SmartDashboard')

    #Start camera
    print("Connecting to camera")
    cs = CameraServer.getInstance()
    cs.enableLogging()
    # USB camera 0 at low resolution with a short manual exposure,
    # presumably so bright vision targets stand out -- TODO confirm.
    Camera = UsbCamera('Cam 0', 0)
    Camera.setExposureManual(5)
    Camera.setResolution(160, 120)
    cs.addCamera(Camera)

    print("connected")

    CvSink = cs.getVideo()
    outputStream = cs.putVideo("Processed Frames", 160, 120)

    #buffer to store img data
    # NOTE(review): shape (160, 120, 3) is (rows, cols, channels); for a
    # 160x120 stream OpenCV convention would be (120, 160, 3). grabFrame
    # reallocates on mismatch, so this only costs one reallocation.
    img = np.zeros(shape=(160, 120, 3), dtype=np.uint8)
    # loop forever
    while True:

        # grabFrame returns (timestamp, frame); a timestamp of 0 signals
        # an error, which is forwarded to the output stream.
        GotFrame, img = CvSink.grabFrame(img)
        if GotFrame == 0:
            outputStream.notifyError(CvSink.getError())
            continue
        # Run the tape-tracking pipeline; it publishes values to the
        # dashboard table and returns the (possibly annotated) frame.
        img = TrackTheTape(img, SmartDashBoardValues)

        outputStream.putFrame(img)
Example #2
0


# Vision pipeline: grab frames from the USB camera, run the GRIP-generated
# "Example" pipeline, and serve its mask output as an MJPEG stream on
# port 1181.
gLine = Example()

camera = UsbCamera("CammyBoi", 0)
camera.setExposureManual(10)
#camera.setConfigJson(json.dumps(json.load(open(configFile, "rt", encoding="utf-8"))))
vidSink = CvSink("Camera")
vidSink.setSource(camera)

vidSource = CvSource("Processed", VideoMode.PixelFormat.kMJPEG, 640, 480, 30)
networkStream = MjpegServer("Stream", 1181)
networkStream.setSource(vidSource)
# Preallocated frame buffer: (rows, cols, channels) for a 640x480 image.
img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)

while True:
    # grabFrame returns (timestamp, frame); a timestamp of 0 means the
    # grab failed. Skip the iteration instead of processing a stale frame
    # (the original never checked this).
    ret, src = vidSink.grabFrame(img)
    if ret == 0:
        print(vidSink.getError())
        continue
    startTime = time.time()

    gLine.process(src)

    image = gLine.mask_output

    # Per-frame processing time, printed for tuning resolution/speed.
    print(time.time() - startTime)
    #cv2.imshow('frame', img)
    vidSource.putFrame(image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
Example #3
0
    # Route the green-target sink to the second camera.
    sinkG.setSource(cameras[1])  #Was 1, trying 0
    # Preallocated frame buffers, one per sink.
    # NOTE(review): (320, 240, 3) is (rows, cols, channels); grabFrame
    # reallocates if the camera's actual frame shape differs.
    image_G = numpy.ndarray((320, 240, 3), dtype=numpy.uint8)  #Mid val was 360
    image_F = numpy.ndarray((320, 240, 3), dtype=numpy.uint8)  #Mid val was 360
    image_Y = numpy.ndarray((320, 240, 3), dtype=numpy.uint8)  #Mid val was 360
    camservInst = CameraServer.getInstance()

    dashSource1 = camservInst.putVideo("UI Active Cam", 320, 240)
    #dashSource2 = camservInst.putVideo("UI Green Cam", 320, 240)
    # loop forever

    sd = ntinst.getTable('SmartDashboard')

    while True:

        #Change the time front camera
        # NOTE(review): the timestamp is overwritten by each grab and never
        # checked, so a failed grab (timestamp 0) goes unnoticed and a stale
        # frame may be processed.
        timestamp, image_F = sinkF.grabFrame(image_F)

        #change time yellow ball camera
        timestamp, image_Y = sinkY.grabFrame(image_Y)
        grip_yellow.process(image_Y)

        #change time shooter camera
        timestamp, image_G = sinkG.grabFrame(image_G)
        grip_green.process(image_G)

        # Contour lists produced by the GRIP pipelines (falsy when empty).
        contours_output_green = grip_green.filter_contours_output
        contours_output_yellow = grip_yellow.filter_contours_output

        if (contours_output_green):
            image_G = getValuesGreen(image_G)
Example #4
0
    # Publish the processor's current HSV threshold bounds so they can be
    # tuned live from the SmartDashboard. (The 'Threshhold' spelling is part
    # of the published key names, so it must stay as-is.)
    smart_dashboard.putNumber('Vision/Threshhold/Upper/hue',
                              processor.upper_thresh[0])
    smart_dashboard.putNumber('Vision/Threshhold/Upper/saturation',
                              processor.upper_thresh[1])
    smart_dashboard.putNumber('Vision/Threshhold/Upper/value',
                              processor.upper_thresh[2])
    smart_dashboard.putNumber('Vision/Threshhold/Lower/hue',
                              processor.lower_thresh[0])
    smart_dashboard.putNumber('Vision/Threshhold/Lower/saturation',
                              processor.lower_thresh[1])
    smart_dashboard.putNumber('Vision/Threshhold/Lower/value',
                              processor.lower_thresh[2])

    while True:
        # grabFrame returns (timestamp, frame); timestamp 0 signals an error.
        timestamp, img = cvsink.grabFrame(img)
        if timestamp == 0:
            error_msg = 'Error: ' + str(cvsink.getError())
            print(error_msg)
            if USE_SMART_DASHBOARD:
                smart_dashboard.putString('Vision/errors', error_msg)
            continue

        # The elapsed time is really only for testing to determine the
        #  optimal image resolution and speed.
        start_time = time.time()
        try:
            result, img = processor.process_image(img)
        # NOTE(review): bare except swallows every exception type (including
        # KeyboardInterrupt/SystemExit); narrow it once process_image's
        # failure modes are known.
        except:
            continue
        if USE_MODIFIED_IMAGE:
Example #5
0
# Script entry point: stream USB camera 0 raw on port 1181 and a
# blue-masked, noise-cleaned version ("Blur") on port 1182.
if (__name__ == "__main__"):
	team = 801
	ntinst = NetworkTablesInstance.getDefault()
	ntinst.startClientTeam(team)
	usbCamera = UsbCamera("USB Camera 0", 0)
	mjpegServer1 = MjpegServer("serve_USB Camera 0", 1181)
	mjpegServer1.setSource(usbCamera)
	cvSink = CvSink("opencv_USB Camera 0")
	cvSink.setSource(usbCamera)
	outputStream = CvSource("Blur", VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
	mjpegServer2 = MjpegServer("serve_Blur Camera 1", 1182)
	mjpegServer2.setSource(outputStream)
	# Preallocated frame buffer reused by grabFrame.
	frame = np.zeros((320,240,3), 'uint8')
	# NOTE(review): 'iter' shadows the builtin; rename when this is revisited.
	iter = 0
	while(True):
		# The grab timestamp is discarded, so a failed grab (timestamp 0)
		# would silently reuse the previous frame -- TODO confirm acceptable.
		_, frame = cvSink.grabFrame(frame)
		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
		# Keep only blue-ish pixels (hue 110-130 in OpenCV's 0-179 range).
		lower_blue = np.array([110, 50, 50])
		upper_blue = np.array([130, 255, 255])
		mask = cv2.inRange(hsv, lower_blue, upper_blue)
		res = cv2.bitwise_and(frame,frame, mask=mask)

		# Gray -> blur -> binary threshold -> erode/dilate to remove speckle.
		res_gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
		res_blur = cv2.GaussianBlur(res_gray, (5,5), 0)
		res_thresh = cv2.threshold(res_blur, 60, 255, cv2.THRESH_BINARY)[1]
		res_eroded = cv2.erode(res_thresh, np.ones((11,11)))
		res_dilated = cv2.dilate(res_eroded, np.ones((11,11)))
		res_color = cv2.cvtColor(res_dilated, cv2.COLOR_GRAY2BGR)

		# Downscale for contour work; keep the ratio to map results back to
		# the full-size image.
		resized = convenience.resize(res, width=300)
		ratio = res.shape[0] / float(resized.shape[0])
Example #6
0
                            width, height, 28)
    # Frame buffer reused by grabFrame each iteration.
    # NOTE(review): shape is (width, height, 3); OpenCV convention is
    # (rows=height, cols=width, 3) -- grabFrame reallocates on mismatch.
    img = np.zeros(shape=(width, height, 3), dtype=np.uint8)
    count = 0
    # Detection outputs; declared global so the per-frame assignments below
    # update the module-level values.
    global topX
    global topY
    global botX
    global botY
    global direction
    global alreadyFound
    while True:
        # Reset detection state to "not found" sentinels before each frame.
        topX = -2
        topY = -2
        botX = -2
        botY = -2
        direction = 0
        alreadyFound = False
        # grabFrame returns (timestamp, frame); 0 means the grab failed.
        GotFrame, frame = CvSink.grabFrame(img)
        if GotFrame == 0:
            outputStream.notifyError(CvSink.getError())
            continue

        # Heartbeat counter so the dashboard shows the loop is alive.
        SDV.putNumber(("Count"), count)
        #findReflectiveTape()
        # Debug: print the BGR components of the pixel at row 239, col 0.
        print("Blue: " + str(frame[239, 0, 0]) + ", Green: " +
              str(frame[239, 0, 1]) + ", Red: " + str(frame[239, 0, 2]))
        count = count + 1
        # Publish the (currently sentinel) detection results.
        SDV.putNumber(("TopX"), topX)
        SDV.putNumber(("TopY"), topY)
        SDV.putNumber(("BotX"), botX)
        SDV.putNumber(("BotY"), botY)
        SDV.putNumber(("Direction"), direction)
Example #7
0
    # ~ these are VideoSources
    # Start every configured camera; each entry is a VideoSource.
    cameras = []
    for cameraConfig in cameraConfigs:
        cameras.append(startCamera(cameraConfig))

    # NOTE(review): for a 320x240 stream the names look swapped (width is
    # normally 320); they are used consistently below, so confirm against
    # the camera configuration before renaming.
    height = 320
    width = 240
    inst = CameraServer.getInstance()
    videoOutput = inst.putVideo("Rasp PI Output", height, width)
    videoSink = CvSink("Rasp PI Converter")

    # No dtype given, so this buffer is float64; each grabbed frame is cast
    # to uint8 below before OpenCV processing.
    img = np.ndarray((height, width, 3))
    videoSink.setSource(cameras[0])
    # loop forever
    while True:
        timestamp, img = videoSink.grabFrame(img)  # this outputs a CvImage
        # A zero timestamp means the grab failed; skip this iteration.
        if not timestamp:
            continue
        # hsl filter
        # convert from float to uint8
        img = img.astype(dtype="uint8")

        # get binary mask
        hsv_frame = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # HSV bounds are read from NetworkTables each frame so they can be
        # tuned live; the second argument is the fallback default.
        lowerbh = wall_o.getNumber("lowerBoundH", 15)
        lowerbs = wall_o.getNumber("lowerBoundS", 100)
        lowerbv = wall_o.getNumber("lowerBoundV", 130)
        upperbh = wall_o.getNumber("upperBoundH", 60)
        upperbs = wall_o.getNumber("upperBoundS", 255)
        upperbv = wall_o.getNumber("upperBoundV", 255)
        lowerBound = np.array([lowerbh, lowerbs, lowerbv])