from cscore import CameraServer, CvSource, UsbCamera, VideoMode
import cv2
import numpy as np


def main():
    cs = CameraServer.getInstance()
    cs.enableLogging()

    outputStream = CvSource('camera', VideoMode.PixelFormat.kMJPEG, 640,
                            480, 15)
    cs.addCamera(outputStream)
    server = cs.addServer(name='camera', port=5801)
    server.setSource(outputStream)

    camera = UsbCamera(
        "cam1",
        '/dev/v4l/by-id/usb-HD_Camera_Manufacturer_USB_2.0_Camera-video-index0'
    )
    # Start capturing frames from the USB camera defined above
    cs.startAutomaticCapture(camera=camera)
    camera.setResolution(640, 480)

    # Get a CvSink. This will capture images from the camera
    cvSink = cs.getVideo(camera=camera)

    # Allocating new images is very expensive, so always try to preallocate
    img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)

    while True:
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image. If there is an error, notify the output.
        time, img = cvSink.grabFrame(img)
        if time == 0:
            # Send the output the error.
            outputStream.notifyError(cvSink.getError())
            # skip the rest of the current iteration
            continue

        #
        # Insert your image processing logic here!
        #
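        # (illustrative sketch, not from the original) e.g. isolate a
        # colored target with an HSV threshold; these bounds are arbitrary
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, (110, 50, 50), (130, 255, 255))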
        cv2.imshow("stream", img)
        cv2.waitKey(1)  # imshow needs waitKey to actually render the window
        # (optional) send some image back to the dashboard
        outputStream.putFrame(img)
Example #2
            if ratio < min_ratio or ratio > max_ratio:
                continue
            output.append(contour)
        return output
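# A hedged sketch of the full filter the truncated fragment above belongs
# to; the function name, the bounding-box ratio test, and the default
# bounds are assumptions, not from the original snippet.
def filter_contours(contours, min_ratio=0.5, max_ratio=2.0):
    output = []
    for contour in contours:
        # aspect ratio of the contour's bounding box (width / height)
        _, _, w, h = cv2.boundingRect(contour)
        ratio = w / h
        if ratio < min_ratio or ratio > max_ratio:
            continue
        output.append(contour)
    return output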



gLine = Example()

camera = UsbCamera("CammyBoi", 0)
camera.setExposureManual(10)
#camera.setConfigJson(json.dumps(json.load(open(configFile, "rt", encoding="utf-8"))))
vidSink = CvSink("Camera")
vidSink.setSource(camera)

vidSource = CvSource("Processed", VideoMode.PixelFormat.kMJPEG, 640, 480, 30)
networkStream = MjpegServer("Stream", 1181)
networkStream.setSource(vidSource)
img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)

while True:
    frame_time, src = vidSink.grabFrame(img)
    if frame_time == 0:
        print(vidSink.getError())
        continue
    startTime = time.time()

    gLine.process(src)


    image = gLine.mask_output

    print(time.time() - startTime)
    #cv2.imshow('frame', img)
    cameras = []
    for cameraConfig in cameraConfigs:
        cameras.append(startCamera(cameraConfig))

    server = CameraServer.getInstance()
    cameraZeroSink = server.getVideo()

    print(cameraZeroSink.getSource())

    image = np.zeros(shape=(120, 160, 3), dtype=np.uint8)

    outPutStream = server.putVideo("mask", 160, 120)

    mJpegServer = MjpegServer("cvServer", 1186)

    cvSource = CvSource("CVSource", VideoMode.PixelFormat.kMJPEG, 160,
                        120, 30)

    # loop forever
    while True:
        start = time.time()
        status, image = cameraZeroSink.grabFrame(image, 30)
        if status == 0:
            outPutStream.notifyError(cameraZeroSink.getError())
            print(cameraZeroSink.getError())
            continue
        #image2 = cv2.rectangle(image, (20, 60), (140, 120), (255, 255, 255), 5)
        #outPutStream.putFrame(image)

        #cvImage = cv2.imread(imageProcess, 1)
        lower_blue = np.array([30, 84, 70])
Example #4
    mjpeg_server.setSource(driver_camera[0])
    driver_camera = driver_cameras[1]
    driver_camera[1].setEnabled(True)
    mjpeg_server_2.setSource(driver_camera[0])

    camera, cvsink, config = vision_camera
    cvsink.setEnabled(True)

    # This code creates a CvSource which will push a modified image to the
    #  MJPEG stream. Testing only.
    if USE_MODIFIED_IMAGE:
        name = 'cvsource: ' + config['name']
        width = config['width']
        height = config['height']
        fps = config['fps']
        cvsource = CvSource(name, VideoMode.PixelFormat.kMJPEG, width, height,
                            fps)
        mjpeg_server.setSource(cvsource)

    processor = TargetProcessing(config['width'],
                                 config['height'],
                                 draw_img=USE_MODIFIED_IMAGE,
                                 view_thresh=False)
    # processor = BallProcessing(config['width'], config['height'])
    img = np.zeros(shape=(config['height'], config['width'], 3),
                   dtype=np.uint8)

    smart_dashboard.putNumber('Vision/Threshhold/Upper/hue',
                              processor.upper_thresh[0])
    smart_dashboard.putNumber('Vision/Threshhold/Upper/saturation',
                              processor.upper_thresh[1])
    smart_dashboard.putNumber('Vision/Threshhold/Upper/value',
                              processor.upper_thresh[2])
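    # (assumed sketch, not in the truncated original) the same keys can be
    # polled each frame so the threshold is tunable from the dashboard
    processor.upper_thresh[0] = smart_dashboard.getNumber(
        'Vision/Threshhold/Upper/hue', processor.upper_thresh[0])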
Example #5
from cscore import CvSink, CvSource, MjpegServer, UsbCamera, VideoMode
from networktables import NetworkTablesInstance
from networktables.util import ntproperty
import cv2
import numpy as np


class Client(object):
	centerLoc = ntproperty("/SmartDashboard/centerLoc", (0,0))

cl = Client()

if __name__ == "__main__":
	team = 801
	ntinst = NetworkTablesInstance.getDefault()
	ntinst.startClientTeam(team)
	usbCamera = UsbCamera("USB Camera 0", 0)
	mjpegServer1 = MjpegServer("serve_USB Camera 0", 1181)
	mjpegServer1.setSource(usbCamera)
	cvSink = CvSink("opencv_USB Camera 0")
	cvSink.setSource(usbCamera)
	outputStream = CvSource("Blur", VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
	mjpegServer2 = MjpegServer("serve_Blur Camera 1", 1182)
	mjpegServer2.setSource(outputStream)
	frame = np.zeros((240, 320, 3), dtype=np.uint8)
	iter = 0
	while True:
		frame_time, frame = cvSink.grabFrame(frame)
		if frame_time == 0:
			# skip failed grabs so cvtColor never sees an empty frame
			continue
		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
		lower_blue = np.array([110, 50, 50])
		upper_blue = np.array([130, 255, 255])
		mask = cv2.inRange(hsv, lower_blue, upper_blue)
		res = cv2.bitwise_and(frame,frame, mask=mask)

		res_gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
		res_blur = cv2.GaussianBlur(res_gray, (5,5), 0)
		res_thresh = cv2.threshold(res_blur, 60, 255, cv2.THRESH_BINARY)[1]
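		# (assumed continuation; the snippet is truncated here) publish the
		# thresholded result on the "Blur" CvSource created above
		outputStream.putFrame(cv2.cvtColor(res_thresh, cv2.COLOR_GRAY2BGR))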
Example #6
if __name__ == "__main__":
    if len(sys.argv) >= 2:
        configFile = sys.argv[1]

    # start cameras
    print("Connecting to camera")
    cs = CameraServer.getInstance()
    cs.enableLogging()
    Camera = UsbCamera('rPi Camera 0', 0)
    Camera.setResolution(width, height)
    #cs.addCamera(Camera)
    cs.startAutomaticCapture(camera=Camera, return_server=True)

    cvSink = cs.getVideo()
    outputStream = CvSource('Processed Frames', VideoMode.PixelFormat.kBGR,
                            width, height, 28)
    img = np.zeros(shape=(height, width, 3), dtype=np.uint8)
    count = 0
    global topX
    global topY
    global botX
    global botY
    global direction
    global alreadyFound
    while True:
        topX = -2
        topY = -2
        botX = -2
        botY = -2
        direction = 0
        alreadyFound = False
Example #7
    STREAM_VISION = sd.getNumber("stream_vision", 0) != 0


if DEBUG:
    import pipeline_laptop as pipeline
else:
    import pipeline

    if STREAM_VISION:
        from cscore import CameraServer, CvSource, VideoMode

        inst = CameraServer.getInstance()
        camera = CvSource("CvCam", VideoMode.PixelFormat.kBGR, 320, 160, 15)
        server = inst.startAutomaticCapture(camera=camera, return_server=True)

os.system("v4l2-ctl -d /dev/video{} -c exposure_auto=1".format(CAMERA_PORT))
os.system("v4l2-ctl -d /dev/video{} -c exposure_absolute=0".format(CAMERA_PORT))
os.system("v4l2-ctl -d /dev/video{} -c brightness=30".format(CAMERA_PORT))
os.system("v4l2-ctl -d /dev/video{} -c contrast=10".format(CAMERA_PORT))

cap = cv.VideoCapture(CAMERA_PORT)
WIDTH = int(cap.get(cv.CAP_PROP_FRAME_WIDTH) / 2)
HEIGHT = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT) / 2)
MID_X = int(WIDTH / 2)

CROPPED_HEIGHT = 160
HFOV = 61
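# (assumed usage, not in the original) HFOV is typically used to turn a
# target's pixel offset from image center into a horizontal angle, here
# with a simple linear approximation:
def pixel_to_degrees(x):
    return (x - MID_X) * HFOV / WIDTH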
BlurType = Enum('BlurType',
                'Box_Blur Gaussian_Blur Median_Filter Bilateral_Filter')
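# A hedged sketch of how a GRIP-style pipeline usually consumes BlurType
# (assumed; the original helper function is not shown):
def blur(src, blur_type, radius):
    ksize = int(2 * round(radius) + 1)  # odd kernel size, as OpenCV requires
    if blur_type is BlurType.Box_Blur:
        return cv2.blur(src, (ksize, ksize))
    if blur_type is BlurType.Gaussian_Blur:
        return cv2.GaussianBlur(src, (ksize, ksize), round(radius))
    if blur_type is BlurType.Median_Filter:
        return cv2.medianBlur(src, ksize)
    return cv2.bilateralFilter(src, -1, round(radius), round(radius))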

gLine = ContourTests()
# cap = cv2.VideoCapture(0)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
# cs = CameraServer.getInstance()
# networkStream = cs.putVideo("Out", 160, 120)
camera = UsbCamera("CammyBoi", 0)
camera.setExposureManual(10)
#camera.setConfigJson(json.dumps(json.load(open(configFile, "rt", encoding="utf-8"))))
vidSink = CvSink("Camera")
vidSink.setSource(camera)

vidSource = CvSource("Processed", VideoMode.PixelFormat.kMJPEG, 640, 480, 30)
networkStream = MjpegServer("Stream", 1181)
networkStream.setSource(vidSource)
img = numpy.zeros(shape=(480, 640, 3), dtype=numpy.uint8)

while True:
    # Capture frame-by-frame
    # ret, src = cap.read()
    ret, src = vidSink.grabFrame(img)
    if ret == 0:
        continue
    startTime = time.time()

    gLine.process(src)
    image = src
    contours = gLine.find_contours_output

    c1, c2 = None, None
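    # (assumed continuation; the example is truncated here) a common next
    # step is to keep the two largest contours as the candidate pair
    if len(contours) >= 2:
        c1, c2 = sorted(contours, key=cv2.contourArea, reverse=True)[:2]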