# NOTE(review): this chunk begins mid-method — the enclosing GRIP-pipeline
# 'def' (which builds and returns 'output') is outside this view. Fragment
# preserved for reference:
#     output.append(contour)
#     return output

gLine = Example()

camera = UsbCamera("CammyBoi", 0)
camera.setExposureManual(10)
#camera.setConfigJson(json.dumps(json.load(open(configFile, "rt", encoding="utf-8"))))

# Sink pulls raw frames from the camera; source/server push processed frames out.
vidSink = CvSink("Camera")
vidSink.setSource(camera)
vidSource = CvSource("Processed", VideoMode.PixelFormat.kMJPEG, 640, 480, 30)
networkStream = MjpegServer("Stream", 1181)
networkStream.setSource(vidSource)

# Preallocate the frame buffer once; grabFrame() fills it in place.
img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)

while True:
    ret, src = vidSink.grabFrame(img)
    startTime = time.time()
    gLine.process(src)
    image = gLine.mask_output
    print(time.time() - startTime)  # per-frame processing latency
    #cv2.imshow('frame', img)
    vidSource.putFrame(image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        # NOTE(review): the body of this 'if' is truncated in the source;
        # 'break' is the conventional quit handler for this idiom — confirm.
        break
cs.enableLogging()

Camera = UsbCamera('Cam 0', 0)
#exp = 2
#Camera.setExposureManual(exp)
Camera.setResolution(160, 120)
cs.addCamera(Camera)
#SmartDashBoardValues.putNumber('ExpAuto', 0)
print("connected")

Camera1 = UsbCamera('Cam 1', 1)
Camera1.setResolution(640, 480)
Camera1.setFPS(25)

mjpegServer = MjpegServer("serve_Cam 1", 1182)
mjpegServer.setResolution(480, 360)  # stream is downscaled from the 640x480 capture
mjpegServer.setSource(Camera1)

# NOTE(review): this name shadows the cscore CvSink class; rename (e.g. cvSink)
# once all uses in the rest of the file are visible.
CvSink = cs.getVideo()
outputStream = cs.putVideo("Processed Frames", 160, 120)

# Buffer to store img data — preallocated once; grabFrame() fills it in place.
img = np.zeros(shape=(160, 120, 3), dtype=np.uint8)
#ExpStatus = SmartDashBoardValues.getNumber('ExpAuto', 0)

# loop forever
loopCount = 0
#while True:
#    output = ""
#    ExpAuto = SmartDashBoardValues.getNumber('ExpAuto', 0)
#
#    if ExpAuto == 0:
#        if ExpStatus == 1:
ntinst = NetworkTablesInstance.getDefault()
if server:
    print("Setting up NetworkTables server")
    ntinst.startServer()
else:
    print("Setting up NetworkTables client for team {}".format(team))
    ntinst.startClientTeam(team)

#### Initiate ####
pipeline = GripPipeline()

# start up cameras
cameras = [startCamera(cameraConfig) for cameraConfig in cameraConfigs]

# "" listen address = bind on all interfaces
dualServer = MjpegServer("Dual Server", "", 1184)
dualServer.setSource(cameras[0])

# start up the cv2 stream — preallocate the frame buffer once
img = np.zeros(shape=(360, 240, 3), dtype=np.uint8)
cvSink = CameraServer.getInstance().getVideo()
cvStream = MjpegServer("Grip Stream", "", 6969)
gripVideo = CameraServer.getInstance().putVideo("GRIP stream", 360, 240)
cvStream.setSource(gripVideo)

# set up networktables: dashboard tuning knobs and change listeners
sd = nt.getTable("SmartDashboard")
ow = nt.getTable("ObiWan")
sd.addEntryListener(valueChanged)
ow.addEntryListener(valueChanged)
sd.putBoolean("testLines", False)
sd.putBoolean("testContours", False)
sd.putNumber("lowPoint", 0)
sd.putNumber("ctrArea", 0)
# This is only for debugging, as once I print out a bunch of values
# it's hard to see that the initial setup went correctly.
if PAUSE:
    print('Waiting...')
    time.sleep(5)
    print('Running!')

# Start mjpeg_server
mjpeg_server = MjpegServer('Vision Server', 1181)
# BUG(review): both servers were named 'Vision Server'; cscore sink names
# should be unique, so the second server gets a distinct name.
mjpeg_server_2 = MjpegServer('Vision Server 2', 1182)

# Start camera loop: enable each driver camera's sink and attach it to a server
driver_camera = driver_cameras[0]
driver_camera[1].setEnabled(True)
mjpeg_server.setSource(driver_camera[0])

driver_camera = driver_cameras[1]
driver_camera[1].setEnabled(True)
mjpeg_server_2.setSource(driver_camera[0])

camera, cvsink, config = vision_camera
cvsink.setEnabled(True)

# This code creates a cvsink which will push a modified image to the
# MJPEG stream. Testing only.
if USE_MODIFIED_IMAGE:
    name = 'cvsource: ' + vision_camera[2]['name']
    # NOTE(review): width/height are swapped relative to the config —
    # presumably a rotated camera; confirm against the capture pipeline.
    width = vision_camera[2]['height']
    height = vision_camera[2]['width']
    fps = vision_camera[2]['fps']
    # NOTE(review): this call is truncated in the source chunk; the trailing
    # 'fps)' completion is inferred from the parallel code path elsewhere
    # in this project — confirm.
    cvsource = CvSource(name, VideoMode.PixelFormat.kMJPEG, width, height,
                        fps)
from networktables.util import ntproperty
from pyimagesearch.shapedetector import ShapeDetector
import convenience


class Client(object):
    """Holder for NetworkTables auto-synced dashboard values."""
    # Mirrors /SmartDashboard/centerLoc as an (x, y) pair.
    centerLoc = ntproperty("/SmartDashboard/centerLoc", (0, 0))


cl = Client()

if __name__ == "__main__":
    team = 801
    ntinst = NetworkTablesInstance.getDefault()
    ntinst.startClientTeam(team)

    usbCamera = UsbCamera("USB Camera 0", 0)
    mjpegServer1 = MjpegServer("serve_USB Camera 0", 1181)
    mjpegServer1.setSource(usbCamera)

    cvSink = CvSink("opencv_USB Camera 0")
    cvSink.setSource(usbCamera)

    outputStream = CvSource("Blur", VideoMode.PixelFormat.kMJPEG, 320, 240, 15)
    mjpegServer2 = MjpegServer("serve_Blur Camera 1", 1182)
    mjpegServer2.setSource(outputStream)

    # Preallocate the frame buffer once; grabFrame() fills it in place.
    frame = np.zeros((320, 240, 3), 'uint8')
    # NOTE(review): 'iter' shadows the builtin; rename (e.g. frame_count)
    # once all uses in the rest of the file are visible.
    iter = 0
    while True:
        _, frame = cvSink.grabFrame(frame)
        # Threshold in HSV space for blue, then keep only the masked pixels.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower_blue = np.array([110, 50, 50])
        upper_blue = np.array([130, 255, 255])
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        res = cv2.bitwise_and(frame, frame, mask=mask)
port = 1181
mjpeg_server = MjpegServer('Vision Server', port)
# If you ever need to standardize resolutions between cameras, you
# can use the following. It's bad though, because then it
# uncompresses and resizes all images, so it decreases FPS.
# mjpeg_server.setResolution(640, 480)
# mjpeg_server.setCompression(70)

# Start camera loop: enable the driver camera's sink (its stream is not
# attached here — the server shows the vision camera instead)
driver_camera = driver_cameras[0]
driver_camera[1].setEnabled(True)
# mjpeg_server.setSource(driver_camera[0])

camera, cvsink, config = vision_camera
cvsink.setEnabled(True)
mjpeg_server.setSource(camera)

# This code creates a cvsink which will push a modified image to the
# MJPEG stream. Testing only.
if use_modified_image:
    name = 'cvsource: ' + vision_camera[2]['name']
    # NOTE(review): width/height are swapped relative to the config —
    # presumably a rotated camera; confirm against the capture pipeline.
    width = vision_camera[2]['height']
    height = vision_camera[2]['width']
    fps = vision_camera[2]['fps']
    cvsource = CvSource(name, VideoMode.PixelFormat.kMJPEG, width, height,
                        fps)
    mjpeg_server.setSource(cvsource)

processor = TargetProcessing()

# Always preallocate, it runs a lot faster.
# NOTE(review): this call is truncated in the source chunk; the trailing
# 'dtype=np.uint8)' completion is inferred from the identical buffer
# allocations elsewhere in this file — confirm.
img = np.zeros(shape=(config['height'], config['width'], 3),
               dtype=np.uint8)