def init_networktables(ipaddr):
    """Connect NetworkTables as a client of the robot at *ipaddr*.

    :param ipaddr: IP address or hostname of the NetworkTables server
    """
    logger.info("Connecting to robot %s" % ipaddr)
    # Fix: the class is named `NetworkTable` (capital T); the original
    # `Networktable.*` calls raised NameError at runtime.
    NetworkTable.setIPAddress(ipaddr)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    logger.info("Initialized networktables")
    logger.info("Waiting...")
def init_networktables(ipaddr):
    """Bring up NetworkTables in client mode against the given server address."""
    logger.info("Connecting to networktables at %s" % ipaddr)
    NetworkTable.setIPAddress(ipaddr)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    logger.info("Networktables Initialized")
def __init__(self, receiverIP):
    """Connect to the robot's NetworkTables and wire up the vision tables.

    :param receiverIP: static IP ("10.49.15.2") or hostname
        ("roboRIO-4915-FRC" / "localhost") of the NT server
    """
    try:
        # IPAddress can be static ip ("10.49.15.2" or name:"roboRIO-4915-FRC"/"localhost")
        NetworkTable.setUpdateRate(.01)  # default is .05 (50ms/20Hz), .01 (10ms/100Hz)
        NetworkTable.setIPAddress(receiverIP)
        NetworkTable.setClientMode()
        NetworkTable.initialize()
        self.sd = NetworkTable.getTable("SmartDashboard")

        # we communicate target to robot via VisionTarget table
        self.targetTable = self.sd.getSubTable("VisionTarget")

        # robot communicates to us via fields within the VisionControl SubTable;
        # we opt for a different table to ensure we receive callbacks from our
        # own writes.
        self.controlTable = self.sd.getSubTable("VisionControl")
        self.controlTable.addConnectionListener(self.connectionListener)
        self.controlTable.addTableListener(self.visionControlEvent)

        self.control = Control()
        self.target = Target()
        # fpsHistory: recent frame timings; lastUpdate: wall-clock of last event
        self.fpsHistory = []
        self.lastUpdate = time.time()
        # NOTE(review): this assigns a *local* name only — if a module-level
        # `theComm` was intended, a `global theComm` statement is missing.
        theComm = self
    except:
        # Boundary handler: report any NT setup failure without crashing.
        xcpt = sys.exc_info()
        print("ERROR initializing network tables", xcpt[0])
        traceback.print_tb(xcpt[2])
def main():
    """Track the retroreflective target and publish angle/direction to NT.

    Loops over camera frames, thresholds for the green tape reflection,
    finds the largest contour's center, and publishes 'CameraAngle' and
    'Direction' (0 = right, 1 = left) to the SmartDashboard table.
    """
    cap = cv2.VideoCapture(1)

    # Network Tables Setup
    logging.basicConfig(level=logging.DEBUG)
    NetworkTable.setIPAddress('10.32.56.2')
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    nt = NetworkTable.getTable('SmartDashboard')

    while cap.isOpened():
        _, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Range for green light reflected off of the tape. Need to tune.
        lower_green = np.array(constants.LOWER_GREEN, dtype=np.uint8)
        upper_green = np.array(constants.UPPER_GREEN, dtype=np.uint8)

        # Threshold the HSV image to only get the green color.
        mask = cv2.inRange(hsv, lower_green, upper_green)

        # Gets contours of the thresholded image.
        _, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                                  cv2.CHAIN_APPROX_SIMPLE)
        # Draw the contours around detected object
        cv2.drawContours(frame, contours, -1, (0, 0, 255), 3)

        # Get centroid of tracked object, if any contours were found.
        if len(contours) > 0:
            # find largest contour
            cnt = max(contours, key=cv2.contourArea)
            center = get_center(cnt)
            cv2.circle(frame, center, 3, (0, 0, 255), 2)
            if center[0] != 0 and center[1] != 0:
                center_str_x = "x = " + str(center[0])
                center_str_y = "y = " + str(center[1])
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, "Center", constants.TEXT_COORDINATE_1,
                            font, 0.7, (0, 0, 255), 2)
                cv2.putText(frame, center_str_x, constants.TEXT_COORDINATE_2,
                            font, 0.7, (0, 0, 255), 2)
                cv2.putText(frame, center_str_y, constants.TEXT_COORDINATE_3,
                            font, 0.7, (0, 0, 255), 2)
                angle, direction = get_offset_angle(center[0], center[1])
                cv2.putText(frame, "Angle: " + str(angle),
                            constants.TEXT_COORDINATE_4, font, 0.7,
                            (0, 0, 255), 2)
                nt.putNumber('CameraAngle', angle)
                cv2.putText(frame, "Turn " + direction,
                            constants.TEXT_COORDINATE_5, font, 0.7,
                            (0, 0, 255), 2)
                # Fix: `direction` is a string (it is concatenated into the
                # "Turn " label above); the original compared against the
                # undefined bare name `right`, raising NameError.
                if direction == "right":
                    nt.putNumber('Direction', 0)
                else:
                    nt.putNumber('Direction', 1)

        # show image
        cv2.imshow('frame', frame)
        cv2.imshow('mask', mask)
        cv2.imshow('HSV', hsv)

        # close if ESC pressed (or delay in camera feed is too long)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break

    cv2.destroyAllWindows()
def initNetworktables():
    """Connect to a local NetworkTables server and return the 'Pi' table."""
    # Verbose logging so NetworkTables connection messages are visible.
    logging.basicConfig(level=logging.DEBUG)
    NetworkTable.setIPAddress('127.0.0.1')
    # NetworkTable.setIPAddress('roborio-5260.local')
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    return NetworkTable.getTable('Pi')
def setupNetworkTable(table, address):
    """Join the NT server at *address* and hand back the table named *table*."""
    # To see messages from networktables, you must setup logging
    logging.basicConfig(level=logging.DEBUG)
    NetworkTable.setIPAddress(address)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    return NetworkTable.getTable(table)
def __init__(self):
    """Connect to the roboRIO as an NT client and open the 'vision' table."""
    # Fix: converted Python-2-only `print` statements to the function form,
    # which is valid in both Python 2 and 3 for a single argument.
    print("Initializing NetworkTables...")
    NetworkTable.setClientMode()
    NetworkTable.setIPAddress("roborio-4761-frc.local")
    NetworkTable.initialize()
    self.table = NetworkTable.getTable("vision")
    print("NetworkTables initialized!")
def __init__(self, fakerobot):
    """Connect to NetworkTables and subscribe to the vision sub-table.

    :param fakerobot: if true, connect to a simulator on localhost instead
        of the real roboRIO
    """
    try:
        if fakerobot:
            NetworkTable.setIPAddress("localhost")
        else:
            NetworkTable.setIPAddress("roboRIO-4915-FRC")
        NetworkTable.setClientMode()
        NetworkTable.initialize()
        self.sd = NetworkTable.getTable("SmartDashboard")
        self.visTable = self.sd.getSubTable("Vision")
        # Listen for connection state changes and for value edits on the
        # Vision sub-table.
        self.connectionListener = ConnectionListener()
        self.visTable.addConnectionListener(self.connectionListener)
        self.visTable.addTableListener(self.visValueChanged)
        self.targetState = targetState.TargetState(self.visTable)
        # Defaults until the robot pushes real values.
        self.targetHigh = True
        self.autoAimEnabled = False
        self.imuHeading = 0
        # fpsHistory: recent frame timings; lastUpdate: wall-clock of last event
        self.fpsHistory = []
        self.lastUpdate = time.time()
    except:
        # Boundary handler: report any NT setup failure without crashing.
        xcpt = sys.exc_info()
        print("ERROR initializing network tables", xcpt[0])
        traceback.print_tb(xcpt[2])
def __init__(self, ip, name):
    """Connect to NT at *ip* and open the vision table named *name*."""
    NetworkTable.setIPAddress(ip)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    self.visionNetworkTable = NetworkTable.getTable(name)
    # Optionally mirror results onto the SmartDashboard as well.
    if constants.SENDTOSMARTDASHBOARD:
        constants.smartDashboard = NetworkTable.getTable("SmartDashboard")
def main():
    """Run the GRIP pipeline over an MJPEG camera stream forever."""
    print('Initializing NetworkTables')
    NetworkTable.setIPAddress("10.52.55.98")
    NetworkTable.setClientMode()
    NetworkTable.initialize()

    print('Creating video capture')
    cap = cv2.VideoCapture("http://10.52.55.3/mjpg/video.mjpg")
    print("here")
    bytes = ''
    first = False

    print('Creating pipeline')
    pipeline = GripPipeline()

    print('Running pipeline')
    while cap.isOpened():
        grabbed, img = cap.read()
        if grabbed:
            pipeline.process(img)
            extra_processing(pipeline)
    print('Capture closed')
def __init__(self, capture, log, settings):
    """Build a saber_track Tracker.

    :param capture: the cv2 capture object frames are read from
    :param log: project logger (must provide .init() and .info())
    :param settings: dict with at least "port", "filters", "in_file",
        "out_file" keys
    """
    self.limits = {}
    # Pass the log object
    self.log = log
    log.init("initializing saber_track Tracker")
    self.settings = settings
    # If a port is configured, connect NetworkTables as a client.
    if settings["port"] != "":
        logging.basicConfig(level=logging.DEBUG)
        NetworkTable.setIPAddress(settings["port"])
        NetworkTable.setClientMode()
        NetworkTable.initialize()
        self.smt_dash = NetworkTable.getTable("SmartDashboard")
    # initialize the filters. If the filter is the default: "", it will not
    # create trackbars for it.
    self.init_filters(settings["filters"])
    # Deal with inputs and outputs
    self.settings["out_file"] = str(self.settings["out_file"])  # file written on save
    if settings["in_file"] != "":
        self.log.init("Reading trackfile: " + settings["in_file"] + ".json")
        # Fix: the original opened `name + ".json"` but `name` was never
        # defined in this scope — the configured in_file was intended.
        # Also use a context manager so the handle is closed on error.
        with open(settings["in_file"] + ".json", "r") as fs:
            data = json.loads(fs.read())
        self.limits.update(data)
    # Localize the capture object
    self.capture = capture
    # If there are any color limits (upper/lower HSV values), enable tracking.
    self.track = len(self.limits) > 0
    self.log.info("Tracking: " + str(self.track))
def main():
    """Configure the camera via v4l2 and loop frames through the GRIP pipeline."""
    NetworkTable.setTeam(5587)  # TODO set your team number
    NetworkTable.setClientMode()
    # TODO switch to RIO IP, or IP of laptop running OutlineViewer for setup
    NetworkTable.setIPAddress('10.55.87.2')
    NetworkTable.initialize()

    # TODO find what v4l2-ctl settings you need. Pass each commandline option
    # through this array. REQUIRES v4l2-utils to be installed.
    call(["v4l2-ctl",
          "--set-ctrl=exposure_auto=1",
          "--set-ctrl=exposure_absolute=9",
          "--set-ctrl=white_balance_temperature_auto=0"],
         shell=False)

    cap = cv2.VideoCapture(0)
    pipeline = GripPipeline()
    while True:
        ok, img = cap.read()
        if ok:
            # TODO add extra parameters if the pipeline takes more than just a
            # single image
            pipeline.process(img)
            extra_processing(pipeline)
def __init__(self):
    """initializes all values to presets or None if need to be set
    """
    NetworkTable.initialize(server='10.44.80.2') #Current Roborio address
    NetworkTable.setUpdateRate(.02)
    self.numberPublish = NetworkTable.getTable('/GRIP/myContoursReport')

    # HSL threshold limits ([min, max] per channel), tuned offline in GRIP.
    self.__hsl_threshold_hue = [56.6546762589928, 83.28925638990128]
    self.__hsl_threshold_saturation = [229.31654676258992, 255.0]
    self.__hsl_threshold_luminance = [
        13.758992805755396, 51.51952461799658
    ]
    # Stage outputs start as None; each stage's input aliases the previous
    # stage's output slot.
    self.hsl_threshold_output = None

    self.__find_contours_input = self.hsl_threshold_output
    self.__find_contours_external_only = False
    self.find_contours_output = None

    self.__filter_contours_contours = self.find_contours_output
    # Contour-filter bounds; contours outside these limits are discarded.
    self.__filter_contours_min_area = 125.0
    self.__filter_contours_min_perimeter = 0.0
    self.__filter_contours_min_width = 0.0
    self.__filter_contours_max_width = 1000.0
    self.__filter_contours_min_height = 0.0
    self.__filter_contours_max_height = 1000.0
    self.__filter_contours_solidity = [0, 100]
    self.__filter_contours_max_vertices = 1000000.0
    self.__filter_contours_min_vertices = 0.0
    self.__filter_contours_min_ratio = 0.0
    self.__filter_contours_max_ratio = 1000.0
    self.filter_contours_output = None
def setupVisionTable():
    """Join the robot's NetworkTables and reset vision to its idle state."""
    global visionTable
    NetworkTable.setIPAddress("10.46.69.21")
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    visionTable = NetworkTable.getTable("vision")
    # Start with processing disabled and the ring light dark.
    setRunVision(False)
    turnOffLight()
def init_network_tables(ip='127.0.0.1'):
    """Attach to a NetworkTables server as a client.

    :param ip: ip address or hostname of the server
    """
    NetworkTable.setIPAddress(ip)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    LOG.info('Network Tables connected as client to {}'.format(ip))
def main():
    """Read an Axis-camera MJPEG stream, find goals, and publish them via NT.

    Python 2 code: the stream buffer is a byte-`str` and frames are cut out
    by locating the JPEG SOI/EOI markers (\xff\xd8 / \xff\xd9).
    """
    ip = "10.24.81.2"
    NetworkTable.setIPAddress(ip)
    NetworkTable.setClientMode()
    #print "Initializing Network Tables"
    NetworkTable.initialize()
    goalFinder = GoalFinder()
    stream = urllib.urlopen('http://10.24.81.11/mjpg/video.mjpg')
    bytes = ''
    #print "Start Target Search Loop..."
    #turn true for single picture debuging
    first = False
    beagle = NetworkTable.getTable("GRIP")
    goals_table = beagle.getSubTable("aGoalContours")
    while True:
        #TODO: Fetch image from camera.
        # img = cv2.imread("0.jpg")
        bytes += stream.read(16384)
        # Locate the last complete JPEG: end marker first, then the start
        # marker that precedes it.
        b = bytes.rfind('\xff\xd9')
        a = bytes.rfind('\xff\xd8', 0, b - 1)
        if a != -1 and b != -1:
            jpg = bytes[a:b + 2]
            bytes = bytes[b + 2:]  # drop consumed data from the buffer
            img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
            goalFinder.process_image(img)
            # Publish one parallel array per target attribute.
            goals_table.putValue("centerX", NumberArray.from_list(goalFinder.targetXs))
            goals_table.putValue("centerY", NumberArray.from_list(goalFinder.targetYs))
            goals_table.putValue(
                "width", NumberArray.from_list(goalFinder.targetWidths))
            goals_table.putValue(
                "height", NumberArray.from_list(goalFinder.targetHeights))
            goals_table.putValue("area", NumberArray.from_list(goalFinder.targetAreas))
            #if len(goalFinder.targetAreas) > 0:
            #    print goalFinder.targetAreas
            #Use if you want to the save the image and retrieve it later.
            if first:
                first = False
                cv2.imwrite("test.jpg", img)
        # Heartbeat counter so a watchdog can tell this loop is alive.
        goals_table.putNumber("OwlCounter", goals_table.getNumber("OwlCounter", 0) + 1)
        sleep(.01)
def __init__(self):
    """Start NT, enable the target finder, and build one trackbar per setting."""
    NetworkTable.initialize()
    self.tf = TargetFinder.TargetFinder()
    self.tf.enabled = True
    cv2.namedWindow('img')
    for name in self.settings:
        self._create_trackbar(name)
def init_networktables():
    """Connect to NT and bind the module-level 'vision' table."""
    global table
    #TODO: Check to see if NetworkTables is already initialized
    if not using_networktables:
        raise Exception("Attempted to initialize NetworkTables while NetworkTables is disabled!")
    NetworkTable.setIPAddress(args.networktables_ip)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    table = NetworkTable.getTable("vision")
    log.info("Initialized NetworkTables")
def init_ntable():
    """Connect to the RIO and bind the global 'vision' table with a listener."""
    global ntable
    NetworkTable.setIPAddress(config.server["rioAddress"])
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    ntable = NetworkTable.getTable('vision')
    ntable.addTableListener(onValueChanged)
def initNetworktables(options):
    """Bring up NetworkTables in dashboard or client mode per *options*."""
    if options.dashboard:
        logger.info("Connecting to networktables in Dashboard mode")
        NetworkTable.setDashboardMode()
    else:
        logger.info("Connecting to networktables at %s", options.robot)
        NetworkTable.setIPAddress(options.robot)
        NetworkTable.setClientMode()
    NetworkTable.initialize()
    logger.info("Networktables Initialized")
def main():
    """Feed webcam frames through the GRIP pipeline indefinitely."""
    NetworkTable.setTeam('0000')  # TODO set your team number
    NetworkTable.initialize()
    cap = cv2.VideoCapture(0)
    pipeline = GripPipeline()
    while True:
        ok, img = cap.read()
        if ok:
            # TODO add extra parameters if the pipeline takes more than just
            # a single image
            pipeline.process(img)
            extra_processing(pipeline)
def init_networktables(options):
    """Initialize NetworkTables, honoring dashboard mode when requested."""
    if not options.dashboard:
        logger.info("Connecting to networktables at %s", options.robot)
        NetworkTable.setIPAddress(options.robot)
        NetworkTable.setClientMode()
    else:
        logger.info("Connecting to networktables in Dashboard mode")
        NetworkTable.setDashboardMode()
    NetworkTable.initialize()
    logger.info("Networktables Initialized")
def setup(robot_ip, table_id, connection_listener_class=None):
    """Connect to the robot and return the requested NetworkTable.

    :param robot_ip: address of the robot's NT server
    :param table_id: name of the table to open
    :param connection_listener_class: optional listener class; an instance
        is registered on the table when provided
    """
    print("Connecting to NetworkTable '{}' on Robot at '{}'".format(table_id, robot_ip))
    NetworkTable.setIPAddress(robot_ip)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    nt = NetworkTable.getTable(table_id)
    if connection_listener_class:
        nt.addConnectionListener(connection_listener_class())
    return nt
def init_filter():
    '''Function called by mjpg-streamer to initialize the filter'''
    # Connect to the robot
    NetworkTable.setIPAddress('127.0.0.1')
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    #os.system("v4l2-ctl -d /dev/video1 -c exposure_auto=1 -c exposure_absolute=20")
    #os.system("v4l2-ctl -d /dev/video2 -c exposure_auto=1 -c exposure_absolute=10")
    finder = TargetFinder()
    return finder.quad_normals
def main():
    """Read an Axis-camera MJPEG stream, find goals, and publish them via NT.

    Python 2 code: the stream buffer is a byte-`str` and frames are cut out
    by locating the JPEG SOI/EOI markers (\xff\xd8 / \xff\xd9).
    """
    ip = "10.24.81.2"
    NetworkTable.setIPAddress(ip)
    NetworkTable.setClientMode()
    # print "Initializing Network Tables"
    NetworkTable.initialize()
    goalFinder = GoalFinder()
    stream = urllib.urlopen("http://10.24.81.11/mjpg/video.mjpg")
    bytes = ""
    # print "Start Target Search Loop..."
    # turn true for single picture debuging
    first = False
    beagle = NetworkTable.getTable("GRIP")
    goals_table = beagle.getSubTable("aGoalContours")
    while True:
        # TODO: Fetch image from camera.
        # img = cv2.imread("0.jpg")
        bytes += stream.read(16384)
        # Locate the last complete JPEG: end marker first, then the start
        # marker that precedes it.
        b = bytes.rfind("\xff\xd9")
        a = bytes.rfind("\xff\xd8", 0, b - 1)
        if a != -1 and b != -1:
            jpg = bytes[a : b + 2]
            bytes = bytes[b + 2 :]  # drop consumed data from the buffer
            img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
            goalFinder.process_image(img)
            # Publish one parallel array per target attribute.
            goals_table.putValue("centerX", NumberArray.from_list(goalFinder.targetXs))
            goals_table.putValue("centerY", NumberArray.from_list(goalFinder.targetYs))
            goals_table.putValue("width", NumberArray.from_list(goalFinder.targetWidths))
            goals_table.putValue("height", NumberArray.from_list(goalFinder.targetHeights))
            goals_table.putValue("area", NumberArray.from_list(goalFinder.targetAreas))
            # if len(goalFinder.targetAreas) > 0:
            #     print goalFinder.targetAreas
            # Use if you want to the save the image and retrieve it later.
            if first:
                first = False
                cv2.imwrite("test.jpg", img)
        # Heartbeat counter so a watchdog can tell this loop is alive.
        goals_table.putNumber("OwlCounter", goals_table.getNumber("OwlCounter", 0) + 1)
        sleep(0.01)
def __init__(self):
    """Start NT, construct the image processor, and build tuning trackbars."""
    NetworkTable.initialize()
    from vision import ImageProcessor
    self.tf = ImageProcessor()
    self.tf.enabled = True
    self.tf.tuning = True
    cv2.namedWindow('img')
    for name in self.settings:
        self._create_trackbar(name)
    # Extra toggle controlling whether the threshold view is drawn.
    cv2.createTrackbar('draw_thresh', 'img', 0, 1,
                       lambda v: self._en_thresh(v))
def __init__(self, options):
    """Connect to NT, load the JSON config, then enter the main loop."""
    NetworkTable.setIPAddress(options.ip)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    logger.debug('Networktables Initialized')
    self.current_session = None
    # Watch the root table for connection changes.
    self.sd = NetworkTable.getTable('/')
    self.sd.addConnectionListener(self)
    with open(options.config) as config_file:
        self.config = json.load(config_file)
    self.run()
def main():
    """Watchdog: restart the camera service when GRIP counters stop updating.

    Polls the OwlCounter/RobotCounter heartbeats; after ~10 stale reads the
    'Camera' systemd unit is restarted.
    """
    prev = 0
    current = 0
    owlMissingCounter = 0
    robotMissingCounter = 0
    prevRobotMissing = 0
    currentRobotMissing = 0
    ip = "roboRIO-2481-FRC.local"
    NetworkTable.setIPAddress(ip)
    NetworkTable.setClientMode()
    # Fix: converted Python-2-only `print` statements to the function form,
    # which behaves identically for a single argument on Python 2 and 3.
    print("Bird Watcher: Initializing Network Tables")
    NetworkTable.initialize()
    time.sleep(2)
    while True:
        beagle = NetworkTable.getTable("GRIP")
        goals_table = beagle.getSubTable("aGoalContours")
        current = goals_table.getNumber("OwlCounter", 0)
        currentRobotMissing = goals_table.getNumber("RobotCounter", 0)
        # Counter unchanged since last poll => the producer may be stuck.
        if current == prev:
            owlMissingCounter += 1
        else:
            owlMissingCounter = 0
        #print owlMissingCounter
        prev = current
        if owlMissingCounter > 10:
            print("Bird Watcher: Restarting Owl Service")
            os.system("systemctl restart Camera")
            time.sleep(5)
        else:
            time.sleep(1)
        if currentRobotMissing == prevRobotMissing:
            robotMissingCounter += 1
        else:
            robotMissingCounter = 0
        prevRobotMissing = currentRobotMissing
        if robotMissingCounter > 10:
            print("Robot Watcher: Restarting Owl Service")
            os.system("systemctl restart Camera")
            time.sleep(5)
def __init__(self):
    """Wire SmartDashboard listeners; values are cached until socketio attaches."""
    self.socketio = None
    self.value_cache = {'tracking_enabled': True}
    NetworkTable.setIPAddress(config.ROBOT_IP_ADDRESS)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    self.sd = NetworkTable.getTable('SmartDashboard')
    self.sd.addTableListener(self.on_value_changed)
    self.c_listener = NetworkTablesConnectionListener(self.on_connection_changed)
    self.sd.addConnectionListener(self.c_listener)
def main():
    """Run the vision loop, printing per-iteration latency in milliseconds."""
    NetworkTable.setIPAddress('10.19.37.2')
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    sd = NetworkTable.getTable('SmartDashboard')
    #ms_list = []
    while True:
        time.sleep(0.1)
        start_time = datetime.now()
        # returns the elapsed milliseconds since the start of the program
        vision(sd)
        dt = datetime.now() - start_time
        ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
        # Fix: `print ms` is Python-2-only syntax; the function form works
        # on both Python 2 and 3.
        print(ms)
    cv2.destroyAllWindows()
def __init__(self,parent,id):
    """Build the robot dashboard window and start polling NetworkTables.

    Lays out read-only display fields (distance / vision / velocity / speed),
    binds auto-updating NT values, and launches a daemon thread that keeps
    the UI in sync.
    """
    wx.Frame.__init__(self,parent,id,"Robot",size=(265,300))
    self.frame=wx.Panel(self)
    # Read-only text fields display values streamed from the robot.
    self.front_left=wx.TextCtrl(self.frame,-1,pos=(100,25),size=(150,40),style=wx.NO_BORDER|wx.TE_READONLY)
    distance_words=wx.StaticText(self.frame,-1,"Distance",pos=(10,25))
    velocity_words=wx.StaticText(self.frame,-1,"Velocity",pos=(10,125))
    speed_words=wx.StaticText(self.frame,-1,"Speed",pos=(10,175))
    self.front_left.SetBackgroundColour("green")
    self.speed_stuff=wx.TextCtrl(self.frame,-1,pos=(100,175),size=(150,40),style=wx.NO_BORDER|wx.TE_READONLY)
    self.velocity_stuff=wx.TextCtrl(self.frame,-1,pos=(100,125),size=(150,40),style=wx.NO_BORDER|wx.TE_READONLY)
    self.camera_stuff=wx.TextCtrl(self.frame,-1,pos=(100,75),size=(150,40),style=wx.NO_BORDER|wx.TE_READONLY)
    vision_words=wx.StaticText(self.frame,-1,"Vision",pos=(10,75))
    self.camera_stuff.SetBackgroundColour("green")
    # One shared 18pt font for every label and field.
    self.user_font=wx.Font(18, wx.DEFAULT ,wx.NORMAL, wx.NORMAL)
    distance_words.SetFont(self.user_font)
    self.front_left.SetFont(self.user_font)
    self.camera_stuff.SetFont(self.user_font)
    speed_words.SetFont(self.user_font)
    velocity_words.SetFont(self.user_font)
    vision_words.SetFont(self.user_font)
    self.velocity_stuff.SetFont(self.user_font)
    self.speed_stuff.SetFont(self.user_font)
    self.camera=wx.Button(self.frame,label="Camera",pos=(50,220),size=(150,40))
    self.Bind(wx.EVT_BUTTON, self.camera_link,self.camera)
    self.Bind(wx.EVT_CLOSE, self.close_window)
    #Pynetwork tables stuff
    NetworkTable.setIPAddress("127.0.0.1")#Temp for testing
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    self.smrt = NetworkTable.getTable("SmartDashboard")
    # Auto-updating values mirror the robot-published numbers (default 0).
    self.distance = self.smrt.getAutoUpdateValue('Distance', 0)
    self.vision = self.smrt.getAutoUpdateValue('Vision', 0)
    self.velocity = self.smrt.getAutoUpdateValue('Velocity', 0)
    self.speed = self.smrt.getAutoUpdateValue('Speed', 0)
    # Daemon thread refreshes the UI from the NT values until close.
    threader=threading.Thread(target=self.threading)
    self.keep_open=True
    threader.setDaemon(1)
    threader.start()
class TCDash(App): NetworkTable.setIPAddress( "127.0.0.1") #for robot will be 10.2.17.2 aka ip of rio NetworkTable.setClientMode() NetworkTable.initialize() sd = NetworkTable.getTable("SmartDashboard") def build(self): return main()
def __init__(self, fakerobot):
    """Connect to NetworkTables and subscribe to the SmartDashboard 'vision' sub-table.

    :param fakerobot: if true, connect to a simulator on localhost instead
        of the real roboRIO
    """
    try:
        if fakerobot:
            NetworkTable.setIPAddress("localhost")
        else:
            NetworkTable.setIPAddress("roboRIO-4915-FRC")
        NetworkTable.setClientMode()
        NetworkTable.initialize()
        self.smartDashboard = NetworkTable.getTable("SmartDashboard")
        # Connection listener on the parent table, value listener on the
        # vision sub-table.
        self.connectionListener = ConnectionListener()
        self.smartDashboard.addConnectionListener(self.connectionListener)
        self.visTable = self.smartDashboard.getSubTable("vision")
        self.visTable.addTableListener(self.visValueChanged)
        self.targetState = targetState.TargetState(self.visTable)
    except:
        # Boundary handler: report any NT setup failure without crashing.
        xcpt = sys.exc_info()
        print("ERROR initializing network tables", xcpt[0])
        traceback.print_tb(xcpt[2])
def main():
    """Run the GRIP pipeline against a local camera until the capture closes."""
    print('Initializing NetworkTables')
    NetworkTable.setClientMode()
    NetworkTable.setIPAddress('localhost')
    NetworkTable.initialize()

    print('Creating Video Capture')
    cap = cv2.VideoCapture(1)

    print('Creating Pipeline')
    pipeline = GripPipeline()

    print('Running Pipeline')
    while cap.isOpened():
        grabbed, img = cap.read()
        if grabbed:
            pipeline.process(img)
            extra_processing(pipeline)
    print('Capture Closed')
def __init__(self):
    """Run as NT server by default; pass an IP on argv to run as a client."""
    client = len(sys.argv) > 1
    if client:
        NetworkTable.setIPAddress(sys.argv[1])
        NetworkTable.setClientMode()
    NetworkTable.initialize()
    # Default write flush is 0.05, could adjust for less latency
    #NetworkTable.setWriteFlushPeriod(0.01)
    self.nt = NetworkTable.getTable('/benchmark')
    self.updates = 0
    if client:
        self.nt.addTableListener(self.on_update)
        self.recv_benchmark()
    else:
        self.send_benchmark()
class test:
    """Smoke test: stream an incrementing counter through the 'Vision' table."""
    ip = 'roborio-2643-frc.local'
    ip2 = '10.26.43.20'
    NetworkTable.setIPAddress(ip2)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    table = NetworkTable.getTable("Vision")
    x = 0

    def sendVal(self, val):
        # Fix: the original signature was `sendVal(val)` but the body used
        # `self`, which raised NameError whenever the method was called;
        # bind self explicitly so instance calls work.
        self.table.putNumber("Hello", val)

    # NOTE(review): this loop executes at class-definition time and never
    # terminates, so the `class` statement itself never completes. Left in
    # place to preserve behavior; it most likely belongs in a main().
    while 1:
        table.putNumber("Hello", x)
        print(table.getNumber("Hello", 0))
        x += 1
def init_network_tables(ipAddress, table):
    """Connect as an NT client to *ipAddress* and return the named table."""
    NetworkTable.setIPAddress(ipAddress)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    return NetworkTable.getTable(table)
ap.add_argument("-n", "--num-frames", type=int, default=100, help="# of frames to loop over for FPS test") ap.add_argument("-d", "--display", type=int, default=-1, help="Whether or not frames should be displayed") args = vars(ap.parse_args()) NetworkTable.setIPAddress( "roborio-2609-frc.local") #Change the address to your own NetworkTable.setClientMode() NetworkTable.initialize() sd = NetworkTable.getTable("RaspberryPi") # grab a pointer to the video stream and initialize the FPS counter print("[INFO] sampling frames from webcam...") stream = cv2.VideoCapture(0) fps = FPS().start() # loop over some frames while fps._numFrames < args["num_frames"]: # grab the frame from the stream and resize it to have a maximum # width of 640 pixels (grabbed, frame) = stream.read() frame = imutils.resize(frame, width=640) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) #Convert from BGR to HSV #Get values from the Networktable to use
# Fix: this script uses sys.argv and time.sleep but never imported sys or
# time — both raised NameError at runtime.
import sys
import time

from networktables import NetworkTable
from networktables.util import ChooserControl

# To see messages from networktables, you must setup logging
import logging

logging.basicConfig(level=logging.DEBUG)

if len(sys.argv) != 2:
    print("Error: specify an IP to connect to!")
    exit(0)

ip = sys.argv[1]

NetworkTable.setIPAddress(ip)
NetworkTable.setClientMode()
NetworkTable.initialize()


def on_choices(value):
    """Callback fired when the chooser's option list changes."""
    print('OnChoices', value)


def on_selected(value):
    """Callback fired when the selected autonomous mode changes."""
    print('OnSelected', value)


cc = ChooserControl('Autonomous Mode', on_choices, on_selected)

# Keep the process alive so the listeners continue to fire.
while True:
    time.sleep(1)
def main():
    """Find the vision target, compute the turn angle, and publish it via NT.

    Publishes IS_ALIGNED / ANGLE_TO_TURN / PROCESSED_TIME / VISION_ON to the
    'NerdyVision' table each frame; on any NT failure, safe fallback values
    are written instead.
    """
    # brightness adjusted, used to be 30, now is 70
    os.system("v4l2-ctl -d /dev/video0 "
              "-c brightness=70 "
              "-c contrast=10 "
              "-c saturation=100 "
              "-c white_balance_temperature_auto=0 "
              "-c power_line_frequency=2 "
              "-c white_balance_temperature=4500 "
              "-c sharpness=25 "
              "-c backlight_compensation=0 "
              "-c exposure_auto=1 "
              "-c exposure_absolute=5 "
              "-c pan_absolute=0 "
              "-c tilt_absolute=0 "
              "-c zoom_absolute=0")

    NetworkTable.setIPAddress("roboRIO-687-FRC.local")
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    table = NetworkTable.getTable("NerdyVision")
    print("NetworkTables initialized")

    angle_to_turn = 0
    while 687:
        aligned = False
        previous_angle_to_turn = angle_to_turn
        ret, frame = cap.read()
        capture_time = time.time()

        # Denoise with erode/dilate instead of a blur.
        # blur = cv2.GaussianBlur(frame, (11, 11), 0)
        kernel = np.ones((5, 5), np.uint8)
        erosion = cv2.erode(frame, kernel, iterations=1)
        dilation = cv2.dilate(erosion, kernel, iterations=1)

        res, mask = NerdyFunctions.mask(NerdyConstants.LOWER_GREEN,
                                        NerdyConstants.UPPER_GREEN, dilation)
        _, cnts, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
        if len(cnts) > 0:
            # Largest significant 4-sided contour is treated as the goal.
            c = max(cnts, key=cv2.contourArea)
            area = cv2.contourArea(c)
            if area > NerdyConstants.MIN_GOAL_AREA:
                goal = NerdyFunctions.polygon(c, 0.025)
                if len(goal) == 4:
                    cv2.drawContours(res, [goal], 0, (255, 0, 0), 5)
                    M = cv2.moments(goal)
                    if M['m00'] > 0:
                        cx, cy = NerdyFunctions.calc_center(M)
                        center = (cx, cy)
                        cv2.circle(res, center, 5, (255, 0, 0), -1)
                        error = cx - NerdyConstants.FRAME_CX
                        angle_to_turn = NerdyFunctions.calc_horiz_angle(error)
                        print("ANGLE_TO_TURN: " + str(angle_to_turn))
                        aligned = NerdyFunctions.is_aligned(angle_to_turn)
                        print("IS_ALIGNED: " + str(aligned))

        processed_time = time.time()
        delta_time = processed_time - capture_time
        print("PROCESSED_TIME: " + str(delta_time))

        # Has to be commented out because ssh doesn't allow opencv windows open
        # NerdyFunctions.draw_static(res)
        # cv2.imshow("NerdyVision", res)

        try:
            table.putBoolean('IS_ALIGNED', aligned)
            # Only publish fresh values when the angle actually changed.
            if previous_angle_to_turn != angle_to_turn:
                table.putNumber('ANGLE_TO_TURN', angle_to_turn)
                table.putNumber('PROCESSED_TIME', delta_time)
            else:
                table.putNumber('ANGLE_TO_TURN', 0)
                table.putNumber('PROCESSED_TIME', 0)
            table.putBoolean('VISION_ON', True)
        except:
            print("DATA NOT SENDING...")
            # Fix: key was misspelled 'IS_ALINGED', so the robot-side reader
            # of 'IS_ALIGNED' never received the failure value.
            table.putBoolean('IS_ALIGNED', False)
            table.putNumber('ANGLE_TO_TURN', 0)
            table.putNumber('PROCESSED_TIME', 0)
            table.putBoolean('VISION_ON', False)

        cv2.waitKey(1)

    cap.release()
    cv2.destroyAllWindows()
def __init__(self, ip="roboRIO-3242-FRC.local"):
    """Connect to the robot at *ip* and open the 'rpi' table.

    :param ip: hostname or address of the NT server (defaults to the RIO)
    """
    NetworkTable.setIPAddress(ip)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    self.table = NetworkTable.getTable("rpi")
def main():
    """Boiler/gear vision loop: find targets and publish angle + aligned flag.

    NOTE(review): `shooting` and `gears` are read below but their assignment
    (`check_modes()`) is commented out — as written, reaching `if shooting:`
    raises NameError. Confirm the intended defaults before shipping.
    """
    # turn on modes specified by user
    # uncomment next line if this feature is desired
    # shooting, gears = check_modes()

    # network table setup
    NetworkTable.setIPAddress("roboRIO-687-FRC.local")
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    SmartDashboard = NetworkTable.getTable("NerdyVision")
    print("NetworkTables initialized")

    # adjust camera settings
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, NerdyConstants.FRAME_X)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, NerdyConstants.FRAME_Y)
    cap.set(cv2.CAP_PROP_EXPOSURE, -8.0)

    # set up FPS list and iterator
    times = [0] * 25
    time_idx = 0
    time_start = time.time()
    camfps = 0

    while 687:
        ret, frame = cap.read()
        # the next 2 lines are for sample image testing for shooting
        #frame = cv2.imread("sample_images/LED_Boiler/" + str(sample_image) + ".jpg")
        #print(sample_image)

        # init values (for x)
        angle_to_turn = 0
        aligned = False

        # gaussian blur to remove noise
        blur = cv2.GaussianBlur(frame, (11, 11), 0)

        # remove everything but specified color
        res, mask = NerdyFunctions.mask(NerdyConstants.LOWER_GREEN, NerdyConstants.UPPER_GREEN, blur)

        # draw references
        NerdyFunctions.draw_static(res)

        # find contour of goal
        _, cnts, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        center = None

        if shooting:
            # only proceed if at least one contour was found
            if len(cnts) > 0:
                # find the largest contour (closest goal) in the mask
                c = max(cnts, key=cv2.contourArea)
                # make sure the largest contour is significant
                area = cv2.contourArea(c)
                if area > NerdyConstants.MIN_BOILER_AREA:
                    goal = NerdyFunctions.polygon(c, 0)
                    # draw the contour
                    cv2.drawContours(res, [goal], 0, (255, 0, 0), 5)
                    # calculate centroid
                    M = cv2.moments(goal)
                    if M['m00'] > 0:
                        cx, cy = NerdyFunctions.calc_center(M)
                        center = (cx, cy)
                        # draw centroid
                        cv2.circle(res, center, 5, (255, 0, 0), -1)
                        # calculate error in degrees
                        # NOTE(review): the gears branch below and the sibling
                        # mains in this file read FRAME_CX from NerdyConstants;
                        # verify NerdyFunctions.FRAME_CX exists and is not a typo.
                        error = cx - NerdyFunctions.FRAME_CX
                        angle_to_turn = NerdyFunctions.calc_horiz_angle(error)
                        print("ANGLE TO TURN " + str(angle_to_turn))
                        # check if shooter is aligned
                        aligned = NerdyFunctions.is_aligned(angle_to_turn)
                        print("ALIGNED " + str(aligned))
                        NerdyFunctions.report_command(error)
                        NerdyFunctions.report_y(cy)
        elif gears:
            # only proceed if at least two contours (two blocks around peg) was found
            if len(cnts) > 1:
                centers_x = [0]
                centers_y = [0]
                # find the two blocks in the mask based on areas
                for i in range(len(cnts)):
                    c = cnts[i]
                    area = cv2.contourArea(c)
                    if NerdyConstants.MIN_GEAR_AREA < area < NerdyConstants.MAX_GEAR_AREA:
                        goal = NerdyFunctions.polygon(c, 0.02)
                        # draw the contour
                        cv2.drawContours(res, [goal], 0, (255, 0, 0), 5)
                        M = cv2.moments(goal)
                        if M['m00'] > 0:
                            cx, cy = NerdyFunctions.calc_center(M)
                            center = (cx, cy)
                            # draw centroid
                            cv2.circle(res, center, 5, (255, 0, 0), -1)
                            centers_x.append(cx)
                            centers_y.append(cy)
                # calculate center of two contours (blocks next to peg)
                if len(centers_x) == 3 and len(centers_y) == 3:
                    target_x = NerdyFunctions.avg(centers_x[1], centers_x[2])
                    target_y = NerdyFunctions.avg(centers_y[1], centers_y[2])
                    target = (target_x, target_y)
                    cv2.circle(res, target, 5, (0, 255, 0), -1)
                    print(target_x)
                    print(target_y)
                    # calculate angle to turn
                    error = target_x - NerdyConstants.FRAME_CX
                    angle_to_turn = NerdyFunctions.calc_horiz_angle(error)
                    print("ANGLE TO TURN " + str(angle_to_turn))
                    # check if gear mechanism is aligned
                    aligned = NerdyFunctions.is_aligned(angle_to_turn)
                    print("ALIGNED " + str(aligned))
                    NerdyFunctions.report_command(error)

        # results
        cv2.imshow("NerdyVision", res)
        try:
            # send to network tables
            SmartDashboard.putNumber('ANGLE_TO_TURN', angle_to_turn)
            SmartDashboard.putBoolean('IS_ALIGNED', aligned)
            print("DATA SENDING")
        except:
            print("DATA NOT SENDING...")

        cv2.waitKey(1)

    cap.release()
    cv2.destroyAllWindows()
def main():
    """Gear-peg vision loop: locate the two tape strips and publish the angle.

    Finds contours matching the gear-tape aspect ratio, averages the two
    centroids to get the peg target, and publishes IS_ALIGNED /
    ANGLE_TO_TURN / PROCESSED_TIME / VISION_ON to the 'NerdyVision' table.
    """
    # brightness adjusted, used to be 30, now is 70
    os.system("v4l2-ctl -d /dev/video0 "
              "-c brightness=70 "
              "-c contrast=10 "
              "-c saturation=100 "
              "-c white_balance_temperature_auto=0 "
              "-c power_line_frequency=2 "
              "-c white_balance_temperature=4500 "
              "-c sharpness=25 "
              "-c backlight_compensation=0 "
              "-c exposure_auto=1 "
              "-c exposure_absolute=5 "
              "-c pan_absolute=0 "
              "-c tilt_absolute=0 "
              "-c zoom_absolute=0")

    NetworkTable.setIPAddress("roboRIO-687-FRC.local")
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    table = NetworkTable.getTable("NerdyVision")
    print("NetworkTables initialized")

    angle_to_turn = 0
    while 687:
        aligned = False
        previous_angle_to_turn = angle_to_turn
        ret, frame = cap.read()
        capture_time = time.time()

        # Denoise with erode/dilate instead of a blur.
        kernel = np.ones((5, 5), np.uint8)
        erosion = cv2.erode(frame, kernel, iterations=1)
        dilation = cv2.dilate(erosion, kernel, iterations=1)

        res, mask = NerdyFunctions.mask(NerdyConstants.LOWER_GREEN,
                                        NerdyConstants.UPPER_GREEN, dilation)
        _, cnts, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
        center = None
        if len(cnts) > 1:
            centers_x = [0]
            centers_y = [0]
            for i in range(len(cnts)):
                c = cnts[i]
                area = cv2.contourArea(c)
                # if NerdyConstants.MIN_GEAR_AREA < area < NerdyConstants.MAX_GEAR_AREA:
                # Select contours by tape aspect ratio rather than area.
                x, y, w, h = cv2.boundingRect(c)
                aspect_ratio = float(w) / h
                if NerdyConstants.MIN_GEAR_ASPECT < aspect_ratio < NerdyConstants.MAX_GEAR_ASPECT:
                    goal = NerdyFunctions.polygon(c, 0.02)
                    cv2.drawContours(res, [goal], 0, (255, 0, 0), 5)
                    M = cv2.moments(goal)
                    if M['m00'] > 0:
                        cx = int(M['m10'] / M['m00'])
                        cy = int(M['m01'] / M['m00'])
                        center = (cx, cy)
                        cv2.circle(res, center, 5, (255, 0, 0), -1)
                        centers_x.append(cx)
                        centers_y.append(cy)
            # Exactly two matching strips (plus the sentinel 0) => valid target.
            if len(centers_x) == 3 and len(centers_y) == 3:
                target_x = (centers_x[1] + centers_x[2])/2
                target_y = (centers_y[1] + centers_y[2])/2
                target = (target_x, target_y)
                cv2.circle(res, target, 5, (0, 255, 0), -1)
                print(target_x)
                print(target_y)
                error = target_x - NerdyConstants.FRAME_CX
                angle_to_turn = NerdyFunctions.calc_horiz_angle(error)
                print("ANGLE_TO_TURN: " + str(angle_to_turn))
                aligned = NerdyFunctions.is_aligned(angle_to_turn)
                print("IS_ALIGNED: " + str(aligned))

        processed_time = time.time()
        delta_time = processed_time - capture_time
        print("PROCESSED_TIME: " + str(delta_time))

        # NerdyFunctions.draw_static(res)
        # cv2.imshow("NerdyVision", res)

        try:
            table.putBoolean('IS_ALIGNED', aligned)
            # Only publish fresh values when the angle actually changed.
            if previous_angle_to_turn != angle_to_turn:
                table.putNumber('ANGLE_TO_TURN', angle_to_turn)
                table.putNumber('PROCESSED_TIME', delta_time)
            else:
                table.putNumber('ANGLE_TO_TURN', 0)
                table.putNumber('PROCESSED_TIME', 0)
            table.putBoolean('VISION_ON', True)
        except:
            print("DATA NOT SENDING...")
            # Fix: key was misspelled 'IS_ALINGED', so the robot-side reader
            # of 'IS_ALIGNED' never received the failure value.
            table.putBoolean('IS_ALIGNED', False)
            table.putNumber('ANGLE_TO_TURN', 0)
            table.putNumber('PROCESSED_TIME', 0)
            table.putBoolean('VISION_ON', False)

        cv2.waitKey(1)

    cap.release()
    cv2.destroyAllWindows()
def __init__(self, ip): NetworkTable.setIPAddress(ip) NetworkTable.setClientMode() NetworkTable.initialize() self.table = NetworkTable
def main():
    """Calibration/tracking loop for the 2017 boiler goal.

    Reads frames from the module-level ``cap``, optionally prints HSV
    calibration info, finds the largest significant 4-sided contour of the
    target color, computes the horizontal angle to it, and publishes
    ANGLE_TO_TURN / IS_ALIGNED to the SmartDashboard NetworkTable.
    Exits on the Escape key.
    """
    # set modes (default without user input)
    cal_mode_on = CAL_MODE_ON
    track_mode_on = TRACK_MODE_ON

    # turn on modes specified by user
    # comment out next line if this feature is not desired
    cal_mode_on, track_mode_on = check_modes()

    # network table setup
    NetworkTable.setIPAddress("10.13.39.2")
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    SmartDashboard = NetworkTable.getTable("SmartDashboard")

    #stream=urllib.urlopen('http://10.13.39.10/mjpg/video.mjpg') FOR SCHOOL
    #bytes='' FOR SCHOOL

    # adjust camera settings
    #cap.set(cv2.CAP_PROP_FRAME_WIDTH, FRAME_X)
    #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, FRAME_Y)
    # cap.set(cv2.CAP_PROP_FPS,30)
    #cap.set(cv2.CAP_PROP_EXPOSURE, -8.0)

    # set up FPS list and iterator (rolling window of 25 frame times)
    times = [0] * 25
    time_idx = 0
    time_start = time.time()
    camfps = 0

    while 687:  # team-number spelling of `while True`
        ret, frame = cap.read()
        # Dead code kept from the school MJPEG-stream variant:
        '''bytes+=stream.read(1024)
        a = bytes.find('\xff\xd8')
        b = bytes.find('\xff\xd9')
        if a!=-1 and b!=-1:
            jpg = bytes[a:b+2]
            bytes= bytes[b+2:]
            live = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.CV_LOAD_IMAGE_COLOR)
        '''

        # compute FPS information
        time_end = time.time()
        times[time_idx] = time_end - time_start
        time_idx += 1
        if time_idx >= len(times):
            camfps = 1 / (sum(times) / len(times))
            time_idx = 0
        # refresh the displayed FPS every 5 frames as well
        if time_idx > 0 and time_idx % 5 == 0:
            camfps = 1 / (sum(times) / len(times))
        time_start = time_end
        print("FPS: " + str(camfps))
        print("Time: " + str(time.time()))

        # calibration
        if cal_mode_on:
            print(np.array_str(calibration_box(frame)))
            cv2.imshow("NerdyCalibration", frame)

        # tracking
        if track_mode_on:
            # init values (for x)
            angle_to_turn = 0
            aligned = False

            # gaussian blur to remove noise
            blur = cv2.GaussianBlur(frame, (11, 11), 0)

            # remove everything but specified color
            res, mask = masking(LOWER_LIM, UPPER_LIM, blur)

            # draw references
            draw_static(res)

            # find contour of goal ([-2] keeps this working across
            # OpenCV 2/3 return signatures)
            cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
            center = None

            # only proceed if at least one contour was found
            if len(cnts) > 0:
                # find the largest contour (closest goal) in the mask
                c = max(cnts, key=cv2.contourArea)

                # make sure the largest contour is significant
                area = cv2.contourArea(c)
                if area > 1500:
                    # make suggested contour into a polygon
                    goal = polygon(c)

                    # make sure goal contour has 4 sides
                    if len(goal) == 4:
                        # draw the contour
                        cv2.drawContours(res, [goal], 0, (255, 0, 0), 5)

                        # calculate centroid
                        M = cv2.moments(goal)
                        if M['m00'] > 0:
                            cx, cy = calc_center(M)
                            center = (cx, cy)

                            # draw centroid
                            cv2.circle(res, center, 5, (255, 0, 0), -1)

                            # calculate error in degrees
                            error = cx - FRAME_CX
                            angle_to_turn = calc_horiz_angle(error)
                            print("Angle to turn: " + str(angle_to_turn))
                            print("CX=" + str(cx))

                            # check if shooter is aligned
                            aligned = is_aligned(angle_to_turn)
                            print("Aligned: " + str(aligned))

            # results
            cv2.imshow("NerdyVision", res)
            try:
                # send to network tables (best-effort; loop continues on failure)
                SmartDashboard.putNumber('ANGLE_TO_TURN', angle_to_turn)
                SmartDashboard.putBoolean('IS_ALIGNED', aligned)
            except:
                print("DATA NOT SENDING...")

        # capture a keypress
        key = cv2.waitKey(20) & 0xFF
        # escape key
        if key == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
def __init__(self): NetworkTable.setIPAddress('127.0.0.1') NetworkTable.setClientMode() NetworkTable.initialize() self.nt = NetworkTable.getTable("vision")
def makeNetworkTable(IP): NetworkTable.setIPAddress(IP) NetworkTable.setClientMode() NetworkTable.initialize() return NetworkTable.getTable("vision")
def main():
    """Boiler-tracking loop fed by an Axis IP camera's MJPEG stream.

    Pulls JPEG frames out of the raw MJPEG byte stream (delimited by the
    JPEG SOI/EOI markers ``\\xff\\xd8`` / ``\\xff\\xd9``), runs the same
    color-mask + contour pipeline as the USB-camera variant, and publishes
    ANGLE_TO_TURN / IS_ALIGNED to the "NerdyVision" NetworkTable.
    """
    # network table setup
    NetworkTable.setIPAddress("roboRIO-687-FRC.local")
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    table = NetworkTable.getTable("NerdyVision")

    # stream from axis camera setup
    stream = urllib.urlopen('http://10.06.87.11/mjpg/video.mjpg')
    # FIX: local was named `bytes`, shadowing the builtin type.
    buf = ''

    while 687:  # team-number spelling of `while True`
        # get frame from stream: take the LAST complete JPEG in the buffer
        # (rfind) so we always process the freshest frame.
        buf += stream.read(16384)
        b = buf.rfind('\xff\xd9')
        a = buf.rfind('\xff\xd8', 0, b-1)
        if a != -1 and b != -1:
            jpg = buf[a:b+2]
            buf = buf[b+2:]
            frame = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),
                                 cv2.CV_LOAD_IMAGE_COLOR)

            # calibration
            if CAL_MODE_ON:
                print(np.array_str(calibration_box(frame)))
                cv2.imshow("NerdyCalibration", frame)

            # tracking
            if TRACK_MODE_ON:
                # init values (for x)
                angle_to_turn = 0
                aligned = False

                # gaussian blur to remove noise
                blur = cv2.GaussianBlur(frame, (11, 11), 0)

                # remove everything but specified color
                res, mask = masking(LOWER_LIM, UPPER_LIM, blur)

                # draw references
                draw_static(res)

                # find contours ([-2] works across OpenCV 2/3 signatures)
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None

                # only proceed if at least one contour was found
                if len(cnts) > 0:
                    # find the largest contour (closest goal) in the mask
                    c = max(cnts, key=cv2.contourArea)

                    # make sure the largest contour is significant
                    area = cv2.contourArea(c)
                    if area > 1500:
                        # make suggested contour into a polygon
                        goal = polygon(c)

                        # make sure goal contour has 4 sides
                        if len(goal) == 4:
                            # draw the contour
                            cv2.drawContours(res, [goal], 0, (255, 0, 0), 5)

                            # calculate centroid
                            M = cv2.moments(goal)
                            if M['m00'] > 0:
                                cx, cy = calc_center(M)
                                center = (cx, cy)

                                # draw centroid
                                cv2.circle(res, center, 5, (255, 0, 0), -1)

                                # calculate error in degrees
                                error = cx - FRAME_CX
                                angle_to_turn = calc_horiz_angle(error)
                                print("ANGLE_TO_TURN" + str(angle_to_turn))

                                # check if shooter is aligned
                                aligned = is_aligned(angle_to_turn)
                                print("IS_ALIGNED: " + str(aligned))

                # results
                cv2.imshow("NerdyVision", res)
                try:
                    # send data to network tables (best-effort)
                    table.putNumber('ANGLE_TO_TURN', angle_to_turn)
                    table.putBoolean('IS_ALIGNED', aligned)
                except:
                    print("DATA NOT SENDING...")
def main():
    """2017 gear-vision loop that also serves the annotated frame over HTTP.

    Starts a ThreadedHTTPServer on port 5800 (FRC-legal camera port) that
    streams the module-global `frameFinal`, then loops: mask for the cyan
    retroreflective strips, pick contours in the expected area band, and if
    exactly two are found publish the turn direction and angle to the
    "Forgiving/Vision" NetworkTable.  Ctrl-C exits.

    Relies on module-level `vdev` (camera), `middle` (frame center x),
    `_GEAR_ONE_SIDE_AREA`, `CamHandler`, and the forgive helper package.
    """
    global frameFinal
    # Prime the served frame before the HTTP server starts handing it out.
    frameFinal = vdev.read()
    try:
        server = ThreadedHTTPServer(("0.0.0.0", 5800), CamHandler)
        target = Thread(target=server.serve_forever, args=())
        target.start()
        NetworkTable.setIPAddress('10.54.53.2')
        NetworkTable.setClientMode()
        NetworkTable.initialize()
        nt = NetworkTable.getTable("Forgiving/Vision")
        while 2333366666:  # effectively `while True`
            image = vdev.read()
            blur = cv2.GaussianBlur(image, (9, 9), 0)
            frame = image
            result, mask = forgive.function.singleColorGlass(
                forgive.constant._HSV_RANGE_CYAN[0],
                forgive.constant._HSV_RANGE_CYAN[1], blur)
            # Vertical center line for the operator's stream.
            cv2.line(frame, (middle, 0),
                     (middle, forgive.constant._CAMERA_FRAMESIZE[1]),
                     (205, 194, 255), 2)  # output
            _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)
            centersX = []
            centersY = []
            print("contourNumber: " + str(len(contours)))
            ii = 0
            for i in range(len(contours)):
                cnt = contours[i]
                area = cv2.contourArea(cnt)
                # Keep only contours whose area matches one gear-target strip.
                if (_GEAR_ONE_SIDE_AREA[0] < area < _GEAR_ONE_SIDE_AREA[1]):
                    ii += 1
                    goal = forgive.function.polygon(cnt, 0.02)
                    cv2.drawContours(frame, [goal], 0, (255, 0, 0), 5)  # output
                    moments = cv2.moments(goal)
                    centerX, centerY = forgive.function.getCenter(moments)
                    cv2.circle(frame, (centerX, centerY), 5, (255, 0, 255), -1)  # output
                    centersX.append(centerX)
                    centersY.append(centerY)
            print(" contourSelected: " + str(ii))
            # Exactly two strips found: aim point is their midpoint.
            if ((len(centersX) == 2) and (len(centersY) == 2)):
                finalX = int((centersX[0] + centersX[1]) / 2)
                finalY = int((centersY[0] + centersY[1]) / 2)
                cv2.circle(frame, (finalX, finalY), 5, (255, 255, 255), -1)  # output
                err = finalX - middle
                align = forgive.function.alignedToWhere(
                    err, forgive.constant._PIXEL_TOLERANCE)
                angle = forgive.constant._DEGREES_PPX * err
                if (align == -1):
                    print('left now, turn RIGHT for ' + str(angle) + ' deg')
                    nt.putString("turn", "right")
                elif (align == 1):
                    print('right now, turn LEFT for ' + str(angle) + ' deg')
                    nt.putString("turn", "left")
                else:
                    print('center now, GREAT! ' + str(angle) + ' deg')
                    nt.putString("turn", "great")
                nt.putNumber("angle", angle)
            # Publish the masked result to the HTTP stream.
            frameFinal = result
            # cv2.imshow("FRC 2017 Vision",result)
            # cv2.waitKey(1)
    except KeyboardInterrupt:
        sys.exit()
class GripPipeline:
    """
    An OpenCV pipeline generated by GRIP, extended with NetworkTables I/O.

    Note: the NetworkTables connection below runs at class-definition time,
    i.e. as a side effect of importing the module that defines this class.
    """
    # Arrays to send
    centerX = []
    centerY = []
    width = []
    height = []
    ratio = []

    # IP addresses (alternates kept for bench testing)
    ip = 'roborio-2643-frc.local'
    ip2 = 'localhost'
    ip3 = '10.26.43.20'
    ip4 = '192.168.137.1'

    # Runs network tables
    NetworkTable.setIPAddress(ip)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    table = NetworkTable.getTable("Vision")

    def cameraStatus(self):
        """
        Retrieves the status of camera from network tables sent by
        the robot Java code.  Returns -1 when the key is absent.
        """
        return self.table.getNumber("Camera Call", -1)

    def autoMode(self):
        """
        Determines auto mode or telop mode from robot java code.
        Returns -1 when the key is absent.
        """
        return self.table.getNumber("Auto Mode", -1)

    def __init__(self):
        """Set up GRIP pipeline parameters (blur -> HSV threshold ->
        find contours -> filter contours)."""
        self.__blur_type = BlurType.Box_Blur
        self.__blur_radius = 6.306306306306306
        self.blur_output = None

        self.__hsv_threshold_input = self.blur_output
        self.__hsv_threshold_hue = [113.0, 180.0]
        self.__hsv_threshold_saturation = [101.0, 246.0]
        self.__hsv_threshold_value = [183.0, 255.0]
        self.hsv_threshold_output = None

        self.__find_contours_input = self.hsv_threshold_output
        self.__find_contours_external_only = False
        self.find_contours_output = None

        self.__filter_contours_contours = self.find_contours_output
        self.__filter_contours_min_area = 0.0
        self.__filter_contours_min_perimeter = 0.0
        self.__filter_contours_min_width = 0.0
        self.__filter_contours_max_width = 1000.0
        self.__filter_contours_min_height = 0.0
        self.__filter_contours_max_height = 1000.0
        self.__filter_contours_solidity = [80, 100]
        self.__filter_contours_max_vertices = 100.0
        self.__filter_contours_min_vertices = 4.0
        self.__filter_contours_min_ratio = 0.0
        self.__filter_contours_max_ratio = 1000.0
        self.filter_contours_output = None

    """GRIP GENERATED"""
    def process(self, source0):
        """
        Runs the pipeline and sets all outputs to new values.
        """
        # Step Blur0:
        self.__blur_input = source0
        (self.blur_output) = self.__blur(self.__blur_input,
                                         self.__blur_type,
                                         self.__blur_radius)

        # Step HSV_Threshold0:
        self.__hsv_threshold_input = self.blur_output
        (self.hsv_threshold_output) = self.__hsv_threshold(
            self.__hsv_threshold_input, self.__hsv_threshold_hue,
            self.__hsv_threshold_saturation, self.__hsv_threshold_value)

        # Step Find_Contours0:
        self.__find_contours_input = self.hsv_threshold_output
        (self.find_contours_output) = self.__find_contours(
            self.__find_contours_input, self.__find_contours_external_only)

        # Step Filter_Contours0:
        self.__filter_contours_contours = self.find_contours_output
        (self.filter_contours_output), self.centerX, self.centerY, \
            self.height, self.width, self.ratio = self.__filter_contours(
                self.__filter_contours_contours,
                self.__filter_contours_min_area,
                self.__filter_contours_min_perimeter,
                self.__filter_contours_min_width,
                self.__filter_contours_max_width,
                self.__filter_contours_min_height,
                self.__filter_contours_max_height,
                self.__filter_contours_solidity,
                self.__filter_contours_max_vertices,
                self.__filter_contours_min_vertices,
                self.__filter_contours_min_ratio,
                self.__filter_contours_max_ratio)

        self.sendDataToServer(self)
        #print(self.centerX)

    def cameraOffline(self):
        """
        Publishes an offline camera status to the network table.
        """
        self.table.putString("Camera Status", "Camera is Offline")

    def cameraOnline(self):
        """
        Publishes an online camera status to the network table.
        """
        self.table.putString("Camera Status", "Camera is Online")

    # NOTE(review): declared @staticmethod but takes `self`; process() calls
    # it as self.sendDataToServer(self), which works — kept for compatibility.
    @staticmethod
    def sendDataToServer(self):
        """
        Sends data to network table. At the moment, it is only sending
        necessary code to be used during robot movement and auto.
        """
        # Arrays
        tmpX = []
        tmpHeight = []

        # Values* NOT USED ATM
        tmpH1 = 0
        tmpH2 = 0
        tmpX1 = 0
        tmpX2 = 0
        xCheck = 0

        # Checks whether the values of width and height make the correct
        # ratio of retroreflective tape
        for x in range(len(self.ratio)):
            if 0.75 < self.ratio[x] < 5.0:
                if x < len(self.centerX):
                    tmpX.insert(1, self.centerX[x])
                    tmpHeight.insert(1, self.height[x])
        '''
        for x in range(len(tmpHeight)):
            if tmpHeight[x] > tmpH1:
                tmpH1 = tmpHeight[x]
                tmpX1 = tmpX[x]
                xCheck = x
        #print(tmp1)
        for x in range(len(tmpHeight)):
            if tmpHeight[x] >= tmpH2 and x != xCheck:
                tmpH2 = tmpHeight[x]
                tmpX2 = tmpX[x]
        del tmpX[:]
        del tmpHeight[:]
        tmpX.insert(0, tmpX1)
        tmpHeight.insert(0, tmpH1)
        tmpX.insert(1, tmpX2)
        tmpHeight.insert(1, tmpH2)
        '''
        # Puts data into networktable
        self.table.putNumberArray("CenterX", tmpX)
        self.table.putNumberArray("Height", tmpHeight)

    """GRIP GENERATED"""
    @staticmethod
    def __blur(src, type, radius):
        """Softens an image using one of several filters.
        Args:
            src: The source mat (numpy.ndarray).
            type: The blurType to perform represented as an int.
            radius: The radius for the blur as a float.
        Returns:
            A numpy.ndarray that has been blurred.
        """
        if type is BlurType.Box_Blur:
            ksize = int(2 * round(radius) + 1)
            return cv2.blur(src, (ksize, ksize))
        elif type is BlurType.Gaussian_Blur:
            ksize = int(6 * round(radius) + 1)
            return cv2.GaussianBlur(src, (ksize, ksize), round(radius))
        elif type is BlurType.Median_Filter:
            ksize = int(2 * round(radius) + 1)
            return cv2.medianBlur(src, ksize)
        else:
            return cv2.bilateralFilter(src, -1, round(radius), round(radius))

    """GRIP GENERATED"""
    @staticmethod
    def __hsv_threshold(input, hue, sat, val):
        """Segment an image based on hue, saturation, and value ranges.
        Args:
            input: A BGR numpy.ndarray.
            hue: A list of two numbers the are the min and max hue.
            sat: A list of two numbers the are the min and max saturation.
            lum: A list of two numbers the are the min and max value.
        Returns:
            A black and white numpy.ndarray.
        """
        out = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)
        return cv2.inRange(out, (hue[0], sat[0], val[0]),
                           (hue[1], sat[1], val[1]))

    """GRIP GENERATED"""
    @staticmethod
    def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance
        to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if external_only:
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy = cv2.findContours(input, mode=mode,
                                                    method=method)
        return contours

    """GRIP GENERATED"""
    @staticmethod
    def __filter_contours(input_contours, min_area, min_perimeter, min_width,
                          max_width, min_height, max_height, solidity,
                          max_vertex_count, min_vertex_count, min_ratio,
                          max_ratio):
        """Filters out contours that do not meet certain criteria.
        Args:
            input_contours: Contours as a list of numpy.ndarray.
            min_area: The minimum area of a contour that will be kept.
            min_perimeter: The minimum perimeter of a contour that will be kept.
            min_width: Minimum width of a contour.
            max_width: MaxWidth maximum width.
            min_height: Minimum height.
            max_height: Maximimum height.
            solidity: The minimum and maximum solidity of a contour.
            min_vertex_count: Minimum vertex Count of the contours.
            max_vertex_count: Maximum vertex Count.
            min_ratio: Minimum ratio of width to height.
            max_ratio: Maximum ratio of width to height.
        Returns:
            Contours as a list of numpy.ndarray, plus parallel lists of
            center-x, center-y, heights, widths, and h/w ratios.
        """
        # Arrays to return
        contours = []
        centerXs = []
        centerYs = []
        widths = []
        heights = []
        ratios = []
        for contour in input_contours:
            x, y, w, h = cv2.boundingRect(contour)
            if min_width < w < max_width and min_height < h < max_height:
                area = cv2.contourArea(contour)
                # NOTE: centers/sizes are recorded for every size-qualifying
                # contour, before the solidity/ratio filters below (original
                # behavior, kept).
                centerXs.insert(1, x + (w / 2))
                centerYs.insert(1, y + (h / 2))
                widths.insert(1, w)
                heights.insert(1, h)
                ratios.insert(1, h / w)
                hull = cv2.convexHull(contour)
                hullCA = cv2.contourArea(hull)
                solid = 100 * area / hullCA
                size_correct = area > min_area and\
                    cv2.arcLength(contour, True) > min_perimeter and\
                    hullCA != 0
                # BUG FIX: solidity bounds were inverted
                # (solidity[1] < solid < solidity[0] is always False for
                # [80, 100]) and the max-vertex test used '>' instead of '<'.
                substance_correct = solidity[0] < solid < solidity[1] and\
                    len(contour) > min_vertex_count and\
                    len(contour) < max_vertex_count
                if size_correct and substance_correct:
                    ratio = float(w) / h
                    #ratio insert* NOT USED
                    #ratios.insert(1, h / w)
                    if min_ratio < ratio < max_ratio:
                        # BUG FIX: was output.append(contour) — `output` is
                        # undefined (NameError); the returned list is `contours`.
                        contours.append(contour)
        return contours, centerXs, centerYs, heights, widths, ratios
def main(): try: NetworkTable.setIPAddress('10.54.53.2') NetworkTable.setClientMode() NetworkTable.initialize() nt = NetworkTable.getTable("Forgiving/Vision") while 2333366666: image = vdev.read() blur = cv2.GaussianBlur(image, (9, 9), 0) result, mask = forgive.function.singleColorGlass( forgive.constant._HSV_RANGE_CYAN[0], forgive.constant._HSV_RANGE_CYAN[1], blur) _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) centersX = [] centersY = [] areas = [] print("contourNumber: " + str(len(contours))) ii = 0 for i in range(len(contours)): cnt = contours[i] area = cv2.contourArea(cnt) if ((_GEAR_ONE_SIDE_AREA[0] - 2000) < area < (_GEAR_ONE_SIDE_AREA[1] + 2000)): areas.append(area) if (_GEAR_ONE_SIDE_AREA[0] < area < _GEAR_ONE_SIDE_AREA[1]): ii += 1 goal = forgive.function.polygon(cnt, 0.02) moments = cv2.moments(goal) centerX, centerY = forgive.function.getCenter(moments) centersX.append(centerX) centersY.append(centerY) print(" contourSelected: " + str(ii)) if (len(areas) == 2): nt.putNumber("area0", areas[0]) nt.putNumber("area1", areas[1]) if ((len(centersX) == 2) and (len(centersY) == 2)): finalX = int((centersX[0] + centersX[1]) / 2) finalY = int((centersY[0] + centersY[1]) / 2) err = finalX - middle align = forgive.function.alignedToWhere( err, forgive.constant._PIXEL_TOLERANCE) angle = forgive.constant._DEGREES_PPX * err if (align == -1): print('left now, turn RIGHT for ' + str(angle) + ' deg') nt.putString("turn", "right") elif (align == 1): print('right now, turn LEFT for ' + str(angle) + ' deg') nt.putString("turn", "left") else: print('center now, GREAT! ' + str(angle) + ' deg') nt.putString("turn", "great") # cv2.imshow("FRC 2017 Vision",result) # cv2.waitKey(1) except KeyboardInterrupt: sys.exit()