class RyuApp(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    _CONTEXTS = {'wsgi': WSGIApplication}
    _instance = None

    def __init__(self, *args, **kwargs):
        if 'switch_type' in kwargs:
            self.switch_type = kwargs.pop('switch_type')
        else:
            self.switch_type = ['openflow']
        super(RyuApp, self).__init__(*args, **kwargs)
        RyuApp._instance = self
        self.logger.debug(" __init__()")

        self.result = {}
        self.lock = False
        self.dp_id = None
        self.configured = False
        self.dl_port = None   # port numbers in the OpenFlow switch
        self.ul_port = None
        self.status = 'init'
        self._timers = {}     # named timers created via _create_timer()

        self.logger.debug("%s, %s" % (args, kwargs))

        self.pl_conf = ObjectView(CONF['pipeline_conf'], logger=self.logger)
        self.bm_conf = ObjectView(CONF['benchmark_conf'], logger=self.logger)

        # port "names" used to configure the OpenFlow switch
        self.dl_port_name = self.bm_conf.sut.downlink_port
        self.ul_port_name = self.bm_conf.sut.uplink_port

        self.instantiate_pipeline()

        self._timer = LoopingCall(self.handle_timer)

        wsgi = kwargs['wsgi']
        self.waiters = {}
        self.data = {'waiters': self.waiters}

        mapper = wsgi.mapper
        wsgi.registory['TipsyController'] = self.data
        for attr in dir(TipsyController):
            if attr.startswith('get_'):
                mapper.connect('tipsy', '/tipsy/' + attr[len('get_'):],
                               controller=TipsyController, action=attr,
                               conditions=dict(method=['GET']))

        self.initialize_datapath()
        self.change_status('wait')  # Wait for the datapath to connect

    def _create_timer(self, name, func, *arg, **kwarg):
        timer = LoopingCall(func, *arg, **kwarg)
        self._timers[name] = timer
        return timer

    def instantiate_pipeline(self):
        pl_class = None
        pl_name = self.pl_conf.name
        for switch_type in self.switch_type:
            try:
                backend = 'SUT_%s' % switch_type
                pl_class = find_mod.find_class(backend, pl_name)
                self.logger.info('pipeline: %s_%s', backend, pl_name)
                break
            except KeyError:
                pass
        if pl_class is None:
            self.signal_failure('Pipeline (%s) not found for %s',
                                pl_name, self.switch_type)
            return
        self.pl = pl_class(self, self.pl_conf)

    def signal_failure(self, *args):
        self.logger.critical(*args)
        self.change_status('failed')
        try:
            requests.get(CONF['webhook_failed'])
        except requests.ConnectionError:
            pass
        hub.spawn_after(1, TipsyController.do_exit)

    def change_status(self, new_status):
        self.logger.info("status: %s -> %s" % (self.status, new_status))
        self.status = new_status

    def get_status(self, **kw):
        return self.status

    def handle_timer(self):
        self.logger.warn("timer called %s", datetime.datetime.now())
        if self.lock:
            self.logger.error('Previous handle_timer is still running')
            self._timer.stop()
            raise Exception('Previous handle_timer is still running')
        self.lock = True
        for cmd in self.pl_conf.run_time:
            attr = getattr(self.pl, 'do_%s' % cmd.action, self.pl.do_unknown)
            attr(cmd)
            # time.sleep(0.5)
        self.logger.warn("time : %s", datetime.datetime.now())
        self.lock = False

    def initialize_datapath(self):
        """Configure the switch itself (as opposed to filling its flow
        tables with entries).

        For example, add tunnels, tune performance knobs, etc.
""" self.change_status('initialize_datapath') def stop_datapath(self): pass @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) def handle_switch_features(self, ev): if self.dp_id and self.dp_id != ev.msg.datapath.id: self.logger.error("This app can control only one switch (%s, %s)", self.dp_id, ev.msg.datapath.id) raise Exception("This app can control only one switch") if self.dp_id is not None: self.logger.info("Switch has reconnected, reconfiguring") self.configured = False self.dp = ev.msg.datapath self.dp_id = self.dp.id ofp = self.dp.ofproto parser = self.dp.ofproto_parser self.logger.info("switch_features: datapath:%s, ofproto:%s" % (self.dp.id, ofp.OFP_VERSION)) self.change_status('connected') self.dp.send_msg( parser.OFPDescStatsRequest(self.dp, 0) ) self.configure() @set_ev_cls(ofp_event.EventOFPDescStatsReply, MAIN_DISPATCHER) def handle_desc_stats_reply(self, ev): self.logger.info(str(ev.msg.body)) for field in ['mfr_desc', 'hw_desc', 'sw_desc', 'serial_num', 'dp_desc']: self.result[field] = getattr(ev.msg.body, field) @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER) def handle_port_desc_stats_reply(self, ev): ofp = self.dp.ofproto # Map port names in cfg to actual OF port numbers port_nums = {} if self.dl_port_name == self.ul_port_name: self.dl_port_name = self.ul_port_name = 'in_port' self.ports = {'in_port': ofp.OFPP_IN_PORT} for port in ev.msg.body: self.ports[port.name] = port.port_no port_nums[port.port_no] = port.name for name in sorted(self.ports): self.logger.debug('port: %s, %s' % (name, self.ports[name])) if self.pl.has_tunnels: ports = ['ul_port'] else: ports = ['ul_port', 'dl_port'] for spec_port in ports: port_name = getattr(self, '%s_name' % spec_port) if port_name.isdigit(): # port is defined by its port number in the configuration file, # Check if it actually exists. 
                p = int(port_name)
                self.__dict__[spec_port] = p
                try:
                    self.logger.info('%s (%s): %s',
                                     spec_port, port_nums[p], p)
                except KeyError:
                    self.logger.critical('%s (%s): not found'
                                         % (spec_port, port_name))
            elif self.ports.get(port_name):
                # kernel interface -> OF returns the interface name as port_name
                port_no = self.ports[port_name]
                self.__dict__[spec_port] = port_no
                self.logger.info('%s (%s): %s'
                                 % (spec_port, port_name, port_no))
            elif self.ports.get(spec_port):
                # dpdk interface -> OF returns the "logical" br name as port_name
                port_no = self.ports[spec_port]
                self.__dict__[spec_port] = port_no
                self.logger.info('%s (%s): %s'
                                 % (spec_port, port_name, port_no))
            else:
                self.logger.critical('%s (%s): not found'
                                     % (spec_port, port_name))
        self.configure_1()

    @set_ev_cls(ofp_event.EventOFPErrorMsg,
                [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
    def handle_error_msg(self, ev):
        msg = ev.msg
        ofp = self.dp.ofproto

        if msg.type == ofp.OFPET_METER_MOD_FAILED:
            cmd = 'ovs-vsctl set bridge s1 datapath_type=netdev'
            self.logger.error('METER_MOD failed, "%s" might help' % cmd)
        elif msg.type and msg.code:
            self.logger.error('OFPErrorMsg received: type=0x%02x code=0x%02x '
                              'message=%s',
                              msg.type, msg.code, utils.hex_array(msg.data))
        else:
            self.logger.error('OFPErrorMsg received: %s', msg)

    def goto(self, table_name):
        "Return a goto-table instruction to table_name"
        parser = self.dp.ofproto_parser
        return parser.OFPInstructionGotoTable(self.pl.tables[table_name])

    def get_netmask(self, prefix_len):
        # e.g. prefix_len=24 -> '255.255.255.0'
        from socket import inet_ntoa
        from struct import pack
        bits = 0xffffffff ^ (1 << 32 - prefix_len) - 1
        mask = inet_ntoa(pack('>I', bits))
        return mask

    def mod_match_addr(self, match, key):
        """Convert an address from CIDR notation to (addr, netmask) format.

        match[key] is the address to convert.
        """
        try:
            val = match[key]
        except (KeyError, TypeError):
            # key is missing, or match is None / not subscriptable
            return
        if type(val) != str:
            return  # a non-string val cannot be in CIDR format
        m = re.match(r'^([^\/]*)\/([^\/]*)$', val)
        if not m:
            return
        mask = self.get_netmask(int(m.group(2)))
        match[key] = (m.group(1), mask)

    def mod_flow(self, table=0, priority=None, match=None,
                 actions=None, inst=None, out_port=None, out_group=None,
                 output=None, goto=None, cmd='add'):
        ofp = self.dp.ofproto
        parser = self.dp.ofproto_parser

        # Lagopus extensions have been added to an older version of ryu,
        # which does not support the "ip_address/prefix_length" notation
        for key in ['ipv4_src', 'ipv4_dst']:
            self.mod_match_addr(match, key)

        if actions is None:
            actions = []
        if inst is None:
            inst = []
        if type(table) in [str, unicode]:
            table = self.pl.tables[table]
        if priority is None:
            priority = ofp.OFP_DEFAULT_PRIORITY
        if output:
            actions.append(parser.OFPActionOutput(output))
        if goto:
            inst.append(self.goto(goto))
        if cmd == 'add':
            command = ofp.OFPFC_ADD
        elif cmd == 'del':
            command = ofp.OFPFC_DELETE
        else:
            command = cmd
        if type(match) == dict:
            match = parser.OFPMatch(**match)
        if out_port is None:
            out_port = ofp.OFPP_ANY
        if out_group is None:
            out_group = ofp.OFPG_ANY

        # Construct the flow_mod message and send it.
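        # Note: if explicit actions were collected above, they are wrapped
        # into a single apply-actions instruction and prepended to the other
        # instructions (e.g. a goto-table instruction).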
        if actions:
            inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
                                                 actions)] + inst
        msg = parser.OFPFlowMod(datapath=self.dp, table_id=table,
                                priority=priority, match=match,
                                instructions=inst, command=command,
                                out_port=out_port, out_group=out_group)
        self.dp.send_msg(msg)

    def add_group(self, gr_id, actions, gr_type=None):
        ofp = self.dp.ofproto
        parser = self.dp.ofproto_parser
        gr_type = gr_type or ofp.OFPGT_INDIRECT

        weight = 0
        watch_port = ofp.OFPP_ANY
        watch_group = ofp.OFPG_ANY
        buckets = [parser.OFPBucket(weight, watch_port, watch_group, actions)]

        req = parser.OFPGroupMod(self.dp, ofp.OFPGC_ADD, gr_type, gr_id,
                                 buckets)
        self.dp.send_msg(req)

    def del_group(self, gr_id, gr_type=None):
        ofp = self.dp.ofproto
        parser = self.dp.ofproto_parser
        gr_type = gr_type or ofp.OFPGT_INDIRECT

        req = parser.OFPGroupMod(self.dp, ofp.OFPGC_DELETE, gr_type, gr_id)
        self.dp.send_msg(req)

    def clear_table(self, table_id):
        parser = self.dp.ofproto_parser
        ofp = self.dp.ofproto
        clear = parser.OFPFlowMod(self.dp,
                                  table_id=table_id,
                                  command=ofp.OFPFC_DELETE,
                                  out_port=ofp.OFPP_ANY,
                                  out_group=ofp.OFPG_ANY)
        self.dp.send_msg(clear)

    def clear_switch(self):
        for table_id in self.pl.tables.values():
            self.clear_table(table_id)

        # Delete all meters
        parser = self.dp.ofproto_parser
        ofp = self.dp.ofproto
        clear = parser.OFPMeterMod(self.dp,
                                   command=ofp.OFPMC_DELETE,
                                   meter_id=ofp.OFPM_ALL)
        self.dp.send_msg(clear)

        # Delete all groups
        clear = parser.OFPGroupMod(self.dp, ofp.OFPGC_DELETE,
                                   ofp.OFPGT_INDIRECT, ofp.OFPG_ALL)
        self.dp.send_msg(clear)

    def insert_fakedrop_rules(self):
        if self.pl_conf.get('fakedrop', None) is None:
            return
        # Insert default drop actions for the sake of statistics
        mod_flow = self.mod_flow
        for table_name in self.pl.tables.iterkeys():
            if table_name != 'drop':
                mod_flow(table_name, 0, goto='drop')
        if not self.pl_conf.fakedrop:
            mod_flow('drop', 0)
        else:
            # fakedrop == True: bounce packets back instead of dropping them
            mod_flow('drop', match={'in_port': self.ul_port},
                     output=self.dl_port)
            mod_flow('drop', match={'in_port': self.dl_port},
                     output=self.ul_port)

    def configure(self):
        if self.configured:
            return
        ofp = self.dp.ofproto
        parser = self.dp.ofproto_parser

        self.clear_switch()
        self.dp.send_msg(parser.OFPPortDescStatsRequest(self.dp, 0,
                                                        ofp.OFPP_ANY))
        self.change_status('wait_for_PortDesc')
        # Will continue from self.configure_1()

    def configure_1(self):
        self.change_status('configure_1')
        parser = self.dp.ofproto_parser

        self.insert_fakedrop_rules()
        self.pl.config_switch(parser)

        # Finally, send a barrier request and wait for the reply
        msg = parser.OFPBarrierRequest(self.dp)
        msgs = []
        ofctl.send_stats_request(self.dp, msg, self.waiters, msgs, self.logger)

        self.handle_configured()

    def handle_configured(self):
        "Called when the initial configuration is uploaded to the switch"
        self.configured = True
        self.change_status('configured')
        try:
            requests.get(CONF['webhook_configured'])
        except requests.ConnectionError:
            pass
        if self.pl_conf.get('run_time'):
            self._timer.start(1)
        # else:
        #     hub.spawn_after(1, TipsyController.do_exit)

    def stop(self):
        self.change_status('stopping')
        self.stop_datapath()
        self.close()
        self.change_status('stopped')
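
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module: a minimal pipeline
# class showing how the RyuApp helpers above are meant to be used.  The class
# name (ExamplePipeline), the table layout and the match fields are made up
# for this example; real pipelines are located at runtime by
# find_mod.find_class() in instantiate_pipeline() and are expected to expose
# the same interface (tables, has_tunnels, config_switch, do_unknown).
class ExamplePipeline(object):

    def __init__(self, parent, conf):
        self.parent = parent                     # the RyuApp instance
        self.conf = conf                         # parsed pipeline_conf
        self.tables = {'ingress': 0, 'drop': 1}  # name -> OpenFlow table id
        self.has_tunnels = False

    def config_switch(self, parser):
        # Called from RyuApp.configure_1().  mod_flow() accepts table names,
        # converts CIDR addresses to (addr, mask) pairs, and appends
        # output/goto actions for us.
        self.parent.mod_flow('ingress', priority=10,
                             match={'eth_type': 0x0800,
                                    'ipv4_dst': '10.0.0.0/24'},
                             output=self.parent.ul_port)
        # Groups are added with add_group() and referenced via OFPActionGroup.
        self.parent.add_group(1, [parser.OFPActionOutput(self.parent.dl_port)])
        self.parent.mod_flow('ingress', priority=0,
                             actions=[parser.OFPActionGroup(1)])

    def do_unknown(self, cmd):
        # Fallback for unrecognised run_time actions (see handle_timer).
        self.parent.logger.error('unknown action: %s', cmd.action)
# ---------------------------------------------------------------------------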