def _start_core(self, settings):
    """Starts BGPS core using settings and given pool.
    """
    # Get common settings
    routing_settings = settings.BGP.get('routing')
    common_settings = {}

    # Get required common settings.
    try:
        common_settings[LOCAL_AS] = routing_settings.pop(LOCAL_AS)
        common_settings[ROUTER_ID] = routing_settings.pop(ROUTER_ID)
    except KeyError as e:
        raise ApplicationException(
            desc='Required minimum configuration missing %s' % e)

    # Get optional common settings
    common_settings[BGP_SERVER_PORT] = \
        routing_settings.get(BGP_SERVER_PORT, DEFAULT_BGP_SERVER_PORT)
    common_settings[REFRESH_STALEPATH_TIME] = \
        routing_settings.get(REFRESH_STALEPATH_TIME,
                             DEFAULT_REFRESH_STALEPATH_TIME)
    common_settings[REFRESH_MAX_EOR_TIME] = \
        routing_settings.get(REFRESH_MAX_EOR_TIME,
                             DEFAULT_REFRESH_MAX_EOR_TIME)
    common_settings[LABEL_RANGE] = \
        routing_settings.get(LABEL_RANGE, DEFAULT_LABEL_RANGE)

    # Start BGPS core service
    waiter = hub.Event()
    call('core.start', waiter=waiter, **common_settings)
    waiter.wait()
    LOG.debug('Core started %s' % CORE_MANAGER.started)

    # Core manager started; add configured neighbors, VRFs and networks.
    if CORE_MANAGER.started:
        # Add neighbors.
        self._add_neighbors(routing_settings)
        # Add VRFs.
        self._add_vrfs(routing_settings)
        # Add networks.
        self._add_networks(routing_settings)
def __init__(self, core_service, host, port):
    super(BMPClient, self).__init__(name='BMPClient(%s:%s)' % (host, port))
    self._core_service = core_service
    self._core_service.signal_bus.register_listener(
        BgpSignalBus.BGP_ADJ_RIB_IN_CHANGED,
        lambda _, data: self.on_adj_rib_in_changed(data)
    )
    self._core_service.signal_bus.register_listener(
        BgpSignalBus.BGP_ADJ_UP,
        lambda _, data: self.on_adj_up(data)
    )
    self._core_service.signal_bus.register_listener(
        BgpSignalBus.BGP_ADJ_DOWN,
        lambda _, data: self.on_adj_down(data)
    )
    self._socket = None
    self.server_address = (host, port)
    self._connect_retry_event = hub.Event()
    self._connect_retry_time = 5
def start(**kwargs):
    """Starts a new context using the provided configuration.

    Raises RuntimeConfigError if a context is already active.
    """
    if CORE_MANAGER.started:
        raise RuntimeConfigError('Current context has to be stopped to start '
                                 'a new context.')

    try:
        waiter = kwargs.pop('waiter')
    except KeyError:
        waiter = hub.Event()
    common_config = CommonConf(**kwargs)
    hub.spawn(CORE_MANAGER.start, *[],
              **{'common_conf': common_config, 'waiter': waiter})
    return True
def get_port_stats(dpath, waiters, port_id, ofctl):
    """ get port stats """
    _LOG.debug("get_port_stats: %d %s", dpath.id, port_id)

    xid = dpath.get_xid()
    lock = hub.Event()
    msgs = list()
    prev_msg_num = len(msgs)

    if port_id is None:
        port_id = dpath.ofproto.OFPP_ANY

    dp_waiter = waiters.setdefault(dpath.id, {})
    dp_waiter[xid] = (lock, msgs)

    msg = fibcapi.new_ff_multipart_request_port(dpath.id, port_id,
                                                _PORT_STATS_NAMES)
    dpath.send_msg(pb.FF_MULTIPART_REQUEST, msg, xid)

    while True:
        lock.wait(timeout=0.5)
        curr_msg_num = len(msgs)
        if curr_msg_num == prev_msg_num:
            break
        prev_msg_num = curr_msg_num

    if not lock.is_set():
        # Timeout
        del dp_waiter[xid]
        if not dp_waiter:
            del waiters[dpath.id]
        return None

    stats_list = list()
    for msg in msgs:
        for stats in msg.port.stats:
            stats_entry = {k: v for k, v in stats.values.items()}
            stats_entry["port_no"] = stats.port_no
            stats_list.append(stats_entry)

    return {dpath.id: stats_list}
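# The request side above registers a (lock, msgs) pair under
# waiters[dpath.id][xid]; whatever handles the multipart replies is expected
# to append each decoded reply to msgs and set the lock so the waiting
# greenthread wakes up.  A minimal sketch of that counterpart, assuming a
# handler that is given the datapath id, the xid and the decoded reply
# (these names are illustrative, not taken from this code base):
def handle_multipart_reply(waiters, dpid, xid, reply_msg):
    dp_waiter = waiters.get(dpid, {})
    if xid not in dp_waiter:
        return  # nobody is waiting for this xid any more
    lock, msgs = dp_waiter[xid]
    msgs.append(reply_msg)  # hand the reply to the waiting greenthread
    lock.set()              # wake it up; get_port_stats() re-checks len(msgs)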
def _state_machine(self):
    """ Port state machine.
        Changes to the next state when the timer expires
        or the _change_status() method is called. """
    role_str = {ROOT_PORT: 'ROOT_PORT',
                DESIGNATED_PORT: 'DESIGNATED_PORT',
                NON_DESIGNATED_PORT: 'NON_DESIGNATED_PORT'}
    state_str = {PORT_STATE_DISABLE: 'DISABLE',
                 PORT_STATE_BLOCK: 'BLOCK',
                 PORT_STATE_LISTEN: 'LISTEN',
                 PORT_STATE_LEARN: 'LEARN',
                 PORT_STATE_FORWARD: 'FORWARD'}

    if self.state is PORT_STATE_DISABLE:
        self.ofctl.set_port_status(self.ofport, self.state)

    while True:
        self.logger.info('[port=%d] %s / %s', self.ofport.port_no,
                         role_str[self.role], state_str[self.state],
                         extra=self.dpid_str)

        self.state_event = hub.Event()
        timer = self._get_timer()
        if timer:
            timeout = hub.Timeout(timer)
            try:
                self.state_event.wait()
            except hub.Timeout as t:
                if t is not timeout:
                    err_msg = 'Internal error. Not my timeout.'
                    raise RyuException(msg=err_msg)
                new_state = self._get_next_state()
                self._change_status(new_state, thread_switch=False)
            finally:
                timeout.cancel()
        else:
            self.state_event.wait()

        self.state_event = None
def _core_start(self, settings):
    waiter = hub.Event()
    call('core.start', waiter=waiter, **settings)
    waiter.wait()
def test_event2(self):
    ev = hub.Event()
    # allow multiple sets unlike eventlet Event
    ev.set()
    ev.set()
def test_event1(self):
    ev = hub.Event()
    ev.set()
    with hub.Timeout(1):
        ev.wait()  # should return immediately
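# Both tests above rely on the usual semantics of hub.Event: a greenthread
# blocks in wait() until another greenthread calls set(), and setting an
# already-set event is harmless.  A minimal sketch of that pattern outside
# the test suite (the worker function and its argument are made up for
# illustration):
from ryu.lib import hub

def _worker(done_ev):
    hub.sleep(1)   # pretend to do some work
    done_ev.set()  # wake up whoever is waiting

done = hub.Event()
hub.spawn(_worker, done)
done.wait()        # blocks until _worker() calls set()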
def __init__(self, *args, **kwargs):
    super(SimpleSwitchRest13, self).__init__(*args, **kwargs)
    wsgi = kwargs['wsgi']
    wsgi.register(SimpleSwitchController,
                  {simple_switch_instance_name: self})
    self.lock = hub.Event()
    self.flows = []
def create_custom_event():
    LOG.debug('Create CustomEvent called')
    return hub.Event()
def _test_execute(self, test, description):
    if not self.target_sw or not self.tester_sw:
        self.logger.info('waiting for switches connection...')
        self.sw_waiter = hub.Event()
        self.sw_waiter.wait()
        self.sw_waiter = None

    if description:
        self.logger.info('%s', description)

    # Test execute.
    try:
        # 0. Initialize.
        self._test(STATE_INIT)
        # 1. Install flows.
        for flow in test.prerequisite:
            self._test(STATE_FLOW_INSTALL, flow)
            self._test(STATE_FLOW_EXIST_CHK, flow)
        # 2. Check flow matching.
        for pkt in test.tests:
            if KEY_EGRESS in pkt or KEY_PKT_IN in pkt:
                target_pkt_count = [self._test(STATE_TARGET_PKT_COUNT, True)]
                tester_pkt_count = [self._test(STATE_TESTER_PKT_COUNT, False)]

                result = self._test(STATE_FLOW_MATCH_CHK, pkt)
                if result == TIMEOUT:
                    target_pkt_count.append(
                        self._test(STATE_TARGET_PKT_COUNT, True))
                    tester_pkt_count.append(
                        self._test(STATE_TESTER_PKT_COUNT, False))
                    test_type = (KEY_EGRESS if KEY_EGRESS in pkt
                                 else KEY_PKT_IN)
                    self._test(STATE_NO_PKTIN_REASON, test_type,
                               target_pkt_count, tester_pkt_count)
            else:
                before_stats = self._test(STATE_GET_MATCH_COUNT)
                self._test(STATE_UNMATCH_PKT_SEND, pkt)
                hub.sleep(INTERVAL)
                self._test(STATE_FLOW_UNMATCH_CHK, before_stats, pkt)
        result = [TEST_OK]
        result_type = TEST_OK
    except (TestFailure, TestError,
            TestTimeout, TestReceiveError) as err:
        result = [TEST_ERROR, str(err)]
        result_type = str(err).split(':', 1)[0]
    except Exception:
        result = [TEST_ERROR, RYU_INTERNAL_ERROR]
        result_type = RYU_INTERNAL_ERROR

    # Output test result.
    self.logger.info(' %-100s %s', test.description, result[0])
    if 1 < len(result):
        self.logger.info(' %s', result[1])
        if (result[1] == RYU_INTERNAL_ERROR
                or result == 'An unknown exception'):
            self.logger.error(traceback.format_exc())

    if result[0] != TEST_OK and self.state == STATE_INIT:
        self._test_end('--- Test terminated ---')
    hub.sleep(0)
    return result_type
def _test_execute(self, test, description):
    if isinstance(self.target_sw.dp, DummyDatapath) or \
            isinstance(self.tester_sw.dp, DummyDatapath):
        self.logger.info('waiting for switches connection...')
        self.sw_waiter = hub.Event()
        self.sw_waiter.wait()
        self.sw_waiter = None

    if description:
        self.logger.info('%s', description)
    self.thread_msg = None

    # Test execute.
    try:
        # Initialize.
        self._test(STATE_INIT_METER)
        self._test(STATE_INIT_FLOW, self.target_sw)
        self._test(STATE_INIT_THROUGHPUT_FLOW, self.tester_sw,
                   THROUGHPUT_COOKIE)
        # Install flows.
        for flow in test.prerequisite:
            if isinstance(flow, ofproto_v1_3_parser.OFPFlowMod):
                self._test(STATE_FLOW_INSTALL, self.target_sw, flow)
                self._test(STATE_FLOW_EXIST_CHK,
                           self.target_sw.send_flow_stats, flow)
            elif isinstance(flow, ofproto_v1_3_parser.OFPMeterMod):
                self._test(STATE_METER_INSTALL, self.target_sw, flow)
                self._test(STATE_METER_EXIST_CHK,
                           self.target_sw.send_meter_config_stats, flow)
        # Do tests.
        for pkt in test.tests:
            # Get stats before sending packet(s).
            if KEY_EGRESS in pkt or KEY_PKT_IN in pkt:
                target_pkt_count = [self._test(STATE_TARGET_PKT_COUNT, True)]
                tester_pkt_count = [self._test(STATE_TESTER_PKT_COUNT, False)]
            elif KEY_THROUGHPUT in pkt:
                # install flows for throughput analysis
                for throughput in pkt[KEY_THROUGHPUT]:
                    flow = throughput[KEY_FLOW]
                    self._test(STATE_THROUGHPUT_FLOW_INSTALL,
                               self.tester_sw, flow)
                    self._test(STATE_THROUGHPUT_FLOW_EXIST_CHK,
                               self.tester_sw.send_flow_stats, flow)
                start = self._test(STATE_GET_THROUGHPUT)
            elif KEY_TBL_MISS in pkt:
                before_stats = self._test(STATE_GET_MATCH_COUNT)

            # Send packet(s).
            if KEY_INGRESS in pkt:
                self._one_time_packet_send(pkt)
            elif KEY_PACKETS in pkt:
                self._continuous_packet_send(pkt)

            # Check a result.
            if KEY_EGRESS in pkt or KEY_PKT_IN in pkt:
                result = self._test(STATE_FLOW_MATCH_CHK, pkt)
                if result == TIMEOUT:
                    target_pkt_count.append(
                        self._test(STATE_TARGET_PKT_COUNT, True))
                    tester_pkt_count.append(
                        self._test(STATE_TESTER_PKT_COUNT, False))
                    test_type = (KEY_EGRESS if KEY_EGRESS in pkt
                                 else KEY_PKT_IN)
                    self._test(STATE_NO_PKTIN_REASON, test_type,
                               target_pkt_count, tester_pkt_count)
            elif KEY_THROUGHPUT in pkt:
                end = self._test(STATE_GET_THROUGHPUT)
                self._test(STATE_THROUGHPUT_CHK, pkt[KEY_THROUGHPUT],
                           start, end)
            elif KEY_TBL_MISS in pkt:
                self._test(STATE_SEND_BARRIER)
                hub.sleep(INTERVAL)
                self._test(STATE_FLOW_UNMATCH_CHK, before_stats, pkt)
        result = [TEST_OK]
        result_type = TEST_OK
    except (TestFailure, TestError,
            TestTimeout, TestReceiveError) as err:
        result = [TEST_ERROR, str(err)]
        result_type = str(err).split(':', 1)[0]
    except Exception:
        result = [TEST_ERROR, RYU_INTERNAL_ERROR]
        result_type = RYU_INTERNAL_ERROR
    finally:
        self.ingress_event = None
        for tid in self.ingress_threads:
            hub.kill(tid)
        self.ingress_threads = []

    # Output test result.
    self.logger.info(' %-100s %s', test.description, result[0])
    if 1 < len(result):
        self.logger.info(' %s', result[1])
        if (result[1] == RYU_INTERNAL_ERROR
                or result == 'An unknown exception'):
            self.logger.error(traceback.format_exc())

    hub.sleep(0)
    return result_type
def __init__(self, *args, **kwargs):
    super(Valve, self).__init__(*args, **kwargs)
    self.mac_to_port = {}
    self._snoop = kwargs['igmplib']
    # If you want a switch to operate as a querier, set it up as follows:
    self._snoop.set_querier_mode(dpid=str_to_dpid('0000000000000001'),
                                 server_port=2)
    # dpid         the datapath id that will operate as a querier.
    # server_port  a port number which connects to the multicast server.
    #
    # NOTE: you can set up only one querier.
    #       When you call this method several times,
    #       only the last one becomes effective.

    # Start a thread for stats gathering
    self.stats_event = hub.Event()
    self.threads.append(hub.spawn(self.stats_loop))
    self.datapaths = []
    self.statstimeout = 5

    # Setup logging
    handler = logging.StreamHandler()
    log_format = '%(asctime)s %(name)-8s %(levelname)-8s %(message)s'
    formatter = logging.Formatter(log_format, '%b %d %H:%M:%S')
    handler.setFormatter(formatter)
    self.logger.addHandler(handler)
    self.logger.propagate = 0

    # Read in config file
    self.portdb = None
    self.vlandb = {}
    self.acldb = {}
    with open('valve.yaml', 'r') as stream:
        self.portdb = yaml.load(stream)

    # Make sure the 'exclude' property always exists in 'default'
    if 'default' in self.portdb and 'exclude' not in self.portdb['default']:
        self.portdb['default']['exclude'] = []

    # Make sure the 'acls' property always exists at the top level
    if 'acls' not in self.portdb:
        self.portdb['acls'] = {}

    # Parse top level acls
    for nw_address in self.portdb['acls']:
        if nw_address not in self.acldb:
            self.acldb[nw_address] = []
        for acl in self.portdb['acls'][nw_address]:
            acl = ACL(acl['match'], acl['action'])
            self.logger.info("adding %s on nw_dst:%s" % (acl, nw_address))
            self.acldb[nw_address].append(acl)

    # Parse configuration
    for dpid in self.portdb:
        if dpid in ('all', 'default', 'acls'):
            # Skip nodes that aren't real datapaths
            continue

        # Handle acls, default acls < port acls < global acls
        # Copy out port acls and clear port acl list
        #port_acls = []
        #if 'acls' in self.portdb[dpid]:
        #    port_acls = self.portdb[dpid]['acls']
        #self.portdb[dpid]['acls'] = []

        # Add default acls
        #if 'default' in self.portdb and \
        #        'acls' in self.portdb['default'] and \
        #        port not in self.portdb['default']['exclude']:
        #    self.add_acls_to_port(port, self.portdb['default']['acls'])

        # Add port acls
        #self.add_acls_to_port(port, port_acls)

        # Add global acls
        #if 'all' in self.portdb and 'acls' in self.portdb['all']:
        #    self.add_acls_to_port(port, self.portdb['all']['acls'])

        # Now that we've resolved all acls we can print them
        #for acl in self.portdb[port]['acls']:
        #    self.logger.info("adding %s on port:%s" % (acl, port))

        # Handle vlans
        # If we have global vlans add them
        if 'all' in self.portdb and \
                all(k in self.portdb['all'] for k in ('vlans', 'type')):
            vlans = self.portdb['all']['vlans']
            ptype = self.portdb['all']['type']
            self.logger.info("adding ALL type:%s, vlan:%s" %
                             (ptype, str(vlans)))
            for port in self.portdb[dpid]:
                #self.dump(self.portdb[dpid][port])
                self.portdb[dpid][port]['vlans'] = vlans
                self.portdb[dpid][port]['type'] = ptype

        for port in self.portdb[dpid]:
            # Add vlans defined on this port (or add default values)
            if 'vlans' in self.portdb[dpid][port] and \
                    'type' in self.portdb[dpid][port]:
                vlans = self.portdb[dpid][port]['vlans']
                ptype = self.portdb[dpid][port]['type']
                self.add_port_to_vlans(dpid, port, vlans, ptype)
            elif 'default' in self.portdb and \
                    all(k in self.portdb['default'] for k in ('vlans', 'type')) and \
                    port not in self.portdb['default']['exclude']:
                vlans = self.portdb['default']['vlans']
                ptype = self.portdb['default']['type']
                self.portdb[dpid][port]['vlans'] = vlans
                self.portdb[dpid][port]['type'] = ptype
                self.add_port_to_vlans(dpid, port, vlans, ptype)

    # Remove nodes that aren't real ports
    for n in ('all', 'default', 'acls'):
        if n in self.portdb:
            del self.portdb[n]