def __init__(self, *args, **kwargs): super(SimpleSwitch13, self).__init__(*args, **kwargs) # self.mac_to_port = {} db_store._instance = None self.fake_lswitch_default_subnets = [ l2.Subnet(dhcp_ip="192.168.123.0", name="private-subnet", enable_dhcp=True, topic="fake_tenant1", gateway_ip="192.168.123.1", cidr="192.168.123.0/24", id="fake_subnet1") ] #print (self.fake_lswitch_default_subnets[0].dhcp_ip) #common_config.init(sys.argv[1:3]) #common_config.setup_logging() self.nb_api = api_nb.NbApi.get_instance(False) self.controller = controller_concept.DfStandaloneController( 'df_standalone', self.nb_api) self.db_store = db_store.get_instance() self.controller.on_datapath_set() self.nb_api.on_db_change.append(self.db_change_callback) if self.USE_CACHE: self.sync_with_database()
def test_update_router(self):
    """Swap the router's single port and check add/delete notifications."""
    # Build the baseline topology (sets self.router / self.router_ports).
    self.test_create_router()

    # A second switch + subnet for the replacement router port.
    new_subnets = [l2.Subnet(id="test_subnet10_2",
                             topic="fake_tenant1",
                             name="private-subnet",
                             enable_dhcp=True,
                             cidr="10.2.0.0/24",
                             gateway_ip="10.2.0.1",
                             dhcp_ip="10.2.0.2")]
    new_lswitch = l2.LogicalSwitch(id='test_lswitch_2',
                                   topic='fake_tenant1',
                                   name='test_lswitch_2',
                                   version=5,
                                   unique_key=6,
                                   segmentation_id=42,
                                   is_external=False,
                                   subnets=new_subnets)
    new_ports = [l3.LogicalRouterPort(id="fake_router_1_port2",
                                      topic="fake_tenant1",
                                      network="10.2.0.1/24",
                                      mac="fa:16:3e:50:96:f6",
                                      unique_key=7,
                                      lswitch=new_lswitch)]
    self.controller.update(new_lswitch)

    # Publish a newer router version that carries only the new port:
    # the controller must delete the old port and add the new one.
    updated_router = copy.copy(self.router)
    updated_router.ports = new_ports
    updated_router.version += 1

    self.app._add_router_port.reset_mock()
    self.controller.update(updated_router)

    self.app._add_router_port.assert_called_once_with(new_ports[0])
    self.app._delete_router_port.assert_called_once_with(
        self.router_ports[0])
def _get_external_subnet(self, fip):
    """Return the floating network's subnet containing the FIP address.

    Looks up all subnets indexed by the floating port's lswitch and
    returns the first whose CIDR contains fip.floating_ip_address, or
    None when no subnet matches.
    """
    floating_lport = fip.floating_lport
    candidates = self.db_store.get_all(
        l2.Subnet(lswitch=floating_lport.lswitch.id),
        index=l2.Subnet.get_index('lswitch'))
    return next(
        (subnet for subnet in candidates
         if fip.floating_ip_address in subnet.cidr),
        None)
def subnet_from_neutron_subnet(subnet):
    """Translate a Neutron subnet dict into an l2.Subnet NB model."""
    # Optional keys fall back to a default name / empty lists.
    fields = {
        'id': subnet['id'],
        'topic': subnet['tenant_id'],
        'name': subnet.get('name', df_const.DF_SUBNET_DEFAULT_NAME),
        'enable_dhcp': subnet['enable_dhcp'],
        'cidr': subnet['cidr'],
        'gateway_ip': subnet['gateway_ip'],
        'dns_nameservers': subnet.get('dns_nameservers', []),
        'host_routes': subnet.get('host_routes', []),
    }
    return l2.Subnet(**fields)
def subnet_from_neutron_subnet(subnet):
    """Translate a Neutron subnet dict into a versioned l2.Subnet model."""
    fields = {
        'id': subnet['id'],
        'topic': utils.get_obj_topic(subnet),
        'name': subnet.get('name'),
        'enable_dhcp': subnet['enable_dhcp'],
        'cidr': subnet['cidr'],
        'gateway_ip': subnet['gateway_ip'],
        'dns_nameservers': subnet.get('dns_nameservers', []),
        'host_routes': subnet.get('host_routes', []),
        # Revision number drives NB versioning; lswitch links the parent
        # network.
        'version': subnet['revision_number'],
        'lswitch': subnet['network_id'],
    }
    return l2.Subnet(**fields)
def test_create_router(self):
    """Create a one-port router and verify the L3 lookup flow is installed.

    Fixtures are stored on self so test_update_router can reuse them.
    """
    self.subnets = [l2.Subnet(id="test_subnet10_1",
                              topic="fake_tenant1",
                              name="private-subnet",
                              enable_dhcp=True,
                              cidr="10.1.0.0/24",
                              gateway_ip="10.1.0.1",
                              dhcp_ip="10.1.0.2")]
    self.lswitch = l2.LogicalSwitch(id='test_lswitch_1',
                                    topic='fake_tenant1',
                                    name='test_lswitch_1',
                                    version=5,
                                    unique_key=3,
                                    segmentation_id=41,
                                    is_external=False,
                                    subnets=self.subnets)
    self.router_ports = [l3.LogicalRouterPort(id="fake_router_1_port1",
                                              topic="fake_tenant1",
                                              network="10.1.0.1/24",
                                              mac="fa:16:3e:50:96:f5",
                                              unique_key=4,
                                              lswitch=self.lswitch)]
    self.router = l3.LogicalRouter(id="fake_router_1",
                                   topic="fake_tenant1",
                                   name="fake_router_1",
                                   version=10,
                                   unique_key=5,
                                   ports=self.router_ports)

    # Publish the switch first, then the router that attaches to it.
    self.controller.update(self.lswitch)
    self.app.mod_flow.reset_mock()
    self.controller.update(self.router)

    self.app._add_router_port.assert_called_once_with(self.router_ports[0])

    # The flow must match the router's metadata/MAC, set reg7 to the
    # port's unique key, and jump to the egress table.
    parser = self.app.parser
    ofproto = self.app.ofproto
    expected_match = parser.OFPMatch(metadata=5,
                                     eth_dst="fa:16:3e:50:96:f5")
    expected_inst = [
        parser.OFPInstructionActions(
            ofproto.OFPIT_APPLY_ACTIONS,
            [parser.OFPActionSetField(reg7=4)]),
        parser.OFPInstructionGotoTable(const.EGRESS_TABLE),
    ]
    self.app.mod_flow.assert_called_once_with(
        inst=expected_inst,
        table_id=const.L3_LOOKUP_TABLE,
        priority=const.PRIORITY_VERY_LOW,
        match=expected_match)
def delete_subnet_postcommit(self, context):
    """Mirror a Neutron subnet deletion into the DF northbound DB."""
    deleted = context.current
    net_id = deleted['network_id']
    subnet_id = deleted['id']
    # The network in context is still the network before deleting subnet
    network = self.core_plugin.get_network(context._plugin_context,
                                           net_id)
    try:
        topic = df_utils.get_obj_topic(network)
        self.nb_api.delete(l2.Subnet(id=subnet_id))
        # Bump the parent switch's version so listeners notice the
        # topology change.
        self.nb_api.update(l2.LogicalSwitch(
            id=net_id,
            topic=topic,
            version=network['revision_number']))
    except df_exceptions.DBKeyNotFound:
        # Deleted concurrently by someone else — nothing left to do.
        LOG.debug("network %s is not found in DB, might have "
                  "been deleted concurrently", net_id)
        return
    LOG.info("DFMechDriver: delete subnet %s", subnet_id)
def test_db_consistent(self):
    """Integration test: DB-consistency sync picks up out-of-band NB writes.

    Creates a real network/subnet/VM via Neutron, then writes, updates and
    deletes a logical switch directly through the NB driver (bypassing the
    normal notification path) and waits for the periodic sync to reflect
    each change in the OVS flow table.  Timing-sensitive: every step waits
    db_sync_time before polling the flows.
    """
    self.db_sync_time = self.conf.db_sync_time
    # Real Neutron-side fixtures (tracked by self.store for cleanup).
    network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
    network_id = network.create()
    topic = network.get_topic()
    subnet = self.store(objects.SubnetTestObj(self.neutron,
                                              self.nb_api,
                                              network_id))
    subnet_body = {'network_id': network_id,
                   'cidr': '10.50.0.0/24',
                   'gateway_ip': '10.50.0.1',
                   'ip_version': 4,
                   'name': 'private',
                   'enable_dhcp': True}
    subnet.create(subnet=subnet_body)
    time.sleep(constants.DEFAULT_RESOURCE_READY_TIMEOUT)
    self.assertTrue(network.exists())
    self.assertTrue(subnet.exists())
    vm = self.store(objects.VMTestObj(self, self.neutron))
    vm.create(network=network)
    self.assertIsNotNone(vm.server.addresses['mynetwork'])
    mac = vm.server.addresses['mynetwork'][0]['OS-EXT-IPS-MAC:mac_addr']
    self.assertIsNotNone(mac)
    ovs = utils.OvsFlowsParser()
    # Sanity check: the VM's MAC shows up in the L2 lookup table.
    utils.wait_until_true(
        lambda: self._check_l2_lookup_rule(
            ovs.dump(self.integration_bridge), mac),
        timeout=10, sleep=1,
        exception=Exception('no rule for vm in l2 lookup table')
    )
    # Out-of-band write: create a logical switch directly in the NB DB.
    net_id = '11111111-1111-1111-1111-111111111111'
    df_network = l2.LogicalSwitch(
        id=net_id,
        topic=topic,
        name='df_nw1',
        network_type='vxlan',
        segmentation_id=4000,
        is_external=False,
        mtu=1500,
        unique_key=1,
        version=1)
    df_subnet = l2.Subnet(
        id='22222222-2222-2222-2222-222222222222',
        topic=topic,
        name='df_sn1',
        enable_dhcp=True,
        cidr='10.60.0.0/24',
        dhcp_ip='10.60.0.2',
        gateway_ip='10.60.0.1')
    df_network.add_subnet(df_subnet)
    df_network_json = df_network.to_json()
    self.nb_api.driver.create_key(
        'lswitch', net_id, df_network_json, topic)
    time.sleep(self.db_sync_time)
    # The sync should install a DHCP goto rule for 10.60.0.2.
    utils.wait_until_true(
        lambda: utils.check_dhcp_ip_rule(
            ovs.dump(self.integration_bridge), '10.60.0.2'),
        timeout=self.db_sync_time + constants.DEFAULT_CMD_TIMEOUT,
        sleep=1,
        exception=Exception('no goto dhcp rule for lswitch')
    )
    # Out-of-band update: change the DHCP IP and bump the version.
    df_network.version = 2
    df_network.subnets[0].dhcp_ip = '10.60.0.3'
    df_network_json = df_network.to_json()
    self.nb_api.driver.set_key('lswitch', net_id, df_network_json,
                               topic)
    time.sleep(self.db_sync_time)
    utils.wait_until_true(
        lambda: utils.check_dhcp_ip_rule(
            ovs.dump(self.integration_bridge), '10.60.0.3'),
        timeout=self.db_sync_time + constants.DEFAULT_CMD_TIMEOUT,
        sleep=1,
        exception=Exception('no goto dhcp rule for lswitch')
    )
    # Out-of-band delete: the DHCP rule must disappear.
    self.nb_api.driver.delete_key('lswitch', net_id, topic)
    time.sleep(self.db_sync_time)
    utils.wait_until_true(
        lambda: self._check_no_lswitch_dhcp_rule(
            ovs.dump(self.integration_bridge), '10.60.0.3'),
        timeout=self.db_sync_time + constants.DEFAULT_CMD_TIMEOUT,
        sleep=1,
        exception=Exception('could not delete goto dhcp rule for lswitch')
    )
    vm.close()
    subnet.close()
    network.close()
def _create_2nd_subnet(self):
    """Build a second, DHCP-disabled subnet fixture (192.168.18.0/24)."""
    fields = dict(id='subnet2',
                  topic='fake_tenant1',
                  cidr='192.168.18.0/24',
                  enable_dhcp=False)
    return l2.Subnet(**fields)
ports=fake_logical_router_ports) fake_logic_switch1 = l2.LogicalSwitch(unique_key=1, name='private', is_external=False, segmentation_id=41, mtu=1450, topic='fake_tenant1', id='fake_switch1', version=5) fake_lswitch_default_subnets = [ l2.Subnet(name="private-subnet", enable_dhcp=True, topic="fake_tenant1", gateway_ip="10.0.0.1", cidr="10.0.0.0/24", id="fake_subnet1", version=1, lswitch='fake_switch1') ] fake_external_switch1 = l2.LogicalSwitch(unique_key=2, name='public', is_external=True, segmentation_id=69, mtu=1450, topic='fake_tenant1', id='fake_external_switch1') external_switch1_subnets = [ l2.Subnet(name="public-subnet",
id="fake_router_port1") ] fake_logic_router1 = l3.LogicalRouter(name="router1", topic="fake_tenant1", version=10, routes=[], id="fake_router_id", unique_key=1, ports=fake_logical_router_ports) fake_lswitch_default_subnets = [ l2.Subnet(dhcp_ip="10.0.0.2", name="private-subnet", enable_dhcp=True, topic="fake_tenant1", gateway_ip="10.0.0.1", cidr="10.0.0.0/24", id="fake_subnet1") ] fake_logic_switch1 = l2.LogicalSwitch(subnets=fake_lswitch_default_subnets, unique_key=1, name='private', is_external=False, segmentation_id=41, mtu=1450, topic='fake_tenant1', id='fake_switch1', version=5)
class RyuDFAdapter(ofp_handler.OFPHandler):
    """Ryu OFP application adapting OpenFlow events to DF app dispatching.

    Owns the single datapath connection, forwards switch events to the
    registered DF apps via the dispatcher, and routes packet-ins to
    per-table handlers.
    """

    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    # From this OFP version on, OFPHandler issues the port-desc stats
    # request itself; below it we must send it manually.
    OF_AUTO_PORT_DESC_STATS_REQ_VER = 0x04

    # Optional hook invoked once the datapath is set (standalone mode).
    call_on_datapath_set = None
    ctrl = None

    chassis = core.Chassis(
        id='whoami',
        ip='172.24.4.50',
        tunnel_types=('vxlan', ),
    )
    local_binding = l2.PortBinding(
        type=l2.BINDING_CHASSIS,
        chassis=chassis,
    )
    # Fake subnet fixture for the default logical switch.
    fake_lswitch_default_subnets = [
        l2.Subnet(dhcp_ip="192.168.123.0",
                  name="private-subnet",
                  enable_dhcp=True,
                  topic="fake_tenant1",
                  gateway_ip="192.168.123.1",
                  cidr="192.168.123.0/24",
                  id="fake_subnet1")]

    def __init__(self, vswitch_api, nb_api, neutron_server_notifier=None):
        super(RyuDFAdapter, self).__init__()
        self.dispatcher = dispatcher.AppDispatcher(cfg.CONF.df.apps_list)
        # TODO: Remove vswitch api for standalone version
        self.vswitch_api = vswitch_api
        self.nb_api = nb_api
        self.neutron_server_notifier = neutron_server_notifier
        self._datapath = None
        # table_id -> packet-in handler callable
        self.table_handlers = {}
        self.first_connect = True

    @property
    def datapath(self):
        return self._datapath

    def start(self):
        """Start the Ryu app and load all configured DF apps."""
        super(RyuDFAdapter, self).start()
        self.load(self,
                  vswitch_api=self.vswitch_api,
                  nb_api=self.nb_api,
                  neutron_server_notifier=self.neutron_server_notifier)
        #self.wait_until_ready()

    def load(self, *args, **kwargs):
        self.dispatcher.load(*args, **kwargs)

    def is_ready(self):
        return self.datapath is not None

    def wait_until_ready(self):
        """Block until a datapath has connected, polling every 3 seconds."""
        while not self.is_ready():
            LOG.debug("Not ready. Going to sleep 3")
            time.sleep(3)

    def register_table_handler(self, table_id, handler):
        """Register a packet-in handler for table_id.

        Raises RuntimeError if the table already has a handler.
        """
        if table_id in self.table_handlers:
            raise RuntimeError(
                _('Cannot register handler {new_handler} for table {table},'
                  'occupied by {existing_handler}').format(
                    table=table_id,
                    new_handler=handler,
                    existing_handler=self.table_handlers[table_id],
                ),
            )
        self.table_handlers[table_id] = handler

    def unregister_table_handler(self, table_id, handler):
        # Idempotent: missing table_id is ignored; the handler argument is
        # accepted for interface symmetry but not checked.
        self.table_handlers.pop(table_id, None)

    def notify_ovs_sync_finished(self):
        self.dispatcher.dispatch('ovs_sync_finished')

    def notify_ovs_sync_started(self):
        self.dispatcher.dispatch('ovs_sync_started')

    @handler.set_ev_cls(ofp_event.EventOFPStateChange,
                        [handler.MAIN_DISPATCHER, handler.DEAD_DISPATCHER])
    def state_change_handler(self, ev):
        # Placeholder hook — currently no action is taken on state change.
        dp = ev.datapath

    @handler.set_ev_handler(ofp_event.EventOFPSwitchFeatures,
                            handler.CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Record the datapath and kick off per-connection initialization."""
        # TODO(oanson) is there a better way to get the datapath?
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser  # kept for the table-miss snippet
        self._datapath = datapath
        super(RyuDFAdapter, self).switch_features_handler(ev)
        version = self.datapath.ofproto.OFP_VERSION
        if version < RyuDFAdapter.OF_AUTO_PORT_DESC_STATS_REQ_VER:
            # Otherwise, this is done automatically by OFPHandler
            self._send_port_desc_stats_request(self.datapath)
        self.get_sw_async_msg_config()
        self.dispatcher.dispatch('switch_features_handler', ev)
        if not self.first_connect:
            # For reconnecting to the ryu controller, df needs a full sync
            # in case any resource added during the disconnection.
            self.nb_api.db_change_callback(
                None, None, constants.CONTROLLER_REINITIALIZE, None)
        self.first_connect = False
        #self.vswitch_api.initialize(self.nb_api)
        if RyuDFAdapter.call_on_datapath_set is not None:
            RyuDFAdapter.call_on_datapath_set(RyuDFAdapter.ctrl)

        # install table miss flow
        # match = parser.OFPMatch()
        # actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
        #                                   ofproto.OFPCML_NO_BUFFER)]
        # self.add_flow(datapath, 0, match, actions)

    def _send_port_desc_stats_request(self, datapath):
        ofp_parser = datapath.ofproto_parser
        req = ofp_parser.OFPPortDescStatsRequest(datapath, 0)
        datapath.send_msg(req)

    @handler.set_ev_handler(ofp_event.EventOFPPortDescStatsReply,
                            handler.MAIN_DISPATCHER)
    def port_desc_stats_reply_handler(self, ev):
        self.dispatcher.dispatch('port_desc_stats_reply_handler', ev)

    @handler.set_ev_handler(ofp_event.EventOFPPacketIn,
                            handler.MAIN_DISPATCHER)
    def OF_packet_in_handler(self, event):
        """Route a packet-in to the handler registered for its table."""
        msg = event.msg
        table_id = msg.table_id
        if table_id in self.table_handlers:
            # Renamed from 'handler' to avoid shadowing the ryu handler
            # module imported at file level.
            table_handler = self.table_handlers[table_id]
            table_handler(event)
        else:
            # BUGFIX: '%(msg)' was missing its 's' conversion, which makes
            # %-formatting raise when the record is emitted.
            LOG.info(
                "No handler for table id %(table)s with message "
                "%(msg)s", {
                    'table': table_id,
                    'msg': msg
                })

    @handler.set_ev_handler(ofp_event.EventOFPErrorMsg,
                            handler.MAIN_DISPATCHER)
    def OF_error_msg_handler(self, event):
        """Decode and log an OFPErrorMsg; fall back to a raw hex dump."""
        msg = event.msg
        try:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(
                msg.data)
            ryu_msg = ofproto_parser.msg(
                self._datapath, version, msg_type,
                msg_len - ofproto_common.OFP_HEADER_SIZE, xid, msg.data)
            LOG.error('OFPErrorMsg received: %s', ryu_msg)
        except Exception:
            LOG.error(
                'Unrecognized OFPErrorMsg received: '
                'type=0x%(type)02x code=0x%(code)02x '
                'message=%(msg)s', {
                    'type': msg.type,
                    'code': msg.code,
                    'msg': utils.hex_array(msg.data)
                })

    @handler.set_ev_cls(ofp_event.EventOFPGetAsyncReply,
                        handler.MAIN_DISPATCHER)
    def get_async_reply_handler(self, event):
        """Log the switch's async-message config and enable TTL packet-in."""
        msg = event.msg
        LOG.debug(
            'OFPGetAsyncReply received: packet_in_mask=0x%08x:0x%08x '
            'port_status_mask=0x%08x:0x%08x '
            'flow_removed_mask=0x%08x:0x%08x',
            msg.packet_in_mask[0], msg.packet_in_mask[1],
            msg.port_status_mask[0], msg.port_status_mask[1],
            msg.flow_removed_mask[0], msg.flow_removed_mask[1])
        self.set_sw_async_msg_config_for_ttl(msg)

    def get_sw_async_msg_config(self):
        """Get the configuration of current switch"""
        ofp_parser = self._datapath.ofproto_parser
        req = ofp_parser.OFPGetAsyncRequest(self._datapath)
        self._datapath.send_msg(req)

    def set_sw_async_msg_config_for_ttl(self, cur_config):
        """Configure switch for TTL

        Configure the switch to packet-in TTL invalid packets to
        controller. Note that this method only works in OFP 1.3, however,
        this ryu app claims that it only supports ofproto_v1_3.OFP_VERSION.
        So, no check will be made here.
        """
        dp = self._datapath
        parser = dp.ofproto_parser
        ofproto = dp.ofproto
        if cur_config.packet_in_mask[0] & 1 << ofproto.OFPR_INVALID_TTL != 0:
            LOG.info('SW config for TTL error packet in has already '
                     'been set')
            return
        packet_in_mask = (cur_config.packet_in_mask[0] |
                          1 << ofproto.OFPR_INVALID_TTL)
        m = parser.OFPSetAsync(
            dp,
            [packet_in_mask, cur_config.packet_in_mask[1]],
            [cur_config.port_status_mask[0], cur_config.port_status_mask[1]],
            [cur_config.flow_removed_mask[0],
             cur_config.flow_removed_mask[1]])
        dp.send_msg(m)
        LOG.info('Set SW config for TTL error packet in.')
def get_subnet(self):
    """Fetch this subnet's current record from the NB database."""
    lookup = l2.Subnet(id=self.subnet_id)
    return self.nb_api.get(lookup)
def test_db_consistent(self):
    """Integration test: DB-consistency sync handles out-of-band NB writes.

    Creates real Neutron fixtures, then writes a logical switch, subnet
    and DHCP port directly through the NB driver (bypassing the normal
    notification path) and waits for the periodic sync to install and
    remove the corresponding DHCP flow.  All fixtures are torn down via
    addCleanup.
    """
    self.db_sync_time = self.conf.db_sync_time
    network = self.store(objects.NetworkTestObj(self.neutron, self.nb_api))
    network_id = network.create()
    self.addCleanup(network.close)
    topic = network.get_topic()
    subnet = self.store(objects.SubnetTestObj(self.neutron,
                                              self.nb_api,
                                              network_id))
    subnet_body = {'network_id': network_id,
                   'cidr': '10.50.0.0/24',
                   'gateway_ip': '10.50.0.1',
                   'ip_version': 4,
                   'name': 'private',
                   'enable_dhcp': True}
    subnet.create(subnet=subnet_body)
    self.addCleanup(subnet.close)
    time.sleep(constants.DEFAULT_RESOURCE_READY_TIMEOUT)
    self.assertTrue(network.exists())
    self.assertTrue(subnet.exists())
    vm = self.store(objects.VMTestObj(self, self.neutron))
    vm.create(network=network)
    self.addCleanup(vm.close)
    self.assertIsNotNone(vm.server.addresses['mynetwork'])
    mac = vm.server.addresses['mynetwork'][0]['OS-EXT-IPS-MAC:mac_addr']
    self.assertIsNotNone(mac)
    ovs = utils.OvsFlowsParser()
    # Sanity check: the VM's MAC shows up in the L2 lookup table.
    utils.wait_until_true(
        lambda: self._check_l2_lookup_rule(
            ovs.dump(self.integration_bridge), mac),
        timeout=10, sleep=1,
        exception=Exception('no rule for vm in l2 lookup table')
    )
    # Out-of-band write: logical switch directly in the NB DB.
    net_id = '11111111-1111-1111-1111-111111111111'
    df_network = l2.LogicalSwitch(
        id=net_id,
        topic=topic,
        name='df_nw1',
        network_type='vxlan',
        segmentation_id=4000,
        is_external=False,
        mtu=1500,
        unique_key=1,
        version=1)
    df_network_json = df_network.to_json()
    self.nb_api.driver.create_key(l2.LogicalSwitch.table_name,
                                  net_id, df_network_json, topic)
    # BUGFIX: cleanup previously used the literal 'lswitch'; use the
    # model's table_name to match the create_key call above.
    self.addCleanup(self.nb_api.driver.delete_key,
                    l2.LogicalSwitch.table_name, net_id, topic)
    subnet_id = '22222222-2222-2222-2222-222222222222'
    df_subnet = l2.Subnet(
        id=subnet_id,
        topic=topic,
        name='df_sn1',
        enable_dhcp=True,
        cidr='10.60.0.0/24',
        dhcp_ip='10.60.0.2',
        gateway_ip='10.60.0.1',
        version=1,
        lswitch=net_id)
    self.nb_api.driver.create_key(l2.Subnet.table_name, subnet_id,
                                  df_subnet.to_json(), topic)
    self.addCleanup(self.nb_api.driver.delete_key,
                    l2.Subnet.table_name, subnet_id, topic)
    # BUGFIX: the UUID literal previously ended with a stray ',' inside
    # the string ('...2222,'), producing a malformed port id.
    port_id = '33333333-2222-2222-2222-222222222222'
    dhcp_port = l2.LogicalPort(
        topic=topic,
        name='df_dhcp1',
        macs=['aa:bb:cc:dd:ee:ff'],
        id=port_id,
        ips=['10.60.0.2'],
        subnets=[df_subnet.id],
        device_owner=n_const.DEVICE_OWNER_DHCP,
        lswitch=df_network.id,
        unique_key=1,
    ).to_json()
    self.nb_api.driver.create_key(
        'lport', port_id, dhcp_port, topic)
    df_net_unique_key = df_network.unique_key
    time.sleep(self.db_sync_time)
    # The sync should install the DHCP goto rule for this network.
    utils.wait_until_true(
        lambda: utils.check_dhcp_network_rule(
            ovs.dump(self.integration_bridge), df_net_unique_key),
        timeout=self.db_sync_time + constants.DEFAULT_CMD_TIMEOUT,
        sleep=1,
        exception=Exception('no goto dhcp rule for lswitch')
    )
    # Deleting the DHCP port must remove the rule again.
    self.nb_api.driver.delete_key('lport', port_id, topic)
    time.sleep(self.db_sync_time)
    utils.wait_until_true(
        lambda: self._check_no_lswitch_dhcp_rule(
            ovs.dump(self.integration_bridge), df_net_unique_key),
        timeout=self.db_sync_time + constants.DEFAULT_CMD_TIMEOUT,
        sleep=1,
        exception=Exception('could not delete goto dhcp rule for lswitch')
    )