def test_get_tenants_port_network_union(self):
    """get_tenants() reports both network owners and port owners.

    Two tenants own shared networks; two other tenants own only ports
    on those networks.  All four must appear in the result.
    """
    # Shared networks owned by 't1' and 't2'.
    net_1_ctx = utils.create_network('t1', 'n1', 11, shared=True)
    net_2_ctx = utils.create_network('t2', 'n2', 21, shared=True)
    # Ports on those networks owned by the unrelated tenants 't3'/'t4'.
    utils.create_port('t3', 'n1', 'v1', 'p1', net_1_ctx)
    utils.create_port('t4', 'n2', 'v2', 'p2', net_2_ctx)
    self.assertEqual(db_lib.get_tenants(), {'t1', 't2', 't3', 't4'})
def _cleanup_db(self):
    """Clean up any unnecessary entries in our DB.

    For every tenant known to our DB, forget any network our DB still
    records but Neutron no longer knows about, keeping the two in sync.
    """
    for tenant in db_lib.get_tenants():
        # Collect the ids Neutron still knows as a set: membership
        # checks below are O(1) instead of O(n) list scans.
        neutron_net_ids = {
            net['id']
            for net in self.ndb.get_all_networks_for_tenant(tenant)
        }
        # Drop DB networks that are absent from Neutron.
        for net_id in db_lib.get_networks(tenant).keys():
            if net_id not in neutron_net_ids:
                db_lib.forget_network(tenant, net_id)
def test_get_tenants_from_networks(self):
    """Owning a network is sufficient to be listed by get_tenants()."""
    utils.create_network('t1', 'n1', 11)
    utils.create_network('t2', 'n2', 21)
    self.assertEqual(db_lib.get_tenants(), {'t1', 't2'})
def test_get_tenants_uniqueness(self):
    """A tenant owning both a network and a port is reported once."""
    # Each tenant owns one network and one port on that network.
    ctx_1 = utils.create_network('t1', 'n1', 11)
    ctx_2 = utils.create_network('t2', 'n2', 21)
    utils.create_port('t1', 'n1', 'v1', 'p1', ctx_1)
    utils.create_port('t2', 'n2', 'v2', 'p2', ctx_2)
    self.assertEqual(db_lib.get_tenants(), {'t1', 't2'})
def test_get_tenants_with_shared_network_ports(self):
    """Port owners on someone else's shared network count as tenants."""
    # 't1' owns the shared network; both 't1' and 't2' own ports on it.
    shared_ctx = utils.create_network('t1', 'n1', 11, shared=True)
    utils.create_port('t1', 'n1', 'v1', 'p1', shared_ctx)
    utils.create_port('t2', 'n1', 'v2', 'p2', shared_ctx)
    self.assertEqual(db_lib.get_tenants(), {'t1', 't2'})
def test_get_tenants_empty(self):
    """With no networks or ports, get_tenants() yields an empty set."""
    self.assertEqual(db_lib.get_tenants(), set())
def synchronize(self):
    """Sends data to EOS which differs from neutron DB.

    Diffs the Neutron DB state against what EOS reports and issues
    bulk RPCs to delete stale tenants/networks/VMs and create missing
    ones.  On any RPC failure, sets self._force_sync so the next cycle
    retries a full sync.
    """
    LOG.info(_LI('Syncing Neutron <-> EOS'))
    try:
        # Register with EOS to ensure that it has correct credentials
        self._rpc.register_with_eos()
        eos_tenants = self._rpc.get_tenants()
    except arista_exc.AristaRpcError:
        # EOS unreachable: abort this cycle, force a retry next time.
        LOG.warning(EOS_UNREACHABLE_MSG)
        self._force_sync = True
        return
    db_tenants = db_lib.get_tenants()
    # Delete tenants that are in EOS, but not in the database
    tenants_to_delete = frozenset(eos_tenants.keys()).difference(
        db_tenants.keys())
    if tenants_to_delete:
        try:
            self._rpc.delete_tenant_bulk(tenants_to_delete)
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True
            return
    # None of the commands have failed till now. But if subsequent
    # operations fail, then force_sync is set to true
    self._force_sync = False
    # To support shared networks, split the sync loop in two parts:
    # In first loop, delete unwanted VM and networks and update networks
    # In second loop, update VMs. This is done to ensure that networks
    # for all tenants are updated before VMs are updated
    vms_to_update = {}
    for tenant in db_tenants:
        db_nets = db_lib.get_networks(tenant)
        db_vms = db_lib.get_vms(tenant)
        eos_nets = self._get_eos_networks(eos_tenants, tenant)
        eos_vms = self._get_eos_vms(eos_tenants, tenant)
        db_nets_key_set = frozenset(db_nets.keys())
        db_vms_key_set = frozenset(db_vms.keys())
        eos_nets_key_set = frozenset(eos_nets.keys())
        eos_vms_key_set = frozenset(eos_vms.keys())
        # Find the networks that are present on EOS, but not in
        # Neutron DB
        nets_to_delete = eos_nets_key_set.difference(db_nets_key_set)
        # Find the VMs that are present on EOS, but not in Neutron DB
        vms_to_delete = eos_vms_key_set.difference(db_vms_key_set)
        # Find the Networks that are present in Neutron DB, but not
        # on EOS
        nets_to_update = db_nets_key_set.difference(eos_nets_key_set)
        # Find the VMs that are present in Neutron DB, but not on EOS
        vms_to_update[tenant] = db_vms_key_set.difference(eos_vms_key_set)
        try:
            if vms_to_delete:
                self._rpc.delete_vm_bulk(tenant, vms_to_delete)
            if nets_to_delete:
                self._rpc.delete_network_bulk(tenant, nets_to_delete)
            if nets_to_update:
                # Create a dict of networks keyed by id.
                neutron_nets = dict(
                    (network['id'], network)
                    for network in self._ndb.get_all_networks_for_tenant(
                        tenant))
                # Name/shared come from Neutron when the net is still
                # known there; fall back to ''/False for DB-only nets.
                networks = [{
                    'network_id': net_id,
                    'segmentation_id':
                        db_nets[net_id]['segmentationTypeId'],
                    'network_name':
                        neutron_nets.get(net_id, {'name': ''})['name'],
                    'shared':
                        neutron_nets.get(net_id,
                                         {'shared': False})['shared'],
                } for net_id in nets_to_update]
                self._rpc.create_network_bulk(tenant, networks)
        except arista_exc.AristaRpcError:
            # Best-effort per tenant: log, flag a resync, keep going.
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True
    # Now update the VMs
    for tenant in vms_to_update:
        if not vms_to_update[tenant]:
            continue
        try:
            # Filter the ports to only the vms that we are interested
            # in.
            vm_ports = [
                port for port in self._ndb.get_all_ports_for_tenant(tenant)
                if port['device_id'] in vms_to_update[tenant]
            ]
            if vm_ports:
                db_vms = db_lib.get_vms(tenant)
                self._rpc.create_vm_port_bulk(tenant, vm_ports, db_vms)
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True
def synchronize(self):
    """Sends data to EOS which differs from neutron DB.

    Computes set differences between Neutron DB state and EOS state,
    then deletes stale tenants/networks/VMs from EOS and pushes missing
    networks/VM ports via bulk RPCs.  Any RPC failure sets
    self._force_sync so a later cycle retries.
    """
    LOG.info(_LI('Syncing Neutron <-> EOS'))
    try:
        # Register with EOS to ensure that it has correct credentials
        self._rpc.register_with_eos()
        eos_tenants = self._rpc.get_tenants()
    except arista_exc.AristaRpcError:
        # Cannot talk to EOS at all: give up on this cycle.
        LOG.warning(EOS_UNREACHABLE_MSG)
        self._force_sync = True
        return
    db_tenants = db_lib.get_tenants()
    # Delete tenants that are in EOS, but not in the database
    tenants_to_delete = frozenset(eos_tenants.keys()).difference(
        db_tenants.keys())
    if tenants_to_delete:
        try:
            self._rpc.delete_tenant_bulk(tenants_to_delete)
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True
            return
    # None of the commands have failed till now. But if subsequent
    # operations fail, then force_sync is set to true
    self._force_sync = False
    # To support shared networks, split the sync loop in two parts:
    # In first loop, delete unwanted VM and networks and update networks
    # In second loop, update VMs. This is done to ensure that networks
    # for all tenants are updated before VMs are updated
    vms_to_update = {}
    for tenant in db_tenants:
        db_nets = db_lib.get_networks(tenant)
        db_vms = db_lib.get_vms(tenant)
        eos_nets = self._get_eos_networks(eos_tenants, tenant)
        eos_vms = self._get_eos_vms(eos_tenants, tenant)
        db_nets_key_set = frozenset(db_nets.keys())
        db_vms_key_set = frozenset(db_vms.keys())
        eos_nets_key_set = frozenset(eos_nets.keys())
        eos_vms_key_set = frozenset(eos_vms.keys())
        # Find the networks that are present on EOS, but not in
        # Neutron DB
        nets_to_delete = eos_nets_key_set.difference(db_nets_key_set)
        # Find the VMs that are present on EOS, but not in Neutron DB
        vms_to_delete = eos_vms_key_set.difference(db_vms_key_set)
        # Find the Networks that are present in Neutron DB, but not
        # on EOS
        nets_to_update = db_nets_key_set.difference(eos_nets_key_set)
        # Find the VMs that are present in Neutron DB, but not on EOS
        vms_to_update[tenant] = db_vms_key_set.difference(eos_vms_key_set)
        try:
            if vms_to_delete:
                self._rpc.delete_vm_bulk(tenant, vms_to_delete)
            if nets_to_delete:
                self._rpc.delete_network_bulk(tenant, nets_to_delete)
            if nets_to_update:
                # Create a dict of networks keyed by id.
                neutron_nets = dict(
                    (network['id'], network)
                    for network in
                    self._ndb.get_all_networks_for_tenant(tenant)
                )
                # Fall back to ''/False when the network exists in our
                # DB but Neutron no longer reports it.
                networks = [{
                    'network_id': net_id,
                    'segmentation_id':
                        db_nets[net_id]['segmentationTypeId'],
                    'network_name':
                        neutron_nets.get(net_id, {'name': ''})['name'],
                    'shared':
                        neutron_nets.get(net_id,
                                         {'shared': False})['shared'],
                } for net_id in nets_to_update
                ]
                self._rpc.create_network_bulk(tenant, networks)
        except arista_exc.AristaRpcError:
            # Per-tenant failures don't abort the loop; flag a resync.
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True
    # Now update the VMs
    for tenant in vms_to_update:
        if not vms_to_update[tenant]:
            continue
        try:
            # Filter the ports to only the vms that we are interested
            # in.
            vm_ports = [
                port for port in self._ndb.get_all_ports_for_tenant(
                    tenant)
                if port['device_id'] in vms_to_update[tenant]
            ]
            if vm_ports:
                db_vms = db_lib.get_vms(tenant)
                self._rpc.create_vm_port_bulk(tenant, vm_ports, db_vms)
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True
def synchronize(self):
    """Sends data to EOS which differs from neutron DB.

    Like the VM-only variant, but instance-aware: tracks VMs, routers,
    and baremetal instances separately, and syncs network segments and
    port bindings.  All RPCs pass sync=True; failures set
    self._force_sync so the next cycle retries.
    """
    LOG.info(_LI('Syncing Neutron <-> EOS'))
    try:
        # Register with EOS to ensure that it has correct credentials
        self._rpc.register_with_eos(sync=True)
        eos_tenants = self._rpc.get_tenants()
    except arista_exc.AristaRpcError:
        LOG.warning(constants.EOS_UNREACHABLE_MSG)
        self._force_sync = True
        return
    db_tenants = db_lib.get_tenants()
    # Delete tenants that are in EOS, but not in the database
    tenants_to_delete = frozenset(eos_tenants.keys()).difference(
        db_tenants)
    if tenants_to_delete:
        try:
            self._rpc.delete_tenant_bulk(tenants_to_delete, sync=True)
        except arista_exc.AristaRpcError:
            LOG.warning(constants.EOS_UNREACHABLE_MSG)
            self._force_sync = True
            return
    # None of the commands have failed till now. But if subsequent
    # operations fail, then force_sync is set to true
    self._force_sync = False
    # Get Baremetal port switch_bindings, if any
    port_profiles = db_lib.get_all_portbindings()
    # To support shared networks, split the sync loop in two parts:
    # In first loop, delete unwanted VM and networks and update networks
    # In second loop, update VMs. This is done to ensure that networks
    # for all tenants are updated before VMs are updated
    instances_to_update = {}
    for tenant in db_tenants:
        # Here db_nets comes from Neutron itself (keyed by id), unlike
        # the older variant that read networks from our own DB.
        db_nets = {n['id']: n
                   for n in self._ndb.get_all_networks_for_tenant(tenant)}
        db_instances = db_lib.get_instances(tenant)
        eos_nets = self._get_eos_networks(eos_tenants, tenant)
        eos_vms, eos_bms, eos_routers = self._get_eos_vms(eos_tenants,
                                                          tenant)
        db_nets_key_set = frozenset(db_nets.keys())
        db_instances_key_set = frozenset(db_instances)
        eos_nets_key_set = frozenset(eos_nets.keys())
        eos_vms_key_set = frozenset(eos_vms.keys())
        eos_routers_key_set = frozenset(eos_routers.keys())
        eos_bms_key_set = frozenset(eos_bms.keys())
        # Create a candidate list by incorporating all instances
        eos_instances_key_set = (eos_vms_key_set | eos_routers_key_set |
                                 eos_bms_key_set)
        # Find the networks that are present on EOS, but not in
        # Neutron DB
        nets_to_delete = eos_nets_key_set.difference(db_nets_key_set)
        # Find the VMs that are present on EOS, but not in Neutron DB
        instances_to_delete = eos_instances_key_set.difference(
            db_instances_key_set)
        # Partition the stale instances back into their EOS categories
        # so each one is deleted with the matching RPC.
        vms_to_delete = [
            vm for vm in eos_vms_key_set if vm in instances_to_delete]
        routers_to_delete = [
            r for r in eos_routers_key_set if r in instances_to_delete]
        bms_to_delete = [
            b for b in eos_bms_key_set if b in instances_to_delete]
        # Find the Networks that are present in Neutron DB, but not
        # on EOS
        nets_to_update = db_nets_key_set.difference(eos_nets_key_set)
        # Find the VMs that are present in Neutron DB, but not on EOS
        instances_to_update[tenant] = db_instances_key_set.difference(
            eos_instances_key_set)
        try:
            if vms_to_delete:
                self._rpc.delete_vm_bulk(tenant, vms_to_delete, sync=True)
            if routers_to_delete:
                self._rpc.delete_instance_bulk(
                    tenant, routers_to_delete,
                    constants.InstanceType.ROUTER, sync=True)
            if bms_to_delete:
                self._rpc.delete_instance_bulk(
                    tenant, bms_to_delete,
                    constants.InstanceType.BAREMETAL, sync=True)
            if nets_to_delete:
                self._rpc.delete_network_bulk(tenant, nets_to_delete,
                                              sync=True)
            if nets_to_update:
                networks = [{
                    'network_id': net_id,
                    'network_name':
                        db_nets.get(net_id, {'name': ''})['name'],
                    'shared':
                        db_nets.get(net_id, {'shared': False})['shared'],
                    'segments':
                        self._ndb.get_all_network_segments(net_id),
                } for net_id in nets_to_update
                ]
                self._rpc.create_network_bulk(tenant, networks, sync=True)
        except arista_exc.AristaRpcError:
            # Per-tenant failures do not abort the loop; flag a resync.
            LOG.warning(constants.EOS_UNREACHABLE_MSG)
            self._force_sync = True
    # Now update the instances
    for tenant in instances_to_update:
        if not instances_to_update[tenant]:
            continue
        try:
            # Filter the ports to only the vms that we are interested
            # in.
            # NOTE(review): despite the comment above, the visible code
            # collects ALL of the tenant's ports, not just those of the
            # instances in instances_to_update — confirm whether
            # filtering happens inside db_lib.get_instance_ports or
            # create_instance_bulk.
            ports_of_interest = {}
            for port in self._ndb.get_all_ports_for_tenant(tenant):
                ports_of_interest.update(
                    self._port_dict_representation(port))
            if ports_of_interest:
                instance_ports = db_lib.get_instance_ports(
                    tenant, self._manage_fabric, self._managed_physnets)
                if instance_ports:
                    self._rpc.create_instance_bulk(tenant,
                                                   ports_of_interest,
                                                   instance_ports,
                                                   port_profiles,
                                                   sync=True)
        except arista_exc.AristaRpcError:
            LOG.warning(constants.EOS_UNREACHABLE_MSG)
            self._force_sync = True