def test_create_and_delete_ports(self):
    """Provision one port per VM, check the DB count, then remove them.

    After the create_port_precommit calls the provisioning database
    should hold one record per VM; after delete_port_precommit it
    should be empty again.
    """
    tenant_id = 'ten-1'
    network_id = 'net1-id'
    segmentation_id = 1001
    vms = ['vm1', 'vm2', 'vm3']

    net_ctx = self._get_network_context(tenant_id, network_id,
                                        segmentation_id)
    self.drv.create_network_precommit(net_ctx)

    # Provision a port for every VM on the network.
    for device in vms:
        ctx = self._get_port_context(tenant_id, network_id, device,
                                     net_ctx)
        self.drv.create_port_precommit(ctx)

    provisioned_vms = len(db_lib.get_vms(tenant_id))
    expected_vms = len(vms)
    self.assertEqual(
        expected_vms, provisioned_vms, 'There should be %d '
        'hosts, not %d' % (expected_vms, provisioned_vms))

    # Now test the delete ports
    for device in vms:
        ctx = self._get_port_context(tenant_id, network_id, device,
                                     net_ctx)
        self.drv.delete_port_precommit(ctx)

    provisioned_vms = len(db_lib.get_vms(tenant_id))
    expected_vms = 0
    self.assertEqual(
        expected_vms, provisioned_vms, 'There should be %d '
        'VMs, not %d' % (expected_vms, provisioned_vms))
def test_create_and_delete_ports(self):
    """Create ports for several VMs, then delete them all.

    Checks that the provisioning database reflects each VM after
    create_port_precommit and is empty after delete_port_precommit.
    """
    tenant_id = 'ten-1'
    network_id = 'net1-id'
    segmentation_id = 1001
    vms = ['vm1', 'vm2', 'vm3']
    network_context = self._get_network_context(tenant_id, network_id,
                                                segmentation_id)
    self.drv.create_network_precommit(network_context)
    for vm_id in vms:
        port_context = self._get_port_context(tenant_id, network_id,
                                              vm_id, network_context)
        self.drv.create_port_precommit(port_context)
    # Each created port should have left one VM record in the DB.
    vm_list = db_lib.get_vms(tenant_id)
    provisioned_vms = len(vm_list)
    expected_vms = len(vms)
    self.assertEqual(expected_vms, provisioned_vms,
                     'There should be %d '
                     'hosts, not %d' % (expected_vms, provisioned_vms))
    # Now test the delete ports
    for vm_id in vms:
        port_context = self._get_port_context(tenant_id, network_id,
                                              vm_id, network_context)
        self.drv.delete_port_precommit(port_context)
    # All VM records should be gone once every port is deleted.
    vm_list = db_lib.get_vms(tenant_id)
    provisioned_vms = len(vm_list)
    expected_vms = 0
    self.assertEqual(expected_vms, provisioned_vms,
                     'There should be %d '
                     'VMs, not %d' % (expected_vms, provisioned_vms))
def test_num_vm_is_valid(self):
    """Remember three VMs, forget two, and expect one record to remain."""
    tenant_id = 'test'
    network_id = '123'
    port_id = 456
    host_id = 'ubuntu1'
    vm_to_remember = ['vm1', 'vm2', 'vm3']
    vm_to_forget = ['vm2', 'vm1']

    for name in vm_to_remember:
        db_lib.remember_vm(name, host_id, port_id, network_id, tenant_id)
    for name in vm_to_forget:
        db_lib.forget_vm(name, host_id, port_id, network_id, tenant_id)

    num_vms = len(db_lib.get_vms(tenant_id))
    expected = len(vm_to_remember) - len(vm_to_forget)
    self.assertEqual(expected, num_vms,
                     'There should be %d records, '
                     'got %d records' % (expected, num_vms))
    # clean up afterwards
    db_lib.forget_vm('vm3', host_id, port_id, network_id, tenant_id)
def test_num_vm_is_valid(self):
    """Verify the VM record count after remember/forget operations.

    Three VMs are remembered and two of them forgotten; exactly one
    record should remain for the tenant.
    """
    tenant_id = 'test'
    network_id = '123'
    port_id = 456
    host_id = 'ubuntu1'
    vm_to_remember = ['vm1', 'vm2', 'vm3']
    vm_to_forget = ['vm2', 'vm1']
    for vm in vm_to_remember:
        db_lib.remember_vm(vm, host_id, port_id, network_id, tenant_id)
    for vm in vm_to_forget:
        db_lib.forget_vm(vm, host_id, port_id, network_id, tenant_id)
    # remembered minus forgotten = records left in the DB.
    num_vms = len(db_lib.get_vms(tenant_id))
    expected = len(vm_to_remember) - len(vm_to_forget)
    self.assertEqual(
        expected, num_vms,
        'There should be %d records, '
        'got %d records' % (expected, num_vms))
    # clean up afterwards
    db_lib.forget_vm('vm3', host_id, port_id, network_id, tenant_id)
def synchronize(self):
    """Sends data to EOS which differs from neutron DB.

    Computes the difference between the Neutron DB and EOS state and
    pushes only the deltas: stale tenants/networks/VMs are deleted
    from EOS, missing networks and VM ports are created.  Any RPC
    failure sets self._force_sync so a later pass retries a full sync.
    """
    LOG.info(_LI('Syncing Neutron <-> EOS'))
    try:
        # Register with EOS to ensure that it has correct credentials
        self._rpc.register_with_eos()
        eos_tenants = self._rpc.get_tenants()
    except arista_exc.AristaRpcError:
        LOG.warning(EOS_UNREACHABLE_MSG)
        self._force_sync = True
        return

    db_tenants = db_lib.get_tenants()

    # Delete tenants that are in EOS, but not in the database
    # NOTE(review): assumes get_tenants() returns a dict and never
    # None — verify against the RPC implementation.
    tenants_to_delete = frozenset(eos_tenants.keys()).difference(
        db_tenants.keys())

    if tenants_to_delete:
        try:
            self._rpc.delete_tenant_bulk(tenants_to_delete)
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True
            return

    # None of the commands have failed till now. But if subsequent
    # operations fail, then force_sync is set to true
    self._force_sync = False

    # To support shared networks, split the sync loop in two parts:
    # In first loop, delete unwanted VM and networks and update networks
    # In second loop, update VMs. This is done to ensure that networks for
    # all tenats are updated before VMs are updated
    vms_to_update = {}
    for tenant in db_tenants:
        db_nets = db_lib.get_networks(tenant)
        db_vms = db_lib.get_vms(tenant)
        eos_nets = self._get_eos_networks(eos_tenants, tenant)
        eos_vms = self._get_eos_vms(eos_tenants, tenant)
        db_nets_key_set = frozenset(db_nets.keys())
        db_vms_key_set = frozenset(db_vms.keys())
        eos_nets_key_set = frozenset(eos_nets.keys())
        eos_vms_key_set = frozenset(eos_vms.keys())

        # Find the networks that are present on EOS, but not in Neutron DB
        nets_to_delete = eos_nets_key_set.difference(db_nets_key_set)

        # Find the VMs that are present on EOS, but not in Neutron DB
        vms_to_delete = eos_vms_key_set.difference(db_vms_key_set)

        # Find the Networks that are present in Neutron DB, but not on EOS
        nets_to_update = db_nets_key_set.difference(eos_nets_key_set)

        # Find the VMs that are present in Neutron DB, but not on EOS
        vms_to_update[tenant] = db_vms_key_set.difference(eos_vms_key_set)

        # A failure here only flags force-sync and moves on to the
        # next tenant; it does not abort the whole pass.
        try:
            if vms_to_delete:
                self._rpc.delete_vm_bulk(tenant, vms_to_delete)
            if nets_to_delete:
                self._rpc.delete_network_bulk(tenant, nets_to_delete)
            if nets_to_update:
                # Create a dict of networks keyed by id.
                neutron_nets = dict(
                    (network['id'], network)
                    for network in
                    self._ndb.get_all_networks_for_tenant(tenant))
                # Missing networks default to an empty name and
                # shared=False when Neutron no longer knows them.
                networks = [{
                    'network_id': net_id,
                    'segmentation_id':
                        db_nets[net_id]['segmentationTypeId'],
                    'network_name':
                        neutron_nets.get(net_id, {'name': ''})['name'],
                    'shared':
                        neutron_nets.get(net_id,
                                         {'shared': False})['shared'],
                } for net_id in nets_to_update]
                self._rpc.create_network_bulk(tenant, networks)
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True

    # Now update the VMs
    for tenant in vms_to_update:
        if not vms_to_update[tenant]:
            continue
        try:
            # Filter the ports to only the vms that we are interested
            # in.
            vm_ports = [
                port
                for port in self._ndb.get_all_ports_for_tenant(tenant)
                if port['device_id'] in vms_to_update[tenant]
            ]
            if vm_ports:
                db_vms = db_lib.get_vms(tenant)
                self._rpc.create_vm_port_bulk(tenant, vm_ports, db_vms)
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True
def synchronize(self):
    """Sends data to EOS which differs from neutron DB.

    Pushes deltas between the Neutron DB and EOS: tenants, networks
    and VMs present only on EOS are deleted there; networks and VM
    ports present only in Neutron are created on EOS.  RPC failures
    set self._force_sync so a subsequent pass retries.
    """
    LOG.info(_LI('Syncing Neutron <-> EOS'))
    try:
        # Register with EOS to ensure that it has correct credentials
        self._rpc.register_with_eos()
        eos_tenants = self._rpc.get_tenants()
    except arista_exc.AristaRpcError:
        LOG.warning(EOS_UNREACHABLE_MSG)
        self._force_sync = True
        return

    db_tenants = db_lib.get_tenants()

    # Delete tenants that are in EOS, but not in the database
    # NOTE(review): assumes get_tenants() returns a dict and never
    # None — confirm against the RPC implementation.
    tenants_to_delete = frozenset(eos_tenants.keys()).difference(
        db_tenants.keys())

    if tenants_to_delete:
        try:
            self._rpc.delete_tenant_bulk(tenants_to_delete)
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True
            return

    # None of the commands have failed till now. But if subsequent
    # operations fail, then force_sync is set to true
    self._force_sync = False

    # To support shared networks, split the sync loop in two parts:
    # In first loop, delete unwanted VM and networks and update networks
    # In second loop, update VMs. This is done to ensure that networks for
    # all tenats are updated before VMs are updated
    vms_to_update = {}
    for tenant in db_tenants:
        db_nets = db_lib.get_networks(tenant)
        db_vms = db_lib.get_vms(tenant)
        eos_nets = self._get_eos_networks(eos_tenants, tenant)
        eos_vms = self._get_eos_vms(eos_tenants, tenant)
        db_nets_key_set = frozenset(db_nets.keys())
        db_vms_key_set = frozenset(db_vms.keys())
        eos_nets_key_set = frozenset(eos_nets.keys())
        eos_vms_key_set = frozenset(eos_vms.keys())

        # Find the networks that are present on EOS, but not in Neutron DB
        nets_to_delete = eos_nets_key_set.difference(db_nets_key_set)

        # Find the VMs that are present on EOS, but not in Neutron DB
        vms_to_delete = eos_vms_key_set.difference(db_vms_key_set)

        # Find the Networks that are present in Neutron DB, but not on EOS
        nets_to_update = db_nets_key_set.difference(eos_nets_key_set)

        # Find the VMs that are present in Neutron DB, but not on EOS
        vms_to_update[tenant] = db_vms_key_set.difference(eos_vms_key_set)

        # Per-tenant RPC failures flag force-sync but do not abort
        # the remaining tenants.
        try:
            if vms_to_delete:
                self._rpc.delete_vm_bulk(tenant, vms_to_delete)
            if nets_to_delete:
                self._rpc.delete_network_bulk(tenant, nets_to_delete)
            if nets_to_update:
                # Create a dict of networks keyed by id.
                neutron_nets = dict(
                    (network['id'], network)
                    for network in
                    self._ndb.get_all_networks_for_tenant(tenant)
                )
                # Networks Neutron no longer knows fall back to an
                # empty name and shared=False.
                networks = [{
                    'network_id': net_id,
                    'segmentation_id':
                        db_nets[net_id]['segmentationTypeId'],
                    'network_name':
                        neutron_nets.get(net_id, {'name': ''})['name'],
                    'shared':
                        neutron_nets.get(net_id,
                                         {'shared': False})['shared'],
                } for net_id in nets_to_update
                ]
                self._rpc.create_network_bulk(tenant, networks)
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True

    # Now update the VMs
    for tenant in vms_to_update:
        if not vms_to_update[tenant]:
            continue
        try:
            # Filter the ports to only the vms that we are interested
            # in.
            vm_ports = [
                port
                for port in self._ndb.get_all_ports_for_tenant(
                    tenant)
                if port['device_id'] in vms_to_update[tenant]
            ]
            if vm_ports:
                db_vms = db_lib.get_vms(tenant)
                self._rpc.create_vm_port_bulk(tenant, vm_ports, db_vms)
        except arista_exc.AristaRpcError:
            LOG.warning(EOS_UNREACHABLE_MSG)
            self._force_sync = True