def update_port_precommit(self, context):
    """Update the name of a given port.

    At the moment we only support port name change.
    Any other change to port is not supported at this time.
    We do not store the port names, therefore, no DB store
    action is performed here.
    """
    new_port = context.current
    orig_port = context.original
    if new_port['name'] != orig_port['name']:
        LOG.info(_LI('Port name changed to %s'), new_port['name'])
    # Fix: dropped the redundant second `new_port = context.current`
    # assignment present in the original.
    device_id = new_port['device_id']
    host = context.host

    # device_id and device_owner are set on VM boot
    port_id = new_port['id']
    network_id = new_port['network_id']
    tenant_id = new_port['tenant_id'] or INTERNAL_TENANT_ID
    with self.eos_sync_lock:
        port_provisioned = db_lib.is_port_provisioned(port_id)
        if not port_provisioned:
            # Create a new port in the DB
            db_lib.remember_tenant(tenant_id)
            db_lib.remember_vm(device_id, host, port_id,
                               network_id, tenant_id)
        else:
            # Port exists in the DB. Update it
            db_lib.update_port(device_id, host, port_id,
                               network_id, tenant_id)
def test_tenant_is_removed(self):
    """A tenant that has been forgotten must no longer be provisioned."""
    tenant = 'test'
    db_lib.remember_tenant(tenant)
    db_lib.forget_tenant(tenant)
    self.assertFalse(db_lib.is_tenant_provisioned(tenant),
                     'The Tenant should be deleted')
def create_port_precommit(self, context):
    """Remember the information about a VM and its ports

    A VM information, along with the physical host information
    is saved.
    """
    port = context.current
    host = context.host
    device_id = port['device_id']
    device_owner = port['device_owner']

    pretty_log("create_port_precommit:", port)

    # Probe ports are never tracked.
    if device_owner == 'compute:probe':
        return

    # device_id and device_owner are both set on VM boot; without a
    # bound host there is nothing to record. Early-return guard form
    # is equivalent to the original nested `if host and is_vm_boot:`.
    if not (host and device_id and device_owner):
        return

    port_id = port['id']
    network_id = port['network_id']
    tenant_id = port['tenant_id'] or INTERNAL_TENANT_ID
    # Use the tenant Id of the network owner (covers shared networks).
    tenant_id = self._network_owner_tenant(context, network_id, tenant_id)
    with self.eos_sync_lock:
        # If network does not exist under this tenant,
        # it may be a shared network. Get shared network owner Id
        if not self._network_provisioned(tenant_id, network_id):
            # Ignore this request if network is not provisioned
            return
        db_lib.remember_tenant(tenant_id)
        db_lib.remember_vm(device_id, host, port_id,
                           network_id, tenant_id)
def create_port_precommit(self, context):
    """Remember the information about a VM and its ports

    A VM information, along with the physical host information
    is saved.
    """
    # Fix: corrected docstring typo "infromation" -> "information".
    port = context.current
    device_id = port['device_id']
    device_owner = port['device_owner']
    host = context.host

    # device_id and device_owner are set on VM boot
    is_vm_boot = device_id and device_owner
    if host and is_vm_boot:
        port_id = port['id']
        network_id = port['network_id']
        tenant_id = port['tenant_id']
        if not tenant_id:
            # Fall back to the request context's tenant when the port
            # itself carries no tenant id.
            tenant_id = context._plugin_context.tenant_id
        with self.eos_sync_lock:
            # If network does not exist under this tenant,
            # it may be a shared network. Get shared network owner Id
            net_provisioned = (
                db_lib.is_network_provisioned(tenant_id, network_id) or
                self.ndb.get_shared_network_owner_id(network_id))
            if not net_provisioned:
                # Ignore this request if network is not provisioned
                return
            db_lib.remember_tenant(tenant_id)
            db_lib.remember_vm(device_id, host, port_id,
                               network_id, tenant_id)
def create_port_precommit(self, context):
    """Remember the information about a VM and its ports

    A VM information, along with the physical host information
    is saved.
    """
    # Fix: corrected docstring typo "infromation" -> "information".
    port = context.current
    device_id = port["device_id"]
    device_owner = port["device_owner"]
    host = context.host

    # device_id and device_owner are set on VM boot
    is_vm_boot = device_id and device_owner
    if host and is_vm_boot:
        port_id = port["id"]
        network_id = port["network_id"]
        tenant_id = port["tenant_id"]
        if not tenant_id:
            # Fall back to the request context's tenant when the port
            # itself carries no tenant id.
            tenant_id = context._plugin_context.tenant_id
        with self.eos_sync_lock:
            if not db_lib.is_network_provisioned(tenant_id, network_id):
                # Ignore this request if network is not provisioned
                return
            db_lib.remember_tenant(tenant_id)
            db_lib.remember_vm(device_id, host, port_id,
                               network_id, tenant_id)
def create_port_precommit(self, context):
    """Remember the information about a VM and its ports

    A VM information, along with the physical host information
    is saved.
    """
    # Fix: corrected docstring typo "infromation" -> "information".
    port = context.current
    device_id = port['device_id']
    device_owner = port['device_owner']
    host = context.host

    # device_id and device_owner are set on VM boot
    is_vm_boot = device_id and device_owner
    if host and is_vm_boot:
        port_id = port['id']
        network_id = port['network_id']
        tenant_id = port['tenant_id']
        if not tenant_id:
            # Fall back to the request context's tenant when the port
            # itself carries no tenant id.
            tenant_id = context._plugin_context.tenant_id
        with self.eos_sync_lock:
            # If network does not exist under this tenant,
            # it may be a shared network. Get shared network owner Id
            net_provisioned = (
                db_lib.is_network_provisioned(tenant_id, network_id) or
                self.ndb.get_shared_network_owner_id(network_id)
            )
            if not net_provisioned:
                # Ignore this request if network is not provisioned
                return
            db_lib.remember_tenant(tenant_id)
            db_lib.remember_vm(device_id, host, port_id,
                               network_id, tenant_id)
def test_remembers_multiple_tenants(self):
    """Every remembered tenant must be counted as provisioned."""
    expected_num_tenants = 100
    # Remember tenants id0 .. id99 directly, without building an
    # intermediate list first.
    for idx in range(expected_num_tenants):
        db_lib.remember_tenant('id%s' % idx)
    num_tenants_provisioned = db_lib.num_provisioned_tenants()
    self.assertEqual(expected_num_tenants, num_tenants_provisioned,
                     'There should be %d tenants, not %d' %
                     (expected_num_tenants, num_tenants_provisioned))
def create_network_precommit(self, context):
    """Remember the tenant, and network information."""
    network = context.current
    segments = context.network_segments
    tenant_id = network['tenant_id']
    net_id = network['id']
    # Only the first segment's segmentation id is recorded.
    vlan_id = segments[0]['segmentation_id']
    with self.eos_sync_lock:
        db_lib.remember_tenant(tenant_id)
        db_lib.remember_network(tenant_id, net_id, vlan_id)
def create_network_precommit(self, context):
    """Remember the tenant, and network information."""
    network = context.current
    segments = context.network_segments
    # Only VLAN networks are tracked; anything else is ignored.
    if segments[0][driver_api.NETWORK_TYPE] != p_const.TYPE_VLAN:
        return
    net_id = network["id"]
    # Fall back to the request context's tenant when the network
    # carries no tenant id (same truthiness test as the original).
    tenant_id = network["tenant_id"] or context._plugin_context.tenant_id
    seg_id = segments[0]["segmentation_id"]
    with self.eos_sync_lock:
        db_lib.remember_tenant(tenant_id)
        db_lib.remember_network(tenant_id, net_id, seg_id)
def create_network_precommit(self, context):
    """Remember the tenant, and network information."""
    network = context.current
    segments = context.network_segments
    net_type = segments[0][driver_api.NETWORK_TYPE]
    if net_type != p_const.TYPE_VLAN:
        # If network type is not VLAN, do nothing
        return
    net_id = network['id']
    # Networks without a tenant are booked under the internal tenant.
    owner_id = network['tenant_id'] or INTERNAL_TENANT_ID
    vlan_id = segments[0]['segmentation_id']
    with self.eos_sync_lock:
        db_lib.remember_tenant(owner_id)
        db_lib.remember_network(owner_id, net_id, vlan_id)
def create_network_precommit(self, context):
    """Remember the tenant, and network information."""
    network = context.current
    segments = context.network_segments
    # Non-VLAN networks are not handled by this driver.
    if segments[0][driver_api.NETWORK_TYPE] != p_const.TYPE_VLAN:
        return
    net_id = network['id']
    # Fall back to the request context's tenant when the network
    # carries no tenant id (same truthiness test as the original).
    owner_id = network['tenant_id'] or context._plugin_context.tenant_id
    seg_id = segments[0]['segmentation_id']
    with self.eos_sync_lock:
        db_lib.remember_tenant(owner_id)
        db_lib.remember_network(owner_id, net_id, seg_id)
def test_synchronize_required(self):
    """Tests whether synchronize() sends the right commands.

    This test verifies a scenario when the sync is required.
    """
    # EOS reports timestamp '12345' while the service cached '0' --
    # the mismatch is what forces a full synchronization.
    region_updated_time = {
        'regionName': 'RegionOne',
        'regionTimestamp': '12345'
    }
    self.rpc.get_region_updated_time.return_value = region_updated_time
    self.sync_service._region_updated_time = {
        'regionName': 'RegionOne',
        'regionTimestamp': '0',
    }

    # One tenant/network pair in the local DB; EOS knows no tenants,
    # so the sync must push this network.
    tenant_id = 'tenant-1'
    network_id = 'net-1'
    segmentation_id = 42
    db_lib.remember_tenant(tenant_id)
    db_lib.remember_network(tenant_id, network_id, segmentation_id)

    self.rpc.get_tenants.return_value = {}

    self.rpc.sync_start.return_value = True
    self.rpc.sync_end.return_value = True

    self.sync_service.do_synchronize()

    # Exact, ordered RPC sequence expected for a required sync.
    expected_calls = [
        mock.call.get_region_updated_time(),
        mock.call.sync_start(),
        mock.call.register_with_eos(sync=True),
        mock.call.get_tenants(),
        mock.call.create_network_bulk(
            tenant_id,
            [{'network_id': network_id,
              'segmentation_id': segmentation_id,
              'network_name': '',
              'shared': False}],
            sync=True),
        mock.call.sync_end(),
        mock.call.get_region_updated_time()
    ]

    assert self.rpc.mock_calls == expected_calls

    # Clean up the shared DB so other tests start from a blank state.
    db_lib.forget_network(tenant_id, network_id)
    db_lib.forget_tenant(tenant_id)
def create_network_precommit(self, context):
    """Remember the tenant, and network information."""
    network = context.current
    segments = context.network_segments
    # Hierarchical port binding is not supported by CVX, only
    # allow VLAN network type in that case.
    hpb = self.rpc.hpb_supported()
    if not hpb and segments[0][driver_api.NETWORK_TYPE] != p_const.TYPE_VLAN:
        return
    net_id = network['id']
    # Networks without a tenant are booked under the internal tenant.
    owner_id = network['tenant_id'] or INTERNAL_TENANT_ID
    with self.eos_sync_lock:
        db_lib.remember_tenant(owner_id)
        # Record every segment of the network, not just the first.
        for seg in segments:
            db_lib.remember_network_segment(
                owner_id, net_id, seg.get('segmentation_id'), seg.get('id'))
def create_port_precommit(self, context):
    """Remember the information about a VM and its ports

    A VM information, along with the physical host information
    is saved.
    """
    # Fix: corrected docstring typo "infromation" -> "information".
    port = context.current
    device_id = port['device_id']
    device_owner = port['device_owner']
    host = context.host

    # device_id and device_owner are set on VM boot
    is_vm_boot = device_id and device_owner
    if host and is_vm_boot:
        port_id = port['id']
        network_id = port['network_id']
        tenant_id = port['tenant_id']
        with self.eos_sync_lock:
            db_lib.remember_tenant(tenant_id)
            db_lib.remember_vm(device_id, host, port_id,
                               network_id, tenant_id)
def test_synchronize_one_network(self): """Test to ensure that only the required resources are sent to EOS.""" # Store two tenants in a db and a single tenant in EOS. # The sync should send details of the second tenant to EOS tenant_1_id = 'tenant-1' tenant_1_net_1_id = 'ten-1-net-1' tenant_1_net_1_seg_id = 11 db_lib.remember_tenant(tenant_1_id) db_lib.remember_network(tenant_1_id, tenant_1_net_1_id, tenant_1_net_1_seg_id) tenant_2_id = 'tenant-2' tenant_2_net_1_id = 'ten-2-net-1' tenant_2_net_1_seg_id = 21 db_lib.remember_tenant(tenant_2_id) db_lib.remember_network(tenant_2_id, tenant_2_net_1_id, tenant_2_net_1_seg_id) self.rpc.get_tenants.return_value = { tenant_1_id: { 'tenantVmInstances': {}, 'tenantNetworks': { tenant_1_net_1_id: { 'networkId': tenant_1_net_1_id, 'shared': False, 'networkName': 'Net1', 'segmenationType': 'vlan', 'segmentationTypeId': tenant_1_net_1_seg_id, } } } } self.sync_service.do_synchronize() expected_calls = [ mock.call.get_region_updated_time(), mock.call._run_openstack_cmds(['sync start']), mock.call.register_with_eos(), mock.call.get_tenants(), mock.call.create_network_bulk(tenant_2_id, [{ 'network_id': tenant_2_net_1_id, 'segmentation_id': tenant_2_net_1_seg_id, 'network_name': '', 'shared': False }]), mock.call._run_openstack_cmds(['sync end']), mock.call.get_region_updated_time() ] self.assertTrue( self.rpc.mock_calls == expected_calls, "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, )) db_lib.forget_network(tenant_1_id, tenant_1_net_1_id) db_lib.forget_network(tenant_2_id, tenant_2_net_1_id) db_lib.forget_tenant(tenant_1_id) db_lib.forget_tenant(tenant_2_id)
def test_synchronize_all_networks(self): """Test to ensure that only the required resources are sent to EOS.""" # Store two tenants in a db and none on EOS. # The sync should send details of all tenants to EOS tenant_1_id = u'tenant-1' tenant_1_net_1_id = u'ten-1-net-1' tenant_1_net_1_seg_id = 11 db_lib.remember_tenant(tenant_1_id) db_lib.remember_network(tenant_1_id, tenant_1_net_1_id, tenant_1_net_1_seg_id) tenant_2_id = u'tenant-2' tenant_2_net_1_id = u'ten-2-net-1' tenant_2_net_1_seg_id = 21 db_lib.remember_tenant(tenant_2_id) db_lib.remember_network(tenant_2_id, tenant_2_net_1_id, tenant_2_net_1_seg_id) self.rpc.get_tenants.return_value = {} self.sync_service.do_synchronize() expected_calls = [ mock.call.get_region_updated_time(), mock.call._run_openstack_cmds(['sync start']), mock.call.register_with_eos(), mock.call.get_tenants(), mock.call.create_network_bulk( tenant_1_id, [{'network_id': tenant_1_net_1_id, 'segmentation_id': tenant_1_net_1_seg_id, 'network_name': '', 'shared': False}]), mock.call.create_network_bulk( tenant_2_id, [{'network_id': tenant_2_net_1_id, 'segmentation_id': tenant_2_net_1_seg_id, 'network_name': '', 'shared': False}]), mock.call._run_openstack_cmds(['sync end']), mock.call.get_region_updated_time() ] # The create_network_bulk() can be called in different order. So split # it up. The first part checks if the initial set of methods are # invoked. self.assertTrue(self.rpc.mock_calls[:4] == expected_calls[:4], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, ) ) # Check if tenant 1 networks are created. It must be one of the two # methods. self.assertTrue(self.rpc.mock_calls[4] in expected_calls[4:6], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, ) ) # Check if tenant 2 networks are created. It must be one of the two # methods. 
self.assertTrue(self.rpc.mock_calls[5] in expected_calls[4:6], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, ) ) # Check if the sync end methods are invoked. self.assertTrue(self.rpc.mock_calls[6:8] == expected_calls[6:8], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, ) ) db_lib.forget_network(tenant_1_id, tenant_1_net_1_id) db_lib.forget_network(tenant_2_id, tenant_2_net_1_id) db_lib.forget_tenant(tenant_1_id) db_lib.forget_tenant(tenant_2_id)
def test_synchronize_one_network(self): """Test to ensure that only the required resources are sent to EOS.""" # Store two tenants in a db and a single tenant in EOS. # The sync should send details of the second tenant to EOS tenant_1_id = 'tenant-1' tenant_1_net_1_id = 'ten-1-net-1' tenant_1_net_1_seg_id = 11 db_lib.remember_tenant(tenant_1_id) db_lib.remember_network(tenant_1_id, tenant_1_net_1_id, tenant_1_net_1_seg_id) tenant_2_id = 'tenant-2' tenant_2_net_1_id = 'ten-2-net-1' tenant_2_net_1_seg_id = 21 db_lib.remember_tenant(tenant_2_id) db_lib.remember_network(tenant_2_id, tenant_2_net_1_id, tenant_2_net_1_seg_id) self.rpc.get_tenants.return_value = { tenant_1_id: { 'tenantVmInstances': {}, 'tenantNetworks': { tenant_1_net_1_id: { 'networkId': tenant_1_net_1_id, 'shared': False, 'networkName': 'Net1', 'segmenationType': 'vlan', 'segmentationTypeId': tenant_1_net_1_seg_id, } } } } self.sync_service.do_synchronize() expected_calls = [ mock.call.get_region_updated_time(), mock.call._run_openstack_cmds(['sync start']), mock.call.register_with_eos(), mock.call.get_tenants(), mock.call.create_network_bulk( tenant_2_id, [{'network_id': tenant_2_net_1_id, 'segmentation_id': tenant_2_net_1_seg_id, 'network_name': '', 'shared': False}]), mock.call._run_openstack_cmds(['sync end']), mock.call.get_region_updated_time() ] self.assertTrue(self.rpc.mock_calls == expected_calls, "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, ) ) db_lib.forget_network(tenant_1_id, tenant_1_net_1_id) db_lib.forget_network(tenant_2_id, tenant_2_net_1_id) db_lib.forget_tenant(tenant_1_id) db_lib.forget_tenant(tenant_2_id)
def update_port_precommit(self, context):
    """Update the name of a given port.

    At the moment we only support port name change.
    Any other change to port is not supported at this time.
    We do not store the port names, therefore, no DB store
    action is performed here.
    """
    new_port = context.current
    orig_port = context.original
    if new_port['name'] != orig_port['name']:
        LOG.info(_LI('Port name changed to %s'), new_port['name'])
    device_id = new_port['device_id']
    host = context.host

    pretty_log("update_port_precommit: new", new_port)
    pretty_log("update_port_precommit: orig", orig_port)

    # Probe ports are never tracked.
    if new_port['device_owner'] == 'compute:probe':
        return

    # Check if it is port migration case
    if self._handle_port_migration_precommit(context):
        return

    # Check if the port is part of managed physical network
    seg_info = self._bound_segments(context)
    if not seg_info:
        # Ignoring the update as the port is not managed by
        # arista mechanism driver.
        return

    # device_id and device_owner are set on VM boot
    port_id = new_port['id']
    network_id = new_port['network_id']
    tenant_id = new_port['tenant_id'] or INTERNAL_TENANT_ID
    # Ensure that we use tenant Id for the network owner
    tenant_id = self._network_owner_tenant(context, network_id,
                                           tenant_id)

    if not self._network_provisioned(tenant_id, network_id,
                                     seg_info[driver_api.SEGMENTATION_ID],
                                     seg_info[driver_api.ID]):
        # Without HPB support there is nothing more we can do for an
        # unknown network; with HPB, remember the segment instead.
        if not self.rpc.hpb_supported():
            LOG.info(
                _LI("Ignoring port %(port)s conntected to "
                    "%(net_id)s"), {
                    'port': port_id,
                    'net_id': network_id
                })
            return

        LOG.info(_LI("Adding %s to provisioned network database"),
                 seg_info)
        with self.eos_sync_lock:
            db_lib.remember_tenant(tenant_id)
            db_lib.remember_network_segment(
                tenant_id, network_id,
                seg_info[driver_api.SEGMENTATION_ID],
                seg_info[driver_api.ID])

    with self.eos_sync_lock:
        port_down = False
        if (new_port['device_owner'] ==
                n_const.DEVICE_OWNER_DVR_INTERFACE):
            # We care about port status only for DVR ports because
            # for DVR, a single port exists on multiple hosts. If a port
            # is no longer needed on a host then the driver gets a
            # port_update notification for that <port, host> with the
            # port status as PORT_STATUS_DOWN.
            port_down = context.status == n_const.PORT_STATUS_DOWN

        if host and not port_down:
            port_host_filter = None
            if (new_port['device_owner'] ==
                    n_const.DEVICE_OWNER_DVR_INTERFACE):
                # <port, host> uniquely identifies a DVR port. Other
                # ports are identified by just the port id
                port_host_filter = host

            port_provisioned = db_lib.is_port_provisioned(
                port_id, port_host_filter)
            if not port_provisioned:
                LOG.info("Remembering the port")
                # Create a new port in the DB
                db_lib.remember_tenant(tenant_id)
                db_lib.remember_vm(device_id, host, port_id,
                                   network_id, tenant_id)
            else:
                # Only write back when something we store has changed.
                if (new_port['device_id'] != orig_port['device_id'] or
                        context.host != context.original_host or
                        new_port['network_id'] != orig_port['network_id'] or
                        new_port['tenant_id'] != orig_port['tenant_id']):
                    LOG.info("Updating the port")
                    # Port exists in the DB. Update it
                    db_lib.update_port(device_id, host, port_id,
                                       network_id, tenant_id)
        else:
            # Unbound or down port does not concern us
            orig_host = context.original_host
            LOG.info("Forgetting the port on %s" % str(orig_host))
            db_lib.forget_port(port_id, orig_host)
def test_tenant_is_remembered(self):
    """A remembered tenant must show up as provisioned."""
    tenant = 'test'
    db_lib.remember_tenant(tenant)
    self.assertTrue(db_lib.is_tenant_provisioned(tenant),
                    'Tenant must be provisioned')
def test_synchronize_all_networks(self): """Test to ensure that only the required resources are sent to EOS.""" # Store two tenants in a db and none on EOS. # The sync should send details of all tenants to EOS tenant_1_id = u'tenant-1' tenant_1_net_1_id = u'ten-1-net-1' tenant_1_net_1_seg_id = 11 db_lib.remember_tenant(tenant_1_id) db_lib.remember_network(tenant_1_id, tenant_1_net_1_id, tenant_1_net_1_seg_id) tenant_2_id = u'tenant-2' tenant_2_net_1_id = u'ten-2-net-1' tenant_2_net_1_seg_id = 21 db_lib.remember_tenant(tenant_2_id) db_lib.remember_network(tenant_2_id, tenant_2_net_1_id, tenant_2_net_1_seg_id) self.rpc.get_tenants.return_value = {} self.sync_service.do_synchronize() expected_calls = [ mock.call.get_region_updated_time(), mock.call._run_openstack_cmds(['sync start']), mock.call.register_with_eos(), mock.call.get_tenants(), mock.call.create_network_bulk(tenant_1_id, [{ 'network_id': tenant_1_net_1_id, 'segmentation_id': tenant_1_net_1_seg_id, 'network_name': '', 'shared': False }]), mock.call.create_network_bulk(tenant_2_id, [{ 'network_id': tenant_2_net_1_id, 'segmentation_id': tenant_2_net_1_seg_id, 'network_name': '', 'shared': False }]), mock.call._run_openstack_cmds(['sync end']), mock.call.get_region_updated_time() ] # The create_network_bulk() can be called in different order. So split # it up. The first part checks if the initial set of methods are # invoked. self.assertTrue( self.rpc.mock_calls[:4] == expected_calls[:4], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, )) # Check if tenant 1 networks are created. It must be one of the two # methods. self.assertTrue( self.rpc.mock_calls[4] in expected_calls[4:6], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, )) # Check if tenant 2 networks are created. It must be one of the two # methods. 
self.assertTrue( self.rpc.mock_calls[5] in expected_calls[4:6], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, )) # Check if the sync end methods are invoked. self.assertTrue( self.rpc.mock_calls[6:8] == expected_calls[6:8], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, )) db_lib.forget_network(tenant_1_id, tenant_1_net_1_id) db_lib.forget_network(tenant_2_id, tenant_2_net_1_id) db_lib.forget_tenant(tenant_1_id) db_lib.forget_tenant(tenant_2_id)
def update_port_precommit(self, context):
    """Update the name of a given port.

    At the moment we only support port name change.
    Any other change to port is not supported at this time.
    We do not store the port names, therefore, no DB store
    action is performed here.
    """
    new_port = context.current
    orig_port = context.original
    if new_port['name'] != orig_port['name']:
        LOG.info(_LI('Port name changed to %s'), new_port['name'])

    # NOTE(review): redundant reassignment -- new_port already holds
    # context.current from above.
    new_port = context.current
    device_id = new_port['device_id']
    host = context.host

    pretty_log("update_port_precommit: new", new_port)
    pretty_log("update_port_precommit: orig", orig_port)

    # Probe ports are never tracked.
    if new_port['device_owner'] == 'compute:probe':
        return

    # device_id and device_owner are set on VM boot
    port_id = new_port['id']
    network_id = new_port['network_id']
    tenant_id = new_port['tenant_id'] or INTERNAL_TENANT_ID
    # Ensure that we use tenant Id for the network owner
    tenant_id = self._network_owner_tenant(context, network_id,
                                           tenant_id)

    if not self._network_provisioned(tenant_id, network_id):
        # If the Arista driver does not know about the network, ignore the
        # port.
        LOG.info(_LI("Ignoring port connected to %s"), network_id)
        return

    with self.eos_sync_lock:
        port_down = False
        if(new_port['device_owner'] ==
           n_const.DEVICE_OWNER_DVR_INTERFACE):
            # We care about port status only for DVR ports because
            # for DVR, a single port exists on multiple hosts. If a port
            # is no longer needed on a host then the driver gets a
            # port_update notification for that <port, host> with the
            # port status as PORT_STATUS_DOWN.
            port_down = context.status == n_const.PORT_STATUS_DOWN

        if host and not port_down:
            port_host_filter = None
            if(new_port['device_owner'] ==
               n_const.DEVICE_OWNER_DVR_INTERFACE):
                # <port, host> uniquely identifies a DVR port. Other
                # ports are identified by just the port id
                port_host_filter = host

            port_provisioned = db_lib.is_port_provisioned(
                port_id, port_host_filter)
            if not port_provisioned:
                LOG.info("Remembering the port")
                # Create a new port in the DB
                db_lib.remember_tenant(tenant_id)
                db_lib.remember_vm(device_id, host, port_id,
                                   network_id, tenant_id)
            else:
                # Only write back when something we store has changed.
                if(new_port['device_id'] != orig_port['device_id'] or
                   context.host != context.original_host or
                   new_port['network_id'] != orig_port['network_id'] or
                   new_port['tenant_id'] != orig_port['tenant_id']):
                    LOG.info("Updating the port")
                    # Port exists in the DB. Update it
                    db_lib.update_port(device_id, host, port_id,
                                       network_id, tenant_id)
        else:
            # Unbound or down port does not concern us
            orig_host = context.original_host
            LOG.info("Forgetting the port on %s" % str(orig_host))
            db_lib.forget_port(port_id, orig_host)