def run(self, **kwargs):
    """Load migration filters for instances and images.

    Reads the YAML filter file configured at ``migrate.filter_path`` and
    extracts the per-resource search options.

    :returns: dict with keys ``search_opts`` (instances filter or ``None``
              when absent) and ``search_opts_img`` (images filter, ``{}``
              when absent).
    """
    search_opts, search_opts_img = None, {}
    filter_path = self.cfg.migrate.filter_path
    # Read the file once instead of twice (the original read it for the
    # truthiness check and again for the contents).
    filter_config = utl.read_yaml_file(filter_path)
    if filter_config:
        if utl.INSTANCES_TYPE in filter_config:
            search_opts = filter_config[utl.INSTANCES_TYPE]
        if utl.IMAGES_TYPE in filter_config:
            search_opts_img = filter_config[utl.IMAGES_TYPE]
    return {
        'search_opts': search_opts,
        'search_opts_img': search_opts_img,
    }
def __init__(self, config, cloud):
    """Set up the Neutron network resource wrapper.

    :param config: migration configuration object
    :param cloud: cloud object providing the ``resources`` registry
    """
    super(NeutronNetwork, self).__init__(config)
    self.cloud = cloud
    self.identity_client = cloud.resources['identity']
    self.neutron_client = self.proxy(self.get_client(), config)
    # Optional mapping of SRC external networks to DST ones; an absent
    # or empty file yields an empty mapping.
    map_path = self.config.migrate.ext_net_map
    self.ext_net_map = utl.read_yaml_file(map_path) or {}
def run(self, **kwargs):
    """Load migration filters for instances, images and tenants.

    Reads the YAML filter file configured at ``migrate.filter_path`` and
    extracts the per-resource search options.

    :returns: dict with keys ``search_opts`` (instances filter or ``None``
              when absent), ``search_opts_img`` and ``search_opts_tenant``
              (``{}`` when absent).
    """
    search_opts, search_opts_img, search_opts_tenant = None, {}, {}
    filter_path = self.cfg.migrate.filter_path
    # Read the file once instead of twice (the original read it for the
    # truthiness check and again for the contents).
    filter_config = utl.read_yaml_file(filter_path)
    if filter_config:
        if utl.INSTANCES_TYPE in filter_config:
            search_opts = filter_config[utl.INSTANCES_TYPE]
        if utl.IMAGES_TYPE in filter_config:
            search_opts_img = filter_config[utl.IMAGES_TYPE]
        if utl.TENANTS_TYPE in filter_config:
            search_opts_tenant = filter_config[utl.TENANTS_TYPE]
    return {
        'search_opts': search_opts,
        'search_opts_img': search_opts_img,
        'search_opts_tenant': search_opts_tenant,
    }
def run(self, **kwargs):
    """Load migration filters for instances, images, volumes and tenants.

    Reads the YAML filter file configured at ``migrate.filter_path`` and
    extracts the per-resource search options.

    :returns: dict with keys ``search_opts`` (instances filter or ``None``
              when absent), ``search_opts_img``, ``search_opts_vol`` and
              ``search_opts_tenant`` (``{}`` when absent).
    """
    search_opts, search_opts_img, search_opts_tenant = None, {}, {}
    search_opts_vol = {}
    filter_path = self.cfg.migrate.filter_path
    # Read the file once instead of twice (the original read it for the
    # truthiness check and again for the contents).
    filter_config = utl.read_yaml_file(filter_path)
    if filter_config:
        if utl.INSTANCES_TYPE in filter_config:
            search_opts = filter_config[utl.INSTANCES_TYPE]
        if utl.IMAGES_TYPE in filter_config:
            search_opts_img = filter_config[utl.IMAGES_TYPE]
        if utl.VOLUMES_TYPE in filter_config:
            search_opts_vol = filter_config[utl.VOLUMES_TYPE]
        if utl.TENANTS_TYPE in filter_config:
            search_opts_tenant = filter_config[utl.TENANTS_TYPE]
    return {
        "search_opts": search_opts,
        "search_opts_img": search_opts_img,
        "search_opts_vol": search_opts_vol,
        "search_opts_tenant": search_opts_tenant,
    }
def __init__(self, config, group_file, cloud_id):
    """Build the grouping helper around one cloud's resources.

    :param config: migration configuration object
    :param group_file: path to the YAML grouping rules file
    :param cloud_id: identifier of the cloud to group (e.g. ``'src'``)
    :raises ValueError: if ``group_file`` is ``None`` — failing early with
        a clear message instead of crashing later inside the YAML reader
        (consistent with the sibling implementation of this constructor).
    """
    self.config = config
    if group_file is None:
        message = "Grouping config is not provided."
        LOG.error(message)
        raise ValueError(message)
    self.group_config = utils.read_yaml_file(group_file)
    resources = {'identity': keystone.KeystoneIdentity,
                 'network': neutron.NeutronNetwork,
                 'compute': nova_compute.NovaCompute}
    self.cloud = cloud.Cloud(resources, cloud_id, config)
    self.network = self.cloud.resources['network']
    self.compute = self.cloud.resources['compute']
    self.identity = self.cloud.resources['identity']
    self.groups = {}
def run(self, **kwargs):
    """Display the configured migration filters.

    In raw mode the filter file is dumped as-is; otherwise the
    per-resource options received via ``kwargs`` are printed one
    section at a time.
    """
    if self.raw:
        raw_filters = utils.read_yaml_file(cfglib.CONF.migrate.filter_path)
        if raw_filters:
            LOG.info('Filters: %s', pprint.pformat(raw_filters))
        return
    # Table-driven printing instead of four explicit calls.
    sections = (
        ('Filter by instances', 'search_opts'),
        ('Filter by images', 'search_opts_img'),
        ('Filter by volumes', 'search_opts_vol'),
        ('Filter by tenants', 'search_opts_tenant'),
    )
    for title, key in sections:
        self._print_options(title, kwargs.get(key, {}))
def __init__(self, config, group_file, cloud_id):
    """Build the grouping helper around one cloud's resources.

    :param config: migration configuration object
    :param group_file: path to the YAML grouping rules file
    :param cloud_id: identifier of the cloud to group (e.g. ``'src'``)
    :raises ValueError: if ``group_file`` is ``None``
    """
    self.config = config
    # Fail fast on a missing grouping config rather than crashing
    # later inside the YAML reader.
    if group_file is None:
        msg = "Grouping config is not provided."
        LOG.error(msg)
        raise ValueError(msg)
    self.group_config = utils.read_yaml_file(group_file)
    resource_classes = {
        'identity': keystone.KeystoneIdentity,
        'network': neutron.NeutronNetwork,
        'compute': nova_compute.NovaCompute,
    }
    self.cloud = cloud.Cloud(resource_classes, cloud_id, config)
    cloud_resources = self.cloud.resources
    self.network = cloud_resources['network']
    self.compute = cloud_resources['compute']
    self.identity = cloud_resources['identity']
    self.groups = {}
def test_group_by_network(self):
    """Instances sharing a network end up in the same group."""
    group_rules = """
        group_by:
            - network
        """
    self.make_group_file(group_rules)
    group = grouping.Grouping(FAKE_CONFIG, FILE_NAME, 'src')
    group.compute.get_instances_list.return_value = [self.fake_instance1,
                                                     self.fake_instance2,
                                                     self.fake_instance3]
    group.group()
    expected_result = {'net1_id': ['s1', 's3'],
                       'net3_id': ['s2']}
    result = utils.read_yaml_file(RESULT_FILE)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(expected_result, result)
def __init__(self, config, group_file, cloud_id):
    """Build the grouping helper around one cloud's resources.

    :param config: migration configuration object
    :param group_file: path to the YAML grouping rules file
    :param cloud_id: identifier of the cloud to group (e.g. ``'src'``)
    :raises ValueError: if ``group_file`` is ``None``
    """
    self.config = config
    # Fail fast when no grouping config was supplied.
    if group_file is None:
        error_text = "Grouping config is not provided."
        LOG.error(error_text)
        raise ValueError(error_text)
    self.group_config = utils.read_yaml_file(group_file)
    resources = {'identity': keystone.KeystoneIdentity,
                 'network': neutron.NeutronNetwork,
                 'compute': nova_compute.NovaCompute}
    self.cloud = cloud.Cloud(resources, cloud_id, config)
    self.network = self.cloud.resources['network']
    self.compute = self.cloud.resources['compute']
    self.identity = self.cloud.resources['identity']
    self.groups = {}
def run(self, **kwargs):
    """Run pre-migration checks on Network resources.

    Collects overlapping/invalid network resources between the source and
    destination clouds (subnets, segmentation IDs, external nets, floating
    IPs, physical networks, VMs in external networks) and aborts the
    migration if any problem is found.

    :raises exception.AbortMigrationError: if any overlapping or invalid
        Network resource is detected.
    """
    LOG.debug("Checking networks...")
    overlapping_resources = {}
    invalid_resources = {}

    src_net = self.src_cloud.resources[utils.NETWORK_RESOURCE]
    dst_net = self.dst_cloud.resources[utils.NETWORK_RESOURCE]
    src_compute = self.src_cloud.resources[utils.COMPUTE_RESOURCE]

    # Copy the tenant filter before augmenting it: the original updated
    # the dict stored in kwargs in place, leaking the nested
    # 'search_opts' key to any later consumer of the same kwargs.
    search_opts = dict(kwargs.get('search_opts_tenant', {}))
    search_opts.update({'search_opts': kwargs.get('search_opts', {})})

    LOG.debug("Retrieving Network information from Source cloud...")
    ports = src_net.get_ports_list()
    src_net_info = NetworkInfo(src_net.read_info(**search_opts), ports)
    LOG.debug("Retrieving Network information from Destination cloud...")
    dst_net_info = NetworkInfo(dst_net.read_info())
    LOG.debug("Retrieving Compute information from Source cloud...")
    src_compute_info = ComputeInfo(src_compute, search_opts)

    # Mapping of SRC external networks to DST ones; empty when not set.
    ext_net_map = utils.read_yaml_file(self.cfg.migrate.ext_net_map) or {}

    # Check external networks mapping
    if ext_net_map:
        LOG.info("Check external networks mapping...")
        invalid_ext_net_ids = src_net_info.get_invalid_ext_net_ids(
            dst_net_info, ext_net_map)
        if invalid_ext_net_ids:
            invalid_resources.update(
                {"invalid_external_nets_ids_in_map": invalid_ext_net_ids})

    # Check subnets and segmentation IDs overlap
    LOG.info("Check networks overlapping...")
    nets_overlapping_subnets, nets_overlapping_seg_ids = (
        src_net_info.get_overlapping_networks(dst_net_info))
    if nets_overlapping_subnets:
        overlapping_resources.update(
            {'networks_with_overlapping_subnets': nets_overlapping_subnets})
    # Segmentation ID overlap is only a warning, not a blocker.
    if nets_overlapping_seg_ids:
        LOG.warning("Networks with segmentation IDs overlapping:\n%s",
                    nets_overlapping_seg_ids)

    # Check external subnets overlap
    LOG.info("Check external subnets overlapping...")
    overlapping_external_subnets = (
        src_net_info.get_overlapping_external_subnets(dst_net_info,
                                                      ext_net_map))
    if overlapping_external_subnets:
        overlapping_resources.update(
            {"overlapping_external_subnets": overlapping_external_subnets})

    # Check floating IPs overlap
    LOG.info("Check floating IPs overlapping...")
    floating_ips = src_net_info.list_overlapping_floating_ips(dst_net_info,
                                                              ext_net_map)
    if floating_ips:
        overlapping_resources.update(
            {'overlapping_floating_ips': floating_ips})

    # Check busy physical networks on DST of FLAT network type
    LOG.info("Check busy physical networks for FLAT network type...")
    busy_flat_physnets = src_net_info.busy_flat_physnets(dst_net_info)
    if busy_flat_physnets:
        overlapping_resources.update(
            {'busy_flat_physnets': busy_flat_physnets})

    # Check physical networks existence on DST for VLAN network type
    LOG.info("Check physical networks existence for VLAN network type...")
    dst_neutron_client = dst_net.neutron_client
    missing_vlan_physnets = src_net_info.missing_vlan_physnets(
        dst_net_info, dst_neutron_client)
    if missing_vlan_physnets:
        overlapping_resources.update(
            {'missing_vlan_physnets': missing_vlan_physnets})

    # Check VMs spawned directly in external network
    LOG.info("Check VMs spawned directly in external networks...")
    devices = src_net_info.get_devices_from_external_networks()
    vms_list = src_compute_info.list_vms_in_external_network(devices)
    if vms_list:
        overlapping_resources.update({'vms_in_external_network': vms_list})

    # Print LOG message with all overlapping stuff and abort migration
    if overlapping_resources or invalid_resources:
        if overlapping_resources:
            LOG.critical('Network overlapping list:\n%s',
                         overlapping_resources)
        if invalid_resources:
            LOG.critical('Invalid Network resources list:\n%s',
                         invalid_resources)
        raise exception.AbortMigrationError(
            "There is a number of overlapping/invalid Network resources, "
            "so migration process can not be continued. Resolve it please "
            "and try again")