def evacuate(name_config=None, debug=False, iteration=False):
    """Run the in-cloud evacuation chain and report freed compute nodes.

    :param name_config: path to the CloudFerry configuration file
    :param debug: when True, raise logging verbosity to DEBUG
    :param iteration: evacuation iteration; anything not convertible to
        int aborts the task with an error message
    """
    if debug:
        utils.configure_logging("DEBUG")
    # Validate the iteration argument up front.
    try:
        iteration = int(iteration)
    except ValueError:
        LOG.error("Invalid value provided as 'iteration' argument, it must be "
                  "integer")
        return
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    conf = cfglib.CONF
    utils.init_singletones(conf)
    env.key_filename = conf.migrate.key_filename
    cloud = cloud_ferry.CloudFerry(conf)
    LOG.info("running evacuation")
    evacuation_chain.process_chain(cloud, iteration)
    freed_nodes = get_freed_nodes(iteration)
    if freed_nodes:
        LOG.info("Following nodes will be freed once in-cloud migration "
                 "finishes, and can be moved from source to destination: %s",
                 freed_nodes)
    else:
        LOG.warning("Evacuation cannot be completed, because there are no "
                    "available compute nodes, that should be freed")
def create_filters(name_config=None, filter_folder=DEFAULT_FILTERS_FILES,
                   images_date='2000-01-01'):
    """Write CloudFerry filter files derived from the condensation schedule.

    :param name_config: path to the CloudFerry configuration file
    :param filter_folder: directory the generated filter files go into
    :param images_date: cut-off date passed through to the filter maker
    """
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    make_filters.make(filter_folder, images_date)
def evacuate(name_config=None, debug=False, iteration=False):
    """Execute one evacuation iteration inside the source cloud.

    Aborts with a logged error when ``iteration`` is not an integer;
    otherwise loads config, runs the evacuation chain and logs the
    compute nodes that will become free.
    """
    if debug:
        utils.configure_logging("DEBUG")
    try:
        iteration = int(iteration)
    except ValueError:
        LOG.error("Invalid value provided as 'iteration' argument, it must be "
                  "integer")
        return
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    utils.init_singletones(cfglib.CONF)
    env.key_filename = cfglib.CONF.migrate.key_filename
    cloud = cloud_ferry.CloudFerry(cfglib.CONF)
    LOG.info("running evacuation")
    evacuation_chain.process_chain(cloud, iteration)
    # Guard clause: nothing to report when no node can be freed.
    freed = get_freed_nodes(iteration)
    if not freed:
        LOG.warning("Evacuation cannot be completed, because there are no "
                    "available compute nodes, that should be freed")
        return
    LOG.info(
        "Following nodes will be freed once in-cloud migration finishes, "
        "and can be moved from source to destination: %s", freed)
def setUp(self):
    # Build the fully mocked Keystone environment shared by the tests:
    # a patched keystone client, two tenants, two users, two roles and
    # src/dst keystone handles.
    super(KeystoneIdentityTestCase, self).setUp()
    self.mock_client = mock.MagicMock()
    # Patch keystone_client.Client so KeystoneIdentity never contacts a
    # real service; the fixture restores the original on tearDown.
    self.kc_patch = mockpatch.PatchObject(keystone_client, 'Client',
                                          new=self.mock_client)
    self.useFixture(self.kc_patch)
    self.fake_cloud = mock.Mock()
    self.fake_cloud.mysql_connector = mock.Mock()
    self.keystone_client = keystone.KeystoneIdentity(
        FAKE_CONFIG, self.fake_cloud)
    # Two fake tenants. ``name`` is assigned after construction because
    # mock.Mock(name=...) would set the mock's own name, not an attribute.
    self.fake_tenant_0 = mock.Mock(spec=keystone_client.tenants.Tenant)
    self.fake_tenant_0.name = 'tenant_name_0'
    self.fake_tenant_0.description = 'tenant_description_0'
    self.fake_tenant_0.id = 'tenant_id_0'
    self.fake_tenant_1 = mock.Mock(spec=keystone_client.tenants.Tenant)
    self.fake_tenant_1.name = 'tenant_name_1'
    self.fake_tenant_1.description = 'tenant_description_1'
    self.fake_tenant_1.id = 'tenant_id_1'
    # One fake user per tenant, linked through tenantId.
    self.fake_user_0 = mock.Mock(spec=keystone_client.users.User)
    self.fake_user_0.name = 'user_name_0'
    self.fake_user_0.id = 'user_id_0'
    self.fake_user_0.tenantId = self.fake_tenant_0.id
    self.fake_user_0.email = '*****@*****.**'
    self.fake_user_1 = mock.Mock(spec=keystone_client.users.User)
    self.fake_user_1.name = 'user_name_1'
    self.fake_user_1.id = 'user_id_1'
    self.fake_user_1.tenantId = self.fake_tenant_1.id
    self.fake_user_1.email = '*****@*****.**'
    # Two fake roles.
    self.fake_role_0 = mock.Mock(spec=keystone_client.roles.Role)
    self.fake_role_0.name = 'role_name_0'
    self.fake_role_0.id = 'role_id_0'
    self.fake_role_1 = mock.Mock(spec=keystone_client.roles.Role)
    self.fake_role_1.name = 'role_name_1'
    self.fake_role_1.id = 'role_id_1'
    # src/dst keystone handles whose users.find delegates to the test
    # helper mock_user_find.
    self.fake_src_keystone = mock.Mock()
    self.fake_dst_keystone = mock.Mock()
    self.fake_src_keystone.keystone_client.users.find.side_effect = (
        self.mock_user_find)
    self.fake_dst_keystone.keystone_client.users.find.side_effect = (
        self.mock_user_find)
    cfglib.init_config()
    # NOTE(review): these values look redacted in this copy of the file;
    # verify the intended usernames against the upstream source.
    cfglib.CONF.src.user = '******'
    cfglib.CONF.dst.user = '******'
    self.fake_src_admin_user = mock.Mock()
    self.fake_dst_admin_user = mock.Mock()
    self.fake_same_user = mock.Mock()
    self.fake_same_user.id = 'fake_same_id'
    self.fake_same_user.name = 'fake_same_name'
def condense(config=None, vm_grouping_config=None, debug=False):
    """Rearrange source-cloud load to free as many hardware nodes as possible.

    Used for in-place migrations where no spare hardware exists. The task:

    1. collects flavors, VMs and nodes from the source cloud;
    2. groups connected VMs (see ``get_groups``) and computes a schedule
       that frees the maximum number of nodes;
    3. generates CloudFerry filter files that replay those groups in the
       computed order.

    :param config: path to CloudFerry configuration file (based on
        `configs/config.ini`)
    :param vm_grouping_config: path to grouping config file (based on
        `configs/groups.yaml`)
    :param debug: enable debug logging when True
    :raises RuntimeError: when no grouping information can be read back
    """
    if debug:
        utils.configure_logging("DEBUG")
    cfglib.collector_configs_plugins()
    cfglib.init_config(config)
    data_storage.check_redis_config()
    conf = cfglib.CONF
    LOG.info("Retrieving flavors, VMs and nodes from SRC cloud")
    flavors, vms, nodes = nova_collector.get_flavors_vms_and_nodes(conf)
    if conf.condense.keep_interim_data:
        condense_utils.store_condense_data(flavors, nodes, vms)
    LOG.info("Retrieving groups of VMs")
    # get_groups persists its result at migrate.group_file_path.
    get_groups(config, vm_grouping_config)
    groups = condense_utils.read_file(conf.migrate.group_file_path)
    if groups is None:
        error_msg = ("Grouping information is missing. Make sure you have "
                     "grouping file defined in config.")
        LOG.critical(error_msg)
        raise RuntimeError(error_msg)
    LOG.info("Generating migration schedule based on grouping rules")
    process.process(nodes=nodes, flavors=flavors, vms=vms, groups=groups)
    LOG.info("Starting generation of filter files for migration")
    create_filters(config)
    LOG.info("Migration schedule generated. You may now want to start "
             "evacuation job: 'fab evacuate'")
    LOG.info("Condensation process finished. Checkout filters file: %s.",
             DEFAULT_FILTERS_FILES)
def setUp(self):
    # Prepare the mocked Keystone fixture for this test case: patched
    # client, fake tenants/users/roles and src/dst keystone handles.
    super(KeystoneIdentityTestCase, self).setUp()
    self.mock_client = mock.MagicMock()
    # Keep the real keystone Client out of the tests; the fixture
    # reverts the patch automatically.
    self.kc_patch = mockpatch.PatchObject(keystone_client, 'Client',
                                          new=self.mock_client)
    self.useFixture(self.kc_patch)
    self.fake_cloud = mock.Mock()
    self.fake_cloud.mysql_connector = mock.Mock()
    self.keystone_client = keystone.KeystoneIdentity(FAKE_CONFIG,
                                                     self.fake_cloud)
    # Fake tenants; attributes set post-construction since
    # mock.Mock(name=...) names the mock itself rather than setting .name.
    self.fake_tenant_0 = mock.Mock(spec=keystone_client.tenants.Tenant)
    self.fake_tenant_0.name = 'tenant_name_0'
    self.fake_tenant_0.description = 'tenant_description_0'
    self.fake_tenant_0.id = 'tenant_id_0'
    self.fake_tenant_1 = mock.Mock(spec=keystone_client.tenants.Tenant)
    self.fake_tenant_1.name = 'tenant_name_1'
    self.fake_tenant_1.description = 'tenant_description_1'
    self.fake_tenant_1.id = 'tenant_id_1'
    # Fake users, one per tenant.
    self.fake_user_0 = mock.Mock(spec=keystone_client.users.User)
    self.fake_user_0.name = 'user_name_0'
    self.fake_user_0.id = 'user_id_0'
    self.fake_user_0.tenantId = self.fake_tenant_0.id
    self.fake_user_0.email = '*****@*****.**'
    self.fake_user_1 = mock.Mock(spec=keystone_client.users.User)
    self.fake_user_1.name = 'user_name_1'
    self.fake_user_1.id = 'user_id_1'
    self.fake_user_1.tenantId = self.fake_tenant_1.id
    self.fake_user_1.email = '*****@*****.**'
    # Fake roles.
    self.fake_role_0 = mock.Mock(spec=keystone_client.roles.Role)
    self.fake_role_0.name = 'role_name_0'
    self.fake_role_0.id = 'role_id_0'
    self.fake_role_1 = mock.Mock(spec=keystone_client.roles.Role)
    self.fake_role_1.name = 'role_name_1'
    self.fake_role_1.id = 'role_id_1'
    # src/dst keystone handles delegating user lookups to mock_user_find.
    self.fake_src_keystone = mock.Mock()
    self.fake_dst_keystone = mock.Mock()
    self.fake_src_keystone.keystone_client.users.find.side_effect = (
        self.mock_user_find)
    self.fake_dst_keystone.keystone_client.users.find.side_effect = (
        self.mock_user_find)
    cfglib.init_config()
    # NOTE(review): values appear redacted in this copy; confirm the
    # original usernames upstream.
    cfglib.CONF.src.user = '******'
    cfglib.CONF.dst.user = '******'
    self.fake_src_admin_user = mock.Mock()
    self.fake_dst_admin_user = mock.Mock()
    self.fake_same_user = mock.Mock()
    self.fake_same_user.id = 'fake_same_id'
    self.fake_same_user.name = 'fake_same_name'
def migrate(name_config=None, name_instance=None):
    """Run a full migration with the default scenario.

    :param name_config: name of config yaml-file, e.g. 'config.yaml'
    :param name_instance: accepted for interface compatibility; unused here
    """
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    utils.init_singletones(cfglib.CONF)
    env.key_filename = cfglib.CONF.migrate.key_filename
    scenario = Scenario()
    cloud = cloud_ferry.CloudFerry(cfglib.CONF)
    cloud.migrate(scenario)
def migrate(name_config=None, name_instance=None):
    """Kick off a cloud migration.

    :param name_config: name of config yaml-file, e.g. 'config.yaml'
    :param name_instance: accepted for interface compatibility; unused here
    """
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    conf = cfglib.CONF
    utils.init_singletones(conf)
    env.key_filename = conf.migrate.key_filename
    cloud_ferry.CloudFerry(conf).migrate()
def migrate(name_config=None, name_instance=None, debug=False):
    """Run a migration with the default scenario.

    :param name_config: name of config yaml-file, e.g. 'config.yaml'
    :param name_instance: accepted for interface compatibility; unused here
    :param debug: switch logging to DEBUG when True
    """
    if debug:
        utl.configure_logging("DEBUG")
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    conf = cfglib.CONF
    utils.init_singletones(conf)
    env.key_filename = conf.migrate.key_filename
    cloud_ferry.CloudFerry(conf).migrate(Scenario())
def test_nothing_is_filtered_if_skip_down_hosts_option_not_set(self):
    """With skip_down_hosts disabled, no instance is filtered out."""
    cfglib.init_config()
    cfglib.CONF.migrate.skip_down_hosts = False
    self.fake_instance_0.host = 'host1'
    self.fake_instance_1.host = 'host2'
    down = ['host1', 'host2', 'host3']
    all_instances = [self.fake_instance_0, self.fake_instance_1]
    result = nova_compute.filter_down_hosts(down, all_instances)
    # Every instance survives even though its host appears in the down list.
    self.assertEqual(result, all_instances)
def migrate(name_config=None, name_instance=None, debug=False):
    """Run a migration using the scenario/tasks paths from the config.

    :param name_config: name of config yaml-file, e.g. 'config.yaml'
    :param name_instance: accepted for interface compatibility; unused here
    :param debug: switch logging to DEBUG when True
    """
    if debug:
        utils.configure_logging("DEBUG")
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    conf = cfglib.CONF
    utils.init_singletones(conf)
    env.key_filename = conf.migrate.key_filename
    scenario = Scenario(path_scenario=conf.migrate.scenario,
                        path_tasks=conf.migrate.tasks_mapping)
    cloud_ferry.CloudFerry(conf).migrate(scenario)
def test_get_instances_list(self):
    """get_instances_list forwards its kwargs to servers.list verbatim."""
    expected = [self.fake_instance_0, self.fake_instance_1]
    self.mock_client().servers.list.return_value = expected
    call_kwargs = dict(marker=None, detailed=True, limit=None,
                       search_opts=None)
    cfglib.init_config()
    result = self.nova_client.get_instances_list(**call_kwargs)
    self.mock_client().servers.list.assert_called_once_with(**call_kwargs)
    self.assertEqual(expected, result)
def init(name_config=None, debug=None):
    """Load configuration and set up logging; exit on config errors.

    :param name_config: optional path to the configuration file
    :param debug: tri-state; None leaves the configured value untouched,
        any other value is coerced to bool and overrides migrate.debug
    """
    try:
        cfglib.init_config(name_config)
    except oslo_config.cfg.Error:
        # Surface the parse failure and stop with the dedicated exit code.
        traceback.print_exc()
        sys.exit(ERROR_INVALID_CONFIGURATION)
    utils.init_singletones(cfglib.CONF)
    if cfglib.CONF.migrate.hide_ssl_warnings:
        warnings.simplefilter("ignore")
    if debug is not None:
        cfglib.CONF.set_override('debug',
                                 oslo_config.types.Boolean()(debug),
                                 'migrate')
    log.configure_logging()
def init(name_config=None, debug=None):
    """Initialize config, singletons, warning filters and logging.

    Exits the process with ERROR_INVALID_CONFIGURATION when the
    configuration file cannot be parsed.
    """
    try:
        cfglib.init_config(name_config)
    except oslo_config.cfg.Error:
        traceback.print_exc()
        sys.exit(ERROR_INVALID_CONFIGURATION)
    utils.init_singletones(cfglib.CONF)
    # Optionally silence SSL warnings, as requested by the config.
    if cfglib.CONF.migrate.hide_ssl_warnings:
        warnings.simplefilter("ignore")
    # ``debug`` may be a string coming from the CLI; coerce it strictly.
    if debug is not None:
        value = oslo_config.types.Boolean()(debug)
        cfglib.CONF.set_override("debug", value, "migrate")
    log.configure_logging()
def migrate(name_config=None, name_instance=None, debug=False):
    """Run a migration, configuring SSH retry behavior from the config.

    :param name_config: name of config yaml-file, e.g. 'config.yaml'
    :param name_instance: accepted for interface compatibility; unused here
    :param debug: switch logging to DEBUG when True
    """
    if debug:
        utils.configure_logging("DEBUG")
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    conf = cfglib.CONF
    utils.init_singletones(conf)
    # Fabric connection parameters come straight from the migrate section.
    env.key_filename = conf.migrate.key_filename
    env.connection_attempts = conf.migrate.ssh_connection_attempts
    scenario = Scenario(path_scenario=conf.migrate.scenario,
                        path_tasks=conf.migrate.tasks_mapping)
    cloud_ferry.CloudFerry(conf).migrate(scenario)
def get_groups(name_config=None, group_file=None, cloud_id='src',
               validate_users_group=False):
    """Group VMs by shared dependencies (e.g. tenants, networks).

    :param name_config: name of config ini-file, example 'config.ini'
    :param group_file: name of groups defined yaml-file, example
        'groups.yaml'
    :param cloud_id: which cloud to inspect; defaults to the source
    :param validate_users_group: remove duplicate IDs and verify each VM
        id via extra nova API calls (slower)
    :return: yaml-file with tree-based groups defined based on grouping
        rules
    """
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    grouper = grouping.Grouping(cfglib.CONF, group_file, cloud_id)
    grouper.group(validate_users_group)
def test_down_hosts_are_filtered_if_config_option_is_set(self):
    """filter_down_hosts drops instances on down hosts when enabled."""
    def make_instance(hostname):
        fake = mock.Mock()
        setattr(fake, nova_compute.INSTANCE_HOST_ATTRIBUTE, hostname)
        return fake

    num_instances_up = 5
    num_hosts_down = 10
    # xrange kept: this test suite targets Python 2.
    hosts_down = ['downhost%d' % i for i in xrange(num_hosts_down)]
    instances = [make_instance('host%d' % i)
                 for i in xrange(num_instances_up)]
    instances += [make_instance(down) for down in hosts_down]
    cfglib.init_config()
    cfglib.CONF.migrate.skip_down_hosts = True
    filtered = nova_compute.filter_down_hosts(
        hosts_down=hosts_down,
        elements=instances,
        hostname_attribute=nova_compute.INSTANCE_HOST_ATTRIBUTE)
    # Only the instances on healthy hosts should remain.
    self.assertEqual(len(filtered), num_instances_up)
def get_groups(name_config=None, group_file=None, cloud_id='src',
               validate_users_group=False, debug=False):
    """Group VMs by any of their dependencies (e.g. tenants, networks).

    :param name_config: name of config ini-file, example 'config.ini'
    :param group_file: name of groups defined yaml-file, example
        'groups.yaml'
    :param cloud_id: which cloud to inspect; defaults to the source
    :param validate_users_group: remove duplicate IDs and check if a
        valid VM id is specified; takes more time because of multiple
        nova API calls
    :param debug: switch logging to DEBUG when True
    :return: yaml-file with tree-based groups defined based on grouping
        rules
    """
    if debug:
        utils.configure_logging("DEBUG")
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    grouping.Grouping(cfglib.CONF, group_file, cloud_id).group(
        validate_users_group)
def condense(name_config=None, debug=False):
    """Load configuration and run the condensation scheduler.

    :param name_config: path to the configuration file
    :param debug: switch logging to DEBUG when True
    """
    if debug:
        utl.configure_logging("DEBUG")
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    process.process()
def get_condensation_info(name_config=None):
    """Load configuration and delegate to ``nova_collector.run_it``.

    :param name_config: path to the configuration file
    """
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    nova_collector.run_it(cfglib.CONF)
def create_filters(name_config=None, filter_folder='configs/filters',
                   images_date='2000-01-01'):
    """Generate CloudFerry filter files into ``filter_folder``.

    :param name_config: path to the configuration file
    :param filter_folder: destination directory for the filter files
    :param images_date: cut-off date handed to the filter maker
    """
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    make_filters.make(filter_folder, images_date)
def nova_collector(name_config=None):
    """Load configuration and run the nova collector module.

    :param name_config: path to the configuration file
    """
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    nova_collector_module.run_it(cfglib.CONF)
def setUp(self):
    """Reset global mock and CONF state around every test."""
    super(TestCase, self).setUp()
    # Undo any mock.patch a test may leave active.
    self.addCleanup(mock.patch.stopall)
    self.cfg = cfglib.CONF
    # Registered after stopall, so (cleanups run LIFO) the config reset
    # happens before patches are stopped.
    self.addCleanup(self.cfg.reset)
    cfglib.init_config()
def load_config(name_config):
    """Load a CloudFerry config file and initialize shared singletons.

    :param name_config: path to the configuration file
    """
    cfglib.collector_configs_plugins()
    cfglib.init_config(name_config)
    utils.init_singletones(cfglib.CONF)
    # Optionally mute SSL warnings, as requested by the config.
    if cfglib.CONF.migrate.hide_ssl_warnings:
        warnings.simplefilter("ignore")
# FIX: ``config`` was used below (config.cloud_ferry_conf) without being
# imported, so importing this module raised NameError at the module-level
# cfglib.init_config() call.
import config
import logging
import sys
import os
import unittest

from generate_load import Prerequisites
from filtering_utils import FilteringUtils


def get_cf_root_folder():
    """Return the CloudFerry repository root (two directory levels above
    this test file)."""
    return os.path.dirname(os.path.dirname(os.path.split(__file__)[0]))


# Make the repository root importable before pulling in cfglib, then load
# the functional-test cloud configuration.
sys.path.append(get_cf_root_folder())
import cfglib
cfglib.init_config(os.path.join(get_cf_root_folder(),
                                config.cloud_ferry_conf))


def suppress_dependency_logging():
    """Raise chatty third-party client loggers to WARNING level."""
    suppressed_logs = [
        "iso8601.iso8601",
        "keystoneclient.session",
        "neutronclient.client",
        "requests.packages.urllib3.connectionpool",
        "glanceclient.common.http",
    ]
    for l in suppressed_logs:
        logging.getLogger(l).setLevel(logging.WARNING)
import config import logging import sys import os import unittest from generate_load import Prerequisites from filtering_utils import FilteringUtils def get_cf_root_folder(): return os.path.dirname(os.path.dirname(os.path.split(__file__)[0])) sys.path.append(get_cf_root_folder()) import cfglib cfglib.init_config(os.path.join(get_cf_root_folder(), config.cloud_ferry_conf)) def suppress_dependency_logging(): suppressed_logs = ['iso8601.iso8601', 'keystoneclient.session', 'neutronclient.client', 'requests.packages.urllib3.connectionpool', 'glanceclient.common.http'] for l in suppressed_logs: logging.getLogger(l).setLevel(logging.WARNING) class FunctionalTest(unittest.TestCase): def __init__(self, *args, **kwargs):