def __init__(self, config_files=None, refresh=False, private=False,
             config_key=None, config_defaults=None, cloud=None,
             use_direct_get=False):
    """Create Connection objects for the requested cloud(s).

    :param config_files: extra clouds.yaml paths appended to the defaults.
    :param refresh: invalidate each cloud's persistent cache on startup.
    :param private: mark every Connection as private.
    :param config_key: optional key to read extra config from.
    :param config_defaults: defaults merged into the extra config.
    :param cloud: name of a single cloud to use; None means all clouds.
    :param use_direct_get: accepted for interface compatibility; not used
        in this constructor.
    """
    if config_files is None:
        config_files = []
    config = loader.OpenStackConfig(
        config_files=loader.CONFIG_FILES + config_files)

    self.extra_config = config.get_extra_config(config_key, config_defaults)

    if cloud is None:
        self.clouds = [
            connection.Connection(config=cloud_region)
            for cloud_region in config.get_all()
        ]
    else:
        self.clouds = [connection.Connection(config=config.get_one(cloud))]

    if private:
        # Loop variable renamed so it does not shadow the ``cloud``
        # parameter (the original reused the name ``cloud`` here).
        for cloud_conn in self.clouds:
            cloud_conn.private = True

    # Handle manual invalidation of entire persistent cache
    if refresh:
        for cloud_conn in self.clouds:
            cloud_conn._cache.invalidate()
def _load_os_config(self):
    """Read cloud settings and store them on the object's ``cloud`` attribute."""
    # Create OpenStackConfig config instance
    os_config = loader.OpenStackConfig()
    # Look up the settings block for self.cloud_name; a missing entry is fatal.
    try:
        os_cloud = os_config.cloud_config['clouds'][self.cloud_name]
    except KeyError:
        self.logger.error(
            'Error fetching cloud settings for cloud "{0}"'.format(
                self.cloud_name))
        sys.exit(1)
    self.logger.debug('Cloud config:\n {0}'.format(
        json.dumps(os_cloud, indent=4)))

    # Rename every setting key to its openstack CLI flag form
    # (e.g. auth_url -> --os-auth-url), flattening one level of nesting.
    self.cloud = {}
    for key, value in os_cloud.items():
        if isinstance(value, dict):
            for sub_key, sub_value in value.items():
                self.cloud['--os-' + sub_key.replace('_', '-')] = sub_value
        else:
            self.cloud['--os-' + key.replace('_', '-')] = value
def get_cache_settings(cloud=None):
    """Return ``(cache_file, expiration_time)`` for the inventory cache."""
    config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
    config_loader = cloud_config.OpenStackConfig(config_files=config_files)
    if cloud:
        config = config_loader.get_one(cloud=cloud)
    else:
        # For inventory-wide caching
        config = config_loader.get_all()[0]
    cache_expiration_time = config.get_cache_expiration_time()
    cache_path = config.get_cache_path()
    if cloud:
        # Keep per-cloud caches separate by suffixing the path.
        cache_path = '{0}_{1}'.format(cache_path, cloud)
    if not os.path.exists(cache_path):
        os.makedirs(cache_path)
    cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
    return (cache_file, cache_expiration_time)
def test__load_yaml_json_file_nonexisting(self):
    """A nonexistent candidate file yields no matched path."""
    tested_files = []
    fn = os.path.join('/fake', 'file.txt')
    tested_files.append(fn)
    path, result = loader.OpenStackConfig()._load_yaml_json_file(
        tested_files)
    # assertIsNone gives a clearer failure message than assertEqual(None, ...)
    self.assertIsNone(path)
def get_vm_ports(self):
    """Return a Tap for every port owned by a compute instance on this cloud."""
    # The original created a loader.OpenStackConfig() that was never used;
    # the dead assignment has been removed.
    conn = openstack.connect(cloud=self.name)
    taps = []
    for port in conn.network.ports():
        # Ports attached to servers have a device_owner like 'compute:nova'.
        if port['device_owner'].startswith('compute'):
            taps.append(Tap(**port))
    return taps
def test__load_yaml_json_file_without_json_yaml(self):
    """A file lacking a .json/.yaml extension is still matched by path."""
    with tempfile.TemporaryDirectory() as tmpdir:
        fn = os.path.join(tmpdir, 'file.txt')
        with open(fn, 'w+') as fp:
            fp.write(FILES['txt'])
        path, result = loader.OpenStackConfig()._load_yaml_json_file([fn])
        self.assertEqual(fn, path)
def find_virtual_machine_by_ip(self, ip):
    """Return the VirtualMachine owning a port with address *ip*, or None."""
    # Removed an unused loader.OpenStackConfig() instance from the original.
    conn = openstack.connect(cloud=self.name)
    for port in conn.network.ports():
        tap = Tap(**port)
        if tap.get_ip() == ip:
            server = conn.compute.find_server(port.get('device_id'))
            vm = VirtualMachine(**server)
            vm.add_tap(tap)
            # NOTE(review): logging the match at WARNING looks like leftover
            # debugging; confirm before downgrading the level.
            logger.warning(vm)
            return vm
    return None
def find_virtual_machine(self, name):
    """Look up a server by *name*; return it with its taps attached, or None."""
    # Removed an unused loader.OpenStackConfig() instance from the original.
    conn = openstack.connect(cloud=self.name)
    server = conn.compute.find_server(name)
    if not server:
        return None
    vm = VirtualMachine(**server)
    for port in conn.network.ports():
        # Attach every port whose device_id matches this server.
        if port.get('device_id') == vm.uuid:
            vm.add_tap(Tap(**port))
    return vm
def get_all_virtual_machines(self):
    """Return every server on this cloud as a VirtualMachine with its taps."""
    # Removed an unused loader.OpenStackConfig() instance from the original.
    conn = openstack.connect(cloud=self.name)
    # Fetch the port list once and group by device_id instead of re-querying
    # the network API inside the server loop (previously one ports() call
    # per server).
    ports_by_device = {}
    for port in conn.network.ports():
        ports_by_device.setdefault(port.get('device_id'), []).append(port)
    vms = []
    for server in conn.compute.servers():
        vm = VirtualMachine(**server)
        for port in ports_by_device.get(vm.uuid, []):
            vm.add_tap(Tap(**port))
        vms.append(vm)
    return vms
def test_base_load_yaml_json_file(self):
    """The loader matches one of the candidate files created in tmpdir."""
    with tempfile.TemporaryDirectory() as tmpdir:
        tested_files = []
        for key, value in FILES.items():
            fn = os.path.join(tmpdir, 'file.{ext}'.format(ext=key))
            tested_files.append(fn)
            with open(fn, 'w+') as fp:
                fp.write(value)
        path, result = loader.OpenStackConfig()._load_yaml_json_file(
            tested_files)
        # NOTE(hberaud): Prefer to test the path rather than the file because
        # FILES is a dict, so results are appended without keeping the
        # initial order (python 3.5).
        self.assertEqual(tmpdir, os.path.dirname(path))
def __init__(self, cloud='ops_work', name="", template_file="",
             dbg=logging.INFO):
    """Prepare a stack helper bound to *cloud*.

    :param cloud: clouds.yaml entry name to connect to.
    :param name: stack name.
    :param template_file: path to the stack template.
    :param dbg: logging level passed to the colored logger.
    """
    self.__logger = AkarLogging(dbg, "OS Stack").get_color_logger()
    self.dbg = dbg
    self.name = name
    self.template_file = template_file
    self.timeout = 3600
    self.cloud = cloud
    self.rollback = False
    # Removed the unused local `parameters = dict(default={}, type='dict')`
    # from the original; it was never referenced.
    self.__logger.info(
        f'Name stack: {self.name} Template file: {self.template_file}')
    self.config = loader.OpenStackConfig()
    self.conn = openstack.connect(cloud=self.cloud)
    # Plain string: the original used an f-string with no placeholders.
    self.__logger.info('Openstack connected successful')
def main():
    """Entry point: parse arguments, then purge resources across threads."""
    parser = create_argument_parser()

    cloud_config = loader.OpenStackConfig()
    cloud_config.register_argparse_arguments(parser, sys.argv)

    options = parser.parse_args()
    configure_logging(options.verbose)

    creds_manager = CredentialsManager(options=options, config=cloud_config)
    creds_manager.ensure_enabled_project()
    creds_manager.ensure_role_on_project()

    resource_managers = sorted(
        [cls(creds_manager)
         for cls in utils.get_resource_classes(options.resource)],
        key=operator.methodcaller('order')
    )

    # This is an `Event` used to signal whether one of the threads encountered
    # an unrecoverable error, at which point all threads should exit because
    # otherwise there's a chance the cleanup process never finishes.
    # Named `exit_event` so it does not shadow the `exit` builtin.
    exit_event = threading.Event()

    # Dummy function to work around `ThreadPoolExecutor.map()` not accepting
    # a callable with arguments.
    def partial_runner(resource_manager):
        runner(resource_manager, options=options,
               exit=exit_event)  # pragma: no cover

    try:
        with concurrent.futures.ThreadPoolExecutor(8) as executor:
            executor.map(partial_runner, resource_managers)
    except KeyboardInterrupt:
        exit_event.set()

    if creds_manager.revoke_role_after_purge:
        creds_manager.revoke_role_on_project()

    if creds_manager.disable_project_after_purge:
        creds_manager.disable_project()

    sys.exit(int(exit_event.is_set()))
def __init__(self, cloud='ops_work', name="", name_underline=True,
             dbg=logging.WARNING):
    """Collect server and IP information for a stack on the given cloud."""
    self.servers = list()
    self.dbg = dbg
    # Optionally append an underscore so the name acts as a prefix filter.
    self.name = f'{name}_' if name_underline else name
    self.cloud = cloud
    self.config = loader.OpenStackConfig()
    self.conn = openstack.connect(cloud=self.cloud)
    self.__logger = AkarLogging(dbg, "OS Servers").get_color_logger()
    self.__get_info_servers()
    self.__logger.debug(f'Total found servers: {len(self.servers)}')
    self.__get_info_ips()
    self.__logger.info(f'Stack: {name} Total servers: {len(self.servers)}')
def __init__(self, config_files=None, refresh=False, private=False,
             config_key=None, config_defaults=None, cloud=None,
             use_direct_get=False):
    """Create shade OpenStackCloud objects for the requested cloud(s).

    :param config_files: extra clouds.yaml paths appended to the defaults.
    :param refresh: invalidate each cloud's persistent cache on startup.
    :param private: mark every cloud object as private.
    :param config_key: optional key to read extra config from.
    :param config_defaults: defaults merged into the extra config.
    :param cloud: name of a single cloud to use; None means all clouds.
    :param use_direct_get: accepted for interface compatibility; not used
        in this constructor.
    :raises shade.OpenStackCloudException: if the named cloud is not
        present in the configuration.
    """
    if config_files is None:
        config_files = []
    config = loader.OpenStackConfig(
        config_files=loader.CONFIG_FILES + config_files)

    self.extra_config = config.get_extra_config(config_key, config_defaults)

    if cloud is None:
        self.clouds = [
            shade.OpenStackCloud(cloud_config=cloud_config)
            for cloud_config in config.get_all_clouds()
        ]
    else:
        try:
            self.clouds = [
                shade.OpenStackCloud(
                    cloud_config=config.get_one_cloud(cloud))
            ]
        except exceptions.ConfigException as e:
            # Chain the cause so the original traceback is preserved.
            raise shade.OpenStackCloudException(e) from e

    if private:
        # Loop variable renamed so it does not shadow the ``cloud``
        # parameter (the original reused the name ``cloud`` here).
        for cloud_obj in self.clouds:
            cloud_obj.private = True

    # Handle manual invalidation of entire persistent cache
    if refresh:
        for cloud_obj in self.clouds:
            cloud_obj._cache.invalidate()
def create_virtual_machine(self, name, image, flavor, network,
                           availability_zone, may_exist=False):
    """Create a server and wait for it; return it as a VirtualMachine.

    If a server called *name* already exists, return it when *may_exist*
    is true, otherwise return None without creating anything.
    """
    # Removed an unused loader.OpenStackConfig() instance from the original.
    conn = openstack.connect(cloud=self.name)
    server = self.find_virtual_machine(name)
    logger.info(server)
    if server is not None:
        if may_exist:
            return server
        return None
    image = conn.compute.find_image(image)
    flavor = conn.compute.find_flavor(flavor)
    network = conn.network.find_network(network)
    server = conn.compute.create_server(
        name=name, image_id=image.id, flavor_id=flavor.id,
        availability_zone=availability_zone,
        networks=[{"uuid": network.id}])
    # Block until the server reaches an active state.
    server = conn.compute.wait_for_server(server)
    return VirtualMachine(**server)
def create_connection_from_args():
    """Build an openstack Connection from the process command-line arguments."""
    parser = argparse.ArgumentParser()
    config = loader.OpenStackConfig()
    config.register_argparse_arguments(parser, sys.argv[1:])
    parsed = parser.parse_args()
    cloud_region = config.get_one(argparse=parsed)
    return openstack.connect(config=cloud_region)
import argparse
import os
import openstack
from openstack.config import loader
import sys

openstack.enable_logging(True, stream=sys.stdout)

#: Defines the OpenStack Config cloud key in your config file,
#: typically in $HOME/.config/openstack/clouds.yaml. That configuration
#: will determine where the examples will be run and what resource defaults
#: will be used to run the examples.
TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'devstack-admin')

# Module-level config loader and connection shared by the example helpers.
config = loader.OpenStackConfig()
cloud = openstack.connect(cloud=TEST_CLOUD)


class Opts(object):
    """Minimal option holder mimicking CLI options for the examples."""

    def __init__(self, cloud_name='devstack-admin', debug=True):
        self.cloud = cloud_name
        self.debug = debug
        # Use identity v3 API for examples.
        self.identity_api_version = '3'


def _get_resource_value(resource_key, default):
    """Return *resource_key* from the 'example' extra config, or *default*."""
    return config.get_extra_config('example').get(resource_key, default)
def reset(self):
    """Discard and rebuild the cached OpenStack configuration loader."""
    fresh_config = loader.OpenStackConfig()
    self.openstack_config = fresh_config
def _get_openstack_config(app_name=None, app_version=None):
    """Return an OpenStackConfig tagged with the application name/version.

    Kept as a helper to protect against older versions of os-client-config
    that don't expose these constructor arguments.
    """
    return loader.OpenStackConfig(
        app_name=app_name,
        app_version=app_version,
    )