def __init__(self, **kwargs):
    """Prepare result directories, a shade connection and the external net.

    Defaults ``case_name`` to ``tenantnetwork1`` when the caller does not
    supply one.  All cloud lookups are best-effort: on failure the
    corresponding attribute is left as ``None`` and the error is logged.
    """
    # Default the case name so result paths stay stable for this testcase.
    if "case_name" not in kwargs:
        kwargs["case_name"] = 'tenantnetwork1'
    super(TenantNetwork1, self).__init__(**kwargs)
    # Results are written under <dir_results>/<case_name>.
    self.dir_results = os.path.join(getattr(config.CONF, 'dir_results'))
    self.res_dir = os.path.join(self.dir_results, self.case_name)
    self.output_log_name = 'functest.log'
    self.output_debug_log_name = 'functest.debug.log'
    try:
        # Build a shade connection from the standard os-client-config
        # lookup (env vars / clouds.yaml).
        cloud_config = os_client_config.get_config()
        self.cloud = self.orig_cloud = shade.OpenStackCloud(
            cloud_config=cloud_config)
    except Exception:  # pylint: disable=broad-except
        # Leave both cloud handles unset so subclasses can detect failure.
        self.cloud = self.orig_cloud = None
        self.ext_net = None
        self.__logger.exception("Cannot connect to Cloud")
    try:
        self.ext_net = self.get_external_network(self.cloud)
    except Exception:  # pylint: disable=broad-except
        self.ext_net = None
        self.__logger.exception("Cannot get the external network")
    # Unique suffix used to namespace all resources created by this run.
    self.guid = str(uuid.uuid4())
    # Created lazily by the testcase run.
    self.network = None
    self.subnet = None
    self.router = None
def __init__(self, **kwargs):
    """Create an isolated project/user pair and derive SNAPS settings.

    Builds an admin shade connection, creates a dedicated project and user
    for this testcase, grants the admin and heat_stack_owner roles, then
    assembles SNAPS credentials and configuration knobs from functest's
    config.

    :raises Exception: when the project/user cannot be created or neither
        an ``admin`` nor an ``Admin`` role can be detected.
    """
    super(SnapsTestRunner, self).__init__(**kwargs)
    self.logger = logging.getLogger(__name__)
    try:
        cloud_config = os_client_config.get_config()
        self.orig_cloud = shade.OpenStackCloud(cloud_config=cloud_config)
        guid = str(uuid.uuid4())
        self.project = tenantnetwork.NewProject(
            self.orig_cloud, self.case_name, guid)
        self.project.create()
    except Exception as exc:  # pylint: disable=broad-except
        # BUG FIX: chain the original error instead of discarding it so the
        # real cause stays visible in the traceback.
        raise Exception("Cannot create user or project") from exc
    # Accept either capitalization of the admin role.
    if self.orig_cloud.get_role("admin"):
        role_name = "admin"
    elif self.orig_cloud.get_role("Admin"):
        role_name = "Admin"
    else:
        raise Exception("Cannot detect neither admin nor Admin")
    self.orig_cloud.grant_role(
        role_name, user=self.project.user.id,
        project=self.project.project.id,
        domain=self.project.domain.id)
    self.role = None
    # heat_stack_owner is required by Heat scenarios; create it on the fly
    # when the deployment does not ship it.
    if not self.orig_cloud.get_role("heat_stack_owner"):
        self.role = self.orig_cloud.create_role("heat_stack_owner")
    self.orig_cloud.grant_role(
        "heat_stack_owner", user=self.project.user.id,
        project=self.project.project.id,
        domain=self.project.domain.id)
    creds_overrides = dict(
        username=self.project.user.name,
        project_name=self.project.project.name,
        project_id=self.project.project.id,
        password=self.project.password)
    self.os_creds = kwargs.get('os_creds') or snaps_utils.get_credentials(
        overrides=creds_overrides)
    if 'ext_net_name' in kwargs:
        self.ext_net_name = kwargs['ext_net_name']
    else:
        self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
    self.netconf_override = None
    if hasattr(config.CONF, 'snaps_network_config'):
        self.netconf_override = getattr(
            config.CONF, 'snaps_network_config')
    # These flags are stored as strings in the config; compare explicitly.
    self.use_fip = (
        getattr(config.CONF, 'snaps_use_floating_ips') == 'True')
    self.use_keystone = (
        getattr(config.CONF, 'snaps_use_keystone') == 'True')
    self.flavor_metadata = getattr(
        config.CONF, 'snaps_flavor_extra_specs', None)
    self.logger.info("Using flavor metadata '%s'", self.flavor_metadata)
    self.image_metadata = None
    if hasattr(config.CONF, 'snaps_images'):
        self.image_metadata = getattr(config.CONF, 'snaps_images')
def _getClient(self):
    """Build a shade client for this provider, optionally task-managed."""
    manager = self._taskmanager if self._use_taskmanager else None
    return shade.OpenStackCloud(
        cloud_config=self.provider.cloud_config,
        manager=manager,
        **self.provider.cloud_config.config)
def __init__(self, **kwargs):
    """Initialize the check, defaulting case_name to 'connection_check'.

    The shade cloud handle is best-effort: any failure while building it
    leaves ``self.cloud`` as ``None``.
    """
    kwargs.setdefault("case_name", 'connection_check')
    super(ConnectionCheck, self).__init__(**kwargs)
    try:
        self.cloud = shade.OpenStackCloud(
            cloud_config=os_client_config.get_config())
    except Exception:  # pylint: disable=broad-except
        self.cloud = None
def check_requirements(self):
    """Skip if OpenStack Rocky or newer."""
    try:
        cloud = shade.OpenStackCloud(
            cloud_config=os_client_config.get_config())
        nova_version = functest_utils.get_nova_version(cloud)
        if nova_version > (2, 60):
            self.is_skipped = True
    except Exception:  # pylint: disable=broad-except
        # Best effort: any failure leaves the testcase enabled.
        pass
def make_shade(options=None, **kwargs):
    """Simple wrapper for getting a Shade OpenStackCloud object

    A mechanism that matches make_sdk, make_client and make_rest_client.

    :rtype: :class:`~shade.OpenStackCloud`
    """
    # Imported lazily so shade stays an optional dependency.
    import shade
    cloud_config = get_config(options=options, **kwargs)
    return shade.OpenStackCloud(cloud_config=cloud_config, **kwargs)
def setUp(self):
    """Create demo and operator shade clouds from the devstack configs."""
    super(BaseFunctionalTestCase, self).setUp()
    self.config = occ.OpenStackConfig()
    self.demo_cloud = shade.OpenStackCloud(
        cloud_config=self.config.get_one_cloud(cloud='devstack'),
        log_inner_exceptions=True)
    self.operator_cloud = shade.OperatorCloud(
        cloud_config=self.config.get_one_cloud(cloud='devstack-admin'),
        log_inner_exceptions=True)
def _getClient(self):
    """Return a shade client for this provider tagged as nodepool."""
    manager = self._taskmanager if self._use_taskmanager else None
    return shade.OpenStackCloud(
        cloud_config=self.provider.cloud_config,
        manager=manager,
        app_name='nodepool',
        app_version=version.version_info.version_string(),
        **self.provider.cloud_config.config)
def __init__(self, config_files=None, refresh=False, private=False,
             config_key=None, config_defaults=None, cloud=None,
             use_direct_get=False):
    """Build shade clouds for every configured cloud, or one named cloud.

    :param config_files: extra clouds.yaml paths appended to the defaults.
    :param refresh: invalidate each cloud's persistent cache up front.
    :param private: mark every resulting cloud as private.
    :param config_key: section name for extra config to load.
    :param config_defaults: defaults applied to the extra config section.
    :param cloud: when given, restrict the inventory to that one cloud.
    :param use_direct_get: accepted for API compatibility; not used in
        this initializer.
    :raises shade.OpenStackCloudException: when the named cloud cannot be
        resolved from the configuration.
    """
    if config_files is None:
        config_files = []
    config = os_client_config.config.OpenStackConfig(
        config_files=os_client_config.config.CONFIG_FILES + config_files)
    self.extra_config = config.get_extra_config(
        config_key, config_defaults)
    if cloud is None:
        # No cloud named: inventory spans every configured cloud.
        self.clouds = [
            shade.OpenStackCloud(cloud_config=cloud_config)
            for cloud_config in config.get_all_clouds()
        ]
    else:
        try:
            self.clouds = [
                shade.OpenStackCloud(
                    cloud_config=config.get_one_cloud(cloud))
            ]
        except os_client_config.exceptions.OpenStackConfigException as e:
            # Re-raise as shade's exception type for callers.
            raise shade.OpenStackCloudException(e)
    if private:
        for cloud in self.clouds:
            cloud.private = True
    # Handle manual invalidation of entire persistent cache
    if refresh:
        for cloud in self.clouds:
            cloud._cache.invalidate()
def setUp(self):
    """Connect demo and operator clouds named via SHADE_* env vars."""
    super(BaseFunctionalTestCase, self).setUp()
    self.config = occ.OpenStackConfig()
    demo_name = os.environ.get('SHADE_DEMO_CLOUD', 'devstack')
    self.demo_cloud = shade.OpenStackCloud(
        cloud_config=self.config.get_one_cloud(cloud=demo_name),
        log_inner_exceptions=True)
    op_name = os.environ.get('SHADE_OPERATOR_CLOUD', 'devstack-admin')
    self.operator_cloud = shade.OperatorCloud(
        cloud_config=self.config.get_one_cloud(cloud=op_name),
        log_inner_exceptions=True)
def create(self):
    """Create projects/users"""
    assert self.orig_cloud
    assert self.case_name
    # Random 30-char alphanumeric password for the new user.
    self.password = ''.join(
        random.choice(string.ascii_letters + string.digits)
        for _ in range(30))
    self.domain = self.orig_cloud.get_domain(
        name_or_id=self.orig_cloud.auth.get(
            "project_domain_name", "Default"))
    # Project name is truncated to keep the full name within limits.
    self.project = self.orig_cloud.create_project(
        name='{}-project_{}'.format(self.case_name[:18], self.guid),
        description="Created by OPNFV Functest: {}".format(self.case_name),
        domain_id=self.domain.id)
    self.__logger.debug("project: %s", self.project)
    self.user = self.orig_cloud.create_user(
        name='{}-user_{}'.format(self.case_name, self.guid),
        password=self.password,
        domain_id=self.domain.id)
    self.__logger.debug("user: %s", self.user)
    try:
        # Detect the member role under either capitalization.
        if self.orig_cloud.get_role(self.default_member):
            self.role_name = self.default_member
        elif self.orig_cloud.get_role(self.default_member.lower()):
            self.role_name = self.default_member.lower()
        else:
            raise Exception("Cannot detect {}".format(self.default_member))
    except Exception:  # pylint: disable=broad-except
        # Lookup failed entirely: create the default member role instead.
        self.__logger.info("Creating default role %s", self.default_member)
        role = self.orig_cloud.create_role(self.default_member)
        self.role_name = role.name
        self.__logger.debug("role: %s", role)
    self.orig_cloud.grant_role(
        self.role_name, user=self.user.id, project=self.project.id,
        domain=self.domain.id)
    # Point the 'envvars' cloud at the new project/user so a fresh shade
    # connection authenticates as the created identity.
    osconfig = os_client_config.config.OpenStackConfig()
    osconfig.cloud_config['clouds']['envvars'][
        'project_name'] = self.project.name
    osconfig.cloud_config['clouds']['envvars'][
        'project_id'] = self.project.id
    osconfig.cloud_config['clouds']['envvars']['username'] = self.user.name
    osconfig.cloud_config['clouds']['envvars']['password'] = self.password
    self.__logger.debug("cloud_config %s", osconfig.cloud_config)
    self.cloud = shade.OpenStackCloud(
        cloud_config=osconfig.get_one_cloud())
    self.__logger.debug("new cloud %s", self.cloud.auth)
def __init__(self, config_files=None, refresh=False, private=False):
    """Build a shade cloud object for every configured cloud.

    :param config_files: optional extra clouds.yaml paths appended to the
        os-client-config defaults. Defaults to no extra files. (BUG FIX:
        the previous ``config_files=[]`` mutable default is shared across
        all calls; ``None`` is the correct sentinel.)
    :param refresh: when true, invalidate each cloud's persistent cache.
    :param private: when true, mark every cloud as private.
    """
    if config_files is None:
        config_files = []
    config = os_client_config.config.OpenStackConfig(
        config_files=os_client_config.config.CONFIG_FILES + config_files)
    self.clouds = [
        shade.OpenStackCloud(cloud_config=cloud_config)
        for cloud_config in config.get_all_clouds()
    ]
    if private:
        for cloud in self.clouds:
            cloud.private = True
    # Handle manual invalidation of entire persistent cache
    if refresh:
        for cloud in self.clouds:
            cloud._cache.invalidate()
def enable_scheduling(topo, change_number=None):
    """Enables scheduling on hypervisors whose reason includes change_number.

    Iterates over all disabled hosts and re-enables them when the provided
    change number text is in the disabled_reason field.

    :param topo: The topology for the cloud we're scheduling for.
    :param change_number: The change number to re-enable for.
    :return: A tuple with list of successes, failed, last exception
    """
    succeeded = []
    failed = []
    error = None
    if not topo:
        return succeeded, failed, \
            'Attempted re-enable hypervisors for scheduling without ' \
            'providing a valid topology. Bailing out. Fix it!'
    # Get a list of all compute services for this cloud
    config = topo.render('os_client_config')
    cloud_config = config.get_one_cloud(cloud=topo.cloud.canonical_name)
    cloud = shade.OpenStackCloud(cloud_config=cloud_config)
    nova_client = cloud.nova_client
    services = nova_client.services.list(binary="nova-compute")
    # Find all disabled hosts that have our change_number text
    matched_services = []
    for service in services:
        if service.status == 'disabled' and service.disabled_reason and \
                change_number in service.disabled_reason:
            matched_services.append(service)
    if not matched_services:
        return succeeded, failed, \
            'Did not find any disabled HVs to re-enable. skipping.'
    # Re-enable the services.
    for service in matched_services:
        try:
            nova_client.services.enable(service.host, 'nova-compute')
            succeeded.append(service.host)
        except Exception as ex:  # pylint: disable=broad-except
            # BUG FIX: ``Exception.message`` is Python-2-only and raises
            # AttributeError on Python 3; str(ex) is portable.
            error = str(ex)
            failed.append(service.host)
    return succeeded, failed, error
def __init__(self, config_files=None, refresh=False):
    """Build a shade cloud object for every configured cloud.

    :param config_files: optional extra config file paths appended to the
        os-client-config defaults. Defaults to no extra files. (BUG FIX:
        the previous ``config_files=[]`` mutable default is shared across
        all calls; ``None`` is the correct sentinel.)
    :param refresh: when true, invalidate each cloud's persistent cache.
    """
    if config_files is None:
        config_files = []
    config = os_client_config.config.OpenStackConfig(
        config_files=os_client_config.config.CONFIG_FILES + config_files)
    # The cache settings are identical for every cloud; compute them once
    # instead of once per configured cloud.
    cache_interval = config.get_cache_max_age()
    cache_class = config.get_cache_class()
    cache_arguments = config.get_cache_arguments()
    self.clouds = [
        shade.OpenStackCloud(
            cloud=f.name,
            cache_interval=cache_interval,
            cache_class=cache_class,
            cache_arguments=cache_arguments,
            **f.config)
        for f in config.get_all_clouds()
    ]
    # Handle manual invalidation of entire persistent cache
    if refresh:
        for cloud in self.clouds:
            cloud._cache.invalidate()
def main():
    """Print the next free node index of a cluster as a tiny JSON-ish dict.

    Reads the cloud/VNF/project/cluster names from the environment, lists
    the servers whose names start with the cluster prefix, and prints the
    lowest integer index not yet used by an existing node.
    """
    # get environment variables
    cloud_name = os.environ['OS_CLOUD']
    vnf_name = os.environ['OS_VNF_NAME']
    project_name = os.environ['OS_PROJECT_NAME']
    cluster_name = os.environ['OS_CLUSTER_NAME']
    # Context: central administration
    try:
        cloud_config = os_client_config.OpenStackConfig().get_one_cloud(
            cloud_name)
        cloud = shade.OpenStackCloud(cloud_config=cloud_config)
    except Exception as exc:
        print(exc)
        sys.exit(1)
    # Get servers
    data = cloud.list_servers(detailed=False)
    # dictionary of all found servers
    nodes = {}
    # filter servers belonging to this cluster
    cluster_prefix = vnf_name + "_" + project_name + "_" + cluster_name
    for entity in data:
        name = entity["name"]
        if name.startswith(cluster_prefix):
            # index is the last substring after the last "_" separator
            index = name.split("_")[-1]
            # availability zone
            zone = entity["location"].zone
            nodes[index] = {"zone": zone}
    # find next available slot; membership on the dict itself is O(1)
    # (``in nodes.keys()`` built a needless view on every iteration).
    index = 0
    while str(index) in nodes:
        index += 1
    print("{'node':'" + str(index) + "'}")
def main():
    """Audit placement allocations and report instances with duplicates.

    Tallies resource-provider allocations per instance, prints every
    instance that holds allocations on more than one provider, optionally
    dumps the findings to JSON and (with --repair) keeps only the
    allocation on the instance's current hypervisor.
    """
    args = parse_args()
    logging.basicConfig(level=args.loglevel)
    cloud = cloud_config.get_one_cloud(argparse=args)
    cloud_api = shade.OpenStackCloud(cloud_config=cloud)
    placement = Placement(cloud_api)
    # tally maps instance_uuid -> {provider_uuid: allocation}.
    tally = {}
    providers = {}
    LOG.info('getting resource allocations')
    for provider in placement.list_resource_providers():
        providers[provider['uuid']] = provider
        allocations = (
            placement.get_resource_provider_allocations(provider['uuid']))
        for instance_uuid, allocation in allocations['allocations'].items():
            if instance_uuid not in tally:
                tally[instance_uuid] = {}
            tally[instance_uuid][provider['uuid']] = allocation
    multiple = {}
    LOG.info('auditing allocations')
    for instance_uuid, allocations in tally.items():
        # --limit restricts the audit to the listed instance UUIDs.
        if args.limit and instance_uuid not in args.limit:
            continue
        if len(allocations) > 1:
            LOG.info('{} has multiple allocations'.format(instance_uuid))
            instance = cloud_api.get_server(instance_uuid, all_projects=True)
            if instance:
                current_hypervisor = instance.get(
                    'OS-EXT-SRV-ATTR:hypervisor_hostname')
            else:
                # Instance no longer exists; no allocation can be "active".
                current_hypervisor = None
            multiple[instance_uuid] = {
                'uuid': instance_uuid,
                'active': current_hypervisor,
                'allocations': [],
            }
            for provider_uuid, allocation in allocations.items():
                provider = providers[provider_uuid]
                # An allocation is "active" when its provider matches the
                # hypervisor the instance currently runs on.
                if provider['name'] == current_hypervisor:
                    active = True
                else:
                    active = False
                multiple[instance_uuid]['allocations'].append({
                    'provider': provider,
                    'active': active,
                    'allocation': allocation,
                })
    if args.output_json:
        with open(args.output_json, 'w') as fd:
            json.dump(multiple, fd, indent=2)
    # Human-readable report: '*' marks the active allocation.
    for instance_uuid, info in multiple.items():
        print(instance_uuid)
        for allocation in info['allocations']:
            mark = '*' if allocation['active'] else '-'
            print('{} {}'.format(mark, allocation['provider']['name']))
    if args.repair:
        for instance_uuid, info in multiple.items():
            for allocation in info['allocations']:
                if allocation['active']:
                    LOG.warning(
                        'setting allocation for {}'.format(instance_uuid))
                    placement.set_allocation(instance_uuid, allocation)
                    break
def main():
    """Boot a server described on the command line and print its DNS info.

    BUG FIX: the original used Python 2 ``print`` statements, which are
    syntax errors on Python 3 (the rest of this codebase uses print()).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("name", help="server name")
    parser.add_argument("--cloud", dest="cloud", required=True,
                        help="cloud name")
    parser.add_argument("--region", dest="region", help="cloud region")
    parser.add_argument("--flavor", dest="flavor", default='1GB',
                        help="name (or substring) of flavor")
    parser.add_argument("--image", dest="image",
                        default="Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)",
                        help="image name")
    parser.add_argument("--volume", dest="volume",
                        help="UUID of volume to attach to the new server.",
                        default=None)
    parser.add_argument("--mount-path", dest="mount_path",
                        help="Path to mount cinder volume at.",
                        default=None)
    parser.add_argument("--fs-label", dest="fs_label",
                        help="FS label to use when mounting cinder volume.",
                        default=None)
    parser.add_argument("--boot-from-volume", dest="boot_from_volume",
                        help="Create a boot volume for the server and use it.",
                        action='store_true', default=False)
    parser.add_argument("--keep", dest="keep",
                        help="Don't clean up or delete the server on error.",
                        action='store_true', default=False)
    parser.add_argument("--verbose", dest="verbose", default=False,
                        action='store_true',
                        help="Be verbose about logging cloud actions")
    parser.add_argument("--network", dest="network", default=None,
                        help="network label to attach instance to")
    parser.add_argument("--config-drive", dest="config_drive",
                        help="Boot with config_drive attached.",
                        action='store_true', default=False)
    parser.add_argument("--az", dest="availability_zone", default=None,
                        help="AZ to boot in.")
    options = parser.parse_args()
    shade.simple_logging(debug=options.verbose)
    cloud_kwargs = {}
    if options.region:
        cloud_kwargs['region_name'] = options.region
    cloud_config = os_client_config.OpenStackConfig().get_one_cloud(
        options.cloud, **cloud_kwargs)
    cloud = shade.OpenStackCloud(cloud_config)
    flavor = cloud.get_flavor(options.flavor)
    if flavor:
        print("Found flavor", flavor.name)
    else:
        print("Unable to find matching flavor; flavor list:")
        for i in cloud.list_flavors():
            print(i.name)
        sys.exit(1)
    image = cloud.get_image_exclude(options.image, 'deprecated')
    if image:
        print("Found image", image.name)
    else:
        print("Unable to find matching image; image list:")
        for i in cloud.list_images():
            print(i.name)
        sys.exit(1)
    server = build_server(cloud, options.name, image, flavor,
                          options.volume, options.keep, options.network,
                          options.boot_from_volume, options.config_drive,
                          options.mount_path, options.fs_label,
                          options.availability_zone)
    dns.print_dns(cloud, server)
def main():
    """Boot a server, refresh DNS/inventory caches and expand groups.

    BUG FIX: the original used Python 2 ``print`` statements, which are
    syntax errors on Python 3 (the rest of this codebase uses print()).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("name", help="server name")
    parser.add_argument("--cloud", dest="cloud", required=True,
                        help="cloud name")
    parser.add_argument("--region", dest="region", help="cloud region")
    parser.add_argument("--flavor", dest="flavor", default='1GB',
                        help="name (or substring) of flavor")
    parser.add_argument("--image", dest="image",
                        default="Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)",
                        help="image name")
    parser.add_argument("--volume", dest="volume",
                        help="UUID of volume to attach to the new server.",
                        default=None)
    parser.add_argument("--mount-path", dest="mount_path",
                        help="Path to mount cinder volume at.",
                        default=None)
    parser.add_argument("--fs-label", dest="fs_label",
                        help="FS label to use when mounting cinder volume.",
                        default=None)
    parser.add_argument("--boot-from-volume", dest="boot_from_volume",
                        help="Create a boot volume for the server and use it.",
                        action='store_true', default=False)
    parser.add_argument("--keep", dest="keep",
                        help="Don't clean up or delete the server on error.",
                        action='store_true', default=False)
    parser.add_argument("--verbose", dest="verbose", default=False,
                        action='store_true',
                        help="Be verbose about logging cloud actions")
    parser.add_argument("--network", dest="network", default=None,
                        help="network label to attach instance to")
    # NOTE(review): default=True with store_true means the flag can never
    # be switched off; preserved as-is since callers may rely on it.
    parser.add_argument("--config-drive", dest="config_drive",
                        help="Boot with config_drive attached.",
                        action='store_true', default=True)
    parser.add_argument("--az", dest="availability_zone", default=None,
                        help="AZ to boot in.")
    options = parser.parse_args()
    shade.simple_logging(debug=options.verbose)
    cloud_kwargs = {}
    if options.region:
        cloud_kwargs['region_name'] = options.region
    cloud_config = os_client_config.OpenStackConfig().get_one_cloud(
        options.cloud, **cloud_kwargs)
    cloud = shade.OpenStackCloud(cloud_config)
    flavor = cloud.get_flavor(options.flavor)
    if flavor:
        print("Found flavor", flavor.name)
    else:
        print("Unable to find matching flavor; flavor list:")
        for i in cloud.list_flavors():
            print(i.name)
        sys.exit(1)
    image = cloud.get_image_exclude(options.image, 'deprecated')
    if image:
        print("Found image", image.name)
    else:
        print("Unable to find matching image; image list:")
        for i in cloud.list_images():
            print(i.name)
        sys.exit(1)
    server = build_server(cloud, options.name, image, flavor,
                          options.volume, options.keep, options.network,
                          options.boot_from_volume, options.config_drive,
                          options.mount_path, options.fs_label,
                          options.availability_zone)
    dns.print_dns(cloud, server)
    # Zero the ansible inventory cache so that next run finds the new server
    inventory_cache = '/var/cache/ansible-inventory/ansible-inventory.cache'
    if os.path.exists(inventory_cache):
        with open(inventory_cache, 'w'):
            pass
    # Remove cloud and region from the environment to work around a bug in occ
    expand_env = os.environ.copy()
    expand_env.pop('OS_CLOUD', None)
    expand_env.pop('OS_REGION_NAME', None)
    print(subprocess.check_output(
        '/usr/local/bin/expand-groups.sh',
        env=expand_env,
        stderr=subprocess.STDOUT))
def _search(self, thing, filters, only_private=True, target_search=True,
            cloud='', replier=None, expand_images=True):
    """Search production cloud topologies for servers matching filters.

    :param thing: search subject (not used directly in this method).
    :param filters: server filters passed to shade's list_servers.
    :param only_private: restrict the search to private clouds.
    :param target_search: stop at the first cloud with any match.
    :param cloud: optional cloud name to restrict the search to.
    :param replier: callable used for progress messages (defaults to a
        no-op replier).
    :param expand_images: replace each server's image stub with the full
        image record from that cloud.
    :returns: tuple of (servers, number of clouds searched, names of
        clouds with matches).
    :raises excp.Dying: when the bot is shutting down mid-search.
    """
    if replier is None:
        replier = _dummy_replier
    replier("Loading all"
            " the %s, please wait..." % random.choice(cu.CLOUDS))
    topos = []
    # Collect candidate topologies: production only, optionally private
    # only, optionally matching a single requested cloud name.
    for env_name in self.bot.topo_loader.env_names:
        topo = self.bot.topo_loader.load_one(env_name)
        if topo.cloud.kind != 'production':
            continue
        if only_private and topo.cloud.type != 'private':
            continue
        if (cloud and
                cloud not in (topo.cloud.name, topo.cloud.canonical_name)):
            continue
        topos.append(topo)
    found = []
    searched_clouds = 0
    replier("Searching %s clouds, please wait..." % len(topos))
    if topos:
        configs = {}
        for topo in topos:
            config = topo.render('os_client_config')
            configs[topo.cloud.canonical_name] = config
        # Deterministic order so repeated searches behave the same.
        for cloud_name in sorted(configs.keys()):
            if self.dead.is_set():
                replier("I am dying, try again later...")
                raise excp.Dying
            replier("Searching `%s`..." % cloud_name)
            searched_clouds += 1
            config = configs[cloud_name]
            cloud_config = config.get_one_cloud(cloud=cloud_name)
            cloud = shade.OpenStackCloud(cloud_config=cloud_config)
            found_servers = cloud.list_servers(detailed=False,
                                               bare=False,
                                               all_projects=True,
                                               filters=filters)
            if found_servers:
                found.append((cloud, cloud_name, found_servers))
                if target_search:
                    break  # Found what we wanted. Skip other clouds
    servers = []
    found_clouds = []
    for cloud, cloud_name, found_servers in found:
        if self.dead.is_set():
            replier("I am dying, try again later...")
            raise excp.Dying
        found_clouds.append(cloud_name)
        if expand_images:
            # Build an id->image map once per cloud, then swap each
            # server's image stub for the full record when available.
            cloud_images = cloud.list_images(filter_deleted=False)
            cloud_images_by_id = dict(
                (image.id, image) for image in cloud_images)
            for s in found_servers:
                try:
                    s_image_id = s.image.id
                    s.image = cloud_images_by_id[s_image_id]
                except (KeyError, AttributeError):
                    # Image gone or server has no image; keep the stub.
                    pass
        servers.extend(found_servers)
    return servers, searched_clouds, found_clouds
def main():
    """Entry point: parse args, build a shade cloud, run the subcommand.

    Wires up --version, verbosity and progress flags, registers the
    os-client-config and subcommand arguments, configures logging on
    stderr, then dispatches to the chosen subcommand's handler.
    """
    parser = argparse.ArgumentParser()
    config = os_client_config.OpenStackConfig()
    version_info = pkg_resources.get_distribution('os-downpour').version
    parser.add_argument(
        '--version', action='version',
        version='%(prog)s ' + version_info,
        help='show the program version and exit',
    )
    verbose_group = parser.add_mutually_exclusive_group()
    verbose_group.add_argument(
        '--verbose', '-v',
        action='count',
        dest='verbose_level',
        default=1,
        help='Increase verbosity of output. Can be repeated.',
    )
    verbose_group.add_argument(
        '-q', '--quiet',
        action='store_const',
        dest='verbose_level',
        const=0,
        help='Suppress output except warnings and errors.',
    )
    progress_group = parser.add_mutually_exclusive_group()
    progress_group.add_argument(
        '--progress', '-p',
        default=True,
        action='store_true',
        help='show download progress',
    )
    progress_group.add_argument(
        '--no-progress',
        dest='progress',
        action='store_false',
        help='do not show download progress',
    )
    # Let os-client-config add its own cloud-selection arguments.
    config.register_argparse_arguments(parser, sys.argv, None)
    subparsers = parser.add_subparsers(title='commands')
    export.register_command(subparsers)
    query.register_command(subparsers)
    args = parser.parse_args(sys.argv[1:])
    cloud_config = config.get_one_cloud(options=(args, []))
    cloud = shade.OpenStackCloud(cloud_config=cloud_config)
    # Root logger collects everything; the console handler filters by the
    # chosen verbosity (WARNING / INFO / DEBUG).
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.DEBUG)
    console = logging.StreamHandler(sys.stderr)
    console_level = {
        0: logging.WARNING,
        1: logging.INFO,
        2: logging.DEBUG,
    }.get(args.verbose_level, logging.DEBUG)
    console.setLevel(console_level)
    formatter = logging.Formatter('[%(asctime)s] %(message)s')
    console.setFormatter(formatter)
    root_logger.addHandler(console)
    # Dispatch to the subcommand handler registered by register_command.
    return args.func(cloud, config, args)
def _set_user_cloud(self, **kwargs):
    """Point self.user_cloud at the demo cloud described by kwargs."""
    self.user_cloud = shade.OpenStackCloud(
        cloud_config=self.config.get_one_cloud(
            cloud=self._demo_name, **kwargs))
def get_cloud():
    """Return a shade cloud for the 'cloud_v3_api' named cloud."""
    return shade.OpenStackCloud(cloud='cloud_v3_api')
# step-2 (continued): register the server with ServerPilot and read its
# id/apikey back from the JSON response.
# NOTE(review): `session`, `server_endpoint` and `server_info` come from an
# earlier part of this script not shown here — confirm against the full file.
response_raw = session.post(server_endpoint, json.dumps(server_info))
print(response_raw.content)
response_json = json.loads(response_raw.content)

# step-3: cloud-init script that installs the ServerPilot agent on first
# boot, bound to the server id/apikey returned above.
cloud_init = '''#!/bin/bash
sudo apt-get update && sudo apt-get -y install wget ca-certificates && \
sudo wget -nv -O serverpilot-installer \
https://download.serverpilot.io/serverpilot-installer && \
sudo sh serverpilot-installer \
--server-id={serverid} \
--server-apikey={serverapikey}
'''.format(serverid=response_json['data']['id'],
           serverapikey=response_json['data']['apikey'])

# step-4: boot parameters for the new server.
image_name = 'Ubuntu-16.04'
flavor_id = '100'
key_name = 'KEY NAME GOES HERE'  # placeholder: replace with a real keypair

# step-5: boot the instance with the cloud-init userdata attached.
conn = shade.OpenStackCloud()
image = conn.get_image(image_name)
conn.create_server(image=image, flavor=flavor_id,
                   name=server_info['name'], network='public',
                   userdata=cloud_init, key_name=key_name)
def disable_scheduling(topo, hypervisors, change_number='Unknown',
                       task_number='Unknown', reason=None,
                       when_disabled=None):
    """Disables scheduling on the provided hypervisors.

    Iterates over all provided hypervisors and disables them.

    :param topo: A topology to use for disabling
    :param hypervisors: A iterable collection of hypervisor fqdns.
    :param change_number: The change this tied to (default Unknown)
    :param task_number: The task this was done for (default Unknown)
    :param reason: A short reason why this is being disabled.
    :param when_disabled: Datetime when this disabling occurred
        (default: now, computed at call time)
    :return: A tuple with list of successes, failed, last exception
    """
    succeeded = []
    failed = []
    error = None
    # BUG FIX: the default used to be ``when_disabled=du.get_now()`` in the
    # signature, which is evaluated once at import time — every call using
    # the default recorded the process start time. Compute it per call.
    if when_disabled is None:
        when_disabled = du.get_now()
    # Sanity checks
    if not hypervisors:
        return succeeded, failed, \
            'Attempted disable hypervisors for scheduling without ' \
            'providing a collection of hypervisors to match. Skipping.'
    if not topo:
        return succeeded, failed, \
            'Attempted disable hypervisor for scheduling without ' \
            'providing a valid cloud topo. Skipping.'
    # Get a list of all compute services for this cloud
    config = topo.render('os_client_config')
    cloud_config = config.get_one_cloud(cloud=topo.cloud.canonical_name)
    cloud = shade.OpenStackCloud(cloud_config=cloud_config)
    nova_client = cloud.nova_client
    services = nova_client.services.list(binary="nova-compute")
    # Find common list between services and provided hvs (not already
    # disabled)
    hv_set = set(hypervisors)
    matched_services = []
    for service in services:
        if service.host in hv_set and service.status == 'enabled':
            matched_services.append(service)
    if not matched_services:
        return succeeded, failed, \
            'Did not find any enabled HVs to disable. skipping.'
    # Disable the services.
    reason = "Disabled on %s via automation for change (%s), ctask(%s) " \
             "for reason: %s" % (when_disabled.strftime('%Y-%m-%d'),
                                 change_number, task_number,
                                 reason or 'No reason provided')
    for service in matched_services:
        try:
            nova_client.services.disable_log_reason(service.host,
                                                    'nova-compute',
                                                    reason=reason)
            succeeded.append(service.host)
        except Exception as ex:  # pylint: disable=broad-except
            LOG.exception('Exception while disabling host scheduling')
            failed.append(service.host)
            # BUG FIX: str(ex) replaces Python-2-only ``Exception.message``.
            error = str(ex)
    return succeeded, failed, error
def get_cloud():
    """Return the process-wide shade cloud, creating it on first use.

    The cloud name is taken from the first command-line argument.
    """
    global cloud
    if cloud:
        return cloud
    cloud = shade.OpenStackCloud(cloud=sys.argv[1])
    return cloud
def _set_user_cloud(self, **kwargs):
    """Point self.user_cloud at the demo cloud described by kwargs."""
    self.user_cloud = shade.OpenStackCloud(
        cloud_config=self.config.get_one_cloud(
            cloud=self._demo_name, **kwargs),
        log_inner_exceptions=True)
def main():
    """Dump a tenant's topology (groups, networks, nodes, volumes) as text.

    Connects twice: first as the central admin cloud to look up the tenant
    project, then as the tenant itself to list its resources, which are
    printed in a YAML-like layout.
    """
    global nodes
    global password
    global cloud_name
    global project_name
    # get environment variables
    password = os.environ['OS_PASSWORD']
    cloud_name = os.environ['OS_CLOUD']
    vnf_name = os.environ['OS_VNF_NAME']
    project_name = os.environ['OS_PROJECT_NAME']
    # Context: central administration
    try:
        cloud_config = os_client_config.OpenStackConfig().get_one_cloud(
            cloud_name)
        cloud = shade.OpenStackCloud(cloud_config=cloud_config)
    except Exception as exc:
        print(exc)
        sys.exit(1)
    # Get project
    data = cloud.get_project(vnf_name + "_" + project_name)
    tenant = Tenant(data)
    # Context: tenant administration (re-connect as the tenant cloud)
    try:
        cloud_config = os_client_config.OpenStackConfig().get_one_cloud(
            vnf_name + "_" + project_name)
        cloud = shade.OpenStackCloud(cloud_config=cloud_config)
    except Exception as exc:
        print(exc)
        sys.exit(1)
    # # Get flavors
    # data = cloud.list_flavors()
    # flavors = Flavors(data)
    # Get security groups
    data = cloud.list_security_groups()
    security_groups = SecurityGroups(data)
    # Get networks
    data = cloud.list_networks()
    networks = Networks(data)
    # Get subnets
    data = cloud.list_subnets()
    subnets = Subnets(networks, data)
    # Get servers
    data = cloud.list_servers(detailed=True)
    nodes = Nodes(data)
    # Get ports
    # filters = { "tenant_id": tenant.id }
    data = cloud.list_ports()
    ports = Ports(data)
    # Get volumes
    # filters = { "tenant_id": tenant.id }
    data = cloud.list_volumes()
    volumes = Volumes(data)
    # ----- ouput data ----------------------------------------------------
    print("---")
    print("tenant:")
    print(" name: {}".format(tenant.name))
    print(" description: {}".format(tenant.description))
    print(" password: {}".format(tenant.password))
    print(" cloud: {}".format(tenant.cloud))
    print()
    print("security_groups:")
    for group_id, group in security_groups.groups.items():
        print("- {}".format(group.name))
    print()
    # Rules with a remote IP prefix are "external" (cidr mode)...
    print("external_security_group_rules:")
    for group_id, group in security_groups.groups.items():
        for rule in group.rules:
            if rule.prefix:
                print("- group: {}".format(group.name))
                print(" direction: {}".format(rule.direction))
                print(" ethertype: {}".format(rule.type))
                print(" protocol: {}".format(rule.protocol))
                print(" min: {}".format(rule.min))
                print(" max: {}".format(rule.max))
                print(" mode: {}".format("cidr"))
                print(" remote_ip_prefix: {}".format(rule.prefix))
    print()
    # ...rules with a remote group are "internal" (group mode).
    print("internal_security_group_rules:")
    for group_id, group in security_groups.groups.items():
        for rule in group.rules:
            if rule.group:
                print("- group: {}".format(group.name))
                print(" direction: {}".format(rule.direction))
                print(" ethertype: {}".format(rule.type))
                print(" protocol: {}".format(rule.protocol))
                print(" min: {}".format(rule.min))
                print(" max: {}".format(rule.max))
                print(" mode: {}".format("group"))
                print(" remote_group: {}".format(rule.group))
    print()
    print("networks:")
    for network_id, network in networks.networks.items():
        print("- name: {}".format(network.name))
        if network.ipv4:
            print(" ipv4:")
            print(" cidr: {}".format(network.ipv4.cidr))
        if network.ipv6:
            print(" ipv6:")
            print(" cidr: {}".format(network.ipv6.cidr))
    print()
    print("nodes:")
    for node_id, node in nodes.nodes.items():
        print("- name: {}".format(node.name))
        print(" availability_zone: {}".format(node.availability_zone))
        print(" flavor: {}".format(node.flavor))
        print(" image: {}".format(node.image))
        print(" key_name: {}".format(node.key_name))
        if not node.nics:
            print(" nics: []")
        else:
            print(" nics:")
            for nic in node.nics:
                print(" - port-name: {}".format(nic))
    print()
    print("volumes:")
    for volume_id, volume in volumes.volumes.items():
        print("- name: {}".format(volume.name))
        print(" server: {}".format(volume.server))
        print(" type: {}".format(volume.type))
        print(" size: {}".format(volume.size))
        print(" device: {}".format(volume.device))
    print()
def _getClient(self):
    """Return a shade client for this provider, managed by self."""
    provider_config = self.provider.cloud_config
    return shade.OpenStackCloud(cloud_config=provider_config,
                                manager=self,
                                **provider_config.config)
def main():
    """The main program.

    Builds an inventory of hypervisors and their instances, then computes a
    live-migration plan that balances memory fullness across hypervisors to
    within a tolerance, printing the plan as `openstack server migrate`
    commands.

    BUG FIX: the hypervisor-listing error handler used a bare ``except:``,
    which also swallows SystemExit/KeyboardInterrupt; narrowed to
    ``except Exception``.
    """
    global VERBOSE
    global DEBUG
    baremode = False  # If true, only output the openstack migration commands
    cloud = shade.OpenStackCloud()  # Cloud connection object
    HypervisorDict = {}  # Store osHyperVisor objects here
    # List of actions to take, in tuple form:
    # (srchyper, desthyper, instance_id, memsize)
    PlanList = []
    Flavors = flavorCache()  # A cache of flavor definitions
    # How close in fullness percentage the hypervisors should be
    tolerance = 0.05
    # Parse command line options
    myargs = parseArgs()
    tolerance = myargs.tolerance
    if myargs.verbose:
        print("## Verbose mode selected")
        VERBOSE = True
    else:
        VERBOSE = False
    if myargs.debug:
        print("## Debug mode selected")
        DEBUG = True
    else:
        DEBUG = False
    baremode = bool(myargs.bare)
    # Get basic information about our hypervisors
    printdebug("Limiting hypervisor selection to: {}".format(myargs.compute))
    try:
        printverbose("Obtaining list of hypervisors")
        getHypervisors(cloud, HypervisorDict, limithosts=myargs.compute)
    except Exception:  # narrowed from a bare except
        sys.exit(
            "## Unable to retrive list of cluster hypervisors. Ensure you "
            "are connecting as a cloud admin and try again.")
    if len(HypervisorDict) < 1:
        sys.exit("## No hypervisors matched. Aborting.")
    ## Populate VM data for each hypervisor
    printverbose("Inventorying instances")
    for hyper in HypervisorDict:
        hname = HypervisorDict[hyper].getName()
        printverbose("Finding all instances on {}".format(hname))
        slist = cloud.list_servers(all_projects=True, bare=True,
                                   filters={'host': hname})
        for s in slist:
            sid = s['id']
            sname = s['name']
            sflavor = s['flavor']['id']
            (sram, svcpus, sdisk) = getFlavorInfo(cloud, Flavors, sflavor)
            printdebug("Adding {} to {}'s instance list".format(sname, hname))
            # Don't modify hypervisor memory; we already have a total
            HypervisorDict[hyper].addInstance(sid, name=sname, ram=sram,
                                              vcpus=svcpus, disk=sdisk,
                                              modifymemory=False)
    ## Iteratively pick a random VM from the most full hypervisor to move to
    ## the least full; repeat until all hypervisors are within tolerance.
    printverbose("Generating migration plan")
    (smallesthyper, smallestpct) = getEmptiestHyperMem(HypervisorDict)
    (biggesthyper, biggestpct) = getFullestHyperMem(HypervisorDict)
    while getPctDiff(biggestpct, smallestpct) > tolerance:
        printdebug(
            "Hypervisor spread is {0:3.1f} percent, looking for VM to move "
            "from {1} to {2}...".format(
                getPctDiff(biggestpct, smallestpct) * 100,
                biggesthyper, smallesthyper))
        # Find an instance to move
        bighypercfg = HypervisorDict[biggesthyper]  # osHypervisor object
        smallhypercfg = HypervisorDict[smallesthyper]
        vmtomove = bighypercfg.getRandInst()  # Is VM UUID
        vmtomovemem = bighypercfg.getInstRam(vmtomove)
        printdebug("Found {} ({} MB)".format(vmtomove, vmtomovemem))
        # Move instance from fullest to newest node
        # TODO: retain more than UUID and memory size.
        bighypercfg.rmInstance(vmtomove, modifymemory=True)
        smallhypercfg.addInstance(vmtomove, ram=vmtomovemem,
                                  modifymemory=True)
        # Record to plan for later output
        PlanList.append((biggesthyper, smallesthyper, vmtomove, vmtomovemem))
        # Update biggest/smallest and try again
        (smallesthyper, smallestpct) = getEmptiestHyperMem(HypervisorDict)
        (biggesthyper, biggestpct) = getFullestHyperMem(HypervisorDict)
    ## Output plan and quit
    printverbose("Plan calculated. Output follows:")
    for mig in PlanList:
        # mig: (srchyper, desthyper, instance_id, memsize)
        if not baremode:
            print("# migrate {0} MB instance from {1} to {2}".format(
                mig[3], mig[0], mig[1]))
        print("openstack server migrate --live {0} --wait {1}".format(
            mig[1], mig[2]))
    if not baremode:
        print("### Plan Summary:")
        print("### Total migrations: {0}".format(len(PlanList)))
        print(getMigSummTable(HypervisorDict))