def create_keypair(name, public_key=None, private_key=None, fail_ok=False,
                   con_ssh=None, auth_info=Tenant.get('admin')):
    """
    Create a new keypair via 'openstack keypair create'

    Args:
        name (str): keypair name to create
        public_key (str|None): existing public key file path to use
        private_key (str|None): file path to save private key
        fail_ok (bool): whether to tolerate a rejected cli
        con_ssh (SSHClient):
        auth_info (dict):

    Returns (tuple):
        (0, <name>)     -- keypair created successfully
        (1, <std_err>)  -- cli rejected; only returned if fail_ok=True
    """
    # Build the optional flags first, then append the quoted keypair name.
    opts = common.parse_args({'--public-key': public_key,
                              '--private-key': private_key})
    cmd_args = '{} "{}"'.format(opts, name)

    LOG.info("Creating keypair with args: {}".format(cmd_args))
    code, out = cli.openstack('keypair create', cmd_args, ssh_client=con_ssh,
                              fail_ok=fail_ok, auth_info=auth_info)
    if code > 0:
        return 1, out

    LOG.info("Keypair {} created successfully".format(name))
    return 0, name
def get_aggregated_measures(field='value', resource_type=None, metrics=None,
                            start=None, stop=None, overlap=None, refresh=None,
                            resource_ids=None, extra_query=None, fail_ok=False,
                            auth_info=Tenant.get('admin'), con_ssh=None):
    """
    Get measurements via 'openstack metric measures aggregation'

    Args:
        field (str): header of a column
        resource_type (str|None): used in --resource-type <resource_type>
        metrics (str|list|tuple|None): used in --metric <metric1> [metric2 ...]
        start (str|None): used in --start <start>
        stop (str|None): used in --stop <stop>
        refresh (bool): used in --refresh
        overlap (str|None): overlap percentage. used in
            --needed-overlap <overlap>
        resource_ids (str|list|tuple|None): used in
            --query "id=<resource_id1>[ or id=<resource_id2> ...]"
        extra_query (str|None): used in --query <extra_query>
        fail_ok:
        auth_info:
        con_ssh:

    Returns (tuple):
        (0, <values>(list))  -- values under given field header
        (1, <std_err>)       -- cli rejected; only returned if fail_ok=True
    """
    LOG.info("Getting aggregated measurements...")
    args = common.parse_args({
        'resource-type': resource_type,
        'metric': metrics,
        'start': start,
        'stop': stop,
        'needed-overlap': overlap,
        'refresh': refresh,
    }, vals_sep=' ')

    # Assemble the --query value: resource-id clauses are OR'ed together,
    # then AND'ed with any extra query string.
    query_parts = []
    if resource_ids:
        ids = [resource_ids] if isinstance(resource_ids, str) else resource_ids
        query_parts.append(' or '.join('id={}'.format(res_id)
                                       for res_id in ids))
    if extra_query:
        query_parts.append('{}'.format(extra_query))
    if query_parts:
        args += ' --query "{}"'.format(' and '.join(query_parts))

    code, out = cli.openstack('metric measures aggregation', args,
                              ssh_client=con_ssh, fail_ok=fail_ok,
                              auth_info=auth_info)
    if code > 0:
        return 1, out

    return 0, table_parser.get_values(table_parser.table(out), field)
def get_role_assignments(field='Role', names=True, role=None, user=None,
                         project=None, user_domain=None, group=None,
                         group_domain=None, domain=None, project_domain=None,
                         inherited=None, effective_only=None, con_ssh=None,
                         auth_info=Tenant.get('admin')):
    """
    Get values from 'openstack role assignment list' table

    Args:
        field (str|list|tuple): role assignment table header to determine
            which values to return
        names (bool): whether to display role assignment with name
            (default is ID)
        role (str): an existing role from openstack role list
        project (str): tenant name. When unset, the primary tenant name
            will be used
        user (str): an existing user that belongs to given tenant
        domain (str): Include <domain> (name or ID)
        group (str): Include <group> (name or ID)
        group_domain (str): Domain the group belongs to (name or ID). This can
            be used in case collisions between group names exist.
        project_domain (str): Domain the project belongs to (name or ID). This
            can be used in case collisions between project names exist.
        user_domain (str): Domain the user belongs to (name or ID). This can
            be used in case collisions between user names exist.
        inherited (bool): Specifies if the role grant is inheritable to the
            sub projects
        effective_only (bool): Whether to show effective roles only
        con_ssh (SSHClient): active controller ssh session
        auth_info (dict): auth info to use to executing the add role cli

    Returns (list): list of values; empty list when nothing matched
    """
    args = common.parse_args({
        'role': role,
        'user': user,
        'project': project,
        'domain': domain,
        'group': group,
        'group-domain': group_domain,
        'project-domain': project_domain,
        'user-domain': user_domain,
        'names': True if names else None,
        'effective': True if effective_only else None,
        'inherited': True if inherited else None,
    })

    output = cli.openstack('role assignment list', args, ssh_client=con_ssh,
                           auth_info=auth_info)[1]
    role_assignment_tab = table_parser.table(output)

    # An empty headers list means the cli returned no table at all, i.e.
    # no assignment matched the given filters.
    if not role_assignment_tab['headers']:
        LOG.info("No role assignment is found with criteria: {}".format(args))
        return []

    return table_parser.get_multi_values(role_assignment_tab, field)
def set_flavor(flavor, project=None, project_domain=None, description=None,
               no_property=None, con_ssh=None, auth_info=Tenant.get('admin'),
               fail_ok=False, **properties):
    """
    Set flavor with given parameters

    Args:
        flavor (str): id of a flavor
        project (str)
        project_domain (str)
        description (str)
        no_property (bool)
        con_ssh (SSHClient):
        auth_info (dict):
        fail_ok (bool):
        **properties: extra specs to set.
            e.g., **{"hw:mem_page_size": "2048"}

    Returns (tuple): (rtn_code (int), message (str))
        (0, <flavor>): required extra spec(s) added successfully
        (1, <stderr>): add extra spec cli rejected

    Raises:
        ValueError: if no argument at all is provided to set
    """
    args_dict = {
        '--description': description,
        '--project': project,
        '--project-domain': project_domain,
        # --no-property wipes existing properties; suppress it when new
        # properties are given in the same call
        '--no-property': no_property and not properties,
        '--property': properties,
    }

    args = common.parse_args(args_dict, repeat_arg=True)
    if not args.strip():
        raise ValueError("Nothing is provided to set")

    LOG.info("Setting flavor {} with args: {}".format(flavor, args))
    args = '{} {}'.format(args, flavor)
    exit_code, output = cli.openstack('flavor set', args, ssh_client=con_ssh,
                                      fail_ok=fail_ok, auth_info=auth_info)
    # Fixed: previously checked 'exit_code == 1' which let any other nonzero
    # failure code fall through to the success path; every sibling wrapper in
    # this module treats all nonzero codes as failure.
    if exit_code > 0:
        return 1, output

    msg = "Flavor {} set successfully".format(flavor)
    LOG.info(msg)
    return 0, flavor
def __get_resource_tables(namespace=None, all_namespaces=None,
                          resource_types=None, resource_names=None,
                          labels=None, field_selectors=None, wide=True,
                          con_ssh=None, fail_ok=False, grep=None):
    """
    Run 'kubectl get' for the given resources and return parsed tables.

    Internal helper: returns (code, <std_err>) on failure, or
    (code, <tables>(list)) parsed via table_parser.tables_kube on success.

    Raises:
        ValueError: if resource_names are combined with multiple
            resource_types, or with all_namespaces (and no namespace).
    """
    # Normalize resource types into a comma-separated string.
    if isinstance(resource_types, (list, tuple)):
        types_str = ','.join(resource_types)
    else:
        types_str = resource_types if resource_types else ''

    resources = types_str
    if resource_names:
        # kubectl only accepts names for a single resource type.
        if ',' in types_str:
            raise ValueError(
                "At most 1 resource_types can be specified if resource_names "
                "are provided.")
        if all_namespaces and not namespace:
            raise ValueError(
                "all_namespaces is disallowed when resource_names are provided"
            )
        if isinstance(resource_names, (list, tuple)):
            names_str = ' '.join(resource_names)
        else:
            names_str = resource_names
        resources = '{} {}'.format(resources, names_str)

    kube_args = '{} {}'.format(
        resources,
        common.parse_args({
            '-n': namespace,
            '--all-namespaces': True if all_namespaces and not namespace
            else None,
            '-l': labels,
            '--field-selector': field_selectors,
            '-o': 'wide' if wide else None,
        }, repeat_arg=False, vals_sep=','))

    code, out = exec_kube_cmd(sub_cmd='get', args=kube_args, con_ssh=con_ssh,
                              fail_ok=fail_ok, grep=grep)
    if code > 0:
        return code, out

    return code, table_parser.tables_kube(out)
def set_aggregate(aggregate, properties=None, no_property=None, zone=None,
                  name=None, fail_ok=False, con_ssh=None,
                  auth_info=Tenant.get('admin')):
    """
    Set aggregate with given params

    Args:
        aggregate (str): aggregate to set
        properties (dict|None):
        no_property (bool|None):
        zone (str|None):
        name (str|None):
        fail_ok (bool):
        con_ssh:
        auth_info:

    Returns (tuple):
        (0, "Aggregate <aggregate> set successfully with param: <params>)
        (1, <std_err>)   -- returned only if fail_ok=True
    """
    opts = common.parse_args({
        '--zone': zone,
        '--name': name,
        '--property': properties,
        '--no-property': no_property,
    }, repeat_arg=True)
    full_args = '{} {}'.format(opts, aggregate)

    code, output = cli.openstack('aggregate set', full_args,
                                 ssh_client=con_ssh, fail_ok=fail_ok,
                                 auth_info=auth_info)
    if code > 0:
        return 1, output

    msg = "Aggregate {} set successfully with param: {}".format(aggregate,
                                                                full_args)
    LOG.info(msg)
    return 0, msg
def get_server_groups(field='ID', all_projects=True, long=False, strict=True,
                      regex=False, auth_info=Tenant.get('admin'),
                      con_ssh=None, **kwargs):
    """
    Get server groups ids based on the given criteria

    Args:
        auth_info (dict):
        con_ssh (SSHClient):
        strict (bool): whether to do strict search for given name
        regex (bool): whether or not to use regex when for given name
        all_projects (bool): whether to list for all projects
        long (bool): whether to pass --long to the cli
        field (str|list|tuple):
        **kwargs: filters

    Returns (list): list of server groups
    """
    list_args = common.parse_args({'--all-projects': all_projects,
                                   '--long': long})
    output = cli.openstack('server group list', list_args, ssh_client=con_ssh,
                           auth_info=auth_info)[1]
    table_ = table_parser.table(output)

    # 'Policies' and 'Members' cells are comma-separated; split them into
    # stripped lists before filtering.
    def _split_csv(cell):
        return [item.strip() for item in cell.split(',')]

    return table_parser.get_multi_values(
        table_, field, strict=strict, regex=regex,
        parsers={_split_csv: ('Policies', 'Members')}, **kwargs)
def create_user(name=None, field='name', domain=None, project=None,
                project_domain=None, rtn_exist=None, password=None,
                email=None, description=None, enable=None,
                auth_info=Tenant.get('admin'), fail_ok=False, con_ssh=None):
    """
    Create an openstack user

    Args:
        name (str|None): auto-generated unique name when unset
        field: name or id
        domain:
        project (str|None): default project
        project_domain:
        rtn_exist (bool): pass --or-show to return existing user
        password: defaults to HostLinuxUser password, resolved at call time
        email:
        description:
        enable:
        auth_info:
        fail_ok:
        con_ssh:

    Returns (tuple):
        (0, <user>)
        (1, <std_err>)
    """
    if password is None:
        # Resolve the default at call time. A call placed in the default
        # argument would be evaluated once at import time, freezing whatever
        # password was configured then (the sibling create_user uses this
        # same pattern).
        password = HostLinuxUser.get_password()

    if not name:
        name = 'user'
        # Fixed: the unique name was previously computed but discarded,
        # so the user was always named 'user'.
        name = common.get_unique_name(name_str=name)

    LOG.info("Create/Show openstack user {}".format(name))
    arg_dict = {
        'domain': domain,
        'project': project,
        'project-domain': project_domain,
        'password': password,
        'email': email,
        'description': description,
        'enable': True if enable is True else None,
        'disable': True if enable is False else None,
        'or-show': rtn_exist,
    }

    arg_str = '{} {}'.format(common.parse_args(args_dict=arg_dict), name)

    code, output = cli.openstack('user create', arg_str, ssh_client=con_ssh,
                                 fail_ok=fail_ok, auth_info=auth_info)
    if code > 0:
        return 1, output

    user = table_parser.get_value_two_col_table(table_parser.table(output),
                                                field=field)
    LOG.info("Openstack user {} successfully created/showed".format(user))
    return 0, user
def delete_resources(resource_names=None, select_all=None,
                     resource_types='pod', namespace=None, recursive=None,
                     labels=None, con_ssh=None, fail_ok=False,
                     post_check=True, check_both_controllers=True):
    """
    Delete pods via kubectl delete

    Args:
        resource_names (None|str|list|tuple): name(s) of resources to delete
        select_all (None|bool): passed through as --all
        resource_types (str|list|tuple): e.g. 'pod'
        namespace (None|str): namespace used only for post-deletion checks
        recursive (bool): passed through as --recursive
        labels (None|dict): passed through as -l selectors
        con_ssh: active controller ssh; auto-acquired when None
        fail_ok:
        post_check (bool): Whether to check if resources are gone after
            deletion
        check_both_controllers (bool): also verify on the standby controller

    Returns (tuple):
        (0, None)   # resources successfully deleted
        (1, <std_err>)
        (2, <undeleted_resources>(list of dict))   # resources still exist
            in kubectl after deletion
        (3, <undeleted_resources_on_other_controller>(list of dict))
            # resources still exist on the other controller
    """
    arg_dict = {
        '--all': select_all,
        '-l': labels,
        '--recursive': recursive,
    }

    arg_str = common.parse_args(args_dict=arg_dict, vals_sep=',')
    # Resource types go in front of the flags; names are appended after.
    if resource_types:
        if isinstance(resource_types, str):
            resource_types = [resource_types]
        arg_str = '{} {}'.format(','.join(resource_types), arg_str).strip()

    if resource_names:
        if isinstance(resource_names, str):
            resource_names = [resource_names]
        arg_str = '{} {}'.format(arg_str, ' '.join(resource_names))

    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()

    code, output = exec_kube_cmd(sub_cmd='delete', args=arg_str,
                                 con_ssh=con_ssh, fail_ok=fail_ok)
    if code > 0:
        return 1, output

    if post_check:
        # Closure over resource_types/resource_names/namespace so the same
        # check can be run against either controller's ssh session.
        def __wait_for_resources_gone(ssh_client):
            final_remaining = []
            if resource_types:
                # One wait per resource type; collect all leftovers.
                for resource_type in resource_types:
                    res, remaining_res = wait_for_resources_gone(
                        resource_names=resource_names,
                        resource_type=resource_type, namespace=namespace,
                        con_ssh=ssh_client, fail_ok=fail_ok)
                    if not res:
                        final_remaining += remaining_res
            else:
                res, final_remaining = wait_for_resources_gone(
                    resource_names=resource_names, namespace=namespace,
                    con_ssh=ssh_client, fail_ok=fail_ok)
            return final_remaining

        LOG.info("Check pod is not running on current host")
        remaining = __wait_for_resources_gone(con_ssh)
        if remaining:
            return 2, remaining

        # On duplex systems also verify from the standby controller.
        if check_both_controllers and not system_helper.is_aio_simplex(
                con_ssh=con_ssh):
            LOG.info("Check pod is running on the other controller as well")
            con_name = 'controller-1' if \
                con_ssh.get_hostname() == 'controller-0' else 'controller-0'
            # Imported here to avoid a circular import at module load time
            # -- TODO confirm
            from keywords import host_helper
            with host_helper.ssh_to_host(hostname=con_name,
                                         con_ssh=con_ssh) as other_con:
                remaining = __wait_for_resources_gone(other_con)
                if remaining:
                    return 3, remaining

    LOG.info("{} are successfully removed.".format(resource_names))
    return 0, None
def create_image(name=None, image_id=None, source_image_file=None,
                 volume=None, visibility='public', force=None, store=None,
                 disk_format=None, container_format=None, min_disk=None,
                 min_ram=None, tags=None, protected=None, project=None,
                 project_domain=None, timeout=ImageTimeout.CREATE,
                 con_ssh=None, auth_info=Tenant.get('admin'), fail_ok=False,
                 ensure_sufficient_space=True, sys_con_for_dc=True,
                 wait_for_subcloud_sync=True, cleanup=None,
                 hw_vif_model=None, **properties):
    """
    Create an image with given criteria.

    Args:
        name (str): string to be included in image name
        image_id (str): id for the image to be created
        source_image_file (str|None): local image file to create image from.
            DefaultImage will be used if unset
        volume (str)
        disk_format (str): One of these: ami, ari, aki, vhd, vmdk, raw,
            qcow2, vdi, iso
        container_format (str): One of these: ami, ari, aki, bare, ovf
        min_disk (int): Minimum size of disk needed to boot image
            (in gigabytes)
        min_ram (int): Minimum amount of ram needed to boot image
            (in megabytes)
        visibility (str): public|private|shared|community
        protected (bool): Prevent image from being deleted.
        store (str): Store to upload image to
        force (bool)
        tags (str|tuple|list)
        project (str|None)
        project_domain (str|None)
        timeout (int): max seconds to wait for cli return
        con_ssh (SSHClient):
        auth_info (dict|None):
        fail_ok (bool):
        ensure_sufficient_space (bool)
        sys_con_for_dc (bool): create image on system controller if it's
            distributed cloud
        wait_for_subcloud_sync (bool)
        cleanup (str|None): add to teardown list. 'function', 'class',
            'module', 'session', or None
        hw_vif_model (None|str): if this is set, 'hw_vif_model' in properties
            will be overridden
        **properties: key=value pair(s) of properties to associate with the
            image

    Returns (tuple): (rtn_code(int), id(str), message(str))
        # 1, 2, 3 only applicable if fail_ok=True
        - (0, <id>, "Image <id> is created successfully")
        - (1, <id or ''>, <stderr>) # openstack image create cli rejected
        - (2, <id>, "Image status is not active.")
        - (3, <id>, <id mismatch message>) # actual id != requested image_id
    """
    # Use source image url if url is provided. Else use local img file.
    default_guest_img = GuestImages.IMAGE_FILES[
        GuestImages.DEFAULT['guest']][2]

    file_path = source_image_file
    if not file_path and not volume:
        # Fall back to the default guest image shipped in image_dir
        img_dir = GuestImages.DEFAULT['image_dir']
        file_path = "{}/{}".format(img_dir, default_guest_img)

    if file_path:
        if file_path.startswith('~/'):
            file_path = file_path.replace('~', HostLinuxUser.get_home(), 1)
        file_path = os.path.normpath(file_path)
        # Infer image properties from the file name unless caller set them
        if 'win' in file_path and 'os_type' not in properties:
            properties['os_type'] = 'windows'
        elif 'ge_edge' in file_path and 'hw_firmware_type' not in properties:
            properties['hw_firmware_type'] = 'uefi'

    if hw_vif_model:
        # Explicit hw_vif_model parameter takes precedence over **properties
        properties[ImageMetadata.VIF_MODEL] = hw_vif_model

    if sys_con_for_dc and ProjVar.get_var('IS_DC'):
        # Distributed cloud: create the image on the SystemController so it
        # can be synced to subclouds
        con_ssh = ControllerClient.get_active_controller('RegionOne')
        create_auth = Tenant.get(tenant_dictname=auth_info['tenant'],
                                 dc_region='SystemController').copy()
        image_host_ssh = get_cli_client(central_region=True)
    else:
        if not con_ssh:
            con_ssh = ControllerClient.get_active_controller()
        image_host_ssh = get_cli_client()
        create_auth = auth_info

    if ensure_sufficient_space and not volume:
        if not is_image_storage_sufficient(img_file_path=file_path,
                                           con_ssh=con_ssh,
                                           image_host_ssh=image_host_ssh)[0]:
            skip('Insufficient image storage for creating glance image '
                 'from {}'.format(file_path))

    source_str = file_path if file_path else ''
    known_imgs = [
        'cgcs-guest', 'tis-centos-guest', 'ubuntu', 'cirros', 'opensuse',
        'rhel', 'centos', 'win', 'ge_edge', 'vxworks',
        'debian-8-m-agent'
    ]
    name = name if name else 'auto'
    # Prefix the name with a recognized guest-os token when one is found in
    # the source file path; for/else falls through when no token matched.
    for img_str in known_imgs:
        if img_str in name:
            break
        elif img_str in source_str:
            name = img_str + '_' + name
            break
    else:
        if source_str:
            # Use the source file's base name (without extension) as prefix
            name_prefix = str(source_str.split(sep='/')[-1]).split(sep='.')[0]
            name = name_prefix + '_' + name

    name = common.get_unique_name(name_str=name, existing_names=get_images(),
                                  resource_type='image')

    LOG.info("Creating glance image: {}".format(name))

    if not disk_format:
        if not source_image_file:
            # default tis-centos-guest image is raw
            disk_format = 'raw'
        else:
            disk_format = 'qcow2'

    args_dict = {
        '--id': image_id,
        '--store': store,
        '--disk-format': disk_format,
        '--container-format': container_format if container_format
        else 'bare',
        '--min-disk': min_disk,
        '--min-ram': min_ram,
        '--file': file_path,
        '--force': True if force else None,
        '--protected': True if protected else None,
        '--unprotected': True if protected is False else None,
        '--tag': tags,
        '--property': properties,
        '--project': project,
        '--project-domain': project_domain,
        '--volume': volume,
    }
    if visibility:
        # e.g. --public / --private / --shared / --community
        args_dict['--{}'.format(visibility)] = True

    args_ = '{} {}'.format(
        common.parse_args(args_dict, repeat_arg=True, vals_sep=','), name)

    try:
        LOG.info("Creating image {} with args: {}".format(name, args_))
        code, output = cli.openstack('image create', args_,
                                     ssh_client=con_ssh, fail_ok=fail_ok,
                                     auth_info=create_auth, timeout=timeout)
    except:
        # This is added to help debugging image create failure in case of
        # insufficient space. NOTE(review): bare except is deliberate here
        # (always re-raises), but 'except Exception:' would be safer.
        con_ssh.exec_cmd('df -h', fail_ok=True, get_exit_code=False)
        raise

    table_ = table_parser.table(output)
    # Image created from a volume reports its id under 'image_id'
    field = 'image_id' if volume else 'id'
    actual_id = table_parser.get_value_two_col_table(table_, field)
    if cleanup and actual_id:
        # Register for teardown even if later checks fail
        ResourceCleanup.add('image', actual_id, scope=cleanup)

    # NOTE(review): rejection path checks 'code > 1' rather than '> 0';
    # presumably cli.openstack returns >1 for rejection here -- confirm
    # against the cli wrapper.
    if code > 1:
        return 1, actual_id, output

    in_active = wait_for_image_status(actual_id, con_ssh=con_ssh,
                                      auth_info=create_auth,
                                      fail_ok=fail_ok)
    if not in_active:
        return 2, actual_id, "Image status is not active."

    if image_id and image_id != actual_id:
        msg = "Actual image id - {} is different than requested id - {}.".\
            format(actual_id, image_id)
        if fail_ok:
            return 3, actual_id, msg
        raise exceptions.ImageError(msg)

    if wait_for_subcloud_sync:
        wait_for_image_sync_on_subcloud(image_id=actual_id)

    msg = "Image {} is created successfully".format(actual_id)
    LOG.info(msg)
    return 0, actual_id, msg
def create_user(name=None, field='name', domain=None, project=None,
                project_domain=None, rtn_exist=None, password=None,
                email=None, description=None, enable=None,
                auth_info=Tenant.get('admin'), fail_ok=False, con_ssh=None):
    """
    Create an openstack user and register it in the Tenant registry

    Args:
        name (str|None): auto-generated unique name when unset
        field: name or id
        domain:
        project (str|None): default project
        project_domain:
        rtn_exist (bool): pass --or-show to return existing user
        password: defaults to HostLinuxUser password, resolved at call time
        email:
        description:
        enable:
        auth_info:
        fail_ok:
        con_ssh:

    Returns (tuple):
        (0, <user>)
        (1, <std_err>)

    Raises:
        ValueError: if a Tenant entry with the derived dictname already
            exists for a different user
    """
    if not password:
        password = HostLinuxUser.get_password()
    if not name:
        name = 'user'
        # Fixed: the unique name was previously computed but discarded,
        # so the user was always named 'user'.
        name = common.get_unique_name(name_str=name)

    LOG.info("Create/Show openstack user {}".format(name))
    arg_dict = {
        'domain': domain,
        'project': project,
        'project-domain': project_domain,
        'password': password,
        'email': email,
        'description': description,
        'enable': True if enable is True else None,
        'disable': True if enable is False else None,
        'or-show': rtn_exist,
    }

    arg_str = '{} {}'.format(common.parse_args(args_dict=arg_dict), name)

    code, output = cli.openstack('user create', arg_str, ssh_client=con_ssh,
                                 fail_ok=fail_ok, auth_info=auth_info)
    if code > 0:
        return 1, output

    table_ = table_parser.table(output)
    username = table_parser.get_value_two_col_table(table_, field='name')
    user = username if field == 'name' else \
        table_parser.get_value_two_col_table(table_, field=field)

    # Record the new credentials in the Tenant registry so later keywords
    # can authenticate as this user.
    is_platform = auth_info and auth_info.get('platform')
    keystone = 'platform' if is_platform else 'containerized'
    dictname = user + '_platform' if is_platform else user
    existing_auth = Tenant.get(dictname)
    if existing_auth:
        if existing_auth['user'] != username:
            raise ValueError(
                'Tenant.{} already exists for a different user {}'.format(
                    dictname, existing_auth['user']))
        Tenant.update(dictname, username=username, password=password,
                      tenant=project, platform=is_platform)
    else:
        Tenant.add(username=username, tenantname=project, dictname=dictname,
                   password=password, platform=is_platform)
        LOG.info('Tenant.{} for {} keystone user {} is added'.format(
            dictname, keystone, user))

    LOG.info("{} keystone user {} successfully created/showed".format(
        keystone, user))
    return 0, user
def apply_pod(file_path, pod_name, namespace=None, recursive=None,
              select_all=None, labels=None, con_ssh=None, fail_ok=False,
              check_both_controllers=True):
    """
    Apply a pod from given file via kubectl apply

    Args:
        file_path (str): yaml/json file to apply
        pod_name (str): pod name to wait for after apply
        namespace (None|str):
        recursive (None|bool):
        select_all (None|bool):
        labels (dict|str|list|tuple|None): key value pairs
        con_ssh: active controller ssh; auto-acquired when None
        fail_ok:
        check_both_controllers (bool): also verify from standby controller

    Returns (tuple):
        (0, <pod_name>)
        (1, <std_err>)
        (2, <err_msg>)  # pod is not running after apply
        (3, <err_msg>)  # pod not running on the other controller after apply
    """
    arg_dict = {
        '--all': select_all,
        '-l': labels,
        '--recursive': recursive,
    }
    arg_str = common.parse_args(args_dict=arg_dict, vals_sep=',')
    arg_str += ' -f {}'.format(file_path)

    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()

    code, output = exec_kube_cmd(sub_cmd='apply', args=arg_str,
                                 con_ssh=con_ssh, fail_ok=fail_ok)
    if code > 0:
        return 1, output

    LOG.info("Check pod is running on current host")
    # Fixed: wait_for_pods_status returns a (res, pods_info) tuple (it is
    # unpacked that way for the standby-controller check below). Previously
    # the whole tuple was assigned to res, which is always truthy, so a
    # failure on the active controller was never detected.
    res, pods_info = wait_for_pods_status(pod_names=pod_name,
                                          namespace=namespace,
                                          status=PodStatus.RUNNING,
                                          con_ssh=con_ssh, fail_ok=fail_ok)
    if not res:
        return 2, "Pod {} is not running after apply on active " \
                  "controller".format(pod_name)

    if check_both_controllers and not system_helper.is_aio_simplex(
            con_ssh=con_ssh):
        LOG.info("Check pod is running on the other controller as well")
        con_name = 'controller-1' if con_ssh.get_hostname() == 'controller-0' \
            else 'controller-0'
        from keywords import host_helper
        with host_helper.ssh_to_host(hostname=con_name,
                                     con_ssh=con_ssh) as other_con:
            res, pods_info = wait_for_pods_status(pod_names=pod_name,
                                                  namespace=namespace,
                                                  con_ssh=other_con,
                                                  fail_ok=fail_ok)
            if not res:
                return 3, "Pod {} is not running after apply on standby " \
                          "controller".format(pod_name)

    LOG.info("{} pod is successfully applied and running".format(pod_name))
    return 0, pod_name
def create_project(name=None, field='ID', domain=None, parent=None,
                   description=None, enable=None, con_ssh=None,
                   rtn_exist=None, fail_ok=False,
                   auth_info=Tenant.get('admin'), **properties):
    """
    Create a openstack project

    Args:
        name (str|None): auto-generated as tenant<n> when unset
        field (str): ID or Name. Whether to return project id or name if
            created successfully
        domain (str|None):
        parent (str|None):
        description (str|None):
        enable (bool|None):
        con_ssh:
        rtn_exist: pass --or-show to return existing project
        fail_ok:
        auth_info:
        **properties:

    Returns (tuple):
        (0, <project>)
        (1, <std_err>)
    """
    if not name:
        # Derive the next free 'tenant<n><suffix>' from existing projects.
        existing_names = get_projects(field='Name',
                                      auth_info=Tenant.get('admin'),
                                      con_ssh=con_ssh)
        max_count = 0
        end_str = ''
        for existing in existing_names:
            found = re.match(r'tenant(\d+)(.*)', existing)
            if found:
                count, end_str = found.groups()
                max_count = max(int(count), max_count)
        name = 'tenant{}{}'.format(max_count + 1, end_str)

    LOG.info("Create/Show openstack project {}".format(name))
    arg_str = common.parse_args(args_dict={
        'domain': domain,
        'parent': parent,
        'description': description,
        'enable': True if enable is True else None,
        'disable': True if enable is False else None,
        'or-show': rtn_exist,
        'property': properties,
    }, repeat_arg=True)
    arg_str += ' {}'.format(name)

    code, output = cli.openstack('project create', arg_str,
                                 ssh_client=con_ssh, fail_ok=fail_ok,
                                 auth_info=auth_info)
    if code > 0:
        return 1, output

    project_ = table_parser.get_value_two_col_table(
        table_parser.table(output), field=field)
    LOG.info("Project {} successfully created/showed.".format(project_))
    return 0, project_
def create_flavor(name=None, flavor_id=None, vcpus=1, ram=1024,
                  root_disk=None, ephemeral=None, swap=None, is_public=None,
                  rxtx_factor=None, project=None, project_domain=None,
                  description=None, guest_os=None, fail_ok=False,
                  auth_info=Tenant.get('admin'), con_ssh=None,
                  storage_backing=None, rtn_id=True, cleanup=None,
                  add_default_specs=True, properties=None):
    """
    Create a flavor with given criteria.

    Args:
        name (str): substring of flavor name. Whole name will be
            <name>-<auto_count>. e,g., 'myflavor-1'. If None, name will be
            set to 'flavor'.
        flavor_id (str): auto generated by default unless specified.
        vcpus (int):
        ram (int):
        root_disk (int):
        ephemeral (int):
        swap (int|None):
        is_public (bool):
        rxtx_factor (str):
        project
        project_domain
        description
        guest_os (str|None): guest name such as 'tis-centos-guest' or None -
            default tis guest assumed
        fail_ok (bool): whether it's okay to fail to create a flavor.
            Default to False.
        auth_info (dict): This is set to Admin by default. Can be set to
            other tenant for negative test.
        con_ssh (SSHClient):
        storage_backing (str): storage backing in extra flavor. Auto set
            storage backing based on system config if None. Valid values:
            'local_image', 'remote'
        rtn_id (bool): return id or name
        cleanup (str|None): cleanup scope. function, class, module, or
            session
        add_default_specs (False): Whether to automatically add extra specs
            that are needed to launch vm
        properties (str|list|dict)

    Returns (tuple): (rtn_code (int), flavor (str), storage_backing
            (str|None))
        (0, <flavor_id/name>, <storage_backing>): flavor created successfully
        (1, <stderr>): create flavor cli rejected

    Raises:
        ValueError: if requested storage_backing is not configured on any
            nova hypervisor
    """
    # Existing names are needed to generate a unique flavor name.
    table_ = table_parser.table(
        cli.openstack('flavor list', ssh_client=con_ssh,
                      auth_info=auth_info)[1])
    existing_names = table_parser.get_column(table_, 'Name')

    if name is None:
        name = 'flavor'
    flavor_name = common.get_unique_name(name_str=name,
                                         existing_names=existing_names,
                                         resource_type='flavor')

    if root_disk is None:
        # Default the root disk size to the guest image's minimum disk size.
        if not guest_os:
            guest_os = GuestImages.DEFAULT['guest']
        root_disk = GuestImages.IMAGE_FILES[guest_os][1]

    args_dict = {
        '--ephemeral': ephemeral,
        '--swap': swap,
        '--rxtx-factor': rxtx_factor,
        '--disk': root_disk,
        '--ram': ram,
        '--vcpus': vcpus,
        '--id': flavor_id,
        '--project': project,
        '--project-domain': project_domain,
        '--description': description,
        '--public': True if is_public else None,
        '--private': True if is_public is False else None,
        '--property': properties,
    }
    args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True),
                          flavor_name)

    LOG.info("Creating flavor {}...".format(flavor_name))
    LOG.info("openstack flavor create option: {}".format(args))
    exit_code, output = cli.openstack('flavor create', args,
                                      ssh_client=con_ssh, fail_ok=fail_ok,
                                      auth_info=auth_info)
    # NOTE(review): rejection path checks 'exit_code > 1' rather than '> 0';
    # presumably cli.openstack returns >1 for rejection here -- confirm
    # against the cli wrapper.
    if exit_code > 1:
        return 1, output

    table_ = table_parser.table(output)
    flavor_id = table_parser.get_value_two_col_table(table_, 'id')
    LOG.info("Flavor {} created successfully.".format(flavor_name))

    if cleanup:
        ResourceCleanup.add('flavor', flavor_id, scope=cleanup)

    if add_default_specs:
        # Large pages are required to launch vms on this system.
        extra_specs = {FlavorSpec.MEM_PAGE_SIZE: 'large'}
        # extra_specs = {FlavorSpec.MEM_PAGE_SIZE: 'small'}
        default_flavor_backing = ProjVar.get_var('DEFAULT_INSTANCE_BACKING')
        sys_inst_backing = ProjVar.get_var('INSTANCE_BACKING')
        if not default_flavor_backing:
            # Cached backing info unavailable; query the hosts directly.
            # Imported here to avoid a circular import at module load time
            # -- TODO confirm
            from keywords import host_helper
            sys_inst_backing = host_helper.get_hosts_per_storage_backing(
                auth_info=auth_info, con_ssh=con_ssh, refresh=True)
        # Keep only backings that actually have hosts configured.
        configured_backings = [
            backing for backing in sys_inst_backing
            if sys_inst_backing.get(backing)
        ]
        LOG.debug(
            "configured backing:{} sys inst backing: {}, required storage "
            "backing: {}"
            .format(configured_backings, sys_inst_backing, storage_backing))

        if storage_backing and storage_backing not in configured_backings:
            raise ValueError(
                'Required local_storage {} is not configured on any nova '
                'hypervisor'
                .format(storage_backing))

        if len(configured_backings) > 1:
            # Only pin the backing when the system has more than one option.
            extra_specs[FlavorSpec.STORAGE_BACKING] = storage_backing if \
                storage_backing else \
                ProjVar.get_var('DEFAULT_INSTANCE_BACKING')

        if extra_specs:
            LOG.info("Setting flavor specs: {}".format(extra_specs))
            set_flavor(flavor_id, con_ssh=con_ssh, auth_info=auth_info,
                       **extra_specs)

    flavor = flavor_id if rtn_id else flavor_name
    return 0, flavor, storage_backing
def create_aggregate(field='name', name=None, avail_zone=None,
                     properties=None, check_first=True, fail_ok=False,
                     con_ssh=None, auth_info=Tenant.get('admin')):
    """
    Add a aggregate with given name and availability zone.

    Args:
        field (str): name or id
        name (str): name for aggregate to create
        avail_zone (str|None):
        properties (dict|None)
        check_first (bool)
        fail_ok (bool):
        con_ssh (SSHClient):
        auth_info (dict):

    Returns (tuple):
        (-1, <existing>)  -- aggregate already exists, did nothing
        (0, <rtn_val>)    -- aggregate successfully created
        (1, <stderr>)     -- cli rejected
    """
    if not name:
        taken_names = get_aggregates(field='name')
        name = common.get_unique_name(name_str='cgcsauto',
                                      existing_names=taken_names)

    cmd_args = '{} {}'.format(
        common.parse_args({'--zone': avail_zone, '--property': properties},
                          repeat_arg=True),
        name)

    if check_first:
        existing = get_aggregates(field=field, name=name,
                                  avail_zone=avail_zone)
        if existing:
            LOG.warning(
                "Aggregate {} already exists. Do nothing.".format(name))
            return -1, existing[0]

    LOG.info("Adding aggregate {}".format(name))
    res, out = cli.openstack('aggregate create', cmd_args,
                             ssh_client=con_ssh, fail_ok=fail_ok,
                             auth_info=auth_info)
    if res == 1:
        return res, out

    out_tab = table_parser.table(out)
    LOG.info("Aggregate {} is successfully created".format(name))
    return 0, table_parser.get_value_two_col_table(out_tab, field)
def create_stack(stack_name, template, pre_creates=None, environments=None,
                 stack_timeout=None, parameters=None, param_files=None,
                 enable_rollback=None, dry_run=None, wait=None, tags=None,
                 fail_ok=False, con_ssh=None, auth_info=None,
                 cleanup='function', timeout=300):
    """
    Create the given heat stack for a given tenant.

    Args:
        stack_name (str): Given name for the heat stack
        template (str): path of heat template
        pre_creates (str|list|None)
        environments (str|list|None)
        stack_timeout (int|str|None): stack creating timeout in minutes
        parameters (str|dict|None)
        param_files (str|dict|None)
        enable_rollback (bool|None)
        dry_run (bool|None)
        wait (bool|None)
        tags (str|list|None)
        auth_info (dict): Tenant dict. If None, primary tenant will be used.
        con_ssh (SSHClient): If None, active controller ssh will be used.
        timeout (int): automation timeout in seconds
        fail_ok (bool):
        cleanup (str|None)

    Returns (tuple):
        (0, <stack_name>)  -- stack created and reached CREATE_COMPLETE
        (1, <std_err>)     -- cli rejected
        (2, <msg>)         -- stack did not reach CREATE_COMPLETE
    """
    # list/tuple tags become a single comma-separated --tags value
    tags_val = ','.join(tags) if isinstance(tags, (list, tuple)) else tags
    cli_args = common.parse_args({
        '--template': template,
        '--environment': environments,
        '--timeout': stack_timeout,
        '--pre-create': pre_creates,
        '--enable-rollback': enable_rollback,
        '--parameter': parameters,
        '--parameter-file': param_files,
        '--wait': wait,
        '--tags': tags_val,
        '--dry-run': dry_run,
    }, repeat_arg=True)

    LOG.info("Create Heat Stack {} with args: {}".format(stack_name,
                                                         cli_args))
    exitcode, output = cli.openstack('stack create',
                                     '{} {}'.format(cli_args, stack_name),
                                     ssh_client=con_ssh, fail_ok=fail_ok,
                                     auth_info=auth_info, timeout=timeout)
    if exitcode > 0:
        return 1, output

    if cleanup:
        ResourceCleanup.add('heat_stack', resource_id=stack_name,
                            scope=cleanup)

    LOG.info(
        "Wait for Heat Stack Status to reach CREATE_COMPLETE for stack %s",
        stack_name)
    res, msg = wait_for_heat_status(stack_name=stack_name,
                                    status=HeatStackStatus.CREATE_COMPLETE,
                                    auth_info=auth_info, fail_ok=fail_ok)
    if not res:
        return 2, msg

    LOG.info("Stack {} created successfully".format(stack_name))
    return 0, stack_name
def unset_flavor(flavor, properties=None, project=None, project_domain=None,
                 check_first=True, fail_ok=False,
                 auth_info=Tenant.get('admin'), con_ssh=None):
    """
    Unset specific extra spec(s) from given flavor.

    Args:
        flavor (str): id of the flavor
        properties (str|list|tuple): extra spec(s) to be removed. At least
            one should be provided.
        project_domain
        project
        check_first (bool): Whether to check if extra spec exists in flavor
            before attempt to unset
        con_ssh (SSHClient):
        auth_info (dict):
        fail_ok (bool):

    Returns (tuple): (rtn_code (int), message (str))
        (-1, 'Nothing to unset for flavor <flavor>. Do nothing.')
        (0, 'Flavor <flavor> unset successfully'): required extra spec(s)
            removed successfully
        (1, <stderr>): unset extra spec cli rejected
    """
    if isinstance(properties, str):
        properties = [properties]

    if properties and check_first:
        # Only attempt to unset specs that actually exist on the flavor
        existing_specs = get_flavor_values(flavor, fields='properties',
                                           con_ssh=con_ssh,
                                           auth_info=auth_info)[0]
        properties = list(set(properties) & set(existing_specs.keys()))

    args_dict = {
        '--property': properties,
        '--project': project,
        # Fixed: flag was '--project_domain' (underscore), which the
        # openstack cli does not accept; every other wrapper in this module
        # uses the hyphenated '--project-domain'.
        '--project-domain': project_domain,
    }
    args = common.parse_args(args_dict, repeat_arg=True)

    if not args:
        msg = "Nothing to unset for flavor {}. Do nothing.".format(flavor)
        LOG.info(msg)
        return -1, msg

    LOG.info("Unsetting flavor {} with args: {}".format(flavor, args))
    exit_code, output = cli.openstack('flavor unset', args,
                                      ssh_client=con_ssh, fail_ok=fail_ok,
                                      auth_info=auth_info)
    if exit_code > 0:
        return 1, output

    success_msg = "Flavor {} unset successfully".format(flavor)
    LOG.info(success_msg)
    return 0, success_msg