def pci_prep():
    """Raise the cores quota to 100 for the primary and secondary tenants.

    Returns:
        tuple: (primary tenant dict, primary tenant name, secondary tenant dict)
    """
    primary = Tenant.get_primary()
    secondary = Tenant.get_secondary()
    primary_name = primary['tenant']

    # Same quota bump for both tenants.
    for tenant_name in (primary_name, secondary['tenant']):
        vm_helper.set_quotas(tenant=tenant_name, cores=100)

    return primary, primary_name, secondary
def revert():
    """Restore each tenant's network and subnet quotas to their saved values.

    Relies on closure variable ``tenants_quotas``: a mapping of
    tenant id -> (network quota, subnet quota) captured before the
    quotas were modified.
    """
    LOG.fixture_step("Revert network quotas to original values.")
    for tenant_id_, quotas in tenants_quotas.items():
        network_quota_, subnet_quota_ = quotas
        # Bug fix: previously passed ``tenant=tenant_id`` (an undefined or
        # stale name from an enclosing scope) instead of the loop variable,
        # so quotas were not reverted per-tenant.
        vm_helper.set_quotas(tenant=tenant_id_, networks=network_quota_,
                             subnets=subnet_quota_)
def pre_check(request):
    """
    This is to adjust the quota

    return: code 0/1
    """
    if len(host_helper.get_up_hypervisors()) < 3:
        skip('Large heat tests require 3+ hypervisors')

    # Remote CLI is not supported by these testcases; turn it off for the
    # duration of the test and restore the original setting afterwards.
    remote_cli = ProjVar.get_var('REMOTE_CLI')
    if remote_cli:
        ProjVar.set_var(REMOTE_CLI=False)

        def _restore_remote_cli():
            ProjVar.set_var(REMOTE_CLI=remote_cli)

        request.addfinalizer(_restore_remote_cli)

    vm_helper.set_quotas(networks=100)
    vm_helper.ensure_vms_quotas(cores_num=100, vols_num=100, vms_num=100)

    def _list_status():
        # Post-test diagnostics: dump heat resources and nova migrations.
        LOG.fixture_step("Listing heat resources and nova migrations")
        for stack in heat_helper.get_stacks(auth_info=Tenant.get('admin')):
            heat_helper.get_stack_resources(stack=stack,
                                            auth_info=Tenant.get('admin'))
        nova_helper.get_migration_list_table()

    request.addfinalizer(_list_status)
def pre_check(request):
    """
    This is to adjust the quota and to launch the heat stack

    return: code 0/1
    """
    if len(host_helper.get_up_hypervisors()) < 3:
        skip('System test heat tests require 3+ hypervisors')

    # Remote CLI is not supported by these testcases; turn it off for the
    # duration of the test and restore the original setting afterwards.
    remote_cli = ProjVar.get_var('REMOTE_CLI')
    if remote_cli:
        ProjVar.set_var(REMOTE_CLI=False)

        def _restore_remote_cli():
            ProjVar.set_var(REMOTE_CLI=remote_cli)

        request.addfinalizer(_restore_remote_cli)

    # Large-scale quotas needed for the lab_setup tenant VMs below.
    vm_helper.set_quotas(networks=600, ports=1000, volumes=1000, cores=1000,
                         instances=1000, ram=7168000, server_groups=100,
                         server_group_members=1000)
    system_test_helper.launch_lab_setup_tenants_vms()

    def _list_status():
        # Post-test diagnostics: dump heat resources and nova migrations.
        LOG.fixture_step("Listing heat resources and nova migrations")
        for stack in heat_helper.get_stacks(auth_info=Tenant.get('admin')):
            heat_helper.get_stack_resources(stack=stack,
                                            auth_info=Tenant.get('admin'))
        nova_helper.get_migration_list_table()
        # system_test_helper.delete_lab_setup_tenants_vms()

    request.addfinalizer(_list_status)
def update_net_quota(request):
    """Raise the networks quota by 2 and restore it after the test."""
    saved_quota = vm_helper.get_quotas('networks')[0]
    vm_helper.set_quotas(networks=saved_quota + 2)

    def _revert_quota():
        vm_helper.set_quotas(networks=saved_quota)

    request.addfinalizer(_revert_quota)
def update_net_quota(request):
    """Raise the networks quota by 6 and restore it after the test.

    Skips unless the system runs AVS.
    """
    if not system_helper.is_avs():
        skip('Feature only supported by AVS')

    saved_quota = vm_helper.get_quotas('networks')[0]
    vm_helper.set_quotas(networks=saved_quota + 6)

    def _revert_quota():
        vm_helper.set_quotas(networks=saved_quota)

    request.addfinalizer(_revert_quota)
def test_heat_template(template_name, revert_quota):
    """
    Basic Heat template testing: various Heat templates.

    Args:
        template_name (str): e.g, OS_Cinder_Volume.
        revert_quota (dict): test fixture to revert network quota.

    =====
    Prerequisites (skip test if not met):
        - at least two hypervisors hosts on the system

    Test Steps:
        - Create a heat stack with the given template
        - Verify heat stack is created successfully
        - Verify heat resources are created
        - Delete Heat stack and verify resource deletion
    """
    if 'QoSPolicy' in template_name:
        if not system_helper.is_avs():
            skip("QoS policy is not supported by OVS")

    elif template_name == 'OS_Neutron_RouterInterface.yaml':
        # Bug fix: step message previously said "by 2" while the quotas are
        # actually raised by 10 below.
        LOG.tc_step("Increase network quota by 10 for every tenant")
        tenants_quotas = revert_quota
        for tenant_id, quotas in tenants_quotas.items():
            network_quota, subnet_quota = quotas
            vm_helper.set_quotas(tenant=tenant_id,
                                 networks=network_quota + 10,
                                 subnets=subnet_quota + 10)

    elif template_name == 'OS_Nova_Server.yaml':
        # Create a new image to be used for the heat stack update step later.
        LOG.tc_step("Creating an Image to be used for heat update later")
        glance_helper.create_image(name='tis-centos2', cleanup='function')

    verify_basic_template(template_name)
def create_tenants_and_update_quotas(new_tenants_index=(3, 6),
                                     add_swift_role=False):
    """
    Create tenant3-6 and update quotas for admin and the new tenants
    """
    roles = ['_member_', 'admin']
    if add_swift_role:
        roles.append('SwiftOperator')

    projects = ['admin']
    if new_tenants_index:
        start, end = new_tenants_index
        for i in range(start, end + 1):
            name = 'tenant{}'.format(i)
            keystone_helper.create_project(name=name, description=name,
                                           rtn_exist=True)
            keystone_helper.create_user(name=name, rtn_exist=True,
                                        password=USER_PASSWORD)
            for role in roles:
                # NOTE(review): ``name`` is always 'tenant<i>' here, so this
                # guard can never trigger — possibly intended to test the
                # project rather than the tenant name. Kept as-is to
                # preserve behavior; confirm the original intent.
                if role == 'SwiftOperator' and name == 'admin':
                    continue
                user = '******' if role == 'admin' else name
                keystone_helper.add_or_remove_role(role=role, project=name,
                                                   user=user)
            projects.append(name)

    # Apply a uniform, generous quota set to admin plus every new tenant.
    for project in projects:
        vm_helper.set_quotas(tenant=project, instances=20, cores=50,
                             volumes=30, snapshots=20, ports=500,
                             subnets=100, networks=100,
                             **{'floating-ips': 50})
def _revert_quota():
    # Finalizer: restore the networks quota to the value captured in the
    # enclosing scope (``network_quota``) before the test modified it.
    vm_helper.set_quotas(networks=network_quota)
def adjust_cinder_quota(con_ssh, increase, backup_info):
    """
    Increase the quota for number of volumes for the tenant as which
    System Backup will be done. By default, it's 'tenant1'

    Args:
        con_ssh - current ssh connection
        increase - number of volumes to bump up
        backup_info - options for backup

    Return:
        increase - actual increased
        free_space - free space left for cinder volumes
        max_per_volume_size - max limit for an individual volume
    """
    # On storage labs there is no LVM backing to measure; use -1 sentinels.
    if backup_info.get('is_storage_lab', False):
        free_space, total_space, unit = -1, -1, 1
    else:
        free_space, total_space, unit = cinder_helper.get_lvm_usage(con_ssh)

    LOG.info('lvm space: free:{}, total:{}'.format(free_space, total_space))

    quota_names = ['gigabytes', 'per-volume-gigabytes', 'volumes']
    tenant = backup_info['tenant']
    quota_values = vm_helper.get_quotas(quotas=quota_names,
                                        auth_info=tenant, con_ssh=con_ssh)
    LOG.info('Cinder quotas:{}'.format(quota_values))

    max_total_volume_size = int(quota_values[0])
    max_per_volume_size = int(quota_values[1])
    max_volumes = int(quota_values[2])

    existing_volumes = cinder_helper.get_volumes(
        auth_info=Tenant.get('admin'), con_ssh=con_ssh)
    LOG.info(
        'Cinder VOLUME usage: current number of volumes:{}, quotas for {}: {}, '
        .format(len(existing_volumes), quota_names, quota_values))

    # The total-gigabytes quota (when set) caps the usable free space.
    if 0 < max_total_volume_size < free_space:
        free_space = max_total_volume_size

    wanted_volumes = len(existing_volumes) + increase
    if 0 <= max_volumes < wanted_volumes:
        LOG.info(
            'Not enough quota for number of cinder volumes, increase it to:{} from:{}'
            .format(wanted_volumes, max_volumes))
        code, output = vm_helper.set_quotas(tenant, con_ssh=con_ssh,
                                            volumes=wanted_volumes,
                                            fail_ok=True)
        if code > 0:
            # Quota bump failed; fall back to however many volumes the
            # current quota still allows (may be <= 0).
            LOG.info(
                'Failed to increase the Cinder quota for number of volumes to:{} from:{}, error:{}'
                .format(wanted_volumes, max_volumes, output))
            increase = max_volumes - len(existing_volumes)

    return increase, free_space, max_per_volume_size