def cu_vm(provider, vm_name, template):
    """ Deploys CU VM """
    # Deploys a Capacity & Utilization (C&U) test VM from `template`, waits
    # for it to run, then configures activity-generating cron jobs over SSH.
    provider_dict = cfme_data['management_systems'][provider]
    # TODO this key isn't in cfme qe yamls
    datastore = provider_dict['cap_and_util']['allowed_datastores']
    resource_pool = provider_dict['cap_and_util']['resource_pool']
    # TODO methods deploy_template calls don't accept resourcepool and allowed_datastores as kwargs
    deploy_template(provider, vm_name, template,
                    resourcepool=resource_pool, allowed_datastores=datastore)
    prov_mgmt = get_mgmt(provider)
    # Wait until the freshly deployed VM is powered on before asking for an IP.
    vm_running(prov_mgmt, vm_name)
    ip = prov_mgmt.get_ip_address(vm_name)
    # TODO this key isn't in cfme qe yamls
    vm_ssh_creds = provider_dict['capandu_vm_creds']
    sshname = credentials[vm_ssh_creds]['username']
    sshpass = credentials[vm_ssh_creds]['password']
    # Create cron jobs to generate disk and network activity on the CU VM.
    with make_ssh_client(ip, sshname, sshpass) as ssh_client:
        try:
            config_cu_vm(ssh_client)
        except CUCommandException:
            # Configuration failed -> remove the VM, then re-raise for the caller.
            _vm_cleanup(prov_mgmt, vm_name)
            raise
    # Final sanity check: the VM should still be running after configuration.
    vm_running(prov_mgmt, vm_name)
def cu_vm(provider, vm_name, template):
    """ Deploys CU VM """
    # Deploy a Capacity & Utilization (C&U) VM from `template` and set up
    # disk/network activity generation on it via SSH.
    provider_dict = cfme_data['management_systems'][provider]
    # TODO this key isn't in cfme qe yamls
    datastore = provider_dict['cap_and_util']['allowed_datastores']
    resource_pool = provider_dict['cap_and_util']['resource_pool']
    # TODO methods deploy_template calls don't accept resourcepool and allowed_datastores as kwargs
    deploy_template(provider, vm_name, template,
                    resourcepool=resource_pool, allowed_datastores=datastore)
    prov_mgmt = get_mgmt(provider)
    # Block until the VM is powered on, then fetch its IP.
    vm_running(prov_mgmt, vm_name)
    ip = prov_mgmt.get_ip_address(vm_name)
    # TODO this key isn't in cfme qe yamls
    vm_ssh_creds = provider_dict['capandu_vm_creds']
    sshname = credentials[vm_ssh_creds]['username']
    sshpass = credentials[vm_ssh_creds]['password']
    # Create cron jobs to generate disk and network activity on the CU VM.
    with make_ssh_client(ip, sshname, sshpass) as ssh_client:
        try:
            config_cu_vm(ssh_client)
        except CUCommandException:
            # On configuration failure remove the VM and propagate the error.
            _vm_cleanup(prov_mgmt, vm_name)
            raise
    # Sanity check that the VM is still up after configuration.
    vm_running(prov_mgmt, vm_name)
def create_on_provider(self, timeout=900, find_in_cfme=False, delete_on_failure=True, **kwargs):
    """Create the VM on the provider via MgmtSystem.

    ``deploy_template`` handles errors during VM provision on the MgmtSystem
    side and deletes the VM if it was provisioned incorrectly.

    Args:
        timeout: Number of seconds to wait for the VM to appear in CFME
            Will not wait at all, if set to 0 (Defaults to ``900``)
        find_in_cfme: Verifies that VM exists in CFME UI
        delete_on_failure: Attempts to remove VM on UI navigation failure
    """
    deploy_template(self.provider.key, self.name, self.template_name, **kwargs)
    try:
        if find_in_cfme:
            self.wait_to_appear(timeout=timeout, load_details=False)
    except Exception:
        # logger.warn() is deprecated; logger.warning() is the supported API.
        logger.warning("Couldn't find VM or Instance in CFME")
        if delete_on_failure:
            logger.info("Removing VM or Instance from mgmt system")
            self.provider.mgmt.delete_vm(self.name)
        # Bare re-raise keeps the original exception and traceback intact.
        raise
def _get_vm(request, provider, template_name, vm_name):
    """Deploy ``template_name`` as ``vm_name`` on ``provider`` and return a
    :class:`VMWrapper` around it together with the matching REST API object.

    Skips all tests of the calling module when provisioning times out.
    """
    # Build provider-specific kwargs for deploy_template.
    if provider.one_of(RHEVMProvider):
        kwargs = {"cluster": provider.data["default_cluster"]}
    elif provider.one_of(OpenStackProvider):
        kwargs = {}
        if 'small_template' in provider.data.templates:
            # NOTE(review): key is spelled 'flavour_name' here but
            # 'flavor_name' in a sibling version of this helper — confirm
            # which spelling the mgmt layer actually accepts.
            kwargs = {
                "flavour_name": provider.data.provisioning.get('instance_type')
            }
    elif provider.one_of(SCVMMProvider):
        kwargs = {
            "host_group": provider.data.get("provisioning", {}).get("host_group", "All Hosts")
        }
    else:
        kwargs = {}
    collection = provider.appliance.provider_based_collection(provider)
    vm = collection.instantiate(vm_name, provider, template_name)
    try:
        deploy_template(provider.key,
                        vm_name,
                        template_name=template_name,
                        allow_skip="default",
                        power_on=True,
                        **kwargs)
    except TimedOutError as e:
        logger.exception(e)
        try:
            # Best-effort removal of the partially provisioned VM.
            vm.delete_from_provider()
        except TimedOutError:
            logger.warning("Could not delete VM %s!", vm_name)
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip(
                "{} is quite likely overloaded! Check its status!\n{}: {}".
                format(provider.key, type(e).__name__, str(e)))
    request.addfinalizer(lambda: vm.delete_from_provider())
    # Make it appear in the provider
    provider.refresh_provider_relationships()
    # Get the REST API object
    api = wait_for(
        get_vm_object,
        func_args=[provider.appliance, vm_name],
        message="VM object {} appears in CFME".format(vm),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]
    return VMWrapper(provider, vm_name, api)
def create_on_provider(self, timeout=900, find_in_cfme=False, **kwargs):
    """Create the VM on the provider

    Args:
        timeout: Number of seconds to wait for the VM to appear in CFME
            Will not wait at all, if set to 0 (Defaults to ``900``)
    """
    deploy_template(self.provider.key, self.name, self.template_name, **kwargs)
    if not find_in_cfme:
        return
    # Refresh so the freshly created VM shows up, then wait for it in CFME.
    self.provider.refresh_provider_relationships()
    self.wait_to_appear(timeout=timeout, load_details=False)
def create_on_provider(self, timeout=900, find_in_cfme=False, **kwargs):
    """Create the VM on the provider

    Args:
        timeout: Number of seconds to wait for the VM to appear in CFME
            Will not wait at all, if set to 0 (Defaults to ``900``)
    """
    deploy_template(self.provider.key, self.name, self.template_name, **kwargs)
    if not find_in_cfme:
        return
    # Force a provider inventory refresh before waiting on the CFME side.
    self.provider.refresh_provider_relationships()
    self.wait_to_appear(timeout=timeout, load_details=False)
def _get_vm(request, provider, template_name, vm_name):
    """Deploy ``template_name`` as ``vm_name`` and return a :class:`VMWrapper`
    together with its REST API object.

    Skips all tests of the calling module when the provider fails to
    provision in time.
    """
    # Provider-specific deploy kwargs.
    if provider.one_of(RHEVMProvider):
        kwargs = {"cluster": provider.data["default_cluster"]}
    elif provider.one_of(OpenStackProvider):
        kwargs = {}
        if 'small_template' in provider.data.templates:
            kwargs = {"flavor_name": provider.data.provisioning.get('instance_type')}
    elif provider.one_of(SCVMMProvider):
        kwargs = {
            "host_group": provider.data.get("provisioning", {}).get("host_group", "All Hosts")}
    else:
        kwargs = {}
    collection = provider.appliance.provider_based_collection(provider)
    vm = collection.instantiate(vm_name, provider, template_name)
    try:
        deploy_template(
            provider.key,
            vm_name,
            template_name=template_name,
            allow_skip="default",
            power_on=True,
            **kwargs
        )
    except TimedOutError:
        message = "{} failed provisioning, check its status!".format(provider.key)
        logger.exception(message)
        try:
            # Best-effort removal of the partially provisioned VM.
            vm.cleanup_on_provider()
        except TimedOutError:
            logger.error("Could not delete VM %s!", vm_name)
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip(message)
    request.addfinalizer(vm.cleanup_on_provider)
    # Make it appear in the provider
    provider.refresh_provider_relationships()
    # Get the REST API object
    api = wait_for(
        get_vm_object,
        func_args=[provider.appliance, vm_name],
        message="VM object {} appears in CFME".format(vm),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]
    return VMWrapper(provider, vm_name, api)
def vm_crud(provider):
    """Provision a throwaway VM from the provider's small template, yield it,
    and clean it up afterwards (best effort)."""
    vm_collection = provider.appliance.provider_based_collection(provider)
    name = random_vm_name(context='pblsh')
    vm = vm_collection.instantiate(name, provider)
    deploy_template(vm.provider.key, name, provider.data['small_template'], timeout=2500)
    vm.wait_to_appear(timeout=900, load_details=False)
    yield vm
    try:
        vm.cleanup_on_provider()
    except Exception:
        logger.exception('Exception deleting test vm "%s" on %s', vm.name, provider.name)
def vm_crud(provider):
    """Deploy a short-lived VM from ``small_template``; tear it down after the
    test, logging (not raising) any cleanup failure."""
    coll = provider.appliance.provider_based_collection(provider)
    new_name = random_vm_name(context='pblsh')
    vm = coll.instantiate(new_name, provider)
    deploy_template(vm.provider.key, new_name, provider.data['small_template'], timeout=2500)
    vm.wait_to_appear(timeout=900, load_details=False)
    yield vm
    try:
        vm.cleanup_on_provider()
    except Exception:
        logger.exception('Exception deleting test vm "%s" on %s', vm.name, provider.name)
def _get_vm(request, provider, template_name, vm_name):
    """Deploy ``template_name`` as ``vm_name`` and return a :class:`VMWrapper`
    with the matching REST API object; skips the module when provisioning
    times out.
    """
    # Provider-specific deploy kwargs.
    if provider.one_of(RHEVMProvider):
        kwargs = {"cluster": provider.data["default_cluster"]}
    elif provider.one_of(OpenStackProvider):
        kwargs = {}
        if 'small_template' in provider.data.templates:
            # NOTE(review): 'flavour_name' — a sibling version of this helper
            # spells it 'flavor_name'; confirm the expected key.
            kwargs = {"flavour_name": provider.data.provisioning.get('instance_type')}
    elif provider.one_of(SCVMMProvider):
        kwargs = {
            "host_group": provider.data.get("provisioning", {}).get("host_group", "All Hosts")}
    else:
        kwargs = {}
    try:
        deploy_template(
            provider.key,
            vm_name,
            template_name=template_name,
            allow_skip="default",
            power_on=True,
            **kwargs
        )
    except TimedOutError as e:
        logger.exception(e)
        try:
            # Best-effort removal of the partially provisioned VM.
            VM.factory(vm_name, provider).cleanup_on_provider()
        except TimedOutError:
            logger.warning("Could not delete VM %s!", vm_name)
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip("{} is quite likely overloaded! Check its status!\n{}: {}".format(
                provider.key, type(e).__name__, str(e)))
    request.addfinalizer(lambda: VM.factory(vm_name, provider).cleanup_on_provider())
    # Make it appear in the provider
    provider.refresh_provider_relationships()
    # Get the REST API object
    api = wait_for(
        get_vm_object,
        func_args=[provider.appliance, vm_name],
        message="VM object {} appears in CFME".format(vm_name),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]
    return VMWrapper(provider, vm_name, api)
def proxy_machine():
    """Deploy a proxy VM described in cfme_data, yield ``(ip, port)``, and
    delete the VM afterwards. Skips when yaml data or a reachable IP is
    missing."""
    try:
        proxy_data = cfme_data.proxy_template
        port = proxy_data.port
        vm = deploy_template(proxy_data.provider,
                             random_vm_name('proxy'),
                             template_name=proxy_data.template_name)
    except AttributeError:
        msg = 'Missing data in cfme_data.yaml, cannot deploy proxy'
        logger.exception(msg)
        pytest.skip(msg)
    try:
        found_ip, _ = wait_for(find_pingable,
                               func_args=[vm],
                               fail_condition=None,
                               delay=5,
                               num_sec=300)
    except TimedOutError:
        msg = 'Timed out waiting for reachable proxy VM IP'
        logger.exception(msg)
        pytest.skip(msg)
    yield found_ip, port
    vm.delete()
def depot_machine_ip():
    """Deploy a log-depot VM from yaml configuration, yield its IP address,
    then remove the VM from the provider."""
    name = "test_long_log_depot_{}".format(fauxfactory.gen_alphanumeric())
    ops_data = conf.cfme_data.get("log_db_operations", {})
    provider_key = ops_data["log_db_depot_template"]["provider"]
    template = ops_data["log_db_depot_template"]["template_name"]
    mgmt = get_mgmt(provider_key)
    deploy_template(provider_key, name, template_name=template)
    yield mgmt.get_ip_address(name)
    mgmt.delete_vm(name)
def utility_vm():
    """ Deploy an utility vm for tests to use.

    This fixture creates a vm on provider and then receives its ip.
    After the test run vm is deleted from provider.

    Yields:
        (vm, injected_user_cred, data) — the deployed VM wrapper, the SSH
        credential injected via cloud-init, and the raw yaml data block.
    """
    try:
        data = cfme_data['utility_vm']
        injected_user_cred = SSHCredential.from_config(
            data['injected_credentials'])
        try:
            # Inject the local public key so tests can SSH without a password.
            with open(os.path.expanduser('~/.ssh/id_rsa.pub')) as f:
                authorized_ssh_keys = f.read()
        except FileNotFoundError:
            # No local key pair available — cloud-init will only configure
            # the user name and password.
            authorized_ssh_keys = None
        vm = deploy_template(
            data.provider,
            random_vm_name('proxy'),
            template_name=data.template_name,
            # The naming is not great. It comes from
            # https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.2/
            # html-single/python_sdk_guide/index#Starting_a_Virtual_Machine_with_Cloud-Init
            initialization=dict(user_name=injected_user_cred.principal,
                                root_password=injected_user_cred.secret,
                                authorized_ssh_keys=authorized_ssh_keys))
    except AttributeError:
        msg = 'Missing utility_vm data from cfme_data.yaml, cannot deploy the utility vm.'
        logger.exception(msg)
        pytest.skip(msg)
    yield vm, injected_user_cred, data
    vm.delete()
def depot_machine_ip():
    """Deploy a log-depot VM, yield its IP, and clean it up via the
    provider-CRUD wrapper afterwards."""
    name = "test_long_log_depot_{}".format(fauxfactory.gen_alphanumeric())
    ops_data = conf.cfme_data.get("log_db_operations", {})
    provider_key = ops_data["log_db_depot_template"]["provider"]
    template = ops_data["log_db_depot_template"]["template_name"]
    provider = get_crud(provider_key)
    deploy_template(provider_key, name, template_name=template)
    yield provider.mgmt.get_ip_address(name)
    VM.factory(name, provider).cleanup_on_provider()
def depot_machine_ip(request, appliance):
    """ Deploy vm for depot test

    This fixture uses for deploy vm on provider from yaml and then receive it's ip
    After test run vm deletes from provider
    """
    try:
        # use long-test name so it has a longer life before automatic cleanup
        data = cfme_data.log_db_operations
        vm = deploy_template(
            data.log_db_depot_template.provider,
            fauxfactory.gen_alphanumeric(26, start="long-test-depot-"),
            template_name=data.log_db_depot_template.template_name)
        vm.ensure_state(VmState.RUNNING)
    except AttributeError:
        # Raised when the yaml lacks the log_db_operations sections above.
        msg = 'Missing some yaml information necessary to deploy depot VM'
        logger.exception(msg)
        pytest.skip(msg)
    try:
        # TODO It would be better to use retry_connect here, but this requires changes to other
        # fixtures.
        # NOTE(review): positional args presumably mean (port, total timeout,
        # delay, per-connect timeout) — confirm against pick_responding_ip.
        found_ip = pick_responding_ip(lambda: vm.all_ips, FTP_PORT, 300, 5, 10)
    except TimedOutError:
        msg = 'Timed out waiting for reachable depot VM IP'
        logger.exception(msg)
        pytest.skip(msg)
    yield found_ip
    vm.cleanup()
def depot_machine_ip(request, appliance):
    """Deploy a depot VM from yaml data and yield a pingable IP for it; the
    VM is cleaned up after the test. Skips on missing yaml data or when no
    reachable IP appears in time."""
    try:
        # use long-test name so it has a longer life before automatic cleanup
        ops_data = cfme_data.log_db_operations
        vm = deploy_template(
            ops_data.log_db_depot_template.provider,
            fauxfactory.gen_alphanumeric(26, start="long-test-depot-"),
            template_name=ops_data.log_db_depot_template.template_name)
        vm.ensure_state(VmState.RUNNING)
    except AttributeError:
        msg = 'Missing some yaml information necessary to deploy depot VM'
        logger.exception(msg)
        pytest.skip(msg)
    try:
        depot_ip, _ = wait_for(
            find_pingable, func_args=[vm], fail_condition=None, delay=5, num_sec=300)
    except TimedOutError:
        msg = 'Timed out waiting for reachable depot VM IP'
        logger.exception(msg)
        pytest.skip(msg)
    yield depot_ip
    vm.cleanup()
def proxy_machine():
    """Deploy a proxy VM from the yaml template, yield ``(ip, port)``, then
    delete the VM from the provider."""
    vm_name = random_vm_name('proxy')
    proxy_data = conf.cfme_data.get("proxy_template")
    provider_key = proxy_data["provider"]
    template = proxy_data["template_name"]
    port = proxy_data['port']
    mgmt = get_mgmt(provider_key)
    deploy_template(provider_key, vm_name, template_name=template)
    yield mgmt.get_ip_address(vm_name), port
    mgmt.delete_vm(vm_name)
def vm_crud(provider):
    """Deploy a throwaway VM from the provider's small template; skip when no
    small template exists, and clean the VM up after the test."""
    vm_collection = provider.appliance.provider_based_collection(provider)
    name = random_vm_name(context='pblsh')
    vm = vm_collection.instantiate(name, provider)
    try:
        deploy_template(vm.provider.key, name,
                        provider.data.templates.small_template.name,
                        timeout=2500)
    except (KeyError, AttributeError):
        pytest.skip("Skipping as small_template could not be found on the provider")
    vm.wait_to_appear(timeout=900, load_details=False)
    yield vm
    try:
        vm.cleanup_on_provider()
    except Exception:
        logger.exception('Exception deleting test vm "%s" on %s', vm.name, provider.name)
def proxy_machine():
    """Provision the yaml-described proxy VM, yield its IP together with the
    configured port, and remove the VM afterwards."""
    name = random_vm_name('proxy')
    template_data = conf.cfme_data.get("proxy_template")
    key = template_data["provider"]
    template = template_data["template_name"]
    port = template_data['port']
    mgmt = get_mgmt(key)
    deploy_template(key, name, template_name=template)
    yield mgmt.get_ip_address(name), port
    mgmt.delete_vm(name)
def vm_crud(provider):
    """Yield a freshly deployed small-template VM; skip when the provider has
    no small template, and tear the VM down afterwards (best effort)."""
    coll = provider.appliance.provider_based_collection(provider)
    new_name = random_vm_name(context='pblsh')
    vm = coll.instantiate(new_name, provider)
    try:
        deploy_template(vm.provider.key,
                        new_name,
                        provider.data.templates.small_template.name,
                        timeout=2500)
    except (KeyError, AttributeError):
        pytest.skip("Skipping as small_template could not be found on the provider")
    vm.wait_to_appear(timeout=900, load_details=False)
    yield vm
    try:
        vm.cleanup_on_provider()
    except Exception:
        logger.exception('Exception deleting test vm "%s" on %s', vm.name, provider.name)
def db_depot_machine_ip(request):
    """Deploy a DB-backup depot VM, yield its IP, then clean it up via the
    provider-CRUD wrapper."""
    name = "test_db_backup_depot_{}".format(
        fauxfactory.gen_alphanumeric())
    ops_data = conf.cfme_data.get("log_db_operations", {})
    provider_key = ops_data["log_db_depot_template"]["provider"]
    template = ops_data["log_db_depot_template"]["template_name"]
    provider = get_crud(provider_key)
    deploy_template(provider_key, name, template_name=template)
    yield provider.mgmt.get_ip_address(name)
    VM.factory(name, provider).cleanup_on_provider()
def db_depot_machine_ip(request):
    """Deploy a DB-backup depot VM and return its IP; deletion is registered
    as a pytest finalizer."""
    name = "test_db_backup_depot_{}".format(fauxfactory.gen_alphanumeric())
    ops_data = conf.cfme_data.get("log_db_operations", {})
    provider_key = ops_data["log_db_depot_template"]["provider"]
    template = ops_data["log_db_depot_template"]["template_name"]
    mgmt = get_mgmt(provider_key)
    deploy_template(provider_key, name, template_name=template)
    request.addfinalizer(lambda: mgmt.delete_vm(name))
    return mgmt.get_ip_address(name)
def depot_machine_ip(appliance):
    """Deploy a log-depot VM, yield its IP, and delete it afterwards through
    the appliance's provider-based collection."""
    name = "test_long_log_depot_{}".format(
        fauxfactory.gen_alphanumeric())
    ops_data = conf.cfme_data.get("log_db_operations", {})
    provider_key = ops_data["log_db_depot_template"]["provider"]
    template = ops_data["log_db_depot_template"]["template_name"]
    provider = get_crud(provider_key)
    deploy_template(provider_key, name, template_name=template)
    yield provider.mgmt.get_ip_address(name)
    vm_collection = appliance.provider_based_collection(provider)
    vm_collection.instantiate(name, provider).delete_from_provider()
def create_on_provider(self, timeout=900, find_in_cfme=False, delete_on_failure=True, **kwargs):
    """Create the VM on the provider via MgmtSystem.

    ``deploy_template`` handles errors during VM provision on the MgmtSystem
    side and deletes the VM if it was provisioned incorrectly.

    Args:
        timeout: Number of seconds to wait for the VM to appear in CFME
            Will not wait at all, if set to 0 (Defaults to ``900``)
        find_in_cfme: Verifies that VM exists in CFME UI
        delete_on_failure: Attempts to remove VM on UI navigation failure
    """
    deploy_template(self.provider.key, self.name, self.template_name, **kwargs)
    try:
        if find_in_cfme:
            self.wait_to_appear(timeout=timeout, load_details=False)
    except Exception:
        # Fixed typo ("CMFE" -> "CFME"); logger.warn() is deprecated in
        # favour of logger.warning().
        logger.warning("Couldn't find VM or Instance in CFME")
        if delete_on_failure:
            logger.info("Removing VM or Instance from mgmt system")
            self.provider.mgmt.delete_vm(self.name)
        # Bare re-raise keeps the original exception and traceback intact.
        raise
def depot_machine_ip(appliance):
    """Deploy a log-depot VM, yield its IP, and clean the VM up afterwards."""
    name = "test_long_log_depot_{}".format(fauxfactory.gen_alphanumeric())
    ops_data = conf.cfme_data.get("log_db_operations", {})
    provider_key = ops_data["log_db_depot_template"]["provider"]
    template = ops_data["log_db_depot_template"]["template_name"]
    vm = deploy_template(provider_key, name, template_name=template)
    yield vm.ip
    vm.cleanup()
def db_depot_machine_ip(request, appliance):
    """Deploy a DB-backup depot VM, yield its IP, and clean up afterwards."""
    name = "test_db_backup_depot_{}".format(
        fauxfactory.gen_alphanumeric())
    ops_data = conf.cfme_data.get("log_db_operations", {})
    provider_key = ops_data["log_db_depot_template"]["provider"]
    template = ops_data["log_db_depot_template"]["template_name"]
    vm = deploy_template(provider_key, name, template_name=template)
    yield vm.ip
    vm.cleanup()
def db_depot_machine_ip(request, appliance):
    """Deploy a DB-backup depot VM and yield its IP; skip when the VM never
    gets an IP address. The VM is cleaned up after the test."""
    name = "test_db_backup_depot_{}".format(fauxfactory.gen_alphanumeric())
    ops_data = conf.cfme_data.get("log_db_operations", {})
    provider_key = ops_data["log_db_depot_template"]["provider"]
    template = ops_data["log_db_depot_template"]["template_name"]
    vm = deploy_template(provider_key, name, template_name=template)
    if vm.ip is None:
        pytest.skip('Depot VM does not have IP address')
    yield vm.ip
    vm.cleanup()
def test_provider_refresh(request, a_provider, appliance):
    """Test checking that refresh invoked from the REST API works.

    It provisions a VM when the Provider inventory functionality is disabled,
    then the functionality is enabled and we wait for refresh to finish by
    checking the field in provider and then we check whether the VM appeared
    in the provider.

    Prerequisities:
        * A provider that is set up, with templates suitable for provisioning.

    Steps:
        * Disable the ``ems_inventory`` and ``ems_operations`` roles
        * Provision a VM
        * Store old refresh date from the provider
        * Initiate refresh
        * Wait until the refresh date updates
        * The VM should appear soon.

    Metadata:
        test_flag: rest
    """
    if "refresh" not in appliance.rest_api.collections.providers.action.all:
        pytest.skip("Refresh action is not implemented in this version")
    provider_rest = appliance.rest_api.collections.providers.get(
        name=a_provider.name)
    # Provision while inventory/operations roles are off so the VM is unknown
    # to CFME until the explicit refresh below.
    with server_roles_disabled("ems_inventory", "ems_operations"):
        vm_name = deploy_template(
            a_provider.key,
            "test_rest_prov_refresh_{}".format(
                fauxfactory.gen_alphanumeric(length=4)))
        request.addfinalizer(lambda: a_provider.mgmt.delete_vm(vm_name))
    provider_rest.reload()
    old_refresh_dt = provider_rest.last_refresh_date
    response = provider_rest.action.refresh()
    assert appliance.rest_api.response.status_code == 200
    assert response["success"], "Refresh was unsuccessful"
    # Wait for the refresh timestamp to move past the recorded one.
    wait_for(
        lambda: provider_rest.last_refresh_date != old_refresh_dt,
        fail_func=provider_rest.reload,
        num_sec=720,
        delay=5,
    )
    # We suppose that thanks to the random string, there will be only one such VM
    wait_for(
        lambda: appliance.rest_api.collections.vms.find_by(name=vm_name) or False,
        num_sec=180,
        delay=10,
    )
def vm(request, a_provider, rest_api):
    """Deploy a short-lived VM for REST tests, wait for it to show up in the
    vms collection, and return its name. Deletion is registered as a
    finalizer and tolerates the VM already being gone."""
    provider_rest = rest_api.collections.providers.get(name=a_provider.name)
    vm_name = deploy_template(
        a_provider.key,
        'test_rest_vm_{}'.format(fauxfactory.gen_alphanumeric(length=4)))

    def _finished():
        try:
            a_provider.mgmt.delete_vm(vm_name)
        except Exception:
            # vm can be deleted/retired by test
            logger.warning("Failed to delete vm '{}'.".format(vm_name))

    request.addfinalizer(_finished)
    provider_rest.action.refresh()
    wait_for(
        lambda: rest_api.collections.vms.find_by(name=vm_name) or False,
        num_sec=600, delay=5)
    return vm_name
def vm(request, a_provider, rest_api):
    """Provision a REST-test VM, refresh the provider, wait until the VM is
    listed, and return its name; the finalizer deletes it best-effort."""
    provider_rest = rest_api.collections.providers.get(name=a_provider.name)
    vm_name = deploy_template(
        a_provider.key,
        'test_rest_vm_{}'.format(fauxfactory.gen_alphanumeric(length=4)))

    def _finished():
        try:
            a_provider.mgmt.delete_vm(vm_name)
        except Exception:
            # vm can be deleted/retired by test
            logger.warning("Failed to delete vm '{}'.".format(vm_name))

    request.addfinalizer(_finished)
    provider_rest.action.refresh()
    wait_for(lambda: rest_api.collections.vms.find_by(name=vm_name) or False,
             num_sec=600,
             delay=5)
    return vm_name
def proxy_machine():
    """ Deploy vm for proxy test

    This fixture uses for deploy vm on provider from yaml and then receive it's ip
    After test run vm deletes from provider
    """
    depot_machine_name = random_vm_name('proxy')
    data = conf.cfme_data.get("proxy_template")
    proxy_provider_key = data["provider"]
    proxy_template_name = data["template_name"]
    proxy_port = data['port']
    # Removed unused local `prov = get_mgmt(proxy_provider_key)` — the
    # deployed `vm` wrapper handles both the IP lookup and deletion.
    vm = deploy_template(proxy_provider_key,
                         depot_machine_name,
                         template_name=proxy_template_name)
    wait_for(func=lambda: vm.ip is not None, num_sec=300, delay=10,
             message='Waiting for instance "{}" ip to be present.'.format(vm.name))
    yield vm.ip, proxy_port
    vm.delete()
def depot_machine_ip(appliance):
    """Deploy a log-depot VM and yield its IP once assigned; skip if no IP
    appears within the timeout. The VM is cleaned up after the test."""
    name = "test_long_log_depot_{}".format(fauxfactory.gen_alphanumeric())
    ops_data = conf.cfme_data.get("log_db_operations", {})
    provider_key = ops_data["log_db_depot_template"]["provider"]
    template = ops_data["log_db_depot_template"]["template_name"]
    vm = deploy_template(provider_key, name, template_name=template, timeout=1200)
    try:
        wait_for(lambda: vm.ip is not None, timeout=600)
    except TimedOutError:
        pytest.skip('Depot VM does not have IP address')
    yield vm.ip
    vm.cleanup()
def vm(request, provider, appliance):
    """Deploy a REST-test VM, wait until CFME lists it, and return its name;
    the finalizer cleans the VM up best-effort."""
    provider_rest = appliance.rest_api.collections.providers.get(name=provider.name)
    deployed = deploy_template(
        provider.key,
        'test_rest_vm_{}'.format(fauxfactory.gen_alphanumeric(length=4))
    )
    vm_name = deployed.name

    def _finished():
        try:
            deployed.cleanup()
        except Exception:
            # vm can be deleted/retired by test
            logger.warning("Failed to delete vm %r", deployed)

    request.addfinalizer(_finished)
    provider_rest.action.refresh()
    wait_for(
        lambda: appliance.rest_api.collections.vms.find_by(name=vm_name) or False,
        num_sec=600, delay=5)
    return vm_name
def depot_machine_ip(appliance):
    """Deploy a log-depot VM with an extended deploy timeout, yield its IP,
    and skip if the VM never gets one. Cleans the VM up afterwards."""
    name = "test_long_log_depot_{}".format(
        fauxfactory.gen_alphanumeric())
    ops_data = conf.cfme_data.get("log_db_operations", {})
    provider_key = ops_data["log_db_depot_template"]["provider"]
    template = ops_data["log_db_depot_template"]["template_name"]
    vm = deploy_template(provider_key, name,
                         template_name=template,
                         timeout=1200)
    try:
        wait_for(lambda: vm.ip is not None, timeout=600)
    except TimedOutError:
        pytest.skip('Depot VM does not have IP address')
    yield vm.ip
    vm.cleanup()
def proxy_machine():
    """Deploy the yaml-described proxy VM, wait for its IP to come up, yield
    ``(ip, port)``, and delete the VM afterwards."""
    name = random_vm_name('proxy')
    proxy_data = conf.cfme_data.get("proxy_template")
    provider_key = proxy_data["provider"]
    template = proxy_data["template_name"]
    port = proxy_data['port']
    vm = deploy_template(provider_key, name, template_name=template)
    wait_for(func=lambda: vm.ip is not None, num_sec=300, delay=10,
             message='Waiting for instance "{}" ip to be present.'.format(vm.name))
    yield vm.ip, port
    vm.delete()
def vm(request, provider, appliance, **kwargs):
    """Deploy a REST-test VM (name overridable via ``kwargs['name']``), wait
    for CFME to list it, and return the name; cleanup runs as a finalizer."""
    vm_name = kwargs.pop(
        "name",
        fauxfactory.gen_alphanumeric(length=18, start="test_rest_vm_"))
    provider_rest = appliance.rest_api.collections.providers.get(
        name=provider.name)
    vm = deploy_template(provider.key, vm_name)

    def _finished():
        try:
            vm.cleanup()
        except Exception:
            # vm can be deleted/retired by test
            logger.warning("Failed to delete vm %r", vm)

    request.addfinalizer(_finished)
    provider_rest.action.refresh()
    wait_for(lambda: appliance.rest_api.collections.vms.find_by(name=vm_name) or False,
             num_sec=600,
             delay=5)
    return vm_name
def cu_vm(provider, vm_name, template):
    """ Deploys CU VM """
    # Deploys a Capacity & Utilization VM from `template`, installs cron jobs
    # via SSH to generate disk/network load, then reboots so they take effect.
    provider_dict = cfme_data['management_systems'][provider]
    # TODO this key isn't in cfme qe yamls
    datastore = provider_dict['cap_and_util']['allowed_datastores']
    resource_pool = provider_dict['cap_and_util']['resource_pool']
    # TODO methods deploy_template calls don't accept resourcepool and allowed_datastores as kwargs
    vm = deploy_template(provider, vm_name, template,
                         resourcepool=resource_pool, allowed_datastores=datastore)
    vm.ensure_state(VmState.RUNNING, timeout='2m')
    ip = vm.ip
    assert vm.ip, "VM has no IP"
    # TODO this key isn't in cfme qe yamls
    vm_ssh_creds = provider_dict['capandu_vm_creds']
    sshname = credentials[vm_ssh_creds]['username']
    sshpass = credentials[vm_ssh_creds]['password']
    # Create cron jobs to generate disk and network activity on the CU VM.
    with make_ssh_client(ip, sshname, sshpass) as ssh_client:
        try:
            config_cu_vm(ssh_client)
            # Reboot so cron jobs get picked up
            vm.restart()
            vm.wait_for_state(VmState.RUNNING)
        except (CUCommandException, TimedOutError):
            # Configuration or reboot failed — tear the VM down, then re-raise.
            vm.cleanup()
            raise
    assert vm.is_running, "VM is not running"
def depot_machine_ipv4_and_ipv6(request, appliance):
    """Deploy a depot VM and yield a ``(ipv4, ipv6)`` pair of reachable
    addresses for it; skip on missing yaml data or unreachable VM. The VM is
    cleaned up after the test."""
    try:
        # use long-test name so it has a longer life before automatic cleanup
        ops_data = cfme_data.log_db_operations
        vm = deploy_template(
            ops_data.log_db_depot_template.provider,
            f"long-test-depot-{fauxfactory.gen_alphanumeric()}",
            template_name=ops_data.log_db_depot_template.template_name)
        vm.ensure_state(VmState.RUNNING)
    except AttributeError:
        msg = 'Missing some yaml information necessary to deploy depot VM'
        logger.exception(msg)
        pytest.skip(msg)
    try:
        ipv4, _ = wait_for(find_pingable, func_args=[vm, False],
                           fail_condition=None, delay=5, num_sec=300)
        ipv6, _ = wait_for(find_pingable_ipv6, func_args=[vm],
                           fail_condition=None, delay=5, num_sec=300)
    except TimedOutError:
        msg = 'Timed out waiting for reachable depot VM IP'
        logger.exception(msg)
        pytest.skip(msg)
    yield ipv4, ipv6
    vm.cleanup()
def cu_vm(provider, vm_name, template):
    """ Deploys CU VM """
    # Deploy a C&U VM, configure activity-generating cron jobs over SSH, and
    # reboot the VM so the jobs start running.
    provider_dict = cfme_data['management_systems'][provider]
    # TODO this key isn't in cfme qe yamls
    datastore = provider_dict['cap_and_util']['allowed_datastores']
    resource_pool = provider_dict['cap_and_util']['resource_pool']
    # TODO methods deploy_template calls don't accept resourcepool and allowed_datastores as kwargs
    vm = deploy_template(
        provider,
        vm_name,
        template,
        resourcepool=resource_pool,
        allowed_datastores=datastore
    )
    vm.ensure_state(VmState.RUNNING, timeout='2m')
    ip = vm.ip
    assert vm.ip, "VM has no IP"
    # TODO this key isn't in cfme qe yamls
    vm_ssh_creds = provider_dict['capandu_vm_creds']
    sshname = credentials[vm_ssh_creds]['username']
    sshpass = credentials[vm_ssh_creds]['password']
    # Create cron jobs to generate disk and network activity on the CU VM.
    with make_ssh_client(ip, sshname, sshpass) as ssh_client:
        try:
            config_cu_vm(ssh_client)
            # Reboot so cron jobs get picked up
            vm.restart()
            vm.wait_for_state(VmState.RUNNING)
        except (CUCommandException, TimedOutError):
            # Configuration or reboot failed — remove the VM and re-raise.
            vm.cleanup()
            raise
    assert vm.is_running, "VM is not running"
def _get_vm(request, provider, template_name, vm_name):
    """Deploy ``template_name`` as ``vm_name`` and return a :class:`VMWrapper`
    with the matching REST API object.

    Registers an explicit teardown finalizer so the VM is removed even when
    the REST lookup below fails; skips the module on provisioning timeout.
    """
    # Provider-specific deploy kwargs.
    if provider.one_of(RHEVMProvider):
        kwargs = {"cluster": provider.data["default_cluster"]}
    elif provider.one_of(OpenStackProvider):
        kwargs = {}
        if 'small_template' in provider.data.templates:
            # NOTE(review): 'flavour_name' — a sibling version of this helper
            # spells it 'flavor_name'; confirm which key the mgmt layer wants.
            kwargs = {
                "flavour_name": provider.data.templates.get('small_template').name
            }
    elif provider.one_of(SCVMMProvider):
        kwargs = {
            "host_group": provider.data.get("provisioning", {}).get("host_group", "All Hosts")
        }
    else:
        kwargs = {}
    try:
        deploy_template(provider.key,
                        vm_name,
                        template_name=template_name,
                        allow_skip="default",
                        power_on=True,
                        **kwargs)
    except TimedOutError as e:
        logger.exception(e)
        try:
            # Best-effort removal of the partially provisioned VM.
            provider.mgmt.delete_vm(vm_name)
        except TimedOutError:
            logger.warning("Could not delete VM %s!", vm_name)
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip(
                "{} is quite likely overloaded! Check its status!\n{}: {}".
                format(provider.key, type(e).__name__, str(e)))

    @request.addfinalizer
    def _finalize():
        """if getting REST object failed, we would not get the VM deleted! So explicit teardown."""
        logger.info("Shutting down VM with name %s", vm_name)
        # A suspended VM cannot be stopped directly on these providers, so
        # power it up first.
        if (provider.one_of(InfraProvider, OpenStackProvider, AzureProvider)
                and provider.mgmt.is_vm_suspended(vm_name)):
            logger.info("Powering up VM %s to shut it down correctly.", vm_name)
            provider.mgmt.start_vm(vm_name)
        if provider.mgmt.is_vm_running(vm_name):
            logger.info("Powering off VM %s", vm_name)
            provider.mgmt.stop_vm(vm_name)
        if provider.mgmt.does_vm_exist(vm_name):
            logger.info("Deleting VM %s in %s", vm_name, provider.mgmt.__class__.__name__)
            provider.mgmt.delete_vm(vm_name)

    # Make it appear in the provider
    provider.refresh_provider_relationships()
    # Get the REST API object
    api = wait_for(
        get_vm_object,
        func_args=[provider.appliance, vm_name],
        message="VM object {} appears in CFME".format(vm_name),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]
    return VMWrapper(provider, vm_name, api)
def _ssa_single_vm():
    """Provision one VM for Smart State Analysis and return it (decorated with
    ``ssh``/``system_type``/``image``/``connect_ip`` attributes).

    NOTE(review): takes no arguments — ``provider``, ``request``, ``appliance``,
    ``vm_analysis_provisioning_data``, ``analysis_type`` etc. are closed over
    from an enclosing fixture scope not visible here; confirm against the
    surrounding module.
    """
    template_name = vm_analysis_provisioning_data['image']
    vm_name = 'test-ssa-{}-{}'.format(fauxfactory.gen_alphanumeric(), analysis_type)
    collection = provider.appliance.provider_based_collection(provider)
    vm = collection.instantiate(vm_name,
                                provider,
                                template_name=vm_analysis_provisioning_data.image)
    provision_data = vm_analysis_provisioning_data.copy()
    del provision_data['image']

    # Compliance tests go through the full CFME lifecycle provisioning dialog;
    # everything else deploys the template directly on the provider.
    if "test_ssa_compliance" in request._pyfuncitem.name:
        provisioning_data = {"catalog": {'vm_name': vm_name},
                             "environment": {'automatic_placement': True}}
        do_vm_provisioning(vm_name=vm_name, appliance=appliance, provider=provider,
                           provisioning_data=provisioning_data, template_name=template_name,
                           request=request, smtp_test=False, num_sec=2500)
    else:
        deploy_template(vm.provider.key, vm_name, template_name, timeout=2500)
        vm.wait_to_appear(timeout=900, load_details=False)

    # Ensure the VM is removed from the provider at test teardown.
    request.addfinalizer(lambda: vm.delete_from_provider())

    if provider.one_of(OpenStackProvider):
        public_net = provider.data['public_network']
        vm.provider.mgmt.assign_floating_ip(vm.name, public_net)

    logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)

    # wait_for_decorator runs this predicate immediately, retrying every 5s for
    # up to 20 minutes until it returns truthy (i.e. the VM has an IP).
    @wait_for_decorator(timeout="20m", delay=5)
    def get_ip_address():
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, provider.mgmt.vm_status(vm_name),
            provider.mgmt.is_vm_stopped(vm_name)))
        # Kick a stopped VM back to running so it can acquire an address.
        if provider.mgmt.is_vm_stopped(vm_name):
            provider.mgmt.start_vm(vm_name)
        ip = provider.mgmt.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = provider.mgmt.get_ip_address(vm_name)
    assert connect_ip is not None

    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    if vm_analysis_provisioning_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(
            hostname=connect_ip,
            username=credentials[vm_analysis_provisioning_data.credentials]['username'],
            password=credentials[vm_analysis_provisioning_data.credentials]['password'],
            port=22)
        # handle_exception=True: keep retrying through connection errors for up to 1h.
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
        vm.ssh = ssh_client

    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_provisioning_data['image']
    vm.connect_ip = connect_ip

    # TODO: if rhev and iscsi, it need direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        cfme_rel = InfraVm.CfmeRelationship(vm)
        cfme_rel.set_relationship(appliance.server.name, appliance.server_id())

    # Close the SSH client if we have one
    request.addfinalizer(lambda: vm.ssh.close() if getattr(vm, 'ssh', None) else None)
    return vm
def _ssa_single_vm():
    """Provision one VM for Smart State Analysis and return it (decorated with
    ``ssh``/``system_type``/``image``/``connect_ip`` attributes).

    NOTE(review): takes no arguments — ``provider``, ``request``, ``appliance``,
    ``vm_analysis_provisioning_data``, ``analysis_type`` etc. are closed over
    from an enclosing fixture scope not visible here; confirm against the
    surrounding module.
    """
    template_name = vm_analysis_provisioning_data['image']
    vm_name = 'test-ssa-{}-{}'.format(fauxfactory.gen_alphanumeric(), analysis_type)
    collection = provider.appliance.provider_based_collection(provider)
    vm = collection.instantiate(vm_name,
                                provider,
                                template_name=vm_analysis_provisioning_data.image)
    provision_data = vm_analysis_provisioning_data.copy()
    del provision_data['image']

    # Compliance tests and RHV go through the full CFME lifecycle provisioning
    # dialog; everything else deploys the template directly on the provider.
    if "test_ssa_compliance" in request._pyfuncitem.name or provider.one_of(RHEVMProvider):
        provisioning_data = {"catalog": {'vm_name': vm_name},
                             "environment": {'automatic_placement': True}}
        if provider.one_of(RHEVMProvider):
            # RHV needs an explicit vlan pick; partial_match tolerates suffixes.
            provisioning_data.update(
                {"network": {'vlan': partial_match(provision_data['vlan'])}}
            )
        do_vm_provisioning(vm_name=vm_name, appliance=appliance, provider=provider,
                           provisioning_data=provisioning_data, template_name=template_name,
                           request=request, num_sec=2500)
    else:
        deploy_template(vm.provider.key, vm_name, template_name, timeout=2500)
        vm.wait_to_appear(timeout=900, load_details=False)

    # Ensure the VM is removed from the provider at test teardown.
    request.addfinalizer(lambda: vm.cleanup_on_provider())

    if provider.one_of(OpenStackProvider):
        public_net = provider.data['public_network']
        vm.mgmt.assign_floating_ip(public_net)

    logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)

    vm.mgmt.ensure_state(VmState.RUNNING)

    # wait_for_decorator runs this predicate immediately, retrying every 5s for
    # up to 10 minutes until the VM reports an IP.
    @wait_for_decorator(timeout="10m", delay=5)
    def get_ip_address():
        ip = vm.mgmt.ip
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = vm.mgmt.ip
    assert connect_ip is not None

    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    if vm_analysis_provisioning_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(
            hostname=connect_ip,
            username=credentials[vm_analysis_provisioning_data.credentials]['username'],
            password=credentials[vm_analysis_provisioning_data.credentials]['password'],
            port=22)
        # handle_exception=True: keep retrying through connection errors for up to 1h.
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
        vm.ssh = ssh_client

    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_provisioning_data['image']
    vm.connect_ip = connect_ip

    # TODO: if rhev and iscsi, it need direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        cfme_rel = InfraVm.CfmeRelationship(vm)
        cfme_rel.set_relationship(appliance.server.name, appliance.server_id())

    # Close the SSH client if we have one
    request.addfinalizer(lambda: vm.ssh.close() if getattr(vm, 'ssh', None) else None)
    return vm
def _ssa_single_vm():
    """Provision one VM for Smart State Analysis and return it.

    The returned VM object carries extra attributes used by the tests:
    ``ssh`` (when SSH is possible), ``system_type``, ``image``, ``connect_ip``.

    NOTE(review): takes no arguments — ``provider``, ``request``, ``appliance``,
    ``vm_analysis_provisioning_data``, ``analysis_type`` etc. are closed over
    from an enclosing fixture scope not visible here; confirm against the
    surrounding module.
    """
    image_name = vm_analysis_provisioning_data['image']
    vm_name = f'test-ssa-{fauxfactory.gen_alphanumeric()}-{analysis_type}'
    vm_collection = provider.appliance.provider_based_collection(provider)
    ssa_vm = vm_collection.instantiate(
        vm_name, provider, template_name=vm_analysis_provisioning_data.image)

    prov_options = vm_analysis_provisioning_data.copy()
    del prov_options['image']

    # Compliance tests and RHV go through the full CFME lifecycle provisioning
    # dialog; everything else deploys the template directly on the provider.
    use_lifecycle_dialog = (
        "test_ssa_compliance" in request._pyfuncitem.name
        or provider.one_of(RHEVMProvider)
    )
    if use_lifecycle_dialog:
        form_values = {
            "catalog": {'vm_name': vm_name},
            "environment": {'automatic_placement': True},
        }
        if provider.one_of(RHEVMProvider):
            # RHV needs an explicit vlan pick; partial_match tolerates suffixes.
            form_values["network"] = {'vlan': partial_match(prov_options['vlan'])}
        do_vm_provisioning(vm_name=vm_name, appliance=appliance, provider=provider,
                           provisioning_data=form_values, template_name=image_name,
                           request=request, num_sec=2500)
    else:
        deploy_template(ssa_vm.provider.key, vm_name, image_name, timeout=2500)
        ssa_vm.wait_to_appear(timeout=900, load_details=False)

    # Ensure the VM is removed from the provider at test teardown.
    request.addfinalizer(ssa_vm.cleanup_on_provider)

    if provider.one_of(OpenStackProvider):
        ssa_vm.mgmt.assign_floating_ip(provider.data['public_network'])

    logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)
    ssa_vm.mgmt.ensure_state(VmState.RUNNING)

    try:
        connect_ip, _ = wait_for(find_pingable, func_args=[ssa_vm.mgmt], timeout="10m",
                                 delay=5, fail_condition=None)
    except TimedOutError:
        pytest.fail('Timed out waiting for pingable address on SSA VM')

    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    if vm_analysis_provisioning_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        creds = credentials[vm_analysis_provisioning_data.credentials]
        client = ssh.SSHClient(hostname=connect_ip,
                               username=creds['username'],
                               password=creds['password'],
                               port=22)
        # handle_exception=True: keep retrying through connection errors for up to 1h.
        wait_for(client.uptime, num_sec=3600, handle_exception=True)
        ssa_vm.ssh = client

    ssa_vm.system_type = detect_system_type(ssa_vm)
    logger.info("Detected system type: %s", ssa_vm.system_type)
    ssa_vm.image = vm_analysis_provisioning_data['image']
    ssa_vm.connect_ip = connect_ip

    # TODO: if rhev and iscsi, it need direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        InfraVm.CfmeRelationship(ssa_vm).set_relationship(
            appliance.server.name, appliance.server_id())

    def _close_ssh():
        # Close the SSH client if we opened one.
        if getattr(ssa_vm, 'ssh', None):
            ssa_vm.ssh.close()

    request.addfinalizer(_close_ssh)
    return ssa_vm
def _get_vm(request, provider, template_name, vm_name):
    """Deploy ``template_name`` as ``vm_name`` on ``provider`` and return a ``VMWrapper``.

    Registers a pytest finalizer that powers off and deletes the VM, refreshes
    provider relationships so the VM appears in CFME, and waits for the REST
    API object to show up.

    Args:
        request: pytest request fixture (used only for ``addfinalizer``).
        provider: provider object (``.one_of``, ``.data``, ``.mgmt``, ``.key``).
        template_name: template to deploy from.
        vm_name: name for the new VM.

    Returns:
        VMWrapper wrapping (provider, vm_name, REST API object).
    """
    # Provider-specific deploy kwargs; default is no extras.
    if provider.one_of(RHEVMProvider):
        kwargs = {"cluster": provider.data["default_cluster"]}
    elif provider.one_of(OpenStackProvider):
        kwargs = {}
        # presumably 'instance_type' from provisioning data names an OpenStack
        # flavour — TODO confirm against provider yaml conventions
        if 'small_template' in provider.data.templates:
            kwargs = {"flavour_name": provider.data.provisioning.get('instance_type')}
    elif provider.one_of(SCVMMProvider):
        kwargs = {
            "host_group": provider.data.get("provisioning", {}).get("host_group", "All Hosts")}
    else:
        kwargs = {}

    try:
        deploy_template(
            provider.key,
            vm_name,
            template_name=template_name,
            allow_skip="default",
            power_on=True,
            **kwargs
        )
    except TimedOutError as e:
        logger.exception(e)
        # Best-effort cleanup of the stuck deployment before skipping.
        try:
            provider.mgmt.delete_vm(vm_name)
        except TimedOutError:
            logger.warning("Could not delete VM %s!", vm_name)
        finally:
            # If this happened, we should skip all tests from this provider in this module.
            # NOTE: pytest.skip raises inside ``finally``, so it fires whether or
            # not delete_vm succeeded — any delete_vm error is deliberately masked.
            pytest.skip("{} is quite likely overloaded! Check its status!\n{}: {}".format(
                provider.key, type(e).__name__, str(e)))

    @request.addfinalizer
    def _finalize():
        """if getting REST object failed, we would not get the VM deleted! So explicit teardown."""
        logger.info("Shutting down VM with name %s", vm_name)
        # Suspended VMs on these providers must be started before they can be
        # stopped cleanly.
        if (provider.one_of(InfraProvider, OpenStackProvider, AzureProvider) and
                provider.mgmt.is_vm_suspended(vm_name)):
            logger.info("Powering up VM %s to shut it down correctly.", vm_name)
            provider.mgmt.start_vm(vm_name)
        if provider.mgmt.is_vm_running(vm_name):
            logger.info("Powering off VM %s", vm_name)
            provider.mgmt.stop_vm(vm_name)
        if provider.mgmt.does_vm_exist(vm_name):
            logger.info("Deleting VM %s in %s", vm_name, provider.mgmt.__class__.__name__)
            provider.mgmt.delete_vm(vm_name)

    # Make it appear in the provider
    provider.refresh_provider_relationships()

    # Get the REST API object (poll up to 10 minutes).
    api = wait_for(
        get_vm_object,
        func_args=[provider.appliance, vm_name],
        message="VM object {} appears in CFME".format(vm_name),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]

    return VMWrapper(provider, vm_name, api)