def setup_for_alerts(request, alerts, event, vm_name, provider):
    """This function takes alerts and sets up CFME for testing them.

    Args:
        request: py.test funcarg request
        alerts: Alert objects
        event: Event to hook on (VM Power On, ...)
        vm_name: VM name to use for policy filtering
        provider: funcarg provider_data
    """
    setup_provider(provider.key)
    alert_profile = explorer.VMInstanceAlertProfile(
        "Alert profile for %s" % vm_name, alerts)
    alert_profile.create()
    request.addfinalizer(alert_profile.delete)
    alert_profile.assign_to("The Enterprise")

    action = explorer.Action("Evaluate Alerts for %s" % vm_name, "Evaluate Alerts", alerts)
    action.create()
    request.addfinalizer(action.delete)

    policy = explorer.VMControlPolicy(
        "Evaluate Alerts policy for %s" % vm_name,
        scope="fill_field(VM and Instance : Name, INCLUDES, %s)" % vm_name)
    policy.create()
    request.addfinalizer(policy.delete)

    policy_profile = explorer.PolicyProfile("Policy profile for %s" % vm_name, [policy])
    policy_profile.create()
    request.addfinalizer(policy_profile.delete)

    policy.assign_actions_to_event(event, [action])
    prov = Provider(provider.data["name"])
    prov.assign_policy_profiles(policy_profile.description)
def configure_appliance(browser_setup, provider, vm_name):
    ''' Configure the appliance for smart state analysis '''
    global appliance_vm_name

    # ensure smart proxy role enabled
    logger.info('Enabling smart proxy role...')
    nav_to_roles().edit_defaults_list("smartproxy")

    # add provider
    logger.info('Setting up provider...')
    setup_provider(provider)

    # credential hosts
    logger.info('Credentialing hosts')
    setup_providers_hosts_credentials(provider)
    prov_data = cfme_data['management_systems'][provider]
    if prov_data['type'] == 'rhevm':
        vm_details = nav_to_vm_details(provider, appliance_vm_name)
        vm_details.edit_cfme_relationship_and_save()

    # wait for vm smart state to enable
    logger.info('Waiting for smartstate option to enable...')
    vm_details = nav_to_vm_details(provider, vm_name)
    wait_for(vm_details.config_button.is_smart_state_analysis_enabled,
             delay=30, num_sec=450, fail_func=pytest.sel.refresh)

    return browser_setup
def _setup_provider(provider_key, request=None):
    def skip(provider_key, previous_fail=False):
        if request:
            node = request.node
            name, location = get_test_idents(node)
            skip_data = {'type': 'provider', 'reason': provider_key}
            art_client.fire_hook('skip_test', test_location=location, test_name=name,
                                 skip_data=skip_data)
        if previous_fail:
            raise pytest.skip(
                'Provider {} failed to set up previously in another test, '
                'skipping test'.format(provider_key))
        else:
            raise pytest.skip('Provider {} failed to set up this time, '
                              'skipping test'.format(provider_key))

    # This function is dynamically "fixturized" to set up a specific provider,
    # optionally skipping the provider setup if that provider has previously failed.
    if provider_key in _failed_providers:
        skip(provider_key, previous_fail=True)

    try:
        providers.setup_provider(provider_key)
    except Exception as ex:
        logger.error('Error setting up provider {}'.format(provider_key))
        logger.exception(ex)
        _failed_providers.add(provider_key)
        skip(provider_key)
def provider_init(provider_key):
    """cfme/infrastructure/provider.py provider object."""
    try:
        setup_provider(provider_key)
    except Exception:
        pytest.skip(
            "It's not possible to set up this provider, therefore skipping")
def provider_init(provider_key):
    """cfme/infrastructure/provider.py provider object."""
    try:
        setup_provider(provider_key)
    except Exception as e:
        logger.info("Exception detected on provider setup: " + str(e))
        pytest.skip("It's not possible to set up this provider, therefore skipping")
def _setup_provider(provider_key, request=None):
    def skip(provider_key, previous_fail=False):
        if request:
            node = request.node
            name, location = get_test_idents(node)
            skip_data = {'type': 'provider', 'reason': provider_key}
            art_client.fire_hook('skip_test', test_location=location, test_name=name,
                                 skip_data=skip_data)
        if previous_fail:
            raise pytest.skip('Provider {} failed to set up previously in another test, '
                              'skipping test'.format(provider_key))
        else:
            raise pytest.skip('Provider {} failed to set up this time, '
                              'skipping test'.format(provider_key))

    # This function is dynamically "fixturized" to set up a specific provider,
    # optionally skipping the provider setup if that provider has previously failed.
    if provider_key in _failed_providers:
        skip(provider_key, previous_fail=True)

    try:
        providers.setup_provider(provider_key)
    except Exception as ex:
        logger.error('Error setting up provider %s', provider_key)
        logger.exception(ex)
        _failed_providers.add(provider_key)
        skip(provider_key)
def provisioner(request, provider):
    if not provider.exists:
        try:
            setup_provider(provider.key)
        except FlashMessageException as e:
            e.skip_and_log("Provider failed to set up")

    def _provisioner(template, provisioning_data, delayed=None):
        pytest.sel.force_navigate('infrastructure_provision_vms', context={
            'provider': provider,
            'template_name': template,
        })

        vm_name = provisioning_data["vm_name"]
        fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
        flash.assert_no_errors()

        request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
        if delayed is not None:
            total_seconds = (delayed - datetime.utcnow()).total_seconds()
            row_description = 'Provision from [%s] to [%s]' % (template, vm_name)
            cells = {'Description': row_description}
            try:
                row, __ = wait_for(requests.wait_for_request, [cells],
                                   fail_func=requests.reload, num_sec=total_seconds, delay=5)
                pytest.fail("The provisioning was not postponed")
            except TimedOutError:
                pass
        logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
        wait_for(provider.mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)

        # nav to requests page happens on successful provision
        logger.info('Waiting for cfme provision request for vm %s' % vm_name)
        row_description = 'Provision from [%s] to [%s]' % (template, vm_name)
        cells = {'Description': row_description}
        row, __ = wait_for(requests.wait_for_request, [cells],
                           fail_func=requests.reload, num_sec=900, delay=20)
        assert row.last_message.text == version.pick({
            version.LOWEST: 'VM Provisioned Successfully',
            "5.3": 'Vm Provisioned Successfully',
        })
        return VM.factory(vm_name, provider)

    return _provisioner
def vm(request, provider_mgmt, provider_crud, provider_key, provider_data, small_template):
    setup_provider(provider_key)
    vm_name = "test_actions-{}-{}".format(provider_key, generate_random_string())
    if isinstance(provider_mgmt, mgmt_system.RHEVMSystem):
        kwargs = {"cluster": provider_data["default_cluster"]}
    elif isinstance(provider_mgmt, mgmt_system.VMWareSystem):
        kwargs = {}
    elif isinstance(provider_mgmt, mgmt_system.SCVMMSystem):
        kwargs = {"host_group": provider_data.get("host_group", "All Hosts")}
    else:
        raise TypeError("Cannot handle provider {}".format(type(provider_mgmt).__name__))

    try:
        deploy_template(
            provider_key,
            vm_name,
            template_name=small_template,
            **kwargs
        )
    except TimedOutError:
        try:
            provider_mgmt.delete_vm(vm_name)
        except TimedOutError:
            logger.warning("Could not delete VM {}!".format(vm_name))
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip("{} is quite likely overloaded! Check its status!".format(provider_key))

    def finalize():
        """if getting SOAP object failed, we would not get the VM deleted! So explicit teardown."""
        logger.info("Shutting down VM with name %s" % vm_name)
        if provider_mgmt.is_vm_suspended(vm_name):
            logger.info("Powering up VM %s to shut it down correctly." % vm_name)
            provider_mgmt.start_vm(vm_name)
        if provider_mgmt.is_vm_running(vm_name):
            logger.info("Powering off VM %s" % vm_name)
            provider_mgmt.stop_vm(vm_name)
        if provider_mgmt.does_vm_exist(vm_name):
            logger.info("Deleting VM %s in %s" % (vm_name, provider_mgmt.__class__.__name__))
            provider_mgmt.delete_vm(vm_name)

    request.addfinalizer(finalize)

    # Make it appear in the provider
    provider_crud.refresh_provider_relationships()

    # Get the SOAP object
    soap = wait_for(
        lambda: get_vm_object(vm_name),
        message="VM object %s appears in CFME" % vm_name,
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]

    return VMWrapper(provider_mgmt, vm_name, soap)
def handle_provider(provider):
    try:
        providers.clear_providers()
        providers.setup_provider(provider.key)
    except FlashMessageException as e:
        e.skip_and_log("Provider failed to set up")
    else:
        yield
    finally:
        providers.clear_providers()
def testing_vm(request, provisioning, provider):
    setup_provider(provider.key)
    vm = VM.factory("test_ae_hd_{}".format(fauxfactory.gen_alphanumeric()), provider,
                    template_name=provisioning["template"])
    try:
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
        yield vm
    finally:
        vm.delete_from_provider()
        if vm.exists:
            vm.delete()
def provider_init(provider_key, iso_cust_template, provisioning, iso_datastore):
    try:
        setup_provider(provider_key)
    except Exception:
        pytest.skip("It's not possible to set up this provider, therefore skipping")
    if not iso_datastore.exists():
        iso_datastore.create()
    # Fails on upstream, BZ1109256
    iso_datastore.set_iso_image_type(provisioning["iso_file"], provisioning["iso_image_type"])
    if not iso_cust_template.exists():
        iso_cust_template.create()
def testing_vm(request, provisioning, provider):
    setup_provider(provider.key)
    vm = Vm(name="test_ae_hd_{}".format(fauxfactory.gen_alphanumeric()),
            provider_crud=provider,
            template_name=provisioning["template"])

    def _finalize():
        vm.delete_from_provider()
        if vm.does_vm_exist_in_cfme():
            vm.remove_from_cfme()

    request.addfinalizer(_finalize)
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm
def testing_vm(request, provisioning, provider_crud, provider_key):
    setup_provider(provider_key)
    vm = Vm(
        name="test_ae_hd_{}".format(generate_random_string()),
        provider_crud=provider_crud,
        template_name=provisioning["template"]
    )

    def _finalize():
        vm.delete_from_provider()
        if vm.does_vm_exist_in_cfme():
            vm.remove_from_cfme()

    request.addfinalizer(_finalize)
    vm.create_on_provider()
    return vm
def testing_vm(request, provisioning, provider_crud, provider_key):
    setup_provider(provider_key)
    vm = Vm(
        name="test_ae_hd_{}".format(fauxfactory.gen_alphanumeric()),
        provider_crud=provider_crud,
        template_name=provisioning["template"]
    )

    def _finalize():
        vm.delete_from_provider()
        if vm.does_vm_exist_in_cfme():
            vm.remove_from_cfme()

    request.addfinalizer(_finalize)
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm
def provisioner(request, provider):
    if not provider.exists:
        try:
            setup_provider(provider.key)
        except FlashMessageException as e:
            e.skip_and_log("Provider failed to set up")

    def _provisioner(template, provisioning_data, delayed=None):
        sel.force_navigate('infrastructure_provision_vms', context={
            'provider': provider,
            'template_name': template,
        })

        fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
        flash.assert_no_errors()

    return _provisioner
def _setup_provider(provider_key):
    def skip(provider_key, previous_fail=False):
        if previous_fail:
            raise pytest.skip('Provider {} failed to set up previously in another test, '
                              'skipping test'.format(provider_key))
        else:
            raise pytest.skip('Provider {} failed to set up this time, '
                              'skipping test'.format(provider_key))

    # This function is dynamically "fixturized" to set up a specific provider,
    # optionally skipping the provider setup if that provider has previously failed.
    if provider_key in _failed_providers:
        skip(provider_key, previous_fail=True)

    try:
        providers.setup_provider(provider_key)
    except Exception as ex:
        logger.error('Error setting up provider %s', provider_key)
        logger.exception(ex)
        _failed_providers.add(provider_key)
        skip(provider_key)
def configure_appliance(browser_setup, provider, vm_to_analyze, listener_info):
    ''' Configure the appliance for smart state analysis '''
    global appliance_vm_name

    # ensure smart proxy role enabled
    logger.info('Enabling smart proxy role...')
    nav_to_roles().edit_defaults_list("smartproxy")

    # add provider
    logger.info('Setting up provider...')
    setup_provider(provider)

    # credential hosts
    logger.info('Credentialing hosts')
    setup_providers_hosts_credentials(provider)
    prov_data = cfme_data['management_systems'][provider]
    if prov_data['type'] == 'rhevm':
        vm_details = nav_to_vm_details(provider, appliance_vm_name)
        vm_details.edit_cfme_relationship_and_save()

    # wait for vm smart state to enable
    logger.info('Waiting for smartstate option to enable...')
    vm_details = nav_to_vm_details(provider, vm_to_analyze)
    wait_for(vm_details.config_button.is_smart_state_analysis_enabled,
             delay=30, num_sec=450, fail_func=pytest.sel.refresh)

    # Configure for events
    ssh_kwargs = {
        'username': conf.credentials['ssh']['username'],
        'password': conf.credentials['ssh']['password'],
        'hostname': appliance_list[provider]
    }

    # Init SSH client
    client = SSHClient(**ssh_kwargs)
    setup_for_event_testing(client, None, listener_info, providers.list_infra_providers())

    return browser_setup
def configure_appliance(browser_setup, provider, vm_name, listener_info):
    """ Configure the appliance for smart state analysis """
    global appliance_vm_name

    # ensure smart proxy role enabled
    logger.info("Enabling smart proxy role...")
    nav_to_roles().edit_defaults_list("smartproxy")

    # add provider
    logger.info("Setting up provider...")
    setup_provider(provider)

    # credential hosts
    logger.info("Credentialing hosts")
    setup_providers_hosts_credentials(provider)
    prov_data = cfme_data["management_systems"][provider]
    if prov_data["type"] == "rhevm":
        vm_details = nav_to_vm_details(provider, appliance_vm_name)
        vm_details.edit_cfme_relationship_and_save()

    # wait for vm smart state to enable
    logger.info("Waiting for smartstate option to enable...")
    vm_details = nav_to_vm_details(provider, vm_name)
    wait_for(
        vm_details.config_button.is_smart_state_analysis_enabled,
        delay=30,
        num_sec=450,
        fail_func=pytest.sel.refresh
    )

    # Configure for events
    ssh_kwargs = {
        "username": conf.credentials["ssh"]["username"],
        "password": conf.credentials["ssh"]["password"],
        "hostname": appliance_list[provider],
    }

    # Init SSH client
    client = SSHClient(**ssh_kwargs)
    setup_for_event_testing(client, None, listener_info, providers.list_infra_providers())

    return browser_setup
def provider_init(provider_key):
    """cfme/infrastructure/provider.py provider object."""
    try:
        setup_provider(provider_key)
    except Exception as e:
        pytest.skip("Skipping, because it's not possible to set up this provider ({})".format(str(e)))
def test_app_migration(self, backup_test, soft_assert):
    vm_name = "migtest_" + backup_test
    provider = cfme_data["basic_info"]["appliances_provider"]
    test_data = migration_tests["backup_tests"][backup_test]
    template = cfme_data['basic_info']['appliance_template_big_db_disk']

    # provision appliance and configure
    appliance = provision_appliance(
        vm_name_prefix=vm_name, template=template, provider_name=provider)
    logger.info("appliance IP address: " + str(appliance.address))
    appliance.enable_internal_db()
    appliance.wait_for_web_ui()

    # start restore and migration
    appliance_ssh = appliance.ssh_client()
    appliance_ssh.put_file("./scripts/restore.py", "/root")
    appliance_ssh.run_command("curl -o restore_scripts.gz " +
                              cfme_data["basic_info"]["restore_scripts_url"])
    if "restore_fixes_url" in cfme_data["basic_info"].keys():
        appliance_ssh.run_command("curl -o fix_scripts.gz " +
                                  cfme_data["basic_info"]["restore_fixes_url"])
    appliance_ssh.run_command("curl -o backup.gz " + test_data['url'])
    logger.info("Running db restore/migration...")
    rc, output = appliance_ssh.run_command("/root/restore.py --scripts " +
                                           "/root/restore_scripts.gz --backupfile /root/backup.gz")
    soft_assert(rc == 0)

    # re-init the connection, times out over long migrations
    appliance_ssh.close()
    appliance_ssh = appliance.ssh_client()
    appliance_ssh.get_file("/root/output.log", ".")

    # Log the restore/migration output
    process = sub.Popen("cat ./output.log; rm -rf ./output.log",
                        shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
    output, error = process.communicate()
    logger.info("Running cmd: cat ./output.log; rm -rf ./output.log")
    logger.info("Output: \n" + output)

    # get database table counts
    this_db = appliance.db
    session = this_db.session
    logger.info("Checking db table counts after migration...")
    db_counts = {}
    for table_name in sorted(test_data['counts'].keys()):
        db_counts[table_name] = session.query(this_db[table_name]).count()

    # start up evmserverd and poke ui
    appliance_ssh.run_command("service evmserverd start")
    appliance.wait_for_web_ui()
    with appliance.browser_session():
        nav.home_page_logged_in()
        nav_to_roles().edit_current_role_list("ems_inventory ems_operations")
        setup_provider(provider)
        provider_details = nav.infra_providers_pg().load_provider_details(
            cfme_data["management_systems"][provider]["name"])
        vm_details = provider_details.all_vms().find_vm_page(
            appliance.vm_name, None, False, True, 6)
        soft_assert(vm_details.on_vm_details(appliance.vm_name))

    # check table counts vs what we are expecting
    for table_name in sorted(test_data['counts'].keys()):
        expected_count = test_data['counts'][table_name]
        actual_count = db_counts[table_name]
        soft_assert(actual_count == expected_count,
                    'Table ' + table_name + '(' + str(actual_count) +
                    ') not matching expected(' + str(expected_count) + ')')

    # delete appliance
    logger.info("Delete provisioned appliance: " + appliance.address)
    appliance.destroy()
def provider_init(provider_key): """cfme/cloud/provider.py provider object.""" setup_provider(provider_key)
def test_pxe_provision_from_template(provider_key, provider_crud, provider_type, provider_mgmt,
                                     provisioning, vm_name, smtp_test, request):
    setup_provider(provider_key)

    # generate_tests makes sure these have values
    pxe_template, host, datastore, pxe_server, pxe_image, pxe_kickstart,\
        pxe_root_password, pxe_image_type, pxe_vlan = map(
            provisioning.get,
            ('pxe_template', 'host', 'datastore', 'pxe_server', 'pxe_image',
             'pxe_kickstart', 'pxe_root_password', 'pxe_image_type', 'vlan'))

    pytest.sel.force_navigate('infrastructure_provision_vms', context={
        'provider': provider_crud,
        'template_name': pxe_template,
    })

    note = ('template %s to vm %s on provider %s' %
            (pxe_template, vm_name, provider_crud.key))
    provisioning_data = {
        'email': '*****@*****.**',
        'first_name': 'Template',
        'last_name': 'Provisioner',
        'notes': note,
        'vm_name': vm_name,
        'host_name': {'name': [host]},
        'datastore_name': {'name': [datastore]},
        'provision_type': 'PXE',
        'pxe_server': pxe_server,
        'pxe_image': {'name': [pxe_image]},
        'custom_template': {'name': [pxe_kickstart]},
        'root_password': pxe_root_password,
        'vlan': pxe_vlan,
    }

    fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
    flash.assert_no_errors()

    request.addfinalizer(
        lambda: cleanup_vm(vm_name, provider_key, provider_mgmt))

    # Wait for the VM to appear on the provider backend before proceeding to ensure proper cleanup
    logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider_crud.key)
    wait_for(provider_mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)

    # nav to requests page happens on successful provision
    logger.info('Waiting for cfme provision request for vm %s' % vm_name)
    row_description = 'Provision from [%s] to [%s]' % (pxe_template, vm_name)
    cells = {'Description': row_description}
    row, __ = wait_for(requests.wait_for_request, [cells],
                       fail_func=requests.reload, num_sec=2100, delay=20)
    assert row.last_message.text == 'VM Provisioned Successfully'

    # Wait for e-mails to appear
    def verify():
        return (
            len(smtp_test.get_emails(
                text_like="%%Your Virtual Machine Request was approved%%")) > 0
            and len(smtp_test.get_emails(
                subject_like="Your virtual machine request has Completed - VM:%%%s" % vm_name)) > 0
        )

    wait_for(verify, message="email receive check", delay=5)
def vm(request, provider, small_template, vm_name):
    try:
        setup_provider(provider.key)
    except FlashMessageException as e:
        e.skip_and_log("Provider failed to set up")
    if isinstance(provider.mgmt, mgmt_system.RHEVMSystem):
        kwargs = {"cluster": provider.data["default_cluster"]}
    elif isinstance(provider.mgmt, mgmt_system.VMWareSystem):
        kwargs = {}
    elif isinstance(provider.mgmt, mgmt_system.SCVMMSystem):
        kwargs = {
            "host_group": provider.data.get("provisioning", {}).get("host_group", "All Hosts")}
    else:
        kwargs = {}

    try:
        deploy_template(
            provider.key,
            vm_name,
            template_name=small_template,
            allow_skip="default",
            power_on=True,
            **kwargs
        )
    except TimedOutError as e:
        logger.exception(e)
        try:
            provider.mgmt.delete_vm(vm_name)
        except TimedOutError:
            logger.warning("Could not delete VM {}!".format(vm_name))
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip("{} is quite likely overloaded! Check its status!\n{}: {}".format(
                provider.key, type(e).__name__, str(e)))

    def finalize():
        """if getting SOAP object failed, we would not get the VM deleted! So explicit teardown."""
        logger.info("Shutting down VM with name {}".format(vm_name))
        if provider.mgmt.is_vm_suspended(vm_name):
            logger.info("Powering up VM {} to shut it down correctly.".format(vm_name))
            provider.mgmt.start_vm(vm_name)
        if provider.mgmt.is_vm_running(vm_name):
            logger.info("Powering off VM {}".format(vm_name))
            provider.mgmt.stop_vm(vm_name)
        if provider.mgmt.does_vm_exist(vm_name):
            logger.info("Deleting VM {} in {}".format(vm_name, provider.mgmt.__class__.__name__))
            provider.mgmt.delete_vm(vm_name)

    request.addfinalizer(finalize)

    # Make it appear in the provider
    provider.refresh_provider_relationships()

    # Get the SOAP object
    soap = wait_for(
        lambda: get_vm_object(vm_name),
        message="VM object {} appears in CFME".format(vm_name),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]

    return VMWrapper(provider.mgmt, vm_name, soap)
def handle_provider(provider_key):
    providers.clear_providers()
    providers.setup_provider(provider_key)
def provider(provider_id):
    return setup_provider(provider_id)
def provider_init(provider_key):
    try:
        setup_provider(provider_key)
    except Exception:
        pytest.skip("It's not possible to set up this provider, therefore skipping")
def vm(request, provider_mgmt, provider_crud, provider_key, provider_data, small_template,
       vm_name):
    try:
        setup_provider(provider_key)
    except FlashMessageException as e:
        e.skip_and_log("Provider failed to set up")
    if isinstance(provider_mgmt, mgmt_system.RHEVMSystem):
        kwargs = {"cluster": provider_data["default_cluster"]}
    elif isinstance(provider_mgmt, mgmt_system.VMWareSystem):
        kwargs = {}
    elif isinstance(provider_mgmt, mgmt_system.SCVMMSystem):
        kwargs = {
            "host_group": provider_data.get("provisioning", {}).get("host_group", "All Hosts")}
    else:
        kwargs = {}

    try:
        deploy_template(
            provider_key,
            vm_name,
            template_name=small_template,
            allow_skip="default",
            power_on=True,
            **kwargs
        )
    except TimedOutError as e:
        logger.exception(e)
        try:
            provider_mgmt.delete_vm(vm_name)
        except TimedOutError:
            logger.warning("Could not delete VM {}!".format(vm_name))
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip("{} is quite likely overloaded! Check its status!\n{}: {}".format(
                provider_key, type(e).__name__, str(e)))

    def finalize():
        """if getting SOAP object failed, we would not get the VM deleted! So explicit teardown."""
        logger.info("Shutting down VM with name {}".format(vm_name))
        if provider_mgmt.is_vm_suspended(vm_name):
            logger.info("Powering up VM {} to shut it down correctly.".format(vm_name))
            provider_mgmt.start_vm(vm_name)
        if provider_mgmt.is_vm_running(vm_name):
            logger.info("Powering off VM {}".format(vm_name))
            provider_mgmt.stop_vm(vm_name)
        if provider_mgmt.does_vm_exist(vm_name):
            logger.info("Deleting VM {} in {}".format(vm_name, provider_mgmt.__class__.__name__))
            provider_mgmt.delete_vm(vm_name)

    request.addfinalizer(finalize)

    # Make it appear in the provider
    provider_crud.refresh_provider_relationships()

    # Get the SOAP object
    soap = wait_for(
        lambda: get_vm_object(vm_name),
        message="VM object {} appears in CFME".format(vm_name),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]

    return VMWrapper(provider_mgmt, vm_name, soap)
def vm(request, provider_mgmt, provider_crud, provider_key, provider_data, small_template):
    setup_provider(provider_key)
    vm_name = "test_actions-{}-{}".format(provider_key, generate_random_string())
    if isinstance(provider_mgmt, mgmt_system.RHEVMSystem):
        # RHEV-M is sometimes overloaded, so a little protection here
        try:
            provider_mgmt.deploy_template(
                small_template,
                vm_name=vm_name,
                cluster_name=provider_data["default_cluster"]
            )
        except TimedOutError:
            try:
                provider_mgmt.delete_vm(vm_name)
            except TimedOutError:
                pass
            finally:
                # If this happened, we should skip all tests from this provider in this module
                pytest.skip("RHEV-M %s is probably full! Check its status!" % provider_key)
    elif isinstance(provider_mgmt, mgmt_system.VMWareSystem):
        # VMWare behaves correctly... usually, but we have to be sure! :)
        try:
            provider_mgmt.deploy_template(
                small_template,
                vm_name=vm_name,
            )
        except TimedOutError:
            try:
                provider_mgmt.delete_vm(vm_name)
            except TimedOutError:
                pass
            finally:
                # If this happened, we should skip all tests from this provider in this module
                pytest.skip("vSphere %s is probably overloaded! Check its status!" % provider_key)
    else:
        raise Exception("Unknown provider")

    def finalize():
        """if getting SOAP object failed, we would not get the VM deleted! So explicit teardown."""
        logger.info("Shutting down VM with name %s" % vm_name)
        if provider_mgmt.is_vm_suspended(vm_name):
            logger.info("Powering up VM %s to shut it down correctly." % vm_name)
            provider_mgmt.start_vm(vm_name)
        if provider_mgmt.is_vm_running(vm_name):
            logger.info("Powering off VM %s" % vm_name)
            provider_mgmt.stop_vm(vm_name)
        if provider_mgmt.does_vm_exist(vm_name):
            logger.info("Deleting VM %s in %s" % (vm_name, provider_mgmt.__class__.__name__))
            provider_mgmt.delete_vm(vm_name)

    request.addfinalizer(finalize)

    soap = wait_for(
        lambda: get_vm_object(vm_name),
        message="VM object %s appears in CFME" % vm_name,
        fail_condition=None,
        fail_func=lambda: provider_crud.refresh_provider_relationships(),
        num_sec=240,
        delay=30,
    )[0]

    return VMWrapper(provider_mgmt, vm_name, soap)