def test_vm(request, provider_init, provider_crud, provider_mgmt, provider_data, vm_name):
    """Fixture to provision appliance to the provider being tested if necessary"""
    vm = Vm(vm_name, provider_crud,
            template_name=provider_data['full_template']['name'])
    # Reuse an already-deployed VM when the provider has one by this name.
    if provider_mgmt.does_vm_exist(vm_name):
        return vm
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm
def test_vm(request, provider_init, provider_crud, provider_mgmt, vm_name):
    """Fixture to provision appliance to the provider being tested if necessary"""
    vm = Vm(vm_name, provider_crud)
    # Only deploy when the provider does not already have this VM.
    if provider_mgmt.does_vm_exist(vm_name):
        return vm
    vm.create_on_provider()
    return vm
def test_no_template_power_control(provider, setup_provider_funcscope):
    """ Ensures that no power button is displayed for templates."""
    provider.load_all_provider_templates()
    toolbar.set_vms_grid_view()
    # First check: the Power toolbar entry must be absent on the templates
    # listing.  Retried once because the toolbar occasionally needs a second
    # attempt right after the grid-view switch.
    try:
        with error.expected(NoSuchElementException):
            toolbar.select("Power")
    except Exception:
        # try again
        with error.expected(NoSuchElementException):
            toolbar.select("Power")
    # Ensure selecting a template doesn't cause power menu to appear
    templates = list(get_all_vms(True))
    template_name = random.choice(templates)
    selected_template = Vm(template_name, provider)
    quadicon = selected_template.find_quadicon(do_not_navigate=True, mark=False, refresh=False)
    with error.expected(NoSuchElementException):
        toolbar.select("Power")
    # Ensure there isn't a power button on the details page
    pytest.sel.click(quadicon)
    with error.expected(NoSuchElementException):
        toolbar.select("Power")
def testing_vm(request, vm_name, provider_init, provider_crud, provider_mgmt, provisioning):
    """Provision a VM from the provisioning template; delete it at teardown."""
    vm_obj = Vm(vm_name, provider_crud, provisioning["template"])
    # The bound method is the whole cleanup -- no wrapper needed.
    request.addfinalizer(vm_obj.delete_from_provider)
    vm_obj.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm_obj
def test_vm(request, provider_crud, provider_mgmt, vm_name):
    """Fixture to provision appliance to the provider being tested if necessary"""
    vm = Vm(vm_name, provider_crud)
    request.addfinalizer(vm.delete_from_provider)
    # Deploy only when the provider does not already host this VM.
    if provider_mgmt.does_vm_exist(vm_name):
        return vm
    vm.create(timeout_in_minutes=15)
    return vm
def test_vm(request, provider_crud, provider_mgmt, vm_name):
    """Fixture to provision appliance to the provider being tested if necessary"""
    vm = Vm(vm_name, provider_crud)
    request.addfinalizer(vm.delete_from_provider)
    # Deploy only when the provider does not already host this VM.
    if provider_mgmt.does_vm_exist(vm_name):
        return vm
    vm.create_on_provider()
    return vm
def testing_vm(request, vm_name, setup_provider, provider, provisioning):
    """Provision a VM from the provisioning template; delete it at teardown."""
    vm_obj = Vm(vm_name, provider, provisioning["template"])
    # The bound method is the whole cleanup -- no wrapper needed.
    request.addfinalizer(vm_obj.delete_from_provider)
    vm_obj.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm_obj
def test_no_dvd_ruins_refresh(provider, small_template):
    """Refresh must still surface a VM whose DVD drives were disconnected."""
    host_group = provider.data["provisioning"]["host_group"]
    generated_name = "test_no_dvd_{}".format(fauxfactory.gen_alpha())
    with provider.mgmt.with_vm(small_template, vm_name=generated_name,
                               host_group=host_group) as vm_name:
        provider.mgmt.disconnect_dvd_drives(vm_name)
        vm = Vm(vm_name, provider)
        provider.refresh_provider_relationships()
        vm.wait_to_appear()
def test_vm(setup_provider_modscope, provider, vm_name):
    """Fixture to provision appliance to the provider being tested if necessary"""
    template = provider.data['full_template']['name']
    vm = Vm(vm_name, provider, template_name=template)
    if not provider.mgmt.does_vm_exist(vm_name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm
def vm(request, vm_template_name, vm_name, provider_crud, provider_mgmt):
    """Fixture: deploy a VM from ``vm_template_name``; remove it at teardown."""
    logger.info("Starting vm fixture")
    vm = Vm(vm_name, provider_crud, template_name=vm_template_name)
    already_deployed = provider_mgmt.does_vm_exist(vm_name)
    if not already_deployed:
        vm.create_on_provider(allow_skip="default")
    request.addfinalizer(vm.delete_from_provider)
    return vm
def test_vm(request, provider_crud, provider_mgmt, vm_name):
    """Fixture to provision appliance to the provider being tested if necessary"""
    vm = Vm(vm_name, provider_crud)
    request.addfinalizer(vm.delete_from_provider)
    # Create the VM only when the provider does not already have it.
    if provider_mgmt.does_vm_exist(vm_name):
        return vm
    vm.create(timeout_in_minutes=15)
    return vm
def vm(request, vm_template_name, vm_name, provider):
    """Fixture: deploy a VM from ``vm_template_name``; remove it at teardown."""
    logger.info("Starting vm fixture")
    vm = Vm(vm_name, provider, template_name=vm_template_name)
    already_deployed = provider.mgmt.does_vm_exist(vm_name)
    if not already_deployed:
        vm.create_on_provider(allow_skip="default")
    request.addfinalizer(vm.delete_from_provider)
    return vm
def test_vm(request, provider_crud, provider_mgmt, vm_name, provider_init):
    """Fixture to provision appliance to the provider being tested if necessary"""
    pytest.sel.force_navigate('infrastructure_providers')
    vm = Vm(vm_name, provider_crud)
    request.addfinalizer(vm.delete_from_provider)
    # Create the VM only when the provider does not already have it.
    if provider_mgmt.does_vm_exist(vm_name):
        return vm
    vm.create(timeout_in_minutes=15)
    return vm
def test_vm(request, provider, vm_name, setup_provider_modscope):
    """Fixture to provision appliance to the provider being tested if necessary"""
    pytest.sel.force_navigate('infrastructure_providers')
    vm = Vm(vm_name, provider)
    request.addfinalizer(vm.delete_from_provider)
    # Create the VM only when the provider does not already have it.
    if provider.mgmt.does_vm_exist(vm_name):
        return vm
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm
def test_vm(request, provider_crud, provider_mgmt, vm_name, provider_init):
    """Fixture to provision appliance to the provider being tested if necessary"""
    pytest.sel.force_navigate('infrastructure_providers')
    vm = Vm(vm_name, provider_crud)
    request.addfinalizer(vm.delete_from_provider)
    # Create the VM only when the provider does not already have it.
    if provider_mgmt.does_vm_exist(vm_name):
        return vm
    vm.create_on_provider()
    return vm
def test_no_dvd_ruins_refresh(provider, small_template):
    """Refresh must still surface a VM whose DVD drives were disconnected."""
    host_group = provider.data["provisioning"]["host_group"]
    generated_name = "test_no_dvd_{}".format(fauxfactory.gen_alpha())
    with provider.mgmt.with_vm(small_template, vm_name=generated_name,
                               host_group=host_group) as vm_name:
        provider.mgmt.disconnect_dvd_drives(vm_name)
        vm = Vm(vm_name, provider)
        provider.refresh_provider_relationships()
        vm.wait_to_appear()
def test_form_button_validation(request, user1, setup_infra_provider):
    """Exercise Reset, Cancel and Save on the Set Ownership form, then unset."""
    target_vm = Vm('cu-9-5', setup_infra_provider)
    # Reset button test
    target_vm.set_ownership(user=user1.name, click_reset=True)
    # Cancel button test
    target_vm.set_ownership(user=user1.name, click_cancel=True)
    # Save button test
    target_vm.set_ownership(user=user1.name)
    # Unset the ownership
    target_vm.unset_ownership()
def fleecing_vm(request, compliance_vm, vm_analysis, provider_mgmt, provider_key, provider_crud, analysis_profile):
    """Provision an appliance VM for fleecing and wait for CFME to list it."""
    logger.info("Provisioning an appliance for fleecing on {}".format(provider_key))
    # TODO: When we get something smaller, use it!
    appliance = provision_appliance(
        vm_name_prefix=PREFIX + "for_fleece_",
        version=str(version.current_version()),
        provider_name=provider_key)
    request.addfinalizer(lambda: diaper(appliance.destroy))
    logger.info("Appliance {} provisioned".format(appliance.vm_name))
    fleece_target = Vm(appliance.vm_name, provider_crud)
    provider_crud.refresh_provider_relationships()
    fleece_target.wait_to_appear()
    return fleece_target
def testing_vm(request, vm_name, provider_init, provider_crud, provider_mgmt, provisioning):
    """Provision a VM and wait until CFME knows about it; fully clean up after."""
    vm = Vm(vm_name, provider_crud, provisioning["template"])

    def _cleanup():
        # Remove from the provider and, if still registered, from CFME too.
        vm.delete_from_provider()
        if vm.does_vm_exist_in_cfme():
            vm.remove_from_cfme()

    request.addfinalizer(_cleanup)
    vm.create_on_provider()
    provider_crud.refresh_provider_relationships()
    wait_for(vm.does_vm_exist_in_cfme, num_sec=450, delay=10)
    return vm
def test_vm_clone(provisioning, provider_type, provider_crud, clone_vm_name,
                  provider_mgmt, request, create_vm, provider_key):
    """Clone ``create_vm`` and wait for the provision request to succeed.

    Both the source VM and the clone are scheduled for cleanup.
    """
    # Bind vm_name BEFORE registering the finalizer that captures it.  The
    # original registered the lambda first; had anything failed between the
    # registration and the assignment, teardown would have raised NameError.
    vm_name = create_vm + "_0001"
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider_key, provider_mgmt))
    request.addfinalizer(lambda: cleanup_vm(clone_vm_name, provider_key, provider_mgmt))
    vm = Vm(vm_name, provider_crud)
    vm.clone_vm("*****@*****.**", "first", "last", clone_vm_name)
    row_description = 'Clone from [%s] to [%s]' % (vm_name, clone_vm_name)
    cells = {'Description': row_description}
    # Poll the requests page until the clone request completes.
    row, __ = wait_for(requests.wait_for_request, [cells],
                       fail_func=requests.reload, num_sec=4000, delay=20)
    assert row.last_message.text == 'Vm Provisioned Successfully'
def new_snapshot(test_vm, has_name=True):
    """Build a ``Vm.Snapshot`` for ``test_vm``, with or without a name."""
    suffix = fauxfactory.gen_alphanumeric(8)
    if has_name:
        return Vm.Snapshot(name="snpshot_" + suffix, description="snapshot",
                           memory=False, parent_vm=test_vm)
    return Vm.Snapshot(description="snapshot_" + suffix, memory=False,
                       parent_vm=test_vm)
def test_vm_migrate(setup_provider, provider, request):
    """Tests migration of a vm

    Metadata:
        test_flag: migrate, provision
    """
    vm = Vm("vmtest", provider)
    vm.migrate_vm("*****@*****.**", "first", "last")
    flash.assert_no_errors()
    cells = {'Description': 'vmtest'}
    row, __ = wait_for(requests.wait_for_request, [cells, True],
                       fail_func=requests.reload, num_sec=600, delay=20)
    assert row.request_state.text == 'Migrated'
def testing_vm(request, provisioning, provider):
    """Set up the provider and provision a throwaway VM; fully clean up after."""
    setup_provider(provider.key)
    vm = Vm(name="test_ae_hd_{}".format(fauxfactory.gen_alphanumeric()),
            provider_crud=provider,
            template_name=provisioning["template"])

    def _cleanup():
        vm.delete_from_provider()
        if vm.does_vm_exist_in_cfme():
            vm.remove_from_cfme()

    request.addfinalizer(_cleanup)
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm
def test_vm(request, provider, vm_name):
    """Fixture to provision appliance to the provider being tested if necessary"""
    vm = Vm(vm_name, provider)
    request.addfinalizer(vm.delete_from_provider)
    if provider.mgmt.does_vm_exist(vm_name):
        # Reuse the existing deployment; just make sure CFME sees it.
        logger.info("recycling deployed vm {} on provider {}".format(vm_name, provider.key))
        vm.provider_crud.refresh_provider_relationships()
        vm.wait_to_appear()
    else:
        logger.info("deploying {} on provider {}".format(vm_name, provider.key))
        vm.create_on_provider(allow_skip="default")
    return vm
def fleecing_vm(request, compliance_vm, vm_analysis, provider, analysis_profile):
    """Provision an appliance VM for fleecing and wait for CFME to list it."""
    logger.info("Provisioning an appliance for fleecing on {}".format(provider.key))
    # TODO: When we get something smaller, use it!
    appliance = provision_appliance(
        vm_name_prefix=PREFIX + "for_fleece_",
        version=str(version.current_version()),
        provider_name=provider.key)
    request.addfinalizer(lambda: diaper(appliance.destroy))
    logger.info("Appliance {} provisioned".format(appliance.vm_name))
    fleece_target = Vm(appliance.vm_name, provider)
    provider.refresh_provider_relationships()
    fleece_target.wait_to_appear()
    return fleece_target
def test_vm_migrate(provider_init, provider_crud, provider_mgmt, request):
    """Tests migration of a vm

    Metadata:
        test_flag: migrate, provision
    """
    vm = Vm("vmtest", provider_crud)
    # NOTE(review): "host" and "datstore" look like placeholder values
    # ("datstore" is presumably a typo for "datastore"); confirm what
    # migrate_vm() actually expects for these positional arguments.
    vm.migrate_vm("*****@*****.**", "first", "last", "host", "datstore")
    flash.assert_no_errors()
    row_description = 'VM Migrate'
    cells = {'Request Type': row_description}
    # Poll the requests page until the migration request finishes.
    row, __ = wait_for(requests.wait_for_request, [cells],
                       fail_func=requests.reload, num_sec=600, delay=20)
    assert row.last_message.text == 'Request complete'
def testing_vm(request, provisioning, provider_crud, provider_key):
    """Set up the provider and provision a throwaway VM; fully clean up after."""
    setup_provider(provider_key)
    vm = Vm(name="test_ae_hd_{}".format(generate_random_string()),
            provider_crud=provider_crud,
            template_name=provisioning["template"])

    def _cleanup():
        vm.delete_from_provider()
        if vm.does_vm_exist_in_cfme():
            vm.remove_from_cfme()

    request.addfinalizer(_cleanup)
    vm.create_on_provider()
    return vm
def test_vm_clone(provisioning, provider, clone_vm_name, request, create_vm):
    """Clone ``create_vm`` and assert the clone request finishes successfully.

    Raises:
        ValueError: when the provider type has no known clone provision type
            (the original code hit an unbound-local NameError in that case).
    """
    vm_name = create_vm + "_0001"
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
    request.addfinalizer(lambda: cleanup_vm(clone_vm_name, provider))
    vm = Vm(vm_name, provider)
    # Map the provider type to the provision type shown on the clone form.
    if provider.type == 'rhevm':
        provision_type = 'Native Clone'
    elif provider.type == 'virtualcenter':
        provision_type = 'VMware'
    else:
        # Fail with a clear message instead of NameError on provision_type.
        raise ValueError(
            "No clone provision type known for provider type {!r}".format(provider.type))
    vm.clone_vm("*****@*****.**", "first", "last", clone_vm_name, provision_type)
    row_description = clone_vm_name
    cells = {'Description': row_description}
    row, __ = wait_for(requests.wait_for_request, [cells, True],
                       fail_func=requests.reload, num_sec=4000, delay=20)
    assert row.last_message.text == 'Vm Provisioned Successfully'
def testing_vm(request, provisioning, provider_crud, provider_key):
    """Set up the provider and provision a throwaway VM; fully clean up after."""
    setup_provider(provider_key)
    vm = Vm(name="test_ae_hd_{}".format(fauxfactory.gen_alphanumeric()),
            provider_crud=provider_crud,
            template_name=provisioning["template"])

    def _cleanup():
        vm.delete_from_provider()
        if vm.does_vm_exist_in_cfme():
            vm.remove_from_cfme()

    request.addfinalizer(_cleanup)
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm
def provision_vm(request, provider_crud, provider_mgmt):
    """Function to provision appliance to the provider being tested"""
    vm_name = "test_rest_db_" + fauxfactory.gen_alphanumeric()
    vm = Vm(vm_name, provider_crud)
    request.addfinalizer(vm.delete_from_provider)
    if provider_mgmt.does_vm_exist(vm_name):
        # Reuse the existing deployment; just make sure CFME sees it.
        logger.info("recycling deployed vm {} on provider {}".format(vm_name, provider_crud.key))
        vm.provider_crud.refresh_provider_relationships()
        vm.wait_to_appear()
    else:
        logger.info("deploying {} on provider {}".format(vm_name, provider_crud.key))
        vm.create_on_provider(allow_skip="default")
    return vm
def test_scope_windows_registry_stuck(request, infra_provider, policy_collection,
                                      policy_profile_collection):
    """If you provide Scope checking windows registry, it messes CFME up. Recoverable."""
    # Create a compliance policy whose scope is a windows-registry check --
    # the condition known to trigger the glitch.  Delete it afterwards if it
    # still exists.
    policy = policy_collection.create(
        VMCompliancePolicy,
        "Windows registry scope glitch testing Compliance Policy",
        active=True,
        scope=r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, "
              r"some value, INCLUDES, some content)")
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    profile = policy_profile_collection.create(
        "Windows registry scope glitch testing Compliance Policy", policies=[policy])
    request.addfinalizer(lambda: profile.delete() if profile.exists else None)
    # Now assign this malformed profile to a VM
    vm = VM.factory(Vm.get_first_vm(provider=infra_provider).name, infra_provider)
    vm.assign_policy_profiles(profile.description)
    # It should be screwed here, but do additional check
    navigate_to(Server, 'Dashboard')
    navigate_to(Vm, 'All')
    # An error page would carry "exception" in its title.
    assert "except" not in pytest.sel.title().lower()
    vm.unassign_policy_profiles(profile.description)
def new_snapshot(test_vm, has_name=True, memory=False):
    """Build a ``Vm.Snapshot`` for ``test_vm``; the name is optional."""
    snap_name = "snpshot_{}".format(fauxfactory.gen_alphanumeric(8)) if has_name else None
    return Vm.Snapshot(
        name=snap_name,
        description="snapshot_{}".format(fauxfactory.gen_alphanumeric(8)),
        memory=memory,
        parent_vm=test_vm)
def _provisioner(template, provisioning_data, delayed=None):
    """Fill the provision form for ``template`` and wait for the resulting VM.

    When ``delayed`` (a future UTC datetime) is given, first verify that the
    request does NOT finish before that time, i.e. scheduling took effect.
    Note: ``vm_name``, ``provider``, ``request`` and ``appliance`` are closure
    variables from the enclosing fixture.
    """
    vm = Vm(name=vm_name, provider=provider, template_name=template)
    view = navigate_to(vm, 'Provision')
    view.form.fill_with(provisioning_data, on_change=view.form.submit_button)
    base_view = vm.appliance.browser.create_view(BaseLoggedInPage)
    base_view.flash.assert_no_error()
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
    request_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
    provision_request = appliance.collections.requests.instantiate(
        description=request_description)
    if delayed is not None:
        total_seconds = (delayed - datetime.utcnow()).total_seconds()
        try:
            wait_for(provision_request.is_finished,
                     fail_func=provision_request.update,
                     num_sec=total_seconds, delay=5)
            # Finishing before the scheduled time means the delay was ignored.
            pytest.fail("The provisioning was not postponed")
        except TimedOutError:
            pass
    logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
    wait_for(
        provider.mgmt.does_vm_exist, [vm_name],
        fail_func=provider.refresh_provider_relationships,
        handle_exception=True, num_sec=600)
    # nav to requests page happens on successful provision
    logger.info('Waiting for cfme provision request for vm %s', vm_name)
    provision_request.wait_for_request()
    assert provision_request.is_succeeded(method='ui')
    return vm
def generated_request(appliance, infra_provider, provider_data, provisioning,
                      template_name, vm_name):
    """Creates a provision request, that is not automatically approved, and returns
    the search data.

    After finishing the test, request should be automatically deleted.

    Slightly modified code from :py:module:`cfme.tests.infrastructure.test_provisioning`
    """
    first_name = fauxfactory.gen_alphanumeric()
    last_name = fauxfactory.gen_alphanumeric()
    notes = fauxfactory.gen_alphanumeric()
    e_mail = "{}@{}.test".format(first_name, last_name)
    host, datastore = map(provisioning.get, ('host', 'datastore'))
    vm = Vm(name=vm_name, provider=infra_provider, template_name=template_name)
    navigate_to(vm, 'ProvisionVM')
    provisioning_data = {
        'email': e_mail,
        'first_name': first_name,
        'last_name': last_name,
        'notes': notes,
        'vm_name': vm_name,
        'host_name': {'name': [host]},
        'datastore_name': {'name': [datastore]},
        'num_vms': "10",  # so it won't get auto-approved
    }
    # Same thing, different names. :\
    if provider_data["type"] == 'rhevm':
        provisioning_data['provision_type'] = 'Native Clone'
    elif provider_data["type"] == 'virtualcenter':
        provisioning_data['provision_type'] = 'VMware'
    try:
        provisioning_data['vlan'] = provisioning['vlan']
    except KeyError:
        # provisioning['vlan'] is required for rhevm provisioning
        if provider_data["type"] == 'rhevm':
            raise pytest.fail('rhevm requires a vlan value in provisioning info')
    fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
    flash.assert_no_errors()
    request_cells = {
        "Description": "Provision from [{}] to [{}###]".format(template_name, vm_name),
    }
    yield request_cells
    # Teardown: return to the appliance as admin and delete the request.
    browser().get(store.base_url)
    appliance.server.login_admin()
    requests.delete_request(request_cells)
    flash.assert_no_errors()
def _provisioner(template, provisioning_data, delayed=None):
    """Submit the provision form; optionally verify ``delayed`` was honoured.

    ``provider`` and ``request`` are closure variables from the enclosing
    fixture.  Returns the provisioned :py:class:`Vm`.
    """
    pytest.sel.force_navigate('infrastructure_provision_vms', context={
        'provider': provider,
        'template_name': template,
    })
    vm_name = provisioning_data["vm_name"]
    fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
    flash.assert_no_errors()
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
    if delayed is not None:
        total_seconds = (delayed - datetime.utcnow()).total_seconds()
        row_description = 'Provision from [%s] to [%s]' % (template, vm_name)
        cells = {'Description': row_description}
        try:
            row, __ = wait_for(requests.wait_for_request, [cells],
                               fail_func=requests.reload, num_sec=total_seconds, delay=5)
            # Completing before the scheduled time means the delay was ignored.
            pytest.fail("The provisioning was not postponed")
        except TimedOutError:
            pass
    logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
    wait_for(provider.mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)
    # nav to requests page happens on successful provision
    logger.info('Waiting for cfme provision request for vm %s' % vm_name)
    row_description = 'Provision from [%s] to [%s]' % (template, vm_name)
    cells = {'Description': row_description}
    row, __ = wait_for(requests.wait_for_request, [cells],
                       fail_func=requests.reload, num_sec=900, delay=20)
    # The success message changed capitalization in 5.3.
    assert row.last_message.text == version.pick(
        {version.LOWEST: 'VM Provisioned Successfully',
         "5.3": 'Vm Provisioned Successfully',
         })
    return Vm(vm_name, provider)
def test_no_template_power_control(provider_crud, provider_init):
    """ Ensures that no power button is displayed for templates. """
    provider_crud.load_all_provider_templates()
    # The Power toolbar entry must be absent on the templates listing.
    with error.expected(NoSuchElementException):
        toolbar.select("Power")
    # Ensure selecting a template doesn't cause power menu to appear
    toolbar.set_vms_grid_view()
    templates = list(get_all_vms(True))
    template_name = random.choice(templates)
    selected_template = Vm(template_name, provider_crud)
    quadicon = selected_template.find_quadicon(do_not_navigate=True, mark=False, refresh=False)
    with error.expected(NoSuchElementException):
        toolbar.select("Power")
    # Ensure there isn't a power button on the details page
    pytest.sel.click(quadicon)
    with error.expected(NoSuchElementException):
        toolbar.select("Power")
def _provisioner(template, provisioning_data, delayed=None):
    """Submit the provisioning form for ``template``; assert no flash errors."""
    target = Vm(name=vm_name, provider=provider, template_name=template)
    navigate_to(target, 'Provision')
    fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
    flash.assert_no_errors()
def ssa_vm(request, local_setup_provider, provider, vm_analysis_provisioning_data,
           appliance, analysis_type):
    """ Fixture to provision instance on the provider """
    vm_name = 'test-ssa-{}-{}'.format(fauxfactory.gen_alphanumeric(), analysis_type)
    vm = VM.factory(vm_name, provider, template_name=vm_analysis_provisioning_data.image)
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
    # 'image' names the template; the remaining keys are provisioning kwargs.
    provision_data = vm_analysis_provisioning_data.copy()
    del provision_data['image']
    vm.create_on_provider(find_in_cfme=True, **provision_data)
    if provider.one_of(OpenStackProvider):
        public_net = provider.data['public_network']
        vm.provider.mgmt.assign_floating_ip(vm.name, public_net)
    logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)

    @wait_for_decorator(timeout="20m", delay=5)
    def get_ip_address():
        # The VM may come up powered off; start it before polling for an IP.
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, provider.mgmt.vm_status(vm_name), provider.mgmt.is_vm_stopped(vm_name)))
        if provider.mgmt.is_vm_stopped(vm_name):
            provider.mgmt.start_vm(vm_name)
        ip = provider.mgmt.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = provider.mgmt.get_ip_address(vm_name)
    assert connect_ip is not None
    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    if vm_analysis_provisioning_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(
            hostname=connect_ip,
            username=vm_analysis_provisioning_data['username'],
            password=vm_analysis_provisioning_data['password'],
            port=22)
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
        vm.ssh = ssh_client
    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_provisioning_data['image']
    vm.connect_ip = connect_ip
    # TODO: if rhev and iscsi, it need direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        cfme_rel = Vm.CfmeRelationship(vm)
        cfme_rel.set_relationship(appliance.server.name, appliance.server_id())
    yield vm
    # Close the SSH client if we have one
    if getattr(vm, 'ssh', None):
        vm.ssh.close()
def test_vm_genealogy(
        setup_provider, vm_name, provider_crud, provisioning, soft_assert, provider_mgmt,
        request):
    """Tests vm geneaology

    Metadata:
        test_flag: geneaology, provision
    """
    original_template = provisioning["template"]
    original_vm = Vm(vm_name, provider_crud, template_name=original_template)
    original_vm.create_on_provider()
    request.addfinalizer(
        lambda: provider_mgmt.delete_vm(original_vm.name)
        if provider_mgmt.does_vm_exist(original_vm.name) else None)
    provider_mgmt.wait_vm_steady(original_vm.name)
    # Publish the VM to a template, then spin a second VM from that template
    # so there is a known ancestry chain to assert on.
    first_template = original_vm.publish_to_template("{}x".format(vm_name))
    soft_assert(isinstance(first_template, Template), "first_template is not a template!")
    request.addfinalizer(
        lambda: provider_mgmt.delete_vm(first_template.name)
        if first_template.name in provider_mgmt.list_template() else None)
    second_vm = Vm(
        "{}x".format(first_template.name), provider_crud, template_name=first_template.name)
    second_vm.create_on_provider()
    request.addfinalizer(
        lambda: provider_mgmt.delete_vm(second_vm.name)
        if provider_mgmt.does_vm_exist(second_vm.name) else None)
    soft_assert(isinstance(second_vm, Vm), "second_vm is a template!")
    second_vm_ancestors = second_vm.genealogy.ancestors
    # IT SEEMS IT "BREAKS" THE CHAIN WHEN THE VM IS CLONED TO A TEMPLATE
    # soft_assert(original_vm.name in second_vm_ancestors, "{} is not in {}'s ancestors".format(
    #     original_vm.name, second_vm.name))
    soft_assert(first_template.name in second_vm_ancestors, "{} is not in {}'s ancestors".format(
        first_template.name, second_vm.name))
def instance(request, local_setup_provider, provider, vm_name, vm_analysis_data, appliance):
    """ Fixture to provision instance on the provider """
    vm = VM.factory(vm_name, provider, template_name=vm_analysis_data['image'])
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
    # 'image' names the template; the remaining keys are provisioning kwargs.
    provision_data = vm_analysis_data.copy()
    del provision_data['image']
    vm.create_on_provider(find_in_cfme=True, **provision_data)
    if provider.type == "openstack":
        vm.provider.mgmt.assign_floating_ip(vm.name, 'public')
    logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)
    mgmt_system = provider.get_mgmt_system()

    @pytest.wait_for(timeout="20m", delay=5)
    def get_ip_address():
        # The VM may come up powered off; start it before polling for an IP.
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, mgmt_system.vm_status(vm_name), mgmt_system.is_vm_stopped(vm_name)))
        if mgmt_system.is_vm_stopped(vm_name):
            mgmt_system.start_vm(vm_name)
        ip = mgmt_system.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = mgmt_system.get_ip_address(vm_name)
    assert connect_ip is not None
    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    if vm_analysis_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(hostname=connect_ip,
                                   username=vm_analysis_data['username'],
                                   password=vm_analysis_data['password'],
                                   port=22)
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
        vm.ssh = ssh_client
    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_data['image']
    vm.connect_ip = connect_ip
    # TODO: This is completely wrong and needs to be fixed
    # CFME relationship is suppose to be set to the appliance, which is required
    # to be placed within the same datastore that the VM resides
    #
    # Also, if rhev and iscsi, it need direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        from cfme.infrastructure.virtual_machines import Vm
        cfme_rel = Vm.CfmeRelationship(vm)
        server_name = appliance.server_name()
        cfme_rel.set_relationship(str(server_name), configuration.server_id())
    return vm
def test_create_snapshot_via_ae(appliance, request, domain, small_test_vm): """This test checks whether the vm.create_snapshot works in AE. Prerequisities: * A VMware provider * A VM that has been discovered by CFME Steps: * Clone the Request class inside the System namespace into a new domain * Add a method named ``snapshot`` and insert the provided code there. * Add an instance named ``snapshot`` and set the methd from previous step as ``meth5`` * Run the simulation of the method against the VM, preferably setting ``snap_name`` to something that can be checked * Wait until snapshot with such name appears. """ # PREPARE file = data_path.join("ui").join("automate").join( "test_create_snapshot_via_ae.rb") with file.open("r") as f: method_contents = f.read() miq_domain = DomainCollection(appliance).instantiate(name='ManageIQ') miq_class = miq_domain.namespaces.instantiate( name='System').classes.instantiate(name='Request') miq_class.copy_to(domain) request_cls = domain.namespaces.instantiate( name='System').classes.instantiate(name='Request') request.addfinalizer(request_cls.delete) method = request_cls.methods.create(name="snapshot", location='inline', script=method_contents) request.addfinalizer(method.delete) instance = request_cls.instances.create( name="snapshot", fields={"meth5": { 'value': "snapshot" }}) request.addfinalizer(instance.delete) # SIMULATE snap_name = fauxfactory.gen_alpha() snapshot = Vm.Snapshot(name=snap_name, parent_vm=small_test_vm) simulate(instance="Request", request="snapshot", target_type='VM and Instance', target_object=small_test_vm.name, execute_methods=True, attributes_values={"snap_name": snap_name}) wait_for(lambda: snapshot.exists, timeout="2m", delay=10, fail_func=small_test_vm.provider.browser.refresh, handle_exception=True, message="Waiting for snapshot create") # Clean up if it appeared snapshot.delete()
def new_vm(provider, request):
    """Return a cockpit-named VM/instance on ``provider``, creating it if absent."""
    factory = Instance.factory if provider.one_of(CloudProvider) else Vm.factory
    vm = factory(random_vm_name(context='cockpit'), provider)
    if not provider.mgmt.does_vm_exist(vm.name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    request.addfinalizer(vm.cleanup_on_provider)
    return vm
def test_group_ownership_on_user_or_group_role(request, user3, setup_infra_provider):
    """Group ownership: the VM must be visible only to the owning group's member.

    Fixes the original ``assert (expr, "msg")`` calls, which asserted a
    two-element tuple -- always truthy -- so the checks could never fail.
    """
    set_vm_to_group = Vm('cu-9-5', setup_infra_provider)
    set_vm_to_group.set_ownership(group=user3.group.description)
    login.login(user3.credential.principal, user3.credential.secret)
    assert set_vm_to_group.does_vm_exist_in_cfme(), "vm not found"
    # Unset the ownership
    login.login_admin()
    set_vm_to_group.unset_ownership()
    login.login(user3.credential.principal, user3.credential.secret)
    assert not set_vm_to_group.does_vm_exist_in_cfme(), "vm exists"
def test_user_ownership_crud(request, user1, setup_infra_provider):
    """User ownership: the VM must be visible only to its owner.

    Fixes the original ``assert (expr, "msg")`` calls, which asserted a
    two-element tuple -- always truthy -- so the checks could never fail.
    """
    set_vm_to_user = Vm('cu-9-5', setup_infra_provider)
    # Set the ownership and checking it
    set_vm_to_user.set_ownership(user=user1.name)
    login.login(user1.credential.principal, user1.credential.secret)
    assert set_vm_to_user.does_vm_exist_in_cfme(), "vm not found"
    # Unset the ownership
    login.login_admin()
    set_vm_to_user.unset_ownership()
    login.login(user1.credential.principal, user1.credential.secret)
    assert not set_vm_to_user.does_vm_exist_in_cfme(), "vm exists"
def do_vm_provisioning(appliance, template_name, provider, vm_name, provisioning_data,
                       request, smtp_test, num_sec=1500, wait=True):
    """Provision ``vm_name`` from ``template_name`` through the UI provision form.

    Args:
        appliance: Appliance under test (used for the requests collection).
        template_name: Source template name.
        provider: Provider object the VM lands on.
        vm_name: Name of the VM to create.
        provisioning_data: Values to fill into the provision form (mutated:
            a 'request' section is added below).
        request: pytest request object (not referenced in this body).
        smtp_test: SMTP collector; when truthy, approval/completion e-mails
            are awaited at the end.
        num_sec: NOTE(review) -- accepted but never used here; the waits
            below use their own hard-coded timeouts.
        wait: When False, return right after submitting the form.
    """
    # generate_tests makes sure these have values
    vm = Vm(name=vm_name, provider=provider, template_name=template_name)
    note = ('template {} to vm {} on provider {}'.format(template_name, vm_name, provider.key))
    provisioning_data.update({
        'request': {
            'email': '*****@*****.**',
            'first_name': 'Template',
            'last_name': 'Provisioner',
            'notes': note}})
    view = navigate_to(vm, 'Provision')
    view.form.fill_with(provisioning_data, on_change=view.form.submit_button)
    view.flash.assert_no_error()
    if not wait:
        return
    # Provision Re important in this test
    logger.info('Waiting for cfme provision request for vm %s', vm_name)
    request_description = 'Provision from [{}] to [{}]'.format(template_name, vm_name)
    provision_request = appliance.collections.requests.instantiate(request_description)
    provision_request.wait_for_request(method='ui')
    assert provision_request.is_succeeded(method='ui'), \
        "Provisioning failed with the message {}".format(provision_request.row.last_message.text)
    # Wait for the VM to appear on the provider backend before proceeding to ensure proper cleanup
    logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
    wait_for(provider.mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)
    if smtp_test:
        # Wait for e-mails to appear
        def verify():
            approval = dict(subject_like="%%Your Virtual Machine configuration was Approved%%")
            expected_text = "Your virtual machine request has Completed - VM:%%{}".format(vm_name)
            return (len(smtp_test.get_emails(**approval)) > 0 and
                    len(smtp_test.get_emails(subject_like=expected_text)) > 0)

        wait_for(verify, message="email receive check", delay=30)
def vm_crud(vm_name, provider):
    """Return the VM/Instance object matching ``provider``'s kind."""
    if not is_cloud_provider(provider.key):
        return Vm(vm_name, provider)
    if provider.type == "openstack":
        return OpenStackInstance(vm_name, provider)
    if provider.type == "ec2":
        return EC2Instance(vm_name, provider)
    raise Exception("Unknown provider type {}!".format(provider.type))
def test_vm(request, provider_crud, provider_mgmt, vm_name):
    """Fixture to provision appliance to the provider being tested if necessary"""
    vm = Vm(vm_name, provider_crud)
    logger.info("provider_key: {}".format(provider_crud.key))

    def _cleanup():
        vm.delete_from_provider()
        if_scvmm_refresh_provider(provider_crud)

    request.addfinalizer(_cleanup)
    if provider_mgmt.does_vm_exist(vm_name):
        # Reuse the existing deployment; just make sure CFME sees it.
        logger.info("recycling deployed vm {} on provider {}".format(vm_name, provider_crud.key))
        vm.provider_crud.refresh_provider_relationships()
        vm.wait_to_appear()
    else:
        logger.info("deploying {} on provider {}".format(vm_name, provider_crud.key))
        vm.create_on_provider()
    return vm
def fleecing_vm( request, compliance_vm, vm_analysis, provider_mgmt, provider_key, provider_crud, analysis_profile): logger.info("Provisioning an appliance for fleecing on {}".format(provider_key)) # TODO: When we get something smaller, use it! appliance = provision_appliance( vm_name_prefix=PREFIX + "for_fleece_", version=str(version.current_version()), provider_name=provider_key) request.addfinalizer(lambda: diaper(appliance.destroy)) logger.info("Appliance {} provisioned".format(appliance.vm_name)) vm = Vm(appliance.vm_name, provider_crud) provider_crud.refresh_provider_relationships() vm.wait_to_appear() # Assign the analysis profile action = Action( "Assign analysis profile {}".format(analysis_profile.name), "Assign Profile to Analysis Task", dict(analysis_profile=analysis_profile.name)) action.create() request.addfinalizer(action.delete) policy = VMControlPolicy("Analysis profile policy {}".format(generate_random_string())) policy.create() request.addfinalizer(policy.delete) policy.assign_actions_to_event("VM Analysis Start", action) analysis_pp = PolicyProfile( "Analysis profile PP {}".format(generate_random_string()), policies=[policy]) analysis_pp.create() request.addfinalizer(analysis_pp.delete) vm.assign_policy_profiles(analysis_pp.description) request.addfinalizer(lambda: vm.unassign_policy_profiles(analysis_pp.description)) return vm
def compliance_vm(request, provider_key, provider_crud):
    """Yield a fleecing-capable appliance VM, reusing the tested appliance when possible.

    First tries to locate the currently tested appliance (by extracting an IPv4
    address from ``store.base_url``) on the provider and reuse it; if it cannot
    be found via IP, a brand-new appliance is provisioned and configured for
    fleecing instead.  Finally the session timeout is raised and the fixture
    waits until the VM is visible and Smart State Analysis is enabled.
    """
    try:
        # Extract an IPv4 address from the base URL to find "ourselves" on the provider.
        ip_addr = re.findall(r'[0-9]+(?:\.[0-9]+){3}', store.base_url)[0]
        appl_name = provider_crud.get_mgmt_system().get_vm_name_from_ip(ip_addr)
        appliance = Appliance(provider_key, appl_name)
        logger.info(
            "The tested appliance ({}) is already on this provider ({}) so reusing it.".format(
                appl_name, provider_key))
        appliance.configure_fleecing()
        vm = Vm(appl_name, provider_crud)
    except VmNotFoundViaIP:
        # The tested appliance is not on this provider; provision a fresh one.
        logger.info("Provisioning a new appliance on provider {}.".format(provider_key))
        appliance = provision_appliance(
            vm_name_prefix=PREFIX + "host_",
            version=str(version.current_version()),
            provider_name=provider_key)
        # diaper() swallows destroy errors during teardown.
        request.addfinalizer(lambda: diaper(appliance.destroy))
        appliance.configure(setup_fleece=True)
        vm = Vm(appliance.vm_name, provider_crud)
    # Do the final touches: a long session timeout so the UI session survives the run.
    with appliance.ipapp(browser_steal=True) as appl:
        appl.set_session_timeout(86400)
        provider_crud.refresh_provider_relationships()
        vm.wait_to_appear()
        vm.load_details()
        wait_for_ssa_enabled()
        yield vm
def test_create_snapshot_via_ae(request, domain, test_vm):
    """Verify that ``vm.create_snapshot`` works when driven from Automate.

    Prerequisities:
        * A VMware provider
        * A VM that has been discovered by CFME

    Steps:
        * Clone the System/Request class from the locked ManageIQ domain into
          a custom domain.
        * Attach a ``snapshot`` method (Ruby body shipped with the test data)
          and a ``snapshot`` instance pointing at it via ``meth5``.
        * Simulate the request against the VM with a known ``snap_name``.
        * Wait for a snapshot with that name to appear, then remove it.
    """
    # PREPARE: load the Ruby method body shipped with the test data.
    script = data_path.join("ui").join("automate").join(
        "test_create_snapshot_via_ae.rb")
    with script.open("r") as src:
        method_body = src.read()
    locked_domain = Domain("ManageIQ (Locked)")
    system_request = Class("Request", namespace=Namespace("System", domain=locked_domain))
    cloned_request = system_request.copy_to(domain)
    request.addfinalizer(cloned_request.delete)
    ae_method = Method("snapshot", data=method_body, cls=cloned_request)
    ae_method.create()
    request.addfinalizer(ae_method.delete)
    ae_instance = Instance("snapshot", values={"meth5": "snapshot"}, cls=cloned_request)
    ae_instance.create()
    request.addfinalizer(ae_instance.delete)

    # SIMULATE: run the cloned Request against the VM with a known snapshot name.
    snapshot_name = fauxfactory.gen_alpha()
    expected_snapshot = Vm.Snapshot(name=snapshot_name, parent_vm=test_vm)
    simulate(
        instance="Request",
        request="snapshot",
        attribute=["VM and Instance", test_vm.name],
        execute_methods=True,
        avp={"snap_name": snapshot_name})
    wait_for(expected_snapshot.does_snapshot_exist, timeout="2m", delay=10)
    # Clean up the snapshot if it appeared.
    expected_snapshot.delete()
def compliance_vm(request, provider):
    """Yield a fleecing-capable appliance VM, reusing the tested appliance when possible.

    Tries to locate the currently tested appliance (via the hostname of
    ``store.base_url``) on *provider* and reuse it; otherwise provisions a new
    appliance configured for fleecing.  Configuration failures skip the test
    rather than failing it.  Finally raises the session timeout and waits for
    the VM to appear with Smart State Analysis enabled.
    """
    try:
        ip_addr = urlparse(store.base_url).hostname
        appl_name = provider.mgmt.get_vm_name_from_ip(ip_addr)
        appliance = Appliance(provider.key, appl_name)
        logger.info(
            "The tested appliance ({}) is already on this provider ({}) so reusing it."
            .format(appl_name, provider.key))
        try:
            appliance.configure_fleecing()
        except (EOFError, ApplianceException) as e:
            # If something was happening, restart and wait for the UI to reappear to prevent errors
            appliance.ipapp.reboot()
            pytest.skip(
                "Error during appliance configuration. Skipping:\n{}: {}".
                format(type(e).__name__, str(e)))
        vm = Vm(appl_name, provider)
    except VmNotFoundViaIP:
        # The tested appliance is not on this provider; provision a fresh one.
        logger.info("Provisioning a new appliance on provider {}.".format(
            provider.key))
        appliance = provision_appliance(vm_name_prefix=PREFIX + "host_",
                                        version=str(version.current_version()),
                                        provider_name=provider.key)
        # diaper() swallows destroy errors during teardown.
        request.addfinalizer(lambda: diaper(appliance.destroy))
        try:
            appliance.configure(setup_fleece=True)
        except (EOFError, ApplianceException) as e:
            # Add known exceptions as needed.
            pytest.skip(
                "Error during appliance configuration. Skipping:\n{}: {}".
                format(type(e).__name__, str(e)))
        vm = Vm(appliance.vm_name, provider)
    if provider.type in {"rhevm"}:
        # RHEV-M leaves a direct LUN disk behind that must be removed explicitly.
        request.addfinalizer(appliance.remove_rhev_direct_lun_disk)
    # Do the final touches: long session timeout so the UI session survives the run.
    with appliance.ipapp(browser_steal=True) as appl:
        appl.set_session_timeout(86400)
        provider.refresh_provider_relationships()
        vm.wait_to_appear()
        vm.load_details()
        wait_for_ssa_enabled()
        yield vm
def test_scope_windows_registry_stuck(request, appliance, infra_provider, policy_collection,
                                      policy_profile_collection):
    """If you provide Scope checking windows registry, it messes CFME up. Recoverable."""
    policy = policy_collection.create(
        VMCompliancePolicy,
        "Windows registry scope glitch testing Compliance Policy",
        active=True,
        scope=r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, "
              r"some value, INCLUDES, some content)")

    def _delete_policy():
        # Guarded delete: the policy may already be gone by teardown time.
        if policy.exists:
            policy.delete()

    request.addfinalizer(_delete_policy)
    profile = policy_profile_collection.create(
        "Windows registry scope glitch testing Compliance Policy",
        policies=[policy])

    def _delete_profile():
        if profile.exists:
            profile.delete()

    request.addfinalizer(_delete_profile)
    # Assign the malformed profile to some VM.
    target_vm = VM.factory(Vm.get_first_vm(provider=infra_provider).name, infra_provider)
    target_vm.assign_policy_profiles(profile.description)
    # CFME should be glitched at this point; verify the VM listing still renders.
    navigate_to(appliance.server, 'Dashboard')
    vms_view = navigate_to(Vm, 'All')
    assert "except" not in vms_view.entities.title.text.lower()
    target_vm.unassign_policy_profiles(profile.description)
def test_scope_windows_registry_stuck(request, setup_a_provider):
    """If you provide Scope checking windows registry, it messes CFME up. Recoverable.

    Creates a compliance policy whose scope fills a Windows registry check,
    assigns it (via a policy profile) to the provider's first VM, then checks
    that the VM listing page still renders without an exception in its title
    before unassigning the profile again.
    """
    policy = VMCompliancePolicy(
        "Windows registry scope glitch testing Compliance Policy",
        active=True,
        scope=r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, "
              r"some value, INCLUDES, some content)"
    )
    # Finalizers are registered before create() so cleanup runs even if creation
    # fails halfway; each is guarded by .exists.
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    policy.create()
    profile = PolicyProfile(
        "Windows registry scope glitch testing Compliance Policy",
        policies=[policy]
    )
    request.addfinalizer(lambda: profile.delete() if profile.exists else None)
    profile.create()
    # Now assign this malformed profile to a VM
    vm = VM.factory(Vm.get_first_vm_title(provider=setup_a_provider), setup_a_provider)
    vm.assign_policy_profiles(profile.description)
    # It should be screwed here, but do additional check
    pytest.sel.force_navigate("dashboard")
    pytest.sel.force_navigate("infrastructure_virtual_machines")
    assert "except" not in pytest.sel.title().lower()
    vm.unassign_policy_profiles(profile.description)
def compliance_vm(request, provider):
    """Yield a fleecing-capable appliance VM, reusing the tested appliance when possible.

    Locates the tested appliance on *provider* via the hostname of
    ``store.base_url``; if found, it is reconfigured for fleecing and reused,
    otherwise a new appliance is provisioned and configured.  Configuration
    failures skip the test.  The session timeout is raised and the fixture
    waits for the VM to appear with Smart State Analysis enabled.
    """
    try:
        ip_addr = urlparse(store.base_url).hostname
        appl_name = provider.mgmt.get_vm_name_from_ip(ip_addr)
        appliance = Appliance(provider.key, appl_name)
        logger.info(
            "The tested appliance ({}) is already on this provider ({}) so reusing it.".format(
                appl_name, provider.key))
        try:
            appliance.configure_fleecing()
        except (EOFError, ApplianceException) as e:
            # If something was happening, restart and wait for the UI to reappear to prevent errors
            appliance.ipapp.reboot()
            pytest.skip(
                "Error during appliance configuration. Skipping:\n{}: {}".format(
                    type(e).__name__, str(e)))
        vm = Vm(appl_name, provider)
    except VmNotFoundViaIP:
        # The tested appliance is not on this provider; provision a fresh one.
        logger.info("Provisioning a new appliance on provider {}.".format(provider.key))
        appliance = provision_appliance(
            vm_name_prefix=PREFIX + "host_",
            version=str(version.current_version()),
            provider_name=provider.key)
        # diaper() swallows destroy errors during teardown.
        request.addfinalizer(lambda: diaper(appliance.destroy))
        try:
            appliance.configure(setup_fleece=True)
        except (EOFError, ApplianceException) as e:
            # Add known exceptions as needed.
            pytest.skip(
                "Error during appliance configuration. Skipping:\n{}: {}".format(
                    type(e).__name__, str(e)))
        vm = Vm(appliance.vm_name, provider)
    if provider.type in {"rhevm"}:
        # RHEV-M leaves a direct LUN disk behind that must be removed explicitly.
        request.addfinalizer(appliance.remove_rhev_direct_lun_disk)
    # Do the final touches: long session timeout so the UI session survives the run.
    with appliance.ipapp(browser_steal=True) as appl:
        appl.set_session_timeout(86400)
        provider.refresh_provider_relationships()
        vm.wait_to_appear()
        vm.load_details()
        wait_for_ssa_enabled()
        yield vm
def vmware_vm(request, vmware_provider):
    """Provision a uniquely-named throwaway VM on the VMware provider.

    The VM is created on the backend, waited for in CFME, and scheduled for
    deletion from the provider at test teardown.
    """
    generated_name = "test_control_{}".format(fauxfactory.gen_alpha().lower())
    provisioned = Vm(generated_name, vmware_provider)
    provisioned.create_on_provider(find_in_cfme=True)
    request.addfinalizer(provisioned.delete_from_provider)
    return provisioned