def existing_vm(request):
    """Fixture that sets/unsets the retirement date on an existing VM instead of creating a new one."""
    list_of_existing_providers = providers.existing_providers()
    if list_of_existing_providers:
        test_provider = providers.get_crud(list_of_existing_providers[0])
    else:
        test_provider = providers.setup_a_provider()
    all_vms = test_provider.mgmt.list_vm()
    need_to_create_vm = True
    for virtual_machine in all_vms:
        if test_provider.mgmt.is_vm_running(virtual_machine):
            need_vm = VM.factory(virtual_machine, test_provider)
            need_to_create_vm = False
            break
    if need_to_create_vm:
        machine_name = 'test_retire_prov_{}'.format(fauxfactory.gen_alpha(length=8).lower())
        need_vm = VM.factory(machine_name, test_provider)
        need_vm.create_on_provider(find_in_cfme=True, allow_skip="default")

    @request.addfinalizer
    def _delete_vm():
        if need_to_create_vm:
            test_provider.mgmt.delete_vm(need_vm.name)

    return need_vm
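# Minimal usage sketch (assumption, not from the original source): a test that exercises the
# existing_vm fixture above. set_retirement_date/retirement_date are assumed to follow the
# cfme VM API; adjust the names to the local helpers if they differ.
def test_retirement_date_on_existing_vm(existing_vm):
    from datetime import date, timedelta
    existing_vm.set_retirement_date(date.today() + timedelta(days=1))
    assert existing_vm.retirement_date != 'Never'
    existing_vm.set_retirement_date(None)  # unset again so the VM is left untouched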
def finalize():
    policy_for_testing.assign_events()
    with update(policy_for_testing):
        policy_for_testing.scope = (
            "fill_field(VM and Instance : Name, INCLUDES, {})".format(vm_name))
    with update(compliance_policy):
        compliance_policy.scope = (
            "fill_field(VM and Instance : Name, INCLUDES, {})".format(vm_name))
    VM.factory(clone_vm_name, provider).cleanup_on_provider()
def vm_name(provider):
    # also tries to delete the VM that gets made with this name
    vm_name = random_vm_name('scat')
    yield vm_name
    scat_vm = "{}0001".format(vm_name)
    if scat_vm in provider.mgmt.list_vm():
        vm_name_to_cleanup = "{}0001".format(vm_name)
    else:
        vm_name_to_cleanup = vm_name
    VM.factory(vm_name_to_cleanup, provider).cleanup_on_provider()
def _get_vm(request, provider, template_name, vm_name):
    if provider.one_of(RHEVMProvider):
        kwargs = {"cluster": provider.data["default_cluster"]}
    elif provider.one_of(OpenStackProvider):
        kwargs = {}
        if 'small_template' in provider.data.templates:
            kwargs = {"flavour_name": provider.data.provisioning.get('instance_type')}
    elif provider.one_of(SCVMMProvider):
        kwargs = {
            "host_group": provider.data.get("provisioning", {}).get("host_group", "All Hosts")}
    else:
        kwargs = {}
    try:
        deploy_template(
            provider.key,
            vm_name,
            template_name=template_name,
            allow_skip="default",
            power_on=True,
            **kwargs
        )
    except TimedOutError as e:
        logger.exception(e)
        try:
            VM.factory(vm_name, provider).cleanup_on_provider()
        except TimedOutError:
            logger.warning("Could not delete VM %s!", vm_name)
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip("{} is quite likely overloaded! Check its status!\n{}: {}".format(
                provider.key, type(e).__name__, str(e)))
    request.addfinalizer(lambda: VM.factory(vm_name, provider).cleanup_on_provider())
    # Make it appear in the provider
    provider.refresh_provider_relationships()
    # Get the REST API object
    api = wait_for(
        get_vm_object,
        func_args=[provider.appliance, vm_name],
        message="VM object {} appears in CFME".format(vm_name),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]
    return VMWrapper(provider, vm_name, api)
def depot_machine_ip():
    """Deploy a VM for the depot test.

    The fixture deploys a VM on the provider defined in the yaml, yields its IP address,
    and deletes the VM from the provider after the test run.
    """
    depot_machine_name = "test_long_log_depot_{}".format(fauxfactory.gen_alphanumeric())
    data = conf.cfme_data.get("log_db_operations", {})
    depot_provider_key = data["log_db_depot_template"]["provider"]
    depot_template_name = data["log_db_depot_template"]["template_name"]
    prov_crud = get_crud(depot_provider_key)
    deploy_template(depot_provider_key,
                    depot_machine_name,
                    template_name=depot_template_name)
    yield prov_crud.mgmt.get_ip_address(depot_machine_name)
    VM.factory(depot_machine_name, prov_crud).cleanup_on_provider()
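# Minimal usage sketch (assumption): a test consuming the depot_machine_ip fixture above.
# The SSH reachability check mirrors how other fixtures in this collection use net_check/ports.
def test_depot_vm_reachable(depot_machine_ip):
    assert net_check(ports.SSH, depot_machine_ip, force=True)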
def vm_name(request, initialize_provider, full_template):
    name = "test_alerts_{}".format(fauxfactory.gen_alpha())

    @request.addfinalizer
    def _cleanup_vm():
        try:
            if initialize_provider.mgmt.does_vm_exist(name):
                initialize_provider.mgmt.delete_vm(name)
            initialize_provider.refresh_provider_relationships()
        except Exception as e:
            logger.exception(e)

    vm_obj = VM.factory(name, initialize_provider, template_name=full_template["name"])
    vm_obj.create_on_provider(allow_skip="default")
    initialize_provider.mgmt.start_vm(vm_obj.name)
    initialize_provider.mgmt.wait_vm_running(vm_obj.name)
    # In order to have seamless SSH connection
    vm_ip, _ = wait_for(
        lambda: initialize_provider.mgmt.current_ip_address(vm_obj.name),
        num_sec=300, delay=5, fail_condition={None},
        message="wait for testing VM IP address.")
    wait_for(
        net_check, [ports.SSH, vm_ip], {"force": True},
        num_sec=300, delay=5, message="testing VM's SSH available")
    if not vm_obj.exists:
        initialize_provider.refresh_provider_relationships()
        vm_obj.wait_to_appear()
    if initialize_provider.type in CANDU_PROVIDER_TYPES:
        vm_obj.wait_candu_data_available(timeout=20 * 60)
    return name
def tagged_vm(new_tag, setup_provider_modscope, provider):
    ownership_vm = provider.data['ownership_vm']
    tag_vm = VM.factory(ownership_vm, provider)
    tag_vm.add_tag(new_tag)
    yield tag_vm
    login.login_admin()
    tag_vm.remove_tag(new_tag)
def test_no_template_power_control(provider, setup_provider_funcscope, soft_assert):
    """Ensures that no power button is displayed for templates.

    Prerequisites:
        * An infra provider that has some templates.

    Steps:
        * Open the view of all templates of the provider
        * Verify the Power toolbar button is not visible
        * Select some template using the checkbox
        * Verify the Power toolbar button is not visible
        * Click on some template to get into the details page
        * Verify the Power toolbar button is not visible
    """
    provider.load_all_provider_templates()
    toolbar.select('Grid View')
    soft_assert(not toolbar.exists("Power"), "Power displayed in template grid view!")
    # Ensure selecting a template doesn't cause power menu to appear
    templates = list(get_all_vms(True))
    template_name = random.choice(templates)
    selected_template = VM.factory(template_name, provider, template=True)
    # Check the power button with checking the quadicon
    quadicon = selected_template.find_quadicon(do_not_navigate=True, mark=True, refresh=False)
    soft_assert(not toolbar.exists("Power"), "Power displayed when template quadicon checked!")
    # Ensure there isn't a power button on the details page
    pytest.sel.click(quadicon)
    soft_assert(not toolbar.exists("Power"), "Power displayed in template details!")
def vm_ownership(enable_candu, clean_setup_provider, provider, appliance):
    # In these tests, the Metering report is filtered on VM owner, so VMs have to be
    # assigned ownership.
    vm_name = provider.data['cap_and_util']['chargeback_vm']
    if not provider.mgmt.does_vm_exist(vm_name):
        pytest.skip("Skipping test, {} VM does not exist".format(vm_name))
    provider.mgmt.start_vm(vm_name)
    provider.mgmt.wait_vm_running(vm_name)
    group_collection = appliance.collections.groups
    cb_group = group_collection.instantiate(description='EvmGroup-user')
    user = appliance.collections.users.create(
        name=fauxfactory.gen_alphanumeric(),
        credential=Credential(principal='uid' + '{}'.format(fauxfactory.gen_alphanumeric()),
                              secret='secret'),
        email='*****@*****.**',
        groups=cb_group,
        cost_center='Workload',
        value_assign='Database')
    vm = VM.factory(vm_name, provider)
    try:
        vm.set_ownership(user=user.name)
        logger.info('Assigned VM OWNERSHIP for {} running on {}'.format(vm_name, provider.name))
        yield user.name
    finally:
        vm.unset_ownership()
        user.delete()
def test_vm(setup_provider_modscope, provider, vm_name):
    """Fixture to provision a test VM on the provider being tested, if necessary."""
    vm = VM.factory(vm_name, provider, template_name=provider.data['full_template']['name'])
    if not provider.mgmt.does_vm_exist(vm_name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm
def test_no_template_power_control(provider, soft_assert):
    """Ensures that no power button is displayed for templates.

    Prerequisites:
        * An infra provider that has some templates.

    Steps:
        * Open the view of all templates of the provider
        * Verify the Power toolbar button is not visible
        * Select some template using the checkbox
        * Verify the Power toolbar button is not visible
        * Click on some template to get into the details page
        * Verify the Power toolbar button is not visible
    """
    view = navigate_to(provider, 'ProviderTemplates')
    view.toolbar.view_selector.select('Grid View')
    soft_assert(not view.toolbar.power.is_enabled, "Power displayed in template grid view!")
    # Ensure selecting a template doesn't cause power menu to appear
    templates = list(get_all_vms(True))
    template_name = random.choice(templates)
    selected_template = VM.factory(template_name, provider, template=True)
    # Check the power button with checking the quadicon
    view = navigate_to(selected_template, 'AllForProvider', use_resetter=False)
    entity = view.entities.get_entity(name=selected_template.name, surf_pages=True)
    entity.check()
    soft_assert(not view.toolbar.power.is_enabled,
                "Power displayed when template quadicon checked!")
    # Ensure there isn't a power button on the details page
    entity.click()
    soft_assert(not view.toolbar.power.is_enabled, "Power displayed in template details!")
def tagged_vm(tag, has_no_providers_modscope, setup_provider_modscope, provider):
    ownership_vm = provider.data.cap_and_util.capandu_vm
    tag_vm = VM.factory(ownership_vm, provider)
    tag_vm.add_tag(tag=tag)
    yield tag_vm
    tag_vm.appliance.server.login_admin()
    tag_vm.remove_tag(tag=tag)
def test_iso_provision_from_template(appliance, provider, vm_name, smtp_test, datastore_init,
                                     request, setup_provider):
    """Tests ISO provisioning

    Metadata:
        test_flag: iso, provision
        suite: infra_provisioning
    """
    # generate_tests makes sure these have values
    iso_template, host, datastore, iso_file, iso_kickstart, \
        iso_root_password, iso_image_type, vlan = map(
            provider.data['provisioning'].get,
            ('pxe_template', 'host', 'datastore', 'iso_file', 'iso_kickstart',
             'iso_root_password', 'iso_image_type', 'vlan'))
    request.addfinalizer(lambda: VM.factory(vm_name, provider).cleanup_on_provider())
    provisioning_data = {
        'catalog': {
            'vm_name': vm_name,
            'provision_type': 'ISO',
            'iso_file': {'name': iso_file}},
        'environment': {
            'host_name': {'name': host},
            'datastore_name': {'name': datastore}},
        'customize': {
            'custom_template': {'name': iso_kickstart},
            'root_password': iso_root_password},
        'network': {
            'vlan': vlan}}
    do_vm_provisioning(appliance, iso_template, provider, vm_name, provisioning_data, request,
                       smtp_test, num_sec=1500)
def provision_vm(provider, template):
    vm_name = random_vm_name(context="snpst")
    vm = VM.factory(vm_name, provider, template_name=template.name)
    if not provider.mgmt.does_vm_exist(vm_name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm
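# Minimal usage sketch (assumption): a test consuming the provision_vm helper above; only the
# existence check against provider.mgmt is shown, which is the pattern used elsewhere in this
# collection.
def test_provisioned_vm_exists(provider, template):
    vm = provision_vm(provider, template)
    assert provider.mgmt.does_vm_exist(vm.name)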
def _provisioner(template, provisioning_data, delayed=None):
    vm = InfraVm(name=vm_name, provider=provider, template_name=template)
    view = navigate_to(vm, 'Provision')
    view.form.fill_with(provisioning_data, on_change=view.form.submit_button)
    base_view = vm.appliance.browser.create_view(BaseLoggedInPage)
    base_view.flash.assert_no_error()
    request.addfinalizer(lambda: VM.factory(vm_name, provider).cleanup_on_provider())
    request_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
    provision_request = appliance.collections.requests.instantiate(
        description=request_description)
    if delayed is not None:
        total_seconds = (delayed - datetime.utcnow()).total_seconds()
        try:
            wait_for(provision_request.is_finished,
                     fail_func=provision_request.update, num_sec=total_seconds, delay=5)
            pytest.fail("The provisioning was not postponed")
        except TimedOutError:
            pass
    logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
    wait_for(
        provider.mgmt.does_vm_exist, [vm_name],
        fail_func=provider.refresh_provider_relationships,
        handle_exception=True, num_sec=600)
    # nav to requests page happens on successful provision
    logger.info('Waiting for cfme provision request for vm %s', vm_name)
    provision_request.wait_for_request()
    msg = "Provisioning failed with the message {}".format(provision_request.rest.message)
    assert provision_request.is_succeeded(), msg
    return vm
def _provisioner(template, provisioning_data, delayed=None):
    pytest.sel.force_navigate('infrastructure_provision_vms', context={
        'provider': provider,
        'template_name': template,
    })
    vm_name = provisioning_data["vm_name"]
    fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
    flash.assert_no_errors()
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
    if delayed is not None:
        total_seconds = (delayed - datetime.utcnow()).total_seconds()
        row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
        cells = {'Description': row_description}
        try:
            row, __ = wait_for(requests.wait_for_request, [cells],
                               fail_func=requests.reload, num_sec=total_seconds, delay=5)
            pytest.fail("The provisioning was not postponed")
        except TimedOutError:
            pass
    logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
    wait_for(provider.mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)
    # nav to requests page happens on successful provision
    logger.info('Waiting for cfme provision request for vm %s', vm_name)
    row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
    cells = {'Description': row_description}
    row, __ = wait_for(requests.wait_for_request, [cells],
                       fail_func=requests.reload, num_sec=900, delay=20)
    assert row.last_message.text == 'Vm Provisioned Successfully'
    return VM.factory(vm_name, provider)
def ssa_vm(request, local_setup_provider, provider, vm_analysis_provisioning_data, appliance,
           analysis_type):
    """Fixture to provision an instance on the provider."""
    vm_name = 'test-ssa-{}-{}'.format(fauxfactory.gen_alphanumeric(), analysis_type)
    vm = VM.factory(vm_name, provider, template_name=vm_analysis_provisioning_data.image)
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
    provision_data = vm_analysis_provisioning_data.copy()
    del provision_data['image']
    vm.create_on_provider(find_in_cfme=True, **provision_data)
    if provider.one_of(OpenStackProvider):
        public_net = provider.data['public_network']
        vm.provider.mgmt.assign_floating_ip(vm.name, public_net)
    logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)

    @wait_for_decorator(timeout="20m", delay=5)
    def get_ip_address():
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, provider.mgmt.vm_status(vm_name), provider.mgmt.is_vm_stopped(vm_name)))
        if provider.mgmt.is_vm_stopped(vm_name):
            provider.mgmt.start_vm(vm_name)
        ip = provider.mgmt.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = provider.mgmt.get_ip_address(vm_name)
    assert connect_ip is not None
    # Check that we can at least get the uptime via ssh. This should only be possible
    # if the username and password have been set via the cloud-init script, so it is
    # a valid check.
    if vm_analysis_provisioning_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(
            hostname=connect_ip,
            username=vm_analysis_provisioning_data['username'],
            password=vm_analysis_provisioning_data['password'],
            port=22)
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
        vm.ssh = ssh_client
    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_provisioning_data['image']
    vm.connect_ip = connect_ip
    # TODO: if rhev and iscsi, it needs direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        cfme_rel = VM.CfmeRelationship(vm)
        server_name = appliance.server.name
        cfme_rel.set_relationship(str(server_name), configuration.server_id())
    yield vm
    # Close the SSH client if we have one
    if getattr(vm, 'ssh', None):
        vm.ssh.close()
def small_vm(provider, small_template_modscope):
    vm = VM.factory(random_vm_name(context='reconfig'), provider, small_template_modscope.name)
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    vm.refresh_relationships()
    yield vm
    vm.delete_from_provider()
def vm_crud(provider, setup_provider_modscope, small_template_modscope):
    vm = VM.factory(
        'test_events_{}'.format(fauxfactory.gen_alpha(length=8).lower()),
        provider,
        template_name=small_template_modscope)
    yield vm
    if vm.does_vm_exist_on_provider():
        vm.delete_from_provider()
def new_vm(provider, setup_provider, small_template_modscope):
    """Fixture to provision and delete vm on the provider"""
    vm_name = 'test_service_{}'.format(fauxfactory.gen_alphanumeric())
    vm = VM.factory(vm_name, provider, small_template_modscope.name)
    vm.create_on_provider(find_in_cfme=True, timeout=700, allow_skip="default")
    yield vm
    vm.cleanup_on_provider()
    provider.refresh_provider_relationships()
def test_group_ownership_on_user_or_group_role(request, user3, setup_infra_provider):
    set_vm_to_group = VM.factory('cu-9-5', setup_infra_provider)
    set_vm_to_group.set_ownership(group=user3.group.description)
    with user3:
        # assert on the expression directly; asserting a (expr, msg) tuple is always true
        assert set_vm_to_group.exists, "vm not found"
    set_vm_to_group.unset_ownership()
    with user3:
        assert not set_vm_to_group.exists, "vm exists"
def testing_vm(request, vm_name, setup_provider, provider, provisioning):
    vm_obj = VM.factory(vm_name, provider, provisioning["template"])

    def _finalize():
        vm_obj.delete_from_provider()

    request.addfinalizer(_finalize)
    vm_obj.create_on_provider(find_in_cfme=True, allow_skip="default")
    return vm_obj
def test_vm(setup_provider_modscope, provider, vm_name, request):
    """Fixture to provision a test VM on the provider being tested, if necessary."""
    vm = VM.factory(vm_name, provider, template_name=provider.data["small_template"])
    if not provider.mgmt.does_vm_exist(vm_name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    request.addfinalizer(vm.delete_from_provider)
    return vm
def new_vm(request, provider):
    vm = VM.factory(random_vm_name('timelines', max_length=16), provider)
    logger.debug('Fixture new_vm set up! Name: %r', vm.name)
    logger.info('Will create %r on Provider: %r', vm.name, vm.provider.name)
    vm.create_on_provider(find_in_cfme=False, timeout=500)
    yield vm
    logger.debug('Fixture new_vm teardown! Name: %r Provider: %r', vm.name, vm.provider.name)
    vm.provider.mgmt.delete_vm(vm.name)
def vm_obj(provider, setup_provider_modscope, small_template_modscope):
    """Creates new VM or instance"""
    vm_name = random_vm_name('attrs')
    new_vm = VM.factory(vm_name, provider, template_name=small_template_modscope.name)
    yield new_vm
    new_vm.cleanup_on_provider()
def _get_vm_obj_if_exists_on_provider(provider, vm_name):
    vm = VM.factory(vm_name, provider)
    if not vm.does_vm_exist_on_provider():
        raise ValueError(
            "Unable to ensure VM state: "
            "VM '{}' does not exist on provider '{}'".format(vm_name, provider.key))
    return vm
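# Minimal usage sketch (assumption): callers are expected to handle the ValueError raised above
# when the VM is not present on the provider, e.g. by skipping the test. _ensure_vm_running is a
# hypothetical helper named only for illustration.
def _ensure_vm_running(provider, vm_name):
    try:
        vm = _get_vm_obj_if_exists_on_provider(provider, vm_name)
    except ValueError:
        pytest.skip("VM {} not present on provider {}".format(vm_name, provider.key))
    provider.mgmt.start_vm(vm.name)
    provider.mgmt.wait_vm_running(vm.name)
    return vm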
def test_group_ownership_on_user_only_role(request, user2, setup_provider, provider):
    ownership_vm = provider.data['ownership_vm']
    group_ownership_vm = VM.factory(ownership_vm, provider)
    group_ownership_vm.set_ownership(group=user2.group.description)
    with user2:
        # assert on the expression directly; asserting a (expr, msg) tuple is always true
        assert group_ownership_vm.exists, "vm not found"
    group_ownership_vm.unset_ownership()
    with user2:
        assert not group_ownership_vm.exists, "vm exists"
def new_vm(setup_provider_modscope, provider, request):
    """Fixture to provision a test VM on the provider being tested, if necessary."""
    vm_name = random_vm_name(context='migrate')
    vm = VM.factory(vm_name, provider, template_name=provider.data['small_template'])
    if not provider.mgmt.does_vm_exist(vm_name):
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    request.addfinalizer(vm.cleanup_on_provider)
    return vm
def test_no_dvd_ruins_refresh(provider, small_template):
    host_group = provider.data["provisioning"]["host_group"]
    with provider.mgmt.with_vm(
            small_template,
            vm_name="test_no_dvd_{}".format(fauxfactory.gen_alpha()),
            host_group=host_group) as vm_name:
        provider.mgmt.disconnect_dvd_drives(vm_name)
        vm = VM.factory(vm_name, provider)
        provider.refresh_provider_relationships()
        vm.wait_to_appear()
def new_vm(request, a_provider):
    vm = VM.factory(random_vm_name("timelines", max_length=16), a_provider)
    request.addfinalizer(vm.delete_from_provider)
    if not a_provider.mgmt.does_vm_exist(vm.name):
        logger.info("deploying %s on provider %s", vm.name, a_provider.key)
        vm.create_on_provider(allow_skip="default", find_in_cfme=True)
    return vm
def vm_ownership(enable_candu, clean_setup_provider, provider):
    # In these tests, chargeback reports are filtered on VM owner, so VMs have to be
    # assigned ownership.
    try:
        vm_name = provider.data['cap_and_util']['chargeback_vm']
        vm = VM.factory(vm_name, provider)
        cb_group = ac.Group(description='EvmGroup-user')
        user = ac.User(name=provider.name + fauxfactory.gen_alphanumeric(),
                       credential=new_credential(),
                       email='*****@*****.**',
                       group=cb_group,
                       cost_center='Workload',
                       value_assign='Database')
        user.create()
        vm.set_ownership(user=user.name)
        logger.info('Assigned VM OWNERSHIP for {} running on {}'.format(vm_name, provider.name))
        yield user.name
    finally:
        vm.unset_ownership()
        user.delete()
def vm_obj(request, provider, setup_provider, console_template, vm_name):
    """Create a VM on the provider with the given template, and return the vm_obj.

    Also removes the VM from the provider, using the nested function _delete_vm,
    after the test is completed.
    """
    vm_obj = VM.factory(vm_name, provider, template_name=console_template)

    @request.addfinalizer
    def _delete_vm():
        try:
            vm_obj.delete_from_provider()
        except Exception:
            logger.warning("Failed to delete vm `{}`.".format(vm_obj.name))

    vm_obj.create_on_provider(timeout=2400, find_in_cfme=True, allow_skip="default")
    if provider.one_of(OpenStackProvider):
        # Assign a FloatingIP to the OpenStack instance from the 'public' pool
        # so that we can SSH to it
        provider.mgmt.assign_floating_ip(vm_obj.name, 'public')
    return vm_obj
def myservice(appliance, provider, catalog_item, request):
    vm_name = catalog_item.prov_data["catalog"]["vm_name"]
    request.addfinalizer(
        lambda: VM.factory(vm_name + "_0001", provider).cleanup_on_provider())
    service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
    service_catalogs.order()
    logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
    request_description = catalog_item.name
    provision_request = appliance.collections.requests.instantiate(
        request_description, partial_check=True)
    provision_request.wait_for_request()
    assert provision_request.is_finished()
    service = MyService(appliance, catalog_item.name, vm_name)
    yield service
    try:
        service.delete()
    except Exception as ex:
        logger.warning(
            'Exception while deleting MyService, continuing: {}'.format(ex.message))
def test_scope_windows_registry_stuck(request, infra_provider):
    """If you provide Scope checking windows registry, it messes CFME up. Recoverable."""
    policy = VMCompliancePolicy(
        "Windows registry scope glitch testing Compliance Policy",
        active=True,
        scope=r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, "
              r"some value, INCLUDES, some content)"
    )
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    policy.create()
    profile = PolicyProfile(
        "Windows registry scope glitch testing Compliance Policy",
        policies=[policy]
    )
    request.addfinalizer(lambda: profile.delete() if profile.exists else None)
    profile.create()
    # Now assign this malformed profile to a VM
    vm = VM.factory(Vm.get_first_vm(provider=infra_provider).name, infra_provider)
    vm.assign_policy_profiles(profile.description)
    # It should be screwed here, but do additional check
    navigate_to(Server, 'Dashboard')
    navigate_to(Vm, 'All')
    assert "except" not in pytest.sel.title().lower()
    vm.unassign_policy_profiles(profile.description)
def vmware_vm(full_template_modscope, provider):
    vm_obj = VM.factory(random_vm_name("ansible"), provider,
                        template_name=full_template_modscope)
    vm_obj.create_on_provider(allow_skip="default")
    provider.mgmt.start_vm(vm_obj.name)
    provider.mgmt.wait_vm_running(vm_obj.name)
    # In order to have seamless SSH connection
    vm_ip, _ = wait_for(lambda: provider.mgmt.current_ip_address(vm_obj.name),
                        num_sec=300, delay=5, fail_condition={None},
                        message="wait for testing VM IP address.")
    wait_for(net_check, [ports.SSH, vm_ip], {"force": True},
             num_sec=300, delay=5, message="testing VM's SSH available")
    if not vm_obj.exists:
        provider.refresh_provider_relationships()
        vm_obj.wait_to_appear()
    yield vm_obj
    if provider.mgmt.does_vm_exist(vm_obj.name):
        provider.mgmt.delete_vm(vm_obj.name)
    provider.refresh_provider_relationships()
def test_order_catalog_item_via_rest(request, appliance, provider, catalog_item, catalog):
    """Same as :py:func:`test_order_catalog_item`, but using REST.

    Metadata:
        test_flag: provision, rest
    """
    vm_name = catalog_item.prov_data['catalog']["vm_name"]
    request.addfinalizer(lambda: VM.factory(vm_name, provider).cleanup_on_provider())
    request.addfinalizer(catalog_item.delete)
    catalog = appliance.rest_api.collections.service_catalogs.find_by(name=catalog.name)
    assert len(catalog) == 1
    catalog, = catalog
    template = catalog.service_templates.find_by(name=catalog_item.name)
    assert len(template) == 1
    template, = template
    req = template.action.order()
    assert_response(appliance)

    @wait_for_decorator(timeout="15m", delay=5)
    def request_finished():
        req.reload()
        logger.info("Request status: {}, Request state: {}, Request message: {}".format(
            req.status, req.request_state, req.message))
        return req.status.lower() == "ok" and req.request_state.lower() == "finished"
def test_provision_vm_to_virtual_network(appliance, setup_provider, provider, vm_name,
                                         request, provisioning, network):
    """Tests provisioning a vm from a template to a virtual network

    Metadata:
        test_flag: provision
    """
    request.addfinalizer(lambda: VM.factory(vm_name, provider).cleanup_on_provider())
    template = provisioning['template']
    provisioning_data = {
        'catalog': {
            'vm_name': vm_name},
        'environment': {
            'vm_name': vm_name,
            'automatic_placement': True},
        'network': {
            'vlan': partial_match(network.name)}
    }
    wait_for(
        do_vm_provisioning,
        [appliance, template, provider, vm_name, provisioning_data, request],
        {'num_sec': 900, 'smtp_test': False},
        handle_exception=True, delay=50, num_sec=900,
        fail_func=appliance.server.browser.refresh,
        message='Cannot do provision for vm {}.'.format(vm_name))
def vm_crud(vm_name, provider):
    return VM.factory(vm_name, provider)
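# Minimal usage sketch (assumption): the vm_crud fixture above only builds the CRUD object; the
# VM itself is typically created in the test body, using the same calls seen elsewhere in this
# collection.
def test_vm_appears_after_creation(vm_crud, provider):
    vm_crud.create_on_provider(find_in_cfme=True, allow_skip="default")
    assert vm_crud.does_vm_exist_on_provider()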
def test_cloud_catalog_item(appliance, vm_name, setup_provider, provider, dialog, catalog,
                            request, provisioning):
    """Tests cloud catalog item

    Metadata:
        test_flag: provision
    """
    wait_for(provider.is_refreshed, func_kwargs=dict(refresh_delta=10), timeout=600)
    vm = VM.factory("{}0001".format(vm_name), provider)
    request.addfinalizer(lambda: vm.cleanup_on_provider())
    image = provisioning['image']['name']
    item_name = "{}-service-{}".format(provider.name, fauxfactory.gen_alphanumeric())
    inst_args = {
        'catalog': {
            'catalog_name': {'name': image, 'provider': provider.name},
            'vm_name': vm_name
        },
        'environment': {
            'availability_zone': provisioning.get('availability_zone', None),
            'security_groups': [provisioning.get('security_group', None)],
            'cloud_tenant': provisioning.get('cloud_tenant', None),
            'cloud_network': provisioning.get('cloud_network', None),
            'cloud_subnet': provisioning.get('cloud_subnet', None),
            'resource_groups': provisioning.get('resource_group', None)
        },
        'properties': {
            'instance_type': partial_match(provisioning.get('instance_type', None)),
            'guest_keypair': provisioning.get('guest_keypair', None)
        }
    }
    # GCE specific
    if provider.one_of(GCEProvider):
        recursive_update(inst_args, {
            'properties': {
                'boot_disk_size': provisioning['boot_disk_size'],
                'is_preemptible': True
            }
        })
    # Azure specific
    if provider.one_of(AzureProvider):
        recursive_update(inst_args, {
            'customize': {
                'admin_username': provisioning['customize_username'],
                'root_password': provisioning['customize_password']
            }
        })
    catalog_item = appliance.collections.catalog_items.create(
        provider.catalog_item_type,
        name=item_name,
        description="my catalog",
        display_in=True,
        catalog=catalog,
        dialog=dialog,
        prov_data=inst_args)
    request.addfinalizer(catalog_item.delete)
    service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
    service_catalogs.order()
    logger.info('Waiting for cfme provision request for service %s', item_name)
    request_description = item_name
    provision_request = appliance.collections.requests.instantiate(
        request_description, partial_check=True)
    provision_request.wait_for_request()
    msg = "Request failed with the message {}".format(provision_request.rest.message)
    assert provision_request.is_succeeded(), msg
def tagged_vm(new_tag, setup_provider, provider, ownership_vm):
    tag_vm = VM.factory(ownership_vm, provider)
    tag_vm.add_tag(new_tag)
    yield tag_vm
    login.login_admin()
    tag_vm.remove_tag(new_tag)
def vmware_vm(request, vmware_provider):
    vm = VM.factory("test_control_{}".format(fauxfactory.gen_alpha().lower()), vmware_provider)
    vm.create_on_provider(find_in_cfme=True)
    request.addfinalizer(vm.delete_from_provider)
    return vm
def vm_crud(provider, vm_name, full_template):
    return VM.factory(vm_name, provider, template_name=full_template["name"])
def __init__(self, provider, vm_name, api):
    self._prov = provider
    self._vm = vm_name
    self.api = api
    self.crud = VM.factory(vm_name, self._prov)
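# Context sketch (assumption): the __init__ above belongs to the VMWrapper returned by _get_vm
# earlier in this collection; this hypothetical helper shows how the wrapper is typically consumed,
# with the CRUD object built via VM.factory and the REST entity side by side.
def _example_use_of_vmwrapper(provider, vm_name, api):
    vm = VMWrapper(provider, vm_name, api)
    vm.crud.refresh_relationships()  # UI/CRUD side
    vm.api.reload()                  # REST API side
    return vm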
def test_cloud_provision_from_template_using_rest(appliance, request, setup_provider, provider,
                                                  vm_name, provisioning):
    """Tests provisioning from a template using the REST API.

    Metadata:
        test_flag: provision, rest
    """
    if 'flavors' not in appliance.rest_api.collections.all_names:
        pytest.skip("This appliance does not have `flavors` collection.")
    image_guid = appliance.rest_api.collections.templates.find_by(
        name=provisioning['image']['name'])[0].guid
    if ':' in provisioning['instance_type'] and provider.one_of(EC2Provider, GCEProvider):
        instance_type = provisioning['instance_type'].split(':')[0].strip()
    elif provider.type == 'azure':
        instance_type = provisioning['instance_type'].lower()
    else:
        instance_type = provisioning['instance_type']
    flavors = appliance.rest_api.collections.flavors.find_by(name=instance_type)
    assert flavors
    # TODO: Multi search when it works
    for flavor in flavors:
        if flavor.ems.name == provider.name:
            flavor_id = flavor.id
            break
    else:
        pytest.fail("Cannot find flavour {} for provider {}".format(
            instance_type, provider.name))
    provision_data = {
        "version": "1.1",
        "template_fields": {
            "guid": image_guid,
        },
        "vm_fields": {
            "vm_name": vm_name,
            "instance_type": flavor_id,
            "request_type": "template",
        },
        "requester": {
            "user_name": "admin",
            "owner_first_name": "Administrator",
            "owner_last_name": "Administratorovich",
            "owner_email": "*****@*****.**",
            "auto_approve": True,
        },
        "tags": {},
        "additional_values": {},
        "ems_custom_attributes": {},
        "miq_custom_attributes": {}
    }
    if not isinstance(provider, AzureProvider):
        recursive_update(provision_data, {
            'vm_fields': {
                'availability_zone': provisioning['availability_zone'],
                'security_groups': [provisioning['security_group']],
                'guest_keypair': provisioning['guest_keypair']
            }
        })
    if isinstance(provider, GCEProvider):
        recursive_update(provision_data, {
            'vm_fields': {
                'cloud_network': provisioning['cloud_network'],
                'boot_disk_size': provisioning['boot_disk_size'].replace(' ', '.'),
                'zone': provisioning['availability_zone'],
                'region': provider.data["region"]
            }
        })
    elif isinstance(provider, AzureProvider):
        try:
            template = provider.data.templates.small_template
            vm_user = credentials[template.creds].username
            vm_password = credentials[template.creds].password
        except AttributeError:
            pytest.skip('Could not find small_template or credentials for {}'.format(
                provider.name))
        # mapping: product/dialogs/miq_dialogs/miq_provision_azure_dialogs_template.yaml
        recursive_update(provision_data, {
            'vm_fields': {
                'root_username': vm_user,
                'root_password': vm_password
            }
        })
    request.addfinalizer(lambda: VM.factory(vm_name, provider).cleanup_on_provider())
    response = appliance.rest_api.collections.provision_requests.action.create(
        **provision_data)[0]
    assert_response(appliance)
    provision_request = appliance.collections.requests.instantiate(
        description=response.description)
    provision_request.wait_for_request()
    assert provision_request.is_succeeded(), (
        "Provisioning failed with the message {}".format(provision_request.rest.message))
    wait_for(lambda: provider.mgmt.does_vm_exist(vm_name), num_sec=1000, delay=5,
             message="VM {} becomes visible".format(vm_name))
def vm_crud(provider, setup_provider_modscope, small_template):
    return VM.factory(
        'test_events_{}'.format(fauxfactory.gen_alpha(length=8).lower()),
        provider,
        template_name=small_template)
def clean_vm(vm_name, provider):
    vm_obj = VM.factory(vm_name=vm_name, provider=provider)
    vm_obj.delete_from_provider()
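# Minimal usage sketch (assumption): clean_vm is typically registered as a finalizer, as other
# snippets in this collection do with cleanup_vm, so the VM is removed from the provider even if
# the test fails.
def test_something_with_a_vm(request, provider, vm_name):
    request.addfinalizer(lambda: clean_vm(vm_name, provider))
    vm_obj = VM.factory(vm_name, provider)
    vm_obj.create_on_provider(find_in_cfme=True, allow_skip="default")
    assert provider.mgmt.does_vm_exist(vm_name)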
def instance(request, local_setup_provider, provider, vm_name, vm_analysis_data, appliance):
    """Fixture to provision an instance on the provider."""
    vm = VM.factory(vm_name, provider, template_name=vm_analysis_data['image'])
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
    provision_data = vm_analysis_data.copy()
    del provision_data['image']
    vm.create_on_provider(find_in_cfme=True, **provision_data)
    if provider.type == "openstack":
        vm.provider.mgmt.assign_floating_ip(vm.name, 'public')
    logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)
    mgmt_system = provider.get_mgmt_system()

    @wait_for_decorator(timeout="20m", delay=5)
    def get_ip_address():
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, mgmt_system.vm_status(vm_name), mgmt_system.is_vm_stopped(vm_name)))
        if mgmt_system.is_vm_stopped(vm_name):
            mgmt_system.start_vm(vm_name)
        ip = mgmt_system.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = mgmt_system.get_ip_address(vm_name)
    assert connect_ip is not None
    # Check that we can at least get the uptime via ssh. This should only be possible
    # if the username and password have been set via the cloud-init script, so it is
    # a valid check.
    if vm_analysis_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(hostname=connect_ip,
                                   username=vm_analysis_data['username'],
                                   password=vm_analysis_data['password'],
                                   port=22)
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
        vm.ssh = ssh_client
    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_data['image']
    vm.connect_ip = connect_ip
    # TODO: This is completely wrong and needs to be fixed
    # The CFME relationship is supposed to be set to the appliance, which is required
    # to be placed within the same datastore that the VM resides in.
    #
    # Also, if rhev and iscsi, it needs direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        from cfme.infrastructure.virtual_machines import Vm
        cfme_rel = Vm.CfmeRelationship(vm)
        server_name = appliance.server_name()
        cfme_rel.set_relationship(str(server_name), configuration.server_id())
    yield vm
    # Close the SSH client if we have one
    if getattr(vm, 'ssh', None):
        vm.ssh.close()
def vm_crud(provider, small_template):
    return VM.factory(random_vm_name(context='genealogy'),
                      provider,
                      template_name=small_template.name)
def instance(request, local_setup_provider, provider, vm_name, vm_analysis_data):
    """Fixture to provision an instance on the provider."""
    template = vm_analysis_data.get('image', None)
    host_name, datastore_name = map(vm_analysis_data.get, ('host', 'datastore'))
    mgmt_system = provider.get_mgmt_system()
    provisioning_data = {
        'vm_name': vm_name,
        'host_name': {'name': [host_name]},
        'datastore_name': {'name': [datastore_name]},
    }
    try:
        provisioning_data['vlan'] = vm_analysis_data['vlan']
    except KeyError:
        # provisioning['vlan'] is required for rhevm provisioning
        if provider.type == 'rhevm':
            raise pytest.fail('rhevm requires a vlan value in provisioning info')
    vm = VM.factory(vm_name, provider)
    connect_ip = None
    if provider.type == "openstack":
        image = vm_analysis_data['image']
        vm = VM.factory(vm_name, provider, image)
        request.addfinalizer(vm.delete_from_provider)
        connect_ip = mgmt_system.get_first_floating_ip()
        provider.refresh_provider_relationships(method='ui')
        inst_args = {
            'email': '*****@*****.**',
            'first_name': 'Image',
            'last_name': 'Provisioner',
            'template_name': image,
            'notes': ('Testing provisioning from image {} to vm {} on provider {}'.format(
                image, vm_name, provider.key)),
            'instance_type': vm_analysis_data['instance_type'],
            'availability_zone': vm_analysis_data['availability_zone'],
            'security_groups': [vm_analysis_data['security_group']],
            'cloud_network': vm_analysis_data['cloud_network'],
            'public_ip_address': connect_ip,
        }
        vm.create(**inst_args)
    else:
        request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
        do_vm_provisioning(template, provider, vm_name, provisioning_data, request, None,
                           num_sec=6000)
    logger.info("VM %s provisioned, waiting for IP address to be assigned", vm_name)

    @pytest.wait_for(timeout="20m", delay=5)
    def get_ip_address():
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, mgmt_system.vm_status(vm_name), mgmt_system.is_vm_stopped(vm_name)))
        if mgmt_system.is_vm_stopped(vm_name):
            mgmt_system.start_vm(vm_name)
        ip = mgmt_system.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = mgmt_system.get_ip_address(vm_name)
    assert connect_ip is not None
    # Check that we can at least get the uptime via ssh. This should only be possible
    # if the username and password have been set via the cloud-init script, so it is
    # a valid check.
    if vm_analysis_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(hostname=connect_ip,
                                   username=vm_analysis_data['username'],
                                   password=vm_analysis_data['password'],
                                   port=22)
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=False)
        vm.ssh = ssh_client
    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_data['image']
    vm.connect_ip = connect_ip
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        from cfme.infrastructure.virtual_machines import Vm
        cfme_rel = Vm.CfmeRelationship(vm)
        cfme_rel.set_relationship(str(configuration.server_name()), configuration.server_id())
    return vm
def test_provision_approval(appliance, setup_provider, provider, vm_name, smtp_test, request,
                            edit, provisioning):
    """Tests provisioning approval. Tests a couple of things.

    * Approve manually
    * Approve by editing the request to conform

    Prerequisites:
        * A provider that can provision.
        * Automate role enabled
        * User with e-mail set so you can receive and view them

    Steps:
        * Create a provisioning request that does not get automatically approved
            (eg. ``num_vms`` bigger than 1)
        * Wait for an e-mail to come, informing you that the auto-approval was unsuccessful.
        * Depending on whether you want to do manual approval or edit approval, do:
            * MANUAL: manually approve the request in UI
            * EDIT: Edit the request in UI so it conforms the rules for auto-approval.
        * Wait for an e-mail with approval
        * Wait until the request finishes
        * Wait until an email, informing about finished provisioning, comes.

    Metadata:
        test_flag: provision
        suite: infra_provisioning
    """
    # generate_tests makes sure these have values
    template, host, datastore = map(provisioning.get, ('template', 'host', 'datastore'))
    # It will provision two of them
    vm_names = [vm_name + "001", vm_name + "002"]
    request.addfinalizer(
        lambda: [VM.factory(name, provider).cleanup_on_provider() for name in vm_names])
    provisioning_data = {
        'catalog': {
            'vm_name': vm_name,
            'num_vms': '2'
        },
        'environment': {
            'host_name': {'name': host},
            'datastore_name': {'name': datastore}
        },
        'network': {
            'vlan': partial_match(provisioning['vlan'])
        }
    }
    do_vm_provisioning(appliance, template, provider, vm_name, provisioning_data, request,
                       smtp_test, wait=False)
    wait_for(
        lambda: len(filter(
            lambda mail: "your request for a new vms was not autoapproved" in normalize_text(
                mail["subject"]),
            smtp_test.get_emails())) == 1,
        num_sec=90, delay=5)
    wait_for(
        lambda: len(filter(
            lambda mail: "virtual machine request was not approved" in normalize_text(
                mail["subject"]),
            smtp_test.get_emails())) == 1,
        num_sec=90, delay=5)
    smtp_test.clear_database()

    cells = {'Description': 'Provision from [{}] to [{}###]'.format(template, vm_name)}
    provision_request = appliance.collections.requests.instantiate(cells=cells)
    navigate_to(provision_request, 'Details')
    if edit:
        # Automatic approval after editing the request to conform
        new_vm_name = vm_name + "-xx"
        modifications = {
            'catalog': {'num_vms': "1", 'vm_name': new_vm_name},
            'Description': 'Provision from [{}] to [{}]'.format(template, new_vm_name)
        }
        provision_request.edit_request(values=modifications)
        vm_names = [new_vm_name]  # Will be just one now
        request.addfinalizer(
            lambda: VM.factory(new_vm_name, provider).cleanup_on_provider())
    else:
        # Manual approval
        provision_request.approve_request(method='ui', reason="Approved")
        vm_names = [vm_name + "001", vm_name + "002"]  # There will be two VMs
        request.addfinalizer(
            lambda: [VM.factory(name, provider).cleanup_on_provider() for name in vm_names])
    wait_for(
        lambda: len(filter(
            lambda mail: "your virtual machine configuration was approved" in normalize_text(
                mail["subject"]),
            smtp_test.get_emails())) == 1,
        num_sec=120, delay=5)
    smtp_test.clear_database()

    # Wait for the VM to appear on the provider backend before proceeding to ensure proper cleanup
    logger.info('Waiting for vms %s to appear on provider %s', ", ".join(vm_names), provider.key)
    wait_for(lambda: all(map(provider.mgmt.does_vm_exist, vm_names)),
             handle_exception=True, num_sec=600)

    provision_request.wait_for_request(method='ui')
    msg = "Provisioning failed with the message {}".format(
        provision_request.row.last_message.text)
    assert provision_request.is_succeeded(method='ui'), msg

    # Wait for e-mails to appear
    def verify():
        return (len(filter(
            lambda mail: "your virtual machine request has completed vm {}".format(
                normalize_text(vm_name)) in normalize_text(mail["subject"]),
            smtp_test.get_emails())) == len(vm_names))

    wait_for(verify, message="email receive check", delay=5)
def vm_crud(provider, small_template):
    return VM.factory(
        'test_genealogy_{}'.format(fauxfactory.gen_alpha(length=8).lower()),
        provider,
        template_name=small_template)
def test_manual_placement_using_rest(appliance, request, setup_provider, provider, vm_name,
                                     provisioning):
    """Tests provisioning a cloud instance with manual placement using the REST API.

    Metadata:
        test_flag: provision, rest
    """
    image_guid = appliance.rest_api.collections.templates.get(
        name=provisioning['image']['name']).guid
    provider_rest = appliance.rest_api.collections.providers.get(name=provider.name)

    security_group_name = provisioning['security_group'].split(':')[0].strip()
    if ':' in provisioning['instance_type'] and provider.one_of(EC2Provider):
        instance_type = provisioning['instance_type'].split(':')[0].strip()
    else:
        instance_type = provisioning['instance_type']

    flavors = appliance.rest_api.collections.flavors.find_by(name=instance_type)
    assert flavors
    flavor = None
    for flavor in flavors:
        if flavor.ems_id == provider_rest.id:
            break
    else:
        pytest.fail("Cannot find flavour.")

    provider_data = appliance.rest_api.get(
        provider_rest._href +
        '?attributes=cloud_networks,cloud_subnets,security_groups,cloud_tenants')

    # find out cloud network
    assert provider_data['cloud_networks']
    cloud_network_name = provisioning.get('cloud_network').strip()
    if provider.one_of(EC2Provider):
        cloud_network_name = cloud_network_name.split()[0]
    cloud_network = None
    for cloud_network in provider_data['cloud_networks']:
        # If name of cloud network is available, find match.
        # Otherwise just "enabled" is enough.
        if cloud_network_name and cloud_network_name != cloud_network['name']:
            continue
        if cloud_network['enabled']:
            break
    else:
        pytest.fail("Cannot find cloud network.")

    # find out security group
    assert provider_data['security_groups']
    security_group = None
    for group in provider_data['security_groups']:
        if (group.get('cloud_network_id') == cloud_network['id'] and
                group['name'] == security_group_name):
            security_group = group
            break
        # OpenStack doesn't seem to have the "cloud_network_id" attribute.
        # At least try to find the group where the group name matches.
        elif not security_group and group['name'] == security_group_name:
            security_group = group
    if not security_group:
        pytest.fail("Cannot find security group.")

    # find out cloud subnet
    assert provider_data['cloud_subnets']
    cloud_subnet = None
    for cloud_subnet in provider_data['cloud_subnets']:
        if (cloud_subnet.get('cloud_network_id') == cloud_network['id'] and
                cloud_subnet['status'] in ('available', 'active')):
            break
    else:
        pytest.fail("Cannot find cloud subnet.")

    def _find_availability_zone_id():
        subnet_data = appliance.rest_api.get(provider_rest._href + '?attributes=cloud_subnets')
        for subnet in subnet_data['cloud_subnets']:
            if subnet['id'] == cloud_subnet['id'] and 'availability_zone_id' in subnet:
                return subnet['availability_zone_id']
        return False

    # find out availability zone
    availability_zone_id = None
    if provisioning.get('availability_zone'):
        availability_zone_entities = appliance.rest_api.collections.availability_zones.find_by(
            name=provisioning['availability_zone'])
        if availability_zone_entities and availability_zone_entities[0].ems_id == flavor.ems_id:
            availability_zone_id = availability_zone_entities[0].id
    if not availability_zone_id and 'availability_zone_id' in cloud_subnet:
        availability_zone_id = cloud_subnet['availability_zone_id']
    if not availability_zone_id:
        availability_zone_id, _ = wait_for(
            _find_availability_zone_id, num_sec=100, delay=5,
            message="availability_zone present")

    # find out cloud tenant
    cloud_tenant_id = None
    tenant_name = provisioning.get('cloud_tenant')
    if tenant_name:
        for tenant in provider_data.get('cloud_tenants', []):
            if (tenant['name'] == tenant_name and
                    tenant['enabled'] and
                    tenant['ems_id'] == flavor.ems_id):
                cloud_tenant_id = tenant['id']

    provision_data = {
        "version": "1.1",
        "template_fields": {
            "guid": image_guid
        },
        "vm_fields": {
            "vm_name": vm_name,
            "instance_type": flavor.id,
            "request_type": "template",
            "placement_auto": False,
            "cloud_network": cloud_network['id'],
            "cloud_subnet": cloud_subnet['id'],
            "placement_availability_zone": availability_zone_id,
            "security_groups": security_group['id'],
            "monitoring": "basic"
        },
        "requester": {
            "user_name": "admin",
            "owner_first_name": "Administrator",
            "owner_last_name": "Administratorovich",
            "owner_email": "*****@*****.**",
            "auto_approve": True,
        },
        "tags": {},
        "additional_values": {},
        "ems_custom_attributes": {},
        "miq_custom_attributes": {}
    }
    if cloud_tenant_id:
        provision_data['vm_fields']['cloud_tenant'] = cloud_tenant_id

    request.addfinalizer(lambda: VM.factory(vm_name, provider).cleanup_on_provider())
    response = appliance.rest_api.collections.provision_requests.action.create(
        **provision_data)[0]
    assert_response(appliance)
    provision_request = appliance.collections.requests.instantiate(
        description=response.description)
    provision_request.wait_for_request()
    assert provision_request.is_succeeded(), (
        "Provisioning failed with the message {}".format(provision_request.rest.message))
    wait_for(lambda: provider.mgmt.does_vm_exist(vm_name), num_sec=1000, delay=5,
             message="VM {} becomes visible".format(vm_name))
def template(request, vm_template_name, provider):
    logger.info("Starting template fixture")
    return VM.factory(vm_template_name, provider)
def vmware_vm(request, virtualcenter_provider):
    vm = VM.factory(random_vm_name("control"), virtualcenter_provider)
    vm.create_on_provider(find_in_cfme=True)
    request.addfinalizer(vm.delete_from_provider)
    return vm