def docs_info():
    """Return the documentation titles expected for the current appliance version."""
    docs_pre_5401 = [
        'Control',
        'Lifecycle and Automation',
        'Quick Start',
        'Settings And Operations',
        'Insight',
        'Integration Services'
    ]
    docs_pre_55012 = [
        'Insight',
        'Control',
        'Lifecycle and Automation',
        'REST API',
        'SOAP API',
        'User',
        'Settings and Operations'
    ]
    docs_downstream = [
        'Monitoring Alerts Reporting',
        'General Configuration',
        'Virtual Machines Hosts',
        'Methods For Automation',
        'Infrastructure Inventory',
        'Providers',
        'Scripting Actions',
        'Defining Policies Profiles'
    ]
    if version.current_version() < "5.4.0.1":
        return docs_pre_5401
    if version.current_version() < "5.5.0.12":
        return docs_pre_55012
    if version.appliance_is_downstream():
        return docs_downstream
    # Upstream version has no docs
    return []
def test_evmserverd_start_twice(ssh_client):
    """If evmserverd start is ran twice, it will then tell that it is already running.

    Steps:
        * Stop the evmserverd using ``service evmserverd stop``.
        * Start the evmserverd using ``service evmserverd start`` command.
        * Assert that the output of the previous command states "Running EVM in background".
        * Start the evmserverd using ``service evmserverd start`` command.
        * Assert that the output of the previous command states "EVM is already running".
        * Extract the PID of the evmserverd from the output from the last command.
        * Verify the process with such PID exists ``kill -0 $PID``.
    """
    assert ssh_client.run_command("service evmserverd stop").rc == 0
    # Start first time
    res = ssh_client.run_command("service evmserverd start")
    # 5.5+ appliances use systemd, which reports the start differently
    if current_version() < "5.5":
        assert "running evm in background" in res.output.lower()
    else:
        assert "started evm server daemon" in res.output.lower()
    assert res.rc == 0
    # Start second time
    res = ssh_client.run_command("service evmserverd start")
    if current_version() < "5.5":
        assert "evm is already running" in res.output.lower()
    assert res.rc == 0
    # Verify the process is running -- the PID format is version-specific
    if current_version() < "5.5":
        pid_match = re.search(r"\(PID=(\d+)\)", res.output)
    else:
        pid_match = re.search(r"Main PID: (\d+)", res.output)
    assert pid_match is not None
    pid = int(pid_match.groups()[0])
    # kill -0 sends no signal; it only checks the process exists
    assert ssh_client.run_command("kill -0 {}".format(pid)).rc == 0
def test_retire_service_future(self, rest_api, services):
    """Test retiring a service

    Prerequisities:
        * An appliance with ``/api`` available.

    Steps:
        * Retrieve list of entities using GET /api/services , pick the first one
        * POST /api/service/<id> (method ``retire``) with the ``retire_date``

    Metadata:
        test_flag: rest
    """
    assert "retire" in rest_api.collections.services.action.all
    retire_service = services[0]
    # Retire 5 days in the future, warn 4 days before
    date = (datetime.datetime.now() + datetime.timedelta(days=5)).strftime('%m/%d/%y')
    future = {
        "date": date,
        "warn": "4",
    }
    date_before = retire_service.updated_at
    retire_service.action.retire(future)

    def _finished():
        # Any update to the record signals the retirement request was processed
        retire_service.reload()
        if retire_service.updated_at > date_before:
            return True
        return False

    wait_for(_finished, num_sec=600, delay=5, message="REST automation_request finishes")


@pytest.mark.uncollectif(lambda: version.current_version() < '5.5')
def test_set_service_owner(self, rest_api, services):
    """Set the owner of a single service via its own set_ownership action."""
    if "set_ownership" not in rest_api.collections.services.action.all:
        pytest.skip("Set owner action for service is not implemented in this version")
    service = services[0]
    user = rest_api.collections.users.get(userid='admin')
    data = {
        "owner": {"href": user.href}
    }
    service.action.set_ownership(data)
    service.reload()
    assert hasattr(service, "evm_owner")
    assert service.evm_owner.userid == user.userid


@pytest.mark.uncollectif(lambda: version.current_version() < '5.5')
def test_set_services_owner(self, rest_api, services):
    """Set the owner of several services at once via the collection action."""
    if "set_ownership" not in rest_api.collections.services.action.all:
        pytest.skip("Set owner action for service is not implemented in this version")
    data = []
    user = rest_api.collections.users.get(userid='admin')
    for service in services:
        tmp_data = {
            "href": service.href,
            "owner": {"href": user.href}
        }
        data.append(tmp_data)
    rest_api.collections.services.action.set_ownership(*data)
    for service in services:
        service.reload()
        assert hasattr(service, "evm_owner")
        assert service.evm_owner.userid == user.userid
def get_current_menu_state(self):
    """Returns the current menu state

    This function returns what each level of the menu is pointing to, or None,
    if that level of menu is unused. Future work could possibly see this
    method using recursion to allow unlimited levels of menu to be used,
    however it is unlikely that more than 3 will ever be used.
    """
    # lev[0..2]: top / second / third level menu text, None when unused
    lev = [None, None, None]
    lev[0] = (sel.text(self.CURRENT_TOP_MENU).encode("utf-8").strip())
    # Old-style UI (pre 5.6.0.1 and upstream/LATEST) has its own locator
    if version.current_version() < "5.6.0.1" or version.current_version() == version.LATEST:
        try:
            lev[1] = sel.text("//nav[contains(@class, 'navbar')]//ul/li[@class='active']/a") \
                .encode("utf-8").strip()
        except NoSuchElementException:
            # No active second-level item
            pass
    else:
        lev[1] = sel.text("{}{}".format(
            self.CURRENT_TOP_MENU, self.ACTIVE_LEV)).encode("utf-8").strip()
        try:
            # Third level only exists in the new-style menu
            lev[2] = sel.text("{}{}{}".format(
                self.CURRENT_TOP_MENU, self.ACTIVE_LEV, self.ACTIVE_LEV)).encode(
                "utf-8").strip()
        except NoSuchElementException:
            pass
    return lev
def all_possible_roles():
    """Return the server roles applicable to the current appliance version.

    Starts from the full role list in the configuration and removes the roles
    that do not exist on the running version.

    Returns:
        list: role names valid for the current appliance version.
    """
    # Copy first -- the original code called .remove() directly on
    # server_roles_conf['all'], mutating the shared config list so every
    # later caller saw a shrinking role list.
    roles = list(server_roles_conf['all'])
    # String literals for version comparisons, consistent with the rest of
    # the codebase ("5.6", not the float 5.6).
    if version.current_version() < "5.6":
        roles.remove('git_owner')
        roles.remove('websocket')
    if version.current_version() >= "5.7":
        roles.remove('database_synchronization')
    return roles
def create(self):
    """Create this catalog item through the UI.

    Walks the sequential Add forms: provider type first, then basic info,
    entry points, and (when provisioning data is given) the Request Info tab.
    """
    # Create has sequential forms, the first is only the provider type
    navigate_to(self, 'Add')
    # For element not found exception (To be removed)
    sel.sleep(5)
    sel.select("//select[@id='st_prov_type']",
               self.provider_type or self.item_type or 'Generic')
    sel.wait_for_element(basic_info_form.name_text)
    # Fall back to placeholder objects when no catalog/dialog was supplied
    catalog = fakeobject_or_object(self.catalog, "name", "Unassigned")
    dialog = fakeobject_or_object(self.dialog, "name", "No Dialog")

    # Need to provide the (optional) provider name to the form, not the object
    provider_name = None
    provider_required_types = ['AnsibleTower', 'Orchestration']
    if self.item_type in provider_required_types \
            or self.provider_type in provider_required_types:
        provider_name = self.provider.name

    # For tests where orchestration template is None
    orch_template = None
    if self.orch_template:
        orch_template = self.orch_template.template_name

    fill(basic_info_form, {'name_text': self.name,
                           'description_text': self.description,
                           'display_checkbox': self.display_in,
                           'select_catalog': catalog.name,
                           'select_dialog': dialog.name,
                           'select_orch_template': orch_template,
                           'select_provider': provider_name,
                           'select_config_template': self.config_template})
    # Provider-backed item types do not take a provisioning entry point
    if not (self.item_type in provider_required_types):
        sel.click(basic_info_form.field_entry_point)
        # The tree widget changed in 5.7
        if version.current_version() < "5.7":
            dynamic_tree.click_path("Datastore", self.domain, "Service",
                                    "Provisioning", "StateMachines",
                                    "ServiceProvision_Template", "default")
        else:
            entry_tree.click_path("Datastore", self.domain, "Service",
                                  "Provisioning", "StateMachines",
                                  "ServiceProvision_Template", "default")
        sel.click(basic_info_form.apply_btn)
    # 5.7+ AnsibleTower items additionally need a retirement entry point
    if version.current_version() >= "5.7" and self.item_type == "AnsibleTower":
        sel.click(basic_info_form.retirement_entry_point)
        entry_tree.click_path("Datastore", self.domain, "Service", "Retirement",
                              "StateMachines", "ServiceRetirement", "Generic")
        sel.click(basic_info_form.apply_btn)
    # Request Info is only filled when full provisioning data is available
    if self.catalog_name is not None \
            and self.provisioning_data is not None \
            and not isinstance(self.provider, NoneType):
        tabstrip.select_tab("Request Info")
        tabstrip.select_tab("Catalog")
        template = template_select_form.template_table.find_row_by_cells({
            'Name': self.catalog_name,
            'Provider': self.provider.name
        })
        sel.click(template)
        request_form.fill(self.provisioning_data)
    sel.click(template_select_form.add_button)
def resolve_blocker(self, blocker, version=None, ignore_bugs=None, force_block_streams=None):
    """Resolve a blocker to the concrete bug variant relevant for ``version``.

    Args:
        blocker: Bug id, or an already-resolved ``BugWrapper``.
        version: Appliance version to resolve against; defaults to the
            currently running one (``LATEST`` maps to the product's latest).
        ignore_bugs: Optional iterable of bug ids to skip.
        force_block_streams: Streams in which the bug blocks unconditionally
            even when no matching variant is found.

    Returns:
        The matching bug variant, or ``None`` when nothing blocks.
    """
    # ignore_bugs is mutable but is not mutated here! Same force_block_streams
    force_block_streams = force_block_streams or []
    ignore_bugs = set([]) if not ignore_bugs else ignore_bugs
    # BUGFIX: the original checked ``isinstance(id, BugWrapper)`` -- the
    # builtin ``id`` function, never a BugWrapper -- so an already-resolved
    # blocker was always re-fetched from Bugzilla.
    if isinstance(blocker, BugWrapper):
        bug = blocker
    else:
        bug = self.get_bug(blocker)
    if version is None:
        version = current_version()
    if version == LATEST:
        version = bug.product.latest_version
    variants = self.get_bug_variants(bug)
    filtered = set([])
    # Only the major.minor series matters for matching (e.g. "5.6")
    version_series = ".".join(str(version).split(".")[:2])
    for variant in variants:
        if variant.id in ignore_bugs:
            continue
        if variant.version is not None and variant.version > version:
            continue
        if ((variant.version is not None and variant.target_release is not None) and (
                variant.version.is_in_series(version_series) or
                variant.target_release.is_in_series(version_series))):
            filtered.add(variant)
        elif variant.release_flag is not None:
            if version.is_in_series(variant.release_flag):
                # Simple case
                filtered.add(variant)
            else:
                logger.info(
                    "Ignoring bug #%s, appliance version not in bug release flag",
                    variant.id)
        else:
            logger.info("No release flags, wrong versions, ignoring %s", variant.id)
    if not filtered:
        # No appropriate bug was found
        for forced_stream in force_block_streams:
            # Find out if we force this bug.
            if current_version().is_in_series(forced_stream):
                return bug
        else:
            # No bug, yipee :)
            return None
    # First, use versions
    for bug in filtered:
        if ((bug.version is not None and bug.target_release is not None) and
                check_fixed_in(bug.fixed_in, version_series) and (
                    bug.version.is_in_series(version_series) or
                    bug.target_release.is_in_series(version_series))):
            return bug
    # Otherwise prefer release_flag
    for bug in filtered:
        if bug.release_flag and version.is_in_series(bug.release_flag):
            return bug
    return None
def group_data():
    """Return the group->role page mapping from config, trimmed for this version."""
    roles = cfme_data.get("group_roles", {})

    # These roles are not present on 5.2 appliance in these groups
    if version.current_version() < "5.3":
        _remove_page(roles, "evmgroup-super_administrator", ["clouds_tenants"])
        _remove_page(roles, "evmgroup-user",
                     ["services_requests", "infrastructure_requests"])
    if version.current_version() < "5.4":
        for absent_page in ("clouds_stacks", "infrastructure_config_management"):
            _remove_from_all(roles, absent_page)
    return roles
def initialize(self):
    """Initializes the menu object by collapsing the grafted tree items onto the tree"""
    # Build the branch tree lazily, only on first use
    if not self._branches:
        self._branches = self._branch_convert(self.sections)
        self.add_branch('toplevel', self._branches)
        # Drain any branches grafted before initialization
        while self._branch_stack:
            name, branches = self._branch_stack.pop(0)
            self.add_branch(name, branches)
    # Old-style UI (pre 5.6.0.1 and upstream/LATEST) keeps the top menu in #maintab
    if version.current_version() < "5.6.0.1" or version.current_version() == version.LATEST:
        self.CURRENT_TOP_MENU = "//ul[@id='maintab']/li[not(contains(@class, 'drop'))]/a[2]"
    else:
        self.CURRENT_TOP_MENU = "{}{}".format(self.ROOT, self.ACTIVE_LEV)
def visible_pages(self):
    """Return a list of all the menu pages currently visible top- and second-level pages

    Mainly useful for RBAC testing
    """
    # Old-style UI applies before 5.6.0.1 and on upstream/LATEST appliances
    uses_old_ui = (version.current_version() < "5.6.0.1" or
                   version.current_version() == version.LATEST)
    displayed_menus = self._old_visible_pages() if uses_old_ui else self._new_visible_pages()
    return sorted(
        self.reverse_lookup(*displayed) for displayed in displayed_menus if displayed)
def form(self):
    """Assemble the credentials TabStripForm appropriate for the current version."""
    # Fields shown above the tab strip
    fields = [
        ('token_secret_55', Input('bearer_token')),
        ('google_service_account', Input('service_account')),
    ]
    tab_fields = {
        # The "Default" tab is also used when the form renders without tabs
        ("Default", ('default_when_no_tabs', )): [
            ('default_principal', Input("default_userid")),
            ('default_secret', Input("default_password")),
            ('default_verify_secret', Input("default_verify")),
            # Token inputs were merged into the default fields in 5.6
            ('token_secret', {
                version.LOWEST: Input('bearer_password'),
                '5.6': Input('default_password')
            }),
            ('token_verify_secret', {
                version.LOWEST: Input('bearer_verify'),
                '5.6': Input('default_verify')
            }),
        ],
        "RSA key pair": [
            ('ssh_user', Input("ssh_keypair_userid")),
            ('ssh_key', FileInput("ssh_keypair_password")),
        ],
        "C & U Database": [
            ('candu_principal', Input("metrics_userid")),
            ('candu_secret', Input("metrics_password")),
            ('candu_verify_secret', Input("metrics_verify")),
        ],
    }
    fields_end = [
        ('validate_btn', form_buttons.validate),
    ]
    # The AMQP tab was renamed to Events in 5.6
    if version.current_version() >= '5.6':
        amevent = "Events"
    else:
        amevent = "AMQP"
    tab_fields[amevent] = []
    # 5.6+ adds an event stream selection radio on top of the AMQP inputs
    if version.current_version() >= "5.6":
        tab_fields[amevent].append(('event_selection', Radio('event_stream_selection')))
    tab_fields[amevent].extend([
        ('amqp_principal', Input("amqp_userid")),
        ('amqp_secret', Input("amqp_password")),
        ('amqp_verify_secret', Input("amqp_verify")),
    ])
    return TabStripForm(fields=fields, tab_fields=tab_fields, fields_end=fields_end)
def test_timeprofile_description_required_error_validation():
    """Verify the 'Required' validation on an empty time profile description."""
    tp = st.Timeprofile(
        description=None,
        scope='Current User',
        timezone="(GMT-10:00) Hawaii")
    if version.current_version() > "5.7":
        # 5.8+ validates inline (angular) and dims the submit button
        tp.create(cancel=True)
        assert tp.timeprofile_form.description.angular_help_block == "Required"
        # The submit button is labelled Save vs Add depending on version
        if version.current_version() > "5.8":
            assert form_buttons.save.is_dimmed
        else:
            assert form_buttons.add.is_dimmed
    else:
        # Older versions raise a flash error instead
        with error.expected("Description is required"):
            tp.create()
def wait_analysis_finished():
    # On 5.5+ the REST entity caches attributes; refresh before reading
    if current_version() > "5.5":
        vm.api.reload()
    try:
        # Drop tzinfo so the comparison against the naive timestamp works
        return vm.api.last_scan_on.replace(tzinfo=None) >= switched_on
    except AttributeError:
        # last_scan_on not set yet -> the analysis has not finished
        return False
def create(self):
    """Create this catalog item through the UI (pre-5.7 navigation flow)."""
    # Provider type is part of the navigation context for the Add form
    sel.force_navigate('catalog_item_new', context={'provider_type': self.item_type})
    sel.wait_for_element(basic_info_form.name_text)
    fill(basic_info_form, {'name_text': self.name,
                           'description_text': self.description,
                           'display_checkbox': self.display_in,
                           'select_catalog': self.catalog,
                           'select_dialog': self.dialog,
                           'select_orch_template': self.orch_template,
                           'select_provider': self.provider_type})
    # 5.4+ requires an explicit provisioning entry point in the Datastore tree
    if current_version() >= "5.4":
        sel.click(basic_info_form.field_entry_point)
        dynamic_tree.click_path("Datastore", self.domain, "Service", "Provisioning",
                                "StateMachines", "ServiceProvision_Template", "default")
        sel.click(basic_info_form.apply_btn)
    # Request Info is only filled when a catalog item template is involved
    if(self.catalog_name is not None):
        tabstrip.select_tab("Request Info")
        template = template_select_form.template_table.find_row_by_cells({
            'Name': self.catalog_name,
            'Provider': self.provider
        })
        sel.click(template)
        request_form.fill(self.provisioning_data)
        sel.click(template_select_form.add_button)
def test_provider_crud(request, rest_api, from_detail):
    """Test the CRUD on provider using REST API.
    Steps:
        * POST /api/providers (method ``create``) <- {"hostname":..., "name":..., "type":
            "EmsVmware"}
        * Remember the provider ID.
        * Delete it either way:
            * DELETE /api/providers/<id>
            * POST /api/providers (method ``delete``) <- list of dicts containing hrefs to the
                providers, in this case just list with one dict.
    Metadata:
        test_flag: rest
    """
    if "create" not in rest_api.collections.providers.action.all:
        pytest.skip("Create action is not implemented in this version")

    # The provider type string changed with the 5.5 provider refactoring
    provider_type = ("EmsVmware" if current_version() < "5.5"
                     else "ManageIQ::Providers::Vmware::InfraManager")
    provider = rest_api.collections.providers.action.create(
        hostname=fauxfactory.gen_alphanumeric(),
        name=fauxfactory.gen_alphanumeric(),
        type=provider_type,
    )[0]

    # Delete either from the entity itself or via the collection action
    if from_detail:
        provider.action.delete()
    else:
        rest_api.collections.providers.action.delete(provider)
    provider.wait_not_exists(num_sec=30, delay=0.5)
def edit_schema(self, add_fields=None, remove_fields=None):
    """Edit this automate class' schema.

    Removals are processed first, then additions, and the form is saved once.

    Args:
        add_fields: Iterable of field objects to add.
        remove_fields: Iterable of field objects to remove.
    """
    sel.force_navigate("automate_explorer_schema_edit", context={'tree_item': self})
    for remove_field in remove_fields or []:
        f = remove_field.get_form()
        fill(f, {}, action=f.remove_entry_button, action_always=True)
        # Older versions pop a confirmation alert on removal
        if version.current_version() < "5.5.0.7":
            sel.handle_alert()
    for add_field in add_fields or []:
        sel.click(self.schema_edit_page.add_field_btn)
        f = add_field.get_form(blank=True)
        fill(f, {'name_text': add_field.name,
                 'type_select': add_field.type_,
                 'data_type_select': add_field.data_type,
                 'default_value_text': add_field.default_value,
                 'description_text': add_field.description,
                 'sub_cb': add_field.sub,
                 'collect_text': add_field.collect,
                 'message_text': add_field.message,
                 'on_entry_text': add_field.on_entry,
                 'on_exit_text': add_field.on_exit,
                 'max_retries_text': add_field.max_retries,
                 'max_time_text': add_field.max_time},
             action=f.add_entry_button)
    sel.click(form_buttons.save)
    flash.assert_success_message('Schema for Automate Class "%s" was saved' % self.name)
def _filter_restricted_version(self, provider):
    """ Filters by yaml version restriction; not applied if SSH is not available """
    if self.restrict_version:
        # TODO
        # get rid of this since_version hotfix by translating since_version
        # to restricted_version; in addition, restricted_version should turn into
        # "version_restrictions" and it should be a sequence of restrictions with operators
        # so that we can create ranges like ">= 5.6" and "<= 5.8"
        version_restrictions = []
        since_version = provider.data.get('since_version')
        if since_version:
            version_restrictions.append('>= {}'.format(since_version))
        restricted_version = provider.data.get('restricted_version')
        if restricted_version:
            version_restrictions.append(restricted_version)
        for restriction in version_restrictions:
            for op, comparator in ProviderFilter._version_operator_map.items():
                # split string by op; if the split works, version won't be empty
                head, op, ver = restriction.partition(op)
                if not ver:  # This means that the operator was not found
                    continue
                try:
                    curr_ver = version.current_version()
                except Exception:
                    # Narrowed from a bare ``except:`` (which would also
                    # swallow SystemExit/KeyboardInterrupt); any failure to
                    # obtain the version means "cannot apply restriction".
                    return True
                if not comparator(curr_ver, ver):
                    return False
                break
            else:
                raise Exception('Operator not found in {}'.format(restriction))
    return None
def _rpms_present_packages():
    """Return the packages expected to be installed on the current appliance.

    Starts from every package that can ever ship and filters out the ones
    that do not apply to the running version.
    """
    # autogenerate the rpms to test based on the current appliance version
    # and the list of possible packages that can be installed
    current_version = version.current_version()
    possible_packages = [
        "cfme",
        "cfme-appliance",
        "cfme-lib",
        "nfs-utils",
        "nfs-utils-lib",
        "libnfsidmap",
        "mingw32-cfme-host",
        "ipmitool",
        "prince",
        "netapp-manageability-sdk",
        "rhn-client-tools",
        "rhn-check",
        "rhnlib",
    ]

    def package_filter(package):
        package_tests = [
            # stopped shipping this with 5.4
            package == "mingw32-cfme-host" and current_version >= "5.4",
            # stopped shipping these with 5.5 (literal normalized from the
            # number 5.5 to "5.5" for consistency with the other comparisons)
            package in ("cfme-lib", "netapp-manageability-sdk") and current_version >= "5.5",
            # nfs-utils-lib was superseded by libnfsidmap in el7/cfme 5.5
            # so filter out nfs-utils-lib on 5.5 and up, and libnfsidmap below 5.5
            package == "nfs-utils-lib" and current_version >= "5.5",
            package == "libnfsidmap" and current_version < "5.5",
        ]
        # If any of the package tests eval'd to true, filter this package out
        return not any(package_tests)

    return filter(package_filter, possible_packages)
def test_group_roles(
        request, configure_aws_iam_auth_mode, group_name, group_data, setup_first_provider):
    """Basic default AWS_IAM group role RBAC test

    Validates expected menu and submenu names are present for default
    AWS IAM groups
    """
    # Always restore the admin session once the test is done
    request.addfinalizer(login_admin)

    # This should be removed but currently these roles are subject to a bug
    if version.current_version() >= '5.4' and group_name in ['evmgroup-administrator',
            'evmgroup-approver', 'evmgroup-auditor', 'evmgroup-operator', 'evmgroup-security',
            'evmgroup-support', 'evmgroup-user']:
        pytest.skip("This role currently fails this test")

    try:
        # IAM credentials are stored under "<group>_aws_iam" in the credentials file
        iam_group_name = group_name + '_aws_iam'
        username = credentials[iam_group_name]['username']
        password = credentials[iam_group_name]['password']
    except KeyError:
        pytest.fail('No match in credentials file for group "{}"'.format(iam_group_name))

    login(simple_user(username, password))
def test_providers_summary(soft_assert, setup_a_provider):
    """Checks some informations about the provider. Does not check memory/frequency as there is
    presence of units and rounding."""
    path = ["Configuration Management", "Providers", "Providers Summary"]
    report = CannedSavedReport.new(path)
    for provider in report.data.rows:
        # Skip cloud providers -- this check targets infrastructure only
        if any(ptype in provider["MS Type"] for ptype in {"ec2", "openstack"}):
            continue
        provider_fake_obj = Provider(name=provider["Name"])
        sel.force_navigate("infrastructure_provider", context={"provider": provider_fake_obj})
        # (summary-page label, report column) pair; the label changed in 5.5
        hostname = version.pick({
            version.LOWEST: ("Hostname", "Hostname"),
            "5.5": ("Host Name", "Hostname")})
        soft_assert(
            provider_props(hostname[0]) == provider[hostname[1]],
            "Hostname does not match at {}".format(provider["Name"]))
        if version.current_version() < "5.4":
            # In 5.4, hostname and IP address are shared under Hostname (above)
            soft_assert(
                provider_props("IP Address") == provider["IP Address"],
                "IP Address does not match at {}".format(provider["Name"]))
        soft_assert(
            provider_props("Aggregate Host CPU Cores") == provider["Total Number of Logical CPUs"],
            "Logical CPU count does not match at {}".format(provider["Name"]))
        soft_assert(
            provider_props("Aggregate Host CPUs") == provider["Total Number of Physical CPUs"],
            "Physical CPU count does not match at {}".format(provider["Name"]))
def get_vm_object(vm_name):
    """Looks up the CFME database for the VM.

    Args:
        vm_name: VM name
    Returns:
        If found, :py:class:`utils.miq_soap.MiqVM` for 5.4 and
        :py:class:`utils.api.Entity` for 5.5 If not, `None`
    """
    if current_version() < "5.5":
        vm_table = store.current_appliance.db['vms']
        for vm in store.current_appliance.db.session.query(vm_table.name, vm_table.guid)\
                .filter(vm_table.template == False):  # NOQA
            # Previous line is ok, if you change it to `is`, it won't work!
            if vm.name == vm_name:
                return MiqVM(vm.guid)
        else:
            return None
    else:
        # Consistency fix: use the module-level ``store`` (already used in the
        # branch above) instead of reaching through ``pytest.store``.
        rest_api = store.current_appliance.rest_api
        results = rest_api.collections.vms.find_by(name=vm_name)
        # Truthiness check instead of len(...) > 0
        if results:
            return results[0]
        return None
def domain(request):
    """Fixture: a freshly created, enabled automate Domain (None before 5.3)."""
    if version.current_version() < "5.3":
        # Domains do not exist on appliances older than 5.3
        return None
    new_domain = Domain(name=fauxfactory.gen_alphanumeric(), enabled=True)
    new_domain.create()
    # Delete only if still present at teardown
    request.addfinalizer(lambda: new_domain.delete() if new_domain.exists() else None)
    return new_domain
def test_relationships_tables(provider, data_set):
    """This test verifies the integrity of the Relationships table.
    clicking on each field in the Relationships table takes the user
    to either Summary page where we verify that the field that appears
    in the Relationships table also appears in the Properties table,
    or to the page where the number of rows is equal to the number
    that is displayed in the Relationships table.
    """
    if current_version() < "5.7" and data_set.obj == Template:
        pytest.skip('Templates are not exist in CFME version smaller than 5.7. skipping...')
    rows = navigate_and_get_rows(provider, data_set.obj, data_set.paged_tbl, 1)
    if not rows:
        pytest.skip('No objects to test for relationships for {}'.format(data_set.obj.__name__))
    row = rows[-1]

    # Each object type is constructed from different row cells
    obj_type = data_set.obj
    if obj_type is Container:
        instance = obj_type(row.name.text, row.pod_name.text)
    elif obj_type is ImageRegistry:
        instance = obj_type(row.host.text, provider)
    elif obj_type is Image:
        instance = obj_type(row.name.text, row.tag.text, provider)
    else:
        instance = obj_type(row.name.text, provider)

    check_relationships(instance)
def provision_success_message(name):
    """Return the flash message expected after a successful provisioning."""
    if version.current_version() >= '5.7':
        # 5.7 success message includes catalog item name in brackets
        return 'Service [{}] Provisioned Successfully'.format(name)
    return 'Service Provisioned Successfully'
def test_group_roles(configure_ldap_auth_mode, group_name, group_data, setup_first_provider): """Basic default LDAP group role RBAC test Validates expected menu and submenu names are present for default LDAP group roles """ # This should be removed but currently these roles are subject to a bug if version.current_version() >= '5.4' and group_name in ['evmgroup-administrator', 'evmgroup-approver', 'evmgroup-auditor', 'evmgroup-operator', 'evmgroup-security', 'evmgroup-support', 'evmgroup-user']: pytest.skip("This role currently fails this test") try: username = credentials[group_name]['username'] password = credentials[group_name]['password'] except KeyError: pytest.fail('No match in credentials file for group "{}"'.format(group_name)) login(simple_user(username, password)) assert set(menu.nav.visible_pages()) == set(group_data)
def test_current_user_login_delete(request):
    """Test for deleting current user login.

    Steps:
        * Login as Admin user
        * Create a new user
        * Login with the new user
        * Try deleting the user
    """
    group_user = Group("EvmGroup-super_administrator")
    user = User(
        name='user' + fauxfactory.gen_alphanumeric(),
        credential=new_credential(),
        email='*****@*****.**',
        group=group_user)
    user.create()
    # Clean up the user and restore the admin session afterwards
    request.addfinalizer(user.delete)
    request.addfinalizer(login.login_admin)
    with user:
        if version.current_version() >= '5.7':
            # 5.7+ greys the menu item out instead of raising a flash error
            navigate_to(user, 'Details')
            menu_item = ('Configuration', 'Delete this User')
            assert tb.exists(*menu_item) and tb.is_greyed(*menu_item), "Delete User is not dimmed"
        else:
            with error.expected("Current EVM User \"{}\" cannot be deleted".format(user.name)):
                user.delete()
def test_bundles_in_bundle(catalog_item):
    """Create three nested catalog bundles and order the outermost one."""
    bundle_name = "first_" + fauxfactory.gen_alphanumeric()
    catalog_bundle = CatalogBundle(name=bundle_name, description="catalog_bundle",
                                   display_in=True, catalog=catalog_item.catalog,
                                   dialog=catalog_item.dialog)
    catalog_bundle.create([catalog_item.name])
    sec_bundle_name = "sec_" + fauxfactory.gen_alphanumeric()
    sec_catalog_bundle = CatalogBundle(name=sec_bundle_name, description="catalog_bundle",
                                       display_in=True, catalog=catalog_item.catalog,
                                       dialog=catalog_item.dialog)
    # The second bundle wraps the first one
    sec_catalog_bundle.create([bundle_name])
    third_bundle_name = "third_" + fauxfactory.gen_alphanumeric()
    third_catalog_bundle = CatalogBundle(name=third_bundle_name, description="catalog_bundle",
                                         display_in=True, catalog=catalog_item.catalog,
                                         dialog=catalog_item.dialog)
    # The third bundle wraps both previous bundles
    third_catalog_bundle.create([bundle_name, sec_bundle_name])
    service_catalogs = ServiceCatalogs(third_bundle_name)
    service_catalogs.order()
    flash.assert_no_errors()
    logger.info('Waiting for cfme provision request for service %s', bundle_name)
    row_description = third_bundle_name
    cells = {'Description': row_description}
    row, __ = wait_for(requests.wait_for_request, [cells, True],
                       fail_func=requests.reload, num_sec=900, delay=20)
    # Success message differs between 5.6 and 5.7
    if version.current_version() >= '5.7':
        assert 'Service [{}] Provisioned Successfully'.format(third_bundle_name)\
            in row.last_message.text
    else:
        assert row.last_message.text == 'Request complete'
def pytest_runtest_protocol(item):
    """Artifactor hook-wrapper around the test protocol.

    Announces appliance session info once per session (lazily, on the first
    test), then fires the pre_start/start hooks for this test before yielding
    to the actual protocol.
    """
    global session_ver
    global session_build
    global session_stream

    # Fire session_info only once, when the first test comes through
    if not session_ver:
        session_ver = str(version.current_version())
        session_build = store.current_appliance.build
        session_stream = store.current_appliance.version.stream()
        art_client.fire_hook('session_info', version=session_ver, build=session_build,
                             stream=session_stream)

    name, location = get_test_idents(item)
    tier = item.get_marker('tier')
    if tier:
        tier = tier.args[0]
    try:
        params = item.callspec.params
        param_dict = {p: get_name(v) for p, v in params.iteritems()}
    except Exception:
        # Narrowed from a bare ``except:`` -- unparametrized items simply
        # have no callspec; any other failure falls back to no params too.
        param_dict = {}
    # This pre_start_test hook is needed so that filedump is able to make get the test
    # object set up before the logger starts logging. As the logger fires a nested hook
    # to the filedumper, and we can't specify order in riggerlib.
    art_client.fire_hook('pre_start_test', test_location=location, test_name=name,
                         slaveid=SLAVEID, ip=appliance_ip_address)
    art_client.fire_hook('start_test', test_location=location, test_name=name,
                         slaveid=SLAVEID, ip=appliance_ip_address, tier=tier,
                         param_dict=param_dict)
    yield
def _rpms_present_packages():
    """Return the packages expected to be installed on the current appliance.

    Starts from every package that can ever ship and filters out the ones
    that do not apply to the running version.
    """
    # autogenerate the rpms to test based on the current appliance version
    # and the list of possible packages that can be installed
    current_version = version.current_version()
    possible_packages = [
        'cfme',
        'cfme-appliance',
        'cfme-lib',
        'nfs-utils',
        'nfs-utils-lib',
        'libnfsidmap',
        'mingw32-cfme-host',
        'ipmitool',
        'prince',
        'netapp-manageability-sdk',
        'rhn-client-tools',
        'rhn-check',
        'rhnlib'
    ]

    def package_filter(package):
        package_tests = [
            # stopped shipping this with 5.4
            package == 'mingw32-cfme-host' and current_version >= '5.4',
            # stopped shipping these with 5.5 (literal normalized from the
            # number 5.5 to '5.5' for consistency with the other comparisons)
            package in ('cfme-lib', 'netapp-manageability-sdk') and current_version >= '5.5',
            # nfs-utils-lib was superseded by libnfsidmap in el7/cfme 5.5
            # so filter out nfs-utils-lib on 5.5 and up, and libnfsidmap below 5.5
            package == 'nfs-utils-lib' and current_version >= '5.5',
            package == 'libnfsidmap' and current_version < '5.5',
        ]
        # If any of the package tests eval'd to true, filter this package out
        return not any(package_tests)

    return filter(package_filter, possible_packages)
def test_evmserverd_stop(appliance):
    """Tests whether stopping the evmserverd really stops the CFME server processes.

    Steps:
        * Remember all server names from ``service evmserverd status`` command.
            * Or the bin/rake evm:status on 5.5+ since the systemd status does not show that,
                this applies also for next references to status.
        * Issue a ``service evmserverd stop`` command.
        * Periodically check output of ``service evmserverd status`` that all servers are stopped.
        * For 5.5+: Really call ``service evmserverd status`` and check that the mentions of
            stopping the service are present.
    """
    # The status column header was renamed in 5.8
    server_name_key = version.pick({
        version.LOWEST: 'Server Name',
        '5.8': 'Server'
    })
    server_names = {server[server_name_key] for server in appliance.ssh_client.status["servers"]}
    assert appliance.ssh_client.run_command("systemctl stop evmserverd").rc == 0

    @wait_for_decorator(timeout="2m", delay=5)
    def servers_stopped():
        # Re-read status every attempt; all known servers must report stopped
        status = {
            server[server_name_key]: server for server in appliance.ssh_client.status["servers"]
        }
        for server_name in server_names:
            if status[server_name]["Status"] != "stopped":
                return False
        return True

    if version.current_version() >= "5.5":
        # systemd should explicitly log the stop and the exit of the unit
        status = appliance.ssh_client.run_command("systemctl status evmserverd")
        assert "Stopped EVM server daemon" in status.output
        assert "code=exited" in status.output
def _num_vm_stat(self):
    """Return the number of instances, filtering by tenant where supported."""
    # Tenant filtering is only supported from 5.3 onwards
    filter_tenants = current_version() >= '5.3'
    return len(self._get_all_instances(filter_tenants))
yield finally: candu.disable_all() # Blow away all providers when done - collecting metrics for all of them is too much @pytest.yield_fixture(scope="module") def clean_setup_provider(request, provider): BaseProvider.clear_providers() setup_or_skip(request, provider) yield BaseProvider.clear_providers() @pytest.mark.uncollectif( lambda provider: current_version() < "5.7" and provider.type == 'gce') def test_metrics_collection(clean_setup_provider, provider, enable_candu): """Check the db is gathering collection data for the given provider Metadata: test_flag: metrics_collection """ metrics_tbl = store.current_appliance.db.client['metrics'] mgmt_systems_tbl = store.current_appliance.db.client[ 'ext_management_systems'] logger.info("Fetching provider ID for %s", provider.key) mgmt_system_id = store.current_appliance.db.client.session.query( mgmt_systems_tbl).filter(mgmt_systems_tbl.name == conf.cfme_data.get( 'management_systems', {})[provider.key]['name']).first().id
import pytest
from cfme.containers.provider import ContainersProvider
from cfme.configure.access_control import User
from utils import testgen
from utils.ansible import setup_ansible_script, run_ansible, \
    fetch_miq_ansible_module, create_tmp_directory, remove_tmp_files
from utils.appliance.implementations.ui import navigate_to
from utils.version import current_version

# Only collect on 5.7+ appliances with a containers provider set up
pytestmark = [
    pytest.mark.uncollectif(lambda: current_version() < "5.7"),
    pytest.mark.usefixtures('setup_provider')
]
pytest_generate_tests = testgen.generate([ContainersProvider], scope='module')

# Payloads for the ansible user create/update/delete scenarios
users_values_to_create = {'fullname': 'User One', 'name': 'userone'}
users_values_to_update = {'fullname': 'User One Edit', 'name': 'userone'}
user_to_delete = users_values_to_create.get('name')


@pytest.yield_fixture(scope='function')
def ansible_users():
    # Prepare a scratch dir with the miq ansible module, clean up afterwards
    create_tmp_directory()
    fetch_miq_ansible_module()
    yield
    remove_tmp_files()


@pytest.mark.polarion('CMP-10554')
# -*- coding: utf-8 -*- import fauxfactory import pytest from utils.version import current_version pytestmark = [ pytest.mark.ignore_stream("upstream"), pytest.mark.uncollectif(lambda: current_version() >= "5.3"), ] @pytest.fixture(scope="module") def backup_file_name(): return "/tmp/ae_backup_{}.xml".format(fauxfactory.gen_alphanumeric(32)) @pytest.yield_fixture(scope="module") def rake(ssh_client, backup_file_name): ssh_client.run_command("rm -f {}".format(backup_file_name)) ssh_client.run_rake_command( "evm:automate:backup FILE={}".format(backup_file_name)) yield lambda command: ssh_client.run_rake_command(command) ssh_client.run_rake_command("evm:automate:clear") ssh_client.run_rake_command( "evm:automate:restore FILE={}".format(backup_file_name)) def test_import_export_5_2(ssh_client, rake): """Test that import and export work for Control.
yield cg cg.delete() @pytest.yield_fixture(scope="module") def tag(category): tag = Tag(name=fauxfactory.gen_alpha(8).lower(), display_name=fauxfactory.gen_alphanumeric(length=32), category=category) tag.create() yield tag tag.delete() @pytest.mark.uncollectif( lambda location: location in {"clouds_tenants"} and current_version() < "5.4") def test_tag_item_through_selecting(request, location, tag): """Add a tag to an item with going through the details page. Prerequisities: * Have a tag category and tag created. * Be on the page you want to test. Steps: * Select any quadicon. * Select ``Policy/Edit Tags`` and assign the tag to it. * Click on the quadicon and verify the tag is assigned. (TODO) * Go back to the quadicon view and select ``Policy/Edit Tags`` and remove the tag. * Click on the quadicon and verify the tag is not present. (TODO) """ pytest.sel.force_navigate(location)
def go_to_grid(page):
    """Navigate to ``page`` and switch the toolbar to Grid View."""
    sel.force_navigate(page)
    tb.select('Grid View')


@pytest.yield_fixture(scope="module")
def set_cloud_provider_quad():
    """Disable the cloud-provider quadicon for the tests; restore afterwards."""
    visual.cloud_provider_quad = False
    yield
    visual.cloud_provider_quad = True


@pytest.mark.meta(blockers=[1267148])
@pytest.mark.parametrize('page', grid_pages, scope="module")
@pytest.mark.uncollectif(
    lambda page: page == "clouds_stacks" and current_version() < "5.4")
def test_grid_page_per_item(request, setup_a_provider, page, set_grid):
    """ Tests grid items per page

    Metadata:
        test_flag: visuals
    """
    # Re-navigate to the page once the test is done so state is reset.
    request.addfinalizer(lambda: go_to_grid(page))
    limit = visual.grid_view_limit
    sel.force_navigate(page)
    tb.select('Grid View')
    # Only assert when the page reports a record total and it is at least
    # one full page -- otherwise the per-page limit cannot be observed.
    if paginator.rec_total() is not None:
        if int(paginator.rec_total()) >= int(limit):
            assert int(paginator.rec_end()) == int(limit), \
                "Gridview Failed for page {}!".format(page)
class TestRESTSnapshots(object):
    """Tests actions with VM/instance snapshots using REST API"""

    def test_create_snapshot(self, vm_snapshot):
        """Creates VM/instance snapshot using REST API

        Metadata:
            test_flag: rest
        """
        vm, snapshot = vm_snapshot
        # ``get`` raises when no snapshot with that name exists, failing the test.
        vm.snapshots.get(name=snapshot.name)

    @pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])
    def test_delete_snapshot_from_detail(self, appliance, vm_snapshot, method):
        """Deletes VM/instance snapshot from detail using REST API

        Metadata:
            test_flag: rest
        """
        vm, snapshot = vm_snapshot
        if method == 'post':
            del_action = snapshot.action.delete.POST
        else:
            del_action = snapshot.action.delete.DELETE
        del_action()
        assert_response(appliance)
        wait_for(lambda: not vm.snapshots.find_by(name=snapshot.name),
                 num_sec=300, delay=5)

        # Deleting again must report failure.
        # this will fail once BZ1466225 is fixed
        del_action()
        assert_response(appliance, success=False)

    def test_delete_snapshot_from_collection(self, appliance, vm_snapshot):
        """Deletes VM/instance snapshot from collection using REST API

        Metadata:
            test_flag: rest
        """
        vm, snapshot = vm_snapshot
        vm.snapshots.action.delete.POST(snapshot)
        assert_response(appliance)
        wait_for(lambda: not vm.snapshots.find_by(name=snapshot.name),
                 num_sec=300, delay=5)

        # Deleting again must report failure.
        # this will fail once BZ1466225 is fixed
        vm.snapshots.action.delete.POST(snapshot)
        assert_response(appliance, success=False)

    @pytest.mark.uncollectif(lambda provider: not provider.one_of(
        InfraProvider) or current_version() < '5.8')
    def test_revert_snapshot(self, appliance, vm_snapshot):
        """Reverts VM/instance snapshot using REST API

        Metadata:
            test_flag: rest
        """
        __, snapshot = vm_snapshot

        task = snapshot.action.revert()
        assert_response(appliance)
        # TODO: https://github.com/ManageIQ/manageiq-api-client-python/pull/24
        # branch below can be removed once this PR is released
        if isinstance(task, dict):
            task = appliance.rest_api.get_entity('tasks', task['task_id'])

        def _revert_finished():
            task.reload()
            return task.state.lower() == 'finished'

        wait_for(_revert_finished, num_sec=300, delay=5)
        assert task.status.lower() == 'ok'
import fauxfactory

from cfme import test_requirements
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.common.vm import VM
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from utils import testgen
from utils.generators import random_vm_name
from utils.log import logger
from utils.rest import assert_response
from utils.version import current_version
from utils.wait import wait_for

# NOTE(review): ``pytest`` is referenced below but not imported in this chunk;
# presumably it is imported elsewhere in the file -- confirm.
# Snapshot tests only apply to 5.8+ appliances.
pytestmark = [
    pytest.mark.uncollectif(lambda: current_version() < '5.8'),
    pytest.mark.long_running,
    pytest.mark.tier(2),
    test_requirements.snapshot
]


def pytest_generate_tests(metafunc):
    """Parametrize the module's tests over VMware and OpenStack providers."""
    argnames, argvalues, idlist = testgen.providers_by_class(
        metafunc, [VMwareProvider, OpenStackProvider])
    testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope='module')
@pytest.fixture(scope='function')
def vm(request, a_provider, rest_api):
    # Don't use cfme.rest.vm because we don't need finalizer and delete vm after test.
    # Deploys a template on the provider, refreshes it, waits for the VM to
    # appear in the REST ``vms`` collection and returns the VM *name* (str).
    provider_rest = rest_api.collections.providers.get(name=a_provider.name)
    vm_name = deploy_template(
        a_provider.key,
        "test_rest_vm_{}".format(fauxfactory.gen_alphanumeric(length=6)))
    provider_rest.action.refresh()
    wait_for(
        lambda: len(rest_api.collections.vms.find_by(name=vm_name)) > 0,
        num_sec=600, delay=5)
    return vm_name


@pytest.mark.tier(3)
@pytest.mark.uncollectif(lambda: current_version() <= "5.5.2.4")
@pytest.mark.parametrize(
    "multiple", [True, False], ids=["from_collection", "from_detail"])
def test_retire_vm_now(rest_api, vm, multiple):
    """Test retirement of vm

    Prerequisities:
        * An appliance with ``/api`` available.
        * VM

    Steps:
        * POST /api/vms/<id> (method ``retire``) OR
        * POST /api/vms (method ``retire``) with ``href`` of vm vm or vms
class TestCategoriesViaREST(object):
    """CRUD coverage for the ``categories`` REST collection."""

    @pytest.fixture(scope="function")
    def categories(self, request, rest_api):
        # Five throw-away categories per test, cleaned up by ``_categories``.
        return _categories(request, rest_api, num=5)

    @pytest.mark.tier(3)
    @pytest.mark.uncollectif(lambda: version.current_version() < '5.5')
    @pytest.mark.parametrize(
        "multiple", [False, True], ids=["one_request", "multiple_requests"])
    def test_edit_categories(self, rest_api, categories, multiple):
        """Edit categories either one at a time or in a single bulk request.

        Metadata:
            test_flag: rest
        """
        if "edit" not in rest_api.collections.categories.action.all:
            pytest.skip("Edit categories action is not implemented in this version")

        if multiple:
            new_descriptions = []
            ctgs_data_edited = []
            for ctg in categories:
                new_name = fauxfactory.gen_alphanumeric().lower()
                new_description = "test_category_{}".format(new_name)
                new_descriptions.append(new_description)
                ctg.reload()
                ctgs_data_edited.append({
                    "href": ctg.href,
                    "description": new_description,
                })
            rest_api.collections.categories.action.edit(*ctgs_data_edited)
            for new_description in new_descriptions:
                # BUG FIX: the original polled ``find_by(description=new_name)``
                # although the edit set ``test_category_<new_name>``, so the
                # wait searched for a description that never exists. We now
                # wait on the full description, bound as a default argument so
                # the lambda does not late-bind the loop variable.
                wait_for(
                    lambda desc=new_description:
                        rest_api.collections.categories.find_by(description=desc),
                    num_sec=180,
                    delay=10,
                )
        else:
            ctg = rest_api.collections.categories.get(
                description=categories[0].description)
            new_description = 'test_category_{}'.format(
                fauxfactory.gen_alphanumeric().lower())
            ctg.action.edit(description=new_description)
            wait_for(
                lambda: rest_api.collections.categories.find_by(
                    description=new_description),
                num_sec=180,
                delay=10,
            )

    @pytest.mark.tier(3)
    @pytest.mark.uncollectif(lambda: version.current_version() < '5.5')
    @pytest.mark.parametrize(
        "multiple", [False, True], ids=["one_request", "multiple_requests"])
    def test_delete_categories(self, rest_api, categories, multiple):
        """Delete categories in bulk or one-by-one from detail.

        A second delete of the same entities must fail with
        ``ActiveRecord::RecordNotFound``.

        Metadata:
            test_flag: rest
        """
        if "delete" not in rest_api.collections.categories.action.all:
            pytest.skip("Delete categories action is not implemented in this version")

        if multiple:
            rest_api.collections.categories.action.delete(*categories)
            with error.expected("ActiveRecord::RecordNotFound"):
                rest_api.collections.categories.action.delete(*categories)
        else:
            ctg = categories[0]
            ctg.action.delete()
            with error.expected("ActiveRecord::RecordNotFound"):
                ctg.action.delete()
def wait_for_request(cells, partial_check=False):
    """helper function checks if a request is complete

    After finding the request's row using the ``cells`` argument, this will
    wait for a request to reach the 'Finished' state and return it. In the
    event of an 'Error' state, it will raise an AssertionError, for use with
    ``pytest.raises``, if desired.

    Args:
        cells: A dict of cells use to identify the request row to inspect in
            the :py:attr:`request_list` Table. See
            :py:meth:`cfme.web_ui.Table.find_rows_by_cells` for more.
        partial_check: If True, cell values are matched as substrings.

    Usage:

        # Filter on the "Description" column
        description = 'Provision from [%s] to [%s]' % (template_name, vm_name)
        cells = {'Description': description}

        # Filter on the "Request ID" column
        # Text must match exactly, you can use "{:,}".format(request_id) to add commas if needed.
        request_id = '{:,}'.format(1000000000001)  # Becomes '1,000,000,000,001', as in the table
        cells = {'Request ID': request_id}

        # However you construct the cells dict, pass it to wait_for_request
        # Provisioning requests often take more than 5 minutes but less than 10.
        wait_for(wait_for_request, [cells], num_sec=600)

    Raises:
        AssertionError: if the matched request has status 'Error'
        RequestException: if multiple matching requests were found

    Returns:
        The matching :py:class:`cfme.web_ui.Table.Row` if found, ``False`` otherwise.
    """
    for page in paginator.pages():
        # We check only for the SplitTable. Can't think of better detection.
        if version.current_version() < "5.5.0.8"\
                and sel.elements(fields.request_list._header_loc) and\
                not sel.is_displayed(fields.request_list):
            # The table exists but it is hidden - no cells
            return False
        results = fields.request_list.find_rows_by_cells(cells, partial_check)
        if len(results) == 0:
            # row not on this page, assume it has yet to appear
            continue
        elif len(results) > 1:
            raise RequestException(
                'Multiple requests with matching content found - be more specific!'
            )
        else:
            # found the row!
            row = results[0]
            logger.debug(' Request Message: %s', row.last_message.text)
            break
    else:
        # Request not found at all, can't continue
        return False

    if row.request_state.text in REQUEST_FINISHED_STATES:
        return row
    else:
        return False
def newer_version(self):
    """Return ``True`` when the appliance runs version 5.5 or later."""
    current = version.current_version()
    return current >= "5.5"
    urls = process_url(cfme_data['basic_info']['update_url'])
    output = build_file(urls)
    with tempfile.NamedTemporaryFile('w') as f:
        f.write(output)
        f.flush()
        os.fsync(f.fileno())
        # Upload while the temp file still exists (it is removed on close).
        temp_appliance_preconfig_funcscope_upgrade.ssh_client.put_file(
            f.name, '/etc/yum.repos.d/update.repo')
    return temp_appliance_preconfig_funcscope_upgrade


@pytest.mark.ignore_stream('5.5', 'upstream')
@pytest.mark.tier(2)
@pytest.mark.uncollectif(
    lambda db_version: db_version >= version.current_version() or
    version.get_stream(db_version) == version.current_stream())
@pytest.mark.meta(
    blockers=[BZ(1354466, unblock=lambda db_url: 'ldap' not in db_url)])
def test_db_migrate(app_creds, temp_appliance_extended_db, db_url, db_version, db_desc):
    """Restore an older database dump onto a fresh appliance and migrate it."""
    app = temp_appliance_extended_db
    # Download the database
    logger.info("Downloading database: {}".format(db_desc))
    url_basename = os_path.basename(db_url)
    rc, out = app.ssh_client.run_command(
        'curl -o "/tmp/{}" "{}"'.format(url_basename, db_url), timeout=30)
    assert rc == 0, "Failed to download database: {}".format(out)
    # The v2_key is potentially here
    v2key_url = os_path.join(os_path.dirname(db_url), "v2_key")
    request.addfinalizer(lambda: profile.delete() if profile.exists else None)
    profile.create()
    # Now assign this malformed profile to a VM
    vm = VM.factory(Vm.get_first_vm_title(provider=setup_a_provider), setup_a_provider)
    vm.assign_policy_profiles(profile.description)
    # It should be screwed here, but do additional check
    pytest.sel.force_navigate("dashboard")
    pytest.sel.force_navigate("infrastructure_virtual_machines")
    # An exception page would contain "except" in its title.
    assert "except" not in pytest.sel.title().lower()
    vm.unassign_policy_profiles(profile.description)


@pytest.mark.tier(3)
@pytest.mark.meta(blockers=[1209538], automates=[1209538])
@pytest.mark.skipif(current_version() > "5.5", reason="requires cfme 5.5 and lower")
def test_folder_field_scope(request, vmware_provider, vmware_vm):
    """This test tests the bug that makes the folder filter in expression not work.

    Prerequisities:
        * A VMware provider.
        * A VM on the provider.
        * A tag to assign.

    Steps:
        * Read the VM's 'Parent Folder Path (VMs & Templates)' from its summary page.
        * Create an action for assigning the tag to the VM.
        * Create a policy, for scope use ``Field``, field name
          ``VM and Instance : Parent Folder Path (VMs & Templates)``, ``INCLUDES`` and
          the folder name as stated on the VM's summary page.
class TestProvidersRESTAPI(object):
    """Tests for provider custom attributes via the REST API."""

    @pytest.yield_fixture(scope="function")
    def custom_attributes(self, rest_api, infra_provider):
        """Add two custom attributes to the provider; remove leftovers on teardown.

        Yields:
            Tuple of (list of created attribute entities, provider REST entity).
        """
        provider = rest_api.collections.providers.get(name=infra_provider.name)
        body = []
        attrs_num = 2
        for _ in range(attrs_num):
            uid = fauxfactory.gen_alphanumeric(5)
            body.append({
                'name': 'ca_name_{}'.format(uid),
                'value': 'ca_value_{}'.format(uid)
            })
        attrs = provider.custom_attributes.action.add(*body)
        assert len(attrs) == attrs_num

        yield attrs, provider

        # Teardown: delete only the attributes that still exist.
        provider.custom_attributes.reload()
        ids = [attr.id for attr in attrs]
        delete_attrs = [
            attr for attr in provider.custom_attributes if attr.id in ids
        ]
        if delete_attrs:
            provider.custom_attributes.action.delete(*delete_attrs)

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    def test_add_custom_attributes(self, rest_api, custom_attributes):
        """Test adding custom attributes to provider using REST API.

        Metadata:
            test_flag: rest
        """
        attributes, provider = custom_attributes
        for attr in attributes:
            record = provider.custom_attributes.get(id=attr.id)
            assert rest_api.response.status_code == 200
            assert record.name == attr.name
            assert record.value == attr.value

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    @pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])
    def test_delete_custom_attributes_from_detail(self, rest_api, custom_attributes, method):
        """Test deleting custom attributes from detail using REST API.

        Metadata:
            test_flag: rest
        """
        # DELETE returns 204 No Content, POST-based delete returns 200.
        status = 204 if method == 'delete' else 200
        attributes, _ = custom_attributes
        for entity in attributes:
            entity.action.delete(force_method=method)
            assert rest_api.response.status_code == status
            # Second delete of the same entity must fail with 404.
            with error.expected('ActiveRecord::RecordNotFound'):
                entity.action.delete(force_method=method)
            assert rest_api.response.status_code == 404

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    def test_delete_custom_attributes_from_collection(self, rest_api, custom_attributes):
        """Test deleting custom attributes from collection using REST API.

        Metadata:
            test_flag: rest
        """
        attributes, provider = custom_attributes
        provider.custom_attributes.action.delete(*attributes)
        assert rest_api.response.status_code == 200
        # Second bulk delete of the same entities must fail with 404.
        with error.expected('ActiveRecord::RecordNotFound'):
            provider.custom_attributes.action.delete(*attributes)
        assert rest_api.response.status_code == 404

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    def test_delete_single_custom_attribute_from_collection(
            self, rest_api, custom_attributes):
        """Test deleting single custom attribute from collection using REST API.

        Metadata:
            test_flag: rest
        """
        attributes, provider = custom_attributes
        attribute = attributes[0]
        provider.custom_attributes.action.delete(attribute)
        assert rest_api.response.status_code == 200
        with error.expected('ActiveRecord::RecordNotFound'):
            provider.custom_attributes.action.delete(attribute)
        assert rest_api.response.status_code == 404

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    @pytest.mark.parametrize('from_detail', [True, False],
                             ids=['from_detail', 'from_collection'])
    def test_edit_custom_attributes(self, rest_api, custom_attributes, from_detail):
        """Test editing custom attributes using REST API.

        Metadata:
            test_flag: rest
        """
        attributes, provider = custom_attributes
        response_len = len(attributes)
        body = []
        for _ in range(response_len):
            uid = fauxfactory.gen_alphanumeric(5)
            body.append({
                'name': 'ca_name_{}'.format(uid),
                'value': 'ca_value_{}'.format(uid),
                'section': 'metadata'
            })
        if from_detail:
            edited = []
            for i in range(response_len):
                edited.append(attributes[i].action.edit(**body[i]))
                assert rest_api.response.status_code == 200
        else:
            for i in range(response_len):
                # Bulk edit needs the entity reference merged into the payload.
                body[i].update(attributes[i]._ref_repr())
            edited = provider.custom_attributes.action.edit(*body)
            assert rest_api.response.status_code == 200
        assert len(edited) == response_len
        for i in range(response_len):
            assert edited[i].name == body[i]['name']
            assert edited[i].value == body[i]['value']

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    @pytest.mark.parametrize('from_detail', [True, False],
                             ids=['from_detail', 'from_collection'])
    def test_edit_custom_attributes_bad_section(self, rest_api, custom_attributes, from_detail):
        """Test that editing custom attributes using REST API and adding invalid section fails.

        Metadata:
            test_flag: rest
        """
        attributes, provider = custom_attributes
        response_len = len(attributes)
        body = []
        for _ in range(response_len):
            body.append({'section': 'bad_section'})
        if from_detail:
            for i in range(response_len):
                with error.expected('Api::BadRequestError'):
                    attributes[i].action.edit(**body[i])
                assert rest_api.response.status_code == 400
        else:
            for i in range(response_len):
                body[i].update(attributes[i]._ref_repr())
            with error.expected('Api::BadRequestError'):
                provider.custom_attributes.action.edit(*body)
            assert rest_api.response.status_code == 400

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    def test_add_custom_attributes_bad_section(self, rest_api, infra_provider):
        """Test that adding custom attributes with invalid section
        to provider using REST API fails.

        Metadata:
            test_flag: rest
        """
        provider = rest_api.collections.providers.get(name=infra_provider.name)
        uid = fauxfactory.gen_alphanumeric(5)
        body = {
            'name': 'ca_name_{}'.format(uid),
            'value': 'ca_value_{}'.format(uid),
            'section': 'bad_section'
        }
        with error.expected('Api::BadRequestError'):
            provider.custom_attributes.action.add(body)
        assert rest_api.response.status_code == 400
# U with update(policy): policy.notes = "Modified!" sel.force_navigate("host_compliance_policy_edit", context={"policy_name": policy.description}) soft_assert( sel.text(policy.form.notes).strip() == "Modified!", "Modification failed!") # D policy.delete() soft_assert(not policy.exists, "The policy {} exists!".format(policy.description)) @pytest.mark.tier(2) @pytest.mark.skipif(current_version() < "5.6", reason="requires cfme 5.6 and higher") def test_container_image_control_policy_crud(soft_assert): policy = explorer.ContainerImageControlPolicy( fauxfactory.gen_alphanumeric()) # CR policy.create() soft_assert(policy.exists, "The policy {} does not exist!".format(policy.description)) # U with update(policy): policy.notes = "Modified!" sel.force_navigate("container_image_control_policy_edit", context={"policy_name": policy.description}) soft_assert( sel.text(policy.form.notes).strip() == "Modified!",
def test_vmware_vimapi_hotadd_disk(
        request, testing_group, provider, testing_vm, domain, namespace, cls):
    """ Tests hot adding a disk to vmware vm.

    This test exercises the ``VMware_HotAdd_Disk`` method, located either in
    ``/Integration/VimApi/`` (<5.3) or ``/Integration/VMware/VimApi`` (5.3 and up).

    Steps:
        * It creates an instance in ``System/Request`` that can be accessible from eg. a button.
        * Then it creates a service dialog that contains a field with the desired disk size,
          the text field name should be ``size``
        * Then it creates a button, that refers to the ``VMware_HotAdd_Disk`` in ``Request``.
          The button shall belong in the VM and instance button group.
        * After the button is created, it goes to a VM's summary page, clicks the button,
          enters the size of the disk and submits the dialog.
        * The test waits until the number of disks is raised.

    Metadata:
        test_flag: hotdisk, provision
    """
    # Instance that calls the method and is accessible from the button.
    # The method moved under /Integration/VMware/ in 5.3.
    if current_version() < "5.3":
        rel = "/Integration/VimApi/VMware_HotAdd_Disk"
    else:
        rel = "/Integration/VMware/VimApi/VMware_HotAdd_Disk"
    instance = Instance(
        name="VMware_HotAdd_Disk",
        values={
            "rel5": rel,
        },
        cls=cls)
    if not instance.exists():
        request.addfinalizer(lambda: instance.delete() if instance.exists() else None)
        instance.create()
    # Dialog to put the disk capacity
    element_data = {
        'ele_label': "Disk size",
        'ele_name': "size",
        'ele_desc': "Disk size",
        'choose_type': "Text Box",
        'default_text_box': "Default text"
    }
    dialog = ServiceDialog(
        label=fauxfactory.gen_alphanumeric(),
        description=fauxfactory.gen_alphanumeric(),
        submit=True,
        tab_label=fauxfactory.gen_alphanumeric(),
        tab_desc=fauxfactory.gen_alphanumeric(),
        box_label=fauxfactory.gen_alphanumeric(),
        box_desc=fauxfactory.gen_alphanumeric(),
    )
    dialog.create(element_data)
    request.addfinalizer(lambda: dialog.delete())
    # Button that will invoke the dialog and action
    button_name = fauxfactory.gen_alphanumeric()
    button = Button(
        group=testing_group,
        text=button_name,
        hover=button_name,
        dialog=dialog,
        system="Request",
        request="VMware_HotAdd_Disk")
    request.addfinalizer(button.delete_if_exists)
    button.create()

    # Now do the funny stuff
    def _get_disk_count():
        # Reads the current disk count from the VM summary page.
        return int(
            testing_vm.get_detail(
                properties=("Datastore Allocation Summary",
                            "Number of Disks")).strip())
    original_disk_count = _get_disk_count()
    toolbar.select(testing_group.text, button.text)
    fill(Input("size"), "1")
    pytest.sel.click(submit)
    flash.assert_no_errors()
    # Success == the disk count went up by exactly one.
    wait_for(lambda: original_disk_count + 1 == _get_disk_count(),
             num_sec=180, delay=5)
    err = version.pick({
        version.LOWEST: "Host Name can't be blank",
        '5.6': FlashMessageException})
    with error.expected(err):
        prov.create()
    # NOTE(review): other version comparisons in this file use a string
    # (e.g. ``>= "5.6"``); comparing against the float ``5.6`` relies on
    # utils.version coercing the operand -- confirm this is intended.
    if version.current_version() >= 5.6:
        assert prov.properties_form.hostname_text.angular_help_block == "Required"
        assert prov.add_provider_button.is_dimmed


@pytest.mark.tier(3)
@pytest.mark.meta(blockers=[1209756])
@pytest.mark.uncollectif(lambda: version.current_version() > "5.4.0.0.24")
def test_ip_required_validation():
    """Test to validate the ip address while adding a provider"""
    prov = VMwareProvider(name=fauxfactory.gen_alphanumeric(5),
                          hostname=fauxfactory.gen_alphanumeric(5),
                          ip_address=None)
    with error.expected("IP Address can't be blank"):
        prov.create()


@pytest.mark.tier(3)
@test_requirements.provider_discovery
def test_name_max_character_validation(request, infra_provider):
    """Test to validate max character for name field"""
    request.addfinalizer(lambda: infra_provider.delete_if_exists(cancel=False))
def provider_by_type(metafunc, provider_types, *fields, **options):
    """Get the values of the named field keys from ``cfme_data.get('management_systems', {})``

    Args:
        provider_types: A list of provider types to include. If None, all providers are considered
        *fields: Names of keys in an individual provider dict whose values will be returned when
            used as test function arguments
        **options: Explained below

    The ``**options`` available are defined below:

        * ``require_fields``: when fields passed are not present, skip them, boolean
        * ``template_location``: Specification where a required tempalte lies in the yaml, If not
          found in the provider, warning is printed and the test not collected. The spec
          is a tuple or list where each item is a key to the next field (str or int).

    The following test function arguments are special:

        ``provider``
            the provider's CRUD object, either a :py:class:`cfme.cloud.provider.Provider`
            or a :py:class:`cfme.infrastructure.provider.Provider`

    Returns:
        An tuple of ``(argnames, argvalues, idlist)`` for use in a pytest_generate_tests hook, or
        with the :py:func:`parametrize` helper.

    Usage:

        # In the function itself
        def pytest_generate_tests(metafunc):
            argnames, argvalues, idlist = testgen.provider_by_type(
                ['openstack', 'ec2'], 'type', 'name', 'credentials', 'provider', 'hosts'
            )
            metafunc.parametrize(argnames, argvalues, ids=idlist, scope='module')

        # Using the parametrize wrapper
        pytest_generate_tests = testgen.parametrize(testgen.provider_by_type,
            ['openstack', 'ec2'], 'type', 'name', 'credentials', 'provider', 'hosts',
            scope='module')

    Note:
        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:
        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.
    """
    # Mark the test as testgen-driven.
    metafunc.function = pytest.mark.uses_testgen()(metafunc.function)

    argnames = list(fields)
    argvalues = []
    idlist = []
    template_location = options.pop("template_location", None)

    # ``provider`` is a synthetic argument resolved to the CRUD object below.
    if 'provider' in metafunc.fixturenames and 'provider' not in argnames:
        argnames.append('provider')

    for provider, data in cfme_data.get('management_systems', {}).iteritems():
        # Check provider hasn't been filtered out with --use-provider
        if provider not in filtered:
            continue
        try:
            prov_obj = get_crud(provider)
        except UnknownProviderType:
            continue
        if not prov_obj:
            logger.debug("Whilst trying to create an object for %s we failed", provider)
            continue
        skip = False
        if provider_types is not None and prov_obj.type not in provider_types:
            # Skip unwanted types
            continue
        restricted_version = data.get('restricted_version', None)
        if restricted_version:
            logger.info('we found a restricted version')
            for op, comparator in _version_operator_map.items():
                # split string by op; if the split works, version won't be empty
                head, op, ver = restricted_version.partition(op)
                if not ver:  # This means that the operator was not found
                    continue
                if not comparator(version.current_version(), ver):
                    skip = True
                break
            else:
                raise Exception(
                    'Operator not found in {}'.format(restricted_version))

        # Test to see the test has meta data, if it does and that metadata contains
        # a test_flag kwarg, then check to make sure the provider contains that test_flag
        # if not, do not collect the provider for this particular test.

        # Obtain the tests flags
        meta = getattr(metafunc.function, 'meta', None)
        test_flags = getattr(meta, 'kwargs', {}) \
            .get('from_docs', {}).get('test_flag', '').split(',')
        if test_flags != ['']:
            test_flags = [flag.strip() for flag in test_flags]

            defined_flags = cfme_data.get('test_flags', '').split(',')
            defined_flags = [flag.strip() for flag in defined_flags]

            excluded_flags = data.get('excluded_test_flags', '').split(',')
            excluded_flags = [flag.strip() for flag in excluded_flags]

            allowed_flags = set(defined_flags) - set(excluded_flags)

            if set(test_flags) - allowed_flags:
                logger.info(
                    "Skipping Provider %s for test %s in module %s because "
                    "it does not have the right flags, "
                    "%s does not contain %s",
                    provider, metafunc.function.func_name,
                    metafunc.function.__module__,
                    list(allowed_flags),
                    list(set(test_flags) - allowed_flags))
                continue

        try:
            if "since_version" in data:
                # Ignore providers that are not supported in this version yet
                if version.current_version() < data["since_version"]:
                    continue
        except Exception:
            # No SSH connection
            continue

        # Get values for the requested fields, filling in with None for undefined fields
        data_values = {field: data.get(field, None) for field in fields}

        # Go through the values and handle the special 'data' name
        # report the undefined fields to the log
        for key in data_values.keys():
            if data_values[key] is None:
                if 'require_fields' not in options:
                    options['require_fields'] = True
                if options['require_fields']:
                    skip = True
                    logger.warning(
                        'Field "%s" not defined for provider "%s", skipping' %
                        (key, provider))
                else:
                    logger.debug(
                        'Field "%s" not defined for provider "%s", defaulting to None' %
                        (key, provider))
        if skip:
            continue

        # Check the template presence if requested
        if template_location is not None:
            o = data
            try:
                for field in template_location:
                    o = o[field]
            except (IndexError, KeyError):
                logger.info(
                    "Cannot apply %s to %s in the template specification, ignoring.",
                    repr(field), repr(o))
            else:
                if not isinstance(o, basestring):
                    raise ValueError(
                        "{} is not a string! (for template)".format(repr(o)))
                templates = TEMPLATES.get(provider, None)
                if templates is not None:
                    if o not in templates:
                        logger.info(
                            "Wanted template %s on %s but it is not there!\n",
                            o, provider)
                        # Skip collection of this one
                        continue

        values = []
        for arg in argnames:
            if arg == 'provider':
                metafunc.function = pytest.mark.provider_related()(
                    metafunc.function)
                values.append(prov_obj)
            elif arg in data_values:
                values.append(data_values[arg])

        # skip when required field is not present and option['require_field'] == True
        argvalues.append(values)

        # Use the provider name for idlist, helps with readable parametrized test output
        idlist.append(provider)

    return argnames, argvalues, idlist
# Cloud "clouds_providers", "clouds_instances", "clouds_availability_zones", "clouds_flavors", "clouds_tenants", # "clouds_security_groups", # Does not have grid view selector ]), pytest.mark.usefixtures("setup_first_provider"), pytest.mark.tier(3) ] @pytest.mark.uncollectif(lambda location: location in {"clouds_tenants"} and current_version() < "5.4") def test_tag_item_through_selecting(request, location, tag): """Add a tag to an item with going through the details page. Prerequisities: * Have a tag category and tag created. * Be on the page you want to test. Steps: * Select any quadicon. * Select ``Policy/Edit Tags`` and assign the tag to it. * Click on the quadicon and verify the tag is assigned. (TODO) * Go back to the quadicon view and select ``Policy/Edit Tags`` and remove the tag. * Click on the quadicon and verify the tag is not present. (TODO) """ pytest.sel.force_navigate(location)
    ems = db['ext_management_systems']
    ems_events = db['ems_events']
    with db.transaction:
        # Sub-query: ids of all events that belong to this provider.
        providers = (db.session.query(ems_events.id).join(
            ems, ems_events.ems_id == ems.id).filter(ems.name == provider.name))
        query = db.session.query(ems_events).filter(
            ems_events.id.in_(providers.subquery()))
        event_count = query.count()
    return event_count


@pytest.mark.meta(
    blockers=BZ(1201923, unblock=lambda provider: provider.type != 'ec2'),
)
@pytest.mark.uncollectif(
    lambda provider: current_version() < "5.4" and provider.type != 'openstack'
)
def test_provider_event(setup_provider, provider, gen_events, test_instance):
    """ Tests provider events on timelines

    Metadata:
        test_flag: timelines, provision
    """
    def nav_step():
        # Navigate to the provider's timelines page before each poll.
        pytest.sel.force_navigate('cloud_provider_timelines',
                                  context={'provider': provider})
    wait_for(count_events, [test_instance.name, nav_step], timeout=60,
             fail_condition=0, message="events to appear")
@pytest.fixture(scope="module") def temp_appliance_extended_db(temp_appliance_preconfig): app = temp_appliance_preconfig app.evmserverd.stop() app.extend_db_partition() app.start_evm_service() return app @pytest.mark.ignore_stream('5.5', 'upstream') @pytest.mark.tier(2) @pytest.mark.uncollectif( lambda db_version: db_version >= version.current_version() or version.get_stream(db_version) == version.current_stream()) @pytest.mark.meta( blockers=[BZ(1354466, unblock=lambda db_url: 'ldap' not in db_url)]) def test_db_migrate(app_creds, temp_appliance_extended_db, db_url, db_version, db_desc): app = temp_appliance_extended_db # Download the database logger.info("Downloading database: {}".format(db_desc)) url_basename = os_path.basename(db_url) rc, out = app.ssh_client.run_command( 'curl -o "/tmp/{}" "{}"'.format(url_basename, db_url), timeout=30) assert rc == 0, "Failed to download database: {}".format(out) # The v2_key is potentially here v2key_url = os_path.join(os_path.dirname(db_url), "v2_key")
def status(self):
    """Parse the EVM server status report into structured data.

    On appliances older than 5.5 this runs ``systemctl status evmserverd``;
    on newer ones it runs ``rake evm:status``. The tabular output (a servers
    table and an optional workers table, separated by a blank line) is split
    into dictionaries keyed by the table headers.

    Returns:
        A dictionary containing ``servers`` and ``workers``, both lists.
        Each of the lists contains dictionaries, one per line. You can
        refer inside the dictionary using the headers.

    Raises:
        Exception: If the command exits non-zero or the output does not
            look like an EVM status report.
    """
    # Ruby runtime noise that must not be parsed as table rows.
    matcher = re.compile('|'.join([
        'DEPRECATION WARNING',
        'called from block in',
        'Please use .* instead',
        'key :terminate is duplicated and overwritten',
    ]))
    if version.current_version() < "5.5":
        data = self.run_command("systemctl status evmserverd")
    else:
        data = self.run_rake_command("evm:status")
    if data.rc != 0:
        raise Exception("systemctl status evmserverd $?={}".format(
            data.rc))
    # Keep the raw output string for error reporting. Previously ``data``
    # was rebound to the split list and the error path below called
    # ``data.output`` on it, raising AttributeError instead of the
    # intended diagnostic.
    output = data.output
    sections = output.strip().split("\n\n")
    if len(sections) == 2:
        srvs, wrks = sections
    else:
        srvs = sections[0]
        wrks = ""
    if "checking evm status" not in srvs.lower():
        raise Exception("Wrong command output:\n{}".format(output))

    def _process_dict(d):
        # Normalize typed columns in-place (ints, role sets, timestamps).
        d["PID"] = int(d["PID"])
        d["ID"] = int(d["ID"])
        try:
            d["SPID"] = int(d["SPID"])
        except ValueError:
            # SPID column may hold a non-numeric placeholder.
            d["SPID"] = None
        if "Active Roles" in d:
            d["Active Roles"] = set(d["Active Roles"].split(":"))
        if "Last Heartbeat" in d:
            d["Last Heartbeat"] = iso8601.parse_date(d["Last Heartbeat"])
        if "Started On" in d:
            d["Started On"] = iso8601.parse_date(d["Started On"])

    # Servers part: drop the banner line, filter warnings, then row 0 is the
    # header row and rows from index 2 on are data (index 1 is a separator).
    srvs = [
        line
        for line in srvs.split("\n")[1:]
        if matcher.search(line) is None
    ]
    srv_headers = [h.strip() for h in srvs[0].strip().split("|")]
    srv_body = srvs[2:]
    servers = []
    for server in srv_body:
        fields = [f.strip() for f in server.strip().split("|")]
        srv = dict(zip(srv_headers, fields))
        _process_dict(srv)
        servers.append(srv)

    # Workers part (may be absent on an idle appliance)
    # TODO: Figure more permanent solution for ignoring the warnings
    wrks = [
        line
        for line in wrks.split("\n")
        if matcher.search(line) is None
    ]
    workers = []
    if wrks:
        wrk_headers = [h.strip() for h in wrks[0].strip().split("|")]
        wrk_body = wrks[2:]
        for worker in wrk_body:
            fields = [f.strip() for f in worker.strip().split("|")]
            wrk = dict(zip(wrk_headers, fields))
            _process_dict(wrk)
            workers.append(wrk)
    return {"servers": servers, "workers": workers}
def test_rpms_present(appliance, package):
    """Verifies nfs-util rpms are in place needed for pxe & nfs operations"""
    # ``rc`` instead of ``exit``: don't shadow the builtin.
    rc, stdout = appliance.ssh_client.run_command(
        'rpm -q {}'.format(package))
    assert 'is not installed' not in stdout
    assert rc == 0


@pytest.mark.uncollectif(store.current_appliance.is_pod)
def test_selinux_enabled(appliance):
    """Verifies selinux is enabled"""
    stdout = appliance.ssh_client.run_command('getenforce')[1]
    assert 'Enforcing' in stdout


@pytest.mark.uncollectif(lambda: version.current_version() >= '5.6',
                         reason='Only valid for <5.6')
@pytest.mark.uncollectif(store.current_appliance.is_pod)
def test_iptables_running(appliance):
    """Verifies iptables service is running on the appliance"""
    stdout = appliance.ssh_client.run_command('systemctl status iptables')[1]
    assert 'is not running' not in stdout


# Reason fixed: the condition uncollects < 5.6, i.e. the test is valid for
# 5.6 and newer (firewalld replaced iptables there).
@pytest.mark.uncollectif(lambda: version.current_version() < '5.6',
                         reason='Only valid for >=5.6')
@pytest.mark.uncollectif(store.current_appliance.is_pod)
def test_firewalld_running(appliance):
    """Verifies firewalld service is running on the appliance"""
    stdout = appliance.ssh_client.run_command('systemctl status firewalld')[1]
    assert 'active (running)' in stdout
    # NOTE(review): this leading section is the tail of a policy-profile CRUD
    # test whose ``def`` line lies outside this chunk.
    profile = explorer.PolicyProfile(
        fauxfactory.gen_alphanumeric(),
        policies=[random_vm_control_policy, random_host_control_policy]
    )
    profile.create()
    soft_assert(profile.exists, "Policy profile {} does not exist!".format(profile.description))
    with update(profile):
        profile.notes = "Modified!"
    sel.force_navigate("policy_profile", context={"policy_profile_name": profile.description})
    soft_assert(sel.text(profile.form.notes).strip() == "Modified!")
    profile.delete()
    soft_assert(not profile.exists,
                "The policy profile {} exists!".format(profile.description))


# RUBY expression type is no longer supported.
@pytest.mark.uncollectif(lambda expression: "RUBY" in expression and current_version() >= "5.5")
@pytest.mark.parametrize(("expression", "verify"), VM_EXPRESSIONS_TO_TEST)
def test_modify_vm_condition_expression(
        vm_condition_for_expressions, expression, verify, soft_assert):
    """Edit a VM condition's expression; optionally verify the editor
    round-trips it to the expected textual form (``verify`` may be None).
    """
    with update(vm_condition_for_expressions):
        vm_condition_for_expressions.expression = expression
    flash.assert_no_errors()
    if verify is not None:
        sel.force_navigate("vm_condition_edit",
                           context={"condition_name": vm_condition_for_expressions.description})
        vm_condition_for_expressions.form.expression.show_func()
        soft_assert(expression_editor.get_expression_as_text() == verify)


def test_alert_crud(soft_assert):
    # NOTE(review): this definition continues beyond this chunk.
    alert = explorer.Alert(
    * Try deleting the user
    """
    # NOTE(review): this leading section is the tail of a test (its ``def``
    # and docstring opening lie outside this chunk) verifying the built-in
    # admin account cannot be deleted.
    user = User(name='Administrator')
    navigate_to(User, 'All')
    row = records_table.find_row_by_cells({'Full Name': user.name})
    sel.check(sel.element(".//input[@type='checkbox']", root=row[0]))
    tb.select('Configuration', 'Delete selected Users', invokes_alert=True)
    sel.handle_alert()
    # The default EVM user must be protected from deletion.
    flash.assert_message_match(
        'Default EVM User "{}" cannot be deleted'.format(user.name))


@pytest.mark.tier(3)
@pytest.mark.meta(automates=[BZ(1090877)])
@pytest.mark.meta(blockers=[BZ(1408479)], forced_streams=["5.7", "upstream"])
@pytest.mark.uncollectif(lambda: version.current_version() >= "5.7")
def test_current_user_login_delete(request):
    """Test for deleting current user login.

    Steps:
        * Login as Admin user
        * Create a new user
        * Login with the new user
        * Try deleting the user
    """
    group_user = Group("EvmGroup-super_administrator")
    user = User(name='user' + fauxfactory.gen_alphanumeric(),
                credential=new_credential(),
                email='*****@*****.**',
                group=group_user)
    user.create()
    # NOTE(review): the function body continues beyond this chunk.
    # NOTE(review): this leading section is the tail of a stack-provisioning
    # test whose ``def`` line lies outside this chunk.
    template.delete_all_templates()
    service_catalogs = ServiceCatalogs("service_name", stack_data)
    service_catalogs.order_stack_item(catalog.name, catalog_item)
    logger.info('Waiting for cfme provision request for service %s' % item_name)
    row_description = item_name
    cells = {'Description': row_description}
    # Provisioning can be slow — poll up to ~42 minutes.
    row, __ = wait_for(requests.wait_for_request, [cells, True],
                       fail_func=requests.reload, num_sec=2500, delay=20)
    assert row.last_message.text == 'Service Provisioned Successfully'


@pytest.mark.uncollectif(lambda: version.current_version() < '5.5')
def test_reconfigure_service(setup_provider, provider, provisioning, dialog, catalog, request):
    """Tests stack provisioning

    Metadata:
        test_flag: provision
    """
    dialog_name, template = dialog
    # Tweak the template description so the reconfigured service differs from
    # the original. NOTE(review): "Aamzon" looks like a typo for "Amazon" —
    # confirm whether the misspelling is intentional before changing it.
    method = METHOD_TORSO.replace('"Description" : "AWS', '"Description" : "Aamzon Web')
    template.create(method)
    template.create_service_dialog_from_template(dialog_name, template.template_name)
    item_name = fauxfactory.gen_alphanumeric()
    # NOTE(review): the function body continues beyond this chunk.
import pytest
from cfme.fixtures import pytest_selenium as sel
from cfme.containers.provider import ContainersProvider
from utils import testgen
from utils.ansible import setup_ansible_script, run_ansible, \
    fetch_miq_ansible_module, create_tmp_directory, remove_tmp_files
from utils.version import current_version

# Ansible-module support for containers providers landed in CFME 5.7;
# everything in this module is uncollected on older versions.
pytestmark = [
    pytest.mark.uncollectif(lambda provider: current_version() < "5.7")
]

pytest_generate_tests = testgen.generate([ContainersProvider], scope='function')

# Attribute payloads used by the add/edit scenarios below. Each is a tuple
# of two dicts (name/value pairs).
custom_attributes_to_add = {
    'name': 'custom1',
    'value': 'first value'
}, {
    'name': 'custom2',
    'value': 'second value'
}

custom_attributes_to_edit = {
    'name': 'custom1',
    'value': 'third value'
}, {
    'name': 'custom2',
    'value': 'fourth value'
}
def resetter(self):
    """Reset the page to the default 'Summary View' before the test runs.

    No-op on versions below 5.7, which have no view selector.
    """
    if version.current_version() < '5.7':
        return  # no view selector in 5.6
    selector = self.view.toolbar.view_selector
    if selector.selected != 'Summary View':
        selector.select('Summary View')
def quad_name(self):
    """Return the display name used on this provider's quadicon.

    Ansible Tower providers are labelled 'Automation Manager' from 5.8 on;
    everything else keeps the 'Configuration Manager' suffix.
    """
    suffix = 'Configuration Manager'
    if version.current_version() >= '5.8' and self.type == 'Ansible Tower':
        suffix = 'Automation Manager'
    return '{} {}'.format(self.name, suffix)