def _uncollectif(provider, collection_name):
    """Decide whether tests for ``collection_name`` should be uncollected.

    Uncollects when the appliance is too old for the collection, or when
    the collection does not apply to the provider class (infra providers
    have no 'instances', cloud providers have no 'vms').
    """
    appliance_version = current_version()
    if appliance_version < '5.8':
        return True
    if appliance_version < '5.9' and collection_name in COLLECTIONS_ADDED_IN_59:
        return True
    if provider.one_of(InfraProvider) and collection_name == 'instances':
        return True
    return provider.one_of(CloudProvider) and collection_name == 'vms'
def test_create_instance(new_instance, soft_assert):
    """Provision an instance and verify its details render correctly in the UI.

    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    details = navigate_to(new_instance, 'Details')
    provisioning = new_instance.provider.data['provisioning']
    # A freshly created instance must come up powered on.
    current_power_state = details.entities.summary(
        'Power Management').get_text_of('Power State')
    assert current_power_state == OpenStackInstance.STATE_ON
    template_shown = details.entities.summary('Relationships').get_text_of('VM Template')
    soft_assert(template_shown == provisioning['image']['name'])
    # Compare the remaining relationship fields with the provisioning yaml.
    checks = [('Availability Zone', 'availability_zone'),
              ('Cloud Tenants', 'cloud_tenant'),
              ('Flavor', 'instance_type')]
    if current_version() >= '5.7':
        checks.append(('Virtual Private Cloud', 'cloud_network'))
    for ui_field, yaml_key in checks:
        shown = details.entities.summary('Relationships').get_text_of(ui_field)
        soft_assert(shown == provisioning[yaml_key])
def test_duplicite_provider_creation(provider):
    """Tests that creation of already existing provider fails."""
    # 5.8 dropped the duplicate-hostname part of the validation message.
    expected_message = (
        'Name has already been taken'
        if current_version() >= '5.8'
        else 'Name has already been taken, Host Name has already been taken'
    )
    with error.expected(expected_message):
        provider.create(cancel=False, validate_credentials=True)
def resolve_blocker(self, blocker, version=None, ignore_bugs=None, force_block_streams=None):
    """Resolve a blocker to the bug variant that applies to ``version``.

    Args:
        blocker: A bug id (anything ``get_bug`` accepts) or an already
            resolved ``BugWrapper``.
        version: Appliance version to resolve against; defaults to the
            current appliance version. ``LATEST`` maps to the product's
            latest (upstream) version.
        ignore_bugs: Optional collection of bug ids to skip entirely.
        force_block_streams: Streams for which the original bug is reported
            as blocking even when no matching variant was found.

    Returns:
        The matching bug variant, the original bug (for forced streams),
        or ``None`` when nothing blocks.
    """
    # ignore_bugs is mutable but is not mutated here! Same force_block_streams
    force_block_streams = force_block_streams or []
    ignore_bugs = set([]) if not ignore_bugs else ignore_bugs
    # FIX: this used to test ``isinstance(id, BugWrapper)`` -- ``id`` is the
    # builtin function, so the check was always False and a BugWrapper passed
    # in as ``blocker`` was needlessly re-fetched through ``get_bug``.
    if isinstance(blocker, BugWrapper):
        bug = blocker
    else:
        bug = self.get_bug(blocker)
    if version is None:
        version = current_version()
    if version == LATEST:
        version = bug.product.latest_version
    is_upstream = version == bug.product.latest_version
    variants = self.get_bug_variants(bug)
    filtered = set([])
    version_series = ".".join(str(version).split(".")[:2])
    for variant in sorted(variants, key=lambda variant: variant.id):
        if variant.id in ignore_bugs:
            continue
        if variant.version is not None and variant.version > version:
            continue
        if variant.release_flag is not None and version.is_in_series(variant.release_flag):
            # An exact release-flag match wins over anything gathered so far.
            logger.info('Found matching bug for %d by release - #%d', bug.id, variant.id)
            filtered.clear()
            filtered.add(variant)
            break
        elif is_upstream and variant.release_flag == 'future':
            # It is an upstream bug
            logger.info('Found a matching upstream bug #%d for bug #%d', variant.id, bug.id)
            return variant
        elif (isinstance(variant.version, Version) and
                isinstance(variant.target_release, Version) and
                (variant.version.is_in_series(version_series) or
                 variant.target_release.is_in_series(version_series))):
            filtered.add(variant)
        else:
            logger.warning(
                "ATTENTION!!: No release flags, wrong versions, ignoring %s", variant.id)
    if not filtered:
        # No appropriate bug was found; the bug may still be forced to
        # block certain streams.
        for forced_stream in force_block_streams:
            # Find out if we force this bug.
            if version.is_in_series(forced_stream):
                return bug
        # No bug, yipee :)
        return None
    # First, use versions.  (Loop variable renamed from the original's
    # ``bug`` to avoid clobbering the resolved bug above.)
    for candidate in filtered:
        if (isinstance(candidate.version, Version) and
                isinstance(candidate.target_release, Version) and
                check_fixed_in(candidate.fixed_in, version_series) and
                (candidate.version.is_in_series(version_series) or
                 candidate.target_release.is_in_series(version_series))):
            return candidate
    # Otherwise prefer release_flag
    for candidate in filtered:
        if candidate.release_flag and version.is_in_series(candidate.release_flag):
            return candidate
    return None
def _filter_restricted_version(self, provider):
    """Filters by yaml version restriction; not applied if SSH is not available.

    Returns False when the current appliance version violates any of the
    provider's yaml restrictions, True when the version could not be
    determined (restriction skipped), and None when no decision is made.
    """
    if self.restrict_version:
        # TODO
        # get rid of this since_version hotfix by translating since_version
        # to restricted_version; in addition, restricted_version should turn into
        # "version_restrictions" and it should be a sequence of restrictions with operators
        # so that we can create ranges like ">= 5.6" and "<= 5.8"
        version_restrictions = []
        since_version = provider.data.get('since_version')
        if since_version:
            version_restrictions.append('>= {}'.format(since_version))
        restricted_version = provider.data.get('restricted_version')
        if restricted_version:
            version_restrictions.append(restricted_version)
        for restriction in version_restrictions:
            for op, comparator in ProviderFilter._version_operator_map.items():
                # split string by op; if the split works, version won't be empty
                _, _, ver = restriction.partition(op)
                if not ver:  # This means that the operator was not found
                    continue
                try:
                    curr_ver = version.current_version()
                except Exception:
                    # FIX: was a bare ``except:`` which also swallowed
                    # SystemExit/KeyboardInterrupt.  Version lookup can fail
                    # (e.g. SSH unavailable) -- skip the restriction then.
                    return True
                if not comparator(curr_ver, ver):
                    return False
                break
            else:
                raise Exception('Operator not found in {}'.format(restriction))
    return None
def test_control_import_invalid_yaml_file(appliance, import_invalid_yaml_file):
    """Importing a non-YAML policy file must fail with the proper message."""
    # Pre-5.5 appliances leaked a raw Ruby error; 5.5+ reports cleanly.
    if current_version() < "5.5":
        expected = ("Error during 'Policy Import': undefined method `collect' "
                    'for "Invalid yaml":String')
    else:
        expected = "Error during 'Policy Import': Invalid YAML file"
    with error.expected(expected):
        import_export.import_file(appliance, import_invalid_yaml_file)
def get_tags(tag="My Company Tags"):
    """Return the stripped text of every tag row under the given tag header."""
    if current_version() < '5.8':
        # Pre-5.8 marks tag rows with a 'smarttag' image in the summary table.
        tagpath = ("//*[(self::th or self::td) and normalize-space(.)={}]/../.."
                   "//td[img[contains(@src, 'smarttag')]]")
    else:
        # 5.8+ renders tags with a font-awesome tag icon instead.
        tagpath = "//td[i[contains(@class, 'fa-tag')]]"
    rows = sel.elements(tagpath.format(quoteattr(tag)))
    return [sel.text(row).strip() for row in rows]
def resolve_blockers(item, blockers):
    """Evaluate the ``blockers`` declared for a test item and skip it if any apply.

    Each blocker string/int is parsed into a ``Blocker``; blockers whose
    ``unblock`` guard evaluates truthy are discarded, ``custom_action``
    hooks of the remaining ones run, and finally ``pytest.skip`` fires if
    anything still blocks.

    Args:
        item: The pytest test item being collected/run.
        blockers: list/tuple/set of blocker specifications (ints are a
            Bugzilla shortcut, e.g. ``123`` -> ``"BZ#123"``).

    Raises:
        ValueError: if ``blockers`` is not a list, tuple or set.
    """
    if not isinstance(blockers, (list, tuple, set)):
        raise ValueError("Type of the 'blockers' parameter must be one of: list, tuple, set")
    # Prepare the global env for the kwarg insertion
    global_env = dict(
        appliance_version=version.current_version(),
        appliance_downstream=version.appliance_is_downstream(),
        item=item,
        blockers=blockers,
    )
    # We will now extend the env with fixtures, so they can be used in the guard functions
    # We will however add only those that are not in the global_env otherwise we could overwrite
    # our own stuff.
    params = extract_fixtures_values(item)
    for funcarg, value in params.iteritems():
        if funcarg not in global_env:
            global_env[funcarg] = value
    # Check blockers
    use_blockers = []
    # Bugzilla shortcut
    blockers = map(lambda b: "BZ#{}".format(b) if isinstance(b, int) else b, blockers)
    for blocker in map(Blocker.parse, blockers):
        if blocker.blocks:
            use_blockers.append(blocker)
    # Unblocking: a blocker whose "unblock" guard returns truthy is dropped.
    discard_blockers = set([])
    for blocker in use_blockers:
        unblock_func = kwargify(blocker.kwargs.get("unblock"))
        local_env = {"blocker": blocker}
        local_env.update(global_env)
        if unblock_func(**local_env):
            discard_blockers.add(blocker)
    for blocker in discard_blockers:
        use_blockers.remove(blocker)
    # We now have those that block testing, so we have to skip
    # Let's go in the order that they were added
    # Custom actions first
    for blocker in use_blockers:
        if "custom_action" in blocker.kwargs:
            action = kwargify(blocker.kwargs["custom_action"])
            local_env = {"blocker": blocker}
            local_env.update(global_env)
            action(**local_env)
    # And then skip
    if use_blockers:
        bugs = [bug.bug_id for bug in use_blockers if hasattr(bug, "bug_id")]
        skip_data = {'type': 'blocker', 'reason': bugs}
        fire_art_test_hook(item, 'skip_test', skip_data=skip_data)
        pytest.skip("Skipping due to these blockers:\n{}".format(
            "\n".join(
                "- {}".format(str(blocker))
                for blocker in use_blockers
            )
        ))
def blocks(self):
    """Whether this blocker applies to the appliance under test."""
    # Upstream-only blockers never apply to downstream appliances.
    if self.upstream_only and version.appliance_is_downstream():
        return False
    # Closed issues no longer block anything.
    if self.data.state == "closed":
        return False
    since, until = self.since, self.until
    if since is None and until is None:
        # No version bounds -> blocks everywhere.
        return True
    current = version.current_version()
    if since is not None and until is not None:
        # since is inclusive, until is exclusive.
        return since <= current < until
    if since is not None:
        return current >= since
    return current < until
def view_value_mapping(self):
    """Endpoint field values to fill into the provider add/edit view."""
    mapping = {
        'hostname': self.hostname,
        'api_port': self.api_port,
    }
    if current_version() >= '5.8':
        # The security-protocol selector only exists on the 5.8+ form.
        mapping['sec_protocol'] = self.sec_protocol
        if self.sec_protocol.lower() == 'ssl trusting custom ca':
            mapping['trusted_ca_certificates'] = OpenshiftDefaultEndpoint.get_ca_cert()
    return mapping
def test_version():
    """Compare the UI-reported version against the one read from the machine.

    The appliance reports e.g. ``1.2.3.4`` while the UI shows a longer build
    string such as ``1.2.3.4.20140505xyzblabla``, so a prefix check is the
    right comparison.
    """
    machine_version = str(version.current_version())
    displayed_version = about.get_detail(about.VERSION)
    failure_note = "UI: {}, SSH: {}".format(displayed_version, machine_version)
    assert displayed_version.startswith(machine_version), failure_note
def get_appliances():
    """Returns two database-owning appliances
    """
    version_string = str(version.current_version())
    appliances = [
        provision_appliance(version_string, 'test_back'),
        provision_appliance(version_string, 'test_rest'),
    ]
    # Configure each appliance as a standalone region-0 database owner.
    for appliance in appliances:
        appliance.configure(region=0)
        appliance.ipapp.wait_for_web_ui()
    return tuple(appliances)
def view_value_mapping(self):
    """Endpoint field values to fill into the provider add/edit view."""
    mapping = {
        'hostname': self.hostname,
        'password': self.token,
        'api_port': self.api_port,
        'sec_protocol': self.sec_protocol,
    }
    # A custom CA is only relevant when that SSL mode is selected and the
    # endpoint class knows how to fetch the certificate.
    if self.sec_protocol.lower() == 'ssl trusting custom ca' and hasattr(self, 'get_ca_cert'):
        mapping['trusted_ca_certificates'] = self.get_ca_cert()
    # The password-confirmation field was dropped from the form in 5.9.
    if version.current_version() < '5.9':
        mapping['confirm_password'] = self.token
    return mapping
def get_distributed_appliances():
    """Returns one database-owning appliance, and a second appliance
       that connects to the database of the first.
    """
    version_string = str(version.current_version())
    db_owner = provision_appliance(version_string, 'long-test_childDB_A')
    db_client = provision_appliance(version_string, 'long-test_childDB_B')
    # The owner must be fully up before the client can join its database.
    db_owner.configure(region=1, patch_ajax_wait=False)
    db_owner.ipapp.wait_for_web_ui()
    db_client.configure(region=1, patch_ajax_wait=False,
                        key_address=db_owner.hostname,
                        db_address=db_owner.hostname)
    db_client.ipapp.wait_for_web_ui()
    return db_owner, db_client
def pytest_generate_tests(metafunc):
    """Parametrize tests with infra providers that have complete host
    provisioning data and a usable PXE setup.

    A provider is kept only if: it defines all required host_provisioning
    fields, its ``runs_on_stream`` matches the current appliance stream,
    its PXE server is among the configured PXE servers, and its kickstart
    template exists in ``cfme_data``.
    """
    # Filter out providers without host provisioning data defined
    argnames, argvalues, idlist = testgen.providers_by_class(
        metafunc, [InfraProvider], required_fields=[
            ['host_provisioning', 'pxe_server'],
            ['host_provisioning', 'pxe_image'],
            ['host_provisioning', 'pxe_image_type'],
            ['host_provisioning', 'pxe_kickstart'],
            ['host_provisioning', 'datacenter'],
            ['host_provisioning', 'cluster'],
            ['host_provisioning', 'datastores'],
            ['host_provisioning', 'hostname'],
            ['host_provisioning', 'root_password'],
            ['host_provisioning', 'ip_addr'],
            ['host_provisioning', 'subnet_mask'],
            ['host_provisioning', 'gateway'],
            ['host_provisioning', 'dns'],
        ])
    pargnames, pargvalues, pidlist = testgen.pxe_servers(metafunc)
    # First element of each pxe_servers argvalue tuple is the server name.
    pxe_server_names = [pval[0] for pval in pargvalues]
    new_idlist = []
    new_argvalues = []
    for i, argvalue_tuple in enumerate(argvalues):
        args = dict(zip(argnames, argvalue_tuple))
        try:
            prov_data = args['provider'].data['host_provisioning']
        except KeyError:
            # No host provisioning data available
            continue
        # Providers may be pinned to a specific appliance stream.
        stream = prov_data.get('runs_on_stream', '')
        if not version.current_version().is_in_series(str(stream)):
            continue
        pxe_server_name = prov_data.get('pxe_server', '')
        if pxe_server_name not in pxe_server_names:
            continue
        # Kickstart template must actually be configured in cfme_data.
        pxe_cust_template = prov_data.get('pxe_kickstart', '')
        if pxe_cust_template not in cfme_data.get('customization_templates', {}).keys():
            continue
        new_idlist.append(idlist[i])
        new_argvalues.append(argvalues[i])
    testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
def test_select_attributes(appliance, collection_name):
    """Tests that it's possible to limit returned attributes.

    Metadata:
        test_flag: rest
    """
    if collection_name in COLLECTIONS_BUGGY_ATTRS and current_version() < '5.9':
        pytest.skip("Affected by BZ 1437201, cannot test.")
    collection = getattr(appliance.rest_api.collections, collection_name)
    response = appliance.rest_api.get(
        '{}{}'.format(collection._href, '?expand=resources&attributes=id'))
    assert_response(appliance)
    for resource in response.get('resources', []):
        assert 'id' in resource
        # Only 'id' was requested; 'href' and 'fqname' may be added
        # implicitly by the API, so they are tolerated in the count.
        expected_keys = 1 + sum(1 for key in ('href', 'fqname') if key in resource)
        assert len(resource) == expected_keys
def test_attributes_present(appliance, collection_name):
    """Tests that the expected attributes are present in all collections.

    Metadata:
        test_flag: rest
    """
    requested_attrs = 'href,id,href_slug'
    collection = getattr(appliance.rest_api.collections, collection_name)
    url = '{}?expand=resources&attributes={}'.format(collection._href, requested_attrs)
    response = appliance.rest_api.get(url)
    assert_response(appliance)
    for resource in response.get('resources', []):
        assert 'id' in resource
        assert 'href' in resource
        assert resource['href'] == '{}/{}'.format(collection._href, resource['id'])
        # href_slug only exists from 5.8 onwards.
        if current_version() >= '5.8':
            assert 'href_slug' in resource
            assert resource['href_slug'] == '{}/{}'.format(collection.name, resource['id'])
def test_password_mismatch_validation(provider, soft_assert):
    """ Tests password mismatch check """
    candidate = copy(provider)
    endpoints = deepcopy(candidate.endpoints)
    # Secret and its verification deliberately differ in length, so they
    # can never match.
    endpoints['default'].credentials = Credential(
        principal='bad',
        secret=fauxfactory.gen_alphanumeric(5),
        verify_secret=fauxfactory.gen_alphanumeric(6),
    )
    candidate.endpoints = endpoints
    if current_version() >= '5.9':
        expected = 'Credential validation was not successful: Invalid credentials'
    else:
        expected = 'Credential validation was successful'
    with error.expected(expected):
        candidate.create()
    add_view = provider.create_view(MiddlewareProviderAddView)
    endpoint_view = candidate.endpoints_form(parent=add_view)
    # With mismatched passwords neither validation nor Add may be enabled.
    soft_assert(not endpoint_view.validate.active)
    soft_assert(not add_view.add.active)
def test_create_instance(new_instance, soft_assert):
    """Creates an instance and verifies it appears on UI"""
    navigate_to(new_instance, 'Details')
    provisioning = new_instance.provider.data['provisioning']
    # A freshly created instance must come up powered on.
    current_power_state = new_instance.get_detail(
        properties=('Power Management', 'Power State'))
    assert current_power_state == OpenStackInstance.STATE_ON
    template_shown = new_instance.get_detail(properties=('Relationships', 'VM Template'))
    soft_assert(template_shown == provisioning['image']['name'])
    # Compare the remaining relationship fields with the provisioning yaml.
    checks = [('Availability Zone', 'availability_zone'),
              ('Cloud Tenants', 'cloud_tenant'),
              ('Flavor', 'instance_type')]
    if current_version() >= '5.7':
        checks.append(('Virtual Private Cloud', 'cloud_network'))
    for ui_field, yaml_key in checks:
        shown = new_instance.get_detail(properties=('Relationships', ui_field))
        soft_assert(shown == provisioning[yaml_key])
def blocks(self):
    """Return True when this Bugzilla bug should block test execution.

    An open bug blocks; an upstream-only bug does not block an upstream
    appliance where it can be tested anyway.  On downstream appliances a
    bug with a known ``fixed_in`` still blocks versions older than the fix.
    Bugzilla XML-RPC faults are logged and treated as non-blocking.
    """
    try:
        bug = self.data
        # Bug could not be resolved at all -> nothing to block on.
        if bug is None:
            return False
        result = False
        if bug.is_opened:
            result = True
        if bug.upstream_bug:
            # Upstream appliance can still exercise the bug -> not blocking.
            if not version.appliance_is_downstream() and bug.can_test_on_upstream:
                result = False
        if not result and version.appliance_is_downstream():
            # Not otherwise blocking, but downstream appliances older than
            # the fix version are still affected.
            if bug.fixed_in is not None:
                return version.current_version() < bug.fixed_in
        return result
    except six.moves.xmlrpc_client.Fault as e:
        code = e.faultCode
        s = e.faultString.strip().split("\n")[0]
        logger.error("Bugzilla thrown a fault: %s/%s", code, s)
        logger.warning("Ignoring and taking the bug as non-blocking")
        store.terminalreporter.write(
            "Bugzila made a booboo: {}/{}\n".format(code, s), bold=True)
        return False
def resolve_blocker(self, blocker, version=None, ignore_bugs=None, force_block_streams=None):
    """Resolve a blocker to the bug variant that applies to ``version``.

    Args:
        blocker: A bug id (anything ``get_bug`` accepts) or an already
            resolved ``BugWrapper``.
        version: Appliance version to resolve against; defaults to the
            current appliance version. ``LATEST`` maps to the product's
            latest (upstream) version.
        ignore_bugs: Optional collection of bug ids to skip entirely.
        force_block_streams: Streams for which the original bug is reported
            as blocking even when no matching variant was found.

    Returns:
        The matching bug variant, the original bug (for forced streams),
        or ``None`` when nothing blocks.
    """
    # ignore_bugs is mutable but is not mutated here! Same force_block_streams
    force_block_streams = force_block_streams or []
    ignore_bugs = set([]) if not ignore_bugs else ignore_bugs
    # FIX: this used to test ``isinstance(id, BugWrapper)`` -- ``id`` is the
    # builtin function, so the check was always False and a BugWrapper passed
    # in as ``blocker`` was needlessly re-fetched through ``get_bug``.
    if isinstance(blocker, BugWrapper):
        bug = blocker
    else:
        bug = self.get_bug(blocker)
    if version is None:
        version = current_version()
    if version == LATEST:
        version = bug.product.latest_version
    is_upstream = version == bug.product.latest_version
    variants = self.get_bug_variants(bug)
    filtered = set([])
    version_series = ".".join(str(version).split(".")[:2])
    for variant in sorted(variants, key=lambda variant: variant.id):
        if variant.id in ignore_bugs:
            continue
        if variant.version is not None and variant.version > version:
            continue
        if variant.release_flag is not None and version.is_in_series(
                variant.release_flag):
            # An exact release-flag match wins over anything gathered so far.
            logger.info('Found matching bug for %d by release - #%d', bug.id, variant.id)
            filtered.clear()
            filtered.add(variant)
            break
        elif is_upstream and variant.release_flag == 'future':
            # It is an upstream bug
            logger.info('Found a matching upstream bug #%d for bug #%d', variant.id, bug.id)
            return variant
        elif (isinstance(variant.version, Version) and
                isinstance(variant.target_release, Version) and
                (variant.version.is_in_series(version_series) or
                 variant.target_release.is_in_series(version_series))):
            filtered.add(variant)
        else:
            logger.warning(
                "ATTENTION!!: No release flags, wrong versions, ignoring %s", variant.id)
    if not filtered:
        # No appropriate bug was found; the bug may still be forced to
        # block certain streams.
        for forced_stream in force_block_streams:
            # Find out if we force this bug.
            if version.is_in_series(forced_stream):
                return bug
        # No bug, yipee :)
        return None
    # First, use versions.  (Loop variable renamed from the original's
    # ``bug`` to avoid clobbering the resolved bug above.)
    for candidate in filtered:
        if (isinstance(candidate.version, Version) and
                isinstance(candidate.target_release, Version) and
                check_fixed_in(candidate.fixed_in, version_series) and
                (candidate.version.is_in_series(version_series) or
                 candidate.target_release.is_in_series(version_series))):
            return candidate
    # Otherwise prefer release_flag
    for candidate in filtered:
        if candidate.release_flag and version.is_in_series(candidate.release_flag):
            return candidate
    return None
from cfme.utils.version import current_version from server_group_methods import (verify_server_group_stopped, verify_server_group_running, verify_server_group_suspended, check_group_deployment_enabled, check_group_deployment_disabled, check_group_deployment_content) from deployment_methods import deploy from deployment_methods import RESOURCE_JAR_NAME, RESOURCE_WAR_NAME from deployment_methods import WAR_EXT, RESOURCE_WAR_NAME_NEW from deployment_methods import RESOURCE_WAR_CONTENT, RESOURCE_WAR_CONTENT_NEW from server_methods import get_domain_container_server pytestmark = [ pytest.mark.usefixtures('setup_provider'), pytest.mark.uncollectif(lambda: current_version() < '5.7'), pytest.mark.provider([HawkularProvider], scope="function"), ] ITEMS_LIMIT = 1 # when we have big list, limit number of items to test @pytest.yield_fixture(scope="function") def main_server_group(provider): domain_list = MiddlewareDomain.domains_in_db(provider=provider, strict=False) assert domain_list, "Domain was not found in DB" domain = domain_list[0] server_group_list = MiddlewareServerGroup.server_groups_in_db( domain=domain, name="main-server-group", strict=False) assert server_group_list, "Server group was not found in DB"
provisioning_data, vm_name, delayed=None): do_vm_provisioning(appliance, template_name=template, provider=provider, vm_name=vm_name, provisioning_data=provisioning_data, request=None, smtp_test=None, wait=False) return _provisioner @pytest.mark.uncollectif(lambda: version.current_version() >= '5.5') def test_group_quota_max_memory_check_by_tagging(appliance, provisioner, prov_data, template_name, provider, request, vm_name, set_group_memory, bug): """ Test group Quota-Max Memory by tagging. Prerequisities: * A provider set up, supporting provisioning in CFME Steps: * Set the group quota for memory by tagging * Open the provisioning dialog. * Apart from the usual provisioning settings, set RAM greater then group quota memory. * Submit the provisioning request and wait for it to finish. * Visit the requests page. The last message should state quota validation message.
logger.warning('Exception during volume deletion - skipping..') @pytest.mark.tier(3) def test_storage_volume_backup_create(backup): assert backup.exists assert backup.size == STORAGE_SIZE @pytest.mark.tier(3) def test_storage_volume_backup_edit_tag_from_detail(backup): # add tag with category Department and tag communication backup.add_tag('Department', 'Communication') tag_available = backup.get_tags() assert tag_available[0].display_name == 'Communication' assert tag_available[0].category.display_name == 'Department' # remove assigned tag backup.remove_tag('Department', 'Communication') tag_available = backup.get_tags() assert not tag_available @pytest.mark.tier(3) @pytest.mark.uncollectif(lambda: version.current_version() < '5.9') def test_storage_volume_backup_delete(backup): """ Volume backup deletion method not support by 5.8 """ backup.parent.delete(backup) assert not backup.exists
class TestProvidersRESTAPI(object):
    """REST API tests around provider sub-collections: cloud networks,
    security groups, and the pre-5.9 arbitration profiles/rules."""

    @pytest.fixture(scope="function")
    def arbitration_profiles(self, request, appliance, cloud_provider):
        # Creates two arbitration profiles via REST for the tests below.
        num_profiles = 2
        response = _arbitration_profiles(
            request, appliance.rest_api, cloud_provider, num=num_profiles)
        assert appliance.rest_api.response.status_code == 200
        assert len(response) == num_profiles
        return response

    @pytest.mark.tier(3)
    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.parametrize(
        'from_detail', [True, False],
        ids=['from_detail', 'from_collection'])
    def test_cloud_networks_query(self, cloud_provider, appliance, from_detail):
        """Tests querying cloud providers and cloud_networks collection for network info.

        Metadata:
            test_flag: rest
        """
        if from_detail:
            networks = appliance.rest_api.collections.providers.get(
                name=cloud_provider.name).cloud_networks
        else:
            networks = appliance.rest_api.collections.cloud_networks
        assert appliance.rest_api.response.status_code == 200
        assert networks
        assert len(networks) == networks.subcount
        assert len(networks.find_by(enabled=True)) >= 1
        assert 'CloudNetwork' in networks[0].type

    @pytest.mark.tier(3)
    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    def test_security_groups_query(self, cloud_provider, appliance):
        """Tests querying cloud networks subcollection for security groups info.

        Metadata:
            test_flag: rest
        """
        network = appliance.rest_api.collections.providers.get(
            name=cloud_provider.name).cloud_networks[0]
        network.reload(attributes='security_groups')
        security_groups = network.security_groups
        # "security_groups" needs to be present, even if it's just an empty list
        assert isinstance(security_groups, list)
        # if it's not empty, check type
        if security_groups:
            assert 'SecurityGroup' in security_groups[0]['type']

    @pytest.mark.tier(3)
    # arbitration_profiles were removed in versions >= 5.9'
    @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9')
    def test_create_arbitration_profiles(self, appliance, arbitration_profiles):
        """Tests creation of arbitration profiles.

        Metadata:
            test_flag: rest
        """
        for profile in arbitration_profiles:
            record = appliance.rest_api.collections.arbitration_profiles.get(
                id=profile.id)
            assert appliance.rest_api.response.status_code == 200
            assert record._data == profile._data
            assert 'ArbitrationProfile' in profile.type

    @pytest.mark.tier(3)
    # arbitration_profiles were removed in versions >= 5.9'
    @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9')
    @pytest.mark.parametrize('method', ['post', 'delete'])
    def test_delete_arbitration_profiles_from_detail(self, appliance, arbitration_profiles,
            method):
        """Tests delete arbitration profiles from detail.

        Metadata:
            test_flag: rest
        """
        # DELETE returns 204 No Content; POSTed delete action returns 200.
        status = 204 if method == 'delete' else 200
        for entity in arbitration_profiles:
            entity.action.delete(force_method=method)
            assert appliance.rest_api.response.status_code == status
            # A second delete of the same entity must 404.
            with error.expected('ActiveRecord::RecordNotFound'):
                entity.action.delete(force_method=method)
            assert appliance.rest_api.response.status_code == 404

    @pytest.mark.tier(3)
    # arbitration_profiles were removed in versions >= 5.9'
    @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9')
    def test_delete_arbitration_profiles_from_collection(
            self, appliance, arbitration_profiles):
        """Tests delete arbitration profiles from collection.

        Metadata:
            test_flag: rest
        """
        collection = appliance.rest_api.collections.arbitration_profiles
        collection.action.delete(*arbitration_profiles)
        assert appliance.rest_api.response.status_code == 200
        # A second delete of the same entities must 404.
        with error.expected('ActiveRecord::RecordNotFound'):
            collection.action.delete(*arbitration_profiles)
        assert appliance.rest_api.response.status_code == 404

    @pytest.mark.tier(3)
    # arbitration_profiles were removed in versions >= 5.9'
    @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9')
    @pytest.mark.parametrize(
        'from_detail', [True, False],
        ids=['from_detail', 'from_collection'])
    def test_edit_arbitration_profiles(self, appliance, arbitration_profiles, from_detail):
        """Tests editing of arbitration profiles.

        Metadata:
            test_flag: rest
        """
        response_len = len(arbitration_profiles)
        zone = appliance.rest_api.collections.availability_zones[-1]
        # Alternate between id- and href-based zone references.
        locators = [{'id': zone.id}, {'href': zone.href}]
        new = [{
            'availability_zone': locators[i % 2]
        } for i in range(response_len)]
        if from_detail:
            edited = []
            for i in range(response_len):
                edited.append(arbitration_profiles[i].action.edit(**new[i]))
                assert appliance.rest_api.response.status_code == 200
        else:
            for i in range(response_len):
                new[i].update(arbitration_profiles[i]._ref_repr())
            edited = appliance.rest_api.collections.arbitration_profiles.action.edit(
                *new)
            assert appliance.rest_api.response.status_code == 200
        assert len(edited) == response_len
        for i in range(response_len):
            assert edited[i].availability_zone_id == zone.id

    @pytest.mark.tier(3)
    # arbitration_rules were removed in versions >= 5.9'
    @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9' or
                             store.current_appliance.version < '5.8')
    def test_create_arbitration_rules_with_profile(self, request, appliance,
            arbitration_profiles):
        """Tests creation of arbitration rules referencing arbitration profiles.

        Metadata:
            test_flag: rest
        """
        num_rules = 2
        profile = arbitration_profiles[0]
        # Alternate between id- and href-based profile references.
        references = [{'id': profile.id}, {'href': profile._href}]
        data = []
        for index in range(num_rules):
            data.append({
                'description': 'test admin rule {}'.format(fauxfactory.gen_alphanumeric(5)),
                'operation': 'inject',
                'arbitration_profile': references[index % 2],
                'expression': {
                    'EQUAL': {
                        'field': 'User-userid',
                        'value': 'admin'
                    }
                }
            })
        response = creating_skeleton(request, appliance.rest_api, 'arbitration_rules', data)
        assert appliance.rest_api.response.status_code == 200
        assert len(response) == num_rules
        for rule in response:
            record = appliance.rest_api.collections.arbitration_rules.get(
                id=rule.id)
            assert record.arbitration_profile_id == rule.arbitration_profile_id == profile.id

    @pytest.mark.tier(3)
    # arbitration_rules were removed in versions >= 5.9'
    @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9' or
                             store.current_appliance.version < '5.8')
    def test_create_arbitration_rule_with_invalid_profile(
            self, request, appliance):
        """Tests creation of arbitration rule referencing invalid arbitration profile.

        Metadata:
            test_flag: rest
        """
        data = [{
            'description': 'test admin rule {}'.format(fauxfactory.gen_alphanumeric(5)),
            'operation': 'inject',
            'arbitration_profile': 'invalid_value',
            'expression': {
                'EQUAL': {
                    'field': 'User-userid',
                    'value': 'admin'
                }
            }
        }]
        response = creating_skeleton(request, appliance.rest_api, 'arbitration_rules', data)
        # this will fail once BZ 1433477 is fixed - change and expand the test accordingly
        assert appliance.rest_api.response.status_code == 200
        for rule in response:
            assert not hasattr(rule, 'arbitration_profile_id')
import fauxfactory import pytest from cfme.configure.configuration import Category, Tag from cfme.utils import error, version from cfme.utils.update import update from cfme.utils.testgen import config_managers, generate from cfme.utils.blockers import BZ pytest_generate_tests = generate(gen_func=config_managers) pytestmark = [ pytest.mark.uncollectif( lambda config_manager_obj: config_manager_obj.type == "Ansible Tower" and version.current_version() > "5.6"), pytest.mark.meta(blockers=[BZ(1393987)]) ] @pytest.yield_fixture def config_manager(config_manager_obj): """ Fixture that provides a random config manager and sets it up""" config_manager_obj.create() yield config_manager_obj config_manager_obj.delete() @pytest.fixture def config_system(config_manager): return fauxfactory.gen_choice(config_manager.systems) @pytest.yield_fixture(scope="module")
from cfme.cloud.availability_zone import AvailabilityZone from cfme.cloud.instance import Instance from cfme.cloud.provider.azure import AzureProvider from cfme.cloud.provider.openstack import OpenStackProvider from cfme.cloud.provider.ec2 import EC2Provider from cfme.utils import version from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.blockers import BZ from cfme.utils.generators import random_vm_name from cfme.utils.log import logger from cfme.utils.wait import wait_for pytestmark = [ pytest.mark.tier(2), pytest.mark.uncollectif(lambda provider: provider.one_of(EC2Provider) and version.current_version() < '5.8'), pytest.mark.usefixtures("setup_provider_modscope"), pytest.mark.provider([AzureProvider, OpenStackProvider, EC2Provider], scope="module") ] def ec2_sleep(): # CFME currently obtains events from AWS Config thru AWS SNS # EC2 Config creates config diffs apprx. every 10 minutes # This workaround is needed until CFME starts using CloudWatch + CloudTrail instead sleep(900) @pytest.fixture(scope="module") def new_instance(request, provider):
object_name = vm_name with appliance.db.client.transaction: provs = ( appliance.db.client.session.query(metrics_tbl.id) .join(ems, metrics_tbl.parent_ems_id == ems.id) .filter(metrics_tbl.resource_name == object_name, ems.name == provider.name) ) return appliance.db.client.session.query(metrics_tbl).filter( metrics_tbl.id.in_(provs.subquery())) # Tests to check that specific metrics are being collected @pytest.mark.uncollectif( lambda provider: current_version() < "5.7" and provider.type == 'gce') def test_raw_metric_vm_cpu(metrics_collection, appliance, provider): vm_name = provider.data['cap_and_util']['capandu_vm'] if provider.category == "infra": query = query_metric_db(appliance, provider, 'cpu_usagemhz_rate_average', vm_name) average_rate = attrgetter('cpu_usagemhz_rate_average') elif provider.category == "cloud": query = query_metric_db(appliance, provider, 'cpu_usage_rate_average', vm_name) average_rate = attrgetter('cpu_usage_rate_average') for record in query: if average_rate(record) is not None: assert average_rate(record) > 0, 'Zero VM CPU Usage' break
def testing_instance(request, setup_provider, provider, provisioning, vm_name):
    """Fixture that assembles provisioning parameters for one instance.

    Builds the ``inst_args`` dict expected by the provisioning form,
    layering provider-specific keys (OpenStack / GCE / Azure) on top of
    the common request data.

    Yields:
        tuple: ``(instance, inst_args, image)`` — the not-yet-provisioned
        ``Instance`` object, the form arguments, and the source image name.

    Teardown deletes the VM from the provider if it was actually created;
    failures there are logged and swallowed so they don't mask the test result.
    """
    image = provisioning['image']['name']
    note = ('Testing provisioning from image {} to vm {} on provider {}'.format(
        image, vm_name, provider.key))
    instance = Instance.factory(vm_name, provider, image)

    inst_args = dict()

    # Base instance info, common to every provider type.
    inst_args['request'] = {
        'email': '*****@*****.**',
        'first_name': 'Image',
        'last_name': 'Provisioner',
        'notes': note,
    }
    # TODO Move this into helpers on the provider classes
    recursive_update(inst_args, {'catalog': {'vm_name': vm_name}})

    # Check whether auto-selection of environment is passed via indirect
    # parametrization; request.param only exists when the test is parametrized.
    try:
        auto = request.param
    except AttributeError:
        # in case nothing was passed just skip
        auto = False

    # All providers other than Azure share these instance/environment keys.
    # When auto-placement is on, the explicit environment picks are left blank.
    if not provider.one_of(AzureProvider):
        recursive_update(inst_args, {
            'properties': {
                'instance_type': provisioning['instance_type'],
                'guest_keypair': provisioning['guest_keypair']},
            'environment': {
                'availability_zone': None if auto else provisioning['availability_zone'],
                'security_groups': None if auto else provisioning['security_group'],
                'automatic_placement': auto
            }
        })
    # Openstack specific
    if provider.one_of(OpenStackProvider):
        recursive_update(inst_args, {
            'environment': {
                'cloud_network': None if auto else provisioning['cloud_network']
            }
        })
    # GCE specific; preemptible instances are only selectable on CFME 5.7+.
    if provider.one_of(GCEProvider):
        recursive_update(inst_args, {
            'environment': {
                'cloud_network': None if auto else provisioning['cloud_network']
            },
            'properties': {
                'boot_disk_size': provisioning['boot_disk_size'],
                'is_preemptible': True if current_version() >= "5.7" else None
            }
        })
    # Azure specific
    if provider.one_of(AzureProvider):
        # Azure uses different provisioning keys for some reason
        try:
            template = provider.data.templates.small_template
            vm_user = credentials[template.creds].username
            vm_password = credentials[template.creds].password
        except AttributeError:
            pytest.skip('Could not find small_template or credentials for {}'.format(
                provider.name))
        recursive_update(inst_args, {
            'environment': {
                'automatic_placement': auto,
                'cloud_network': None if auto else provisioning['virtual_net'],
                'cloud_subnet': None if auto else provisioning['subnet_range'],
                'security_groups': None if auto else [provisioning['network_nsg']],
                'resource_groups': None if auto else provisioning['resource_group']
            },
            'properties': {
                'instance_type': provisioning['vm_size'].lower()},
            'customize': {
                'admin_username': vm_user,
                'root_password': vm_password
            }
        })

    yield instance, inst_args, image

    try:
        if instance.does_vm_exist_on_provider():
            instance.delete_from_provider()
    except Exception as ex:
        # BUGFIX: was `ex.message`, a Python-2-only attribute that raises
        # AttributeError on Python 3 (and is absent on many exception types),
        # turning a best-effort cleanup into a second failure. Formatting the
        # exception itself is safe everywhere.
        logger.warning(
            'Exception while deleting instance fixture, continuing: {}'.format(ex))
def resetter(self):
    """Restore the toolbar view selection to 'Summary View'.

    Appliances older than 5.7 have no view selector widget, so they
    are left untouched.
    """
    if version.current_version() < '5.7':
        # no view selector before 5.7 -- nothing to reset
        return
    selector = self.view.toolbar.view_selector
    if selector.selected != 'Summary View':
        selector.select('Summary View')
app.db.fix_auth_dbyml() # start evmserverd, wait for web UI to start and try to log in try: app.start_evm_service() except ApplianceException: rc, out = app.ssh_client.run_rake_command("evm:start") assert rc == 0, "Couldn't start evmserverd: {}".format(out) app.wait_for_web_ui(timeout=600) app.db.reset_user_pass() wait_for(lambda: navigate_to(app.server, 'LoginScreen'), handle_exception=True) app.server.login(app.user) @pytest.mark.uncollectif( lambda dbversion: dbversion == 'scvmm_58' and version.current_version( ) < "5.9" or dbversion == 'ec2_5540' and version.current_version() < "5.9") @pytest.mark.parametrize('dbversion', ['ec2_5540', 'azure_5620', 'rhev_57', 'scvmm_58'], ids=['55', '56', '57', '58']) def test_db_migrate_replication(temp_appliance_remote, dbversion, temp_appliance_global_region): app = temp_appliance_remote app2 = temp_appliance_global_region # Download the database logger.info("Downloading database: {}".format(dbversion)) db_url = cfme_data['db_backups'][dbversion]['url'] url_basename = os_path.basename(db_url) rc, out = app.ssh_client.run_command('curl -o "/tmp/{}" "{}"'.format( url_basename, db_url), timeout=30) assert rc == 0, "Failed to download database: {}".format(out)
class TestPoliciesRESTAPI(object):
    """CRUD coverage for the REST API ``/api/policies`` collection."""

    @pytest.fixture(scope='function')
    def policies(self, request, appliance):
        # Create two policies via the REST API helper; `_policies` is expected
        # to register cleanup on `request` -- TODO confirm against the helper.
        num_policies = 2
        response = _policies(request, appliance.rest_api, num=num_policies)
        assert_response(appliance)
        assert len(response) == num_policies
        return response

    @pytest.mark.uncollectif(lambda: current_version() < '5.8')
    def test_create_policies(self, appliance, policies):
        """Tests create policies.

        Verifies each policy created by the fixture can be fetched back
        by id with a matching description.

        Metadata:
            test_flag: rest
        """
        for policy in policies:
            record = appliance.rest_api.collections.policies.get(id=policy.id)
            assert record.description == policy.description

    @pytest.mark.uncollectif(lambda: current_version() < '5.8')
    def test_delete_policies_from_detail_post(self, policies, appliance):
        """Tests delete policies from detail using POST method.

        Metadata:
            test_flag: rest
        """
        for policy in policies:
            policy.action.delete.POST()
            assert_response(appliance)
            # Wait until the record really disappears from the collection.
            wait_for(
                lambda: not appliance.rest_api.collections.policies.find_by(
                    name=policy.name),
                num_sec=100,
                delay=5
            )
            # A second delete must now 404 with RecordNotFound.
            with error.expected('ActiveRecord::RecordNotFound'):
                policy.action.delete.POST()
            assert_response(appliance, http_status=404)

    @pytest.mark.uncollectif(lambda: current_version() < '5.9')
    @pytest.mark.meta(blockers=[BZ(1435773, forced_streams=['5.9'])])
    def test_delete_policies_from_detail_delete(self, policies, appliance):
        """Tests delete policies from detail using DELETE method.

        Metadata:
            test_flag: rest
        """
        for policy in policies:
            policy.action.delete.DELETE()
            assert_response(appliance)
            wait_for(
                lambda: not appliance.rest_api.collections.policies.find_by(
                    name=policy.name),
                num_sec=100,
                delay=5
            )
            # Deleting an already-deleted policy must 404.
            with error.expected('ActiveRecord::RecordNotFound'):
                policy.action.delete.DELETE()
            assert_response(appliance, http_status=404)

    @pytest.mark.uncollectif(lambda: current_version() < '5.8')
    def test_delete_policies_from_collection(self, policies, appliance):
        """Tests delete policies from collection.

        Metadata:
            test_flag: rest
        """
        collection = appliance.rest_api.collections.policies
        delete_resources_from_collection(collection, policies, num_sec=100, delay=5)

    @pytest.mark.uncollectif(lambda: current_version() < '5.8')
    @pytest.mark.meta(blockers=[BZ(1435777, forced_streams=['5.8', 'upstream'])])
    @pytest.mark.parametrize(
        'from_detail', [True, False],
        ids=['from_detail', 'from_collection'])
    def test_edit_policies(self, policies, appliance, from_detail):
        """Tests edit policies.

        Edits each policy's description either one-by-one through the
        detail endpoint or in bulk through the collection action, then
        verifies the new descriptions are visible.

        Metadata:
            test_flag: rest
        """
        num_policies = len(policies)
        uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_policies)]
        new = [{'description': 'Edited Test Policy {}'.format(u)} for u in uniq]
        if from_detail:
            edited = []
            for index in range(num_policies):
                edited.append(policies[index].action.edit(**new[index]))
                assert_response(appliance)
        else:
            # Bulk edit: each payload needs the resource reference merged in.
            for index in range(num_policies):
                new[index].update(policies[index]._ref_repr())
            edited = appliance.rest_api.collections.policies.action.edit(*new)
            assert_response(appliance)
        assert len(edited) == num_policies
        for index, policy in enumerate(policies):
            # `or False` normalizes an empty (falsey) result so wait_for
            # keeps retrying until the edited description shows up.
            record, __ = wait_for(
                lambda: appliance.rest_api.collections.policies.find_by(
                    description=new[index]['description']) or False,
                num_sec=100,
                delay=5,
            )
            policy.reload()
            assert policy.description == edited[index].description == record[0].description
volume.create_backup(backup_name) backup = backup_collection.instantiate(backup_name, provider) yield backup try: if backup.exists: backup_collection.delete(backup) if volume.exists: volume.delete(wait=False) except Exception: logger.warning('Exception during volume deletion - skipping..') @pytest.mark.tier(3) @pytest.mark.uncollectif(lambda: version.current_version() < '5.8') def test_storage_volume_backup_create(backup): assert backup.exists assert backup.size == STORAGE_SIZE @pytest.mark.tier(3) @pytest.mark.uncollectif(lambda: version.current_version() < '5.8') def test_storage_volume_backup_edit_tag_from_detail(backup): # add tag with category Department and tag communication backup.add_tag('Department', 'Communication') tag_available = backup.get_tags() assert tag_available[0].display_name == 'Communication' assert tag_available[0].category.display_name == 'Department' # remove assigned tag
endpoint = VirtualCenterEndpoint(hostname=None) prov = VMwareProvider(name=fauxfactory.gen_alphanumeric(5), endpoints=endpoint) with pytest.raises(AssertionError): prov.create() view = prov.create_view(prov.endpoints_form) assert view.hostname.help_block == "Required" view = prov.create_view(InfraProviderAddView) assert not view.add.active @pytest.mark.tier(3) @pytest.mark.meta(blockers=[1209756]) @pytest.mark.uncollectif(lambda: version.current_version() > "5.4.0.0.24") def test_ip_required_validation(): """Test to validate the ip address while adding a provider""" prov = VMwareProvider(name=fauxfactory.gen_alphanumeric(5), hostname=fauxfactory.gen_alphanumeric(5), ip_address=None) with error.expected("IP Address can't be blank"): prov.create() @pytest.mark.tier(3) @test_requirements.provider_discovery def test_name_max_character_validation(request, infra_provider): """Test to validate max character for name field""" request.addfinalizer(lambda: infra_provider.delete_if_exists(cancel=False))
try: vm.delete_from_provider() except Exception: logger.exception('Exception deleting test vm "%s" on %s', vm.name, provider.name) def new_snapshot(test_vm, has_name=True, memory=False): return Vm.Snapshot( name="snpshot_{}".format(fauxfactory.gen_alphanumeric(8)) if has_name else None, description="snapshot_{}".format(fauxfactory.gen_alphanumeric(8)), memory=memory, parent_vm=test_vm) @pytest.mark.uncollectif(lambda provider: (provider.one_of(RHEVMProvider) and provider.version < 4) or current_version() < '5.8', 'Must be RHEVM provider version >= 4') def test_memory_checkbox(small_test_vm, provider, soft_assert): """Tests snapshot memory checkbox Memory checkbox should be displayed and active when VM is running ('Power On'). Memory checkbox should not be displayed when VM is stopped ('Power Off'). """ # Make sure the VM is powered on small_test_vm.power_control_from_cfme(option=small_test_vm.POWER_ON, cancel=False) # Try to create snapshot with memory on powered on VM has_name = not provider.one_of(RHEVMProvider) snapshot1 = new_snapshot(small_test_vm, has_name=has_name, memory=True) snapshot1.create() assert snapshot1.exists # Power off the VM small_test_vm.power_control_from_cfme(option=small_test_vm.POWER_OFF, cancel=False)
"service_dialogs", "service_orders", "service_requests", "service_templates", "services", "settings", "tags", "tasks", "templates", "tenants", "users", "virtual_templates", "vms", "zones" } # non-typical collections without "id" and "resources" COLLECTIONS_OMMITED = {"settings"} @pytest.mark.tier(3) @pytest.mark.parametrize("collection_name", COLLECTIONS_ALL) @pytest.mark.uncollectif( lambda collection_name: (collection_name in COLLECTIONS_OMMITED) or (collection_name in COLLECTIONS_REMOVED_IN_59 and current_version() >= "5.9") ) def test_query_simple_collections(appliance, collection_name): """This test tries to load each of the listed collections. 'Simple' collection means that they have no usable actions that we could try to run Steps: * GET /api/<collection_name> Metadata: test_flag: rest """ collection = getattr(appliance.rest_api.collections, collection_name) assert_response(appliance) collection.reload() list(collection)
evm_tail.fix_before_start() command2 = 'appliance_console_cli --extauth-opts="/authentication/{}=false"'.format( auth_type) ipa_crud.ssh_client.run_command(command2) evm_tail.validate_logs() @pytest.mark.uncollect('No IPA servers currently available') def test_appliance_console_cli_ipa_crud(ipa_creds, configured_appliance): configured_appliance.appliance_console_cli.configure_ipa( ipa_creds['ipaserver'], ipa_creds['username'], ipa_creds['password'], ipa_creds['domain'], ipa_creds['realm']) configured_appliance.appliance_console_cli.uninstall_ipa_client() @pytest.mark.uncollectif(lambda: version.current_version() < '5.9') def test_appliance_console_cli_extend_storage(unconfigured_appliance): unconfigured_appliance.ssh_client.run_command( 'appliance_console_cli -t auto') def is_storage_extended(): assert unconfigured_appliance.ssh_client.run_command( "df -h | grep /var/www/miq_tmp") wait_for(is_storage_extended) @pytest.mark.uncollectif(lambda: version.current_version() < '5.9') def test_appliance_console_cli_extend_log_storage(unconfigured_appliance): unconfigured_appliance.ssh_client.run_command( 'appliance_console_cli -l auto')
# (CustomReport, 'All'), (ComputeRate, 'All'), (Instance, 'All'), (Vm, 'VMsOnly'), # (ISODatastore, 'All'), (Server, 'Configuration'), (DatastoreCollection, 'All'), # (ConfigManager, 'All'), (Utilization, 'All'), (InfraNetworking, 'All'), (Bottlenecks, 'All') # ] LOCATIONS = [(Server, 'ControlExplorer'), (Server, 'AutomateExplorer'), (Server, 'AutomateCustomization'), (MyService, 'All'), (Server, 'ServiceCatalogsDefault'), (Server, 'Configuration'), (Utilization, 'All'), (InfraNetworking, 'All')] pytestmark = [ pytest.mark.parametrize( "location", LOCATIONS, ids=["{}-{}".format(loc[0].__name__, loc[1]) for loc in LOCATIONS]), pytest.mark.uncollectif(lambda location: location[0] == InfraNetworking and version.current_version() < '5.7') ] @pytest.mark.meta(blockers=[ BZ(1380443, forced_streams=['5.6', '5.7', '5.8'], unblock=lambda location: location[0] != Bottlenecks) ]) @pytest.mark.requirement('general_ui') @pytest.mark.tier(3) def test_pull_splitter_persistence(request, location, appliance): splitter = Splitter(parent=appliance.browser.widgetastic) request.addfinalizer(splitter.reset)
timeline_event=True, driving_event="Hourly Timer") request.addfinalizer(alert.delete) alert_profile = alert_profile_collection.create( alert_profile_class, fauxfactory.gen_alphanumeric(), alerts=[alert.description]) with update(alert_profile): alert_profile.notes = "Modified!" alert_profile.delete() @pytest.mark.tier(2) @pytest.mark.meta(blockers=[BZ(1416311, forced_streams=["5.7"])]) def test_alert_profile_assigning(alert_profile): if isinstance(alert_profile, alert_profiles.ServerAlertProfile): alert_profile.assign_to("Selected Servers", selections=["Servers", "EVM"]) else: alert_profile.assign_to("The Enterprise") @pytest.mark.tier(2) @pytest.mark.uncollectif(lambda: current_version() < "5.8") def test_control_is_ansible_playbook_available_in_actions_dropdown( action_collection): view = navigate_to(action_collection, "Add") assert "Run Ansible Playbook" in [ option.text for option in view.action_type.all_options ]
finally: candu.disable_all() appliance.server.settings.update_server_roles_db(original_roles) # Blow away all providers when done - collecting metrics for all of them is too much @pytest.yield_fixture(scope="module") def clean_setup_provider(request, provider): BaseProvider.clear_providers() setup_or_skip(request, provider) yield BaseProvider.clear_providers() @pytest.mark.uncollectif( lambda provider: current_version() < "5.7" and provider.type == 'gce') def test_metrics_collection(clean_setup_provider, provider, enable_candu): """Check the db is gathering collection data for the given provider Metadata: test_flag: metrics_collection """ metrics_tbl = store.current_appliance.db.client['metrics'] mgmt_systems_tbl = store.current_appliance.db.client['ext_management_systems'] logger.info("Fetching provider ID for %s", provider.key) mgmt_system_id = store.current_appliance.db.client.session.query(mgmt_systems_tbl).filter( mgmt_systems_tbl.name == conf.cfme_data.get('management_systems', {})[provider.key]['name'] ).first().id logger.info("ID fetched; testing metrics collection now")
class TestArbitrationRulesRESTAPI(object):
    """CRUD coverage for the REST API ``/api/arbitration_rules`` collection.

    The whole class is uncollected on 5.9+, where the collection was removed.
    """

    @pytest.fixture(scope='function')
    def arbitration_rules(self, request, appliance):
        # Create two rules via the REST API helper; `_arbitration_rules` is
        # expected to register cleanup on `request` -- TODO confirm.
        num_rules = 2
        response = _arbitration_rules(request, appliance.rest_api, num=num_rules)
        assert_response(appliance)
        assert len(response) == num_rules
        return response

    @pytest.mark.uncollectif(lambda: current_version() >= '5.9')
    def test_create_arbitration_rules(self, arbitration_rules, appliance):
        """Tests create arbitration rules.

        Verifies each rule created by the fixture can be fetched back
        by id with a matching description.

        Metadata:
            test_flag: rest
        """
        for rule in arbitration_rules:
            record = appliance.rest_api.collections.arbitration_rules.get(id=rule.id)
            assert record.description == rule.description

    # there's no test for the DELETE method as it is not working and won't be fixed, see BZ 1410504
    @pytest.mark.uncollectif(lambda: current_version() >= '5.9')
    def test_delete_arbitration_rules_from_detail_post(self, arbitration_rules, appliance):
        """Tests delete arbitration rules from detail.

        Metadata:
            test_flag: rest
        """
        for entity in arbitration_rules:
            entity.action.delete.POST()
            assert_response(appliance)
            # A second delete must 404 with RecordNotFound.
            with error.expected('ActiveRecord::RecordNotFound'):
                entity.action.delete.POST()
            assert_response(appliance, http_status=404)

    @pytest.mark.uncollectif(lambda: current_version() >= '5.9')
    def test_delete_arbitration_rules_from_collection(self, arbitration_rules, appliance):
        """Tests delete arbitration rules from collection.

        Metadata:
            test_flag: rest
        """
        collection = appliance.rest_api.collections.arbitration_rules
        collection.action.delete(*arbitration_rules)
        assert_response(appliance)
        # Deleting the same resources again must 404.
        with error.expected('ActiveRecord::RecordNotFound'):
            collection.action.delete(*arbitration_rules)
        assert_response(appliance, http_status=404)

    @pytest.mark.uncollectif(lambda: current_version() >= '5.9')
    @pytest.mark.parametrize(
        'from_detail', [True, False],
        ids=['from_detail', 'from_collection'])
    def test_edit_arbitration_rules(self, arbitration_rules, appliance, from_detail):
        """Tests edit arbitration rules.

        Edits each rule's description either one-by-one through the detail
        endpoint or in bulk through the collection action, then verifies
        the returned descriptions match the requested ones.

        Metadata:
            test_flag: rest
        """
        num_rules = len(arbitration_rules)
        uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_rules)]
        new = [{'description': 'new test admin rule {}'.format(u)} for u in uniq]
        if from_detail:
            edited = []
            for i in range(num_rules):
                edited.append(arbitration_rules[i].action.edit(**new[i]))
                assert_response(appliance)
        else:
            # Bulk edit: each payload needs the resource reference merged in.
            for i in range(num_rules):
                new[i].update(arbitration_rules[i]._ref_repr())
            edited = appliance.rest_api.collections.arbitration_rules.action.edit(*new)
            assert_response(appliance)
        assert len(edited) == num_rules
        for i in range(num_rules):
            assert edited[i].description == new[i]['description']
import pytest from cfme.containers.provider import ContainersProvider from cfme.fixtures import pytest_selenium as sel from cfme.web_ui import toolbar as tb from cfme.utils import testgen, version from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.blockers import BZ pytestmark = [ pytest.mark.uncollectif( lambda provider: version.current_version() < "5.7"), pytest.mark.usefixtures('setup_provider'), pytest.mark.tier(1) ] pytest_generate_tests = testgen.generate([ContainersProvider], scope='function') @pytest.mark.polarion('CMP-10255') @pytest.mark.meta(blockers=[BZ(1406772, forced_streams=["5.7", "5.8"])]) def test_cockpit_button_access(provider, appliance, soft_assert): """ The test verifies the existence of cockpit "Web Console" button on master node, then presses on the button and opens up the cockpit main page in a new window. Then we verify the title of the main cockpit page. The test will not work until the single sign-on bug is fixed """ collection = appliance.collections.nodes
visibility): """ Tests infra provider and its items honors tag visibility Prerequisites: Catalog, tag, role, group and restricted user should be created Steps: 1. As admin add tag 2. Login as restricted user, item is visible for user 3. As admin remove tag 4. Login as restricted user, item is not visible for user """ check_item_visibility(testing_vis_object, visibility) @pytest.mark.meta(blockers=[BZ(1441637)]) @pytest.mark.uncollectif(lambda: version.current_version() >= "5.7") @pytest.mark.parametrize('visibility', [True, False], ids=['visible', 'notVisible']) def test_tagvis_cloud_keypair(check_item_visibility, key_pair, visibility): """ Tests infra provider and its items honors tag visibility Prerequisites: Catalog, tag, role, group and restricted user should be created Additionally cloud key_pair should be created Steps: 1. As admin add tag to key_pair 2. Login as restricted user, key_pair is visible for user 3. As admin remove tag from key_pair 4. Login as restricted user, key_pair is not visible for user """ check_item_visibility(key_pair, visibility)
import pytest

from cfme.middleware.provider import MiddlewareProvider
from cfme.utils import testgen
from cfme.utils import version
from cfme.utils.version import current_version
from cfme.middleware.server import MiddlewareServer
from cfme.middleware.messaging import MiddlewareMessaging
from cfme.middleware.datasource import MiddlewareDatasource
from cfme.middleware.domain import MiddlewareDomain
from cfme.middleware.server_group import MiddlewareServerGroup
from cfme.middleware.deployment import MiddlewareDeployment

# Middleware support landed in CFME 5.7; skip collection entirely before that.
pytestmark = [
    pytest.mark.uncollectif(lambda: current_version() < '5.7'),
]

pytest_generate_tests = testgen.generate([MiddlewareProvider], scope='function')

# Maps a topology element kind to the set of display-type names it may show
# as. NOTE(review): "vms"/"containers" entries suggest non-middleware objects
# can appear in the topology too -- confirm against the topology page code.
TOPOLOGY_TYPES = {"servers": {"MiddlewareServer"},
                  "deployments": {"MiddlewareDeployment",
                                  "MiddlewareDeploymentWar",
                                  "MiddlewareDeploymentEar"},
                  "datasources": {"MiddlewareDatasource"},
                  "messaging": {"MiddlewareMessaging"},
                  "vms": {"Vm"},
                  "containers": {"Container"},
                  "domains": {"MiddlewareDomain"},
                  "server_groups": {"MiddlewareServerGroup"}}
import pytest from cfme.utils.version import current_version from cfme.utils.appliance.implementations.ui import navigate_to from cfme.containers.provider import ContainersProvider, ContainersTestItem NUM_OF_DEFAULT_LOG_ROUTES = 2 pytestmark = [ pytest.mark.uncollectif(lambda provider: current_version() < "5.8"), pytest.mark.usefixtures('setup_provider'), pytest.mark.tier(1), pytest.mark.provider([ContainersProvider], scope='function') ] TEST_ITEMS = [ pytest.mark.polarion('CMP-10634')(ContainersTestItem( ContainersProvider, 'CMP-10634')) # TODO Add Node back into the list when other classes are updated to use WT views and widgets. # pytest.mark.polarion('CMP-10635')(ContainersTestItem(Node, 'CMP-10635')) ] @pytest.fixture(scope="function") def logging_routes(provider): routers = [ router for router in provider.mgmt.o_api.get('route')[1]['items'] if "logging" in router["metadata"]["name"] ] all_routers_up = all([
login_page = navigate_to(appliance.server, 'LoginScreen') login_page.login_admin() logged_in_page = navigate_to(appliance.server, 'LoggedIn') return logged_in_page.is_displayed def set_to_default(page, my_settings): view = navigate_to(my_settings, 'Visual') view.tabs.visual.start_page.show_at_login.fill(page) view.tabs.visual.save.click() @test_requirements.settings @pytest.mark.parametrize( "start_page", LANDING_PAGES if current_version() > "5.10" else LANDING_PAGES_5_9, scope="module", ) def test_landing_page_admin(start_page, appliance, my_settings, request): """ This test checks the functioning of the landing page; 'Start at Login' option on 'Visual' tab of setting page for administrator. This test case doesn't check the exact page but verifies that all the landing page options works properly. Polarion: assignee: pvala casecomponent: Configuration caseimportance: medium initialEstimate: 1/8h tags: settings """
server_log_depot = appliance.server.collect_logs with update(server_log_depot): server_log_depot.depot_type = 'anon_ftp' server_log_depot.depot_name = fauxfactory.gen_alphanumeric() server_log_depot.uri = fauxfactory.gen_alphanumeric() view = navigate_to(server_log_depot, 'DiagnosticsCollectLogs') # check button is enable after adding log depot assert view.toolbar.collect.item_enabled('Collect all logs') is True server_log_depot.clear() # check button is disable after removing log depot assert view.toolbar.collect.item_enabled('Collect all logs') is False @pytest.mark.uncollectif(lambda from_slave: from_slave and BZ.bugzilla.get_bug(1443927).is_opened and current_version() >= '5.8') @pytest.mark.meta(blockers=[BZ(1436367, forced_streams=["5.8"])]) @pytest.mark.parametrize('from_slave', [True, False], ids=['from_slave', 'from_master']) @pytest.mark.parametrize('zone_collect', [True, False], ids=['zone_collect', 'server_collect']) @pytest.mark.parametrize('collect_type', ['all', 'current'], ids=['collect_all', 'collect_current']) @pytest.mark.tier(3) def test_collect_multiple_servers(log_depot, temp_appliance_preconfig, depot_machine_ip, request, configured_external_appliance, zone_collect, collect_type, from_slave): appliance = temp_appliance_preconfig log_depot.machine_ip = depot_machine_ip collect_logs = ( appliance.server.zone.collect_logs if zone_collect else appliance.server.collect_logs) request.addfinalizer(collect_logs.clear)
from collections import namedtuple from copy import copy from fauxfactory import gen_alphanumeric, gen_integer import pytest from cfme.containers.provider import ContainersProvider from cfme.utils.version import current_version from cfme.common.provider_views import ContainerProvidersView pytestmark = [ pytest.mark.uncollectif(lambda: current_version() < "5.8.0.3"), pytest.mark.provider([ContainersProvider], scope='module') ] alphanumeric_name = gen_alphanumeric(10) long_alphanumeric_name = gen_alphanumeric(100) integer_name = str(gen_integer(0, 100000000)) provider_names = alphanumeric_name, integer_name, long_alphanumeric_name AVAILABLE_SEC_PROTOCOLS = ('SSL trusting custom CA', 'SSL without validation', 'SSL') DEFAULT_SEC_PROTOCOLS = ('SSL trusting custom CA', 'SSL without validation', 'SSL') checked_item = namedtuple('TestItem', ['default_sec_protocol', 'metrics_sec_protocol']) TEST_ITEMS = ( checked_item('SSL trusting custom CA', 'SSL trusting custom CA'), checked_item('SSL trusting custom CA', 'SSL without validation'), checked_item('SSL trusting custom CA', 'SSL'), checked_item('SSL without validation', 'SSL trusting custom CA'),
"service_dialogs", "service_orders", "service_requests", "service_templates", "services", "settings", "tags", "tasks", "templates", "tenants", "users", "virtual_templates", "vms", "zones" } # non-typical collections without "id" and "resources" COLLECTIONS_OMMITED = {"settings"} @pytest.mark.tier(3) @pytest.mark.parametrize("collection_name", COLLECTIONS_ALL) @pytest.mark.uncollectif( lambda collection_name: (collection_name in COLLECTIONS_OMMITED) or (collection_name in COLLECTIONS_ADDED_IN_58 and current_version() < "5.8") or (collection_name in COLLECTIONS_REMOVED_IN_59 and current_version() >= "5.9") ) def test_query_simple_collections(appliance, collection_name): """This test tries to load each of the listed collections. 'Simple' collection means that they have no usable actions that we could try to run Steps: * GET /api/<collection_name> Metadata: test_flag: rest """ collection = getattr(appliance.rest_api.collections, collection_name) assert_response(appliance) collection.reload() list(collection)
import pytest from cfme.containers.node import Node from cfme.containers.node import NodeCollection from cfme.containers.provider import ContainersProvider from cfme.containers.provider import ContainersTestItem from cfme.markers.env_markers.provider import providers from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.providers import ProviderFilter from cfme.utils.version import current_version pytestmark = [ pytest.mark.uncollectif(lambda provider: current_version() < "5.8"), pytest.mark.usefixtures('setup_provider'), pytest.mark.tier(1), pytest.mark.provider(gen_func=providers, filters=[ProviderFilter(classes=[ContainersProvider], required_flags=['cmqe_logging'])], scope='function')] TEST_ITEMS = [ ContainersTestItem(ContainersProvider, 'test_logging_containerprovider', collection_obj=None), ContainersTestItem(Node, 'test_logging_node', collection_obj=NodeCollection)] NUM_OF_DEFAULT_LOG_ROUTES = 2 @pytest.fixture(scope="function") def kibana_logging_url(provider): """ This fixture verifies the correct setup of the Kibana logging namespace and returns the Kibana logging router url """
from cfme.middleware.provider.hawkular import HawkularProvider from cfme.middleware.server import MiddlewareServer from cfme.middleware.datasource import MiddlewareDatasource from cfme.middleware.deployment import MiddlewareDeployment from cfme.middleware.domain import MiddlewareDomain from cfme.middleware.server_group import MiddlewareServerGroup from cfme.middleware.messaging import MiddlewareMessaging from random_methods import get_random_object, get_random_domain from random_methods import get_random_server, get_random_server_group from cfme.utils import version from cfme.utils.version import current_version pytestmark = [ pytest.mark.usefixtures('setup_provider'), pytest.mark.uncollectif(lambda: current_version() < '5.7'), pytest.mark.provider([HawkularProvider], scope="function"), ] FILETYPES = ["txt", "csv", "pdf"] @pytest.mark.parametrize("filetype", FILETYPES) @pytest.mark.parametrize("objecttype", [MiddlewareDatasource, MiddlewareDeployment, HawkularProvider, MiddlewareServer, MiddlewareDomain, MiddlewareMessaging]) @pytest.mark.uncollectif(lambda filetype: filetype in {"pdf"} and current_version() == version.UPSTREAM) def test_download_lists_base(filetype, objecttype): """ Download the items from base lists. """ objecttype.download(filetype)
class TestConditionsRESTAPI(object):
    """CRUD coverage for the REST API ``/api/conditions`` collection."""

    @pytest.fixture(scope='function')
    def conditions(self, request, appliance):
        # Create two conditions via the REST API helper; `_conditions` is
        # expected to register cleanup on `request` -- TODO confirm.
        num_conditions = 2
        response = _conditions(request, appliance.rest_api, num=num_conditions)
        assert_response(appliance)
        assert len(response) == num_conditions
        return response

    @pytest.mark.uncollectif(lambda: current_version() < '5.8')
    def test_create_conditions(self, appliance, conditions):
        """Tests create conditions.

        Verifies each condition created by the fixture can be fetched
        back by id with a matching description.

        Metadata:
            test_flag: rest
        """
        for condition in conditions:
            record = appliance.rest_api.collections.conditions.get(id=condition.id)
            assert record.description == condition.description

    @pytest.mark.uncollectif(lambda: current_version() < '5.8')
    @pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])
    def test_delete_conditions_from_detail(self, conditions, appliance, method):
        """Tests delete conditions from detail.

        Parametrized over both HTTP verbs the detail delete action accepts.

        Metadata:
            test_flag: rest
        """
        for condition in conditions:
            if method == 'post':
                del_action = condition.action.delete.POST
            else:
                del_action = condition.action.delete.DELETE
            del_action()
            assert_response(appliance)
            # Wait until the record really disappears from the collection.
            wait_for(
                lambda: not appliance.rest_api.collections.conditions.find_by(
                    name=condition.name),
                num_sec=100,
                delay=5
            )
            # A second delete must now 404 with RecordNotFound.
            with error.expected('ActiveRecord::RecordNotFound'):
                del_action()
            assert_response(appliance, http_status=404)

    @pytest.mark.uncollectif(lambda: current_version() < '5.8')
    def test_delete_conditions_from_collection(self, conditions, appliance):
        """Tests delete conditions from collection.

        Metadata:
            test_flag: rest
        """
        collection = appliance.rest_api.collections.conditions
        delete_resources_from_collection(collection, conditions, num_sec=100, delay=5)

    @pytest.mark.uncollectif(lambda: current_version() < '5.8')
    @pytest.mark.parametrize(
        'from_detail', [True, False],
        ids=['from_detail', 'from_collection'])
    def test_edit_conditions(self, conditions, appliance, from_detail):
        """Tests edit conditions.

        Edits each condition's description either one-by-one through the
        detail endpoint or in bulk through the collection action, then
        verifies the new descriptions are visible.

        Metadata:
            test_flag: rest
        """
        num_conditions = len(conditions)
        uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_conditions)]
        new = [{'description': 'Edited Test Condition {}'.format(u)} for u in uniq]
        if from_detail:
            edited = []
            for index in range(num_conditions):
                edited.append(conditions[index].action.edit(**new[index]))
                assert_response(appliance)
        else:
            # Bulk edit: each payload needs the resource reference merged in.
            for index in range(num_conditions):
                new[index].update(conditions[index]._ref_repr())
            edited = appliance.rest_api.collections.conditions.action.edit(*new)
            assert_response(appliance)
        assert len(edited) == num_conditions
        for index, condition in enumerate(conditions):
            # `or False` normalizes an empty (falsey) result so wait_for
            # keeps retrying until the edited description shows up.
            record, __ = wait_for(
                lambda: appliance.rest_api.collections.conditions.find_by(
                    description=new[index]['description']) or False,
                num_sec=100,
                delay=5,
            )
            condition.reload()
            assert condition.description == edited[index].description == record[0].description
import pytest # from cfme.middleware.datasource import MiddlewareDatasource # from cfme.middleware.messaging import MiddlewareMessaging from cfme.middleware.provider.hawkular import HawkularProvider # from cfme.middleware.server import MiddlewareServer # from random_methods import get_random_object from cfme.utils.version import current_version pytestmark = [ pytest.mark.usefixtures('setup_provider'), pytest.mark.uncollectif(lambda: current_version() < '5.7'), pytest.mark.provider([HawkularProvider], scope="function"), ] # Removed until Utilization is refactored # @pytest.mark.parametrize("object_type", [MiddlewareServer, # MiddlewareDatasource, # MiddlewareMessaging]) # def test_object_utilization(provider, object_type): # """Tests utilization charts in all pages that has utilization charts # # Steps: # * Select a utilization object of provided object_type randomly from database # * Run `validate_utilization` with `utilization_obj` input # """ # utilization_obj = get_random_object(provider=provider, objecttype=object_type, load_from="db") # validate_utilization(utilization_obj=utilization_obj.utilization) # #
def new_snapshot(test_vm, has_name=True, memory=False, create_description=True): name = fauxfactory.gen_alphanumeric(8) return InfraVm.Snapshot( name="snpshot_{}".format(name) if has_name else None, description="snapshot_{}".format(name) if create_description else None, memory=memory, parent_vm=test_vm) @pytest.mark.rhv2 @pytest.mark.uncollectif(lambda provider: (provider.one_of( RHEVMProvider) and provider.version < 4) or current_version() < '5.8', 'Must be RHEVM provider version >= 4') def test_memory_checkbox(small_test_vm, provider, soft_assert): """Tests snapshot memory checkbox Memory checkbox should be displayed and active when VM is running ('Power On'). Memory checkbox should not be displayed when VM is stopped ('Power Off'). """ # Make sure the VM is powered on small_test_vm.power_control_from_cfme(option=small_test_vm.POWER_ON, cancel=False) # Try to create snapshot with memory on powered on VM has_name = not provider.one_of(RHEVMProvider) snapshot1 = new_snapshot(small_test_vm, has_name=has_name, memory=True) snapshot1.create() assert snapshot1.exists
from cfme.utils import conf, error, version pytestmark = pytest.mark.usefixtures('browser') @test_requirements.drift @pytest.mark.tier(1) @pytest.mark.sauce @pytest.mark.smoke @pytest.mark.parametrize('context, method', [(ViaUI, 'click_on_login'), (ViaUI, 'press_enter_after_password'), (ViaUI, '_js_auth_fn'), (ViaSSUI, 'click_on_login'), (ViaSSUI, 'press_enter_after_password')]) @pytest.mark.uncollectif(lambda context: context == ViaSSUI and version.current_version() == version.UPSTREAM) def test_login(context, method, appliance): """ Tests that the appliance can be logged into and shows dashboard page. """ with appliance.context.use(context): logged_in_page = appliance.server.login() assert logged_in_page.is_displayed logged_in_page.logout() logged_in_page = appliance.server.login_admin(method=method) assert logged_in_page.is_displayed logged_in_page.logout() @test_requirements.drift @pytest.mark.tier(2)
temp_appliance_preconfig_funcscope_upgrade.db.extend_partition() urls = process_url(cfme_data['basic_info'][update_url]) output = build_file(urls) with tempfile.NamedTemporaryFile('w') as f: f.write(output) f.flush() os.fsync(f.fileno()) temp_appliance_preconfig_funcscope_upgrade.ssh_client.put_file( f.name, '/etc/yum.repos.d/update.repo') return temp_appliance_preconfig_funcscope_upgrade @pytest.mark.ignore_stream('5.5', 'upstream') @pytest.mark.tier(2) @pytest.mark.uncollectif( lambda db_version: db_version >= version.current_version( ) or version.get_stream(db_version) == version.current_stream()) @pytest.mark.meta( blockers=[BZ(1354466, unblock=lambda db_url: 'ldap' not in db_url)]) def test_db_migrate(app_creds, temp_appliance_extended_db, db_url, db_version, db_desc): app = temp_appliance_extended_db # Download the database logger.info("Downloading database: {}".format(db_desc)) url_basename = os_path.basename(db_url) # MBU Backup is to large for /tmp loc = "/tmp/" if db_desc != "MBU Backup" else "/" rc, out = app.ssh_client.run_command('curl -o "{}{}" "{}"'.format( loc, url_basename, db_url), timeout=30) assert rc == 0, "Failed to download database: {}".format(out)
server_log_depot = appliance.server.collect_logs with update(server_log_depot): server_log_depot.depot_type = 'anon_ftp' server_log_depot.depot_name = fauxfactory.gen_alphanumeric() server_log_depot.uri = fauxfactory.gen_alphanumeric() view = navigate_to(server_log_depot, 'DiagnosticsCollectLogs') # check button is enable after adding log depot assert view.toolbar.collect.item_enabled('Collect all logs') is True server_log_depot.clear() # check button is disable after removing log depot assert view.toolbar.collect.item_enabled('Collect all logs') is False @pytest.mark.uncollectif(lambda from_slave: from_slave and BZ.bugzilla.get_bug( 1443927).is_opened and current_version() >= '5.8') @pytest.mark.meta(blockers=[BZ(1436367, forced_streams=["5.8"])]) @pytest.mark.parametrize('from_slave', [True, False], ids=['from_slave', 'from_master']) @pytest.mark.parametrize('zone_collect', [True, False], ids=['zone_collect', 'server_collect']) @pytest.mark.parametrize('collect_type', ['all', 'current'], ids=['collect_all', 'collect_current']) @pytest.mark.tier(3) def test_collect_multiple_servers(log_depot, temp_appliance_preconfig, depot_machine_ip, request, configured_external_appliance, zone_collect, collect_type, from_slave): appliance = temp_appliance_preconfig log_depot.machine_ip = depot_machine_ip
"Provisioning failed with the message {}".format(provision_request.row.last_message.text) instance.wait_to_appear(timeout=800) provider.refresh_provider_relationships() logger.info("Refreshing provider relationships and power states") refresh_timer = RefreshTimer(time_for_refresh=300) wait_for(provider.is_refreshed, [refresh_timer], message="is_refreshed", num_sec=1000, delay=60, handle_exception=True) soft_assert(instance.does_vm_exist_on_provider(), "Instance wasn't provisioned") @pytest.mark.uncollectif(lambda provider: not provider.one_of(GCEProvider) or current_version() < "5.7") def test_gce_preemtible_provision(provider, testing_instance, soft_assert): instance, inst_args, image = testing_instance instance.create(**inst_args) instance.wait_to_appear(timeout=800) provider.refresh_provider_relationships() logger.info("Refreshing provider relationships and power states") refresh_timer = RefreshTimer(time_for_refresh=300) wait_for(provider.is_refreshed, [refresh_timer], message="is_refreshed", num_sec=1000, delay=60, handle_exception=True) soft_assert( 'Yes' in instance.get_detail(properties=("Properties", "Preemptible")), "GCE Instance isn't Preemptible")
class TestTagsViaREST(object):
    """REST API tests for tags: CRUD, (bulk) assign/unassign, and tag queries."""

    # Collections that support the bulk assign_tags/unassign_tags actions.
    COLLECTIONS_BULK_TAGS = ("services", "vms")

    def _service_body(self, **kwargs):
        # Builds a minimal service payload with a unique name/description;
        # kwargs override any of the generated fields.
        uid = fauxfactory.gen_alphanumeric(5)
        body = {
            'name': 'test_rest_service_{}'.format(uid),
            'description': 'Test REST Service {}'.format(uid),
        }
        body.update(kwargs)
        return body

    def _create_services(self, request, rest_api, num=3):
        # create simple service using REST API
        bodies = [self._service_body() for __ in range(num)]
        collection = rest_api.collections.services
        new_services = collection.action.create(*bodies)
        assert_response(rest_api)
        # Keep a copy for the finalizer: the list object returned above may be
        # mutated by the tests.
        new_services_backup = list(new_services)

        @request.addfinalizer
        def _finished():
            collection.reload()
            ids = [service.id for service in new_services_backup]
            delete_entities = [
                service for service in collection if service.id in ids
            ]
            if delete_entities:
                collection.action.delete(*delete_entities)

        return new_services

    @pytest.fixture(scope="function")
    def services(self, request, appliance):
        return self._create_services(request, appliance.rest_api)

    @pytest.fixture(scope="function")
    def categories(self, request, appliance, num=3):
        return _categories(request, appliance.rest_api, num)

    @pytest.fixture(scope="function")
    def tags(self, request, appliance, categories):
        return _tags(request, appliance.rest_api, categories)

    # Module-scoped variants are shared across the assign/unassign tests.
    @pytest.fixture(scope="module")
    def services_mod(self, request, appliance):
        return self._create_services(request, appliance.rest_api)

    @pytest.fixture(scope="module")
    def categories_mod(self, request, appliance, num=3):
        return _categories(request, appliance.rest_api, num)

    @pytest.fixture(scope="module")
    def tags_mod(self, request, appliance, categories_mod):
        return _tags(request, appliance.rest_api, categories_mod)

    @pytest.fixture(scope="module")
    def tenants(self, request, appliance):
        return _tenants(request, appliance.rest_api, num=1)

    @pytest.fixture(scope="module")
    def a_provider(self, request):
        return _a_provider(request)

    @pytest.fixture(scope="module")
    def service_templates(self, request, appliance):
        return _service_templates(request, appliance)

    @pytest.fixture(scope="module")
    def vm(self, request, a_provider, appliance):
        return _vm(request, a_provider, appliance.rest_api)

    @pytest.mark.tier(2)
    def test_edit_tags(self, appliance, tags):
        """Tests tags editing from collection.

        Metadata:
            test_flag: rest
        """
        collection = appliance.rest_api.collections.tags
        tags_len = len(tags)
        tags_data_edited = []
        for tag in tags:
            tags_data_edited.append({
                "href": tag.href,
                "name": "test_tag_{}".format(fauxfactory.gen_alphanumeric().lower()),
            })
        edited = collection.action.edit(*tags_data_edited)
        assert_response(appliance, results_num=tags_len)
        for index in range(tags_len):
            # "%/<name>" matches any category prefix in the fully-qualified
            # tag name. The lambda reads `index` at call time, which is safe
            # because wait_for runs within this loop iteration.
            record, _ = wait_for(
                lambda: collection.find_by(
                    name="%/{}".format(tags_data_edited[index]["name"])) or False,
                num_sec=180,
                delay=10)
            assert record[0].id == edited[index].id
            assert record[0].name == edited[index].name

    @pytest.mark.tier(2)
    def test_edit_tag(self, appliance, tags):
        """Tests tag editing from detail.

        Metadata:
            test_flag: rest
        """
        edited = []
        new_names = []
        for tag in tags:
            new_name = 'test_tag_{}'.format(fauxfactory.gen_alphanumeric())
            new_names.append(new_name)
            edited.append(tag.action.edit(name=new_name))
            assert_response(appliance)
        for index, name in enumerate(new_names):
            record, _ = wait_for(
                lambda: appliance.rest_api.collections.tags.find_by(
                    name="%/{}".format(name)) or False,
                num_sec=180,
                delay=10)
            assert record[0].id == edited[index].id
            assert record[0].name == edited[index].name

    @pytest.mark.tier(3)
    @pytest.mark.parametrize("method", ["post", "delete"], ids=["POST", "DELETE"])
    def test_delete_tags_from_detail(self, appliance, tags, method):
        """Tests deleting tags from detail.

        Metadata:
            test_flag: rest
        """
        for tag in tags:
            tag.action.delete(force_method=method)
            assert_response(appliance)
            # A second delete of the same tag must 404.
            with error.expected("ActiveRecord::RecordNotFound"):
                tag.action.delete(force_method=method)
            assert_response(appliance, http_status=404)

    @pytest.mark.tier(3)
    def test_delete_tags_from_collection(self, appliance, tags):
        """Tests deleting tags from collection.

        Metadata:
            test_flag: rest
        """
        appliance.rest_api.collections.tags.action.delete(*tags)
        assert_response(appliance)
        with error.expected("ActiveRecord::RecordNotFound"):
            appliance.rest_api.collections.tags.action.delete(*tags)
        assert_response(appliance, http_status=404)

    @pytest.mark.tier(3)
    def test_create_tag_with_wrong_arguments(self, appliance):
        """Tests creating tags with missing category "id", "href" or "name".

        Metadata:
            test_flag: rest
        """
        # Payload deliberately omits any category reference.
        data = {
            "name": "test_tag_{}".format(fauxfactory.gen_alphanumeric().lower()),
            "description": "test_tag_{}".format(fauxfactory.gen_alphanumeric().lower())
        }
        with error.expected(
                "BadRequestError: Category id, href or name needs to be specified"):
            appliance.rest_api.collections.tags.action.create(data)
        assert_response(appliance, http_status=400)

    @pytest.mark.tier(3)
    @pytest.mark.meta(blockers=[BZ(1451025, forced_streams=['5.7'])])
    @pytest.mark.parametrize("collection_name", [
        "clusters", "hosts", "data_stores", "providers", "resource_pools",
        "services", "service_templates", "tenants", "vms"
    ])
    def test_assign_and_unassign_tag(self, appliance, tags_mod, a_provider, services_mod,
                                     service_templates, tenants, vm, collection_name):
        """Tests assigning and unassigning tags.

        Metadata:
            test_flag: rest
        """
        collection = getattr(appliance.rest_api.collections, collection_name)
        collection.reload()
        if not collection.all:
            pytest.skip("No available entity in {} to assign tag".format(
                collection_name))
        entity = collection[-1]
        tag = tags_mod[0]
        entity.tags.action.assign(tag)
        assert_response(appliance)
        entity.reload()
        assert tag.id in [t.id for t in entity.tags.all]
        entity.tags.action.unassign(tag)
        assert_response(appliance)
        entity.reload()
        assert tag.id not in [t.id for t in entity.tags.all]

    @pytest.mark.uncollectif(lambda: current_version() < '5.8')
    @pytest.mark.tier(3)
    @pytest.mark.parametrize("collection_name", COLLECTIONS_BULK_TAGS)
    def test_bulk_assign_and_unassign_tag(self, appliance, tags_mod, services_mod, vm,
                                          collection_name):
        """Tests bulk assigning and unassigning tags.

        Metadata:
            test_flag: rest
        """
        collection = getattr(appliance.rest_api.collections, collection_name)
        collection.reload()
        entities = collection.all[-2:]

        # Alternate between href- and id-based tag references.
        new_tags = []
        for index, tag in enumerate(tags_mod):
            identifiers = [{'href': tag._href}, {'id': tag.id}]
            new_tags.append(identifiers[index % 2])
        # add some more tags in supported formats
        new_tags.append({'category': 'department', 'name': 'finance'})
        new_tags.append({'name': '/managed/department/presales'})
        tags_ids = {t.id for t in tags_mod}
        tags_ids.add(
            appliance.rest_api.collections.tags.get(
                name='/managed/department/finance').id)
        tags_ids.add(
            appliance.rest_api.collections.tags.get(
                name='/managed/department/presales').id)
        tags_count = len(new_tags) * len(entities)

        response = collection.action.assign_tags(*entities, tags=new_tags)
        assert_response(appliance, results_num=tags_count)
        for index, entity in enumerate(entities):
            entity.tags.reload()
            # NOTE(review): this ASSIGNS rather than asserts; it looks like it
            # was meant to be `assert response[index].id == entity.id` — confirm.
            response[index].id = entity.id
            assert tags_ids.issubset({t.id for t in entity.tags.all})

        collection.action.unassign_tags(*entities, tags=new_tags)
        assert_response(appliance, results_num=tags_count)
        for entity in entities:
            entity.tags.reload()
            # After unassigning, none of the known tag ids should remain.
            assert len({t.id for t in entity.tags.all} - tags_ids) == entity.tags.subcount

    @pytest.mark.uncollectif(lambda: current_version() < '5.8')
    @pytest.mark.tier(3)
    @pytest.mark.parametrize("collection_name", COLLECTIONS_BULK_TAGS)
    def test_bulk_assign_and_unassign_invalid_tag(self, appliance, services_mod, vm,
                                                  collection_name):
        """Tests bulk assigning and unassigning invalid tags.

        Metadata:
            test_flag: rest
        """
        collection = getattr(appliance.rest_api.collections, collection_name)
        collection.reload()
        entities = collection.all[-2:]

        new_tags = ['invalid_tag1', 'invalid_tag2']
        tags_count = len(new_tags) * len(entities)
        # Record the pre-existing tag counts so we can verify nothing changed.
        tags_per_entities_count = []
        for entity in entities:
            entity.tags.reload()
            tags_per_entities_count.append(entity.tags.subcount)

        def _check_tags_counts():
            # Invalid tags must not alter any entity's tag count.
            for index, entity in enumerate(entities):
                entity.tags.reload()
                assert entity.tags.subcount == tags_per_entities_count[index]

        collection.action.assign_tags(*entities, tags=new_tags)
        assert_response(appliance, success=False, results_num=tags_count)
        _check_tags_counts()

        collection.action.unassign_tags(*entities, tags=new_tags)
        assert_response(appliance, success=False, results_num=tags_count)
        _check_tags_counts()

    @pytest.mark.uncollectif(lambda: current_version() < '5.9')
    @pytest.mark.tier(3)
    def test_query_by_multiple_tags(self, appliance, tags, services):
        """Tests support for multiple tag specification in query.

        Metadata:
            test_flag: rest
        """
        collection = appliance.rest_api.collections.services
        collection.reload()
        new_tags = [tag._ref_repr() for tag in tags]
        # Leave the first service untagged so the query has something to exclude.
        tagged_services = services[1:]

        # assign tags to selected services
        collection.action.assign_tags(*tagged_services, tags=new_tags)
        assert_response(appliance)

        # get only services that has all the tags assigned
        by_tag = ','.join([tag.name.replace('/managed', '') for tag in tags])
        query_results = collection.query_string(by_tag=by_tag)

        assert len(tagged_services) == len(query_results)
        result_ids = {item.id for item in query_results}
        tagged_ids = {item.id for item in tagged_services}
        assert result_ids == tagged_ids
class TestProvidersRESTAPI(object):
    """REST API tests for provider custom attributes (add/edit/delete)."""

    @pytest.yield_fixture(scope="function")
    def custom_attributes(self, appliance, infra_provider):
        # Adds two custom attributes to the infra provider and removes any
        # that the test did not already delete during teardown.
        provider = appliance.rest_api.collections.providers.get(
            name=infra_provider.name)
        body = []
        attrs_num = 2
        for _ in range(attrs_num):
            uid = fauxfactory.gen_alphanumeric(5)
            body.append({
                'name': 'ca_name_{}'.format(uid),
                'value': 'ca_value_{}'.format(uid)
            })
        attrs = provider.custom_attributes.action.add(*body)
        assert len(attrs) == attrs_num

        yield attrs, provider

        provider.custom_attributes.reload()
        ids = [attr.id for attr in attrs]
        delete_attrs = [
            attr for attr in provider.custom_attributes if attr.id in ids
        ]
        if delete_attrs:
            provider.custom_attributes.action.delete(*delete_attrs)

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    def test_add_custom_attributes(self, appliance, custom_attributes):
        """Test adding custom attributes to provider using REST API.

        Metadata:
            test_flag: rest
        """
        attributes, provider = custom_attributes
        for attr in attributes:
            record = provider.custom_attributes.get(id=attr.id)
            assert appliance.rest_api.response.status_code == 200
            assert record.name == attr.name
            assert record.value == attr.value

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    def test_delete_custom_attributes_from_detail_post(self, appliance, custom_attributes):
        """Test deleting custom attributes from detail using POST method.

        Metadata:
            test_flag: rest
        """
        attributes, _ = custom_attributes
        for entity in attributes:
            entity.action.delete.POST()
            assert appliance.rest_api.response
            # A second delete of the same attribute must 404.
            with error.expected('ActiveRecord::RecordNotFound'):
                entity.action.delete.POST()
            assert appliance.rest_api.response.status_code == 404

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.meta(
        blockers=[BZ(1422596, forced_streams=['5.7', '5.8', 'upstream'])])
    @pytest.mark.tier(3)
    @test_requirements.rest
    def test_delete_custom_attributes_from_detail_delete(
            self, appliance, custom_attributes):
        """Test deleting custom attributes from detail using DELETE method.

        Metadata:
            test_flag: rest
        """
        attributes, _ = custom_attributes
        for entity in attributes:
            entity.action.delete.DELETE()
            assert appliance.rest_api.response
            with error.expected('ActiveRecord::RecordNotFound'):
                entity.action.delete.DELETE()
            assert appliance.rest_api.response.status_code == 404

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    def test_delete_custom_attributes_from_collection(self, appliance, custom_attributes):
        """Test deleting custom attributes from collection using REST API.

        Metadata:
            test_flag: rest
        """
        attributes, provider = custom_attributes
        provider.custom_attributes.action.delete(*attributes)
        assert appliance.rest_api.response.status_code == 200
        with error.expected('ActiveRecord::RecordNotFound'):
            provider.custom_attributes.action.delete(*attributes)
        assert appliance.rest_api.response.status_code == 404

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    def test_delete_single_custom_attribute_from_collection(
            self, appliance, custom_attributes):
        """Test deleting single custom attribute from collection using REST API.

        Metadata:
            test_flag: rest
        """
        attributes, provider = custom_attributes
        attribute = attributes[0]
        provider.custom_attributes.action.delete(attribute)
        assert appliance.rest_api.response.status_code == 200
        with error.expected('ActiveRecord::RecordNotFound'):
            provider.custom_attributes.action.delete(attribute)
        assert appliance.rest_api.response.status_code == 404

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    @pytest.mark.parametrize('from_detail', [True, False],
                             ids=['from_detail', 'from_collection'])
    def test_edit_custom_attributes(self, appliance, custom_attributes, from_detail):
        """Test editing custom attributes using REST API.

        Metadata:
            test_flag: rest
        """
        attributes, provider = custom_attributes
        response_len = len(attributes)
        body = []
        for _ in range(response_len):
            uid = fauxfactory.gen_alphanumeric(5)
            body.append({
                'name': 'ca_name_{}'.format(uid),
                'value': 'ca_value_{}'.format(uid),
                'section': 'metadata'
            })
        if from_detail:
            # Edit one attribute at a time via its detail endpoint.
            edited = []
            for i in range(response_len):
                edited.append(attributes[i].action.edit(**body[i]))
                assert appliance.rest_api.response.status_code == 200
        else:
            # Bulk edit via the subcollection; each payload needs a ref.
            for i in range(response_len):
                body[i].update(attributes[i]._ref_repr())
            edited = provider.custom_attributes.action.edit(*body)
            assert appliance.rest_api.response.status_code == 200
        assert len(edited) == response_len
        for i in range(response_len):
            assert edited[i].name == body[i]['name']
            assert edited[i].value == body[i]['value']

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    @pytest.mark.parametrize('from_detail', [True, False],
                             ids=['from_detail', 'from_collection'])
    def test_edit_custom_attributes_bad_section(self, appliance, custom_attributes,
                                                from_detail):
        """Test that editing custom attributes using REST API and adding
        invalid section fails.

        Metadata:
            test_flag: rest
        """
        attributes, provider = custom_attributes
        response_len = len(attributes)
        body = []
        for _ in range(response_len):
            body.append({'section': 'bad_section'})
        if from_detail:
            for i in range(response_len):
                with error.expected('Api::BadRequestError'):
                    attributes[i].action.edit(**body[i])
                assert appliance.rest_api.response.status_code == 400
        else:
            for i in range(response_len):
                body[i].update(attributes[i]._ref_repr())
            with error.expected('Api::BadRequestError'):
                provider.custom_attributes.action.edit(*body)
            assert appliance.rest_api.response.status_code == 400

    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.tier(3)
    @test_requirements.rest
    def test_add_custom_attributes_bad_section(self, appliance, infra_provider):
        """Test that adding custom attributes with invalid section
        to provider using REST API fails.

        Metadata:
            test_flag: rest
        """
        provider = appliance.rest_api.collections.providers.get(
            name=infra_provider.name)
        uid = fauxfactory.gen_alphanumeric(5)
        body = {
            'name': 'ca_name_{}'.format(uid),
            'value': 'ca_value_{}'.format(uid),
            'section': 'bad_section'
        }
        with error.expected('Api::BadRequestError'):
            provider.custom_attributes.action.add(body)
        assert appliance.rest_api.response.status_code == 400