def auth_user_data(provider_key, user_type):
    """Grab user data attrdict from auth provider's user data in yaml

    Expected formatting of yaml containing user data:
        test_users:
        - username: ldapuser2
          password: mysecretpassworddontguess
          fullname: Ldap User2
          groups:
          - customgroup1
          providers:
          - freeipa01
          user_types:
          - uid

    Only include user data for users where the user_type matches that under test

    Assert the data isn't empty, and skip the test if so
    """
    try:
        data = [user
                for user in auth_data.test_data.test_users
                if provider_key in user.providers and user_type in user.user_types]
        assert data
    except (KeyError, AttributeError, AssertionError):
        logger.warning('Exception fetching auth_user_data from key %s and type %s',
                       provider_key, user_type)
        return None
    return data
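# Hedged usage sketch for auth_user_data() above (the provider key and
# user_type values are illustrative assumptions): per the docstring, callers
# are expected to skip when no matching users exist, since None is returned.
users = auth_user_data('freeipa01', 'uid')
if users is None:
    pytest.skip('No test_users matching the auth provider and user_type in yaml')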
def from_config(cls, prov_config, prov_key):
    endpoints = {
        RHOSEndpoint.name: RHOSEndpoint(**prov_config['endpoints'][RHOSEndpoint.name])
    }

    event_endpoint_config = prov_config['endpoints'].get(EventsEndpoint.name, {})
    if event_endpoint_config:
        if (event_endpoint_config.get('event_stream') == 'AMQP' and
                BZ(1618700, forced_streams=["5.9", "5.10", "upstream"]).blocks):
            logger.warning('Skipping AMQP event config due to BZ 1618700')
        else:
            endpoints[EventsEndpoint.name] = EventsEndpoint(**event_endpoint_config)

    from cfme.utils.providers import get_crud
    infra_prov_key = prov_config.get('infra_provider_key')
    infra_provider = get_crud(infra_prov_key) if infra_prov_key else None

    return cls.appliance.collections.cloud_providers.instantiate(
        prov_class=cls,
        name=prov_config['name'],
        api_port=prov_config['port'],
        api_version=prov_config.get('api_version', 'Keystone v2'),
        endpoints=endpoints,
        zone=prov_config['server_zone'],
        key=prov_key,
        keystone_v3_domain_id=prov_config.get('domain_id'),
        sec_protocol=prov_config.get('sec_protocol', "Non-SSL"),
        tenant_mapping=prov_config.get('tenant_mapping', False),
        infra_provider=infra_provider)
def fix_missing_hostname(appliance):
    """Fix for hostname missing from the /etc/hosts file

    Note: Affects RHOS-based appliances but can't hurt the others so
          it's applied on all.
    """
    if isinstance(appliance, DummyAppliance) or appliance.is_dev:
        return
    ssh_client = appliance.ssh_client
    logger.info("Checking appliance's /etc/hosts for its own hostname")
    if ssh_client.run_command('grep $(hostname) /etc/hosts').failed:
        logger.info('Setting appliance hostname')
        host_out = appliance.ssh_client.run_command('host {}'.format(appliance.hostname))
        if host_out.success and 'domain name pointer' in host_out.output:
            # resolvable and reverse lookup, hostname property is an IP addr
            fqdn = host_out.output.split(' ')[-1].rstrip('\n').rstrip('.')
        elif host_out.success and 'has address' in host_out.output:
            # resolvable and address returned, hostname property is name
            fqdn = appliance.hostname
        else:
            # not resolvable, just use hostname output through appliance_console_cli to modify
            ret = ssh_client.run_command('hostname')
            logger.warning('Unable to resolve hostname, using output from `hostname`: %s',
                           ret.output)
            fqdn = ret.output.rstrip('\n')

        logger.info('Setting hostname: %s', fqdn)
        appliance.appliance_console_cli.set_hostname(fqdn)
        if ssh_client.run_command('grep $(hostname) /etc/hosts').failed:
            logger.error('Failed to mangle /etc/hosts')
def delete_entity(entity):
    # TODO: replace this with neutron client request
    try:
        if entity.exists:
            entity.delete()
    except Exception:
        logger.warning('Exception during network entity deletion - skipping..')
def test_start_from_suspend(self, appliance, testing_vm, ensure_vm_suspended, soft_assert):
    """Tests start from suspend

    Polarion:
        assignee: ghubale
        initialEstimate: 1/6h
        casecomponent: Infra
        caseimportance: high
        tags: power
    """
    try:
        testing_vm.provider.refresh_provider_relationships()
        testing_vm.wait_for_vm_state_change(
            desired_state=testing_vm.STATE_SUSPENDED, timeout=450, from_details=True)
    except TimedOutError:
        if testing_vm.provider.one_of(RHEVMProvider):
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            raise
    view = navigate_to(testing_vm, "Details")
    last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
    testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False,
                                       from_details=True)
    view.flash.assert_success_message(text='Start initiated', partial=True)
    if_scvmm_refresh_provider(testing_vm.provider)
    testing_vm.wait_for_vm_state_change(
        desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
    wait_for_last_boot_timestamp_refresh(testing_vm, last_boot_time, timeout=600)
    soft_assert(testing_vm.mgmt.is_running, "vm not running")
def resolve_blocker(self, blocker, version=None, ignore_bugs=None, force_block_streams=None):
    # ignore_bugs is mutable but is not mutated here! The same applies to force_block_streams.
    force_block_streams = force_block_streams or []
    ignore_bugs = set([]) if not ignore_bugs else ignore_bugs
    if isinstance(blocker, BugWrapper):
        bug = blocker
    else:
        bug = self.get_bug(blocker)
    if version is None:
        version = current_version()
    if version == LATEST:
        version = bug.product.latest_version
    is_upstream = version == bug.product.latest_version
    variants = self.get_bug_variants(bug)
    filtered = set([])
    version_series = ".".join(str(version).split(".")[:2])
    for variant in sorted(variants, key=lambda variant: variant.id):
        if variant.id in ignore_bugs:
            continue
        if variant.version is not None and variant.version > version:
            continue
        if variant.release_flag is not None and version.is_in_series(variant.release_flag):
            logger.info('Found matching bug for %d by release - #%d', bug.id, variant.id)
            filtered.clear()
            filtered.add(variant)
            break
        elif is_upstream and variant.release_flag == 'future':
            # It is an upstream bug
            logger.info('Found a matching upstream bug #%d for bug #%d', variant.id, bug.id)
            return variant
        elif (isinstance(variant.version, Version) and
                isinstance(variant.target_release, Version) and
                (variant.version.is_in_series(version_series) or
                 variant.target_release.is_in_series(version_series))):
            filtered.add(variant)
        else:
            logger.warning(
                "ATTENTION!!: No release flags, wrong versions, ignoring %s", variant.id)
    if not filtered:
        # No appropriate bug was found
        for forced_stream in force_block_streams:
            # Find out if we force this bug.
            if version.is_in_series(forced_stream):
                return bug
        # No bug, yipee :)
        return None
    # First, use versions
    for bug in filtered:
        if (isinstance(bug.version, Version) and
                isinstance(bug.target_release, Version) and
                check_fixed_in(bug.fixed_in, version_series) and
                (bug.version.is_in_series(version_series) or
                 bug.target_release.is_in_series(version_series))):
            return bug
    # Otherwise prefer release_flag
    for bug in filtered:
        if bug.release_flag and version.is_in_series(bug.release_flag):
            return bug
    return None
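# Hedged usage sketch for resolve_blocker() above (the `bz` instance name and
# the bug ids are illustrative assumptions): the method returns the variant
# that blocks the given version, or None when nothing blocks.
blocking = bz.resolve_blocker(1618700, ignore_bugs={1234567})
if blocking is not None:
    pytest.skip('Blocked by bug #{}'.format(blocking.id))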
def upload_template(self):
    cmd_args = [
        "ovftool --noSSLVerify",
        # prefer the datastore from template_upload
        "--datastore={}".format(self.provider_data.provisioning.datastore),  # move later
        "--name={}".format(self.temp_template_name),
        "--vCloudTemplate=True",
        "--overwrite",
        self.raw_image_url,
        "'vi://{}:{}@{}/{}/host/{}/'".format(self.mgmt.username,
                                             self.mgmt.password,
                                             self.mgmt.hostname,
                                             self.template_upload_data.datacenter,
                                             self.template_upload_data.cluster)
    ]
    if 'proxy' in self.template_upload_data.keys():
        cmd_args.append("--proxy={}".format(self.template_upload_data.proxy))

    command = ' '.join(cmd_args)
    for attempt in range(2):  # allow one retry, as the warning below implies
        # run command against the tool client machine
        upload_result = self.execute_ssh_command(command, client_args=self.tool_client_args)
        if upload_result.success:
            return True
        else:
            logger.error('Failure running ovftool: %s', upload_result.output)
            logger.warning('Retrying template upload via ovftool')
    return False
def test_labels_remove(provider, soft_assert, random_labels):
    # Removing the labels
    for instance, label_name, label_value, status_code, _ in random_labels:
        if status_code:
            instance.remove_label(label_name)
        else:
            logger.warning('Cannot remove label ({} = {}) for {} {}. (failed to add it previously)'
                           .format(label_name, label_value,
                                   instance.__class__.__name__, instance.name))
    provider.refresh_provider_relationships()
    # Verify that the labels removed successfully from UI:
    for instance, label_name, label_value, status_code, _ in random_labels:
        if status_code:
            soft_assert(
                wait_for(
                    lambda: not check_labels_in_ui(instance, label_name, label_value),
                    num_sec=180, delay=10,
                    message='Verifying label ({} = {}) for {} {} removed'
                            .format(label_name, label_value,
                                    instance.__class__.__name__, instance.name),
                    silent_failure=True),
                'Label ({} = {}) for {} {} found in UI (but should be removed).'
                .format(label_name, label_value, instance.__class__.__name__, instance.name)
            )
def test_suspend(self, appliance, testing_vm, verify_vm_running, soft_assert):
    """Tests suspend

    Metadata:
        test_flag: power_control, provision
    """
    testing_vm.wait_for_vm_state_change(
        desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
    last_boot_time = testing_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    testing_vm.power_control_from_cfme(option=testing_vm.SUSPEND, cancel=False,
                                       from_details=True)

    view = appliance.browser.create_view(BaseLoggedInPage)
    view.flash.assert_success_message(text='Suspend initiated', partial=True)

    if_scvmm_refresh_provider(testing_vm.provider)
    try:
        testing_vm.wait_for_vm_state_change(
            desired_state=testing_vm.STATE_SUSPENDED, timeout=450, from_details=True)
    except TimedOutError as e:
        if testing_vm.provider.one_of(RHEVMProvider):
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            raise e
    soft_assert(testing_vm.provider.mgmt.is_vm_suspended(testing_vm.name), "vm not suspended")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if not testing_vm.provider.one_of(RHEVMProvider):
        new_last_boot_time = testing_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(new_last_boot_time == last_boot_time,
                    "ui: {} should == orig: {}".format(new_last_boot_time, last_boot_time))
def all(self):
    """returning all backup objects for respective storage manager type"""
    view = navigate_to(self, 'All')
    view.toolbar.view_selector.select("List View")
    backups = []
    try:
        if 'provider' in self.filters:
            for item in view.entities.elements.read():
                if self.filters.get('provider').name in item['Storage Manager']:
                    backups.append(self.instantiate(name=item['Name'],
                                                    provider=self.filters.get('provider')))
        else:
            for item in view.entities.elements.read():
                provider_name = item['Storage Manager'].split()[0]
                provider = get_crud_by_name(provider_name)
                backups.append(self.instantiate(name=item['Name'], provider=provider))
    except NoSuchElementException:
        if backups:
            # In the middle of reading, that may be bad
            logger.error(
                'VolumeBackupCollection: NoSuchElementException in the middle of entities read')
            raise
        else:
            # This is probably fine, just warn
            logger.warning('The volume backup table is probably not present (=empty)')
    return backups
def _wait_for_state_refresh():
    try:
        state = view.entities.summary('Power Management').get_text_of('State Changed On')
        return state_change_time != state
    except NameError:
        logger.warning('NameError caught while waiting for state change, continuing')
        return False
def is_successfully_finished(self, silent_failure=False, *tasks):
    view = navigate_to(self, self.tab)
    tab_view = getattr(view.tabs, self.tab.lower())
    rows = []
    # expected_status also supports regular expression patterns
    expected_status = re.compile('finished', re.IGNORECASE)
    for task in tasks:
        try:
            rows.append(list(tab_view.table.rows(task_name=task, state=expected_status)).pop())
        except IndexError:
            logger.warning('IndexError exception suppressed when searching for task row,'
                           ' no match found.')
            return False
    for row in rows:
        message = row.message.text.lower()
        if row[1].browser.is_displayed('i[@class="pficon pficon-error-circle-o"]',
                                       parent=row[1]):
            if silent_failure:
                logger.warning("Task {} error: {}".format(row.task_name.text, message))
                return False
            elif 'timed out' in message:
                raise TimedOutError("Task {} timed out: {}".format(row.task_name.text, message))
            else:
                raise Exception("Task {} error: {}".format(row.task_name.text, message))
    return True
def pytest_generate_tests(metafunc):
    argnames, argvalues, idlist = testgen.providers_by_class(metafunc, [InfraProvider],
                                                             required_fields=['hosts'])
    argnames = argnames + ['host_type', 'host_name']

    new_argvalues = []
    new_idlist = []
    for index, argvalue_tuple in enumerate(argvalues):
        args = dict(zip(argnames, argvalue_tuple))
        prov_hosts = args['provider'].data.hosts

        for test_host in prov_hosts:
            if not test_host.get('test_fleece', False):
                continue
            if test_host.get('type') not in HOST_TYPES:
                logger.warning('host type must be set to [{}] for smartstate analysis tests'
                               .format('|'.join(HOST_TYPES)))
                continue

            new_argvalue_list = [args['provider'], test_host['type'], test_host['name']]
            test_id = '{}-{}-{}'.format(args['provider'].key, test_host['type'],
                                        test_host['name'])
            new_argvalues.append(new_argvalue_list)
            new_idlist.append(test_id)
    testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
def test_contents(appliance, soft_assert):
    """Test title of each document.

    Polarion:
        assignee: anikifor
        casecomponent: WebUI
        caseimportance: low
        initialEstimate: 1/10h
    """
    view = navigate_to(appliance.server, 'Documentation')
    cur_ver = appliance.version
    for doc_type, title in doc_titles.items():
        doc_widget = getattr(view.links, doc_type, None)
        if not doc_widget:
            logger.warning('Skipping contents check for document: "{}: {}", no widget to read'
                           .format(doc_type, title))
            continue  # without the widget there is no link to follow

        href = view.browser.get_attribute(attr='href', locator=doc_widget.link.locator)
        data = requests.get(href, verify=False)
        pdf_titlepage_text_low = pdf_get_text(BytesIO(data.content), [0]).lower()
        if not isinstance(pdf_titlepage_text_low, str):
            pdf_titlepage_text_low = pdf_titlepage_text_low.decode('utf-8', 'replace')
        # don't include the word 'guide'
        expected = [title]
        if cur_ver == version.LATEST:
            expected.append('manageiq')
        else:
            expected.append('cloudforms')
            assert cur_ver.product_version() is not None
            expected.append(cur_ver.product_version())

        for exp_str in expected:
            soft_assert(exp_str in pdf_titlepage_text_low,
                        "{} not in {}".format(exp_str, pdf_titlepage_text_low))
def all(self):
    """returning all Snapshot objects for respective storage manager type"""
    view = navigate_to(self, 'All')
    view.toolbar.view_selector.select("List View")
    snapshots = []
    try:
        if 'provider' in self.filters:
            for item in view.entities.elements.read():
                if self.filters.get('provider').name in item['Storage Manager']:
                    snapshots.append(self.instantiate(name=item['Name'],
                                                      provider=self.filters.get('provider')))
        else:
            for item in view.entities.elements.read():
                provider_name = item['Storage Manager'].split()[0]
                provider = get_crud_by_name(provider_name)
                snapshots.append(self.instantiate(name=item['Name'], provider=provider))
    except NoSuchElementException:
        if snapshots:
            logger.error('VolumeSnapshotCollection: '
                         'NoSuchElementException in the middle of entities read')
        else:
            logger.warning('The snapshot table is probably not present or empty')
    return snapshots
def provisioning(provider):
    try:
        return provider.data['provisioning']
    except KeyError:
        logger.warning('Tests using the provisioning fixture '
                       'should include required_fields in their ProviderFilter marker')
        pytest.skip('Missing "provisioning" field in provider data')
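# Hedged sketch of the marker the warning above refers to (the exact marker
# signature is an assumption, modeled on the providers_by_class call elsewhere
# in this collection): declaring the field as required filters out unsuitable
# providers up front, so the fixture never has to skip at runtime.
pytestmark = [
    pytest.mark.provider([InfraProvider], required_fields=['provisioning']),
]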
def test_suspend(self, appliance, testing_vm, ensure_vm_running, soft_assert):
    """Tests suspend

    Metadata:
        test_flag: power_control, provision

    Polarion:
        assignee: ghubale
        initialEstimate: 1/6h
    """
    testing_vm.wait_for_vm_state_change(
        desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
    view = navigate_to(testing_vm, "Details")
    last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
    testing_vm.power_control_from_cfme(option=testing_vm.SUSPEND, cancel=False,
                                       from_details=True)
    view.flash.assert_success_message(text='Suspend initiated', partial=True)
    if_scvmm_refresh_provider(testing_vm.provider)
    try:
        testing_vm.wait_for_vm_state_change(
            desired_state=testing_vm.STATE_SUSPENDED, timeout=450, from_details=True)
    except TimedOutError as e:
        if testing_vm.provider.one_of(RHEVMProvider):
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            raise e
    soft_assert(testing_vm.mgmt.is_suspended, "vm not suspended")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if not testing_vm.provider.one_of(RHEVMProvider):
        new_last_boot_time = view.entities.summary("Power Management").get_text_of(
            "Last Boot Time")
        soft_assert(new_last_boot_time == last_boot_time,
                    "ui: {} should == orig: {}".format(new_last_boot_time, last_boot_time))
def fqdn_appliance(appliance, preconfigured):
    sp = SproutClient.from_config()
    available_providers = set(sp.call_method('available_providers'))
    required_providers = set(cfme_data['fqdn_providers'])
    usable_providers = available_providers & required_providers
    version = appliance.version.vstring
    stream = get_stream(appliance.version)
    for provider in usable_providers:
        try:
            apps, pool_id = sp.provision_appliances(
                count=1, preconfigured=preconfigured, version=version, stream=stream,
                provider=provider
            )
            break
        except Exception as e:
            logger.warning("Couldn't provision appliance with following error:")
            logger.warning("{}".format(e))
            continue
    else:
        logger.error("Couldn't provision an appliance at all")
        raise SproutException('No provision available')
    yield apps[0]

    apps[0].ssh_client.close()
    sp.destroy_pool(pool_id)
def test_start_from_suspend(
        self, appliance, testing_vm, verify_vm_suspended, soft_assert):
    """Tests start from suspend

    Metadata:
        test_flag: power_control, provision
    """
    try:
        testing_vm.provider.refresh_provider_relationships()
        testing_vm.wait_for_vm_state_change(
            desired_state=testing_vm.STATE_SUSPENDED, timeout=450, from_details=True)
    except TimedOutError:
        if testing_vm.provider.one_of(RHEVMProvider):
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            raise
    last_boot_time = testing_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False,
                                       from_details=True)

    view = appliance.browser.create_view(BaseLoggedInPage)
    view.flash.assert_success_message(text='Start initiated', partial=True)

    if_scvmm_refresh_provider(testing_vm.provider)
    testing_vm.wait_for_vm_state_change(
        desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
    wait_for_last_boot_timestamp_refresh(testing_vm, last_boot_time, timeout=600)
    soft_assert(testing_vm.provider.mgmt.is_vm_running(testing_vm.name), "vm not running")
def backup(appliance, provider):
    volume_collection = appliance.collections.volumes
    storage_manager = '{} Cinder Manager'.format(provider.name)
    backup_collection = appliance.collections.volume_backups.filter({'provider': provider})

    # create new volume
    volume = volume_collection.create(name=fauxfactory.gen_alpha(),
                                      storage_manager=storage_manager,
                                      tenant=provider.data['provisioning']['cloud_tenant'],
                                      size=STORAGE_SIZE,
                                      provider=provider)

    # create new backup for created volume
    if volume.status == 'available':
        backup_name = fauxfactory.gen_alpha()
        volume.create_backup(backup_name)
        backup = backup_collection.instantiate(backup_name, provider)
        yield backup
    else:
        pytest.skip('Skipping volume backup tests, provider side volume creation fails')

    try:
        if backup.exists:
            backup_collection.delete(backup)
        if volume.exists:
            volume.delete(wait=False)
    except Exception:
        logger.warning('Exception during volume deletion - skipping..')
def test_links(appliance):
    """Test whether the PDF documents are present.

    Polarion:
        assignee: anikifor
        casecomponent: WebUI
        caseimportance: low
        initialEstimate: 1/20h
    """
    view = navigate_to(appliance.server, 'Documentation')
    for link_widget in view.links.sub_widgets:
        # link_widget is nested view, we care about 'link' widget here
        try:
            href = view.browser.get_attribute(attr='href', locator=link_widget.link.locator)
        except AttributeError:
            logger.warning('Skipping link check, No link widget defined for {}'.format(
                link_widget.TEXT))
            continue
        # Check the link is reachable
        try:
            resp = requests.head(href, verify=False, timeout=10)
        except (requests.Timeout, requests.ConnectionError) as ex:
            pytest.fail(str(ex))

        assert 200 <= resp.status_code < 400, \
            "Unable to access URL '{}' from doc link ({})".format(href, link_widget.read())
def test_contents(appliance, soft_assert):
    """Test title of each document."""
    view = navigate_to(appliance.server, 'Documentation')
    cur_ver = appliance.version
    for doc_type, title in doc_titles.items():
        doc_widget = getattr(view.links, doc_type, None)
        if not doc_widget:
            logger.warning('Skipping contents check for document: "{}: {}", no widget to read'
                           .format(doc_type, title))
            continue  # without the widget there is no link to follow

        href = view.browser.get_attribute(attr='href', locator=doc_widget.link.locator)
        data = requests.get(href, verify=False)
        pdf_titlepage_text_low = pdf_get_text(StringIO(data.content), [0]).lower()
        # don't include the word 'guide'
        expected = [title]
        if cur_ver == version.LATEST:
            expected.append('manageiq')
        else:
            expected.append('cloudforms')
            maj_min = '{}.{}'.format(cur_ver.version[0], cur_ver.version[1])
            expected.append(version.get_product_version(maj_min))

        for exp_str in expected:
            soft_assert(exp_str in pdf_titlepage_text_low,
                        "{} not in {}".format(exp_str, pdf_titlepage_text_low))
def _group_cleanup(group):
    try:
        appliance.server.login_admin()
        group.delete()
    except Exception:
        logger.warning('Exception deleting group for cleanup, continuing.')
def cleanup_vm(vm_name, provider):
    try:
        logger.info('Cleaning up VM %s on provider %s', vm_name, provider.key)
        provider.mgmt.delete_vm(vm_name)
    except Exception:
        # The mgmt_sys classes raise Exception :\
        logger.warning('Failed to clean up VM %s on provider %s', vm_name, provider.key)
def _wait_for_state_refresh():
    try:
        navigate_to(instance, 'Details')
        return state_change_time != instance.get_detail(
            properties=("Power Management", "State Changed On"))
    except NameError:
        logger.warning('NameError caught while waiting for state change, continuing')
        return False
def test_group_roles(appliance, setup_aws_auth_provider, group_name, role_access, context,
                     soft_assert):
    """Basic default AWS_IAM group role auth + RBAC test

    Validates expected menu and submenu names are present for default AWS IAM groups
    NOTE: Only tests vertical navigation tree at the moment, not accordions within the page

    Polarion:
        assignee: apagac
        caseimportance: medium
        initialEstimate: 1/4h
        tags: rbac
    """
    group_access = role_access[group_name]

    try:
        iam_group_name = group_name + '_aws_iam'
        username = credentials[iam_group_name]['username']
        password = credentials[iam_group_name]['password']
        fullname = credentials[iam_group_name]['fullname']
    except KeyError:
        pytest.fail('No match in credentials file for group "{}"'.format(iam_group_name))

    with appliance.context.use(context):
        # fullname overrides user.name attribute, but doesn't impact login with username credential
        user = appliance.collections.users.simple_user(username, password, fullname=fullname)
        with user:
            view = navigate_to(appliance.server, 'LoggedIn')
            assert appliance.server.current_full_name() == user.name
            assert group_name.lower() in [name.lower()
                                          for name in appliance.server.group_names()]
            nav_visible = view.navigation.nav_item_tree()

            # RFE BZ 1526495 shows up as an extra requests link in nav
            # TODO BZ remove assert skip when BZ is fixed in 59z
            bz = BZ(1526495, forced_streams=['5.8', '5.9'],
                    unblock=lambda group_name: group_name not in
                    ['evmgroup-user', 'evmgroup-approver', 'evmgroup-desktop',
                     'evmgroup-vm_user', 'evmgroup-administrator',
                     'evmgroup-super_administrator'])
            for area in group_access.keys():
                # using .get() on nav_visibility because it may not have `area` key
                diff = DeepDiff(group_access[area], nav_visible.get(area, {}),
                                verbose_level=0,  # If any higher, will flag string vs unicode
                                ignore_order=True)
                nav_extra = diff.get('iterable_item_added')
                if nav_extra and 'Requests' in nav_extra.values() and bz.blocks:
                    logger.warning('Skipping RBAC verification for group "%s" in "%s" due to %r',
                                   group_name, area, bz)
                    continue
                else:
                    soft_assert(diff == {},
                                '{g} RBAC mismatch (expected first) for {a}: {d}'
                                .format(g=group_name, a=area, d=diff))

    appliance.server.login_admin()
    assert user.exists
def patch_file(self, local_path, remote_path, md5=None):
    """ Patches a single file on the appliance

    Args:
        local_path: Path to patch (diff) file
        remote_path: Path to file to be patched (on the appliance)
        md5: MD5 checksum of the original file to check if it has changed

    Returns:
        True if changes were applied, False if patching was not necessary

    Note:
        If there is a .bak file present and the file-to-be-patched was
        not patched by the current patch-file, it will be used to restore it first.
        Recompiling assets and restarting appropriate services might be required.
    """
    logger.info('Patching %s', remote_path)

    # Upload diff to the appliance
    diff_remote_path = os_path.join('/tmp/', os_path.basename(remote_path))
    self.put_file(local_path, diff_remote_path)

    # If already patched with current file, exit
    logger.info('Checking if already patched')
    result = self.run_command(
        'patch {} {} -f --dry-run -R'.format(remote_path, diff_remote_path))
    if result.success:
        return False

    # If we have a .bak file available, it means the file is already patched
    # by some older patch; in that case, replace the file-to-be-patched by the .bak first
    logger.info("Checking if %s.bak is available", remote_path)
    result = self.run_command('test -e {}.bak'.format(remote_path))
    if result.success:
        logger.info("%s.bak found; using it to replace %s", remote_path, remote_path)
        result = self.run_command('mv {}.bak {}'.format(remote_path, remote_path))
        if result.failed:
            raise Exception(
                "Unable to replace {} with {}.bak".format(remote_path, remote_path))
    else:
        logger.info("%s.bak not found", remote_path)

    # If not patched and there's MD5 checksum available, check it
    if md5:
        logger.info("MD5 sum check in progress for %s", remote_path)
        result = self.run_command('md5sum -c - <<< "{} {}"'.format(md5, remote_path))
        if result.success:
            logger.info('MD5 sum check result: file not changed')
        else:
            logger.warning('MD5 sum check result: file has been changed!')

    # Create the backup and patch
    result = self.run_command(
        'patch {} {} -f -b -z .bak'.format(remote_path, diff_remote_path))
    if result.failed:
        raise Exception("Unable to patch file {}: {}".format(remote_path, result.output))
    return True
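# Hedged usage sketch for patch_file() above (the paths and checksum are
# illustrative assumptions, not from the source): apply a local diff on the
# appliance and, per the docstring note, restart the affected service only
# when the patch was actually applied.
changed = ssh_client.patch_file(
    local_path='fixes/settings.diff',
    remote_path='/var/www/miq/vmdb/config/settings.yml',
    md5='d41d8cd98f00b204e9800998ecf8427e')
if changed:
    ssh_client.run_command('systemctl restart evmserverd')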
def vm_name(provider):
    # also tries to delete the VM that gets made with this name
    vm_name = random_vm_name('scat')
    yield vm_name

    try:
        logger.info('Cleaning up VM %s on provider %s', vm_name, provider.key)
        provider.mgmt.delete_vm(vm_name)
    except Exception:
        # The mgmt_sys classes raise Exception :\
        logger.warning('Failed to clean up VM %s on provider %s', vm_name, provider.key)
def test_shelve_instance(new_instance):
    new_instance.power_control_from_cfme(from_details=True, option=OpenStackInstance.SHELVE)
    try:
        new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_SHELVED)
    except TimedOutError:
        logger.warning("Timeout when waiting for instance state: 'shelved'. Skipping")
    view = navigate_to(new_instance, 'Details')
    state = view.entities.summary('Power Management').get_text_of('Power State')
    assert state in (OpenStackInstance.STATE_SHELVED_OFFLOAD, OpenStackInstance.STATE_SHELVED)
def set_auth_mode(self, reset=False, auth_mode='Database', **kwargs):
    """ Set up authentication mode

    Args:
        reset: Set True, to reset all changes for the page. Default value: False
        auth_mode: The authentication mode - Database, Amazon, LDAP, LDAPS, External (httpd)
        kwargs: A dict of keyword arguments used to initialize the auth mode
                if you don't want to use the yaml settings;
                auth_mode='your_mode_type_here' is mandatory in your kwargs,
                ex. auth_settings.set_auth_mode(
                    reset=True, auth_mode='Amazon', access_key=key, secret_key=secret_key)
    """
    self.auth_mode = auth_mode
    fill_data = {'auth_mode': auth_mode}
    settings = {}  # for auth_settings
    if kwargs and auth_mode != 'Database':
        for key, value in kwargs.items():
            if key not in ['default_groups']:
                if key == 'hosts':
                    assert len(value) <= 3, "You can specify only 3 LDAP hosts"
                    for enum, host in enumerate(value):
                        settings["ldap_host_{}".format(enum + 1)] = host
                elif key == 'user_type':
                    settings[key] = self.user_type_dict[value]
                else:
                    settings[key] = value
            else:
                settings[key] = value
    else:
        logger.warning('set_auth_mode called with kwargs and Database, ignoring kwargs')
    fill_data['auth_settings'] = settings

    view = navigate_to(self.appliance.server, 'Authentication', wait_for_view=True)
    changed = view.form.fill(fill_data)
    if reset:
        view.reset.click()
        view.flash.assert_message('All changes have been reset')
        # Can't save the form if nothing was changed
        logger.info('Authentication form reset, returning')
        return
    elif changed:
        if self.auth_mode == 'Amazon':
            view.form.auth_settings.validate.click()
            view.flash.assert_no_error()
        view.save.click()
        # TODO move this flash message assert into test and only assert no error
        flash_message = (
            'Authentication settings saved for {} Server "{} [{}]" in Zone "{}"'.format(
                self.appliance.product_name, self.appliance.server.name,
                self.appliance.server.sid, self.appliance.server.zone.name))
        view.flash.assert_message(flash_message)
    else:
        logger.info('No authentication settings changed, not saving form.')
def create(self, element_data):
    for element in element_data:
        view = navigate_to(self, "Add")
        dragged_element = element.get('element_information').get('choose_type')
        view.dd.drag_and_drop(dragged_element, self.parent.box_label)
        view.fill(element)
        view.ele_save_button.click()
    if view.save_button.disabled:
        logger.warning('Save button disabled during Dialog Element creation')
        return False
    else:
        view.save_button.click()
        view.flash.wait_displayed(timeout=5)
        view.flash.assert_no_error()
        return self.instantiate(element_data=element_data)
def is_displayed(self):
    expected_name = self.context['object'].name
    expected_provider = self.context['object'].provider.name
    try:
        # Not displayed when the instance is archived
        relationships = self.entities.summary('Relationships')
        relationship_provider_name = relationships.get_text_of('Cloud Provider')
    except (NameError, NoSuchElementException):
        logger.warning('No "Cloud Provider" Relationship, assume instance view not displayed')
        raise NotImplementedError("This view has no unique markers for is_displayed check")
    return (self.in_cloud_instance and
            self.entities.title.text == 'Instance "{}"'.format(expected_name) and
            relationship_provider_name == expected_provider)
def tenant(provider, setup_provider, appliance):
    collection = appliance.collections.cloud_tenants
    tenant = collection.create(name=fauxfactory.gen_alphanumeric(8), provider=provider)
    yield tenant

    try:
        if tenant.exists:
            tenant.delete()
    except Exception:
        logger.warning('Exception while attempting to delete tenant fixture, continuing')
    finally:
        if tenant.name in provider.mgmt.list_tenant():
            provider.mgmt.remove_tenant(tenant.name)
def all(self):
    """returning all containers objects for respective Cloud Provider"""
    # TODO(ndhandre): Need to implement with REST.
    view = navigate_to(self, 'All')
    containers = []
    try:
        for item in view.entities.elements.read():
            if self.filters.get('provider').name in item['Cloud Provider']:
                containers.append(self.instantiate(key=item['Key'],
                                                   provider=self.filters.get('provider')))
    except NoSuchElementException:
        logger.warning('The containers table is probably not present or empty')
    return containers
def test_shelve_offload_instance(new_instance):
    new_instance.power_control_from_cfme(from_details=True, option=OpenStackInstance.SHELVE)
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_SHELVED)
    try:
        new_instance.power_control_from_cfme(from_details=True,
                                             option=OpenStackInstance.SHELVE_OFFLOAD)
    except TimeoutException:
        logger.warning("Timeout when initiating power state 'Shelve Offload'. Skipping")

    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_SHELVED_OFFLOAD)
    state = new_instance.get_detail(properties=('Power Management', 'Power State'))
    assert state == OpenStackInstance.STATE_SHELVED_OFFLOAD
def report():
    # TODO parameterize on path, for now test infrastructure reports
    path = ["Configuration Management", "Hosts", "Virtual Infrastructure Platforms"]
    report = CannedSavedReport.new(path)
    report_time = report.datetime
    logger.debug('Created report for path {} and time {}'.format(path, report_time))
    yield report

    try:
        report.delete()
    except Exception:
        logger.warning('Failed to delete report for path {} and time {}'.format(
            path, report_time))
def unset_ownership(self):
    """Remove user ownership and return group to EvmGroup-Administrator"""
    view = navigate_to(self, 'SetOwnership')
    fill_result = view.form.fill({
        'user_name': '<No Owner>',
        'group_name': 'EvmGroup-administrator'
    })
    if fill_result:
        view.form.save_button.click()
        msg = 'Ownership saved for selected {}'.format(self.VM_TYPE)
    else:
        view.form.cancel_button.click()
        logger.warning('No change during unset_ownership')
        msg = 'Set Ownership was cancelled by the user'

    view = self.create_view(navigator.get_class(self, 'Details').VIEW)
    view.flash.assert_success_message(msg)
def incremental_backup(volume_backup, provider):
    backup_collection = provider.appliance.collections.volume_backups.filter(
        {'provider': provider})
    volume = volume_backup.appliance.collections.volumes.instantiate(
        volume_backup.volume, provider)

    # create incremental backup for a volume with existing backup
    backup_name = fauxfactory.gen_alpha()
    volume.create_backup(backup_name, incremental=True)
    incremental_backup = backup_collection.instantiate(backup_name, provider)
    yield incremental_backup

    try:
        if incremental_backup.exists:
            backup_collection.delete(incremental_backup)
    except Exception:
        logger.warning('Exception during volume backup deletion - skipping..')
def retire_ec2_s3_vm(provider):
    """Fixture for creating an S3 backed paravirtual instance, template is a public ec2 AMI

    Args:
        provider: provider crud object from fixture
    """
    vm = VM.factory(random_vm_name('retire'), provider,
                    template_name='amzn-ami-pv-2015.03.rc-1.x86_64-s3')
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    yield vm

    try:
        if provider.mgmt.does_vm_exist(vm.name):
            provider.mgmt.delete_vm(vm.name)
    except Exception:
        logger.warning('Failed to delete vm from provider: {}'.format(vm.name))
def catalog_item(appliance, dialog, catalog):
    cat_item = appliance.collections.catalog_items.create(
        appliance.collections.catalog_items.GENERIC,
        name='test_item_{}'.format(fauxfactory.gen_alphanumeric()),
        description="my catalog item",
        display_in=True,
        catalog=catalog,
        dialog=dialog)
    yield cat_item

    # fixture cleanup
    try:
        cat_item.delete()
    except NoSuchElementException:
        logger.warning('test_catalog_item: catalog_item yield fixture cleanup, catalog item "{}" '
                       'not found'.format(cat_item.name))
def _setup_provider_verbose(request, provider, appliance=None):
    if appliance is None:
        appliance = store.current_appliance
    try:
        if request.config.option.provider_limit > 0:
            existing_providers = [
                p for p in appliance.managed_known_providers if p.key != provider.key]
            random.shuffle(existing_providers)
            maximum_current_providers = request.config.option.provider_limit - 1
            if len(existing_providers) > maximum_current_providers:
                providers_to_remove = existing_providers[maximum_current_providers:]
                store.terminalreporter.write_line(
                    'Removing extra providers: {}'.format(
                        ', '.join([p.key for p in providers_to_remove])))
                for p in providers_to_remove:
                    logger.info('removing provider %r', p.key)
                    p.delete(cancel=False)
                # Decoupled wait for better performance
                for p in providers_to_remove:
                    logger.info('waiting for provider %r to disappear', p.key)
                    p.wait_for_delete()
        store.terminalreporter.write_line(
            "Trying to set up provider {}\n".format(provider.key), green=True)
        provider.setup()
        return True
    except Exception as e:
        logger.exception(e)
        _setup_failures[provider] += 1
        if _setup_failures[provider] >= SETUP_FAIL_LIMIT:
            _problematic_providers.add(provider)
            message = ("Provider {} is now marked as problematic and won't be used again."
                       " {}: {}".format(provider.key, type(e).__name__, str(e)))
            logger.warning(message)
            store.terminalreporter.write_line(message + "\n", red=True)
        if provider.exists:
            # Remove it in order to not explode on next calls
            provider.delete(cancel=False)
            provider.wait_for_delete()
            message = "Provider {} was deleted because it failed to set up.".format(
                provider.key)
            logger.warning(message)
            store.terminalreporter.write_line(message + "\n", red=True)
        return False
def _param_check(metafunc, argnames, argvalues):
    """Helper function to check if parametrizing is necessary

    * If no argnames were specified, parametrization is unnecessary.
    * If argvalues were generated, parametrization is necessary.
    * If argnames were specified, but no values were generated, the test cannot run
      successfully, and will be uncollected using the :py:mod:`markers.uncollect` mark.

    See usage in :py:func:`parametrize`

    Args:
        metafunc: metafunc objects from pytest_generate_tests
        argnames: argnames list for use in metafunc.parametrize
        argvalues: argvalues list for use in metafunc.parametrize

    Returns:
        * ``True`` if this test should be parametrized
        * ``False`` if it shouldn't be parametrized
        * ``None`` if the test will be uncollected
    """
    assert isinstance(argvalues, list), "iterators break pytest expectations"
    # If no parametrized args were named, don't parametrize
    if not argnames:
        return False
    # If parametrized args were named and values were generated, parametrize
    elif any(argvalues):
        return True
    # If parametrized args were named, but no values were generated, mark this test to be
    # removed from the test collection. Otherwise, py.test will try to find values for the
    # items in argnames by looking in its fixture pool, which will almost certainly fail.
    else:
        # module and class are optional, but function isn't
        modname = getattr(metafunc.module, '__name__', None)
        classname = getattr(metafunc.cls, '__name__', None)
        funcname = metafunc.function.__name__

        test_name = '.'.join([_f for _f in (modname, classname, funcname) if _f])
        uncollect_msg = ('Parametrization for {} yielded no values,'
                         ' marked for uncollection'.format(test_name))
        logger.warning(uncollect_msg)

        # apply the mark
        pytest.mark.uncollect(reason=uncollect_msg)(metafunc.function)
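# Minimal sketch of the parametrize() helper the docstring above points to
# (the body is an assumption; only _param_check's contract is taken from the
# source): parametrize only when values exist, since the no-values case is
# already handled by the uncollect mark inside _param_check.
def parametrize(metafunc, argnames, argvalues, *args, **kwargs):
    if _param_check(metafunc, argnames, argvalues):
        metafunc.parametrize(argnames, argvalues, *args, **kwargs)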
def test_shelve_offload_instance(new_instance):
    new_instance.power_control_from_cfme(from_details=True, option=OpenStackInstance.SHELVE)
    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_SHELVED)
    try:
        new_instance.power_control_from_cfme(from_details=True,
                                             option=OpenStackInstance.SHELVE_OFFLOAD)
    except TimeoutException:
        logger.warning("Timeout when initiating power state 'Shelve Offload'. Skipping")

    new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_SHELVED_OFFLOAD)
    view = navigate_to(new_instance, 'Details')
    state = view.entities.summary('Power Management').get_text_of('Power State')
    assert state == OpenStackInstance.STATE_SHELVED_OFFLOAD
def volume(appliance, provider):
    # create new volume
    volume_collection = appliance.collections.volumes
    manager_name = '{} Cinder Manager'.format(provider.name)
    volume = volume_collection.create(name=fauxfactory.gen_alpha(),
                                      storage_manager=manager_name,
                                      tenant=provider.data['provisioning']['cloud_tenant'],
                                      size=STORAGE_SIZE,
                                      provider=provider)
    yield volume

    try:
        if volume.exists:
            volume.delete(wait=True)
    except Exception as e:
        logger.warning("{name}:{msg} Volume deletion - skipping...".format(
            name=type(e).__name__, msg=str(e)))
def retire_vm(small_template, provider):
    """Fixture for creating a generic vm/instance

    Args:
        small_template: small template fixture, template on provider
        provider: provider crud object from fixture
    """
    vm = VM.factory(random_vm_name('retire'), provider, template_name=small_template.name)
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    yield vm

    try:
        if provider.mgmt.does_vm_exist(vm.name):
            provider.mgmt.delete_vm(vm.name)
    except Exception:
        logger.warning('Failed to delete vm from provider: {}'.format(vm.name))
def _save_action(self, view, updates, reset):
    """ Take care of actions to do after updates """
    if reset:
        try:
            view.reset_button.click()
            flash_message = 'All changes have been reset'
        except Exception:
            logger.warning('No values were changed')
            return  # nothing to assert if the reset button was not clickable
    else:
        view.save_button.click()
        self.appliance.server_details_changed()
        flash_message = (
            'Configuration settings saved for {} Server "{} [{}]" in Zone "{}"'
            .format(self.appliance.product_name, self.appliance.server_name(),
                    self.appliance.server_id(), self.appliance.server.zone.name))
    view.flash.assert_message(flash_message)
def get_stream_from_image_url(image_url, quiet=False):
    """Get the stream name for a given image URL"""
    # strip trailing / from URL, and strip build number or link (5.11.0.1, latest, stable)
    # to get just https://url/builds/[cfme/manageiq]/[build-stream]
    image_base = '/'.join(image_url.strip('/').split('/')[:-1])
    if not quiet:
        # don't log (goes to stdout) when just printing name, for Jenkins
        logger.info('Matching stream name based on image_url base: %s', image_base)
    # look for image_base URL component in basic_info dict
    matching_streams = [key for key, value in ALL_STREAMS.items() if image_base in value]
    if matching_streams:
        # sometimes multiple match, use first
        if len(matching_streams) > 1:
            logger.warning('warning: Multiple stream name matches: %s for URL %s, using first',
                           matching_streams, image_url)
        return matching_streams[0]
    else:
        logger.error('Cannot find stream in image url: %s', image_url)
        raise TemplateUploadException("Cannot find stream from image URL.")
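# Illustrative call for get_stream_from_image_url() above (the URL is a made-up
# example): the trailing build component ('latest' here) is stripped, so the
# lookup against ALL_STREAMS uses the stream-level base URL.
stream = get_stream_from_image_url('https://example.com/builds/cfme/5.11/latest')
# image_base becomes 'https://example.com/builds/cfme/5.11', which is matched
# against the URLs stored in ALL_STREAMS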
def test_suspend(self, appliance, testing_vm, ensure_vm_running, soft_assert):
    """Tests suspend

    Polarion:
        assignee: ghubale
        initialEstimate: 1/6h
        casecomponent: Infra
        caseimportance: high
        tags: power
    """
    testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON, timeout=720,
                                        from_details=True)
    view = navigate_to(testing_vm, "Details")
    last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
    testing_vm.power_control_from_cfme(option=testing_vm.SUSPEND, cancel=False,
                                       from_details=True)
    view.flash.assert_success_message(text='Suspend initiated', partial=True)
    if_scvmm_refresh_provider(testing_vm.provider)
    try:
        testing_vm.wait_for_vm_state_change(
            desired_state=testing_vm.STATE_SUSPENDED, timeout=450, from_details=True)
    except TimedOutError as e:
        if testing_vm.provider.one_of(RHEVMProvider):
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            raise e
    soft_assert(testing_vm.mgmt.is_suspended, "vm not suspended")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if not testing_vm.provider.one_of(RHEVMProvider):
        new_last_boot_time = view.entities.summary("Power Management").get_text_of(
            "Last Boot Time")
        soft_assert(new_last_boot_time == last_boot_time,
                    "ui: {} should == orig: {}".format(new_last_boot_time, last_boot_time))
def is_displayed(self):
    expected_name = self.context['object'].name
    expected_provider = self.context['object'].provider.name
    try:
        # Not displayed when the instance is archived
        relationships = self.entities.summary('Relationships')
        relationship_provider_name = relationships.get_text_of('Cloud Provider')
        return (self.in_cloud_instance and
                self.entities.title.text == f'Instance "{expected_name}"' and
                relationship_provider_name == expected_provider)
    except (NameError, NoSuchElementException):
        logger.warning('No "Cloud Provider" Relationship, assume instance view not displayed')
        # for archived instances the relationship_provider_name is removed from the summary
        # table
        return (self.in_cloud_instance and
                self.entities.title.text == f'Instance "{expected_name}"')
def catalog_item(appliance, dialog, catalog):
    cat_item = appliance.collections.catalog_items.create(
        appliance.collections.catalog_items.GENERIC,
        name=fauxfactory.gen_alphanumeric(15, start="cat_item_"),
        description="my catalog item",
        display_in=True,
        catalog=catalog,
        dialog=dialog
    )
    view = cat_item.create_view(AllCatalogItemView)
    assert view.is_displayed
    view.flash.assert_success_message('Service Catalog Item "{}" was added'.format(
        cat_item.name))
    yield cat_item

    # fixture cleanup
    try:
        cat_item.delete()
    except NoSuchElementException:
        logger.warning('test_catalog_item: catalog_item yield fixture cleanup, catalog item "{}" '
                       'not found'.format(cat_item.name))
def test_links(appliance):
    """Test whether the PDF documents are present."""
    view = navigate_to(appliance.server, 'Documentation')
    for link_widget in view.links.sub_widgets:
        # link_widget is nested view, we care about 'link' widget here
        try:
            href = view.browser.get_attribute(attr='href', locator=link_widget.link.locator)
        except AttributeError:
            logger.warning('Skipping link check, No link widget defined for {}'.format(
                link_widget.TEXT))
            continue
        # Check the link is reachable
        try:
            resp = requests.head(href, verify=False, timeout=10)
        except (requests.Timeout, requests.ConnectionError) as ex:
            pytest.fail(str(ex))

        assert 200 <= resp.status_code < 400, \
            "Unable to access URL '{}' from doc link ({})".format(href, link_widget.read())
def _vm_cleanup(mgmt, vm_name):
    """Separated to make the logic able to propagate the exceptions directly."""
    try:
        logger.info("VM/Instance status: %s", mgmt.vm_status(vm_name))
    except Exception as f:
        logger.error("Could not retrieve VM/Instance status: %s: %s",
                     type(f).__name__, str(f))
    logger.info('Attempting cleanup on VM/instance %s', vm_name)
    try:
        if mgmt.does_vm_exist(vm_name):
            # Stop the vm first
            logger.warning('Destroying VM/instance %s', vm_name)
            if mgmt.delete_vm(vm_name):
                logger.info('VM/instance %s destroyed', vm_name)
            else:
                logger.error('Error destroying VM/instance %s', vm_name)
    except Exception as f:
        logger.error('Could not destroy VM/instance %s (%s: %s)',
                     vm_name, type(f).__name__, str(f))
def catalog_bundle(catalog_item):
    """ Create catalog bundle

    Args:
        catalog_item: as resource for bundle creation
    """
    catalog_item.create()
    bundle_name = "bundle" + fauxfactory.gen_alphanumeric()
    catalog_bundle = CatalogBundle(name=bundle_name, description="catalog_bundle",
                                   display_in=True, catalog=catalog_item.catalog,
                                   dialog=catalog_item.dialog,
                                   catalog_items=[catalog_item.name])
    yield catalog_bundle

    # fixture cleanup
    try:
        catalog_bundle.delete()
    except NoSuchElementException:
        logger.warning('catalog_bundle yield fixture cleanup, catalog bundle "{}" '
                       'not found'.format(catalog_bundle.name))
def set_replication(self, updates=None, replication_type=None, reset=False):
    """ Set replication settings

    Args:
        updates(dict): Replication update values; 'host' is mandatory,
                       db credentials are taken from the credentials file
        replication_type(str): Replication type, use 'global' or 'remote'
        reset: Pass True to reset made changes
    """
    db_creds = conf.credentials.database
    if not replication_type:
        view = navigate_to(self, 'Details')
        view.replication_type.fill('<None>')
    elif replication_type == 'global':
        view = navigate_to(self, 'GlobalAdd')
        view.fill({
            'database': (updates.get('database') if updates.get('database')
                         else 'vmdb_production'),
            'host': updates.get('host'),
            'port': updates.get('port') if updates.get('port') else '5432',
            'username': (updates.get('username') if updates.get('username')
                         else db_creds.username),
            'password': (updates.get('password') if updates.get('password')
                         else db_creds.password)
        })
    else:
        view = navigate_to(self, 'RemoteAdd')
        # TODO fill remote settings will be done after widget added
    if reset:
        view.reset_button.click()
        view.flash.assert_message('All changes have been reset')
    else:
        try:
            view.accept_button.click()
            view.save_button.click()
        except Exception:
            logger.warning('Nothing was updated, please check the data')
def pytest_generate_tests(metafunc):
    """ Generates tests specific to RHSM or SAT6 with proxy-on or off """
    if metafunc.function in {test_rh_updates}:
        return
    argnames = ['reg_method', 'reg_data', 'proxy_url', 'proxy_creds']
    argvalues = []
    idlist = []

    try:
        holder = metafunc.config.pluginmanager.get_plugin('appliance-holder')
        stream = holder.held_appliance.version.stream()
        all_reg_data = conf.cfme_data.get('redhat_updates', {})['streams'][stream]
    except KeyError:
        logger.warning('Could not find rhsm data for stream in yaml')
        pytest.mark.uncollect(
            metafunc.function, message='Could not find rhsm data for stream in yaml')
        return

    if 'reg_method' in metafunc.fixturenames:
        for reg_method in REG_METHODS:
            reg_data = all_reg_data.get(reg_method)
            if not reg_data or not reg_data.get('test_registration', False):
                continue

            proxy_data = conf.cfme_data.get('redhat_updates', {}).get('http_proxy', False)
            if proxy_data and reg_data.get('use_http_proxy', False):
                proxy_url = proxy_data['url']
                proxy_creds_key = proxy_data['credentials']
                proxy_creds = conf.credentials[proxy_creds_key]

                argval = [reg_method, reg_data, proxy_url, proxy_creds]
                argid = '{}-{}'.format(reg_method, 'proxy_on')
                idlist.append(argid)
                argvalues.append(argval)

            argval = [reg_method, reg_data, None, None]
            argid = '{}-{}'.format(reg_method, 'proxy_off')
            idlist.append(argid)
            argvalues.append(argval)

        parametrize(metafunc, argnames, argvalues, ids=idlist, scope="module")
def test_aws_smartstate_pod(temp_ssa_pod_appliance, ssa_vm, provider, aws_provider):
    """ deploy aws smartstate pod and check that it works

    Polarion:
        assignee: izapolsk
        casecomponent: Containers
        caseimportance: medium
        initialEstimate: 1h
        testSteps:
            1. pull smartstate image from registry
            2. tag it accordingly and push to externally available registry
            3. setup appliance to use that image for smartstate in aws
            4. add aws provider
            5. find 24/7 vm in aws and perform smartstate analysis
        endsin: 5.10
    """
    appliance = temp_ssa_pod_appliance

    if BZ(1684203, forced_streams=['5.10']).blocks:
        logger.info("stopping & starting appliance in order to re-read new AMI name")
        provider.mgmt.stop_vm(appliance.project)
        provider.mgmt.start_vm(appliance.project)
        provider.mgmt.wait_vm_running(appliance.project)
        for _ in range(3):
            try:
                # there is an issue caused by unexpected log out etc.; this is a workaround,
                # it will be removed along with the above BZ when it is fixed
                navigate_to(aws_provider, 'Details')
                break
            except Exception as e:
                logger.warning("attempt to go to aws_provider failed with '{e}'".format(e=e))

    # run SSA against cu24x7 vm
    ssa_vm.smartstate_scan(wait_for_task_result=True)

    # check SSA has been run and there are some results
    c_lastanalyzed = ssa_vm.last_analysed
    assert c_lastanalyzed != 'Never', "Last Analyzed is set to Never"
def user(request, data, add_group, appliance):
    if not data:
        pytest.skip("No data specified for user")
    username, password = data["username"], data["password"]
    if 'evm_default_group' in add_group:
        username, password = data['default_username'], data['default_password']
        data['fullname'] = data['default_userfullname']
    credentials = Credential(
        principal=username,
        secret=password,
        verify_secret=password,
    )
    user_obj = appliance.collections.users.instantiate(name=data['fullname'],
                                                       credential=credentials)

    def _delete_user():
        # CandidateNotFound is raised by the deletion itself, not by addfinalizer,
        # so it has to be caught inside the finalizer
        try:
            user_obj.delete()
        except CandidateNotFound:
            logger.warning('User was not found during deletion')

    request.addfinalizer(_delete_user)
    return user_obj
def myservice(appliance, setup_provider, provider, catalog_item, request):
    vm_name = catalog_item.provisioning_data["vm_name"]
    request.addfinalizer(lambda: cleanup_vm(vm_name + "_0001", provider))
    catalog_item.create()
    service_catalogs = ServiceCatalogs(catalog_item.catalog, catalog_item.name)
    service_catalogs.order()
    logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
    request_description = catalog_item.name
    provision_request = RequestCollection(appliance).instantiate(request_description,
                                                                 partial_check=True)
    provision_request.wait_for_request()
    assert provision_request.is_finished()
    service = MyService(appliance, catalog_item.name, vm_name)
    yield service

    try:
        service.delete()
    except Exception as ex:
        logger.warning('Exception while deleting MyService, continuing: {}'.format(ex))
def pytest_collection_modifyitems(session, config, items):
    from cfme.utils.log import logger
    for item in items:
        try:
            item._metadata = AttrDict(item.function.meta.kwargs)
        except AttributeError:
            logger.warning('AttributeError getting metadata from item: {}'.format(
                str(item.nodeid)))
            item._metadata = AttrDict()

        meta = item.get_marker("meta")
        if meta is None:
            continue
        metas = reversed([x.kwargs for x in meta])  # Extract the kwargs, reverse the order
        for meta in metas:
            item._metadata.update(meta)
    yield
def test_shelve_instance(new_instance):
    """
    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    new_instance.power_control_from_cfme(from_details=True, option=OpenStackInstance.SHELVE)
    try:
        new_instance.wait_for_instance_state_change(OpenStackInstance.STATE_SHELVED)
    except TimedOutError:
        logger.warning("Timeout when waiting for instance state: 'shelved'. Skipping")

    view = navigate_to(new_instance, 'Details')
    state = view.entities.summary('Power Management').get_text_of('Power State')
    assert state in (OpenStackInstance.STATE_SHELVED_OFFLOAD, OpenStackInstance.STATE_SHELVED)