def test_positive_host_configuration_status(session):
    """Check if the Host Configuration Status Widget links are working

    :id: ffb0a6a1-2b65-4578-83c7-61492122d865

    :customerscenario: true

    :Steps:

        1. Navigate to Monitor -> Dashboard
        2. Review the Host Configuration Status
        3. Navigate to each of the links which has search string associated
           with it.

    :expectedresults: Each link shows the right info

    :BZ: 1631219

    :CaseLevel: Integration
    """
    # Fresh org/location so the dashboard counters only reflect the one
    # host created below.
    org = entities.Organization().create()
    loc = entities.Location().create()
    host = entities.Host(organization=org, location=loc).create()
    # Widget rows, in the same order as the expected search strings below.
    criteria_list = [
        'Hosts that had performed modifications without error',
        'Hosts in error state',
        'Good host reports in the last 30 minutes',
        'Hosts that had pending changes',
        'Out of sync hosts',
        'Hosts with alerts disabled',
        'Hosts with no reports',
    ]
    # Search string each widget link is expected to populate in the host
    # index searchbox; index-aligned with criteria_list.
    search_strings_list = [
        'last_report > \"30 minutes ago\" and (status.applied > 0 or'
        ' status.restarted > 0) and (status.failed = 0)',
        'last_report > \"30 minutes ago\" and (status.failed > 0 or'
        ' status.failed_restarts > 0) and status.enabled = true',
        'last_report > \"30 minutes ago\" and status.enabled = true and'
        ' status.applied = 0 and status.failed = 0 and status.pending = 0',
        'last_report > \"30 minutes ago\" and status.pending > 0 and status.enabled = true',
        'last_report < \"30 minutes ago\" and status.enabled = true',
        'status.enabled = false',
        'not has last_report and status.enabled = true',
    ]
    # While BZ 1631219 is unresolved the 'Hosts with no reports' row
    # misbehaves, so drop it (last entry) from both parallel lists.
    if is_open('BZ:1631219'):
        criteria_list.pop()
        search_strings_list.pop()
    with session:
        session.organization.select(org_name=org.name)
        session.location.select(loc_name=loc.name)
        dashboard_values = session.dashboard.read('HostConfigurationStatus')
        # The newly created host has never reported, so only the
        # 'Hosts with no reports' counter should be 1; all others 0.
        for criteria in criteria_list:
            if criteria == 'Hosts with no reports':
                assert dashboard_values['status_list'][criteria] == 1
            else:
                assert dashboard_values['status_list'][criteria] == 0
        # Follow each widget link and verify the host index page shows the
        # matching search string and the expected result set.
        for criteria, search in zip(criteria_list, search_strings_list):
            if criteria == 'Hosts with no reports':
                session.dashboard.action({'HostConfigurationStatus': {'status_list': criteria}})
                values = session.host.read_all()
                assert values['searchbox'] == search
                # Only our report-less host should match this filter.
                assert len(values['table']) == 1
                assert values['table'][0]['Name'] == host.name
            else:
                session.dashboard.action({'HostConfigurationStatus': {'status_list': criteria}})
                values = session.host.read_all()
                assert values['searchbox'] == search
                # No host satisfies any of the other filters.
                assert len(values['table']) == 0
def test_positive_capsule_sync(self, capsule_configured):
    """Create repository, add it to lifecycle environment, assign lifecycle
    environment with a capsule, sync repository, sync it once again, update
    repository (add 1 new package), sync repository once again.

    :id: 35513099-c918-4a8e-90d0-fd4c87ad2f82

    :customerscenario: true

    :BZ: 1394354, 1439691

    :expectedresults:

        1. Repository sync triggers capsule sync
        2. After syncing capsule contains same repo content as satellite
        3. Syncing repository which has no changes for a second time does
           not trigger any new publish task
        4. Repository revision on capsule remains exactly the same after
           second repo sync with no changes
        5. Syncing repository which was updated will update the content on
           capsule

    :CaseLevel: System
    """
    repo_name = gen_string('alphanumeric')
    # Create and publish custom repository with 2 packages in it
    repo_url = create_repo(
        repo_name, constants.repos.FAKE_1_YUM_REPO, constants.YUM_REPO_RPMS[0:2])
    # Create organization, product, repository in satellite, and lifecycle
    # environment
    org = entities.Organization(
        smart_proxy=[capsule_configured.nailgun_capsule.id]).create()
    product = entities.Product(organization=org).create()
    repo = entities.Repository(product=product, url=repo_url).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    # Associate the lifecycle environment with the capsule
    capsule_configured.nailgun_capsule.content_add_lifecycle_environment(
        data={'environment_id': lce.id})
    result = capsule_configured.nailgun_capsule.content_lifecycle_environments()
    # The capsule must now report at least one LCE, including ours.
    assert len(result['results'])
    assert lce.id in [
        capsule_lce['id'] for capsule_lce in result['results']
    ]
    # Create a content view with the repository
    cv = entities.ContentView(organization=org, repository=[repo]).create()
    # Sync repository
    repo.sync()
    repo = repo.read()
    # Publish new version of the content view
    cv.publish()
    cv = cv.read()
    assert len(cv.version) == 1
    cvv = cv.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv, lce.id)
    cvv = cvv.read()
    # Environments: Library + the promoted LCE.
    assert len(cvv.environment) == 2
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule_configured.nailgun_capsule.content_get_sync()
    assert len(
        sync_status['active_sync_tasks']) or sync_status['last_sync_time']
    # Content of the published content view in
    # lifecycle environment should equal content of the
    # repository
    lce_repo_path = form_repo_path(
        org=org.label, lce=lce.label, cv=cv.label, prod=product.label, repo=repo.label)
    cvv_repo_path = form_repo_path(
        org=org.label, cv=cv.label, cvv=cvv.version, prod=product.label, repo=repo.label)
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    sync_status = capsule_configured.nailgun_capsule.content_get_sync()
    # Remember when the capsule last synced so later steps can detect a
    # new sync (or the absence of one).
    last_sync_time = sync_status['last_sync_time']
    # If BZ1439691 is open, need to sync repo once more, as repodata
    # will change on second attempt even with no changes in repo
    if is_open('BZ:1439691'):
        repo.sync()
        repo = repo.read()
        cv.publish()
        cv = cv.read()
        assert len(cv.version) == 2
        # Sort by id to reliably pick the newest version.
        cv.version.sort(key=lambda version: version.id)
        cvv = cv.version[-1].read()
        promote(cvv, lce.id)
        cvv = cvv.read()
        assert len(cvv.environment) == 2
        sync_status = capsule_configured.nailgun_capsule.content_get_sync()
        assert (len(sync_status['active_sync_tasks'])
                or sync_status['last_sync_time'] != last_sync_time)
        for task in sync_status['active_sync_tasks']:
            entities.ForemanTask(id=task['id']).poll()
        sync_status = capsule_configured.nailgun_capsule.content_get_sync()
        last_sync_time = sync_status['last_sync_time']
    # Assert that the content published on the capsule is exactly the
    # same as in repository on satellite
    lce_revision_capsule = get_repomd_revision(
        lce_repo_path, hostname=capsule_configured.ip_addr)
    assert get_repo_files(
        lce_repo_path, hostname=capsule_configured.ip_addr) == get_repo_files(
            cvv_repo_path)
    # Sync repository for a second time
    result = repo.sync()
    # Assert that the task summary contains a message that says the
    # publish was skipped because content had not changed
    assert result['result'] == 'success'
    assert result['output']['post_sync_skipped']
    assert result['humanized']['output'] == 'No new packages.'
    # Publish a new version of content view
    cv.publish()
    cv = cv.read()
    cv.version.sort(key=lambda version: version.id)
    cvv = cv.version[-1].read()
    # Promote new content view version to lifecycle environment
    promote(cvv, lce.id)
    cvv = cvv.read()
    assert len(cvv.environment) == 2
    # Wait till capsule sync finishes
    sync_status = capsule_configured.nailgun_capsule.content_get_sync()
    tasks = []
    if not sync_status['active_sync_tasks']:
        # No running task: the capsule must at least have re-synced since
        # the previously recorded time.
        assert sync_status['last_sync_time'] != last_sync_time
    else:
        for task in sync_status['active_sync_tasks']:
            tasks.append(entities.ForemanTask(id=task['id']))
            tasks[-1].poll()
    # Assert that the value of repomd revision of repository in
    # lifecycle environment on the capsule has not changed
    new_lce_revision_capsule = get_repomd_revision(
        lce_repo_path, hostname=capsule_configured.ip_addr)
    assert lce_revision_capsule == new_lce_revision_capsule
    # Update a repository with 1 new rpm
    create_repo(
        repo_name, constants.repos.FAKE_1_YUM_REPO, constants.YUM_REPO_RPMS[-1:])
    # Sync, publish and promote the repository
    repo.sync()
    repo = repo.read()
    cv.publish()
    cv = cv.read()
    cv.version.sort(key=lambda version: version.id)
    cvv = cv.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    assert len(cvv.environment) == 2
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule_configured.nailgun_capsule.content_get_sync()
    assert (len(sync_status['active_sync_tasks'])
            or sync_status['last_sync_time'] != last_sync_time)
    # Assert that packages count in the repository is updated
    # (2 original packages + 1 newly added).
    assert repo.content_counts['package'] == 3
    # Assert that the content of the published content view in
    # lifecycle environment is exactly the same as content of the
    # repository
    cvv_repo_path = form_repo_path(
        org=org.label, cv=cv.label, cvv=cvv.version, prod=product.label, repo=repo.label)
    assert repo.content_counts['package'] == cvv.package_count
    assert get_repo_files(lce_repo_path) == get_repo_files(cvv_repo_path)
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Assert that the content published on the capsule is exactly the
    # same as in the repository
    assert get_repo_files(
        lce_repo_path, hostname=capsule_configured.ip_addr) == get_repo_files(
            cvv_repo_path)
def test_positive_lock_clone_nodelete_unlock_report(self):
    """Lock report template. Check it can be cloned and can't be deleted
    or edited. Unlock. Check it can be deleted and edited.

    :id: a4c577db-144e-4761-a42e-e83887464986

    :setup: User with reporting access rights, some report template
        that is not locked

    :steps:

        1. Create template
        2. Lock template
        3. Clone template, check cloned data
        4. Try to delete template
        5. Try to edit template
        6. Unlock template
        7. Edit template
        8. Delete template

    :expectedresults: Report is locked

    :CaseImportance: High

    :BZ: 1680458
    """
    # 1. Create template
    template_name = gen_string('alpha').lower()
    template_clone_name = gen_string('alpha').lower()
    template1 = gen_string('alpha')
    template2 = gen_string('alpha')
    rt = entities.ReportTemplate(name=template_name, template=template1).create()
    # 2. Lock template
    entities.ReportTemplate(id=rt.id, locked=True).update(["locked"])
    rt = rt.read()
    self.assertTrue(rt.locked)
    # 3. Clone template, check cloned data
    rt.clone(data={'name': template_clone_name})
    cloned_rt = (entities.ReportTemplate().search(
        query={'search': 'name="{}"'.format(template_clone_name)})
        [0].read())
    # NOTE: use assertEqual/assertNotEqual — the plural aliases
    # (assertEquals/assertNotEquals) are deprecated and removed in
    # Python 3.12.
    self.assertEqual(template_clone_name, cloned_rt.name)
    self.assertEqual(template1, cloned_rt.template)
    # 4. Try to delete template
    if not is_open('BZ:1680458'):
        with self.assertRaises(HTTPError):
            rt.delete()
        # In BZ1680458, exception is thrown but template is deleted anyway
        self.assertNotEqual(
            0,
            len(entities.ReportTemplate().search(
                query={'search': 'name="{}"'.format(template_name)})),
        )
    # 5. Try to edit template
    with self.assertRaises(HTTPError):
        entities.ReportTemplate(id=rt.id, template=template2).update(["template"])
    rt = rt.read()
    self.assertEqual(template1, rt.template)
    # 6. Unlock template
    entities.ReportTemplate(id=rt.id, locked=False).update(["locked"])
    rt = rt.read()
    self.assertFalse(rt.locked)
    # 7. Edit template
    entities.ReportTemplate(id=rt.id, template=template2).update(["template"])
    rt = rt.read()
    self.assertEqual(template2, rt.template)
    # 8. Delete template
    rt.delete()
    self.assertEqual(
        0,
        len(entities.ReportTemplate().search(
            query={'search': 'name="{}"'.format(template_name)})),
    )
def test_positive_reboot_all_pxe_hosts(self, _module_user, discovered_host_cleanup,
                                       discovery_settings, provisioning_env):
    """Rebooting all pxe-based discovered hosts

    :id: 69c807f8-5646-4aa6-8b3c-5ecdb69560ed

    :parametrized: yes

    :Setup: Provisioning should be configured and a hosts should be
        discovered via PXE boot.

    :Steps: PUT /api/v2/discovered_hosts/reboot_all

    :expectedresults: All disdcovered host should be rebooted successfully

    :CaseAutomation: Automated

    :CaseImportance: Medium
    """
    cfg = get_nailgun_config()
    # When the test is parametrized with a non-admin user, authenticate
    # API calls with that user's credentials instead of the defaults.
    if _module_user:
        cfg.auth = (_module_user[0].login, _module_user[1])
    # open ssh channels and attach them to foreman-tail output
    channel_1, channel_2 = ssh.get_client().invoke_shell(), ssh.get_client(
    ).invoke_shell()
    channel_1.send('foreman-tail\r')
    channel_2.send('foreman-tail\r')
    # Boot two PXE guests and wait until each shows up as a discovered
    # host (each guest's discovery is verified against its own channel).
    with LibvirtGuest() as pxe_host_1:
        _assert_discovered_host(pxe_host_1, channel_1, user_config=cfg)
        with LibvirtGuest() as pxe_host_2:
            _assert_discovered_host(pxe_host_2, channel_2, user_config=cfg)
            # reboot_all method leads to general /discovered_hosts/ path, so it doesn't matter
            # what DiscoveredHost object we execute this on
            try:
                entities.DiscoveredHost().reboot_all()
            except simplejson.errors.JSONDecodeError as e:
                # BZ 1893349: the endpoint may return a non-JSON body;
                # tolerate the decode failure only while that BZ is open.
                if is_open('BZ:1893349'):
                    pass
                else:
                    raise e
            # assert that server receives DHCP discover from hosts PXELinux
            # this means that the hosts got rebooted
            for pxe_host in [(pxe_host_1, channel_1), (pxe_host_2, channel_2)]:
                # For each (guest, channel) pair, wait for both the
                # DHCPDISCOVER and the matching DHCPACK log lines.
                for pattern in [
                    (
                        f"DHCPDISCOVER from {pxe_host[0].mac}",
                        "DHCPDISCOVER",
                    ),
                    (f"DHCPACK on [0-9.]+ to {pxe_host[0].mac}", "DHCPACK"),
                ]:
                    try:
                        _wait_for_log(pxe_host[1], pattern[0], timeout=30)
                    except TimedOutError:
                        # raise assertion error
                        raise AssertionError(
                            f'Timed out waiting for {pattern[1]} from '
                            f'{pxe_host[0].mac}')