def test_positive_run_receptor_installer(self):
    """Run Receptor installer ("Configure Cloud Connector")

    :CaseComponent: RHCloud-CloudConnector

    :Assignee: lhellebr

    :id: 811c7747-bec6-1a2d-8e5c-b5045d3fbc0d

    :expectedresults: The job passes, installs Receptor that peers with c.r.c

    :BZ: 1818076
    """
    # Set Host parameter source_display_name to something random.
    # To avoid 'name has already been taken' error when run multiple times
    # on a machine with the same hostname.
    host_id = Host.info({'name': settings.server.hostname})['id']
    Host.set_parameter({
        'host-id': host_id,
        'name': 'source_display_name',
        'value': gen_string('alpha'),
    })
    template_name = 'Configure Cloud Connector'
    invocation = make_job_invocation({
        'async': True,
        'job-template': template_name,
        # Interpolate the admin credentials and keep the whole value on one
        # logical line: a backslash continuation inside the string literal
        # would embed the continuation's indentation whitespace into the
        # inputs string handed to hammer.
        'inputs': (
            f'satellite_user="{settings.server.admin_username}",'
            f'satellite_password="{settings.server.admin_password}"'
        ),
        'search-query': f'name ~ {settings.server.hostname}',
    })
    invocation_id = invocation['id']
    # The installer job can legitimately take a long time; poll until it
    # reaches a terminal state.
    wait_for(
        lambda: entities.JobInvocation(id=invocation_id).read().status_label
        in ["succeeded", "failed"],
        timeout="1500s",
    )
    assert entities.JobInvocation(id=invocation_id).read().status == 0
    result = ' '.join(
        JobInvocation.get_output({
            'id': invocation_id,
            'host': settings.server.hostname
        }))
    assert 'project-receptor.satellite_receptor_installer' in result
    assert 'Exit status: 0' in result
    # check that there is one receptor conf file and it's only readable
    # by the receptor user and root
    result = ssh.command(
        'stat /etc/receptor/*/receptor.conf --format "%a:%U"')
    assert result.stdout[0] == '400:foreman-proxy'
    result = ssh.command('ls -l /etc/receptor/*/receptor.conf | wc -l')
    assert result.stdout[0] == '1'
def test_post_scenario_remoteexecution_satellite(self):
    """Run a REX job on pre-upgrade created client registered
    with Satellite.

    :id: postupgrade-ad3b1564-d3e6-4ada-9337-3a6ee6863bae

    :steps: 1. Run a REX job on content host.

    :expectedresults: 1. The job should successfully executed on
        pre-upgrade created client.
    """
    # The client hostname was persisted by the matching pre-upgrade scenario.
    hostname = get_entity_data(self.__class__.__name__)['client_name']
    rex_payload = {
        'job_template_id': 89,
        'inputs': {'command': "ls"},
        'targeting_type': 'static_query',
        'search_query': f"name = {hostname}",
    }
    invocation = entities.JobInvocation().run(data=rex_payload)
    self.assertEqual(invocation['output']['success_count'], 1)
    # Tear down the pre-upgrade provisioned VM now that the check is done.
    cleanup_of_provisioned_server(
        hostname=hostname,
        provisioning_server=self.libvirt_vm,
        distro=DISTRO_RHEL7,
    )
def test_pre_scenario_remoteexecution_satellite(self):
    """Run REX job on client registered with Satellite

    :id: preupgrade-3f338475-fa69-43ef-ac86-f00f4d324b33

    :steps:
        1. Create Subnet.
        2. Create Content host.
        3. Install katello-ca package and register to Satellite host.
        4. Add rex ssh_key of Satellite on content host.
        5. Run the REX job on client vm.

    :expectedresults:
        1. It should create with pre-required details.
        2. REX job should run on it.
    """
    # Initialize up front so the except-clause can safely check whether the
    # VM was ever created. Previously `client` could be unbound here (e.g.
    # if subnet creation failed), raising NameError and masking the
    # original failure.
    client = None
    try:
        default_loc_id = entities.Location().search(
            query={'search': 'name="{}"'.format(DEFAULT_LOC)})[0].id
        sn = entities.Subnet(
            domain=self.vm_domain,
            gateway=self.gateway,
            ipam='DHCP',
            location=[default_loc_id],
            mask=self.netmask,
            network=self.subnet,
            organization=[self.org.id],
            remote_execution_proxy=[entities.SmartProxy(id=1)],
        ).create()
        client = VirtualMachine(
            distro=DISTRO_RHEL7,
            provisioning_server=self.libvirt_vm,
            bridge=self.bridge,
        )
        client.create()
        client.install_katello_ca()
        client.register_contenthost(org=self.org.label, lce='Library')
        add_remote_execution_ssh_key(hostname=client.ip_addr)
        # Attach the client to the REX-enabled subnet.
        host = entities.Host().search(
            query={'search': 'name="{}"'.format(client.hostname)})
        host[0].subnet = sn
        host[0].update(['subnet'])
        job = entities.JobInvocation().run(
            data={
                'job_template_id': 89,
                'inputs': {'command': "ls"},
                'targeting_type': 'static_query',
                'search_query': "name = {0}".format(client.hostname),
            })
        self.assertEqual(job['output']['success_count'], 1)
        # Persist the client hostname for the post-upgrade scenario.
        global_dict = {
            self.__class__.__name__: {'client_name': client.hostname}
        }
        create_dict(global_dict)
    except Exception:
        if client is not None and client._created:
            self._vm_cleanup(hostname=client.hostname)
        # Re-raise the original exception with its traceback intact instead
        # of wrapping it in a bare Exception (which loses the stack trace).
        raise
def test_post_scenario_remoteexecution_external_capsule(self):
    """Run a REX job on pre-upgrade created client registered
    with external capsule.

    :id: postupgrade-00ed2a25-b0bd-446f-a3fc-09149c57fe94

    :steps: 1. Run a REX job on content host.

    :expectedresults: 1. The job should successfully executed on
        pre-upgrade created client.
    """
    # The client hostname was persisted by the matching pre-upgrade scenario.
    client_name = get_entity_data(self.__class__.__name__)['client_name']
    job = entities.JobInvocation().run(
        data={
            'job_template_id': 89,
            'inputs': {'command': "ls"},
            'targeting_type': 'static_query',
            # f-string for consistency with the sibling post-upgrade
            # Satellite scenario (str.format was used only here).
            'search_query': f"name = {client_name}",
        })
    self.assertEqual(job['output']['success_count'], 1)
    # Tear down the pre-upgrade provisioned VM now that the check is done.
    cleanup_of_provisioned_server(
        hostname=client_name,
        provisioning_server=self.libvirt_vm,
        distro=DISTRO_RHEL7,
    )
def test_positive_run_receptor_installer(self):
    """Run Receptor installer ("Configure Cloud Connector")

    :CaseComponent: RHCloud-CloudConnector

    :id: 811c7747-bec6-1a2d-8e5c-b5045d3fbc0d

    :expectedresults: The job passes, installs Receptor that peers with c.r.c

    :BZ: 1818076
    """
    template_name = 'Configure Cloud Connector'
    invocation = make_job_invocation({
        'async': True,
        'job-template': template_name,
        # Use an f-string so the admin credentials are actually
        # interpolated: the previous str.format() call had no {}
        # placeholders in the template, so its arguments were silently
        # discarded and no credentials reached the job.
        'inputs': (
            f'satellite_user="{settings.server.admin_username}",'
            f'satellite_password="{settings.server.admin_password}"'
        ),
        'search-query': f'name ~ {settings.server.hostname}',
    })
    invocation_id = invocation['id']
    # Poll until the job reaches a terminal state; installs can be slow.
    wait_for(
        lambda: entities.JobInvocation(id=invocation_id).read().status_label
        in ["succeeded", "failed"],
        timeout="1500s",
    )
    assert entities.JobInvocation(id=invocation_id).read().status == 0
    result = ' '.join(
        JobInvocation.get_output({
            'id': invocation_id,
            'host': settings.server.hostname
        }))
    assert 'project-receptor.satellite_receptor_installer' in result
    assert 'Exit status: 0' in result
    # check that there is one receptor conf file and it's only readable
    # by the receptor user and root
    result = ssh.command(
        'stat /etc/receptor/*/receptor.conf --format "%a:%U"')
    assert result.stdout[0] == '400:foreman-proxy'
    result = ssh.command('ls -l /etc/receptor/*/receptor.conf | wc -l')
    assert result.stdout[0] == '1'
def test_positive_run_capsule_upgrade_playbook():
    """Run Capsule Upgrade playbook against an External Capsule

    :id: 9ec6903d-2bb7-46a5-8002-afc74f06d83b

    :steps:
        1. Create a Capsule VM, add REX key.
        2. Run the Capsule Upgrade Playbook.

    :expectedresults: Capsule is upgraded successfully

    :CaseImportance: Medium
    """
    with CapsuleVirtualMachine() as capsule_vm:
        # Locate the stock upgrade playbook template and authorize REX
        # access to the capsule VM.
        playbook_template = entities.JobTemplate().search(
            query={'search': 'name="Capsule Upgrade Playbook"'})[0]
        add_remote_execution_ssh_key(capsule_vm.ip_addr)
        job = entities.JobInvocation().run(
            synchronous=False,
            data={
                'job_template_id': playbook_template.id,
                'inputs': {
                    'target_version': CAPSULE_TARGET_VERSION,
                    'whitelist_options': "repositories-validate,repositories-setup",
                },
                'targeting_type': "static_query",
                'search_query': f"name = {capsule_vm.hostname}",
            },
        )
        wait_for_tasks(
            f"resource_type = JobInvocation and resource_id = {job['id']}")
        invocation = entities.JobInvocation(id=job['id']).read()
        assert invocation.succeeded == 1
        # The upgraded capsule must come out healthy.
        health = capsule_vm.run('foreman-maintain health check')
        assert health.return_code == 0
        assert all('FAIL' not in line for line in health.stdout)
        # Refresh the smart proxy features and verify nothing was lost.
        proxy_id = entities.SmartProxy(name=capsule_vm.hostname).search()[0].id
        refreshed = entities.SmartProxy(id=proxy_id).refresh()
        feature_list = [feat['name'] for feat in refreshed['features']]
        assert {'Discovery', 'Dynflow', 'Ansible', 'SSH',
                'Logs', 'Pulp'}.issubset(feature_list)
def test_negative_run_capsule_upgrade_playbook_on_satellite(default_org):
    """Run Capsule Upgrade playbook against the Satellite itself

    :id: 99462a11-5133-415d-ba64-4354da539a34

    :steps:
        1. Add REX key to the Satellite server.
        2. Run the Capsule Upgrade Playbook.
        3. Check the job output for proper failure reason.

    :expectedresults: Should fail

    :CaseImportance: Medium
    """
    sat = entities.Host().search(
        query={'search': f'name={settings.server.hostname}'})[0].read()
    template_id = (entities.JobTemplate().search(
        query={'search': 'name="Capsule Upgrade Playbook"'})[0].id)
    add_remote_execution_ssh_key(sat.name)
    with pytest.raises(TaskFailedError) as error:
        entities.JobInvocation().run(
            data={
                'job_template_id': template_id,
                'inputs': {
                    'target_version': CAPSULE_TARGET_VERSION,
                    # Fixed typo ('repositories-validqqate') so the job is
                    # launched with valid options, matching the positive
                    # capsule-upgrade test -- the failure asserted below
                    # must come from the playbook's Satellite check, not
                    # from a bogus whitelist option.
                    'whitelist_options': "repositories-validate,repositories-setup",
                },
                'targeting_type': "static_query",
                'search_query': f"name = {sat.name}",
            })
    assert 'A sub task failed' in error.value.args[0]
    # Fetch the failed invocation's per-host output and verify the refusal
    # message produced by the playbook.
    job = entities.JobInvocation().search(
        query={
            'search': f'host={sat.name},status=failed,description="Capsule Upgrade Playbook"'
        })[0]
    response = client.get(
        f'https://{sat.name}/api/job_invocations/{job.id}/hosts/{sat.id}',
        auth=get_credentials(),
        verify=False,
    )
    assert 'This playbook cannot be executed on a Satellite server.' in response.text
def test_pre_scenario_remoteexecution_satellite(self, request, compute_resource_setup,
                                                default_location, rhel7_contenthost,
                                                default_sat):
    """Run REX job on client registered with Satellite

    :id: preupgrade-3f338475-fa69-43ef-ac86-f00f4d324b33

    :steps:
        1. Create Subnet.
        2. Create Content host.
        3. Install katello-ca package and register to Satellite host.
        4. Add rex ssh_key of Satellite on content host.
        5. Run the REX job on client vm.

    :expectedresults:
        1. It should create with pre-required details.
        2. REX job should run on it.

    :parametrized: yes
    """
    sn = entities.Subnet(
        domain=self.vm_domain,
        gateway=self.gateway,
        ipam='DHCP',
        location=[default_location.id],
        mask=self.netmask,
        network=self.subnet,
        organization=[self.org.id],
        remote_execution_proxy=[entities.SmartProxy(id=1)],
    ).create()
    rhel7_contenthost.configure_rex(satellite=default_sat, org=self.org, by_ip=False)
    # nailgun_host returns a single Host entity (the errata tests in this
    # repo use `host.id` on it directly), so indexing it with [0] raised
    # TypeError; assign the subnet on the entity itself.
    host = rhel7_contenthost.nailgun_host
    host.subnet = sn
    host.update(['subnet'])
    job = entities.JobInvocation().run(
        data={
            'job_template_id': 89,
            'inputs': {'command': 'ls'},
            'targeting_type': 'static_query',
            'search_query': f'name = {rhel7_contenthost.hostname}',
        })
    assert job['output']['success_count'] == 1
    # Persist the client hostname for the post-upgrade scenario.
    global_dict = {
        self.__class__.__name__: {'client_name': rhel7_contenthost.hostname}
    }
    create_dict(global_dict)
def test_pre_scenario_remoteexecution_external_capsule(
        self, request, default_location, rhel7_contenthost):
    """Run REX job on client registered with external capsule

    :id: preupgrade-261dd2aa-be01-4c34-b877-54b8ee346561

    :steps:
        1. Create Subnet.
        2. Create Content host.
        3. Install katello-ca package and register to Satellite host.
        4. add rex ssh_key of external capsule on content host.
        5. run the REX job on client vm.

    :expectedresults:
        1. Content host should create with pre-required details.
        2. REX job should run on it.

    :parametrized: yes
    """
    # Subnet whose remote-execution traffic is routed through the
    # external capsule (smart proxy id 2).
    rex_subnet = entities.Subnet(
        domain=self.vm_domain,
        gateway=self.gateway,
        ipam='DHCP',
        location=[default_location.id],
        mask=self.netmask,
        network=self.subnet,
        organization=[self.org.id],
        remote_execution_proxy=[entities.SmartProxy(id=2)],
    ).create()
    # Register the content host through the capsule and authorize REX.
    rhel7_contenthost.install_capsule_katello_ca(capsule=self.proxy_name)
    rhel7_contenthost.register_contenthost(org=self.org.label, lce='Library')
    external_capsule = Capsule(self.proxy_name)
    rhel7_contenthost.add_rex_key(satellite=external_capsule)
    # Attach the client host record to the REX-enabled subnet.
    client_host = entities.Host().search(
        query={'search': f'name="{rhel7_contenthost.hostname}"'})[0]
    client_host.subnet = rex_subnet
    client_host.update(['subnet'])
    job = entities.JobInvocation().run(
        data={
            'job_template_id': 89,
            'inputs': {'command': 'ls'},
            'targeting_type': 'static_query',
            'search_query': f'name = {rhel7_contenthost.hostname}',
        })
    assert job['output']['success_count'] == 1
    # Persist the client hostname for the post-upgrade scenario.
    create_dict({
        self.__class__.__name__: {'client_name': rhel7_contenthost.hostname}
    })
def test_positive_install_multiple_in_host(module_org, activation_key, custom_repo,
                                           rh_repo, rhel7_contenthost):
    """For a host with multiple applicable errata install one and ensure
    the rest of errata is still available

    :id: 67b7e95b-9809-455a-a74e-f1815cc537fc

    :customerscenario: true

    :BZ: 1469800, 1528275

    :expectedresults: errata installation task succeeded, available errata
        counter decreased by one; it's possible to schedule another errata
        installation

    :CaseLevel: System
    """
    client = rhel7_contenthost
    # Register the client and enable the tools repo.
    client.install_katello_ca()
    client.register_contenthost(module_org.label, activation_key.name)
    assert client.subscribed
    client.enable_repo(constants.REPOS['rhst7']['id'])
    client_host = client.nailgun_host
    # Install several outdated packages so multiple errata become applicable.
    for pkg in constants.FAKE_9_YUM_OUTDATED_PACKAGES:
        _install_package(
            module_org,
            clients=[client],
            host_ids=[client_host.id],
            package_name=pkg,
        )
    client_host = client_host.read()
    remaining = client_host.content_facet_attributes['errata_counts']['total']
    assert remaining > 1
    add_remote_execution_ssh_key(client.hostname)
    # Apply two errata one at a time; after each run the applicable-errata
    # counter must drop by exactly one.
    for erratum in constants.FAKE_9_YUM_ERRATUM[:2]:
        entities.JobInvocation().run(
            data={
                'feature': 'katello_errata_install',
                'inputs': {'errata': f'{erratum}'},
                'targeting_type': 'static_query',
                'search_query': f'name = {client.hostname}',
                'organization_id': module_org.id,
            },
        )
        client_host = client_host.read()
        remaining -= 1
        assert client_host.content_facet_attributes['errata_counts']['total'] == remaining
def test_positive_install_in_hc(module_org, activation_key, custom_repo, rh_repo):
    """Install errata in a host-collection

    :id: 6f0242df-6511-4c0f-95fc-3fa32c63a064

    :Setup: Errata synced on satellite server.

    :Steps: PUT /api/v2/hosts/bulk/update_content

    :expectedresults: errata is installed in the host-collection.

    :CaseLevel: System
    """
    with VMBroker(nick=DISTRO_RHEL7, host_classes={'host': ContentHost},
                  _count=2) as clients:
        # Register both clients and prepare them for remote execution.
        for vm in clients:
            vm.install_katello_ca()
            vm.register_contenthost(module_org.label, activation_key.name)
            assert vm.subscribed
            vm.enable_repo(constants.REPOS['rhst7']['id'])
            add_remote_execution_ssh_key(vm.hostname)
        # Seed both hosts with the outdated package so the erratum applies.
        host_ids = [vm.nailgun_host.id for vm in clients]
        _install_package(
            module_org,
            clients=clients,
            host_ids=host_ids,
            package_name=constants.FAKE_1_CUSTOM_PACKAGE,
        )
        # Group the hosts into a collection and target the errata install
        # at the whole collection.
        host_collection = entities.HostCollection(organization=module_org).create()
        host_ids = [vm.nailgun_host.id for vm in clients]
        host_collection.host_ids = host_ids
        host_collection = host_collection.update(['host_ids'])
        entities.JobInvocation().run(
            data={
                'feature': 'katello_errata_install',
                'inputs': {'errata': f'{CUSTOM_REPO_ERRATA_ID}'},
                'targeting_type': 'static_query',
                'search_query': f'host_collection_id = {host_collection.id}',
                'organization_id': module_org.id,
            })
        _validate_package_installed(clients, constants.FAKE_2_CUSTOM_PACKAGE)
def test_positive_install_in_host(
    module_org, activation_key, custom_repo, rh_repo, rhel7_contenthost
):
    """Install errata in a host

    :id: 1e6fc159-b0d6-436f-b945-2a5731c46df5

    :Setup: Errata synced on satellite server.

    :Steps: POST /api/v2/job_invocations/{hash}

    :expectedresults: errata is installed in the host.

    :CaseLevel: System

    :BZ: 1983043
    """
    client = rhel7_contenthost
    # Register the client against the module org and verify subscription.
    client.install_katello_ca()
    client.register_contenthost(module_org.label, activation_key.name)
    assert client.subscribed
    client.enable_repo(constants.REPOS['rhst7']['id'])
    # Install an outdated package so the erratum becomes applicable.
    host_id = client.nailgun_host.id
    _install_package(
        module_org,
        clients=[client],
        host_ids=[host_id],
        package_name=constants.FAKE_1_CUSTOM_PACKAGE,
    )
    add_remote_execution_ssh_key(client.hostname)
    # Apply the erratum via the REX katello_errata_install feature.
    entities.JobInvocation().run(
        data={
            'feature': 'katello_errata_install',
            'inputs': {'errata': f'{CUSTOM_REPO_ERRATA_ID}'},
            'targeting_type': 'static_query',
            'search_query': f'name = {client.hostname}',
            'organization_id': module_org.id,
        },
    )
    _validate_package_installed([client], constants.FAKE_2_CUSTOM_PACKAGE)
def test_post_scenario_remoteexecution_satellite(self):
    """Run a REX job on pre-upgrade created client registered
    with Satellite.

    :id: postupgrade-ad3b1564-d3e6-4ada-9337-3a6ee6863bae

    :steps: 1. Run a REX job on content host.

    :expectedresults: 1. The job should successfully executed on
        pre-upgrade created client.
    """
    # Hostname persisted by the pre-upgrade half of this scenario.
    client_name = get_entity_data(self.__class__.__name__)['client_name']
    rex_payload = {
        'job_template_id': 89,
        'inputs': {'command': 'ls'},
        'targeting_type': 'static_query',
        'search_query': f'name = {client_name}',
    }
    job = entities.JobInvocation().run(data=rex_payload)
    assert job['output']['success_count'] == 1
def test_post_scenario_remoteexecution_external_capsule(self):
    """Run a REX job on pre-upgrade created client registered
    with external capsule.

    :id: postupgrade-00ed2a25-b0bd-446f-a3fc-09149c57fe94

    :steps: 1. Run a REX job on content host.

    :expectedresults: 1. The job should successfully executed on
        pre-upgrade created client.
    """
    # Hostname persisted by the pre-upgrade half of this scenario.
    target = get_entity_data(self.__class__.__name__)['client_name']
    result = entities.JobInvocation().run(
        data={
            'job_template_id': 89,
            'inputs': {'command': 'ls'},
            'targeting_type': 'static_query',
            'search_query': f'name = {target}',
        }
    )
    assert result['output']['success_count'] == 1
def test_pre_scenario_remoteexecution_external_capsule(self):
    """Run REX job on client registered with external capsule

    :id: preupgrade-261dd2aa-be01-4c34-b877-54b8ee346561

    :steps:
        1. Create Subnet.
        2. Create Content host.
        3. Install katello-ca package and register to Satellite host.
        4. add rex ssh_key of external capsule on content host.
        5. run the REX job on client vm.

    :expectedresults:
        1. Content host should create with pre-required details.
        2. REX job should run on it.
    """
    # Initialize up front so the except-clause can safely check whether the
    # VM was ever created. Previously `client` could be unbound here (e.g.
    # if subnet creation failed), raising NameError and masking the
    # original failure.
    client = None
    try:
        default_loc_id = (entities.Location().search(
            query={'search': f'name="{DEFAULT_LOC}"'})[0].id)
        sn = entities.Subnet(
            domain=self.vm_domain,
            gateway=self.gateway,
            ipam='DHCP',
            location=[default_loc_id],
            mask=self.netmask,
            network=self.subnet,
            organization=[self.org.id],
            remote_execution_proxy=[entities.SmartProxy(id=2)],
        ).create()
        client = VirtualMachine(
            distro=DISTRO_RHEL7,
            provisioning_server=self.libvirt_vm,
            bridge=self.bridge,
        )
        client.create()
        client.install_capsule_katello_ca(capsule=self.proxy_name)
        client.register_contenthost(org=self.org.label, lce='Library')
        add_remote_execution_ssh_key(hostname=client.ip_addr,
                                     proxy_hostname=self.proxy_name)
        # Attach the client to the REX-enabled subnet.
        host = entities.Host().search(
            query={'search': f'name="{client.hostname}"'})
        host[0].subnet = sn
        host[0].update(['subnet'])
        job = entities.JobInvocation().run(
            data={
                'job_template_id': 89,
                'inputs': {'command': "ls"},
                'targeting_type': 'static_query',
                'search_query': f"name = {client.hostname}",
            })
        self.assertEqual(job['output']['success_count'], 1)
        # Persist the client hostname for the post-upgrade scenario.
        global_dict = {
            self.__class__.__name__: {'client_name': client.hostname}
        }
        create_dict(global_dict)
    except Exception:
        if client is not None and client._created:
            cleanup_of_provisioned_server(
                hostname=client.hostname,
                provisioning_server=self.libvirt_vm,
                distro=DISTRO_RHEL7,
            )
        # Re-raise the original exception with its traceback intact instead
        # of wrapping it in a bare Exception (which loses the stack trace).
        raise
def test_positive_configure_cloud_connector(session, default_sat, subscribe_satellite,
                                            fixture_enable_receptor_repos):
    """Install Cloud Connector through WebUI button

    :id: 67e45cfe-31bb-51a8-b88f-27918c68f32e

    :Steps:

        1. Navigate to Configure > Inventory Upload
        2. Click Configure Cloud Connector
        3. Open the started job and wait until it is finished

    :expectedresults: The Cloud Connector has been installed and the service is
        running

    :CaseLevel: Integration

    :CaseComponent: RHCloud-CloudConnector

    :CaseImportance: Medium

    :assignee: lhellebr

    :BZ: 1818076
    """
    # Copy foreman-proxy user's key to root@localhost user's authorized_keys
    default_sat.add_rex_key(satellite=default_sat)
    # Set Host parameter source_display_name to something random.
    # To avoid 'name has already been taken' error when run multiple times
    # on a machine with the same hostname.
    host_id = Host.info({'name': default_sat.hostname})['id']
    Host.set_parameter({
        'host-id': host_id,
        'name': 'source_display_name',
        'value': gen_string('alpha')
    })
    with session:
        # Reconfiguring would mask a broken from-scratch install, so bail out
        # if the connector was set up by a previous run.
        if session.cloudinventory.is_cloud_connector_configured():
            pytest.skip(
                'Cloud Connector has already been configured on this system. '
                'It is possible to reconfigure it but then the test would not really '
                'check if everything is correctly configured from scratch. Skipping.'
            )
        session.cloudinventory.configure_cloud_connector()
    # The UI button starts a job invocation; locate it by its description and
    # wait until it reaches a terminal state.
    template_name = 'Configure Cloud Connector'
    invocation_id = (entities.JobInvocation().search(
        query={'search': f'description="{template_name}"'})[0].id)
    wait_for(
        lambda: entities.JobInvocation(id=invocation_id).read().status_label
        in ["succeeded", "failed"],
        timeout="1500s",
    )
    result = JobInvocation.get_output({
        'id': invocation_id,
        'host': default_sat.hostname
    })
    logger.debug(f"Invocation output>>\n{result}\n<<End of invocation output")
    # if installation fails, it's often due to missing rhscl repo -> print enabled repos
    repolist = default_sat.execute('yum repolist')
    logger.debug(f"Repolist>>\n{repolist}\n<<End of repolist")
    assert entities.JobInvocation(id=invocation_id).read().status == 0
    assert 'project-receptor.satellite_receptor_installer' in result
    assert 'Exit status: 0' in result
    # check that there is one receptor conf file and it's only readable
    # by the receptor user and root
    # NOTE(review): the assertions below accept one *or more* conf files
    # (>= 1), looser than this comment implies -- confirm intent.
    result = default_sat.execute(
        'stat /etc/receptor/*/receptor.conf --format "%a:%U"')
    assert all(filestats == '400:foreman-proxy'
               for filestats in result.stdout.strip().split('\n'))
    result = default_sat.execute('ls -l /etc/receptor/*/receptor.conf | wc -l')
    assert int(result.stdout.strip()) >= 1
def test_positive_run_receptor_installer(self, default_sat, subscribe_satellite,
                                         fixture_enable_receptor_repos):
    """Run Receptor installer ("Configure Cloud Connector")

    :CaseComponent: RHCloud-CloudConnector

    :Assignee: lhellebr

    :id: 811c7747-bec6-1a2d-8e5c-b5045d3fbc0d

    :expectedresults: The job passes, installs Receptor that peers with c.r.c

    :BZ: 1818076
    """
    # Reconfiguring would mask a broken from-scratch install, so bail out
    # if any receptor conf already exists.
    result = default_sat.execute('stat /etc/receptor/*/receptor.conf')
    if result.status == 0:
        pytest.skip(
            'Cloud Connector has already been configured on this system. '
            'It is possible to reconfigure it but then the test would not really '
            'check if everything is correctly configured from scratch. Skipping.'
        )
    # Copy foreman-proxy user's key to root@localhost user's authorized_keys
    default_sat.add_rex_key(satellite=default_sat)
    # Set Host parameter source_display_name to something random.
    # To avoid 'name has already been taken' error when run multiple times
    # on a machine with the same hostname.
    host_id = Host.info({'name': default_sat.hostname})['id']
    Host.set_parameter({
        'host-id': host_id,
        'name': 'source_display_name',
        'value': gen_string('alpha'),
    })
    template_name = 'Configure Cloud Connector'
    invocation = make_job_invocation({
        'async': True,
        'job-template': template_name,
        # Interpolate the admin credentials and keep the whole value on one
        # logical line: a backslash continuation inside the string literal
        # would embed the continuation's indentation whitespace into the
        # inputs string handed to hammer.
        'inputs': (
            f'satellite_user="{settings.server.admin_username}",'
            f'satellite_password="{settings.server.admin_password}"'
        ),
        'search-query': f'name ~ {default_sat.hostname}',
    })
    invocation_id = invocation['id']
    wait_for(
        lambda: entities.JobInvocation(id=invocation_id).read().status_label
        in ['succeeded', 'failed'],
        timeout='1500s',
    )
    result = JobInvocation.get_output({
        'id': invocation_id,
        'host': default_sat.hostname
    })
    logger.debug(f'Invocation output>>\n{result}\n<<End of invocation output')
    # if installation fails, it's often due to missing rhscl repo -> print enabled repos
    repolist = default_sat.execute('yum repolist')
    logger.debug(f'Repolist>>\n{repolist}\n<<End of repolist')
    assert entities.JobInvocation(id=invocation_id).read().status == 0
    assert 'project-receptor.satellite_receptor_installer' in result
    assert 'Exit status: 0' in result
    # check that there is one receptor conf file and it's only readable
    # by the receptor user and root
    result = default_sat.execute(
        'stat /etc/receptor/*/receptor.conf --format "%a:%U"')
    assert all(filestats == '400:foreman-proxy'
               for filestats in result.stdout.strip().split('\n'))
    result = default_sat.execute('ls -l /etc/receptor/*/receptor.conf | wc -l')
    assert int(result.stdout.strip()) >= 1