def run_puppet_agent(self):
    """Pull the client configuration from the puppet master and apply it
    on the local host.

    Running the agent once guarantees that puppet reports exist for
    later assertions.
    """
    ssh.command('puppet agent -t')
def _restore_from_savepoint(self, savepoint):
    """Restore the database from the given backup savepoint."""
    if savepoint == '':
        # Nothing to restore; log it so the gap is visible in test logs.
        self.logger.warning('No savepoint while continuing test!')
        return
    backup_path = '/home/backup/{0}'.format(savepoint)
    self.logger.info('Reset db from {0}'.format(backup_path))
    ssh.command('./reset-db.sh {0}'.format(backup_path))
def suspend(self, ensure=False):
    """Suspend the virtual machine.

    :param bool ensure: ensure that the virtual machine is unreachable

    Notes:

    1. The virtual machine will consume system RAM but not processor
       resources. Disk and network I/O does not occur while the guest is
       suspended.
    2. This operation is immediate and the guest can be restarted with
       resume.

    :return: True when virsh reported success (and, with ``ensure``,
        the guest no longer answers ping), False otherwise.
    """
    result = ssh.command(
        u'virsh suspend {0}'.format(self._target_image),
        hostname=self.provisioning_server
    )
    # Fix: use the boolean expression directly instead of the redundant
    # "True if ... else False" form.
    suspended = result.return_code == 0
    if suspended and ensure:
        # ping one time the virtual machine to ensure that it's unreachable
        result = ssh.command(
            'ping -c 1 {}'.format(self.hostname),
            hostname=self.provisioning_server
        )
        suspended = result.return_code != 0
    return suspended
def resume(self, ensure=False, timeout=None, connection_timeout=30):
    """Restore from a suspended state

    :param bool ensure: ensure that the virtual machine is reachable
    :param int timeout: Time to wait for the ssh command to finish.
    :param int connection_timeout: Time to wait for establishing the
        connection.

    Note: This operation is immediate

    :return: True when virsh reported success (and, with ``ensure``,
        the guest answers ping again), False otherwise.
    """
    result = ssh.command(
        u'virsh resume {0}'.format(self._target_image),
        hostname=self.provisioning_server,
        timeout=timeout,
        connection_timeout=connection_timeout,
    )
    # Fix: use the boolean expression directly instead of the redundant
    # "True if ... else False" form.
    resumed = result.return_code == 0
    if resumed and ensure:
        # ping one time the virtual machine to ensure that it's reachable
        result = ssh.command(
            'ping -c 1 {}'.format(self.ip_addr),
            hostname=self.provisioning_server,
            connection_timeout=connection_timeout,
        )
        resumed = result.return_code == 0
    return resumed
def setUpClass(cls):
    """Build the expected permission map, trimming entries for plugins
    and features that are absent from the server under test.
    """
    super(PermissionTestCase, cls).setUpClass()
    cls.permissions = PERMISSIONS.copy()
    if get_server_software() == 'upstream':
        # Upstream merges DiscoveryRule permissions into the generic
        # bucket and lacks several downstream-only permissions.
        cls.permissions[None].extend(cls.permissions.pop('DiscoveryRule'))
        for name in (
                'app_root',
                'attachments',
                'configuration',
                'logs',
                'view_cases',
                'view_log_viewer',
        ):
            cls.permissions[None].remove(name)

    # Drop OpenSCAP permissions when the plugin rpm is not installed.
    result = ssh.command('rpm -qa | grep rubygem-foreman_openscap')
    if result.return_code != 0:
        cls.permissions.pop('ForemanOpenscap::Policy')
        cls.permissions.pop('ForemanOpenscap::ScapContent')
        for name in (
                'destroy_arf_reports',
                'view_arf_reports',
                'create_arf_reports',
        ):
            cls.permissions[None].remove(name)

    # Drop remote-execution permissions when that plugin is absent.
    result = ssh.command(
        'rpm -qa | grep rubygem-foreman_remote_execution'
    )
    if result.return_code != 0:
        for resource_type in (
                'JobInvocation',
                'JobTemplate',
                'RemoteExecutionFeature',
                'TemplateInvocation',
        ):
            cls.permissions.pop(resource_type)

    #: e.g. ['Architecture', 'Audit', 'AuthSourceLdap', …]
    cls.permission_resource_types = list(cls.permissions.keys())
    #: e.g. ['view_architectures', 'create_architectures', …]
    cls.permission_names = list(
        chain.from_iterable(cls.permissions.values()))
def configure_puppet(self, rhel_repo=None):
    """Install and configure the puppet agent on the virtual machine/Host.

    :param rhel_repo: Red Hat repository link from properties file.
    :return: None.
    :raises VirtualMachineError: if the puppet rpm cannot be installed.
    """
    sat6_hostname = settings.server.hostname
    self.configure_rhel_repo(rhel_repo)
    puppet_conf = (
        'pluginsync = true\n'
        'report = true\n'
        'ignoreschedules = true\n'
        'daemon = false\n'
        'ca_server = {0}\n'
        'server = {1}\n'
        .format(sat6_hostname, sat6_hostname)
    )
    if self.run(u'yum install puppet -y').return_code != 0:
        raise VirtualMachineError(
            'Failed to install the puppet rpm')
    self.run(
        'echo "{0}" >> /etc/puppet/puppet.conf'
        .format(puppet_conf)
    )
    # This particular puppet run on client would populate a cert on sat6
    # under the capsule --> certifcates or via cli "puppet cert list", so
    # that we sign it.
    self.run(u'puppet agent -t')
    ssh.command(u'puppet cert sign --all')
    # This particular puppet run would create the host entity under
    # 'All Hosts' and let's redirect stderr to /dev/null as errors at this
    # stage can be ignored.
    self.run(u'puppet agent -t 2> /dev/null')
def test_positive_insights_puppet_package_availability(self):
    """Check `redhat-access-insights-puppet` package availability for capsule

    :BZ: 1315844

    :id: a31b0e21-aa5d-44e2-a408-5e01b79db3a1

    :customerscenario: true

    :expectedresults: `redhat-access-insights-puppet` package is
        delivered in capsule repo and is available for installation on
        capsule via yum

    :CaseLevel: System
    """
    pkg = 'redhat-access-insights-puppet'
    # Prefer an already-installed copy coming from the capsule repo…
    result = ssh.command(
        'yum list {0} | grep @capsule'.format(pkg),
        hostname=self.capsule_ip
    )
    # …otherwise fall back to checking yum's available packages.
    if result.return_code != 0:
        result = ssh.command(
            'yum list available | grep {0}'.format(pkg),
            hostname=self.capsule_ip
        )
    self.assertEqual(result.return_code, 0)
def remove_capsule_katello_ca(self, capsule=None):
    """Removes katello-ca rpm and reset rhsm.conf from the virtual machine.

    :param: str capsule: Capsule hostname
    :raises robottelo.vm.VirtualMachineError: If katello-ca wasn't removed.
    """
    ssh.command(
        'yum erase -y $(rpm -qa |grep katello-ca-consumer)', self.ip_addr
    )
    # Verify the consumer rpm is really gone.
    check = ssh.command(
        'rpm -q katello-ca-consumer-{0}'.format(capsule), self.ip_addr)
    if check.return_code == 0:
        raise VirtualMachineError('Failed to remove the katello-ca rpm')
    # Point rhsm.conf back at the CDN defaults.
    rhsm_updates = (
        's/^hostname.*/hostname=subscription.rhn.redhat.com/',
        's|^prefix.*|prefix=/subscription|',
        's|^baseurl.*|baseurl=https://cdn.redhat.com|',
        's/^repo_ca_cert.*/repo_ca_cert=%(ca_cert_dir)sredhat-uep.pem/',
    )
    for sed_expr in rhsm_updates:
        sed_result = ssh.command(
            'sed -i -e "{0}" /etc/rhsm/rhsm.conf'.format(sed_expr),
            self.ip_addr
        )
        if sed_result.return_code != 0:
            raise VirtualMachineError('Failed to reset the rhsm.conf')
def organization_with_tr_data_manifests(cls, options=None):
    """Import Organizations (from spacewalk-report users) with manifests.

    :returns: A tuple of SSHCommandResult and a Dictionary containing
        the transition data of the Import
    """
    # Prepare one manifest per organization found in the CSV.
    manifest_list = []
    csv_records = cls.csv_to_dataset([options['csv-file']])
    man_dir = ssh.command(u'mktemp -d').stdout[0]
    org_names = set(rec['organization'] for rec in csv_records)
    for org in org_names:
        # Sanitize the org name so it is usable as a file name.
        for char in [' ', '.', '#']:
            org = org.replace(char, '_')
        remote_zip = u'{0}/{1}.zip'.format(man_dir, org)
        man_file = manifests.clone()
        ssh.upload_file(man_file, remote_zip)
        manifest_list.append(remote_zip)
        os.remove(man_file)
    options.update({'upload-manifests-from': man_dir})
    result = cls.organization(options)
    # Clean the temporary manifest directory on the server.
    ssh.command(u'rm -rf {0}'.format(man_dir))
    transition_data = cls.read_transition_csv(
        ssh.command(
            u'ls -v ${HOME}/.transition_data/organizations*'
        ).stdout[:-1]
    )
    return (result, transition_data)
def setUpClass(cls):
    """Create Org, Lifecycle Environment, Content View, Activation key
    """
    super(RemoteExecutionTestCase, cls).setUpClass()
    cls.org = make_org()
    org_id = cls.org['id']
    # Seed the trivial job template script on the server.
    ssh.command(
        '''echo 'getenforce' > {0}'''.format(TEMPLATE_FILE)
    )
    cls.env = make_lifecycle_environment({u'organization-id': org_id})
    cls.content_view = make_content_view({u'organization-id': org_id})
    cls.activation_key = make_activation_key({
        u'lifecycle-environment-id': cls.env['id'],
        u'organization-id': org_id,
    })
    # Add subscription to Satellite Tools repo to activation key
    setup_org_for_a_rh_repo({
        u'product': PRDS['rhel'],
        u'repository-set': REPOSET['rhst7'],
        u'repository': REPOS['rhst7']['name'],
        u'organization-id': org_id,
        u'content-view-id': cls.content_view['id'],
        u'lifecycle-environment-id': cls.env['id'],
        u'activationkey-id': cls.activation_key['id'],
    })
def test_selinux_foreman_module(self):
    """@Test: SELinux foreman module have the right version

    @Feature: Server health

    @Assert: Foreman RPM and SELinux module versions match
    """
    rpm_result = ssh.command('rpm -q foreman-selinux')
    self.assertEqual(rpm_result.return_code, 0)
    semodule_result = ssh.command('semodule -l | grep foreman')
    self.assertEqual(semodule_result.return_code, 0)

    def version_of(result):
        # Pull the first capture group of version_regex out of the
        # command's joined stdout.
        return self.version_regex.search(''.join(result.stdout)).group(1)

    # Sample rpm output: foreman-selinux-1.7.2.8-1.el7sat.noarch
    rpm_version = version_of(rpm_result)
    # Sample semodule output: foreman 1.7.2.8.1
    semodule_version = version_of(semodule_result)
    if rpm_version.endswith('-0'):
        # Examples of matching RPM and semodule version numbers:
        #
        # 1.7.2.8-0 1.7.2.8
        # 1.7.2.8-1 1.7.2.8.1
        # 1.7.2.8-2 1.7.2.8.2
        rpm_version = rpm_version[:-2]
    self.assertEqual(rpm_version.replace('-', '.'), semodule_version)
def setUpClass(cls):
    """Create Org, Lifecycle Environment, Content View, Activation key
    """
    super(RemoteExecutionTestCase, cls).setUpClass()
    cls.org = entities.Organization().create()
    # Seed the trivial job template script on the server.
    ssh.command(
        '''echo 'getenforce' > {0}'''.format(TEMPLATE_FILE)
    )
    # create subnet for current org, default loc and domain
    # using API due BZ#1370460
    cls.sn = entities.Subnet(
        domain=[1],
        gateway=settings.vlan_networking.gateway,
        ipam='DHCP',
        location=[DEFAULT_LOC_ID],
        mask=settings.vlan_networking.netmask,
        network=settings.vlan_networking.subnet,
        organization=[cls.org.id]
    ).create()
    # add rex proxy to subnet, default is internal proxy (id 1)
    if bz_bug_is_open(1328322):
        proxy_field, proxy_value = 'remote_execution_proxy_ids', [1]
    else:
        proxy_field, proxy_value = 'remote_execution_proxy_id', 1
    setattr(cls.sn, proxy_field, proxy_value)
    cls.sn.update([proxy_field])
def test_positive_check_installer_services(self):
    """Check if services start correctly

    :id: 85fd4388-6d94-42f5-bed2-24be38e9f104

    :expectedresults: All services {'elasticsearch', 'foreman-proxy',
        'foreman-tasks', 'httpd', 'mongod', 'postgresql',
        'pulp_celerybeat', 'pulp_resource_manager', 'pulp_workers',
        'qdrouterd', 'qpidd', 'tomcat'} are started
    """
    major_version = get_host_info()[1]
    services = (
        'elasticsearch', 'foreman-proxy', 'foreman-tasks', 'httpd',
        'mongod', 'postgresql', 'pulp_celerybeat',
        'pulp_resource_manager', 'pulp_workers', 'qdrouterd', 'qpidd',
        'tomcat6' if major_version == RHEL_6_MAJOR_VERSION else 'tomcat',
    )
    # check `services` status using service command
    if major_version >= RHEL_7_MAJOR_VERSION:
        status_format = 'systemctl status {0}'
    else:
        status_format = 'service {0} status'
    for service in services:
        result = ssh.command(status_format.format(service))
        # Fix: compare strings with == (identity via "is" is
        # implementation-dependent), and drop the "else: continue" that
        # previously skipped the status assertions for every service.
        if (major_version == RHEL_6_MAJOR_VERSION
                and service == 'qpidd'
                and not bz_bug_is_open(1246152)):
            # This is a note to fix this test once Bug 1246152 is fixed
            self.fail('Bug 1246152 is fixed. Fix Me.')
        self.assertEqual(result.return_code, 0)
        self.assertEqual(len(result.stderr), 0)
    # check status reported by hammer ping command
    result = ssh.command(u'hammer -u {0[0]} -p {0[1]} ping'.format(
        settings.server.get_credentials()
    ))
    # iterate over the lines grouping every 3 lines
    # example [1, 2, 3, 4, 5, 6] will return [(1, 2, 3), (4, 5, 6)]
    for service, status, server_response in zip(
            *[iter(result.stdout)] * 3):
        service = service.replace(':', '').strip()
        status = status.split(':')[1].strip().lower()
        server_response = server_response.split(':', 1)[1].strip()
        self.assertEqual(
            status, 'ok',
            '{0} responded with {1}'.format(service, server_response)
        )
def organization_with_tr_data_manifests(cls, options=None):
    """Import Organizations (from spacewalk-report users) with manifests.

    :returns: A tuple of SSHCommandResult and a Dictionary containing
        the transition data of the Import
    """
    # Prepare one manifest per organization found in the CSV.
    manifest_list = []
    csv_records = cls.csv_to_dataset([options["csv-file"]])
    man_dir = ssh.command(u"mktemp -d").stdout[0]
    org_names = {rec["organization"] for rec in csv_records}
    for org in org_names:
        # Sanitize the org name so it is usable as a file name.
        for char in [" ", ".", "#"]:
            org = org.replace(char, "_")
        target = u"{0}/{1}.zip".format(man_dir, org)
        with manifests.clone() as manifest:
            ssh.upload_file(manifest.content, target)
        manifest_list.append(target)
    options.update({"upload-manifests-from": man_dir})
    result = cls.organization(options)
    # Clean the temporary manifest directory on the server.
    ssh.command(u"rm -rf {0}".format(man_dir))
    transition_data = cls.read_transition_csv(
        ssh.command(u"ls -v ${HOME}/.transition_data/organizations*").stdout[:-1]
    )
    return (result, transition_data)
def default_url_on_new_port(oldport, newport):
    """Creates context where the default smart-proxy is forwarded on a new port

    :param int oldport: Port to be forwarded.
    :param int newport: New port to be used to forward `oldport`.

    :return: A string containing the new capsule URL with port.
    :rtype: str
    """
    logger = logging.getLogger('robottelo')
    domain = settings.server.hostname
    user = settings.server.ssh_username
    key = settings.server.ssh_key
    # One key copy per target port, so parallel tunnels don't clash.
    ssh.upload_file(key, '/tmp/dsa_{0}'.format(newport))
    ssh.command('chmod 700 /tmp/dsa_{0}'.format(newport))
    with ssh._get_connection() as connection:
        # Local port forwarding back to the same host: connections to
        # `newport` are forwarded to `oldport`.
        command = u'ssh -i {0} -L {1}:{2}:{3} {4}@{5}'.format(
            '/tmp/dsa_{0}'.format(newport), newport, domain, oldport,
            user, domain)
        logger.debug('Creating tunnel {0}'.format(command))
        # Run command and timeout in 30 seconds.
        _, _, stderr = connection.exec_command(command, 30)
        # NOTE(review): stderr.read() blocks until the stream yields or
        # closes -- presumably bounded by the 30s timeout above; confirm
        # against ssh._get_connection's exec_command semantics.
        stderr = stderr.read()
        if len(stderr) > 0:
            logger.debug('Tunnel failed: {0}'.format(stderr))
            # Something failed, so raise an exception.
            raise SSHTunnelError(stderr)
        # Generator: the tunnel lives for the duration of the caller's
        # `with` block (this function is used as a context manager).
        yield 'https://{0}:{1}'.format(domain, newport)
def test_positive_foreman_module(self):
    """Check if SELinux foreman module has the right version

    @id: a0736b3a-3d42-4a09-a11a-28c1d58214a5

    @Assert: Foreman RPM and SELinux module versions match
    """
    rpm_result = ssh.command('rpm -q foreman-selinux')
    self.assertEqual(rpm_result.return_code, 0)
    semodule_result = ssh.command('semodule -l | grep foreman')
    self.assertEqual(semodule_result.return_code, 0)

    def extract_version(cmd_result):
        # First capture group of version_regex over the joined stdout.
        return self.version_regex.search(
            ''.join(cmd_result.stdout)).group(1)

    # Sample rpm output: foreman-selinux-1.7.2.8-1.el7sat.noarch
    rpm_version = extract_version(rpm_result)
    # Sample semodule output: foreman 1.7.2.8.1
    semodule_version = extract_version(semodule_result)
    if rpm_version.endswith('-0'):
        # Examples of matching RPM and semodule version numbers:
        #
        # 1.7.2.8-0 1.7.2.8
        # 1.7.2.8-1 1.7.2.8.1
        # 1.7.2.8-2 1.7.2.8.2
        rpm_version = rpm_version[:-2]
    self.assertEqual(rpm_version.replace('-', '.'), semodule_version)
def create(self):
    """Creates a virtual machine on the provisioning server using
    snap-guest

    :raises robottelo.vm.VirtualMachineError: Whenever a virtual machine
        could not be executed.
    """
    # Idempotent: a second call is a no-op once the VM exists.
    if self._created:
        return
    command_args = [
        'snap-guest',
        '-b {source_image}',
        '-t {target_image}',
        '-m {vm_ram}',
        '-c {vm_cpu}',
        '-n bridge=br0 -f',
    ]
    if self.image_dir is not None:
        command_args.append('-p {image_dir}')
    if self._domain is None:
        # Derive the domain from the provisioning server's FQDN.
        try:
            self._domain = self.provisioning_server.split('.', 1)[1]
        except IndexError:
            raise VirtualMachineError(
                u"Failed to fetch domain from provisioning server: {0} "
                .format(self.provisioning_server))
    command = u' '.join(command_args).format(
        source_image=u'{0}-base'.format(self.distro),
        target_image=u'{0}.{1}'.format(self._target_image, self._domain),
        vm_ram=self.ram,
        vm_cpu=self.cpu,
        image_dir=self.image_dir,
    )
    result = ssh.command(command, self.provisioning_server)
    if result.return_code != 0:
        raise VirtualMachineError(
            u'Failed to run snap-guest: {0}'.format(result.stderr))
    # Give some time to machine boot
    time.sleep(60)
    # Ping the guest's mDNS (.local) name once to learn its IP address.
    result = ssh.command(
        u'ping -c 1 {0}.local'.format(self._target_image),
        self.provisioning_server
    )
    if result.return_code != 0:
        raise VirtualMachineError(
            'Failed to fetch virtual machine IP address information')
    # ping output contains "... (<ip>) ..."; grab the parenthesized IP.
    output = ''.join(result.stdout)
    self.ip_addr = output.split('(')[1].split(')')[0]
    self.hostname = u'{0}.{1}'.format(self._target_image, self._domain)
    self._created = True
def setUpClass(cls):
    """Create an organization to be reused in tests."""
    super(JobTemplateTestCase, cls).setUpClass()
    cls.organization = make_org()
    # Seed both template files used by the job template tests: one with
    # a command input and one empty.
    for cmd in (
            '''echo '<%= input("command") %>' > {0}'''.format(TEMPLATE_FILE),
            'touch {0}'.format(TEMPLATE_FILE_EMPTY),
    ):
        ssh.command(cmd)
def fixture_org():
    """Create a test organization and seed the REX template file."""
    organization = entities.Organization().create()
    ssh.command(
        '''echo 'echo Enforcing' > {0}'''.format(TEMPLATE_FILE)
    )
    # Comment out ProxyCommand lines; needed to work around BZ#1656480.
    ssh.command('''sed -i '/ProxyCommand/s/^/#/g' /etc/ssh/ssh_config''')
    return organization
def single_register_attach(cls, sub_id, default_org, environment, vm_ip):
    """Subscribe VM to Satellite by Register + Attach"""
    # Start from a clean subscription state on the VM.
    ssh.command('subscription-manager clean', hostname=vm_ip)
    reg_time = cls.sub_mgr_register_authentication(
        default_org, environment, vm_ip)
    att_time = cls.sub_mgr_attach(sub_id, vm_ip)
    return (reg_time, att_time)
def _pxe_boot_host(self, mac):
    """PXE boot a unknown host"""
    libvirt_server = 'qemu+tcp://{0}:16509/system'.format(
        conf.properties['main.server.hostname'])
    boot_cmd = (
        'virt-install --hvm --network=bridge:virbr1, --mac={0} '
        '--pxe --name {1} --ram=1024 --vcpus=1 --os-type=linux '
        '--os-variant=rhel7 --disk path={2},size=10 --connect {3} '
        '--noautoconsole'
    ).format(mac, self.name, self.image_path, libvirt_server)
    ssh.command(boot_cmd)
    # Give the guest time to boot and request PXE.
    sleep(30)
def _capsule_setup_name_resolution(self):
    """Setup a name resolution so the capsule and satellite are resolvable
    """
    # Make the capsule resolve its own name locally.
    self.run('echo "{0} {1} {2}" >> /etc/hosts'.format(
        self.ip_addr, self._capsule_hostname,
        self._capsule_instance_name))
    # add the capsule reverse record to the satellite hosts file
    ssh.command(
        u'sed -i \'/{0}/d\' /etc/hosts &&'
        u' echo "{1} {0}" >> /etc/hosts'
        .format(self._capsule_hostname, self.ip_addr),
        hostname=settings.server.hostname
    )
    if self.distro[:-1] == DISTRO_RHEL7:
        self.run('hostnamectl set-hostname {}'.format(
            self._capsule_hostname))

    def ensure_host_resolved(
            ssh_func, host_to_ping, ip_addr, time_sleep=60, retries=10):
        # Ping until the name resolves to the expected IP (the ping
        # output must contain "(<ip>)") or retries are exhausted.
        resolved = False
        retry_max_index = retries - 1
        for retry_index in range(retries):
            ssh_func_result = ssh_func('ping -c 1 {}'.format(host_to_ping))
            ssh_func_output = ''.join(ssh_func_result.stdout)
            if ssh_func_result.return_code == 0 and (
                    '({})'.format(ip_addr) in ssh_func_output):
                resolved = True
                break
            if retry_index != retry_max_index:
                # do not sleep at last index
                time.sleep(time_sleep)
        return resolved

    # Ensure capsule hostname is resolvable from the server host
    hostname_resolved = ensure_host_resolved(
        ssh.command, self._capsule_hostname, self.ip_addr)
    if not hostname_resolved:
        raise CapsuleVirtualMachineError(
            'Failed to resolve the capsule hostname from the server')
    # Ensure capsule hostname is resolvable at capsule host
    # NOTE: the following check is intentionally disabled (kept as a
    # string literal, not executed).
    '''hostname_resolved = ensure_host_resolved(
        self.run, self._capsule_hostname, '127.0.0.1', retries=1)
    if not hostname_resolved:
        raise CapsuleVirtualMachineError(
            'Failed to resolver the capsule hostname from capsule')
    '''
    if self.distro[:-1] == DISTRO_RHEL7:
        # Add RH-Satellite-6 service to firewall public zone
        self.run('firewall-cmd --zone=public --add-service={}'.format(
            SATELLITE_FIREWALL_SERVICE_NAME))
def setUpClass(cls):
    """Create an organization which can be re-used in tests."""
    super(RemoteExecutionTestCase, cls).setUpClass()
    cls.organization = make_org()
    # Seed the job template script used by the REX tests.
    ssh.command("""echo 'getenforce' > {0}""".format(TEMPLATE_FILE))
    client = VirtualMachine(distro=DISTRO_RHEL7)
    cls.client = client
    cls.addCleanup(vm_cleanup, client)
    client.create()
    client.install_katello_ca()
    client.register_contenthost(cls.organization["label"], lce="Library")
    client.enable_repo(REPOS["rhst7"]["id"])
    client.install_katello_agent()
    add_remote_execution_ssh_key(client.hostname)
def test_positive_export_import_cv(self):
    """Export CV version contents in directory and Import them.

    :id: b08e9f24-f18e-43b7-9189-ad7b596ccb5b

    :steps:

        1. Export whole CV version contents to a directory
        3. Import those contents from some other org/satellite.

    :expectedresults:

        1. Whole CV version contents has been exported to directory
        2. All The exported contents has been imported in org/satellite.

    :CaseLevel: System
    """
    ContentView.version_export({
        'export-dir': '{}'.format(self.export_dir),
        'id': self.exporting_cvv_id
    })
    exported_tar = '{0}/export-{1}.tar'.format(
        self.export_dir, self.exporting_cvv_id)
    # The tarball must exist on the server after the export.
    self.assertEqual(
        ssh.command("[ -f {0} ]".format(exported_tar)).return_code, 0)
    exported_packages = Package.list(
        {'content-view-version-id': self.exporting_cvv_id})
    self.set_importing_org(
        self.exporting_prod, self.exporting_repo, self.exporting_cv)
    ContentView.version_import({
        'export-tar': exported_tar,
        'organization-id': self.importing_org['id']
    })
    importing_cvv_id = ContentView.info({
        u'id': self.importing_cv['id']
    })['versions'][0]['id']
    imported_packages = Package.list(
        {'content-view-version-id': importing_cvv_id})
    # Import must bring over the same number of packages.
    self.assertEqual(len(exported_packages), len(imported_packages))
def _traverse_command_tree(self, command):
    """Recursively walk through the hammer commands tree and assert that
    the expected options are present.
    """
    # Parse `<command> --help` into its options and subcommands.
    output = hammer.parse_help(
        ssh.command('{0} --help'.format(command)).stdout
    )
    command_options = set([option['name'] for option in output['options']])
    command_subcommands = set(
        [subcommand['name'] for subcommand in output['subcommands']]
    )
    if 'discovery_rule' in command and bz_bug_is_open(1219610):
        # Adjust the discovery_rule subcommand name. The expected data is
        # already with the naming convetion name
        expected = _fetch_command_info(
            command.replace('discovery_rule', 'discovery-rule'))
    else:
        expected = _fetch_command_info(command)
    expected_options = set()
    expected_subcommands = set()
    if expected is not None:
        expected_options = set(
            [option['name'] for option in expected['options']]
        )
        expected_subcommands = set(
            [subcommand['name'] for subcommand in expected['subcommands']]
        )
    if command == 'hammer' and bz_bug_is_open(1219610):
        # Adjust the discovery_rule subcommand name
        command_subcommands.discard('discovery_rule')
        command_subcommands.add('discovery-rule')
    # Record the set differences between actual and expected in both
    # directions; any non-empty difference is a reportable deviation.
    added_options = tuple(command_options - expected_options)
    removed_options = tuple(expected_options - command_options)
    added_subcommands = tuple(command_subcommands - expected_subcommands)
    removed_subcommands = tuple(expected_subcommands - command_subcommands)
    if (added_options or added_subcommands or removed_options or
            removed_subcommands):
        diff = {
            'added_command': expected is None,
        }
        if added_options:
            diff['added_options'] = added_options
        if removed_options:
            diff['removed_options'] = removed_options
        if added_subcommands:
            diff['added_subcommands'] = added_subcommands
        if removed_subcommands:
            diff['removed_subcommands'] = removed_subcommands
        self.differences[command] = diff
    # Recurse into every subcommand of the current command.
    if len(output['subcommands']) > 0:
        for subcommand in output['subcommands']:
            self._traverse_command_tree(
                '{0} {1}'.format(command, subcommand['name'])
            )
def single_register_activation_key(cls, ak_name, default_org, vm_ip):
    """Subscribe VM to Satellite by Register + ActivationKey

    :param str ak_name: activation key to register with
    :param str default_org: organization to register into
    :param str vm_ip: address of the VM being subscribed
    :return: real (wall-clock) time of the register command, parsed
        from `time -p` output on stderr
    """
    # note: must create ssh keys for vm if running on local
    # Fix: the result of the clean command was assigned to `result` but
    # immediately overwritten -- drop the dead assignment.
    ssh.command('subscription-manager clean', hostname=vm_ip)
    result = ssh.command(
        'time -p subscription-manager register --activationkey={0} '
        '--org={1}'.format(ak_name, default_org),
        hostname=vm_ip
    )
    if result.return_code != 0:
        LOGGER.error('Fail to subscribe {0} by ak!'.format(vm_ip))
    else:
        LOGGER.info('Subscribe client {0} successfully'.format(vm_ip))
    return cls.get_real_time(result.stderr)
def get_host_info(hostname=None):
    """Get remote host's distribution information

    :param str hostname: Hostname or IP address of the remote host. If
        ``None`` the hostname will be get from ``main.server.hostname``
        config.
    :returns: A tuple in the form ``(distro, major, minor)``. ``major``
        and ``minor`` are integers. ``minor`` can be ``None`` if not
        available.
    """
    result = ssh.command('cat /etc/redhat-release', hostname)
    if result.return_code != 0:
        raise HostInfoError('Not able to cat /etc/redhat-release "{0}"'.format(
            result.stderr
        ))
    release_line = result.stdout[0]
    match = re.match(
        r'(?P<distro>.+) release (?P<major>\d+)(.(?P<minor>\d+))?',
        release_line,
    )
    if match is None:
        raise HostInfoError(
            u'Not able to parse release string "{0}"'.format(release_line))
    distro = match.group('distro')
    major = int(match.group('major'))
    minor = match.group('minor')
    if minor is not None:
        minor = int(minor)
    return (distro, major, minor)
def test_positive_install_multiple_packages_with_a_job_by_ip(self): """Run job to install several packages on host by ip :id: 8b73033f-83c9-4024-83c3-5e442a79d320 :expectedresults: Verify the packages were successfully installed on host """ # set connecting to host by ip Host.set_parameter({ 'host': self.client.hostname, 'name': 'remote_execution_connect_by_ip', 'value': 'True', }) packages = ["cow", "dog", "lion"] # Create a custom repo repo = entities.Repository( content_type='yum', product=entities.Product(organization=self.org).create(), url=FAKE_0_YUM_REPO, ).create() repo.sync() prod = repo.product.read() subs = entities.Subscription().search( query={'search': 'name={0}'.format(prod.name)} ) self.assertGreater( len(subs), 0, 'No subscriptions matching the product returned' ) ak = entities.ActivationKey( organization=self.org, content_view=self.org.default_content_view, environment=self.org.library ).create() ak.add_subscriptions(data={'subscriptions': [{'id': subs[0].id}]}) self.client.register_contenthost( org=self.org.label, activation_key=ak.name ) invocation_command = make_job_invocation({ 'job-template': 'Install Package - Katello SSH Default', 'inputs': 'package={0} {1} {2}'.format(*packages), 'search-query': "name ~ {0}".format(self.client.hostname), }) try: self.assertEqual(invocation_command['success'], u'1') except AssertionError: result = 'host output: {0}'.format( ' '.join(JobInvocation.get_output({ 'id': invocation_command[u'id'], 'host': self.client.hostname}) ) ) raise AssertionError(result) result = ssh.command( "rpm -q {0}".format(" ".join(packages)), hostname=self.client.ip_addr ) self.assertEqual(result.return_code, 0)
def test_installer_options_and_flags():
    """Look for changes on installer options and flags

    :id: a51d3b9f-f347-4a96-a31a-770349db08c7

    :Steps:
        1. parse installer options and flags
        2. compare with last options

    :expectedresults: Ideally options should not change on zstreams.
        Documentation must be updated accordingly when such changes
        occur. So when this test fail we QE can act on it, asking dev
        if changes occurs on zstream and checking docs are up to date.
    """
    stdout = ssh.command('satellite-installer --full-help').stdout
    current_installer_options = set(extract_params(stdout or []))
    # sorted() replaces the list()+.sort() pair; result is identical
    removed_options = sorted(
        PREVIOUS_INSTALLER_OPTIONS - current_installer_options)
    added_options = sorted(
        current_installer_options - PREVIOUS_INSTALLER_OPTIONS)
    msg = "###Removed options:\n{}\n###Added options:\n{}".format(
        removed_options, added_options
    )
    assert PREVIOUS_INSTALLER_OPTIONS == current_installer_options, msg
def get_services_status():
    """Check if core services are running"""
    major_version = get_host_info()[1]
    tomcat = 'tomcat6' if major_version == RHEL_6_MAJOR_VERSION else 'tomcat'
    services = (
        'foreman-proxy', 'foreman-tasks', 'httpd', 'mongod', 'postgresql',
        'pulp_celerybeat', 'pulp_resource_manager', 'pulp_streamer',
        'pulp_workers', 'qdrouterd', 'qpidd', 'smart_proxy_dynflow_core',
        'squid', tomcat,
    )
    # check `services` status using service command
    if major_version >= RHEL_7_MAJOR_VERSION:
        status_format = '''(for i in {0}; do systemctl is-active $i -q; rc=$?; if [[ $rc != 0 ]]; then systemctl status $i; exit $rc; fi; done);'''
    else:
        status_format = '''(for i in {0}; do service $i status &>/dev/null; rc=$?; if [[ $rc != 0 ]]; then service $i status; exit $rc; fi; done);'''
    result = ssh.command(status_format.format(' '.join(services)))
    return [result.return_code, result.stdout]
def get_available_capsule_port(port_pool=None):
    """returns a list of unused ports dedicated for fake capsules

    This calls an ss command on the server prompting for a port range. ss
    returns a list of ports which have a PID assigned (a list of ports
    which are already used). This function then substracts unavailable
    ports from the other ones and returns one of available ones randomly.

    :param port_pool: A list of ports used for fake capsules (for RHEL7+:
        don't forget to set a correct selinux context before otherwise
        you'll get Connection Refused error)

    :return: Random available port from interval <9091, 9190>.
    :rtype: int
    """
    if port_pool is None:
        port_pool_range = settings.fake_capsules.port_range
        # The configured range may be a 'low-high' string or a 2-tuple.
        # Fix: isinstance() instead of type() comparison.
        if isinstance(port_pool_range, str):
            port_pool_range = tuple(port_pool_range.split('-'))
        if isinstance(port_pool_range, tuple) and len(port_pool_range) == 2:
            port_pool = range(int(port_pool_range[0]), int(port_pool_range[1]))
        else:
            raise TypeError(
                'Expected type of port_range is a tuple of 2 elements,'
                f'got {type(port_pool_range)} instead')
    # returns a list of strings
    ss_cmd = ssh.command(
        f"ss -tnaH sport ge {port_pool[0]} sport le {port_pool[-1]}"
        " | awk '{n=split($4, p, \":\"); print p[n]}' | sort -u")
    if ss_cmd.stderr:
        raise CapsuleTunnelError(
            f'Failed to create ssh tunnel: Error getting port status: {ss_cmd.stderr}'
        )
    # Fixes: removed a leftover debug print(ss_cmd); build an eager set
    # instead of a lazy map object -- the previous map iterator was
    # consumed by the first `in` check (making later ports always look
    # free) and deferred int() failures past the except clause below.
    try:
        used_ports = {
            int(val)
            for val in ss_cmd.stdout[:-1]
            if val != 'Cannot stat file '
        }
    except ValueError:
        raise CapsuleTunnelError(
            f'Failed parsing the port numbers from stdout: {ss_cmd.stdout[:-1]}'
        )
    try:
        # take the list of available ports and return randomly selected one
        return random.choice(
            [port for port in port_pool if port not in used_ports])
    except IndexError:
        raise CapsuleTunnelError(
            'Failed to create ssh tunnel: No more ports available for mapping')
def test_positive_export_json_output(self, create_import_export_local_dir, module_org):
    """Assert template export output returns template names

    :id: 141b893d-72a3-47c2-bb03-004c757bcfc9

    :Steps:
        1. Using nailgun or direct API call
           Export all the templates

    :expectedresults:
        1. Assert json output has all the exported template names
           and typewise

    :Requirement: Take Templates out of tech preview

    :CaseImportance: Low
    """
    prefix = gen_string('alpha')
    imported_templates = entities.Template().imports(
        data={
            'repo': FOREMAN_TEMPLATE_IMPORT_URL,
            'branch': 'automation',
            'organization_ids': [module_org.id],
            'prefix': prefix,
            'dirname': 'import',
        }
    )
    import_flags = [
        template['imported']
        for template in imported_templates['message']['templates']
    ]
    assert import_flags.count(True) == 18  # Total Count
    # Export some filtered templates to local dir
    _, dir_path = create_import_export_local_dir
    exported_templates = entities.Template().exports(
        data={'repo': dir_path, 'organization_ids': [module_org.id], 'filter': prefix}
    )
    export_entries = exported_templates['message']['templates']
    export_flags = [template['exported'] for template in export_entries]
    assert export_flags.count(True) == 18
    assert 'name' in export_entries[0].keys()
    # Each template kind must land in its own subdirectory.
    assert (
        ssh.command(
            f'[ -d {dir_path}/job_templates ] && '
            f'[ -d {dir_path}/partition_tables_templates ] && '
            f'[ -d {dir_path}/provisioning_templates ] && '
            f'[ -d {dir_path}/report_templates ]'
        ).return_code
        == 0
    )
def install_katello_ca(hostname=None, sat_hostname=None):
    """Downloads and installs katello-ca rpm

    :param str hostname: Hostname or IP address of the remote host. If
        ``None`` the hostname will be get from ``main.server.hostname``
        config
    :return: None.
    :raises: AssertionError: If katello-ca wasn't installed.
    """
    if sat_hostname:
        cert_rpm_url = f'http://{sat_hostname}/pub/katello-ca-consumer-latest.noarch.rpm'
    else:
        sat_hostname = settings.server.hostname
        cert_rpm_url = get_cert_rpm_url()
    # Not checking the return_code here, as rpm could be installed before
    # and installation may fail
    ssh.command(f'rpm -Uvh {cert_rpm_url}', hostname)
    # Checking the return_code here to verify katello-ca rpm is actually
    # present in the system
    install_check = ssh.command(
        f'rpm -q katello-ca-consumer-{sat_hostname}', hostname)
    if install_check.return_code != 0:
        raise AssertionError('Failed to install the katello-ca rpm')
def runcmd(cmd, system=None, timeout=600, output_format='base'):
    """Return the retcode and stdout.

    :param str cmd: The command line will be executed in the target system.
    :param dict system: the system account which ssh will connect to,
        it will connect to the satellite host if the system is None.
    :param int timeout: Time to wait for establish the connection.
    :param str output_format: base|json|csv|list
    """
    # Fall back to the satellite host when no system account was given.
    target = system or get_system('satellite')
    result = ssh.command(
        cmd, **target, timeout=timeout, output_format=output_format)
    return result.return_code, result.stdout.strip()
def test_positive_failed_login_attempts_limit(self):
    """automate brute force protection limit configurable function

    :id: f95407ed-451b-4387-ac9b-2959ae2f51ae

    :steps:
        1. Make sure login works.
        2. Save current value and set it to some lower value:
        3. Try to login with wrong password till failed_login_attempts_limit
        4. Make sure login now does not work:
        5. Wait timeout - 5 minutes + 1 second
        6. Verify you can now login fine
        7. Return the setting to previous value

    :CaseImportance: Critical
    :CaseLevel: System
    :expectedresults: failed_login_attempts_limit works as expected
    :CaseAutomation: automated
    """
    good_login = 'hammer -u {0} -p {1} user list'.format(
        self.foreman_user, self.foreman_password)
    bad_login = 'hammer -u {0} -p BAD_PASS user list'.format(
        self.foreman_user)
    # Login works before the limit is tightened
    self.assertEqual(ssh.command(good_login).return_code, 0)
    Settings.set({u'name': u'failed_login_attempts_limit', u'value': '5'})
    # Burn through the allowed failed attempts
    for _ in range(5):
        self.assertEqual(ssh.command(bad_login).return_code, 129)
    # Even correct credentials are now rejected
    self.assertEqual(ssh.command(good_login).return_code, 129)
    # Wait out the lock: 5 minutes + 1 second
    sleep(301)
    # Login works again once the lock expires
    self.assertEqual(ssh.command(good_login).return_code, 0)
def cleanup_virt_who(server=None):
    """Remove all virt-who config files and stop/disable the virt-who
    service on the target server (the configured satellite by default).
    """
    hostname = server or settings.server.hostname
    config_glob = VIRTWHO_CONFIG_FILE_PATH_PATTERN.format('*')
    for cmd in (
        "rm -rf {}".format(config_glob),
        "systemctl stop virt-who",
        "systemctl disable virt-who",
    ):
        ssh.command(cmd, hostname=hostname)
def test_positive_disable_hammer_defaults(self):
    """Verify hammer disable defaults command.

    :id: d0b65f36-b91f-4f2f-aaf8-8afda3e23708

    :steps:
        1. Add hammer defaults as organization-id.
        2. Verify hammer product list successful.
        3. Run hammer --no-use-defaults product list.

    :expectedresults: Hammer --no-use-defaults product list should fail.

    :CaseImportance: Critical

    :BZ: 1640644
    """
    default_org = make_org()
    default_product_name = gen_string('alpha')
    make_product({'name': default_product_name, 'organization-id': default_org['id']})
    try:
        Defaults.add({'param-name': 'organization_id', 'param-value': default_org['id']})
        # Verify --organization-id is not required to pass if defaults are set
        result = ssh.command('hammer product list')
        self.assertEqual(result.return_code, 0)
        # Verify product list fail without using defaults
        result = ssh.command('hammer --no-use-defaults product list')
        self.assertNotEqual(result.return_code, 0)
        # assertNotIn/assertIn (instead of assertFalse/assertTrue on `in`)
        # give the actual haystack in the failure message
        self.assertNotIn(default_product_name, "".join(result.stdout))
        # Verify --organization-id is not required to pass if defaults are set
        result = ssh.command('hammer --use-defaults product list')
        self.assertEqual(result.return_code, 0)
        self.assertIn(default_product_name, "".join(result.stdout))
    finally:
        Defaults.delete({'param-name': 'organization_id'})
        result = ssh.command('hammer defaults list')
        self.assertNotIn(default_org['id'], "".join(result.stdout))
def test_positive_check_debug_log_levels():
    """Enabling debug log level in candlepin via hammer logging

    :id: 029c80f1-2bc5-494e-a04a-7d6beb0f769a

    :expectedresults: Verify enabled debug log level

    :customerscenario: true

    :CaseImportance: Medium

    :BZ: 1760773
    """
    Admin.logging({'all': True, 'level-debug': True})
    # Candlepin config should now carry the DEBUG log level
    debug_result = ssh.command('grep DEBUG /etc/candlepin/candlepin.conf')
    assert debug_result.return_code == 0
    assert 'log4j.logger.org.candlepin = DEBUG' in debug_result.stdout
    Admin.logging({"all": True, "level-production": True})
    # Back at production level the candlepin logger drops to WARN
    warn_result = ssh.command('grep WARN /etc/candlepin/candlepin.conf')
    assert warn_result.return_code == 0
    assert 'log4j.logger.org.candlepin = WARN' in warn_result.stdout
def install_katello_ca(hostname=None):
    """Downloads and installs katello-ca rpm

    :param str hostname: Hostname or IP address of the remote host. If
        ``None`` the hostname will be get from ``main.server.hostname`` config
    :return: None.
    :raises: AssertionError: If katello-ca wasn't installed.
    """
    # The rpm may already be installed, so the install return code is
    # deliberately ignored
    ssh.command(
        u'rpm -Uvh {0}'.format(settings.server.get_cert_rpm_url()), hostname
    )
    # Querying the package afterwards is the real success check
    result = ssh.command(
        u'rpm -q katello-ca-consumer-{0}'.format(settings.server.hostname),
        hostname,
    )
    if result.return_code != 0:
        raise AssertionError('Failed to install the katello-ca rpm')
def delete_puppet_class(
    puppetclass_name, puppet_module=None, proxy_hostname=None, environment_name=None
):
    """Removes puppet class entity and uninstall puppet module from Capsule
    if puppet module name and Capsule details provided.

    :param str puppetclass_name: Name of the puppet class entity that
        should be removed.
    :param str puppet_module: Name of the module that should be
        uninstalled via puppet.
    :param str proxy_hostname: Hostname of the Capsule from which puppet
        module should be removed.
    :param str environment_name: Name of environment where puppet module
        was imported.
    """
    # Collect the class itself plus all of its subclasses ("name::...")
    matching_classes = entities.PuppetClass().search(
        query={'search': f'name = "{puppetclass_name}"'}
    )
    matching_classes += entities.PuppetClass().search(
        query={'search': f'name ~ "{puppetclass_name}::"'}
    )
    for klass in matching_classes:
        # Detach the class from every hostgroup that references it
        for hostgroup in klass.read().hostgroup:
            hostgroup.delete_puppetclass(data={'puppetclass_id': klass.id})
        # Detach the class from every host that references it
        for host in entities.Host().search(query={'search': f'class={klass.name}'}):
            host.delete_puppetclass(data={'puppetclass_id': klass.id})
        # Now the class entity itself can go
        klass.delete()
    # Optionally uninstall the module and refresh classes on the Capsule
    if puppet_module and proxy_hostname and environment_name:
        ssh.command(f'puppet module uninstall --force {puppet_module}')
        env = entities.Environment().search(
            query={'search': f'name="{environment_name}"'}
        )[0]
        proxy = entities.SmartProxy(name=proxy_hostname).search()[0]
        proxy.import_puppetclasses(environment=env)
def repository_with_tr_data(cls, options=None):
    """Import repositories (from spacewalk-report repositories).

    :returns: A tuple of SSHCommandResult and a List containing
        the transition data of the Import

    """
    result = cls.repository(options)
    sources = (
        (u'ls -v ${HOME}/.transition_data/products*', u'org_id'),
        (u'ls -v ${HOME}/.transition_data/repositories*', u'sat5'),
    )
    transition_data = []
    for cmd, key in sources:
        # Last stdout element is dropped (trailing empty line from ls)
        listing = ssh.command(cmd).stdout[:-1]
        transition_data.append(cls.read_transition_csv(listing, key))
    return (result, transition_data)
def test_positive_foreman_version(self):
    """Check if /usr/share/foreman/VERSION does not contain the
    develop tag.

    @Feature: Smoke Test

    @Assert: The file content does not have the develop tag.
    """
    result = ssh.command('cat /usr/share/foreman/VERSION')
    self.assertEqual(result.return_code, 0)
    version_text = u''.join(result.stdout)
    # Downstream builds must not ship the develop tag; upstream carries it
    if get_server_software() == 'downstream':
        self.assertNotIn('develop', version_text)
    else:
        self.assertIn('develop', version_text)
def run(self, cmd):
    """Runs a ssh command on the virtual machine

    :param str cmd: Command to run on the virtual machine
    :return: A :class:`robottelo.ssh.SSHCommandResult` instance with
        the commands results
    :rtype: robottelo.ssh.SSHCommandResult
    :raises robottelo.vm.VirtualMachineError: If the virtual machine is
        not created.
    """
    if self._created:
        return ssh.command(cmd, hostname=self.ip_addr)
    # Guard: an uncreated VM has no address to connect to
    raise VirtualMachineError(
        'The virtual machine should be created before running any ssh '
        'command')
def test_installer_options_and_flags(self):
    """Look for changes on installer options and flags

    :id: a51d3b9f-f347-4a96-a31a-770349db08c7

    :Steps:
        1. parse installer options and flags
        2. compare with last options

    :expectedresults: Ideally options should not change on zstreams.
        Documentation must be updated accordingly when such changes occur.
        So when this test fail we QE can act on it, asking dev if
        changes occurs on zstream and checking docs are up to date.
    """
    help_output = ssh.command('satellite-installer --help').stdout
    # `or []` keeps extract_params happy when stdout comes back empty/None
    parsed_options = set(extract_params(help_output or []))
    self.assertEqual(INSTALLER_OPTIONS, parsed_options)