Example #1
def test_positive_reboot_all_pxe_hosts(_module_user, discovered_host_cleanup,
                                       discovery_settings, provisioning_env):
    """Rebooting all pxe-based discovered hosts

    :id: 69c807f8-5646-4aa6-8b3c-5ecdb69560ed

    :parametrized: yes

    :Setup: Provisioning should be configured and hosts should be discovered via PXE boot.

    :Steps: PUT /api/v2/discovered_hosts/reboot_all

    :expectedresults: All discovered hosts should be rebooted successfully

    :CaseAutomation: Automated

    :CaseImportance: Medium
    """
    cfg = get_nailgun_config()
    if _module_user:
        cfg.auth = (_module_user[0].login, _module_user[1])

    # open ssh channels and attach them to foreman-tail output
    channel_1 = ssh.get_client().invoke_shell()
    channel_2 = ssh.get_client().invoke_shell()
    channel_1.send('foreman-tail\r')
    channel_2.send('foreman-tail\r')

    with LibvirtGuest() as pxe_host_1:
        _assert_discovered_host(pxe_host_1, channel_1, user_config=cfg)
        with LibvirtGuest() as pxe_host_2:
            _assert_discovered_host(pxe_host_2, channel_2, user_config=cfg)
            # reboot_all targets the general /discovered_hosts/ path, so it does not matter
            # which DiscoveredHost object we call it on
            try:
                entities.DiscoveredHost().reboot_all()
            except simplejson.errors.JSONDecodeError as e:
                if is_open('BZ:1893349'):
                    pass
                else:
                    raise e
            # assert that the server receives a DHCP discover from each host's PXELinux;
            # this means that the hosts got rebooted
            for pxe_host in [(pxe_host_1, channel_1), (pxe_host_2, channel_2)]:
                for pattern in [
                    (
                        f"DHCPDISCOVER from {pxe_host[0].mac}",
                        "DHCPDISCOVER",
                    ),
                    (f"DHCPACK on [0-9.]+ to {pxe_host[0].mac}", "DHCPACK"),
                ]:
                    try:
                        _wait_for_log(pxe_host[1], pattern[0], timeout=30)
                    except TimedOutError:
                        # raise assertion error
                        raise AssertionError(
                            f'Timed out waiting for {pattern[1]} from '
                            f'{pxe_host[0].mac}')
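
The `_wait_for_log` helper referenced above is not part of this snippet. A minimal sketch of what such a helper could look like, assuming a paramiko-style interactive channel (`recv_ready()`/`recv()`) and the `TimedOutError` class already imported by the test module:

import re
import time

# Hypothetical sketch of the _wait_for_log helper; not the framework's implementation.
def _wait_for_log(channel, pattern, timeout=60, interval=1):
    """Poll the shell channel until `pattern` shows up in its output."""
    buffer = ''
    deadline = time.time() + timeout
    while time.time() < deadline:
        while channel.recv_ready():
            buffer += channel.recv(4096).decode('utf-8', errors='ignore')
        if re.search(pattern, buffer):
            return buffer
        time.sleep(interval)
    raise TimedOutError(f'waited {timeout}s for pattern {pattern!r}')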
Example #2
    def create(cls, options=None):
        """
        Creates a new record using the arguments passed via dictionary.
        """

        cls.command_sub = 'create'

        if options is None:
            options = {}

        if options.get('file') is None:
            tmpl = 'file content is required for {0} creation'
            raise CLIError(tmpl.format(cls.__name__))

        if options['file'] == REPORT_TEMPLATE_FILE:
            local_path = get_data_file(REPORT_TEMPLATE_FILE)
        else:
            local_path = ''

        # --- create file at remote machine --- #
        (_, layout) = mkstemp(text=True)
        chmod(layout, 0o700)

        if not local_path:
            with open(layout, 'w') as rt:
                rt.write(options['file'])
            # End - Special handling of temporary file
        else:
            with open(local_path) as file:
                file_data = file.read()
            with open(layout, 'w') as rt:
                rt.write(file_data)
        ssh.get_client().put(layout, layout)
        # -------------------------------------- #

        options['file'] = layout

        result = cls.execute(cls._construct_command(options), output_format='csv')

        # Extract new object ID if it was successfully created
        if len(result) > 0 and 'id' in result[0]:
            obj_id = result[0]['id']

            # Fetch new object
            # Some Katello obj require the organization-id for subcommands
            info_options = {'id': obj_id}
            if cls.command_requires_org:
                if 'organization-id' not in options:
                    tmpl = 'organization-id option is required for {0}.create'
                    raise CLIError(tmpl.format(cls.__name__))
                info_options['organization-id'] = options['organization-id']

            new_obj = cls.info(info_options)
            # stdout should be a dictionary containing the object
            if len(new_obj) > 0:
                result = new_obj

        return result
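
A hedged usage sketch for this classmethod; the `ReportTemplate` subclass, option names, and values are illustrative and not taken from the example above:

# Hypothetical usage; ReportTemplate and the option values are illustrative.
template = ReportTemplate.create({
    'name': 'example-template',
    'file': REPORT_TEMPLATE_FILE,      # or raw template content as a string
    'organization-id': org['id'],      # only needed when command_requires_org is True
})
print(template['id'])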
Example #3
def upload_manifest_locked(org_id, manifest=None, interface=INTERFACE_API, timeout=None):
    """Upload a manifest with locking, using the requested interface.

    :type org_id: int
    :type manifest: robottelo.manifests.Manifest
    :type interface: str
    :type timeout: int

    :returns: the upload result

    Note: Manifest uploading is strictly locked only when this function
        is used.

    Usage::

        # for API interface
        manifest = manifests.clone()
        upload_manifest_locked(org_id, manifest, interface=INTERFACE_API)

        # for CLI interface
        manifest = manifests.clone()
        upload_manifest_locked(org_id, manifest, interface=INTERFACE_CLI)

        # or in one line with default interface
        result = upload_manifest_locked(org_id, manifests.clone())
        subscription_id = result['id']
    """

    if interface not in [INTERFACE_API, INTERFACE_CLI]:
        raise ValueError(f'upload manifest with interface "{interface}" not supported')
    if manifest is None:
        manifest = clone()
    if timeout is None:
        # Set the timeout to 1500000 ms (1500 seconds) to align with the API timeout.
        # As we hold the lock, other functions/tests may try to upload the manifest in other
        # processes, and we do not want to be interrupted by the default ssh_client timeout.
        timeout = 1500000
    if interface == INTERFACE_API:
        with manifest:
            result = entities.Subscription().upload(
                data={'organization_id': org_id}, files={'content': manifest.content}
            )
    else:
        # interface is INTERFACE_CLI
        with manifest:
            ssh.get_client().put(manifest, manifest.filename)

        result = Subscription.upload(
            {'file': manifest.filename, 'organization-id': org_id}, timeout=timeout
        )

    return result
Example #4
def default_url_on_new_port(oldport, newport):
    """Creates context where the default capsule is forwarded on a new port

    :param int oldport: Port to be forwarded.
    :param int newport: New port to be used to forward `oldport`.

    :return: A string containing the new capsule URL with port.
    :rtype: str

    """
    domain = settings.server.hostname

    client = ssh.get_client()
    pre_ncat_procs = client.execute('pgrep ncat').stdout.splitlines()

    with client.session.shell() as channel:
        # if ncat isn't backgrounded, it prevents the channel from closing
        command = f'ncat -kl -p {newport} -c "ncat {domain} {oldport}" &'
        # broker 0.1.25 makes these debug messages redundant
        logger.debug(f'Creating tunnel: {command}')
        channel.send(command)
        post_ncat_procs = client.execute('pgrep ncat').stdout.splitlines()
        ncat_pid = set(post_ncat_procs).difference(set(pre_ncat_procs))
        if not ncat_pid:
            stderr = channel.get_exit_status()[1]
            logger.debug(f'Tunnel failed: {stderr}')
            # Something failed, so raise an exception.
            raise CapsuleTunnelError(f'Starting ncat failed: {stderr}')
        forward_url = f'https://{domain}:{newport}'
        logger.debug(f'Yielding capsule forward port url: {forward_url}')
        try:
            yield forward_url
        finally:
            logger.debug(f'Killing ncat pid: {ncat_pid}')
            client.execute(f'kill {ncat_pid.pop()}')
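
Because the function yields, it is presumably decorated with `contextlib.contextmanager` (the decorator is not shown here). A minimal usage sketch with illustrative port numbers and lookup:

# Hypothetical usage; the ports and the Capsule lookup are illustrative.
with default_url_on_new_port(9090, 9091) as capsule_url:
    # capsule_url looks like 'https://<server>:9091' and is served through the ncat tunnel
    capsule = entities.Capsule(url=capsule_url).search()
# leaving the block kills the ncat process and tears the tunnel down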
Example #5
    def sm_execute(cls, command, hostname=None, timeout=None):
        """Execute satellite-maintain CLI commands on the server via ssh."""
        client = get_client(hostname=hostname or cls.hostname)
        result = client.execute(f'satellite-maintain {command}', timeout=timeout)
        return result
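
A usage sketch; `Satellite` stands in for whichever class carries this classmethod, and `health check` is just one example of a satellite-maintain subcommand:

# Hypothetical usage; the class name, subcommand, and result attributes are illustrative.
result = Satellite.sm_execute('health check', timeout=600)
assert result.status == 0, result.stderr  # assumes an exit-status/stderr style result object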
Example #6
def deploy_configure_by_script(
    script_content, hypervisor_type, debug=False, org='Default_Organization'
):
    """Deploy and run virt-who service by the shell script.
    :param str script_content: get the script by UI or API.
    :param str hypervisor_type: esx, libvirt, rhevm, xen, libvirt, kubevirt
    :param bool debug: if VIRTWHO_DEBUG=1, this option should be True.
    :param str org: Organization Label
    """
    script_filename = "/tmp/deploy_script.sh"
    script_content = script_content.replace('&amp;', '&').replace('&gt;', '>').replace('&lt;', '<')
    virtwho_cleanup()
    register_system(get_system(hypervisor_type), org=org)
    with open(script_filename, 'w') as fp:
        fp.write(script_content)
    ssh.get_client().put(script_filename)
    ret, stdout = runcmd(f'sh {script_filename}')
    if ret != 0 or 'Finished successfully' not in stdout:
        raise VirtWhoError(f"Failed to deploy configure by {script_filename}")
    if debug:
        return deploy_validation(hypervisor_type)
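
A hedged usage sketch; how the deploy script is obtained (`virtwho_config.deploy_script()` and its key) is an assumption and not shown in the example:

# Hypothetical usage; virtwho_config and the 'virt_who_config_script' key are assumptions.
script = virtwho_config.deploy_script()
deploy_configure_by_script(
    script['virt_who_config_script'],
    hypervisor_type='esx',
    debug=True,
    org='Default_Organization',
)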
Example #7
def test_positive_provision_pxe_host(_module_user, discovery_settings,
                                     provisioning_env):
    """Provision a pxe-based discovered hosts

    :id: e805b9c5-e8f6-4129-a0e6-ab54e5671ddb

    :parametrized: yes

    :Setup: Provisioning should be configured and a host should be
        discovered

    :Steps: PUT /api/v2/discovered_hosts/:id

    :expectedresults: Host should be provisioned successfully

    :CaseImportance: Critical
    """
    cfg = get_nailgun_config()
    if _module_user:
        cfg.auth = (_module_user[0].login, _module_user[1])

    # open a ssh channel and attach it to foreman-tail output
    ssh_client = ssh.get_client()
    with ssh_client.invoke_shell() as channel:
        channel.send('foreman-tail\r')

        with LibvirtGuest() as pxe_host:
            discovered_host = _assert_discovered_host(pxe_host,
                                                      channel,
                                                      user_config=cfg)
            # Provision just discovered host
            discovered_host.hostgroup = entities.HostGroup(
                cfg, id=provisioning_env['hostgroup']['id']).read()
            discovered_host.root_pass = gen_string('alphanumeric')
            discovered_host.update(['hostgroup', 'root_pass'])
            # Assertions
            provisioned_host = entities.Host(cfg).search(
                query={
                    'search': 'name={}.{}'.format(
                        discovered_host.name, provisioning_env['domain']['name']
                    )
                }
            )[0]
            assert provisioned_host.subnet.read().name == provisioning_env['subnet']['name']
            assert (
                provisioned_host.operatingsystem.read().ptable[0].read().name
                == provisioning_env['ptable']['name']
            )
            assert provisioned_host.operatingsystem.read().title == provisioning_env['os']['title']
            assert not entities.DiscoveredHost(cfg).search(
                query={'search': f'name={discovered_host.name}'}
            )
Example #8
def test_positive_reboot_pxe_host(_module_user, discovery_settings,
                                  provisioning_env):
    """Rebooting a pxe based discovered host

    :id: 69c807f8-5646-4aa6-8b3c-5ecab69560fc

    :parametrized: yes

    :Setup: Provisioning should be configured and a host should be discovered via PXE boot.

    :Steps: PUT /api/v2/discovered_hosts/:id/reboot

    :expectedresults: Selected host should be rebooted successfully

    :CaseAutomation: Automated

    :CaseImportance: Medium
    """
    cfg = get_nailgun_config()
    if _module_user:
        cfg.auth = (_module_user[0].login, _module_user[1])

    # open a ssh channel and attach it to foreman-tail output
    ssh_client = ssh.get_client()
    with ssh_client.invoke_shell() as channel:
        channel.send('foreman-tail\r')

        with LibvirtGuest() as pxe_host:
            discovered_host = _assert_discovered_host(pxe_host,
                                                      channel,
                                                      user_config=cfg)
            discovered_host.reboot()

            # assert that the server receives a DHCP discover from the host's PXELinux;
            # this means that the host got rebooted
            for pattern in [
                (
                    f"DHCPDISCOVER from {pxe_host.mac}",
                    "DHCPDISCOVER",
                ),
                (f"DHCPACK on [0-9.]+ to {pxe_host.mac}", "DHCPACK"),
            ]:
                try:
                    _wait_for_log(channel, pattern[0], timeout=30)
                except TimedOutError:
                    # raise assertion error
                    raise AssertionError(
                        f'Timed out waiting for {pattern[1]} from VM')
Example #9
def test_positive_auto_provision_pxe_host(_module_user, module_org,
                                          module_location, discovery_settings,
                                          provisioning_env):
    """Auto provision a pxe-based host by executing discovery rules

    :id: c93fd7c9-41ef-4eb5-8042-f72e87e67e10

    :parametrized: yes

    :Setup: Provisioning should be configured and a host should be
        discovered

    :Steps: POST /api/v2/discovered_hosts/:id/auto_provision

    :expectedresults: Selected Host should be auto-provisioned successfully

    :CaseAutomation: Automated

    :CaseImportance: Critical
    """
    cfg = get_nailgun_config()
    if _module_user:
        cfg.auth = (_module_user[0].login, _module_user[1])

    # open a ssh channel and attach it to foreman-tail output
    ssh_client = ssh.get_client()
    with ssh_client.invoke_shell() as channel:
        channel.send('foreman-tail\r')

        with LibvirtGuest() as pxe_host:
            discovered_host = _assert_discovered_host(pxe_host,
                                                      channel,
                                                      user_config=cfg)
            # Provision just discovered host
            discovered_host.hostgroup = entities.HostGroup(
                cfg, id=provisioning_env['hostgroup']['id']).read()

            # create a discovery rule that will match hosts MAC address
            entities.DiscoveryRule(
                name=gen_string('alphanumeric'),
                search_=f"mac = {discovered_host.mac}",
                organization=[module_org],
                location=[module_location],
                hostgroup=entities.HostGroup(
                    cfg, id=provisioning_env['hostgroup']['id']).read(),
            ).create()
            # Auto-provision the host
            discovered_host.auto_provision()

            # Assertions
            provisioned_host = entities.Host(cfg).search(
                query={
                    'search': 'name={}.{}'.format(
                        discovered_host.name, provisioning_env['domain']['name']
                    )
                }
            )[0]
            assert provisioned_host.subnet.read().name == provisioning_env['subnet']['name']
            assert (
                provisioned_host.operatingsystem.read().ptable[0].read().name
                == provisioning_env['ptable']['name']
            )
            assert provisioned_host.operatingsystem.read().title == provisioning_env['os']['title']
            assert not entities.DiscoveredHost(cfg).search(
                query={'search': f'name={discovered_host.name}'}
            )
Example #10
def upload_rhsso_entity(json_content, entity_name):
    """Helper method upload the entity json request as file on RHSSO Server"""
    with open(entity_name, "w") as file:
        json.dump(json_content, file)
    ssh.get_client(hostname=settings.rhsso.host_name).put(entity_name)
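
A minimal usage sketch with an illustrative payload and file name:

# Hypothetical usage; the payload and file name are illustrative.
client_payload = {'clientId': 'satellite-client', 'enabled': True}
upload_rhsso_entity(client_payload, 'create_client.json')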
Example #11
    def test_positive_provision_pxe_host_dhcp_change(self, discovery_settings,
                                                     provisioning_env):
        """Discovered host is provisioned in dhcp range defined in subnet entity

        :id: 7ab654de-16dd-4a8b-946d-f6adde310340

        :bz: 1367549

        :Setup: Provisioning should be configured and a host should be
            discovered

        :Steps:
            1. Set some dhcp range in dhcpd.conf in satellite.
            2. Create subnet entity in satellite with a range different from what's defined
                in `dhcpd.conf`.
            3. Create Hostgroup with the step 2 subnet.
            4. Discover a new host in satellite.
            5. Provision a host with the hostgroup created in step 3.

        :expectedresults:
            1. The discovered host should be discovered with an IP from the range defined in dhcpd.conf
            2. Provisioning the discovered host should acquire an IP from the dhcp range
                defined in the subnet entity.

        :CaseImportance: Critical
        """
        subnet = entities.Subnet(id=provisioning_env['subnet']['id']).read()
        # Updating satellite subnet component and dhcp conf ranges
        # Storing now for restoring later
        old_sub_from = subnet.from_
        old_sub_to = subnet.to
        old_sub_to_4o = old_sub_to.split('.')[-1]
        # Calculate the subnet's new `from` range for the Satellite Subnet component
        new_subnet_from = (
            subnet.from_[: subnet.from_.rfind('.') + 1] + str(int(old_sub_to_4o) - 9)
        )
        # At the same time, calculate the new `to` range for dhcpd.conf
        new_dhcp_conf_to = (
            subnet.to[: subnet.to.rfind('.') + 1] + str(int(old_sub_to_4o) - 10)
        )

        cfg = get_nailgun_config()
        ssh_client = ssh.get_client()
        with ssh_client.invoke_shell() as channel:
            channel.send('foreman-tail\r')
            try:
                # updating the ranges in component and in dhcp.conf
                subnet.from_ = new_subnet_from
                subnet.update(['from_'])
                ssh_client.exec_command(
                    f'cp /etc/dhcp/dhcpd.conf /etc/dhcp/dhcpd_backup.conf && '
                    f'sed -ie \'s/{subnet.to}/{new_dhcp_conf_to}/\' /etc/dhcp/dhcpd.conf && '
                    f'systemctl restart dhcpd')
                with LibvirtGuest() as pxe_host:
                    discovered_host = _assert_discovered_host(
                        pxe_host, channel, cfg)
                    # Assert Discovered host discovered within dhcp.conf range before provisioning
                    assert int(discovered_host.ip.split('.')[-1]) <= int(
                        new_dhcp_conf_to.split('.')[-1])
                    # Provision just discovered host
                    discovered_host.hostgroup = entities.HostGroup(
                        id=provisioning_env['hostgroup']['id']).read()
                    discovered_host.root_pass = gen_string('alphanumeric')
                    discovered_host.update(['hostgroup', 'root_pass'])
                    # Assertions
                    provisioned_host = entities.Host().search(
                        query={
                            'search':
                            'name={}.{}'.format(
                                discovered_host.name,
                                provisioning_env['domain']['name'])
                        })[0]
                    assert int(provisioned_host.ip.split('.')[-1]) >= int(
                        new_subnet_from.split('.')[-1])
                    assert int(provisioned_host.ip.split('.')[-1]) <= int(
                        old_sub_to_4o)
                    assert not entities.DiscoveredHost().search(
                        query={'search': f'name={discovered_host.name}'})
            finally:
                subnet.from_ = old_sub_from
                subnet.update(['from_'])
                # restore the original dhcpd.conf and restart dhcpd so the old range applies again
                ssh_client.exec_command(
                    'mv /etc/dhcp/dhcpd_backup.conf /etc/dhcp/dhcpd.conf && systemctl restart dhcpd'
                )
Example #12
    def test_usergroup_with_usergroup_sync(self, ipa_data, ldap_tear_down):
        """Verify the usergroup-sync functionality in Ldap Auth Source

        :id: 2b63e886-2c53-11ea-9da5-db3ae0527554

        :expectedresults: external user-group sync works as expected automatically
            based on user-sync

        :CaseImportance: Medium
        """
        self._clean_up_previous_ldap()
        self.ipa_host = ssh.get_client(hostname=ipa_data['ldap_hostname'])
        self.ldap_ipa_user_passwd = ipa_data['ldap_user_passwd']
        ipa_group_base_dn = ipa_data['group_base_dn'].replace(
            'foobargroup', 'foreman_group')
        member_username = '******'
        member_group = 'foreman_group'
        LOGEDIN_MSG = "Using configured credentials for user '{0}'."
        auth_source_name = gen_string('alpha')
        auth_source = make_ldap_auth_source({
            'name': auth_source_name,
            'onthefly-register': 'true',
            'usergroup-sync': 'true',
            'host': ipa_data['ldap_hostname'],
            'server-type': LDAP_SERVER_TYPE['CLI']['ipa'],
            'attr-login': LDAP_ATTR['login'],
            'attr-firstname': LDAP_ATTR['firstname'],
            'attr-lastname': LDAP_ATTR['surname'],
            'attr-mail': LDAP_ATTR['mail'],
            'account': ipa_data['ldap_user_cn'],
            'account-password': ipa_data['ldap_user_passwd'],
            'base-dn': ipa_data['base_dn'],
            'groups-base': ipa_group_base_dn,
        })
        auth_source = LDAPAuthSource.info({'id': auth_source['server']['id']})

        # Adding User in IPA UserGroup
        self._add_user_in_IPA_usergroup(member_username, member_group)
        viewer_role = Role.info({'name': 'Viewer'})
        user_group = make_usergroup()
        ext_user_group = make_usergroup_external({
            'auth-source-id': auth_source['server']['id'],
            'user-group-id': user_group['id'],
            'name': member_group,
        })
        UserGroup.add_role({'id': user_group['id'], 'role-id': viewer_role['id']})
        assert ext_user_group['auth-source'] == auth_source['server']['name']
        user_group = UserGroup.info({'id': user_group['id']})
        assert len(user_group['users']) == 0
        result = Auth.with_user(
            username=member_username, password=self.ldap_ipa_user_passwd
        ).status()
        assert LOGEDIN_MSG.format(member_username) in result[0]['message']
        # use a name that does not shadow the builtin `list`
        role_list = Role.with_user(
            username=member_username, password=self.ldap_ipa_user_passwd
        ).list()
        assert len(role_list) > 1
        user_group = UserGroup.info({'id': user_group['id']})
        assert len(user_group['users']) == 1
        assert user_group['users'][0] == member_username

        # Removing User in IPA UserGroup
        self._remove_user_in_IPA_usergroup(member_username, member_group)
        with pytest.raises(CLIReturnCodeError) as error:
            Role.with_user(username=member_username,
                           password=self.ldap_ipa_user_passwd).list()
        assert 'Missing one of the required permissions' in error.value.message
        user_group = UserGroup.info({'id': user_group['id']})
        assert len(user_group['users']) == 0
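
The `_add_user_in_IPA_usergroup` helper is not shown above. A hypothetical sketch of what it might do, assuming it shells out to the FreeIPA CLI on `self.ipa_host` (the ssh client opened above) and that an admin password is available on the test instance as a made-up `self.ipa_admin_passwd` attribute:

    # Hypothetical sketch; the kinit/ipa commands and ipa_admin_passwd attribute are assumptions.
    def _add_user_in_IPA_usergroup(self, member_username, member_group):
        self.ipa_host.execute(f'echo {self.ipa_admin_passwd} | kinit admin')
        self.ipa_host.execute(
            f'ipa group-add-member {member_group} --users={member_username}'
        )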