Exemple #1
0
def _check_known_hosts(host_list):
    """Pre-populate 'known_hosts' entries for every host so Ansible does
    not emit its clunky yes/no key-acceptance prompts (all prompts are
    printed at once).

    Hosts without an existing entry are scanned with ssh-keyscan and the
    returned keys are appended to the relevant known_hosts file(s).

    Args:
        host_list (list): List of hostnames or IP addresses
    """
    hosts_files = [os.path.join(Path.home(), ".ssh", "known_hosts")]
    user_name, user_home_dir = get_user_and_home()
    # When running via sudo, root's known_hosts needs the entries too
    if os.environ['USER'] == 'root' and user_name != 'root':
        hosts_files.append('/root/.ssh/known_hosts')
        if not os.path.isdir('/root/.ssh'):
            os.mkdir('/root/.ssh')
            os.chmod('/root/.ssh', 0o700)

    for host in host_list:
        for known_hosts in hosts_files:
            # 'ssh-keygen -F' exits non-zero when no entry exists
            lookup = f'ssh-keygen -F {host} -f {known_hosts}'
            resp, err, rc = sub_proc_exec(lookup)
            if rc == 0:
                continue
            resp, err, rc = sub_proc_exec(f'ssh-keyscan -H {host}')
            print(f'Adding \'{host}\' host keys to \'{known_hosts}\'')
            append_line(known_hosts, resp, check_exists=False)
def create_base_dir(base_dir):
    """Move or copy the existing software server directories from '/srv'
    into an architecture specific directory under 'base_dir'.

    The user chooses between moving and copying. The target architecture
    ('ppc64le' or 'x86_64') is inferred from the directory names found
    under '/srv/repos/dependencies/rhel7'.

    Args:
        base_dir (str): Destination base directory path

    Side effects:
        May exit the process via sys.exit() on insufficient disk space,
        missing dependencies folder, unknown architecture, or at user
        request.
    """
    log = logger.getlogger()
    print('\nMove or Copy the existing software server directories?')
    ch, action = get_selection('move\ncopy', ('m', 'c'))
    if action == 'copy':
        statvfs = os.statvfs(base_dir)
        freespace = statvfs.f_frsize * statvfs.f_bavail
        # Require ~18 GB free before attempting a copy
        if freespace < 18000000000:
            sys.exit('Insufficient space on disk')
    arch = ''
    exists = glob('/srv/repos/dependencies/rhel7/*')
    if not exists:
        log.error('\nNo dependencies folder found. Unable to perform move.\n')
        sys.exit()
    for path in exists:
        # Power systems repo paths contain 'p8' or 'p9'
        if 'p8' in path or 'p9' in path:
            arch = 'ppc64le'
            break
        elif 'x86_64' in path:
            arch = 'x86_64'
            break
    if not arch:
        log.error('\nUnable to determine architecture. Unable to perform move.\n')
        sys.exit()
    if os.path.exists(f'{base_dir}/wmla120-{arch}'):
        print(f'Destination path {base_dir}/wmla120-{arch} already exists.')
        if action == 'copy':
            if not get_yesno('Okay to proceed with force copy? '):
                sys.exit('Exit at user request')
    else:
        os.mkdir(f'{base_dir}/wmla120-{arch}/')
    for _dir in (('repos', 'anaconda', 'spectrum-conductor', 'spectrum-dli',
                  'wmla-license',)):
        path = os.path.join('/srv/', _dir, '')

        if os.path.isdir(path):
            print(f'Found dir: {path}')
            if action == 'move':
                try:
                    _dir = f'{base_dir}/wmla120-{arch}/'
                    move(path, _dir)
                    # Restore SELinux type so the web server can serve it
                    cmd = f'sudo chcon -Rv --type=httpd_sys_content_t {_dir}'
                    _, err, rc = sub_proc_exec(cmd)
                    if rc != 0:
                        log.error(f'chtype of directory {_dir} failed {err}')
                except shutil_Error as exc:
                    print(exc)
            elif action == 'copy':
                cmd = f'cp -rf {path} {base_dir}/wmla120-{arch}/'
                # Bug fix: the original bare 'except: pass' could leave
                # 'rc'/'err' unbound and silently hid copy failures.
                try:
                    _, err, rc = sub_proc_exec(cmd)
                except Exception as exc:
                    err, rc = str(exc), -1
                if rc != 0:
                    # Bug fix: message previously lacked the f prefix, so
                    # the literal text '{err}' was logged.
                    log.error(f'Copy error {err}')
        else:
            log.error(f'Path {path} missing')
    print('Done')
def _update_firewall(br_label):
    """Allow forwarding of traffic arriving on the specified bridge.

    Inserts an ACCEPT rule at the head of the iptables FORWARD chain for
    'br_label' unless the chain already mentions the bridge.
    """
    if not _is_firewall_running():
        return
    fwd_tbl, err, rc = sub_proc_exec('iptables -vL FORWARD')
    if br_label in fwd_tbl:
        return
    LOG.debug(f'Updating firewall. Forward {br_label} packets.')
    res, err, rc = sub_proc_exec(f'iptables -I FORWARD -p all -i {br_label} '
                                 f'-s 0.0.0.0/0 -d 0.0.0.0/0 -j ACCEPT')
    if rc:
        LOG.warning('An error occured while updating the firewall. '
                    f'Error {err}. RC: {rc}')
Exemple #4
0
def _validate_client_hostnames(software_hosts_file_path, hosts_list):
    """Validate hostnames listed in inventory match client hostnames

    Args:
        software_hosts_file_path (str): Path to software inventory file
        hosts_list (list): List of hostnames or IP addresses

    Returns:
        bool: True if all client hostnames match

    Raises:
        UserException: If any hostname does not match
    """
    base_cmd = (f'{get_ansible_path()} -i {software_hosts_file_path} ')
    msg = ""

    for host in hosts_list:
        cmd = base_cmd + f'{host} -a "hostname --fqdn"'
        resp, err, rc = sub_proc_exec(cmd, shell=True)

        # The FQDN is the last line of ansible's output. Guard against an
        # empty response (e.g. unreachable host) which previously raised
        # an IndexError instead of reporting a mismatch.
        lines = resp.splitlines()
        hostname = lines[-1] if lines else ''

        if hostname != host:
            msg += (f"Inventory hostname mis-match: '{host}' is reporting "
                    f"an FQDN of '{hostname}'\n")
    if msg != "":
        raise UserException(msg)
    return True
def create_pup_repo_mirror(repo_base_dir):
    """ Download POWER-Up public repository in full

    Clones a complete '--mirror' copy of the POWER-Up git repository
    into 'repo_base_dir' (a 'power-up.git' subdirectory is appended if
    the path does not already end in '.git'). If the target directory
    exists and is not empty the user is prompted before its contents
    are removed and the repository is re-cloned.

    Args:
        repo_base_dir (str): Base directory path
    """
    log = logger.getlogger()

    if not repo_base_dir.endswith('.git'):
        repo_base_dir = path.join(repo_base_dir, 'power-up.git')

    makedirs(repo_base_dir, exist_ok=True)

    if len(listdir(repo_base_dir)) != 0:
        log.info(f"The directory '{repo_base_dir}' already exists and is not"
                 " empty.")
        if get_yesno("Permanently remove existing contents and re-clone? "):
            rmtree(repo_base_dir)
            makedirs(repo_base_dir, exist_ok=True)
        else:
            log.debug("User selected to continue without re-cloning")
            return

    # Pre-filled interactive prompt; default is the public GitHub URL
    url_path = rlinput("POWER-Up Repository path/URL: ", get_github_url())

    resp, err, rc = sub_proc_exec(f'git clone --mirror {url_path} '
                                  f'{repo_base_dir}')
    if rc != 0:
        log.error('An error occurred while cloning mirror of power-up repo: '
                  f'{err}')
Exemple #6
0
def create_ssh_key_pair(name):
    """Create an SSH private/public key pair in ~/.ssh/

    If an SSH key pair exists with "name" then the private key path is
    returned *without* creating anything new.

    Args:
        name (str): Filename of private key file

    Returns:
        str: Private ssh key path

    Raises:
        UserException: If ssh-keygen command fails
    """
    log = logger.getlogger()
    ssh_dir = os.path.join(Path.home(), ".ssh")
    private_key_path = os.path.join(ssh_dir, name)
    if not os.path.isdir(ssh_dir):
        os.mkdir(ssh_dir, mode=0o700)
    if os.path.isfile(private_key_path):
        log.info(f'SSH key \'{private_key_path}\' already exists, continuing')
    else:
        print(bold(f'Creating SSH key \'{private_key_path}\''))
        # -N "" creates the key without a passphrase
        cmd = ('ssh-keygen -t rsa -b 4096 '
               '-C "Generated by Power-Up Software Installer" '
               f'-f {private_key_path} -N ""')
        resp, err, rc = sub_proc_exec(cmd, shell=True)
        # Fixed idiom: compare the return code as an int, not via str()
        if rc != 0:
            msg = f'ssh-keygen failed:\n{resp}'
            log.debug(msg)
            raise UserException(msg)
    return private_key_path
Exemple #7
0
def _check_known_hosts(host_list):
    """Ensure all hosts have entries in 'known_hosts' to avoid
    Ansible's clunky yes/no prompting to accept keys (all prompts are
    printed at once).

    If any hosts are missing the user will be prompted to add it.

    Args:
        host_list (list): List of hostnames or IP addresses
    """
    log = logger.getlogger()
    known_hosts_files = set()

    user_name, user_home_dir = get_user_and_home()
    user_uid = pwd.getpwnam(user_name).pw_uid
    user_gid = grp.getgrnam(user_name).gr_gid

    # Hoisted: the user's ~/.ssh path was recomputed on every use
    user_ssh_dir = os.path.join(Path.home(), ".ssh")
    user_known_hosts = os.path.join(user_ssh_dir, "known_hosts")
    known_hosts_files.add(user_known_hosts)
    if not os.path.isdir(user_ssh_dir):
        log.debug("Creating user ~/.ssh dir")
        os.mkdir(user_ssh_dir)
        os.chmod(user_ssh_dir, 0o700)
        # Ensure the real (non-sudo) user owns the directory
        os.chown(user_ssh_dir, user_uid, user_gid)
    if not os.path.isfile(user_known_hosts):
        log.debug(f"Creating user known hosts '{user_known_hosts}' file")
        Path(user_known_hosts).touch(mode=0o600)
        os.chown(user_known_hosts, user_uid, user_gid)
    # When running under sudo, root's known_hosts must be populated too
    if os.environ['USER'] == 'root' and user_name != 'root':
        known_hosts_files.add('/root/.ssh/known_hosts')
        if not os.path.isdir('/root/.ssh'):
            log.debug("Creating root '/root/.ssh' dir")
            os.mkdir('/root/.ssh')
            os.chmod('/root/.ssh', 0o700)
        if not os.path.isfile('/root/.ssh/known_hosts'):
            log.debug("Creating root '/root/.ssh/known_hosts' file")
            Path('/root/.ssh/known_hosts').touch(mode=0o600)

    for host in host_list:
        for known_hosts in known_hosts_files:
            # 'ssh-keygen -F' exits non-zero when host has no entry
            cmd = (f'ssh-keygen -F {host} -f {known_hosts}')
            resp, err, rc = sub_proc_exec(cmd)
            if rc != 0:
                cmd = (f'ssh-keyscan -H {host}')
                resp, err, rc = sub_proc_exec(cmd)
                print(f'Adding \'{host}\' host keys to \'{known_hosts}\'')
                append_line(known_hosts, resp, check_exists=False)
Exemple #8
0
def get_system_inventory(host, user, pw):
    """Read the FRU (field replaceable unit) inventory from a BMC.

    Args:
        host (str): BMC hostname or IP address
        user (str): IPMI user name
        pw (str): IPMI password

    Returns:
        str or None: Raw 'ipmitool fru' output, or None on failure
    """
    log = logger.getlogger()
    res, err, rc = u.sub_proc_exec(
        f'ipmitool -I lanplus -H {host} -U {user} -P {pw} fru')
    if rc != 0:
        log.debug(f'Unable to read system information from {host}, rc: {rc}')
        return None
    return res
    def _exec_cmd(self, cmd, stdout=PIPE, stderr=PIPE):
        """Execute a command in the namespace

        The command is prefixed with 'ip netns exec <self.name>' so it
        runs inside this object's network namespace.

        Args:
            cmd (str): Command to execute
            stdout: Stdout destination, passed through to sub_proc_exec
            stderr: Stderr destination, passed through to sub_proc_exec

        Returns:
            tuple: (stdout, stderr, return code) from sub_proc_exec
        """
        cmd = 'ip netns exec {} {}'.format(self.name, cmd)
        std_out, std_err, rc = sub_proc_exec(cmd, stdout, stderr)
        return std_out, std_err, rc
Exemple #10
0
    def __init__(self, args):
        """Store command line args and prepare the paramiko ssh log file.

        Args:
            args: Parsed command line arguments
        """
        self.args = args
        self.config_file_path = gen.GEN_PATH
        self.cont_config_file_path = gen.CONTAINER_PACKAGE_PATH + '/'

        # Paramiko needs a writable ssh log file; create it if missing
        # and loosen permissions if the current user cannot write to it.
        ssh_log = os.path.join(gen.GEN_LOGS_PATH, 'ssh_paramiko')
        if not os.path.isfile(ssh_log):
            os.mknod(ssh_log)
        if not os.access(ssh_log, os.W_OK):
            cmd = f'sudo chmod 666 {ssh_log}'
            res, err, rc = sub_proc_exec(cmd)
def _update_firewall(br_label):
    """Remove all iptables FORWARD table entries which contain 'br_label'

    Args:
        br_label (str): Bridge interface name to match against rules
    """
    if _is_firewall_running():
        rmv = []
        fwd_tbl, err, rc = sub_proc_exec(
            'iptables -nvL FORWARD --line-numbers')
        for line in fwd_tbl.splitlines():
            if br_label in line:
                # the first two columns hold the rule's line number
                rmv.append(int(line[0:2]))
        # reverse the entry number list so that rules can be removed
        # sequentially without invalidating the line numbers of the
        # entries that have not been removed yet.
        rmv.reverse()
        for item in rmv:
            cmd = f'iptables -D FORWARD {item}'
            res, err, rc = sub_proc_exec(cmd)
            if rc:
                LOG.warning(
                    f'An error occured while removing {br_label} from the '
                    f'iptables FORWARD table')
Exemple #12
0
    def find_unused_addr_and_add_to_ifc(self, ifc, cidr, offset=4, loc='top'):
        """ Finds an available address in the given subnet. nmap -PR is used to
        scan the subnet on the specified interface. Searching starts at either the
        top or the bottom of the subnet at an offset specified by offset.

        Args:
            ifc (str): Interface name to scan and configure
            cidr (str): Subnet in CIDR notation
            offset (int): Offset from the subnet edge to start searching
            loc (str): 'top' to search downward from the top of the
                subnet, 'bot' to search upward from the bottom

        Returns:
            bool: True if the interface already had an address in the
                subnet or an unused address was found and added
        """
        status = False
        # search direction: upward from bottom, downward from top
        mult = 1 if loc == 'bot' else -1
        # check for an existing address on the interface in the subnet
        for addr in self.get_interface_addresses(ifc):
            if is_overlapping_addr(addr, cidr):
                status = True
                break

        if not status:
            # Find an available address on the subnet.
            # if no route exists, add one temporarily so the subnet can be scanned
            routes = self.get_interfaces_routes()
            if ifc not in routes or cidr not in routes[ifc]:
                self.route('add',
                           dst=cidr,
                           oif=self.link_lookup(ifname=ifc)[0])
            # Get an address near the top of the subnet
            if loc == 'top':
                st_addr = add_offset_to_address(
                    cidr,
                    get_network_size(cidr) - offset)
            else:
                st_addr = add_offset_to_address(cidr, offset)
            for i in range(get_network_size(cidr) - offset):
                addr = add_offset_to_address(st_addr, mult * i)
                # ARP-ping the candidate; any dotted-quad in the output
                # means something responded, i.e. the address is in use
                cmd = f'nmap -PR {addr}'
                res, err, rc = sub_proc_exec(cmd)
                if not re.search(r'\d+\.\d+\.\d+\.\d+', res, re.DOTALL):
                    # Found an unused address
                    # First remove the temp route
                    res = self.route('del',
                                     dst=cidr,
                                     oif=self.link_lookup(ifname=ifc)[0])
                    if res[0]['header']['error']:
                        self.log.error(
                            f'Error occurred removing route from {ifc}')
                    # Add the address to the BMC interface
                    self.log.info(f'Adding address {addr} to ifc {ifc}')
                    idx = self.link_lookup(ifname=ifc)[0]
                    self.addr('add',
                              index=idx,
                              address=addr,
                              mask=int(cidr.rsplit('/')[1]))
                    status = True
                    break
        # Update self.ifcs
        self.ifcs = self.get_interfaces_dict()
        return status
Exemple #13
0
def setup_nginx_yum_repo(root_dir='/srv', repo_id='nginx'):
    """Install and setup nginx http server

    Ensures a .repo file for nginx.org exists under /etc/yum.repos.d
    with the correct baseurl for this machine's architecture, then
    rebuilds the yum caches as needed.

    Args:
        root_dir (str): Path to root directory for requests
        repo_id (str): Name of nginx yum repository

    Returns:
        int: Return code from 'yum makecache'
    """

    log = logger.getlogger()

    baseurl = 'http://nginx.org/packages/rhel/7/' + platform.machine()
    repo_file = os.path.join('/etc/yum.repos.d', repo_id + '.repo')

    if not os.path.isfile(repo_file):
        # No repo file yet -- write one and prime the yum cache
        repo = PowerupRepo(repo_id, 'nginx.org public', root_dir)
        dotrepo = repo.get_yum_dotrepo_content(baseurl, gpgcheck=0)
        repo.write_yum_dot_repo_file(dotrepo)
        resp, err, rc = sub_proc_exec('yum makecache')
        if rc != 0:
            log.error('A problem occured while creating the yum caches')
            log.error(f'Response: {resp}\nError: {err}\nRC: {rc}')
        return rc

    with open(repo_file, 'r') as f:
        existing = f.read()
    if baseurl in existing:
        # Repo file already points at the right baseurl; nothing to do
        return 0

    # Repo file exists but targets a different baseurl -- rewrite it and
    # rebuild the caches from scratch
    line_in_file(repo_file, r'^baseurl=.+', f'baseurl={baseurl}')
    for cmd in ['yum clean all', 'yum makecache']:
        resp, err, rc = sub_proc_exec(cmd)
        if rc != 0:
            log.error(f"A problem occured while running '{cmd}'")
            log.error(f'Response: {resp}\nError: {err}\nRC: {rc}')
    return rc
    def _teardown_ns(self, ns):
        """Tear down a network namespace.

        Kills any dnsmasq and tcpdump processes running inside the
        namespace, reconnects the veth pair to the container and finally
        destroys the namespace itself.

        Args:
            ns: NetNameSpace instance to tear down
        """
        # Terminate namespace-resident daemons; dnsmasq first, then
        # tcpdump (same order as the processes were launched)
        for daemon in ('dnsmasq', 'tcpdump'):
            pid_list, stderr, rc = sub_proc_exec('pgrep ' + daemon)
            for pid in pid_list.splitlines():
                ns_name, stderr, rc = sub_proc_exec('ip netns identify ' + pid)
                if ns._get_name_sp_name() in ns_name:
                    self.log.debug('Killing {} {}'.format(daemon, pid))
                    stdout, stderr, rc = sub_proc_exec('kill -15 ' + pid)

        # reconnect the veth pair to the container
        ns._reconnect_container()

        # Destroy the namespace
        self.log.debug('Destroying namespace')
        ns._destroy_name_sp()
Exemple #15
0
def get_ansible_hostvars(software_hosts_file_path):
    """Get Ansible generated 'hostvars' dictionary

    Runs 'ansible-inventory --list' against the given inventory file and
    extracts the '_meta'/'hostvars' section of its JSON output.

    Args:
        software_hosts_file_path (str): Path to software inventory file

    Returns:
        dict: Ansible 'hostvars' dictionary
    """
    inventory_cmd = (f'ansible-inventory --inventory '
                     f'{software_hosts_file_path} --list')
    stdout, stderr, rc = sub_proc_exec(inventory_cmd, shell=True)
    return json.loads(stdout)['_meta']['hostvars']
Exemple #16
0
 def create_meta(self, update=False):
     """Build or refresh the yum repository metadata via createrepo.

     Args:
         update (bool): When True, run createrepo in '--update' mode to
             refresh existing metadata instead of rebuilding it.
     """
     verb, verbing = ('update', 'Updating') if update else ('create', 'Creating')
     self.log.info(f'{verbing} repository metadata and databases')
     print('This may take a few minutes.')
     flags = '--update ' if update else ''
     resp, err, rc = sub_proc_exec(f'createrepo -v {flags}{self.repo_dir}')
     if rc == 0:
         self.log.info(
             f'Repo {verb} process for {self.repo_id} finished'
             ' succesfully')
     else:
         self.log.error(f'Repo creation error: rc: {rc} stderr: {err}')
Exemple #17
0
 def get_rpm_path(self, filepath='/home/**/*.rpm'):
     """Interactive search for the rpm path.

     Args:
         filepath (str): Glob pattern used to search for candidate files.

     Returns: Path to file or None
     """
     while True:
         self.rpm_path = get_file_path(filepath)
         if not self.rpm_path:
             return None
         # List the rpm's contents to verify the selection is a real rpm.
         # Bug fix: the command previously embedded the literal text
         # 'self.rpm_path' instead of interpolating the variable.
         cmd = f'rpm -qlp {self.rpm_path}'
         resp, err, rc = sub_proc_exec(cmd)
         if '.rpm' in resp:
             # Bug fix: a valid selection previously looped forever
             # because there was no return on the success path.
             return self.rpm_path
         print('There are no ".rpm" files in the selected path')
         if get_yesno('Use selected path? ', default='n'):
             return self.rpm_path
def _is_ifc_attached_elsewhere(ifc, bridge):
    """ Checks to see if ifc is in use on a bridge other than that specified
    Args:
        ifc (str) interface name
        bridge (str) name of bridge the interface is intended for
    Returns:
        True if the interface is already being used (is unavailable)
    """
    raw, err, rc = sub_proc_exec('brctl show')
    merged = []
    # skip the header row; fold tab-indented continuation rows (extra
    # interfaces on the same bridge) into the preceding bridge entry
    for row in raw.splitlines()[1:]:
        if row.startswith('\t'):
            merged[-1] += row
        else:
            merged.append(row)
        if ifc in merged[-1] and bridge not in merged[-1]:
            return True
    return False
Exemple #19
0
def copy_ssh_key_pair_to_hosts(private_key_path,
                               software_hosts_file_path,
                               global_pass=None):
    """Copy an SSH public key into software hosts authorized_keys files

    Runs 'ssh-copy-id' against every host in the inventory, honoring any
    per-host ansible port / ssh args, and falling back to 'global_pass'
    via sshpass when no per-host password is configured.

    Args:
        private_key_path (str) : Filename of private key file
        software_hosts_file_path (str): Path to software inventory file
        global_pass (str, optional): Global client default SSH password

    Returns:
        bool: True iff rc of all commands are "0"
    """
    hosts_list = _validate_inventory_count(software_hosts_file_path, 0)
    hostvars = get_ansible_hostvars(software_hosts_file_path)
    success = True

    for host in hosts_list:
        print(bold(f'Copy SSH Public Key to {host}'))
        host_vars = hostvars[host]
        parts = [f'ssh-copy-id -i {private_key_path} ']
        if "ansible_port" in host_vars:
            parts.append(f'-p {host_vars["ansible_port"]} ')
        if "ansible_ssh_common_args" in host_vars:
            parts.append(f'{host_vars["ansible_ssh_common_args"]} ')
        parts.append(f'{host_vars["ansible_user"]}@{host}')
        cmd = ''.join(parts)

        # No per-host password configured: supply the global default
        # through sshpass's environment variable mode
        if 'ansible_ssh_pass' not in host_vars:
            cmd = f'SSHPASS=\'{global_pass}\' sshpass -e ' + cmd

        resp, err, rc = sub_proc_exec(cmd, shell=True)
        if rc != 0:
            success = False
            print(err)

    return success
Exemple #20
0
    def extract_rpm(self, src_path):
        """Extracts files from the selected rpm file to a repository directory
        under /srv/repoid/rhel7/repoid. If a repodata directory is included in
        the extracted data, then the path to repodata directory is returned

        Note: this changes the process working directory to the
        extraction directory and may update self.repo_dir as a side
        effect when a repodata directory is found.

        Args:
            src_path (str): Path to the rpm file to extract

        Inputs: Uses self.repo_dir and self.repo_id
        Outputs:
            repodata_dir : absolute path to repodata directory if one exists,
                otherwise None
        """
        extract_dir = self.repo_dir
        if not os.path.exists(extract_dir):
            os.makedirs(extract_dir)
        os.chdir(extract_dir)
        # rpm2cpio streams the rpm payload; cpio unpacks it in place
        cmd = f'rpm2cpio {src_path} | sudo cpio -div'
        resp, err, rc = sub_proc_exec(cmd, shell=True)
        if rc != 0:
            self.log.error(f'Failed extracting {src_path}')

        repodata_dir = glob.glob(f'{extract_dir}/**/repodata', recursive=True)
        if repodata_dir:
            self.repo_dir = os.path.dirname(repodata_dir[0])
            return self.repo_dir
        else:
            return None
Exemple #21
0
def get_user_and_home():
    """Get user name and home directory path

    Returns the user account calling the script, *not* 'root' even
    when called with 'sudo'.

    Returns:
        user_name, user_home_dir (tuple): User name and home dir path

    Raises:
        UserException: If 'getent' command fails
    """
    log = logger.getlogger()
    user_name = getlogin()

    cmd = f'getent passwd {user_name}'
    resp, err, rc = sub_proc_exec(cmd, shell=True)
    # Fixed idiom: compare the return code as an int, not via str()
    if rc != 0:
        msg = f'getent failed:\n{err}'
        log.debug(msg)
        raise UserException(msg)
    # passwd entry format: name:passwd:uid:gid:gecos:home:shell
    user_home_dir = resp.split(':')[5].rstrip()

    return (user_name, user_home_dir)
    def validate_pxe(self, bootdev='default', persist=True):
        """Validate that the expected cluster nodes request PXE boot.

        Launches a dnsmasq DHCP server and a tcpdump capture of port 67
        inside a dedicated network namespace attached to the PXE bridge,
        then polls the capture file on 10 s intervals collecting client
        MAC addresses until the expected node count is reached or the
        user chooses to continue with a subset, retry power, or
        terminate. When 'bootdev' is 'network', power is cycled on the
        discovered nodes and their boot device is set back to network.

        Args:
            bootdev (str): Boot device; 'network' triggers a power cycle
                of discovered nodes after validation
            persist (bool): Accepted for interface compatibility; not
                referenced in this method body

        Raises:
            UserCriticalException: If no BMCs were discovered
            UserException: If not all node PXE ports were validated
        """
        # if self.inv.check_all_nodes_pxe_macs():
        #     self.log.info("Inventory exists with PXE MACs populated.")
        #     if not self.ran_ipmi:
        #         return
        #     print("\nPress Enter to continue cluster deployment without "
        #           "running PXE hardware validation.")
        #     print("Type 'C' to validate cluster nodes defined in current "
        #           "'config.yml'")
        #     resp = input("Type 'T' to terminate Power-Up ")
        #     if resp == 'T':
        #         resp = input("Type 'y' to confirm ")
        #         if resp == 'y':
        #             self.log.info("'{}' entered. Terminating Power-Up at user "
        #                           "request".format(resp))
        #             sys.exit(1)
        #     elif resp == 'C':
        #         self.log.info("'{}' entered. Continuing with hardware "
        #                       "validation".format(resp))
        #     else:
        #         print()
        #         return
        # if not self.ran_ipmi:
        #     return
        if not self.node_table_ipmi:
            raise UserCriticalException('No BMCs discovered')
        self.log.debug("Checking PXE networks and client PXE"
                       " ports ________\n")
        self.log.debug('Boot device: {}'.format(bootdev))
        ipmi_cnt, pxe_cnt = self._get_port_cnts()
        pxe_addr, bridge_addr, pxe_prefix, pxe_vlan = self._get_network('pxe')
        pxe_network = pxe_addr + '/' + str(pxe_prefix)
        addr = IPNetwork(bridge_addr + '/' + str(pxe_prefix))
        netmask = str(addr.netmask)
        # offset the namespace address from the bridge address
        addr.value += NAME_SPACE_OFFSET_ADDR
        addr = str(addr)
        foundall = False
        dhcp_st = get_dhcp_pool_start()
        pxe_ns = NetNameSpace('pxe-ns-', 'br-pxe-' + str(pxe_vlan), addr)

        # setup DHCP. save start and end addr raw numeric values
        self.log.debug('Installing DHCP server in network namespace')
        addr_st = self._add_offset_to_address(pxe_network, dhcp_st)
        addr_end = self._add_offset_to_address(pxe_network,
                                               dhcp_st + pxe_cnt + 2)

        dns_list, stderr, rc = sub_proc_exec('pgrep dnsmasq')
        dns_list = dns_list.splitlines()

        if os.path.exists(self.dhcp_pxe_leases_file):
            os.remove(self.dhcp_pxe_leases_file)

        # delete any remnant dnsmasq processes
        for pid in dns_list:
            ns_name, stderr, rc = sub_proc_exec(
                'ip netns identify {}'.format(pid))
            if pxe_ns._get_name_sp_name() in ns_name:
                self.log.debug('Killing dnsmasq. pid {}'.format(pid))
                stdout, stderr, rc = sub_proc_exec('kill -15 ' + pid)

        cmd = (f'dnsmasq --dhcp-leasefile={self.dhcp_pxe_leases_file} '
               f'--interface={pxe_ns._get_name_sp_ifc_name()} '
               f'--dhcp-range={addr_st},{addr_end},{netmask},3600')
        stdout, stderr, rc = pxe_ns._exec_cmd(cmd)
        if rc != 0:
            self.log.warning(f'Error configuring dnsmasq. rc: {rc}')

        if os.path.exists(self.tcp_dump_file):
            os.remove(self.tcp_dump_file)

        tcpdump_list, stderr, rc = sub_proc_exec('pgrep tcpdump')
        tcpdump_list = tcpdump_list.splitlines()

        # delete any remnant tcpdump processes
        for pid in tcpdump_list:
            ns_name, stderr, rc = sub_proc_exec('ip netns identify ' + pid)
            if pxe_ns._get_name_sp_name() in ns_name:
                self.log.debug('Killing tcpdump. pid {}'.format(pid))
                stdout, stderr, rc = sub_proc_exec('kill -15 ' + pid)

        # capture DHCP (port 67) traffic on the namespace interface
        cmd = (f'sudo tcpdump -X -U -i {pxe_ns._get_name_sp_ifc_name()} '
               f'-w {self.tcp_dump_file} --immediate-mode  port 67')
        proc = pxe_ns._launch_cmd(cmd)
        if not isinstance(proc, object):
            self.log.error(
                f'Failure to launch process of tcpdump monitor {proc}')

        # Scan up to 25 times. Delay 10 seconds between scans
        # Allow infinite number of retries
        self.log.info('Scanning pxe network on 10 s intervals.')
        cnt = 0
        cnt_prev = 0
        cnt_down = 25
        mac_list = []
        dump = ''
        while cnt < pxe_cnt:
            print()
            cmd = 'sudo tcpdump -r {} -xx'.format(self.tcp_dump_file)
            for i in range(cnt_down):
                print('\r{} of {} nodes requesting PXE boot. Scan cnt: {} '.
                      format(cnt, pxe_cnt, cnt_down - i),
                      end="")
                sys.stdout.flush()
                time.sleep(10)
                # read the tcpdump file if size is not 0
                if os.path.exists(self.tcp_dump_file) and os.path.getsize(
                        self.tcp_dump_file):
                    dump, stderr, rc = sub_proc_exec(cmd)
                    if rc != 0:
                        self.log.warning(
                            f'Error reading tcpdump file. rc: {rc}')
                    if 'reading' not in stderr:
                        self.log.warning(
                            f'Failure reading tcpdump file - {stderr}')
                mac_list = self._get_macs(mac_list, dump)
                cnt = len(mac_list)
                if cnt > cnt_prev:
                    cnt_prev = cnt
                    # Pause briefly for in flight DHCP to complete and lease file to update
                    time.sleep(5)
                    self._build_port_table_pxe(mac_list)
                if cnt >= pxe_cnt:
                    foundall = True
                    print(
                        '\r{} of {} nodes requesting PXE boot. Scan count: {} '
                        .format(cnt, pxe_cnt, cnt_down - i),
                        end="")
                    break
            self.log.debug('Table of found PXE ports: {}'.format(
                self.node_table_pxe))
            for switch in self.node_table_pxe:
                print('\n\nSwitch: {}'.format(switch))
                print(
                    tabulate(self.node_table_pxe[switch],
                             headers=('port', 'MAC address', 'IP address')))
                print()

            if cnt >= pxe_cnt:
                break
            print('\n\nPress Enter to continue scanning for cluster nodes.')
            print(
                "Or enter 'C' to continue cluster deployment with a subset of nodes"
            )
            print("Or enter 'R' to cycle power to missing nodes")
            resp = input("Or enter 'T' to terminate Power-Up ")
            if resp == 'T':
                resp = input("Enter 'y' to confirm ")
                if resp == 'y':
                    self.log.info("'{}' entered. Terminating Power-Up at user"
                                  " request".format(resp))
                    self._teardown_ns(self.ipmi_ns)
                    self._teardown_ns(pxe_ns)
                    sys.exit(1)
            elif resp == 'R':
                self._reset_unfound_nodes()
            elif resp == 'C':
                print('\nNot all nodes have been discovered')
                resp = input("Enter 'y' to confirm continuation of"
                             " deployment without all nodes ")
                if resp == 'y':
                    self.log.info(
                        "'{}' entered. Continuing Power-Up".format(resp))
                    break
        if cnt < pxe_cnt:
            self.log.warning('Failed to validate expected number of nodes')

        self._teardown_ns(pxe_ns)

        # Cycle power on all discovered nodes if bootdev set to 'network'
        if bootdev == 'network':
            self.log.debug('\nCycling power to discovered nodes.\n')
            set_power_clients('off', clients=self.bmc_ai)

            set_power_clients('on', clients=self.bmc_ai)

            set_bootdev_clients('network', clients=self.bmc_ai)

        self._teardown_ns(self.ipmi_ns)

        # Reset BMCs to insure they acquire a new address from container
        # during inv_add_ports. Avoids conflicting addresses during redeploy
        self._reset_existing_bmcs(self.node_list, self._get_cred_list())

        self.log.info('Cluster nodes validation complete')
        if not foundall:
            raise UserException('Not all node PXE ports validated')
    def __init__(self, name, bridge, addr):
        """Create a network namespace attached to a bridge via a veth pair.

        The namespace name is 'name' + the vlan suffix parsed from the
        bridge name. A veth pair is created; one end is attached to the
        bridge, the other is moved into the namespace, given 'addr' and
        brought up.

        Args:
            name (str): namespace name
            bridge (str): name of bridge to attach to
            addr (str): cidr of namespace address
        """
        self.log = logger.getlogger()
        self.addr = addr
        self.bridge = bridge
        # bridge names are of the form '<prefix>-<vlan>'
        self.vlan = bridge.split('-')[-1]
        self.name = name + self.vlan
        self.ip = IPRoute()
        self._disconnect_container()
        self.log.debug('Creating network namespace {}'.format(self.name))

        stdout, stderr, rc = sub_proc_exec('ip netns add {}'.format(self.name))
        if rc:
            self.log.debug(
                'An error occurred while creating namespace '
                f' {self.name}.\nreturn code: {rc}\nWarning: {stderr}')
        if stderr:
            # A pre-existing namespace of the same name is tolerated;
            # any other error is fatal
            if 'File exists' in stderr:
                self.log.debug(stderr)
            else:
                self.log.error('Unable to create namespace')
                sys.exit(1)

        self.br_ifc = 'veth-br-' + self.name.split('-')[0] + '-' + self.vlan
        self.peer_ifc = 'veth-' + self.name

        try:
            self.ip.link("add",
                         ifname=self.br_ifc,
                         peer=self.peer_ifc,
                         kind='veth')
        except NetlinkError as exc:
            # an existing identical veth pair is tolerated
            if 'File exists' not in str(exc):
                self.log.error('Failed creating veth pair. {}'.format(exc))
                sys.exit(1)

        try:
            # peer interface side disappears from host space once attached to
            # the namespace
            idx_ns_ifc = self.ip.link_lookup(ifname=self.peer_ifc)[0]
            self.ip.link('set', index=idx_ns_ifc, net_ns_fd=self.name)
        except IndexError:
            self.log.debug('Peer ifc already attached.')
        except NetlinkError:
            self.log.debug('Peer ifc already attached.')
        # attach the bridge-side veth end to the bridge
        idx_br = self.ip.link_lookup(ifname=bridge)[0]
        self.idx_br_ifc = self.ip.link_lookup(ifname=self.br_ifc)[0]
        self.ip.link('set', index=self.idx_br_ifc, master=idx_br)

        # bring up the interfaces
        cmd = 'ip netns exec {} ip link set dev {} up'.format(
            self.name, self.peer_ifc)
        stdout, stderr, rc = sub_proc_exec(cmd)

        cmd = 'ip netns exec {} ip link set dev lo up'.format(self.name)
        stdout, stderr, rc = sub_proc_exec(cmd)

        cmd = 'ip netns exec {} ip addr add {} dev {} brd +' \
            .format(self.name, addr, self.peer_ifc)
        stdout, stderr, rc = sub_proc_exec(cmd)

        # verify address setup
        # cmd = 'ip netns exec {} ip addr show'.format(self.name)
        # proc = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)
        # stdout, stderr = proc.communicate()

        self.ip.link('set', index=self.idx_br_ifc, state='up')
Example #24
0
    def sync(self, pkg_list, alt_url=None, py_ver=27):
        """Download python packages and index them into a 'simple' repo.

        Each package is downloaded with pip into self.pypirepo_dir, then a
        PEP 503 style 'simple/<name>/' directory of symlinks is built so the
        directory can be served as a pip index.

        Args:
            pkg_list (str): list of packages separated by space(s). Packages
                can include versions. ie Keras==2.0.5
            alt_url (str): alternate index url to download from instead of
                the default index. http and https urls are accepted.
            py_ver (int): python version passed to pip --python-version.
        """
        if not os.path.isdir(self.pypirepo_dir):
            os.mkdir(self.pypirepo_dir)
        pkg_cnt = len(pkg_list.split())
        print(
            f'Downloading {pkg_cnt} python{py_ver} packages plus dependencies:\n'
        )

        pkg_list2 = pkg_list.split()
        host = None
        if alt_url:
            # Extract the index host for --trusted-host. Accepts http or
            # https urls (original matched http only).
            host = re.search(r'https?://([^/]+)', alt_url).group(1)
        for pkg in pkg_list2:
            print(pkg)
            # Build the pip command directly per package. The original kept
            # an f-string *template* and ran eval() on it for each package,
            # which is unsafe and obscures the actual command.
            if alt_url:
                cmd = (f'python -m pip download --python-version {py_ver} '
                       f'--platform ppc64le --no-deps --index-url={alt_url} '
                       f'-d {self.pypirepo_dir} {pkg} --trusted-host {host}')
            else:
                cmd = (f'python -m pip download --python-version {py_ver} '
                       f'--platform ppc64le --no-deps -d {self.pypirepo_dir} '
                       f'{pkg}')
            resp, err, rc = sub_proc_exec(cmd, shell=True)
            if rc != 0:
                # functools32 is a py2.7-only backport; its failure under
                # python3 is expected and ignored.
                if 'functools32' in resp and 'for Python 2.7 only' in resp:
                    pass
                else:
                    self.log.error(
                        'Error occured while downloading python packages: '
                        f'\nResp: {resp} \nRet code: {rc} \nerr: {err}')

        # Build the PEP 503 'simple' index: one dir per normalized package
        # name containing symlinks to the downloaded files.
        if not os.path.isdir(self.pypirepo_dir + '/simple'):
            os.mkdir(self.pypirepo_dir + '/simple')
        dir_list = os.listdir(self.pypirepo_dir)
        cnt = 0

        for item in dir_list:
            if item[0] != '.' and os.path.isfile(self.pypirepo_dir + '/' +
                                                 item):
                # Split '<name>-<version>...' at the first version field
                res = re.search(r'([-_+\w\.]+)(?=-(\d+\.\d+){1,3}).+', item)
                if res:
                    cnt += 1
                    # Normalize the project name (PEP 503: lowercase,
                    # runs of ., _ become -)
                    name = res.group(1)
                    name = name.replace('.', '-')
                    name = name.replace('_', '-')
                    name = name.lower()
                    if not os.path.isdir(self.pypirepo_dir +
                                         f'/simple/{name}'):
                        os.mkdir(self.pypirepo_dir + f'/simple/{name}')
                    if not os.path.islink(self.pypirepo_dir +
                                          f'/simple/{name}/{item}'):
                        os.symlink(
                            self.pypirepo_dir + f'/{item}',
                            self.pypirepo_dir + f'/simple/{name}/{item}')
                else:
                    self.log.error(
                        f'mismatch: {item}. There was a problem entering '
                        f'{item}\ninto the python package index')
        self.log.info(
            f'A total of {cnt} packages exist or were added to the python '
            'package repository')
    def validate_ipmi(self):
        """Discover cluster node BMCs on the IPMI network and validate them.

        Sets up a DHCP server (dnsmasq) in the ipmi network namespace,
        repeatedly scans for nodes acquiring addresses, maps discovered
        addresses to switch ports, establishes BMC credentials, then power
        cycles the nodes with network boot set.

        Side effects: sets self.ipmi_ns, self.node_list, self.bmc_ai and
        self.ran_ipmi.

        Raises:
            UserException: if not all expected IPMI ports are validated.
        """
        self.log.info("Discover and validate cluster nodes")
        # if self.inv.check_all_nodes_ipmi_macs() and self.inv.check_all_nodes_pxe_macs():
        #     self.log.info("Inventory exists with IPMI and PXE MACs populated.")
        #     print("\nPress Enter to continue cluster deployment without "
        #           "running IPMI hardware validation.")
        #     print("Type 'C' to validate cluster nodes defined in current "
        #           "'config.yml'")
        #     resp = input("Type 'T' to terminate Power-Up ")
        #     if resp == 'T':
        #         resp = input("Type 'y' to confirm ")
        #         if resp == 'y':
        #             self.log.info("'{}' entered. Terminating Power-Up at user "
        #                           "request".format(resp))
        #             sys.exit(1)
        #     elif resp == 'C':
        #         self.log.info("'{}' entered. Continuing with hardware "
        #                       "validation".format(resp))
        #     else:
        #         print()
        #         return
        ipmi_cnt, pxe_cnt = self._get_port_cnts()
        ipmi_addr, bridge_addr, ipmi_prefix, ipmi_vlan = self._get_network(
            'ipmi')
        ipmi_network = ipmi_addr + '/' + str(ipmi_prefix)
        addr = IPNetwork(bridge_addr + '/' + str(ipmi_prefix))
        netmask = str(addr.netmask)
        ipmi_size = addr.size
        # Offset the namespace address away from the bridge address
        addr.value += NAME_SPACE_OFFSET_ADDR
        addr = str(addr)
        cred_list = self._get_cred_list()
        # rc becomes True once the expected number of nodes is discovered
        rc = False
        dhcp_st = get_dhcp_pool_start()
        self.ipmi_ns = NetNameSpace('ipmi-ns-', 'br-ipmi-' + str(ipmi_vlan),
                                    addr)

        # setup DHCP, unless already running in namesapce
        # save start and end addr raw numeric values
        self.log.debug('Installing DHCP server in network namespace')
        addr_st = self._add_offset_to_address(ipmi_network, dhcp_st)
        addr_end = self._add_offset_to_address(ipmi_network, ipmi_size - 2)
        dhcp_end = self._add_offset_to_address(ipmi_network,
                                               dhcp_st + ipmi_cnt + 2)

        # scan ipmi network for nodes with pre-existing ip addresses
        cmd = 'fping -r0 -a -g {} {}'.format(addr_st, addr_end)
        node_list, stderr, rc = sub_proc_exec(cmd)
        # fping rc 0/1 both indicate a completed scan (1 = some unreachable)
        if rc not in (0, 1):
            self.log.warning(f'Error scanning IPMI network. rc: {rc}')
        self.log.debug('Pre-existing node list: \n{}'.format(node_list))
        node_list = node_list.splitlines()

        self._reset_existing_bmcs(node_list, cred_list)

        if len(node_list) > 0:
            print('Pause 60s for BMCs to begin reset')
            time.sleep(60)

        # Skip dnsmasq setup if one is already running inside our namespace
        dns_list, stderr, rc = sub_proc_exec('pgrep dnsmasq')
        if rc not in [0, 1]:
            self.log.warning(f'Error looking for dnsmasq. rc: {rc}')
        dns_list = dns_list.splitlines()

        for pid in dns_list:
            ns_name, stderr, rc = sub_proc_exec(
                'ip netns identify {}'.format(pid))
            if self.ipmi_ns._get_name_sp_name() in ns_name:
                self.log.debug('DHCP already running in {}'.format(ns_name))
                break
        else:
            cmd = (f'dnsmasq --dhcp-leasefile={self.dhcp_ipmi_leases_file} '
                   f'--interface={self.ipmi_ns._get_name_sp_ifc_name()} '
                   f'--dhcp-range={addr_st},{dhcp_end},{netmask},600')
            stdout, stderr, rc = self.ipmi_ns._exec_cmd(cmd)
            if rc != 0:
                self.log.warning(f'Error setting up dnsmasq. rc: {rc}')
            print(stderr)

        # Scan up to 25 times. Delay 5 seconds between scans
        # Allow infinite number of retries
        self.log.info('Scanning BMC network on 5 s intervals')
        cnt = 0
        cnt_down = 25
        while cnt < ipmi_cnt:
            print()
            for i in range(cnt_down):
                print(
                    '\r{} of {} nodes requesting DHCP address. Scan count: {} '
                    .format(cnt, ipmi_cnt, cnt_down - i),
                    end="")
                sys.stdout.flush()
                time.sleep(5)
                # Ping the DHCP range to count nodes that took an address
                cmd = 'fping -r0 -a -g {} {}'.format(addr_st, dhcp_end)
                stdout, stderr, rc = sub_proc_exec(cmd)
                node_list = stdout.splitlines()
                cnt = len(node_list)
                if cnt >= ipmi_cnt:
                    rc = True
                    print(
                        '\r{} of {} nodes requesting DHCP address. Scan count: {} '
                        .format(cnt, ipmi_cnt, cnt_down - i),
                        end="")
                    break

            # Show the discovered switch port / MAC / IP mapping so far
            self._get_port_table_ipmi(node_list)
            self.log.debug('Table of found IPMI ports: {}'.format(
                self.node_table_ipmi))
            for switch in self.node_table_ipmi:
                print('\n\nSwitch: {}                '.format(switch))
                print(
                    tabulate(self.node_table_ipmi[switch],
                             headers=('port', 'MAC address', 'IP address')))
                print()

            if cnt >= ipmi_cnt:
                break
            # Not all nodes found: let the user keep scanning, continue
            # with a subset, or abort.
            print(
                '\n\nPress Enter to continue scanning for cluster nodes.\nOr')
            print(
                "Or enter 'C' to continue cluster deployment with a subset of nodes"
            )
            resp = input("Or Enter 'T' to terminate Power-Up ")
            if resp == 'T':
                resp = input("Enter 'y' to confirm ")
                if resp == 'y':
                    self.log.info(
                        "'{}' entered. Terminating Power-Up at user request".
                        format(resp))
                    self._teardown_ns(self.ipmi_ns)
                    sys.exit(1)
            elif resp == 'C':
                print('\nNot all nodes have been discovered')
                resp = input("Enter 'y' to confirm continuation of"
                             " deployment without all nodes ")
                if resp == 'y':
                    self.log.info(
                        "'{}' entered. Continuing PowerUp".format(resp))
                    break
        self.node_list = node_list
        if cnt < ipmi_cnt:
            self.log.warning('Failed to validate expected number of nodes')

        if len(node_list) > 0 and len(cred_list) > 0:
            # Verify and power off nodes
            self.bmc_ai = self._get_credentials(node_list, cred_list)
            if not self.bmc_ai:
                self.log.error(
                    'Critical error. Unable to establish BMC communication '
                    'with any cluster nodes.\n.')
                sys.exit('Exiting.')

        # set_power_clients('off') has built in 60 s delay
        self.log.info('\nPowering off cluster nodes')
        set_power_clients('off', clients=self.bmc_ai)

        set_power_clients('on', clients=self.bmc_ai)

        self.log.debug('\nSetting "network" boot device on all nodes')
        set_bootdev_clients('network', clients=self.bmc_ai)

        self.log.debug('Cluster nodes IPMI validation complete')
        self.ran_ipmi = True
        if not rc:
            raise UserException('Not all node IPMI ports validated')
 def _destroy_name_sp(self):
     """Tear down the namespace's veth pair and delete the namespace.

     Order matters: the bridge-side veth end is brought down and deleted
     (which removes its namespace peer too), the netlink socket is closed,
     then the namespace itself is removed.
     """
     self.ip.link('set', index=self.idx_br_ifc, state='down')
     self.ip.link('del', index=self.idx_br_ifc)
     self.ip.close()
     stdout, stderr, rc = sub_proc_exec('ip netns del {}'.format(self.name))
Example #27
0
def nginx_setup(root_dir='/srv', repo_id='nginx'):
    """Install and configure the nginx http server.

    Installs nginx if it is not present (RHEL via yum, Ubuntu via apt),
    enables and starts the service, backs up the stock default config and
    writes a 'server1.conf' serving *root_dir* with autoindexing.

    Args:
        root_dir (str): Path to root directory for requests
        repo_id (str): Name of nginx yum repository

    Returns:
        int: Return code from writing the nginx server configuration
    """

    log = logger.getlogger()

    # Probe for an existing nginx install; an OSError means the binary
    # is absent, so install it for the detected distribution.
    try:
        resp, err, rc = sub_proc_exec('nginx -v')
    except OSError:
        if 'rhel' in linux_distribution(full_distribution_name=False):
            resp, err, rc = sub_proc_exec('yum -y install nginx')
            if rc != 0:
                # First attempt failed: configure the nginx yum repo
                # and retry once before giving up.
                setup_nginx_yum_repo(root_dir, repo_id)
                resp, err, rc = sub_proc_exec('yum -y install nginx')
                if rc != 0:
                    log.error('Failed installing nginx')
                    log.error(resp)
                    sys.exit(1)
        elif 'ubuntu' in linux_distribution(full_distribution_name=False):
            for cmd in ['apt-get update', 'apt-get install -y nginx']:
                resp, err, rc = sub_proc_exec(cmd)
                if rc != 0:
                    log.error(f"A problem occured while running '{cmd}'")
                    log.error(f'Response: {resp}\nError: {err}\nRC: {rc}')

    resp, err, rc = sub_proc_exec('systemctl enable nginx.service')
    if rc != 0:
        log.error('Failed to enable nginx service')

    resp, err, rc = sub_proc_exec('systemctl start nginx.service')
    if rc != 0:
        log.error('Failed to start nginx service')

    # Move the distribution default config out of the way so our server
    # block is the one that answers.
    default_conf = '/etc/nginx/conf.d/default.conf'
    if os.path.isfile(default_conf):
        try:
            os.rename(default_conf, default_conf + '.bak')
        except OSError:
            log.warning('Failed renaming /etc/nginx/conf.d/default.conf')

    return nginx_modify_conf(
        '/etc/nginx/conf.d/server1.conf',
        directives={'listen': '80', 'server_name': 'powerup'},
        locations={'/': [f'root {root_dir}', 'autoindex on']})
def _is_firewall_running():
    """Check whether the firewalld service is currently active.

    Returns:
        bool: True if firewalld reports an active state, False otherwise.
            (The original fell off the end and returned an implicit None;
            an explicit False is returned instead — still falsy, so
            callers are unaffected.)
    """
    res, err, rc = sub_proc_exec('systemctl status firewalld')
    # 'systemctl status' exits 0 when the unit is loaded; only then is the
    # output worth inspecting for the active state.
    if not rc:
        if 'Active: active' in res or 'active (running)' in res:
            return True
    return False
Example #29
0
def main(args):
    """Aggregate dependency package lists captured around a software install.

    Reads 'pre' and 'post' install package snapshot files (yum, conda and
    pip) from a selected dependencies directory, diffs post against pre per
    repository, merges the results across environments and writes one yml
    package list per source repository into the same directory.

    Args:
        args (list): command line arguments; when non-empty they are parsed
            and the first dependencies directory is used without prompting.
    """
    log = logger.getlogger()
    log.debug('log this')
    user_input = len(args) > 0
    if user_input:
        args = parse_input(args)

    dep_base_path = gen.get_dependencies_path()
    dirs = [
        d for d in os.listdir(dep_base_path)
        if os.path.isdir(os.path.join(dep_base_path, d))
    ]
    dirs = [os.path.join(dep_base_path, _dir) for _dir in dirs]
    dep_dir = ""
    if user_input:
        dep_dir = dirs[0]

    if not user_input:
        ch, dep_dir = get_selection(dirs,
                                    prompt='Select a directory to aggregate '
                                    'dependencies from: ')
    dep_files = {}

    # Snapshot file names, grouped by package manager and capture time
    pip_pre_files = [
        'client_pip_pre_install.txt',
        'dlipy3_pip_pre_install.txt',
        'dlipy2_pip_pre_install.txt',
        'dlinsights_pip_pre_install.txt',
    ]
    dep_files['pip_pre_files'] = pip_pre_files

    pip_post_files = [
        'client_pip_post_install.txt',
        'dlipy3_pip_post_install.txt',
        'dlipy2_pip_post_install.txt',
        'dlinsights_pip_post_install.txt',
    ]
    dep_files['pip_post_files'] = pip_post_files

    conda_pre_files = [
        'dlipy3_conda_pre_install.txt',
        'dlipy2_conda_pre_install.txt',
        'dlinsights_conda_pre_install.txt',
    ]
    dep_files['conda_pre_files'] = conda_pre_files

    conda_post_files = [
        'dlipy3_conda_post_install.txt',
        'dlipy2_conda_post_install.txt',
        'dlinsights_conda_post_install.txt',
    ]
    dep_files['conda_post_files'] = conda_post_files

    yum_pre_files = ['client_yum_pre_install.txt']
    dep_files['yum_pre_files'] = yum_pre_files

    yum_post_files = ['client_yum_post_install.txt']
    dep_files['yum_post_files'] = yum_post_files

    # Locate the directory actually holding the snapshot files (they may
    # be nested below the selected directory)
    exists = glob.glob(f'{dep_dir}/**/{yum_pre_files[0]}', recursive=True)
    if exists:
        dep_dir = os.path.dirname(exists[0])
    else:
        log.error('No client yum pre file found')
        sys.exit()

    #  # Change file ownership to current user
    # if not os.access(dep_dir, os.W_OK):
    username = getpass.getuser()
    cmd = f'sudo chown -R {username}:{username} {dep_dir}'
    sub_proc_exec(cmd, shell=True)

    # Clear comments and other known header content from files
    for item in dep_files:
        for _file in dep_files[item]:
            _file_path = os.path.join(dep_dir, _file)
            with open(_file_path, 'r') as f:
                lines = f.read().splitlines()
            with open(_file_path, 'w') as f:
                for line in lines:
                    if line.startswith('#') or line.startswith('@'):
                        continue
                    else:
                        f.write(line + '\n')

    def file_check(file_list):
        # Warn interactively about any snapshot file that is missing
        for f in file_list:
            file_path = os.path.join(dep_dir, f)
            my_file = os.path.isfile(file_path)
            if my_file:
                pass
            else:
                input(f'\nINFO - {f} Does not exist\n')

    def get_pkg_repo(pkg, pkg_type):
        # Derive the source repository name from one snapshot line
        if pkg_type == 'yum':
            pkg_items = pkg.split()
            repo = pkg_items[2]
            if repo.endswith('-powerup'):
                repo = repo[:-8]

        elif pkg_type == 'pip':
            pkg_items = pkg.split()
            repo = pkg_items[2]
            if pkg_type in repo:
                repo = 'pypi'

        elif pkg_type == 'conda':
            pkg_dir = pkg.rpartition('/')[0]
            if 'ibm-ai' in pkg or 'ibmai' in pkg:
                if 'linux-ppc64le' in pkg:
                    repo = 'ibmai_linux_ppc64le'
                elif 'noarch' in pkg:
                    repo = 'ibmai_noarch'
                elif 'x86_64' in pkg:
                    repo = 'ibmai_linux_x86_64'
                else:
                    repo = 'ibm_ai_unresolved_reponame'
            elif 'repo.anaconda' in pkg:
                repo = '-'.join(pkg_dir.rsplit('/', 2)[-2:])
                repo = 'anaconda_' + repo.replace('-', '_')
            else:
                pkg_dir = pkg.rpartition('/')[0]
                repo = '_'.join(pkg_dir.rsplit('/', 2)[-2:])
        return repo

    def format_pkg_name(pkg, pkg_type):
        # Normalize a snapshot line to an installable package spec and
        # return it together with the repo it came from
        if pkg_type == 'yum':
            pkg_items = pkg.split()
            pkg_repo = get_pkg_repo(pkg, pkg_type)
            pkg_fmt_name = (pkg_items[0].rsplit('.', 1)[0] + '-' +
                            pkg_items[1] + '.' +
                            pkg_items[0].rsplit('.', 1)[1])

        elif pkg_type == 'conda':
            pkg_fmt_name = pkg.rpartition('/')[-1]
            pkg_repo = get_pkg_repo(pkg, pkg_type)

        elif pkg_type == 'pip':
            pkg_items = pkg.split()
            pkg_repo = get_pkg_repo(pkg, pkg_type)
            version = pkg_items[1].replace('(', '')
            version = version.replace(')', '')
            pkg_fmt_name = pkg_items[0] + '==' + version

        return pkg_fmt_name, pkg_repo

    def write_merged_files(merged_sets, pkg_type):
        # Write one sorted yml package list per repo and return metadata
        # describing the files written
        repo_list = {}
        if pkg_type == 'yum':
            repo_list[pkg_type] = []
            for repo in merged_sets:
                file_name = repo.replace('/', '')
                file_name = file_name.replace('@', '')
                file_name = f'{file_name}.yml'
                file_path = os.path.join(dep_dir, file_name)
                with open(file_path, 'w') as f:
                    d = {file_name: sorted(merged_sets[repo], key=str.lower)}
                    repo_list[pkg_type].append({
                        "path": file_path,
                        "filename": file_name,
                        "hash": d
                    })
                    yaml.dump(d, f, indent=4, default_flow_style=False)

        elif pkg_type == 'conda':
            repo_list[pkg_type] = []
            for repo in merged_sets:
                file_name = f'{repo}.yml'
                file_path = os.path.join(dep_dir, file_name)
                with open(file_path, 'w') as f:
                    d = {
                        file_name: sorted(list(merged_sets[repo]),
                                          key=str.lower)
                    }
                    repo_list[pkg_type].append({
                        "path": file_path,
                        "filename": file_name,
                        "hash": d
                    })
                    yaml.dump(d, f, indent=4, default_flow_style=False)

        elif pkg_type == 'pip':
            repo_list[pkg_type] = []
            for repo in merged_sets:
                file_name = 'pypi.yml'
                file_path = os.path.join(dep_dir, file_name)
                with open(file_path, 'w') as f:
                    d = {file_name: sorted(merged_sets[repo], key=str.lower)}
                    repo_list[pkg_type].append({
                        "path": file_path,
                        "filename": file_name,
                        "hash": d
                    })
                    yaml.dump(d, f, indent=4, default_flow_style=False)

        return repo_list

    def get_repo_list(pkgs, pkg_type):
        # Collect the unique repo names referenced by the snapshot lines
        repo_list = []
        if pkg_type == 'yum':
            for pkg in pkgs:
                repo = get_pkg_repo(pkg, pkg_type)
                if repo not in repo_list:
                    repo_list.append(repo)

        if pkg_type == 'conda':
            for pkg in pkgs:
                repo = get_pkg_repo(pkg, pkg_type)

                if repo not in repo_list:
                    repo_list.append(repo)

        if pkg_type == 'pip':
            for pkg in pkgs:
                if '<pip>' in pkg:
                    repo = get_pkg_repo(pkg, pkg_type)
                    if repo not in repo_list:
                        repo_list.append(repo)

        return repo_list

    def merge_function(pre_files, post_files, pkg_type):
        """ Merges packages of a given type listed in a collection of files
        collected 'post' installation and 'pre' installation for various
        environments.
        The merged set of 'pre' packages is removed from the merge set of
        'post' packages to arrive at the list of installed packages across
        all environments.
        """

        # generate pre paths
        pre_paths = []
        for file in pre_files:
            pre_paths.append(os.path.join(dep_dir, file))

        # Generate post paths
        post_paths = []
        for file in post_files:
            post_paths.append(os.path.join(dep_dir, file))

        # Loop through the files
        pkgs = {}  # # {file:{repo:{pre:[], post: []}
        for i, pre_file in enumerate(pre_paths):
            file_name = os.path.basename(pre_file)
            file_key = file_name.split('_')[0] + '_' + file_name.split('_')[1]
            pkgs[file_key] = {}
            post_file = post_paths[i]
            try:
                with open(pre_file, 'r') as f:
                    pre_pkgs = f.read().splitlines()
            except FileNotFoundError as exc:
                print(f'File not found: {pre_file}. Err: {exc}')

            try:
                with open(post_file, 'r') as f:
                    post_pkgs = f.read().splitlines()
            except FileNotFoundError as exc:
                print(f'File not found: {post_file}. Err: {exc}')

            # Get the repo list
            repo_list = get_repo_list(post_pkgs, pkg_type)
            for repo in repo_list:
                pkgs[file_key][repo] = {}
                pkgs[file_key][repo]['pre'] = []
                pkgs[file_key][repo]['post'] = []
                for pkg in pre_pkgs:
                    pkg_fmt_name, pkg_repo = format_pkg_name(pkg, pkg_type)
                    if pkg_repo == repo:
                        pkgs[file_key][repo]['pre'].append(pkg_fmt_name)

                for pkg in post_pkgs:
                    # Format the name
                    pkg_fmt_name, pkg_repo = format_pkg_name(pkg, pkg_type)
                    if pkg_repo == repo:
                        pkgs[file_key][repo]['post'].append(pkg_fmt_name)

        diff_sets = {}

        # Post - pre pkg sets. (may need adjustment for different repo type)
        for _file in pkgs:
            diff_sets[_file] = {}
            for repo in pkgs[_file]:
                post_minus_pre = set(pkgs[_file][repo]['post'])  # -
                # set(pkgs[_file][repo]['pre']))
                diff_sets[_file][repo] = post_minus_pre

        # Merge by repository
        merged_sets = {}

        for _file in diff_sets:
            for repo in diff_sets[_file]:
                if repo not in merged_sets:
                    merged_sets[repo] = set()
                merged_sets[repo] = merged_sets[repo] | diff_sets[_file][repo]

        return write_merged_files(merged_sets, pkg_type)

    file_check(yum_pre_files)
    file_check(yum_post_files)
    main_repo_list = merge_function(yum_pre_files, yum_post_files, 'yum')
    file_check(conda_pre_files)
    file_check(conda_post_files)
    conda_repo_list = merge_function(conda_pre_files, conda_post_files,
                                     'conda')
    merge_dicts(conda_repo_list, main_repo_list)
    file_check(pip_pre_files)
    file_check(pip_post_files)
    pip_repo_list = merge_function(pip_pre_files, pip_post_files, 'pip')
    merge_dicts(pip_repo_list, main_repo_list)
    software_type = args.software if user_input else None
    proc_family = ""
    if software_type:
        try:
            file_path = GEN_SOFTWARE_PATH + SOFT_FILE
            yaml_file = load_yamlfile(file_path)
            proc_family = "_" + yaml_file["proc_family"]
        except Exception:
            # Missing file or key: fall back to no proc-family suffix.
            # (Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt.)
            proc_family = ""
    lists, arch = parse_pkg_list(main_repo_list, software_type, proc_family)
    generate_pkg_list(lists, software_type, arch, dep_dir)
Example #30
0
def _validate_ansible_ping(software_hosts_file_path, hosts_list):
    """Validate Ansible connectivity and functionality on all hosts

    On failure, offers interactive remediation for two common causes
    (changed host keys, missing SSH key access) and then re-validates.

    Args:
        software_hosts_file_path (str): Path to software inventory file
        hosts_list (list): List of hostnames or IP addresses

    Returns:
        bool: True if Ansible can connect to all hosts

    Raises:
        UserException: If any host fails
    """
    log = logger.getlogger()
    cmd = ('{} -i {} -m ping all'.format(get_ansible_path(),
                                         software_hosts_file_path))
    resp, err, rc = sub_proc_exec(cmd)
    # Integer comparison, consistent with the other sub_proc_exec return
    # code checks in this module (was 'str(rc) != "0"').
    if rc != 0:
        msg = f'Ansible ping validation failed:\n{resp}'
        log.debug(msg)
        if 'WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!' in msg:
            print(
                '@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n'
                '@    WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED      @\n'
                '@             ON ONE OR MORE CLIENT NODES!                @\n'
                '@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n'
                'IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!\n'
                'Someone could be eavesdropping on you right now '
                '(man-in-the-middle attack)!\n'
                'It is also possible that a host key has just been changed.\n')
            if get_yesno('Remove the existing known host keys? '):
                known_hosts_files = (
                    [os.path.join(Path.home(), ".ssh", "known_hosts")])
                user_name, user_home_dir = get_user_and_home()
                # When running via sudo, also update the invoking user's
                # known_hosts file
                if user_home_dir != str(Path.home()):
                    known_hosts_files.append(os.path.join(user_home_dir,
                                                          ".ssh",
                                                          "known_hosts"))
                for host in hosts_list:
                    print(f'Collecting new host key(s) for {host}')
                    cmd = (f'ssh-keyscan -H {host}')
                    new_host_key, err, rc = sub_proc_exec(cmd)
                    for known_hosts in known_hosts_files:
                        print(f'Removing host keys for {host} '
                              f'from {known_hosts}')
                        cmd = (f'ssh-keygen -R {host} -f {known_hosts}')
                        resp, err, rc = sub_proc_exec(cmd)
                        print(f'Appending new host key for {host} to '
                              f'{known_hosts}')
                        append_line(known_hosts, new_host_key,
                                    check_exists=False)

                if user_home_dir != str(Path.home()):
                    # Restore ownership/permissions on the invoking user's
                    # known_hosts (ssh-keygen -R leaves a '.old' backup)
                    user_known_hosts = os.path.join(user_home_dir, ".ssh",
                                                    "known_hosts")
                    user_uid = pwd.getpwnam(user_name).pw_uid
                    user_gid = grp.getgrnam(user_name).gr_gid
                    os.chown(user_known_hosts, user_uid, user_gid)
                    os.chmod(user_known_hosts, 0o600)
                    os.chown(user_known_hosts + '.old', user_uid, user_gid)
                    os.chmod(user_known_hosts + '.old', 0o600)

                return _validate_ansible_ping(software_hosts_file_path,
                                              hosts_list)
        elif 'Permission denied' in msg:
            msg = ('The PowerUp software installer attempted to log into the '
                   'the client node(s) but was unsuccessful. SSH key access '
                   'may need to be configured.\n')
            print(msg)
            if get_yesno('OK to configure Client Nodes for SSH Key Access? '):
                configure_ssh_keys(software_hosts_file_path)
                return _validate_ansible_ping(software_hosts_file_path,
                                              hosts_list)
        raise UserException(msg)
    log.debug("Software inventory Ansible ping validation passed")
    return True