def remove_client_host_keys(config_path=None):
    """Purge stale SSH host keys for every client node's PXE address.

    For each PXE IP address in the inventory, the key is removed from
    the user's default known_hosts (via 'ssh-keygen -R') and, when
    present, from the playbooks-local known_hosts file as well.

    Args:
        config_path (str, optional): Path to the POWER-Up config file
            handed to Inventory (defaults to None).
    """
    log = logger.getlogger()
    inventory = Inventory(config_path)
    for pxe_ip in inventory.yield_nodes_pxe_ipaddr():
        log.info("Remove any stale ssh host keys for {}".format(pxe_ip))
        # Remove from the default ~/.ssh/known_hosts
        util.bash_cmd("ssh-keygen -R {}".format(pxe_ip))
        # ...and from the playbooks-local known_hosts, if one exists
        playbooks_known_hosts = os.path.join(gen.get_playbooks_path(),
                                             'known_hosts')
        if os.path.isfile(playbooks_known_hosts):
            util.bash_cmd("ssh-keygen -R {} -f {}".format(
                pxe_ip, playbooks_known_hosts))
def extract_iso_images(path):
    """Extract ISO images into webserver directory

    Every '*.iso' file found directly in 'path' is extracted into a
    directory named after the ISO under HTML_DIR so it can be served
    over http.  Files from netboot "mini" ISOs are additionally copied
    into the matching full-distro tree (needed by Ubuntu ppc64el
    before 16.04.2); mini ISO paths are excluded from the returned
    list.

    Args:
        path (str): Directory path containing ISOs

    Returns:
        list: Paths to extracted ISO images ("mini" ISOs excluded)
    """
    return_list = []

    if not path.endswith('/'):
        path += '/'

    # Extract ISO into web directory for access over http
    for _file in os.listdir(path):
        if _file.endswith('.iso'):
            name = os.path.splitext(_file)[0]
            # NOTE(review): assumes HTML_DIR ends with '/' — confirm
            dest_dir = HTML_DIR + name
            # If dest dir already exists continue to next file
            # (the ISO is assumed to have been extracted previously)
            if not os.path.isdir(dest_dir):
                os.mkdir(dest_dir)
                util.bash_cmd('xorriso -osirrox on -indev %s -extract / %s' %
                              ((path + _file), dest_dir))
                # xorriso creates read-only dirs; open them up for httpd
                util.bash_cmd('chmod 755 $(find %s -type d)' % dest_dir)
            # Do not return paths to "mini" isos
            if not _file.endswith('mini.iso'):
                return_list.append(dest_dir)

    # Ubuntu ppc64el before 16.04.2 requires files from netboot mini iso
    for _file in os.listdir(path):
        if _file.endswith('mini.iso'):
            # '[:-4]' strips the '.iso' suffix to locate the extracted dir
            src_dir = (HTML_DIR + _file[:-4] + '/install/')
            # '[:-9]' strips a 9-char suffix — assumes names like
            # '<distro>-mini.iso' so the full-distro dir is matched;
            # TODO confirm naming convention
            dest_dir = (HTML_DIR + _file[:-9] +
                        '/install/netboot/ubuntu-installer/ppc64el/')
            if not os.path.isdir(dest_dir):
                os.makedirs(dest_dir)
            for netboot_file in os.listdir(src_dir):
                util.copy_file(src_dir + netboot_file, dest_dir)

    return return_list
def _validate_host_list_network(host_list):
    """Validate that all hosts are resolvable hostnames and pingable

    Each entry must be a hostname — IP addresses are rejected — that
    resolves via a DNS/hosts lookup.  All hosts are then pinged in a
    single 'fping' invocation.

    Args:
        host_list (list): List of hostnames

    Returns:
        bool: True if all hosts resolve and respond to ping

    Raises:
        UserException: If a list item is an IP address, will not
            resolve, or does not respond to ping
    """
    log = logger.getlogger()
    for host in host_list:
        # Reject entries given as IP addresses; clients must be named
        # by hostname so later inventory/DNS handling works
        if not netaddr.valid_ipv4(host, flags=0):
            try:
                socket.gethostbyname(host)
            except socket.gaierror as exc:
                log.debug("Unable to resolve host to IP: '{}' exception: '{}'"
                          .format(host, exc))
                raise UserException("Unable to resolve hostname '{}'!"
                                    .format(host))
        else:
            raise UserException('Client nodes must be defined using hostnames '
                                f'(IP address found: {host})!')

    # Ping all hosts with one fping call; fping exits non-zero when
    # any host is unreachable
    try:
        bash_cmd('fping -u {}'.format(' '.join(host_list)))
    except CalledProcessError as exc:
        msg = "Ping failed on hosts:\n{}".format(exc.output)
        log.debug(msg)
        raise UserException(msg)
    log.debug("Software inventory host fping validation passed")

    return True
def _validate_inventory_count(software_hosts_file_path, min_hosts,
                              group='all'):
    """Validate minimum number of hosts are defined in inventory

    Calls Ansible to process inventory which validates file syntax.

    Args:
        software_hosts_file_path (str): Path to software inventory file
        min_hosts (int): Minimum number of hosts required to pass
        group (str, optional): Ansible group name (defaults to 'all')

    Returns:
        list: List of hosts defined in software inventory file

    Raises:
        UserException: Ansible reports host count of less than min_hosts
    """
    log = logger.getlogger()
    host_count = None
    host_list = []
    raw_host_list = bash_cmd(f'ansible {group} -i {software_hosts_file_path} '
                             '--list-hosts')

    # Iterate over ansible '--list-hosts' output; the header line ends
    # with the host count in parentheses, e.g. "hosts (3):"
    count_verified = False
    host_count_pattern = re.compile(r'.*\((\d+)\):$')
    for host in raw_host_list.splitlines():
        if not count_verified:
            # Verify host count meets the required minimum
            match = host_count_pattern.match(host)
            if match:
                host_count = int(match.group(1))
                log.debug("Ansible host count: {}".format(host_count))
                if host_count < min_hosts:
                    # Report the actual minimum, not a hard-coded "one"
                    raise UserException("Ansible reporting host count of less "
                                        "than {} ({})!".format(min_hosts,
                                                               host_count))
                count_verified = True
        else:
            # Lines after the header are the host names, indented
            host_list.append(host.strip())

    log.debug("Software inventory host count validation passed")
    log.debug("Ansible host list: {}".format(host_list))
    return host_list
def cobbler_install(config_path=None):
    """Install and configure Cobbler in container.

    This function must be called within the container 'pup-venv'
    python virtual environment. Cobbler will be installed within
    this environment.

    Args:
        config_path (str, optional): Path to the POWER-Up config file
            handed to Config (defaults to None).
    """
    cfg = Config(config_path)
    log = logger.getlogger()

    # Check to see if cobbler is already installed; a working install
    # short-circuits the whole function
    try:
        util.bash_cmd('cobbler check')
        log.info("Cobbler is already installed")
        return
    except util.CalledProcessError as error:
        if error.returncode == 127:
            # 127 == command not found: cobbler is simply not installed
            log.debug("'cobbler' command not found, continuing with "
                      "installation")
        else:
            # Installed but broken — ask the user before re-installing
            log.warning("Cobbler is installed but not working:")
            log.warning(error.output)
            print("\nPress enter to remove Cobbler and attempt to ")
            print("re-install, or 'T' to terminate.")
            resp = input("\nEnter or 'T': ")
            log.debug("User response = \'{}\'".format(resp))
            if resp == 'T':
                sys.exit('POWER-Up stopped at user request')

    # Clone cobbler github repo
    cobbler_url = URL
    cobbler_branch = BRANCH
    install_dir = gen.get_cobbler_install_dir()
    if os.path.exists(install_dir):
        log.info(
            "Removing Cobbler source directory \'{}\'".format(install_dir))
        util.bash_cmd('rm -rf %s' % install_dir)
    log.info(
        "Cloning Cobbler branch \'%s\' from \'%s\'" % (cobbler_branch,
                                                       cobbler_url))
    repo = Repo.clone_from(cobbler_url, install_dir, branch=cobbler_branch,
                           single_branch=True)
    log.info("Cobbler branch \'%s\' cloned into \'%s\'" %
             (repo.active_branch, repo.working_dir))

    # Modify the Cobbler script that writes DHCP reservations so that
    # the lease time is included.
    dhcp_lease_time = cfg.get_globals_dhcp_lease_time()
    util.replace_regex(MANAGE_DNSMASQ, r'systxt \= systxt \+ \"\\\\n\"',
                       "systxt = systxt + \",{}\\\\n\"".format(
                           dhcp_lease_time))

    # Use non-secure http to download network boot-loaders
    util.replace_regex(COBBLER_DLCONTENT, 'https://cobbler.github.io',
                       'http://cobbler.github.io')

    # Use non-secure http to download signatures
    util.replace_regex(COBBLER_SETTINGS_PY, 'https://cobbler.github.io',
                       'http://cobbler.github.io')

    # Run cobbler make install
    util.bash_cmd('cd %s; make install' % install_dir)

    # Backup original files before any of them are modified below
    util.backup_file(DNSMASQ_TEMPLATE)
    util.backup_file(MODULES_CONF)
    util.backup_file(COBBLER_WEB_SETTINGS)
    util.backup_file(COBBLER_CONF_ORIG)
    util.backup_file(COBBLER_WEB_CONF_ORIG)
    util.backup_file(COBBLER_SETTINGS)
    util.backup_file(PXEDEFAULT_TEMPLATE)
    util.backup_file(KICKSTART_DONE)
    util.backup_file(NTP_CONF)
    util.backup_file(APACHE2_CONF)

    # Create tftp root directory
    if not os.path.exists(TFTPBOOT):
        mode = 0o755
        os.mkdir(TFTPBOOT, mode)

    # Set IP address range to use for unrecognized DHCP clients
    dhcp_range = 'dhcp-range=%s,%s,%s # %s'
    util.remove_line(DNSMASQ_TEMPLATE, 'dhcp-range')
    dhcp_pool_start = gen.get_dhcp_pool_start()
    for index, netw_type in enumerate(cfg.yield_depl_netw_client_type()):
        depl_netw_client_ip = cfg.get_depl_netw_client_cont_ip(index)
        depl_netw_client_netmask = cfg.get_depl_netw_client_netmask(index)

        network = IPNetwork(depl_netw_client_ip + '/' +
                            depl_netw_client_netmask)

        # Range runs from the configured pool start offset to the last
        # address of the subnet; the CIDR is appended as a comment
        entry = dhcp_range % (str(network.network + dhcp_pool_start),
                              str(network.network + network.size - 1),
                              str(dhcp_lease_time),
                              str(network.cidr))
        util.append_line(DNSMASQ_TEMPLATE, entry)

        # Save PXE client network information for later
        if netw_type == 'pxe':
            cont_pxe_ipaddr = depl_netw_client_ip
            cont_pxe_netmask = depl_netw_client_netmask
            bridge_pxe_ipaddr = cfg.get_depl_netw_client_brg_ip(index)

    # Configure dnsmasq to enable TFTP server
    util.append_line(DNSMASQ_TEMPLATE, 'enable-tftp')
    util.append_line(DNSMASQ_TEMPLATE, 'tftp-root=%s' % TFTPBOOT)
    util.append_line(DNSMASQ_TEMPLATE, 'user=root')

    # Configure dnsmasq to use deployer as gateway (DHCP option 3)
    if cfg.get_depl_gateway():
        util.remove_line(DNSMASQ_TEMPLATE, 'dhcp-option')
        util.append_line(DNSMASQ_TEMPLATE, 'dhcp-option=3,%s' %
                         bridge_pxe_ipaddr)

    # Cobbler modules configuration: use dnsmasq for both DNS and DHCP
    util.replace_regex(MODULES_CONF, 'module = manage_bind',
                       'module = manage_dnsmasq')
    util.replace_regex(MODULES_CONF, 'module = manage_isc',
                       'module = manage_dnsmasq')

    # Copy cobbler.conf into apache2/conf-available
    copy2(COBBLER_CONF_ORIG, COBBLER_CONF)

    # Copy cobbler_web.conf into apache2/conf-available
    copy2(COBBLER_WEB_CONF_ORIG, COBBLER_WEB_CONF)

    # Apache2 configuration: enable cobbler confs and proxy modules
    util.bash_cmd('%s cobbler cobbler_web' % A2ENCONF)
    util.bash_cmd('%s proxy' % A2ENMOD)
    util.bash_cmd('%s proxy_http' % A2ENMOD)

    # Set secret key in web settings
    secret_key = _generate_random_characters()
    util.replace_regex(COBBLER_WEB_SETTINGS, '^SECRET_KEY = .*',
                       'SECRET_KEY = "%s"' % secret_key)

    # Remove "Order allow,deny" lines from cobbler configuration
    regex = '.*Order allow,deny'
    util.remove_line(COBBLER_CONF, regex)
    util.remove_line(COBBLER_WEB_CONF, regex)

    # Replace "Allow from all" with "Require all granted" in the
    # cobbler Apache configs (Apache 2.4 access-control syntax)
    regex = 'Allow from all'
    replace = 'Require all granted'
    util.replace_regex(COBBLER_CONF, regex, replace)
    util.replace_regex(COBBLER_WEB_CONF, regex, replace)

    # chown www-data WEBUI_SESSIONS
    uid = pwd.getpwnam("www-data").pw_uid
    gid = -1  # -1 leaves the group unchanged
    os.chown(WEBUI_SESSIONS, uid, gid)

    # Cobbler settings
    util.replace_regex(COBBLER_SETTINGS, '127.0.0.1', cont_pxe_ipaddr)
    util.replace_regex(COBBLER_SETTINGS, 'manage_dhcp: 0', 'manage_dhcp: 1')
    util.replace_regex(COBBLER_SETTINGS, 'manage_dns: 0', 'manage_dns: 1')
    util.replace_regex(COBBLER_SETTINGS, 'pxe_just_once: 0',
                       'pxe_just_once: 1')
    globals_env_variables = cfg.get_globals_env_variables()
    if globals_env_variables and 'http_proxy' in globals_env_variables:
        util.replace_regex(COBBLER_SETTINGS, 'proxy_url_ext: ""',
                           'proxy_url_ext: %s' %
                           globals_env_variables['http_proxy'])
    util.replace_regex(COBBLER_SETTINGS, 'default_password_crypted:',
                       'default_password_crypted: '
                       '$1$clusterp$/gd3ep3.36A2808GGdHUz.')

    # Create link to the cobbler package in python dist-packages
    if not os.path.exists(PY_DIST_PKGS):
        util.bash_cmd('ln -s %s/cobbler %s' % (LOCAL_PY_DIST_PKGS,
                                               PY_DIST_PKGS))

    # Set PXE timeout to maximum
    util.replace_regex(PXEDEFAULT_TEMPLATE, r'TIMEOUT \d+',
                       'TIMEOUT 35996')
    util.replace_regex(PXEDEFAULT_TEMPLATE, r'TOTALTIMEOUT \d+',
                       'TOTALTIMEOUT 35996')

    # Fix line break escape in kickstart_done snippet
    util.replace_regex(KICKSTART_DONE, "\\\\nwget", "wget")
    util.replace_regex(KICKSTART_DONE, r"\$saveks",
                       "$saveks + \"; \\\\\\\"\n")
    util.replace_regex(KICKSTART_DONE, r"\$runpost",
                       "$runpost + \"; \\\\\\\"\n")

    # Copy authorized_keys ssh key file to web repo directory
    copy2(ROOT_AUTH_KEYS, WWW_AUTH_KEYS)
    os.chmod(WWW_AUTH_KEYS, 0o444)

    # Add mgmt subnet broadcast address to NTP service configuration
    cont_pxe_broadcast = str(
        IPNetwork(cont_pxe_ipaddr + '/' + cont_pxe_netmask).broadcast)
    util.append_line(NTP_CONF, 'broadcast %s' % cont_pxe_broadcast)

    # Add 'Required-Stop' line to cobblerd init.d to avoid warning
    util.replace_regex(INITD + 'cobblerd', '### END INIT INFO',
                       '# Required-Stop:\n### END INIT INFO')

    # Set Apache2 'ServerName'
    util.append_line(APACHE2_CONF, "ServerName localhost")

    # Restart services so the new configuration takes effect
    _restart_service('ntp')
    _restart_service('cobblerd')
    _restart_service('apache2')

    # Update Cobbler boot-loader files
    util.bash_cmd('%s get-loaders' % COBBLER)

    # Update cobbler list of OS signatures
    util.bash_cmd('%s signature update' % COBBLER)

    # Run Cobbler sync
    util.bash_cmd('%s sync' % COBBLER)

    # Restart services (again) after sync
    _restart_service('apache2')
    _restart_service('cobblerd')
    _restart_service('dnsmasq')

    # Set services to start on boot
    _service_start_on_boot('cobblerd')
    _service_start_on_boot('ntp')
def _service_start_on_boot(service):
    """Enable an init.d service so it starts automatically at boot.

    Rewrites the LSB 'Default-Start'/'Default-Stop' headers in the
    service's init script, then registers it with update-rc.d.

    Args:
        service (str): Name of the init.d service to enable.
    """
    init_script = INITD + service
    lsb_headers = (
        ('# Default-Start:.*', '# Default-Start: 2 3 4 5'),
        ('# Default-Stop:.*', '# Default-Stop: 0 1 6'),
    )
    for pattern, replacement in lsb_headers:
        util.replace_regex(init_script, pattern, replacement)
    util.bash_cmd('update-rc.d %s defaults' % service)
def _restart_service(service):
    """Restart the named system service via the 'service' command.

    Args:
        service (str): Name of the service to restart.
    """
    restart_cmd = 'service %s restart' % service
    util.bash_cmd(restart_cmd)
def copy_dir_to_container(self, source_path, cont_dest_path):
    """Copy a host directory tree into the container.

    Streams a tar archive of source_path (with '-h' to follow
    symlinks) into a tar process running inside the container via
    lxc-attach; existing newer files in the destination are kept.

    Args:
        source_path (str): Host directory to copy from.
        cont_dest_path (str): Destination directory inside the
            container.
    """
    pipeline = ("tar -h -C {} -c . | lxc-attach -n {} -- tar -C {} -xvp "
                "--keep-newer-files")
    bash_cmd(pipeline.format(source_path, self.name, cont_dest_path))
def create(self):
    """Create, start, and provision the POWER-Up LXC container.

    Creates the container (or reuses an existing one with user
    consent), starts it, propagates DNS settings, sets up SSH keys
    and authorized_keys, installs distro packages listed in the
    project INI file, copies the power-up tree into the container,
    and installs the python virtual environment.

    Raises:
        UserException: on container create/start failure, unreadable
            resolv.conf, a half-missing SSH key pair, or INI parse
            errors.
    """
    # Check if container already exists
    if self.cont.defined:
        msg = "Container '%s' already exists" % self.name
        self.log.warning(msg)
        print("\nPress enter to continue with node configuration using ")
        print("existing container, or 'T' to terminate.")
        # NOTE(review): raw_input / SafeConfigParser below are
        # Python 2 APIs — this module appears to target Python 2
        resp = raw_input("\nEnter or 'T': ")
        if resp == 'T':
            sys.exit('POWER-Up stopped at user request')
    else:
        # Create container
        if not self.cont.create('download', lxc.LXC_CREATE_QUIET,
                                self.rootfs):
            msg = "Failed to create container '%s'" % self.name
            self.log.error(msg)
            raise UserException(msg)
        self.log.debug("Created container '%s'" % self.name)

    # Start container
    if not self.cont.running:
        if not self.cont.start():
            msg = "Failed to start container '%s'" % self.name
            self.log.error(msg)
            raise UserException(msg)
        self.log.debug("Started container '%s'" % self.name)

    # Get nameservers from /etc/resolv.conf outside container
    nameservers = []
    try:
        with open(self.RESOLV_CONF, 'r') as resolv_conf:
            for line in resolv_conf:
                if re.search(r'^nameserver', line):
                    nameservers.append(line.strip())
    except Exception as exc:
        msg = "Failed to read '{}' - '{}'".format(self.RESOLV_CONF, exc)
        self.log.error(msg)
        raise UserException(msg)

    self.log.info('Configuring container')

    # Update '/etc/resolv.conf' in container by updating
    # '/etc/resolvconf/resolv.conf.d/base'.  The grep||ex idiom
    # appends the line only if it is not already present.
    for line in nameservers:
        entry = '"a|%s"' % line
        line = '"%s"' % line
        self.run_command(
            ['sh', '-c',
             'grep ' + line + ' ' + self.RESOLV_CONF_BASE + ' || '
             'ex -sc ' + entry + ' -cx ' + self.RESOLV_CONF_BASE],
            stdout=self.fd)

    # Sleep to allow /etc/resolv.conf to update
    # Future enhancement is to poll for change
    self.run_command(["sleep", "5"], stdout=self.fd)

    # Create user (idempotent: skipped if 'deployer' already exists)
    self.run_command(
        ['sh', '-c',
         'grep deployer /etc/passwd || '
         'adduser --disabled-password --gecos GECOS deployer'],
        stdout=self.fd)

    # Create '/root/.ssh' directory
    self.run_command(['mkdir', '-p', '/root/.ssh'], stdout=self.fd)

    # Create '/root/.ssh/authorized_keys' file
    self.run_command(['touch', '/root/.ssh/authorized_keys'],
                     stdout=self.fd)

    # Change '/root/.ssh' permissions to 0700
    self.run_command(['chmod', '700', '/root/.ssh'], stdout=self.fd)

    # Change '/root/.ssh/authorized_keys' permissions to 0600
    self.run_command(['chmod', '600', '/root/.ssh/authorized_keys'],
                     stdout=self.fd)

    # Create new SSH private/public keys only if they don't exist
    if (not os.path.isfile(self.PRIVATE_SSH_KEY_FILE) and
            not os.path.isfile(self.PUBLIC_SSH_KEY_FILE)):
        key = RSA.generate(self.RSA_BIT_LENGTH)
        # Create user .ssh directory if needed
        if not os.path.exists(os.path.expanduser('~/.ssh')):
            os.mkdir(os.path.expanduser('~/.ssh'), 0o700)
        # Create private ssh key
        with open(self.PRIVATE_SSH_KEY_FILE, 'w') as ssh_key:
            ssh_key.write(key.exportKey())
        os.chmod(self.PRIVATE_SSH_KEY_FILE, 0o600)
        # Create public ssh key
        public_key = key.publickey().exportKey(format='OpenSSH')
        with open(self.PUBLIC_SSH_KEY_FILE, 'w') as ssh_key:
            ssh_key.write(public_key)
    # Throw exception if one of the key pair is missing
    elif (not os.path.isfile(self.PRIVATE_SSH_KEY_FILE) and
            os.path.isfile(self.PUBLIC_SSH_KEY_FILE)):
        raise UserException("Private SSH key is missing but public exists")
    elif (os.path.isfile(self.PRIVATE_SSH_KEY_FILE) and
            not os.path.isfile(self.PUBLIC_SSH_KEY_FILE)):
        raise UserException("Public SSH key is missing but private exists")

    # Add public ssh key to container
    with open(self.PUBLIC_SSH_KEY_FILE, 'r') as file_in:
        for line in file_in:
            # public key file should only be 1 line
            # if it's more than 1 the last will be used
            public_key = line
    entry = '"a|%s"' % public_key
    line = '"%s"' % public_key
    self.run_command(
        ['sh', '-c',
         'grep ' + line + ' /root/.ssh/authorized_keys || '
         'ex -sc ' + entry + ' -cx /root/.ssh/authorized_keys'],
        stdout=self.fd)

    print()
    self.log.info('Installing software packages in container\n'
                  'This may take several minutes depending on network '
                  'speed')

    # Update/Upgrade container distro packages
    self.run_command(["apt-get", "update"], stdout=self.fd)
    self.run_command(["apt-get", "dist-upgrade", "-y"], stdout=self.fd)

    # Read INI file listing packages to install
    ini = ConfigParser.SafeConfigParser(allow_no_value=True)
    try:
        ini.read(self.cont_ini)
    except ConfigParser.Error as exc:
        msg = exc.message.replace('\n', ' - ')
        self.log.error(msg)
        raise UserException(msg)

    # Install distro container packages
    if ini.has_section(self.Packages.DISTRO.value):
        cmd = ['apt-get', 'install', '-y']
        for pkg in ini.options(self.Packages.DISTRO.value):
            cmd.append(pkg)
        self.run_command(cmd, stdout=self.fd)

    # Install x86_64 arch specific packages
    if (self.rootfs.arch == 'amd64' and
            ini.has_section(self.Packages.DISTRO_AMD64.value)):
        cmd = ['apt-get', 'install', '-y']
        for pkg in ini.options(self.Packages.DISTRO_AMD64.value):
            cmd.append(pkg)
        self.run_command(cmd, stdout=self.fd)

    # Install ppc64el arch specific packages
    if (self.rootfs.arch == 'ppc64el' and
            ini.has_section(self.Packages.DISTRO_PPC64EL.value)):
        cmd = ['apt-get', 'install', '-y']
        for pkg in ini.options(self.Packages.DISTRO_PPC64EL.value):
            cmd.append(pkg)
        self.run_command(cmd, stdout=self.fd)

    # Create project directory
    self.run_command(['mkdir', '-p', self.cont_package_path],
                     stdout=self.fd)

    # Copy private ssh key pair to container (streamed via lxc-attach)
    bash_cmd("cat {} | lxc-attach -n {} -- /bin/bash -c \"cat > "
             "/root/.ssh/gen\"".format(self.PRIVATE_SSH_KEY_FILE,
                                       self.name))
    bash_cmd("cat {} | lxc-attach -n {} -- /bin/bash -c \"cat > "
             "/root/.ssh/gen.pub\"".format(self.PUBLIC_SSH_KEY_FILE,
                                           self.name))

    # Change private key file permissions to 0600
    self.run_command(['chmod', '600', '/root/.ssh/gen'], stdout=self.fd)

    # Copy power-up directory into container, excluding inventory
    # files, the virtualenv, and logs
    bash_cmd("tar --exclude='inventory*.yml' --exclude='pup-venv' -h "
             "--exclude='logs' -C {} -c . | lxc-attach -n {} -- tar -C {} "
             "-xvp --keep-newer-files".format(self.depl_package_path,
                                              self.name,
                                              self.cont_package_path))

    # Install python virtual environment
    self.run_command([self.cont_package_path + '/scripts/venv_install.sh',
                      self.cont_package_path + '/'], stdout=self.fd)

    # Create file to indicate whether project is installed in a container
    self.run_command(['touch', self.cont_id_file], stdout=self.fd)

    print()