def pub_deploy(hosts, profiles=None, identities=None, settings=None):
    """
    Setup a machine with a specified set of profiles.
    """
    # Avoid mutable default arguments.
    if profiles is None:
        profiles = []
    if identities is None:
        identities = []
    if settings is None:
        settings = {}
    remote_paths = []
    for host_path in hosts:
        parts = host_path.split(':')
        remote_paths += [parts[1]] if len(parts) > 1 else [DEFAULT_REMOTE_PATH]
    hosts = [host_path.split(':')[0] for host_path in hosts]
    host_ips = CLOUD_BACKEND.network_ip(hosts)
    for host, ipaddr in six.iteritems(host_ips):
        if not ipaddr:
            logging.error('cannot find IP for %s', host)
    fab.env.hosts = list(host_ips.values())
    for host, remote_path in zip(fab.env.hosts, remote_paths):
        fab.env.host_string = host
        if identities:
            rsync, prefix = find_rsync(host, relative=True, admin=True,
                username=fab.env.user, key=fab.env.key_filename)
            for src_path in identities:
                cmdline = rsync + [src_path + '/./*', prefix + '/']
                shell_command(cmdline)
        copy_setup(profiles, host, remote_path, settings=settings)
        run_dservices(profiles, host, remote_path, settings=settings)
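# Hedged usage sketch -- the host name, profile and identity directory below
# are illustrative placeholders, not values from this repository. Each host
# may carry an optional ':remote_path' suffix; without one,
# DEFAULT_REMOTE_PATH is used:
#
#   pub_deploy(['web1.example.com:/var/www/app'],
#       profiles=['nginx'],
#       identities=['identities/web1'])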
def find_privileged_executables(log_path_prefix):
    '''Look through the filesystem for executables that have the suid bit
    turned on and executables that can be executed as remote commands.'''
    # find suid privileged executables
    suid_results = log_path_prefix + '.suid'
    try:
        tero.shell_command([
            '/usr/bin/find', '/', '-type f',
            '\\( -perm -04000 -or -perm -02000 \\) -ls',
            ' > ' + suid_results])
    except RuntimeError:
        # It is ok to get an exception here. We cannot exclude /dev, etc.
        # when searching from root.
        pass
    # find rcmd executables
    rcmd_results = log_path_prefix + getpass.getuser() + '.rcmd'
    try:
        tero.shell_command([
            '/usr/bin/find', '/',
            '| grep -e ".rhosts" -e "hosts.equiv"',
            ' > ' + rcmd_results])
    except RuntimeError:
        # It is ok to get an exception here. We cannot exclude /dev, etc.
        # when searching from root.
        pass
def stageFile(pathname, context):
    """
    Prepare a configuration file for modification. It involves making a copy
    of the previous version, then opening a temporary file for editing.
    """
    stage_user = context.value('admin')
    stage_group = context.value('admin')
    new_path = context.modEtcDir + pathname
    org_path = context.tplEtcDir + pathname
    log_info('stage %s\n to %s\n original at %s'
        % (pathname, new_path, org_path))
    if not os.path.exists(org_path):
        # We copy the original configuration file into the local build
        # directory before modifying it.
        # Note that we only do that the first time through so unless
        # the original (cache) directory is deleted, we do not overwrite
        # the original files when the script is run a second time.
        try:
            shell_command([
                'install', '-D', '-p', '-o', stage_user, '-g', stage_group,
                pathname, org_path], admin=True)
        except Error:
            # We sometimes need sudo access to make backup copies of config
            # files (even ones with no credentials). This is just a convoluted
            # way to achieve the first copy before modification.
            pass
    if (not os.path.exists(os.path.dirname(new_path))
        and len(os.path.dirname(new_path)) > 0):
        os.makedirs(os.path.dirname(new_path))
    return org_path, new_path
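# Typical staging flow (a sketch; '/etc/motd' is an arbitrary example path).
# The pristine copy under tplEtcDir is what later diffs run against:
#
#   org_path, new_path = stageFile('/etc/motd', context)
#   with open(org_path) as org_conf, open(new_path, 'w') as new_conf:
#       new_conf.write(org_conf.read())
#       new_conf.write('# managed by tero\n')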
def find_running_processes(log_path_prefix, dist_host):
    '''List running processes into "*log_path_prefix*.processes"'''
    log_path = os.path.abspath(log_path_prefix + '.processes')
    ps_cmd = ['/bin/ps', '-ej']
    if not dist_host.endswith('Darwin'):
        ps_cmd += ['HF']
    tero.shell_command(ps_cmd, log_path, True)
def pub_stage(src_path, host):
    '''Copy a directory tree from the local machine to the staged machine
    root directory. This is often used to copy credentials before running
    a deploy command.'''
    rsync, prefix = find_rsync(host, relative=True, admin=True,
        username=fab.env.user, key=fab.env.key_filename)
    cmdline = rsync + [src_path + '/./*', prefix + '/']
    shell_command(cmdline)
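# The '/./' in the source path is rsync's --relative anchor point: everything
# to its right is recreated under *prefix* on the remote side. Illustrative
# call (directory and host are placeholders):
#
#   pub_stage('identities/web1', 'web1.example.com')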
def create_archives(backup_dir, backup_tops):
    '''Create an archive out of each backup_top.'''
    os.chdir(backup_dir)
    for backup_top in backup_tops:
        basename = os.path.basename(backup_top)
        archive = tero.stampfile(basename)
        tero.shell_command([
            'tar', '--bzip2', '-cf', archive,
            '-C', os.path.dirname(backup_top),
            '--exclude', 'build/',
            basename])
    tero.dstamp.cleanup_aged_files(backup_dir)
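# Illustrative call (paths are placeholders): archives /var/www/myapp into a
# stamped .tar.bz2 under /var/backups, then prunes aged archives there:
#
#   create_archives('/var/backups', ['/var/www/myapp'])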
def fingerprint_fs(context, log_path_prefix, exclude_tops=None):
    '''Uses mtree to take a fingerprint of the filesystem and output
    the specification file in "*log_path_prefix*.mtree".
    If an *exclude_tops* file exists, it contains patterns used to skip
    over parts of the filesystem to fingerprint.'''
    if exclude_tops and os.path.exists(exclude_tops):
        exclude_tops_flags = " -X " + exclude_tops
    else:
        exclude_tops_flags = ""
    tero.shell_command([os.path.join(context.value('binDir'), 'mtree'),
        ' -c -K sha1digest -p /', exclude_tops_flags,
        ' > ' + os.path.abspath(log_path_prefix + '.mtree')])
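# The file handed to mtree's -X flag lists one fnmatch(3)-style pattern per
# line for paths to skip; a plausible exclude_tops file (illustrative):
#
#   ./dev
#   ./proc
#   ./sys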
def find_open_ports(log_path_prefix, dist_host, apps=None):
    """
    List processes listening on open ports
    """
    app_by_pids = {}
    if dist_host.endswith('Darwin'):
        tero.shell_command(['/usr/sbin/lsof', '-i', '-P'],
            pat=r'.*', admin=True)
    else:
        output = tero.shell_command(['/bin/netstat', '-n', '-atp'],
            pat=r'.*', admin=True)
        for line in output:
            # Proto Recv-Q Send-Q LocalAddress ForeignAddress State PID/Program
            look = re.match(
                r'(?P<proto>\S+)\s+(?P<recvq>\d+)\s+(?P<sendq>\d+)\s+'
                r'(?P<local_address>((\d+\.\d+\.\d+\.\d+)'
                r'|([0-9a-f]*:[0-9a-f]*:[0-9a-f]*)):\d+)\s+'
                r'(?P<foreign_address>((\d+\.\d+\.\d+\.\d+)'
                r'|([0-9a-f]*:[0-9a-f]*:[0-9a-f]*)):[0-9\*]+)\s+'
                r'(?P<state>\S+)\s+(?P<pid>\d+)/(?P<program_name>.+)$', line)
            if look:
                pid = int(look.group('pid'))
                local_address = look.group('local_address')
                foreign_address = look.group('foreign_address')
                port = local_address.split(':')[-1]
                app_by_pids.update({pid: {
                    'port': port,
                    'local_address': local_address,
                    'foreign_address': foreign_address}})
    # Open ports as listed by nmap
    #XXX tero.shell_command(['nmap', 'localhost'], admin=True)
    if apps is None:
        apps = {}
    if apps:
        for app_name, app_snap in apps.items():
            pid = app_snap.get('pid')
            if not pid or pid not in app_by_pids:
                # Not every app process is listening on a port.
                continue
            app_snap.update(app_by_pids.pop(pid))
    apps.update(app_by_pids)
    return apps
def find_running_processes(log_path_prefix, dist_host, apps=None):
    """
    List running processes
    """
    app_by_pids = {}
    ps_cmd = ['/bin/ps', '-e', 'u']
    if dist_host.endswith('Darwin'):
        ps_cmd += ['HF']
    output = tero.shell_command(ps_cmd, pat=r'.*', admin=True)
    for line in output:
        #USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
        # START could be a time (hh:mm) or a date (ex:Jun09)
        look = re.match(
            r'(?P<user>\S+)\s+(?P<pid>\d+)\s+(?P<cpu>\d+\.\d+)\s+'
            r'(?P<mem>\d+\.\d+)\s+(?P<vsz>\d+)\s+(?P<rss>\d+)\s+'
            r'(?P<tty>\S+)\s+(?P<stat>\S+)\s+(?P<start>\S+)\s+'
            r'(?P<time>\d+:\d+)\s+(?P<command>.+)$', line)
        if look:
            user = look.group('user')
            pid = int(look.group('pid'))
            command = look.group('command')
            app_by_pids.update({pid: {'user': user, 'command': command}})
    if apps is None:
        apps = {}
    if apps:
        for app_name, app_snap in apps.items():
            pid = app_snap.get('pid')
            if not pid or pid not in app_by_pids:
                # The recorded pid file can be stale.
                continue
            app_snap.update(app_by_pids.pop(pid))
    apps.update(app_by_pids)
    return apps
def find_open_ports(log_path_prefix, dist_host):
    '''List processes listening on open ports
    into "*log_path_prefix*.ports"'''
    log_path = os.path.abspath(log_path_prefix + '.ports')
    if dist_host.endswith('Darwin'):
        tero.shell_command(['/usr/sbin/lsof', '-i', '-P'], log_path, True)
    else:
        tero.shell_command(['/bin/netstat', '-atp'], log_path, True)
        tero.shell_command(['/bin/netstat', '-n', '-atp'], log_path, True)
    # Open ports as listed by nmap
    tero.shell_command(['nmap', 'localhost'], log_path, True)
def find_apps(root_dir):
    """
    Find apps installed in *root_dir*
    """
    apps = {}
    for app_name in os.listdir(root_dir):
        python_version = None
        python = os.path.join(root_dir, app_name, 'bin', 'python')
        # find python version
        if os.path.exists(python):
            cmdline = [python, '--version']
            version_output = subprocess.check_output(cmdline)
            look = re.match(r'Python ([0-9]+(\.[0-9]+)*)',
                version_output.decode('utf-8'))
            if look:
                python_version = look.group(1)
        apps.update({app_name: {
            'owner': pwd.getpwuid(
                os.stat(os.path.join(root_dir, app_name)).st_uid).pw_name,
            'dependencies': {'python': python_version}
        }})
        # find python prerequisites
        pip = os.path.join(root_dir, app_name, 'bin', 'pip')
        if os.path.exists(pip):
            cmdline = [pip, 'freeze']
            output_lines = tero.shell_command(cmdline, pat=r'.*')
            for line in output_lines:
                look = re.match(r'(\S+)==(\S+)', line)
                if look:
                    prerequisite = look.group(1)
                    version = look.group(2)
                    apps[app_name]['dependencies'].update(
                        {prerequisite: version})
        # find process PID
        pid_path = os.path.join(
            root_dir, app_name, 'var', 'run', '%s.pid' % app_name)
        if os.path.exists(pid_path):
            with open(pid_path) as pid_file:
                pid = int(pid_file.read())
            apps[app_name].update({'pid': pid})
    return apps
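# Shape of the returned mapping (app name and values are illustrative):
#
#   {'myapp': {
#       'owner': 'myapp',
#       'dependencies': {'python': '3.9.16', 'Django': '3.2.18'},
#       'pid': 1234}}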
def pub_check(names, reference=None):
    """
    Run specified checks
    """
    if not names:
        sys.stdout.write(
            "Please choose one of the 'schema' or 'apps' commands.\n")
        return
    reference_schema = None
    command = names.pop(0)
    if command == 'apps':
        if reference:
            logging.warning("loading reference %s ...", reference)
            with open(reference) as schema_file:
                reference_schema = json.loads(schema_file.read())
        check_apps(reference_prerequisites=reference_schema)
    elif command == 'schema':
        if not names:
            names = tero.shell_command([
                'psql', '-qAt', '-c',
                "select datname from pg_database where datallowconn"],
                pat=r'.*', admin='postgres')
        if reference:
            logging.warning("loading reference %s ...", reference)
            with open(reference) as schema_file:
                reference_schema = _load_sqlschema(schema_file.read())
        for name in names:
            schema_text = None
            if name and name.endswith('.sql'):
                schema_path = name
                logging.warning("loading %s ...", schema_path)
                with open(schema_path) as schema_file:
                    schema_text = schema_file.read()
            else:
                cmdline = ['sudo', '-u', 'postgres', 'pg_dump',
                    '--schema-only', name]
                logging.warning("loading %s ...", ' '.join(cmdline))
                schema_text = subprocess.check_output(cmdline)
            check_sqlschema(schema_text, reference_schema=reference_schema)
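# Illustrative invocations (file and database names are placeholders). With
# no database names, 'schema' falls back to every database psql reports as
# accepting connections:
#
#   pub_check(['apps'], reference='apps-reference.json')
#   pub_check(['schema', 'mydb'], reference='reference-schema.sql')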
def create_install_script(project_name, context, install_top):
    """
    Create custom packages and an install script that can be run
    to set up the local machine. After this step, the final directory
    can then be tar'ed up and distributed to the local machine.
    """
    # Create a package through the local package manager or alternatively
    # a simple archive of the configuration files and postinst script.
    prev = os.getcwd()
    share_dir = os.path.join(install_top, 'share')
    project_name = os.path.basename(context.value('modEtcDir'))
    package_dir = context.obj_dir(os.path.basename(context.value('modEtcDir')))
    if not os.path.exists(package_dir):
        os.makedirs(package_dir)
    make_simple_archive = True
    if make_simple_archive:
        os.chdir(context.value('modEtcDir'))
        package_path = os.path.join(package_dir,
            project_name + '-' + str(__version__) + '.tar.bz2')
        archived = []
        for dirname in ['etc', 'usr', 'var']:
            if os.path.exists(dirname):
                archived += [dirname]
        shell_command(['tar', 'jcf', package_path] + archived)
    else:
        os.chdir(package_dir)
        for bin_script in ['dws', 'dbldpkg']:
            build_bin_script = context.obj_dir(os.path.join('bin', bin_script))
            if os.path.islink(build_bin_script):
                os.remove(build_bin_script)
            os.symlink(os.path.join(install_top, 'bin', bin_script),
                build_bin_script)
        build_share_drop = context.obj_dir(os.path.join('share', 'dws'))
        if os.path.islink(build_share_drop):
            os.remove(build_share_drop)
        if not os.path.isdir(os.path.dirname(build_share_drop)):
            os.makedirs(os.path.dirname(build_share_drop))
        os.symlink(os.path.join(share_dir, 'dws'), build_share_drop)
        pub_make(['dist'])
        with open(os.path.join(
                package_dir, '.packagename')) as package_name_file:
            package_path = package_name_file.read().strip()
    os.chdir(prev)

    # Create install script
    fetch_packages = FilteredList()
    tero.INDEX.parse(fetch_packages)
    for package in fetch_packages.fetches:
        tero.EXCLUDE_PATS += [os.path.basename(package).split('_')[0]]
    obj_dir = context.obj_dir(project_name)
    install_script_path = os.path.join(obj_dir, 'install.sh')
    install_script = tero.setup.create_install_script(
        install_script_path, context=context)
    install_script.write('''#!/bin/sh
# Script to setup the server
set -x
''')
    deps = ordered_prerequisites([project_name], tero.INDEX)
    for dep in tero.EXCLUDE_PATS + [project_name]:
        if dep in deps:
            deps.remove(dep)
    install_script.prerequisites(deps)
    package_name = os.path.basename(package_path)
    local_package_path = os.path.join(obj_dir, package_name)
    if (not os.path.exists(local_package_path)
        or not os.path.samefile(package_path, local_package_path)):
        print('copy %s to %s' % (package_path, local_package_path))
        shutil.copy(package_path, local_package_path)
    package_files = [os.path.join(project_name, package_name)]
    for name in fetch_packages.fetches:
        fullname = context.local_dir(name)
        package = os.path.basename(fullname)
        if not os.path.isfile(fullname):
            # If the package is not present (might happen if dws/semilla
            # are already installed on the system), let's download it.
            fetch(tero.CONTEXT,
                {'https://djaodjin.com/resources/./%s/%s' # XXX
                 % (context.host(), package): None})
        shutil.copy(fullname, os.path.join(obj_dir, package))
        install_script.install(package, force=True)
        package_files += [os.path.join(project_name, package)]
    install_script.install(package_name, force=True,
        postinst_script=tero.setup.postinst.postinst_path)
    install_script.write('echo done.\n')
    install_script.script.close()
    shell_command(['chmod', '755', install_script_path])

    prev = os.getcwd()
    os.chdir(os.path.dirname(obj_dir))
    shell_command(['tar', 'jcf', project_name + '.tar.bz2',
        os.path.join(project_name, 'install.sh')] + package_files)
    os.chdir(prev)
    return os.path.join(os.path.dirname(obj_dir), project_name + '.tar.bz2')
def copy_setup(profiles, host, remote_path, settings=None):
    """
    Copy scripts needed for configuration onto the remote machine.
    """
    if settings is None:
        settings = {}
    pythondir = os.path.dirname(os.path.dirname(__file__))
    basedir = os.path.dirname(os.path.dirname(os.path.dirname(pythondir)))
    bindir = os.path.join(basedir, 'bin')
    etcdir = os.path.join(basedir, 'etc')
    sharedir = os.path.join(basedir, 'share')
    profilesdir = os.path.join(sharedir, 'tero', 'profiles')
    files = [
        os.path.join(pythondir, 'tero'),
        os.path.join(pythondir, 'dws'),
        os.path.join(bindir, 'dservices'),
        os.path.join(bindir, 'dbldpkg'),
        os.path.join(bindir, 'dws'),
        os.path.join(sharedir, 'dws'),
        os.path.join(sharedir, 'tero'),
        os.path.join(etcdir, 'tero', 'config')]
    prefix = os.path.commonprefix(files)
    dirpath = tempfile.mkdtemp()
    stage_top = os.path.join(dirpath, os.path.basename(remote_path))
    stage_profile_dir = os.path.join(stage_top, 'share', 'tero', 'profiles')
    for staged in files:
        stage_path = staged.replace(prefix, stage_top + os.sep)
        if not os.path.exists(os.path.dirname(stage_path)):
            os.makedirs(os.path.dirname(stage_path))
        if os.path.isdir(staged):
            shutil.copytree(staged, stage_path)
        else:
            shutil.copy(staged, stage_path)
    for profile_name in profiles:
        look = re.match(r'\w+@(\w+.)+\w+:\S+', profile_name)
        if not look:
            # This does not look like a profile on a remote machine
            # so let's assume it is a local file.
            profile_abs_path = os.path.abspath(profile_name)
            if not os.path.isfile(profile_abs_path):
                profile_abs_path = os.path.join(
                    profilesdir, profile_name + '.xml')
            if not os.path.isfile(profile_abs_path):
                raise ValueError('cannot find profile "%s"' % profile_name)
            if not profile_abs_path.startswith(profilesdir):
                # We are setting up a profile which is not in the default set,
                # so let's copy it to the machine being setup as well.
                shutil.copy(profile_abs_path, stage_profile_dir)
    if fab.env.password:
        # We will need a sudo password to install packages and configure
        # them according to a profile.
        askpass_path = os.path.join(stage_top, 'bin', 'askpass')
        with open(askpass_path, 'w') as askpass:
            askpass.write('#!/bin/sh\n')
            askpass.write('echo %s\n' % fab.env.password)
        import stat
        os.chmod(askpass_path, stat.S_IRWXU)
    if True:
        # XXX Either implementation is asking for password
        # XXX admin=True otherwise we cannot create directory in /var/www.
        cmdline, prefix = find_rsync(host, relative=False, admin=False,
            key=fab.env.key_filename)
        cmdline += ['--exclude=".git"', dirpath + '/*']
        dest = host + ':' + os.path.dirname(remote_path)
        if fab.env.user:
            dest = fab.env.user + '@' + dest
        cmdline += [dest]
        shell_command(cmdline)
    else:
        import fabric.contrib.project
        fabric.contrib.project.rsync_project(
            local_dir=dirpath + '/*',
            remote_dir=os.path.dirname(remote_path),
            exclude=['.git'])
    if os.path.isdir(dirpath):
        shutil.rmtree(dirpath)
def find_disk_usage(dist_host):
    """
    List information about disk usage
    """
    tero.shell_command(['/usr/bin/df', '-lh', '--total'])
def main(args):
    '''Configure a machine to serve as a forum server, with ssh, e-mail
    and web daemons. Hook-up the server machine with a dynamic DNS server
    and make it reachable from the internet when necessary.'''
    import __main__
    import argparse

    # We keep a starting time stamp such that we can later on
    # find out the services that need to be restarted. These are
    # the ones whose configuration files have a modification
    # later than *start_timestamp*.
    start_timestamp = datetime.datetime.now()
    prev = os.getcwd()
    bin_base = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
    parser = argparse.ArgumentParser(
        usage='%(prog)s [options] *profile*\n\nVersion:\n  %(prog)s version '
        + str(__version__))
    parser.add_argument('profiles', nargs='*',
        help='Profiles to use to configure the machine.')
    parser.add_argument('--version', action='version',
        version='%(prog)s ' + str(__version__))
    parser.add_argument('-D', dest='defines', action='append', default=[],
        help='Add a (key,value) definition to use in templates.')
    parser.add_argument('--fingerprint', dest='fingerprint',
        action='store_true', default=False,
        help='Fingerprint the system before making modifications')
    parser.add_argument('--skip-recurse', dest='install',
        action='store_false', default=True,
        help='Assumes all prerequisites to build the'\
' configuration package have been installed correctly. Generate'\
' a configuration package but do not install it.')
    parser.add_argument('--dyndns', dest='dyndns', action='store_true',
        help='Add configuration for dynamic DNS')
    parser.add_argument('--sshkey', dest='sshkey', action='store_true',
        help='Configure the ssh daemon to disable password login and use'\
' keys instead')
    options = parser.parse_args(args[1:])
    if len(options.profiles) < 1:
        parser.print_help()
        sys.exit(1)

    # siteTop where packages are built
    conf_top = os.getcwd()
    tero.ASK_PASS = os.path.join(bin_base, 'askpass')

    # -- Let's start the configuration --
    if not os.path.isdir(conf_top):
        os.makedirs(conf_top)
    os.chdir(conf_top)
    tero.USE_DEFAULT_ANSWER = True
    tero.CONTEXT = Context()
    tero.CONTEXT.config_filename = os.path.join(conf_top, 'dws.mk')
    tero.CONTEXT.buildTopRelativeCwd \
        = os.path.dirname(tero.CONTEXT.config_filename)
    tero.CONTEXT.environ['version'] = __version__

    # Configuration information
    # Add necessary variables in context, then parse a list of variable
    # definitions with format key=value from the command line and append
    # them to the context.
    for define in options.defines:
        key, value = define.split('=')
        tero.CONTEXT.environ[key] = value
    project_name = tero.CONTEXT.value('PROJECT_NAME')
    log_path_prefix = stampfile(tero.CONTEXT.log_path(
        os.path.join(tero.CONTEXT.host(), socket.gethostname())))
    if options.fingerprint:
        fingerprint(tero.CONTEXT, log_path_prefix)
    if options.install:
        # \todo We ask the sudo password upfront such that the
        # non-interactive install process does not bail out because
        # it needs a password.
        try:
            shell_command(
                ['SUDO_ASKPASS="%s"' % tero.ASK_PASS, 'sudo', 'echo', 'hello'])
        except Error:
            # In case sudo requires a password, let's explicitly ask for it
            # and cache it now.
            sys.stdout.write("%s is asking to cache the sudo password such"
                " that it won't be asked in the non-interactive part"
                " of the script.\n" % sys.argv[0])
            shell_command(
                ['SUDO_ASKPASS="%s"' % tero.ASK_PASS, 'sudo', '-A', '-v'])
    setups = prepare_local_system(tero.CONTEXT, project_name, options.profiles)
    os.chdir(prev)
    try:
        with open(os.path.join(
                tero.CONTEXT.value('modEtcDir'), 'config.book'), 'w') as book:
            book.write('''<?xml version="1.0"?>
<section xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xmlns:xi="http://www.w3.org/2001/XInclude">
  <info>
    <title>Modification to configuration files</title>
  </info>
  <section>
    <programlisting>''')
            cmd = subprocess.Popen(' '.join(['diff', '-rNu',
                tero.CONTEXT.value('tplEtcDir'),
                tero.CONTEXT.value('modEtcDir')]),
                shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            book.write(''.join(cmd.stdout.readlines()))
            book.write('</programlisting>\n</section>\n')
    except Error:
        # We do not check the error code here since diff completes
        # with a non-zero error code whenever we modified a config file.
        pass

    # Create the postinst script
    create_postinst(start_timestamp, setups)
    final_install_package = create_install_script(project_name, tero.CONTEXT,
        install_top=os.path.dirname(bin_base))

    # Install the package as if it was a normal distribution package.
    if options.install:
        if not os.path.exists('install'):
            os.makedirs('install')
        shutil.copy(final_install_package, 'install')
        os.chdir('install')
        install_basename = os.path.basename(final_install_package)
        project_name = '.'.join(install_basename.split('.')[:-2])
        shell_command(['tar', 'jxf', os.path.basename(final_install_package)])
        sys.stdout.write('ATTENTION: A sudo password is required now.\n')
        os.chdir(project_name)
        shell_command(['./install.sh'], admin=True)
def create_postinst(start_timestamp, setups, context=None):
    '''This routine will copy the updated config files on top of the existing
    ones in /etc and will issue necessary commands for the updated config
    to be effective. This routine thus requires executing a lot of commands
    with admin privileges.'''
    if not context:
        context = tero.CONTEXT
    # \todo how to do this better?
    with open(os.path.join(
            context.value('modEtcDir'), 'Makefile'), 'w') as mkfile:
        mkfile.write('''
# With dws, this Makefile will be invoked through
#     make -f *buildTop*/dws.mk *srcDir*/Makefile
#
# With rpmbuild, this Makefile will be invoked directly by rpmbuild like that:
#     make install DESTDIR=~/rpmbuild/BUILDROOT/*projectName*
#
# We thus need to accommodate both cases, hence the following "-include"
# directive.
-include dws.mk
include %(share_dir)s/dws/prefix.mk

DATAROOTDIR := /usr/share

install::
\tif [ -d ./etc ] ; then \\
\t\tinstall -d $(DESTDIR)$(SYSCONFDIR) && \\
\t\tcp -rpf ./etc/* $(DESTDIR)$(SYSCONFDIR) ;\\
\tfi
\tif [ -d ./var ] ; then \\
\t\tinstall -d $(DESTDIR)$(LOCALSTATEDIR) && \\
\t\tcp -rpf ./var/* $(DESTDIR)$(LOCALSTATEDIR) ; \\
\tfi
\tif [ -d ./usr/share ] ; then \\
\t\tinstall -d $(DESTDIR)$(DATAROOTDIR) && \\
\t\tcp -rpf ./usr/share/* $(DESTDIR)$(DATAROOTDIR) ; \\
\tfi
\tif [ -d ./usr/lib/systemd/system ] ; then \\
\t\tinstall -d $(DESTDIR)/usr/lib/systemd/system && \\
\t\tcp -rpf ./usr/lib/systemd/system/* $(DESTDIR)/usr/lib/systemd/system ; \\
\tfi

include %(share_dir)s/dws/suffix.mk
''' % {'share_dir': context.value('shareDir')})
    for pathname in ['/var/spool/cron/crontabs']:
        if not os.access(pathname, os.W_OK):
            tero.setup.postinst.shellCommand([
                '[ -f ' + pathname + ' ]', '&&',
                'chown ', context.value('admin'), pathname])

    # Execute the extra steps necessary after installation
    # of the configuration files and before restarting the services.
    daemons = []
    for setup in setups:
        if setup:
            daemons = merge_unique(daemons, setup.daemons)
    # Restart services
    if tero.setup.postinst.scriptfile:
        tero.setup.postinst.scriptfile.write('\n# Restart services\n')
    for daemon in daemons:
        tero.setup.postinst.serviceRestart(daemon)
        if daemon in tero.setup.after_statements:
            for stmt in tero.setup.after_statements[daemon]:
                tero.setup.postinst.shellCommand([stmt])
    if tero.setup.postinst.scriptfile:
        tero.setup.postinst.scriptfile.close()
    shell_command(['chmod', '755', tero.setup.postinst.postinst_path])
def run(self, context):
    complete = super(postgresql_serverSetup, self).run(context)
    if not complete:
        # As long as the default setup cannot find all prerequisite
        # executables, libraries, etc. we cannot update configuration
        # files here.
        return complete

    db_host = context.value('dbHost')
    vpc_cidr = context.value('vpc_cidr')
    pg_user = context.value('dbUser')
    postgresql_conf = os.path.join(self.pgdata, 'postgresql.conf')
    pg_ident_conf = os.path.join(self.pgdata, 'pg_ident.conf')
    pg_hba_conf = os.path.join(self.pgdata, 'pg_hba.conf')
    if not os.path.exists(postgresql_conf):
        # /var/lib/pgsql/data will be empty unless we run initdb once.
        shell_command([self.postgresql_setup, 'initdb'])

    listen_addresses = "'localhost'"
    for key, val in six.iteritems(self.managed[
            '%s-server' % self.daemons[0].replace('-', '')]['files']):
        if key == 'listen_addresses':
            listen_addresses = ', '.join(
                ["'%s'" % address[0] for address in val])

    postgresql_conf_settings = {'listen_addresses': listen_addresses}
    if db_host:
        db_ssl_key_file = "/etc/pki/tls/private/%s.key" % db_host
        db_ssl_cert_file = "/etc/pki/tls/certs/%s.crt" % db_host
        dh_params = "/etc/ssl/certs/dhparam.pem"
        if (os.path.exists(db_ssl_key_file)
            and os.path.exists(db_ssl_cert_file)):
            postgresql_conf_settings.update({
                'ssl': "on",
                'ssl_cert_file': "'%s'" % db_ssl_cert_file,
                'ssl_key_file': "'%s'" % db_ssl_key_file,
                'ssl_prefer_server_ciphers': "on",
                #ssl_ca_file = ''
                #ssl_crl_file = ''
                #ssl_ecdh_curve = 'prime256v1'
                #ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
            })
            if os.path.exists(dh_params):
                postgresql_conf_settings.update({
                    'ssl_dh_params_file': "'%s'" % dh_params,
                })
            postinst.shellCommand(
                ['chown', 'root:postgres', db_ssl_key_file])
            postinst.shellCommand(['chmod', '640', db_ssl_key_file])
            postinst.shellCommand(
                ['chmod', '755', os.path.dirname(db_ssl_key_file)])
    modify_config(postgresql_conf,
        settings=postgresql_conf_settings, sep=' = ', context=context)

    # pg_ident
    system_to_pg_mapping = {'postgres': 'postgres'}
    if pg_user:
        system_to_pg_mapping.update({'/^(.*)$': pg_user})
    else:
        logging.warning("dbUser is '%s'. No regular user will be created"
            " to access the database remotely.", pg_user)
    old_conf_path, new_conf_path = stageFile(pg_ident_conf, context)
    with open(new_conf_path, 'w') as new_conf:
        with open(old_conf_path) as old_conf:
            for line in old_conf.readlines():
                look = re.match(r'^mymap\s+(\S+)\s+(\S+)', line.strip())
                if look:
                    system_user = look.group(1)
                    if system_user in system_to_pg_mapping:
                        self.write_ident_line(new_conf, system_user,
                            system_to_pg_mapping[system_user])
                        del system_to_pg_mapping[system_user]
                else:
                    new_conf.write(line)
        for system_user, pgident_user in six.iteritems(system_to_pg_mapping):
            self.write_ident_line(new_conf, system_user, pgident_user)

    # pg_hba
    connections = [
        ['all', 'postgres', vpc_cidr],
        # 'all' because we need to add a constraint on auth_user
        ['all', pg_user, vpc_cidr]]
    old_conf_path, new_conf_path = stageFile(pg_hba_conf, context)
    with open(new_conf_path, 'w') as new_conf:
        with open(old_conf_path) as old_conf:
            source_host = 'host'
            if postgresql_conf_settings.get('ssl') == "on":
                source_host = 'hostssl'
            for line in old_conf.readlines():
                look = re.match(r'^local.*peer$', line.strip())
                if look:
                    new_conf.write(line.strip() + ' map=mymap\n')
                else:
                    look = re.match(
                        r'^(host|hostssl|hostnossl)\s+(?P<db>\S+)\s+'
                        r'(?P<pg_user>\S+)\s+(?P<cidr>\S+)\s+(?P<method>\S+)',
                        line.strip())
                    if look:
                        found = None
                        remains = []
                        for conn in connections:
                            if (conn[0] == look.group('db')
                                and conn[1] == look.group('pg_user')):
                                found = conn
                            else:
                                remains += [conn]
                        connections = remains
                        if found:
                            new_conf.write(
                                '%(host)s %(db)s%(pg_user)s%(cidr)smd5\n' % {
                                    'host': source_host.ljust(10),
                                    'db': found[0].ljust(16),
                                    'pg_user': found[1].ljust(16),
                                    'cidr': found[2].ljust(24)})
                        else:
                            new_conf.write(line)
                    else:
                        new_conf.write(line)
        if connections:
            new_conf.write("# Remote connections\n")
            for conn in connections:
                new_conf.write(
                    '%(host)s %(db)s%(pg_user)s%(cidr)smd5\n' % {
                        'host': source_host.ljust(10),
                        'db': conn[0].ljust(16),
                        'pg_user': conn[1].ljust(16),
                        'cidr': conn[2].ljust(24)})

    self.create_cron_conf(context)
    #XXX optimizations?
    #https://people.planetpostgresql.org/devrim/index.php?/archives/83-Using-huge-pages-on-RHEL-7-and-PostgreSQL-9.4.html
    postinst.shellCommand([
        '[ -d %(pgdata)s/base ] ||' % {'pgdata': self.pgdata},
        self.postgresql_setup, 'initdb'])
    return complete
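# A rewritten pg_hba.conf entry then looks like (user and CIDR illustrative;
# 'hostssl' whenever ssl=on was configured above, otherwise 'host'):
#
#   hostssl    all             dbuser          10.0.0.0/16             md5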