def install():
    """ Install and secure Percona server, storing the root password in /root/.my.cnf. """
    with sudo():
        # Generate a root password and save it in root home
        root_conf_path = '/root/.my.cnf'
        if not fabric.contrib.files.exists(root_conf_path):
            root_pw = generate_password()
            blueprint.upload('root_my.cnf', '/root/.my.cnf', {'password': root_pw})
            debian.chmod('/root/.my.cnf', mode=600)
        else:
            # TODO: use fabric.operations.get instead of cat when up to date with upstream
            with silent():
                contents = run('cat {}'.format(root_conf_path))
            parser = ConfigParser.RawConfigParser()
            parser.readfp(StringIO(contents))
            root_pw = parser.get('client', 'password')

        # Install external PPA
        info('Adding apt key for {}', __name__)
        run("apt-key adv --keyserver keys.gnupg.net --recv-keys 1C4CBDCDCD2EFD2A")
        info('Adding apt repository for {}', __name__)
        debian.add_apt_repository('http://repo.percona.com/apt trusty main')
        debian.apt_get('update')

        # Percona/MySQL base dependencies
        dependencies = (
            'percona-server-server',
            'percona-server-client',
            'libmysqlclient-dev',
            'mysqltuner'
        )

        # Configure debconf to autoset root password on installation prompts
        server_package = dependencies[0]
        debian.debconf_communicate('PURGE', server_package)
        with silent():
            debian.debconf_set_selections(
                '{}/root_password password {}'.format(server_package, root_pw),
                '{}/root_password_again password {}'.format(server_package, root_pw)
            )

        # Install package
        info('Installing {}', __name__)
        debian.apt_get('install', *dependencies)
        debian.debconf_communicate('PURGE', server_package)

        # Auto-answer mysql_secure_installation prompts
        prompts = {
            'Enter current password for root (enter for none): ': root_pw,
            'Change the root password? [Y/n] ': 'n',
            'Remove anonymous users? [Y/n] ': 'Y',
            'Disallow root login remotely? [Y/n] ': 'Y',
            'Remove test database and access to it? [Y/n] ': 'Y',
            'Reload privilege tables now? [Y/n] ': 'Y'
        }
        # Run mysql_secure_installation to remove test-db and remote root login
        with settings(prompts=prompts):
            run('mysql_secure_installation')
def update_modules(beat):
    """ Reconcile enabled beat modules with the blueprint's desired set.

    :param beat: Beat name (e.g. metricbeat)
    :return: List of module names that were toggled
    """
    changes = []

    # Get desired state
    desired_modules = set(blueprint.get('{}.modules'.format(beat), []))

    # Get current state
    with silent():
        module_files = run('find /etc/{}/modules.d -iname "*.yml"'.format(beat)).split()
    enabled_modules = {os.path.basename(path).split('.')[0] for path in module_files}

    # Disable extra services
    for module in enabled_modules - desired_modules:
        info('Disabling {} module: {}', beat, module)
        changes.append(module)
        with silent(), sudo():
            run('{} modules disable {}'.format(beat, module))

    # Enable services
    for module in desired_modules - enabled_modules:
        info('Enabling {} module: {}', beat, module)
        changes.append(module)
        with silent(), sudo():
            run('{} modules enable {}'.format(beat, module))

    return changes
def update_filters():
    """ Reconcile enabled conf symlinks with the configured filters.

    :return: List of changed links/sources
    """
    changes = []

    # Generate desired state as enabled_name => source_name
    config = blueprint.get('config', {})
    filters = {
        '{}-{}.conf'.format(str(weight).zfill(2), conf): "{}.conf".format(conf)
        for weight, conf in config.iteritems()
    }

    # Get current state
    with silent():
        enabled_filters = run('ls {}'.format(conf_enabled_path)).split()

    # Disable extra services
    if blueprint.get('auto_disable_conf', True):
        for link in set(enabled_filters) - set(filters.keys()):
            info('Disabling conf: {}', link)
            changes.append(link)
            with silent(), sudo(), cd(conf_enabled_path):
                debian.rm(link)

    # Enable services
    for target in set(filters.keys()) - set(enabled_filters):
        source = os.path.join(conf_available_path, filters[target])
        info('Enabling conf: {}', target)
        changes.append(source)
        with silent(), sudo(), cd(conf_enabled_path):
            debian.ln(source, target)

    return changes
def install():
    """ Install Percona server/client and run mysql_secure_installation. """
    with sudo():
        # Generate a root password and save it in root home
        root_conf_path = '/root/.my.cnf'
        if not fabric.contrib.files.exists(root_conf_path):
            root_pw = generate_password()
            blueprint.upload('root_my.cnf', '/root/.my.cnf', {'password': root_pw})
            debian.chmod('/root/.my.cnf', mode=600)
        else:
            # TODO: use fabric.operations.get instead of cat when up to date with upstream
            with silent():
                output = run('cat {}'.format(root_conf_path))
            config_parser = ConfigParser.RawConfigParser()
            config_parser.readfp(StringIO(output))
            root_pw = config_parser.get('client', 'password')

        # Install external PPA
        info('Adding apt key for {}', __name__)
        run("apt-key adv --keyserver keys.gnupg.net --recv-keys 1C4CBDCDCD2EFD2A")
        info('Adding apt repository for {}', __name__)
        debian.add_apt_repository('http://repo.percona.com/apt trusty main')
        debian.apt_get('update')

        # Percona/MySQL base dependencies
        dependencies = (
            'percona-server-server',
            'percona-server-client',
            'libmysqlclient-dev',
            'mysqltuner'
        )
        server_package = dependencies[0]

        # Configure debconf to autoset root password on installation prompts
        debian.debconf_communicate('PURGE', server_package)
        with silent():
            selection = '{}/root_password password {}'
            debian.debconf_set_selections(
                selection.format(server_package, root_pw),
                '{}/root_password_again password {}'.format(server_package, root_pw)
            )

        # Install package
        info('Installing {}', __name__)
        debian.apt_get('install', *dependencies)
        debian.debconf_communicate('PURGE', server_package)

        # Auto-answer mysql_secure_installation prompts
        prompts = {
            'Enter current password for root (enter for none): ': root_pw,
            'Change the root password? [Y/n] ': 'n',
            'Remove anonymous users? [Y/n] ': 'Y',
            'Disallow root login remotely? [Y/n] ': 'Y',
            'Remove test database and access to it? [Y/n] ': 'Y',
            'Reload privilege tables now? [Y/n] ': 'Y'
        }
        # Run mysql_secure_installation to remove test-db and remote root login
        with settings(prompts=prompts):
            run('mysql_secure_installation')
def info(scope=''):
    """
    Get runtime information from redis itself
    """
    with silent(), hide_prefix():
        api.info(api.run('redis-cli info ' + scope))
def enable(conf, weight, do_restart=True):
    """
    Enable logstash input/output provider

    :param conf: Input or output provider config file
    :param weight: Weight of provider
    :param do_restart: Restart service
    :return: Got enabled?
    """
    enabled = False
    if not conf.endswith(".conf"):
        conf = "{}.conf".format(conf)
    with sudo():
        available_conf = os.path.join(conf_available_path, conf)
        if not files.exists(available_conf):
            warn("Invalid conf: {}".format(conf))
        else:
            with cd(conf_enabled_path):
                # Weight-prefix the link name, e.g. 05-foo.conf
                conf = "{}-{}".format(str(weight).zfill(2), conf)
                if not files.exists(conf):
                    info("Enabling conf: {}", conf)
                    with silent():
                        debian.ln(available_conf, conf)
                    enabled = True
                    if do_restart:
                        restart("server")
    return enabled
def dump(schema=None):
    """
    Dump and download all configured, or given, schemas.

    :param schema: Specific schema to dump and download.
    """
    if not schema:
        # Interactively pick one of the configured schemas
        schemas = blueprint.get('schemas', {}).keys()
        for i, schema in enumerate(schemas, start=1):
            print("{i}. {schema}".format(i=i, schema=schema))
        valid_indices = '[1-{}]+'.format(len(schemas))
        schema_choice = prompt('Select schema to dump:', default='1',
                               validate=valid_indices)
        schema = schemas[int(schema_choice) - 1]

    with sudo('postgres'):
        now = datetime.now().strftime('%Y-%m-%d')
        output_file = '/tmp/{}_{}.backup'.format(schema, now)
        filename = os.path.basename(output_file)

        info('Dumping schema {}...', schema)
        run('pg_dump -c -F {format} -f {output_file} {schema}'.format(
            format='tar', output_file=output_file, schema=schema))

        info('Downloading dump...')
        local_file = '~/{}'.format(filename)
        files.get(output_file, local_file)

    # Clean up the remote temp file
    with sudo(), silent():
        debian.rm(output_file)

    info('New smoking hot dump at {}', local_file)
def generate_pgtune_conf(role='db'):
    """
    Run pgtune and create pgtune.conf

    :param role: Which fabric role to place local pgtune.conf template under
    """
    conf_path = postgres_root('postgresql.conf')
    with sudo(), silent():
        output = run('pgtune -T Web -i {}'.format(conf_path)).strip()

    def parse(content):
        # Keep only the lines pgtune itself annotated; strip trailing comments
        for line in content.splitlines():
            if '# pgtune' not in line:
                continue
            try:
                line = line[:line.index('#')]
            except ValueError:
                pass
            key, _, value = line.partition('=')
            strippable = '\n\r\t\'" '
            key = key.strip(strippable)
            value = value.strip(strippable)
            if key:
                yield key, value or None

    tune_conf = dict(parse(output))
    # Blueprint settings override pgtune's suggestions
    tune_conf.update(blueprint.get('pgtune', {}))
    tune_conf = '\n'.join(' = '.join(item) for item in tune_conf.iteritems())

    conf_dir = os.path.join(os.path.dirname(env['real_fabfile']),
                            'templates', role, 'postgres')
    conf_path = os.path.join(conf_dir, 'pgtune.conf')
    if not os.path.exists(conf_dir):
        os.makedirs(conf_dir)
    with open(conf_path, 'w+') as f:
        f.write(tune_conf)
def reset(branch, repository_path=None, **kwargs):
    """
    Fetch, reset, clean and checkout repository branch.

    :return: commit
    """
    commit = None
    if not repository_path:
        repository_path = debian.pwd()
    with cd(repository_path):
        name = os.path.basename(repository_path)
        info('Resetting git repository: {}@{}', name, branch)
        with silent('warnings'):
            commands = (
                'git fetch origin',  # Fetch branches and tags
                'git reset --hard HEAD',  # Make hard reset to HEAD
                'git clean -fdx',  # Remove untracked files pyc, xxx~ etc
                'git checkout HEAD',  # Checkout HEAD
                'git reset refs/remotes/origin/{} --hard'.format(branch)  # Reset to branch
            )
            output = run(' && '.join(commands))
            if output.return_code != 0:
                warn('Failed to reset repository "{}", probably permission denied!'.format(name))
            else:
                # Last line looks like: "HEAD is now at <sha> <subject>"
                output = output.split(os.linesep)[-1][len('HEAD is now at '):]
                commit = output.split()[0]
                info('HEAD is now at: {}', output)
    return commit
def enable(site, do_reload=True):
    """
    Enable site

    :param site: Site to enable
    :param do_reload: Reload nginx service
    :return: Got enabled?
    """
    enabled = False
    if not (site.endswith('.conf') or site == 'default'):
        site = '{}.conf'.format(site)
    with sudo():
        available_site = os.path.join(sites_available_path, site)
        if not files.exists(available_site):
            warn('Invalid site: {}'.format(site))
        else:
            with cd(sites_enabled_path):
                if not files.exists(site):
                    info('Enabling site: {}', site)
                    with silent():
                        debian.ln(available_site, site)
                    enabled = True
                    if do_reload:
                        reload()
    return enabled
def enable(program, do_reload=True):
    """
    Enable program.

    :param program: Program to enable
    :param do_reload: Reload supervisor
    :return: Got enabled?
    """
    enabled = False
    if not (program.endswith('.conf') or program == 'default'):
        program = '{}.conf'.format(program)
    with sudo():
        available_program = os.path.join(programs_available_path, program)
        if not files.exists(available_program):
            warn('Invalid program: {}'.format(program))
        else:
            with cd(programs_enabled_path):
                if not files.exists(program):
                    info('Enabling program: {}', program)
                    with silent():
                        debian.ln(available_program, program)
                    enabled = True
                    if do_reload:
                        reload()
    return enabled
def log(repository_path=None, commit='HEAD', count=1, path=None):
    """
    Get log for repository and optional commit range.

    :param repository_path: Repository path
    :param commit: Commit to log, ex HEAD..origin
    :param path: Path or file to log
    :return: [(<commit>, <comment>), ...]
    """
    if not repository_path:
        repository_path = debian.pwd()
    with cd(repository_path), silent():
        cmd = 'git log --pretty=oneline {}'.format(commit)
        if count:
            cmd += ' -{}'.format(count)
        if path:
            cmd += ' -- {}'.format(path)
        output = run(cmd, pty=False)

    # Flatten "<sha> <subject>" rows into tokens, then pair them back up
    tokens = [
        col.strip()
        for row in output.stdout.strip().split('\n')
        for col in row.split(' ', 1)
        if col
    ]
    return zip(tokens[::2], tokens[1::2])
def configure():
    """
    Enable/disable configured programs
    """
    with sudo():
        # Upload templates
        uploads = blueprint.upload('init/', '/etc/init/')
        uploads.extend(blueprint.upload('supervisord.conf', '/etc/'))

        changes = []
        programs = blueprint.get('programs') or []

        # Disable previously enabled programs not configured programs-enabled
        if blueprint.get('auto_disable_programs', True):
            with silent():
                enabled_program_links = run(
                    'ls {}'.format(programs_enabled_path)).split()
            for link in enabled_program_links:
                link_name = os.path.splitext(link)[0]  # Without extension
                if link not in programs and link_name not in programs:
                    changes.append(disable(link, do_reload=False))

        # Enable programs from settings
        for program in programs:
            changes.append(enable(program, do_reload=False))

        # Reload supervisor if new templates or any program has been enabled/disabled
        if uploads or any(changes):
            reload()
def install_solr():
    """ Download and install the configured Solr version under /usr/share. """
    with sudo():
        version = blueprint.get('version')
        version_tuple = tuple(map(int, version.split('.')))

        # Archives before 4.1.0 carry an "apache-" prefix
        archive = 'solr-{}.tgz'.format(version)
        if version_tuple < (4, 1, 0):
            archive = 'apache-{}'.format(archive)
        url = 'https://archive.apache.org/dist/lucene/solr/{}/{}'.format(version, archive)

        with cd('/tmp'):
            info('Download {} ({})', 'Solr', version)
            run('wget {}'.format(url))

            info('Extracting archive...')
            with silent():
                run('tar xzf {}'.format(archive))

            solr_version_dir = os.path.splitext(archive)[0]
            solr_version_path = os.path.join('/usr', 'share', solr_version_dir)
            debian.chmod(solr_version_dir, 755, 'solr', 'solr', recursive=True)

            # Replace an existing install of the same version
            if files.exists(solr_version_path):
                info('Found same existing version, removing it...')
                debian.rm(solr_version_path, recursive=True)

            debian.mv(solr_version_dir, '/usr/share/')
            debian.ln(solr_version_path, solr_home)
            debian.rm(archive)
def clone(url, branch=None, repository_path=None, **kwargs):
    """
    Clone repository and branch.

    :param url: Git url to clone
    :param branch: Branch to checkout
    :param repository_path: Destination
    :param kwargs: Not used but here for easier kwarg passing
    :return: (destination, got_cloned bool)
    """
    repository = parse_url(url, branch=branch)
    name = repository['name']
    branch = repository['branch']
    cloned = False
    if not repository_path:
        repository_path = os.path.join('.', name)

    if files.exists(os.path.join(repository_path, '.git')):
        info('Git repository already cloned: {}', name)
    else:
        info('Cloning {}@{} into {}', url, branch, repository_path)
        with silent('warnings'):
            output = run('git clone -b {branch} {remote} {name}'.format(
                branch=branch, remote=url, name=name))
            if output.return_code != 0:
                warn('Failed to clone repository "{}", probably permission denied!'.format(name))
                cloned = None
            else:
                cloned = True

    return repository_path, cloned
def configure():
    """
    Configure nginx and enable/disable sites
    """
    with sudo():
        # Upload templates
        uploads = blueprint.upload('./', nginx_root, {'num_cores': debian.nproc()})

        changes = []
        sites = blueprint.get('sites')

        # Disable previously enabled sites not configured sites-enabled
        if blueprint.get('auto_disable_sites', True):
            with silent():
                enabled_site_links = run('ls {}'.format(sites_enabled_path)).split()
            for link in enabled_site_links:
                link_name = os.path.splitext(link)[0]  # Without extension
                if link not in sites and link_name not in sites:
                    changes.append(disable(link, do_reload=False))

        # Enable sites from settings
        for site in sites:
            changes.append(enable(site, do_reload=False))

        # Reload nginx if new templates or any site has been enabled/disabled
        if uploads or any(changes):
            reload()
def add_fstab(filesystem=None, mount_point=None, type='auto', options='rw',
              dump='0', pazz='0'):
    """
    Add mount point configuration to /etc/fstab.
    If mount point already mounted on different file system then unmount.

    :param str filesystem: The partition or storage device to be mounted
    :param str mount_point: The mount point where <filesystem> is mounted to
    :param str type: The file system type (Default: auto)
    :param str options: Mount options of the filesystem (Default: rw)
    :param str dump: Used by the dump utility to decide when to make a backup, 0|1 (Default: 0)
    :param str pazz: Used by fsck to decide which order filesystems are to be checked (Default: 0)
    """
    with sudo():
        fstab_line = '{fs} {mount} {type} {options} {dump} {pazz}'.format(
            fs=filesystem, mount=mount_point, type=type,
            options=options, dump=dump, pazz=pazz)

        validate_boot_options(options)

        # Add mount to /etc/fstab if not already there (?)
        with silent():
            fstab = run('cat /etc/fstab').stdout
        if fstab_line not in fstab.split('\n'):  # TODO: Handle comments
            info('Adding fstab: {} on {}', filesystem, mount_point)
            fabric.contrib.files.append('/etc/fstab', fstab_line, use_sudo=True)

        # Unmount any previous mismatching mount point
        mounted_file_system = get_mount(mount_point)
        if mounted_file_system and mounted_file_system != filesystem:
            unmount(mount_point)
def reset(branch, repository_path=None, **kwargs):
    """
    Fetch, reset, clean and checkout repository branch.

    :param branch: Remote branch to reset to
    :param repository_path: Repository path (defaults to remote working dir)
    :param kwargs: Not used but here for easier kwarg passing
    :return: commit
    """
    if not repository_path:
        repository_path = debian.pwd()
    with cd(repository_path):
        name = os.path.basename(repository_path)
        info('Resetting git repository: {}@{}', name, branch)
        commands = [
            'git fetch origin',  # Fetch branches and tags
            'git reset --hard HEAD',  # Make hard reset to HEAD
            'git clean -fdx',  # Remove untracked files pyc, xxx~ etc
            'git checkout HEAD',  # Checkout HEAD
            'git reset refs/remotes/origin/{} --hard'.format(
                branch)  # Reset to branch
        ]
        with silent():
            output = run(' && '.join(commands))
        # Last output line looks like: "HEAD is now at <sha> <subject>".
        # BUG FIX: str.lstrip('HEAD is now at ') strips any of those
        # *characters* from the left — not the prefix — and could eat
        # leading commit-hash characters (a/d/e/t/...). Slice the literal
        # prefix off instead, as the guarded reset() variant does.
        output = output.split(os.linesep)[-1]
        prefix = 'HEAD is now at '
        if output.startswith(prefix):
            output = output[len(prefix):]
        commit = output.split()[0]
        info('HEAD is now at: {}', output)
    return commit
def configure():
    """
    Enable/disable configured programs
    """
    with sudo():
        # Upload templates
        uploads = blueprint.upload("init/", "/etc/init/")
        uploads.extend(blueprint.upload("supervisord.conf", "/etc/"))

        changes = []
        programs = blueprint.get("programs") or []
        auto_disable = blueprint.get("auto_disable_programs", True)

        # Disable previously enabled programs not configured programs-enabled
        if auto_disable:
            with silent():
                links = run("ls {}".format(programs_enabled_path)).split()
            for link in links:
                # Compare both with and without extension
                link_name = os.path.splitext(link)[0]
                if link not in programs and link_name not in programs:
                    changes.append(disable(link, do_reload=False))

        # Enable programs from settings
        for program in programs:
            changes.append(enable(program, do_reload=False))

        # Reload supervisor if new templates or any program has been enabled/disabled
        if uploads or any(changes):
            reload()
def enable(program, do_reload=True):
    """
    Enable program.

    :param program: Program to enable
    :param do_reload: Reload supervisor
    :return: Got enabled?
    """
    enabled = False
    if not (program.endswith(".conf") or program == "default"):
        program = "{}.conf".format(program)
    with sudo():
        available_program = os.path.join(programs_available_path, program)
        if not files.exists(available_program):
            warn("Invalid program: {}".format(program))
        else:
            with cd(programs_enabled_path):
                if not files.exists(program):
                    info("Enabling program: {}", program)
                    with silent():
                        debian.ln(available_program, program)
                    enabled = True
                    if do_reload:
                        reload()
    return enabled
def enable(site, do_reload=True):
    """
    Enable site

    :param site: Site to enable
    :param do_reload: Reload nginx service
    :return: Got enabled?
    """
    enabled = False
    needs_suffix = not site.endswith('.conf') and site != 'default'
    if needs_suffix:
        site = '{}.conf'.format(site)
    with sudo():
        available_site = os.path.join(sites_available_path, site)
        if not files.exists(available_site):
            warn('Invalid site: {}'.format(site))
        else:
            with cd(sites_enabled_path):
                if not files.exists(site):
                    info('Enabling site: {}', site)
                    with silent():
                        debian.ln(available_site, site)
                    enabled = True
                    if do_reload:
                        reload()
    return enabled
def configure_server(config, auto_disable_conf=True, **context):
    """ Upload logstash server templates and sync enabled conf links.

    :param config: Mapping of weight => conf name
    :param auto_disable_conf: Disable conf links not present in config
    :param context: Extra template context (ssl/elasticsearch defaults applied)
    :return: True if anything was uploaded or toggled
    """
    context.setdefault('use_ssl', True)
    context.setdefault('elasticsearch_host', '127.0.0.1')
    uploads = blueprint.upload('./server/', '/etc/logstash/', context)

    changes = []

    # Disable previously enabled conf not configured through config in settings
    if auto_disable_conf:
        with silent():
            enabled_conf_links = run('ls {}'.format(conf_enabled_path)).split()
        conf_prospects = [
            '{}-{}.conf'.format(str(weight).zfill(2), conf)
            for weight, conf in config.iteritems()
        ]
        for link in enabled_conf_links:
            if link not in conf_prospects:
                changes.append(disable(link, do_restart=False))

    # Enable conf from settings
    for weight, conf in config.iteritems():
        changes.append(enable(conf, weight, do_restart=False))

    return bool(uploads or any(changes))
def configure():
    """
    Configure nginx and enable/disable sites
    """
    with sudo():
        # Upload templates
        context = {
            'num_cores': debian.nproc()
        }
        uploads = blueprint.upload('./', nginx_root, context)

        changes = []
        sites = blueprint.get('sites')
        auto_disable_sites = blueprint.get('auto_disable_sites', True)

        # Disable previously enabled sites not configured sites-enabled
        if auto_disable_sites:
            with silent():
                links = run('ls {}'.format(sites_enabled_path)).split()
            for link in links:
                # Compare both with and without extension
                link_name = os.path.splitext(link)[0]
                if link not in sites and link_name not in sites:
                    changes.append(disable(link, do_reload=False))

        # Enable sites from settings
        for site in sites:
            changes.append(enable(site, do_reload=False))

        # Reload nginx if new templates or any site has been enabled/disabled
        if uploads or any(changes):
            reload()
def enable(conf, weight, do_restart=True):
    """
    Enable logstash input/output provider

    :param conf: Input or output provider config file
    :param weight: Weight of provider
    :param do_restart: Restart service
    :return: Got enabled?
    """
    enabled = False
    if not conf.endswith('.conf'):
        conf = '{}.conf'.format(conf)
    with sudo():
        available_conf = os.path.join(conf_available_path, conf)
        if not files.exists(available_conf):
            warn('Invalid conf: {}'.format(conf))
        else:
            with cd(conf_enabled_path):
                weight = str(weight).zfill(2)
                conf = '{}-{}'.format(weight, conf)
                if not files.exists(conf):
                    info('Enabling conf: {}', conf)
                    with silent():
                        debian.ln(available_conf, conf)
                    enabled = True
                    if do_restart:
                        restart('server')
    return enabled
def dump(schema=None, ignore_tables=''):
    """
    Dump and download a schema.

    :param schema: Specific schema to dump and download.
    :param ignore_tables: Tables to skip, separated by | (pipe)
    """
    if not schema:
        # Interactively pick one of the configured schemas
        schemas = blueprint.get('schemas', {}).keys()
        for i, schema in enumerate(schemas, start=1):
            print("{i}. {schema}".format(i=i, schema=schema))
        valid_indices = '[1-{}]+'.format(len(schemas))
        schema_choice = prompt('Select schema to dump:', default='1',
                               validate=valid_indices)
        schema = schemas[int(schema_choice) - 1]

    now = datetime.now().strftime('%Y-%m-%d')
    output_file = '/tmp/{}_{}.backup.gz'.format(schema, now)
    filename = os.path.basename(output_file)

    info('Dumping schema {}...', schema)
    # BUG FIX: ''.split('|') == [''] which produced a broken
    # '--ignore-table=<schema>.' flag when ignore_tables was left at its
    # default; skip empty entries.
    extra_args = [
        '--ignore-table={}.{}'.format(schema, table)
        for table in ignore_tables.split('|')
        if table
    ]
    dump_cmd = 'mysqldump {} {} | gzip > {}'.format(schema, ' '.join(extra_args),
                                                    output_file)
    run('sudo su root -c "{}"'.format(dump_cmd))

    info('Downloading dump...')
    local_file = '~/%s' % filename
    # BUG FIX: fabric.contrib.files has no get(); the download helper lives
    # in fabric.operations (a.k.a. fabric.api.get).
    fabric.operations.get(output_file, local_file)

    with sudo(), silent():
        debian.rm(output_file)

    info('New smoking hot dump at {}', local_file)
def install_solr():
    """ Download and install the configured Solr version under /usr/share. """
    with sudo():
        version = blueprint.get("version")
        version_tuple = tuple(map(int, version.split(".")))

        # Archives before 4.1.0 carry an "apache-" prefix
        archive = "solr-{}.tgz".format(version)
        if version_tuple < (4, 1, 0):
            archive = "apache-{}".format(archive)
        url = "https://archive.apache.org/dist/lucene/solr/{}/{}".format(version, archive)

        with cd("/tmp"):
            info("Download {} ({})", "Solr", version)
            run("wget {}".format(url))

            info("Extracting archive...")
            with silent():
                run("tar xzf {}".format(archive))

            solr_version_dir = os.path.splitext(archive)[0]
            solr_version_path = os.path.join("/usr", "share", solr_version_dir)
            debian.chmod(solr_version_dir, 755, "solr", "solr", recursive=True)

            # Replace an existing install of the same version
            if files.exists(solr_version_path):
                info("Found same existing version, removing it...")
                debian.rm(solr_version_path, recursive=True)

            debian.mv(solr_version_dir, "/usr/share/")
            debian.ln(solr_version_path, solr_home)
            debian.rm(archive)
def install_system_dependencies():
    """
    Install system wide packages that application depends on.
    """
    with sudo(), silent():
        info('Install system dependencies')
        system_dependencies = blueprint.get('system_dependencies')
        if not system_dependencies:
            return

        dependencies = []
        repositories = []
        ppa_dependencies = []

        # Split "package@ppa" entries from plain packages
        for dependency in system_dependencies:
            dep, _, rep = dependency.partition('@')
            if rep:
                if rep not in repositories:
                    repositories.append(rep)
                ppa_dependencies.append(dep)
            elif dep not in dependencies:
                dependencies.append(dep)

        debian.apt_get_update()
        debian.apt_get('install', *dependencies)

        # Install PPA-backed packages after adding their repositories
        if repositories:
            for repository in repositories:
                debian.add_apt_repository(repository, src=True)
            debian.apt_get_update()
            debian.apt_get('install', *ppa_dependencies)
def set_timezone(timezone):
    """
    Set OS timezone

    :param timezone: Europe/Stockholm
    """
    with silent():
        cmd = 'ln -sf /usr/share/zoneinfo/{} /etc/localtime'
        run(cmd.format(timezone))
def flush():
    """
    Delete all cached keys
    """
    info('Flushing Memcached...')
    with sudo(), silent():
        run('echo "flush_all" | /bin/netcat -q 2 127.0.0.1 11211')
    info('Down the drain!')
def unmount(mount_point):
    """
    Unmount mount point.

    :param str mount_point: Name of mount point to unmount
    """
    with sudo(), silent():
        info('Unmounting {}', mount_point)
        run('umount {}'.format(mount_point))
def reload(self, vassals=None):
    """
    Touch reload specified vassals

    :param vassals: Vassals to reload
    """
    for vassal_ini in vassals or self.list_vassals():
        ini_path = os.path.join(self.get_config_path(), vassal_ini)
        with sudo(), silent():
            run('touch {}'.format(ini_path))
def mkdir(location, recursive=True, mode=None, owner=None, group=None):
    """ Create directory if missing, optionally setting mode/owner/group. """
    with silent(), sudo():
        mode_flag = mode and '-m %s' % mode or ''
        recurse_flag = recursive and '-p' or ''
        result = run('test -d "%s" || mkdir %s %s "%s"' % (
            location, mode_flag, recurse_flag, location))
        if not result.succeeded:
            raise Exception('Failed to create directory %s, %s' % (location,
                                                                   result.stdout))
        if owner or group:
            chmod(location, owner=owner, group=group)
def configure():
    """
    Install crontab per template (user)
    """
    with sudo(), silent():
        with debian.temporary_dir(mode=555) as temp_dir:
            # Each uploaded template file is named after its target user
            for update in blueprint.upload('./', temp_dir):
                user = os.path.basename(update)
                info('Installing new crontab for {}...', user)
                run('crontab -u {} {}'.format(user, os.path.join(temp_dir, user)))
def ctl(command, program=""):
    """
    Run supervisorctl :[command],[program]

    :param command: The command to run
    :param program: The program to run command against
    """
    with silent():
        output = supervisorctl(command, program=program)
    # Echo supervisorctl's output without the host prefix
    with hide_prefix():
        info(output)
def ctl(command, program=''):
    """
    Run supervisorctl :[command],[program]

    :param command: The command to run
    :param program: The program to run command against
    """
    with silent():
        result = supervisorctl(command, program=program)
    # Echo supervisorctl's output without the host prefix
    with hide_prefix():
        info(result)
def kill(sig, process, use_pkill=False):
    """ Send a signal to process(es) by name via kill or pkill. """
    with sudo():
        with silent('warnings'):
            if use_pkill:
                output = run('pkill -{} {}'.format(sig, process))
            else:
                output = run('kill -{} {}'.format(sig, process))
            if output.return_code == 0:
                info('Successfully sent {} signal to {}', sig, process)
            else:
                warn('No process got {} signal'.format(sig))
def list(*values):
    """
    List sysctl values, e.g. vm.swappiness,vm.panic_on_oom'
    """
    with sudo(), silent():
        if values:
            for value in values:
                info(run('sysctl %s' % value))
        else:
            # No filter given — show everything
            for key in run('sysctl -a').split('\n'):
                info(key)
def mktemp(directory=False, mode=None):
    """ Create a remote temp file (or directory) and return its path. """
    with silent(), sudo():
        cmd = 'mktemp -d' if directory else 'mktemp'
        path = run(cmd).stdout
        if directory:
            # Mark directories with a trailing separator
            path += os.path.sep
        if mode:
            chmod(path, mode=mode)
        return path
def fifo(vassal_name, command):
    """
    Issue FIFO commands to a vassal.

    :param vassal_name: The vassal to command
    :param command: The FIFO command to issue

    See: http://uwsgi-docs.readthedocs.org/en/latest/MasterFIFO.html
    """
    fifo_file = '/run/uwsgi/fifo-{}'.format(vassal_name)
    with sudo(), silent():
        run('echo {} > {}'.format(command, fifo_file))
def reload(vassal_path=None):
    """
    Reload uwsgi or reload specific vassal @ path, via touch.

    :param vassal_path: The absolute path to vassal ini to reload.
                        If not given, the uwsgi service will reload
    """
    if not vassal_path:
        debian.service('uwsgi', 'reload', check_status=False)
        return

    vassal_name = os.path.splitext(os.path.basename(vassal_path))[0]
    with sudo(), silent():
        info('Reloading {} uWSGI vassal', vassal_name)
        run('touch {}'.format(vassal_path))
def configure():
    """
    Install incrontab per template (i.e. user)
    """
    with sudo(), silent():
        updates = blueprint.upload('./', '/etc')
        # Each uploaded template file is named after its target user
        users = [os.path.basename(update) for update in updates]
        put(StringIO('\n'.join(users)), '/etc/incron.allow', use_sudo=True)
        for user in users:
            info('Installing new incrontab for {}...', user)
            run('incrontab -u {} {}'.format(
                user, os.path.join('/etc/incron.usertables', user)))
def current_tag(repository_path=None):
    """
    Get most recent tag

    :param repository_path: Repository path
    :return: The most recent tag
    """
    if not repository_path:
        repository_path = debian.pwd()
    with cd(repository_path), silent():
        # Output looks like: 20141114.1-306-g72354ae-dirty
        described = run('git describe --long --tags --dirty --always', pty=False)
    return described.strip().rsplit('-', 2)[0]
def get_mount(mount_point):
    """
    Resolve a mount point to mounted file system. If not mounted, return None.

    :param str mount_point: Name of mount point to resolve
    :return str: Mounted file system
    """
    with silent('warnings'):
        output = run('egrep ".+ {} .+" /proc/mounts'.format(mount_point))
        # egrep exits non-zero when nothing matched -> implicit None
        if output.return_code == 0:
            return output.stdout.split()[0]
def get_commit(repository_path=None, short=False):
    """
    Get current checked out commit for cloned repository path.

    :param repository_path: Repository path
    :param short: Format git commit hash in short (7) format
    :return: Commit hash
    """
    if not repository_path:
        repository_path = debian.pwd()
    with cd(repository_path), silent():
        commit = run('git rev-parse HEAD').strip()
    return commit[:7] if short else commit
def reload(program=None):
    """
    Reload supervisor or reload program(s), via SIGHUP

    :param program: The program to reload (all|exact|pattern).
                    If not given, supervisor service will reload
    """
    if not program:
        service('reload')
        return

    with silent():
        if program == 'all':
            program = ''
        output = supervisorctl('status', program=program)
        if output.return_code == 0:
            # Status rows carry the pid as "pid NNNN," in the 4th column
            pids = [
                line.split()[3][:-1]
                for line in output.stdout.split('\n')
            ]
            for pid in pids:
                debian.sighup(pid)
def get_user(name):
    """ Look up a user in /etc/passwd and /etc/shadow; None if not found. """
    with silent():
        d = run("cat /etc/passwd | egrep '^%s:' ; true" % name, user='******')
        s = run("cat /etc/shadow | egrep '^%s:' | awk -F':' '{print $2}'" % name, user='******')
    results = {}
    if d:
        fields = d.split(':')
        assert len(fields) >= 7, "/etc/passwd entry is expected to have at least 7 fields, " \
            "got %s in: %s" % (len(fields), ':'.join(fields))
        results = dict(name=fields[0], uid=fields[2], gid=fields[3],
                       home=fields[5], shell=fields[6])
    if s:
        results['passwd'] = s
    return results or None
def mount(mount_point, owner=None, group=None, **fstab):
    """
    Mount and optionally add configuration to fstab.

    :param str mount_point: Name of mount point
    :param str owner: Name of mount point owner
    :param str group: Name of mount point group
    :param dict fstab: Optional kwargs passed to add_fstab()
    """
    with sudo():
        if fstab:
            add_fstab(mount_point=mount_point, **fstab)

        # Mount, unless already mounted
        if not is_mounted(mount_point):
            # Ensure mount point dir exists
            mkdir(mount_point, owner=owner, group=group, mode=755)
            with silent():
                info('Mounting {}', mount_point)
                run('mount {}'.format(mount_point))
def install_solr():
    """ Download, extract and install the configured Solr version into solr_home. """
    with sudo():
        version = blueprint.get('version')
        version_tuple = tuple(map(int, version.split('.')))

        # Archives before 4.1.0 carry an "apache-" prefix
        archive = 'solr-{}.tgz'.format(version)
        if version_tuple < (4, 1, 0):
            archive = 'apache-{}'.format(archive)
        url = 'https://archive.apache.org/dist/lucene/solr/{}/{}'.format(
            version, archive)

        with cd('/tmp'):
            info('Download {} ({})', 'Solr', version)
            run('wget {}'.format(url))
            info('Extracting archive...')
            with silent():
                run('tar xzf {}'.format(archive))
            debian.mv(os.path.splitext(archive)[0], solr_home)
            debian.chmod(solr_home, 755, 'solr', 'solr', recursive=True)
            debian.rm(archive)
def add_fstab(filesystem=None, mount_point=None, type='auto', options='rw',
              dump='0', pazz='0'):
    """
    Add mount point configuration to /etc/fstab.
    If mount point already mounted on different file system then unmount.

    :param str filesystem: The partition or storage device to be mounted
    :param str mount_point: The mount point where <filesystem> is mounted to
    :param str type: The file system type (Default: auto)
    :param str options: Mount options of the filesystem (Default: rw)
    :param str dump: Used by the dump utility to decide when to make a backup, 0|1 (Default: 0)
    :param str pazz: Used by fsck to decide which order filesystems are to be checked (Default: 0)
    """
    with sudo():
        fstab_line = '{fs} {mount} {type} {options} {dump} {pazz}'.format(
            fs=filesystem,
            mount=mount_point,
            type=type,
            options=options,
            dump=dump,
            pazz=pazz)

        # Add mount to /etc/fstab if not already there (?)
        with silent():
            current_fstab = run('cat /etc/fstab').stdout
        if fstab_line not in current_fstab.split('\n'):  # TODO: Handle comments
            info('Adding fstab: {} on {}', filesystem, mount_point)
            fabric.contrib.files.append('/etc/fstab', fstab_line, use_sudo=True)

        # Unmount any previous mismatching mount point
        mounted_file_system = get_mount(mount_point)
        if mounted_file_system and mounted_file_system != filesystem:
            unmount(mount_point)