def deploy(id=None, silent=False):
    """
    Perform an automatic deploy to the target requested.

    :param id: target revision to update to; defaults to the head of the
        current branch when omitted
    :param silent: when True, skip the interactive deploy confirmation
    """
    require('hosts')
    require('code_dir')

    # Show log of changes, return if nothing to do
    revset = show_log(id)
    if not revset:
        return

    # See if we have any requirements changes
    requirements_changes = changed_files(revset, r' requirements/')
    if requirements_changes:
        print colors.yellow("Will update requirements (and do migrations):")
        print indent(requirements_changes)

    # See if we have any changes to migrations between the revisions we're applying
    migrations = migrate_diff(revset=revset, silent=True)
    if migrations:
        print colors.yellow("Will apply %d migrations:" % len(migrations))
        print indent(migrations)

    # Everything is known at this point; let the operator confirm before touching the server
    if not silent:
        request_confirm("deploy")

    hg_update(id)
    if requirements_changes:
        update_requirements()

    # Requirements changes also trigger migrate, since new packages may ship migrations
    if migrations or requirements_changes:
        migrate(silent=True)

    collectstatic()
    restart_server(silent=True)
def migrate_diff(id=None, revset=None, silent=False):
    """
    Check for migrations needed when updating to the given revision.

    :param id: target revision; mutually exclusive with ``revset``
    :param revset: explicit revset of incoming changes; mutually exclusive
        with ``id``
    :param silent: when True, don't print the migrations found
    :return: list of changed migration files
    """
    require('code_dir')

    # Exactly one of id and revset must be given
    assert (id or revset) and not (id and revset)

    # no revset given, calculate it by using deployment_list
    if not revset:
        result = vcs.deployment_list(id)
        if 'revset' not in result:
            print(result['message'])
            abort('Nothing to do')
        else:
            revset = result['revset']

    # Pull out migrations. Raw string: "\/" and "\w" are invalid escape
    # sequences in a plain string literal (DeprecationWarning, and an error
    # in future Pythons); the regex bytes are unchanged.
    migrations = vcs.changed_files(revset, r"\/(?P<model>\w+)\/migrations\/(?P<migration>.+)")

    if not silent and migrations:
        # Single-argument print() form works under both Python 2 and 3
        print("Found %d migrations." % len(migrations))
        print(indent(migrations))

    return migrations
def display_command(command): """ Print command function's docstring, then exit. Invoked with -d/--display. """ # Sanity check command = command.replace('-', '_') cmd_string = command.replace('_', '-') if command not in commands: abort("Command '%s' not found, exiting." % cmd_string) cmd = commands[command] # figure out arg spec while hasattr(cmd, '_decorated'): # descend through decorators cmd = cmd._decorated argspec = inspect.getargspec(cmd) if filter(lambda x: x, argspec): args = "Arguments: " + inspect.formatargspec(*argspec)[1:-1] else: args = "Arguments: None" # Print out nicely presented docstring if found if cmd.__doc__: print("Displaying detailed information for command '%s':" % cmd_string) print(indent(args)) print('') print(indent(cmd.__doc__, strip=True)) print('') # Or print notice if not else: print("No detailed information available for command '%s':" % cmd_string) print(indent(args)) sys.exit(0)
def info():
    """Display information about the project configuration."""
    puts("Django project for site '%s' located at '%s':" % (SITE_NAME, PROJECT_ROOT))
    # Each setting is printed indented under the project header
    config_lines = [
        'PROJECT_ENVIRONMENT = %s' % PROJECT_ENVIRONMENT,
        'DJANGO_SETTINGS_MODULE = %s' % os.environ.get('DJANGO_SETTINGS_MODULE', ''),
        'STATIC_ROOT = %s' % STATIC_ROOT,
    ]
    for line in config_lines:
        puts(indent(line, 4))
def test_admin_group(dest_dir):
    """
    Check that the realm's run.php defines the expected admin group id (5140).

    :param dest_dir: realm destination directory, resolved via realm_path()
    """
    source_dir = '%s/run.php' % realm_path(dest_dir)
    # expect_fail=True: grep exiting non-zero (no match) is the failure case
    test_config('grep "$adminGroup = 5140;" %s' % source_dir,\
        'Checking if admin group is set',\
        'Update site`s administration group in %s' % source_dir, expect_fail=True)
    print indent('HINT: check if proper group is set in %s' % source_dir, 6)
def deploy(id=None, silent=False, force=False): """ Perform an automatic deploy to the target requested. """ require('hosts') require('code_dir') # Ask for sudo at the begginning so we don't fail during deployment because of wrong pass if not sudo('whoami'): abort('Failed to elevate to root') return # Show log of changes, return if nothing to do revset = show_log(id) if not revset and not force: return # See if we have any requirements changes requirements_changes = force or vcs.changed_files(revset, r' requirements/') if requirements_changes: print colors.yellow("Will update requirements (and do migrations):") print indent(requirements_changes) # See if we have any changes to migrations between the revisions we're applying migrations = force or migrate_diff(revset=revset, silent=True) if migrations: print colors.yellow("Will apply %d migrations:" % len(migrations)) print indent(migrations) # See if we have any changes to crontab config crontab_changed = force or vcs.changed_files(revset, r'deploy/crontab.conf') if crontab_changed: print colors.yellow("Will update cron entries") # see if nginx conf has changed if vcs.changed_files(revset, r' deploy/%s' % env.nginx_conf): print colors.red("Warning: Nginx configuration change detected, also run: `fab %target% nginx_update`") if not silent: request_confirm("deploy") vcs.update(id) if requirements_changes: update_requirements() if migrations or requirements_changes: migrate(silent=True) if crontab_changed: with cd(env.code_dir): sudo('cp deploy/crontab.conf /etc/cron.d/arhitektuurkolm') collectstatic() restart_server(silent=True) # Run deploy systemchecks check()
def _nested_list(mapping, level=1):
    """Recursively render tasks and collections in *mapping* as indented lines."""
    pad = level * 4
    tasks, collections = _sift_tasks(mapping)
    # Plain tasks are listed before any nested collections
    lines = [indent(task, spaces=pad) for task in tasks]
    for name in collections:
        # Section/module "header", then its contents one level deeper
        lines.append(indent(name + ":", spaces=pad))
        lines.extend(_nested_list(mapping[name], level + 1))
    return lines
def test_indent_with_strip():
    """Generator test: indent(strip=True) normalizes leading whitespace."""
    for description, input, output in (
        ("Sanity check: 1 line string", indent('Test', strip=True), '    Test'),
        ("Check list of strings", indent(["Test", "Test"], strip=True), '    Test\n    Test'),
        ("Check list of strings", indent(["  Test", "  Test"], strip=True), '    Test\n    Test'),
    ):
        # nose uses eq_.description as the displayed name of each yielded case
        eq_.description = "indent(strip=True): %s" % description
        yield eq_, input, output
        del eq_.description
def show_log(id=None):
    """
    List revisions to apply/unapply when updating to given revision.

    When no revision is given, it default to the head of current branch.

    Returns False when there is nothing to apply/unapply, otherwise the revset
    of revisions that will be applied or unapplied (this can be passed to
    hg status to see which files changed for example).
    """
    require('code_dir')

    def run_hg_log(revset):
        """ Returns lines returned by hg log, as a list (one revision per line). """
        result = sudo("hg log --template '{rev}:{node|short} {branch} {desc|firstline}\\n' -r '%s'" % revset)
        return result.split('\n') if result else []

    def get_revset(x, y):
        # Build an hg revset expression spanning from revision x to revision y.
        assert x or y
        if x and y:
            # All revisions that are descendants of the current revision and ancestors of the target revision
            # (inclusive), but not the current revision itself
            return '%s::%s' % (x, y)
        else:
            # All revisions that are in the current branch, are descendants of the current revision and are not the
            # current revision itself.
            return 'branch(p1()) and %s::%s' % (x or '', y or '')

    with cd(env.code_dir), hide('running', 'stdout'):
        # First do hg pull
        hg_pull()

        # Look forwards first: from the working copy ('.') to the target
        revset = get_revset('.', id)
        revisions = run_hg_log(revset)
        if len(revisions) > 1:
            # Target is forward of the current rev; first entry is '.' itself, skip it
            print "Revisions to apply:"
            print indent(revisions[1:])
            return revset
        elif len(revisions) == 1:
            # Current rev is the same as target
            print "Already at target revision"
            return False

        # Check if target is backwards of the current rev
        revset = get_revset(id, '.')
        revisions = run_hg_log(revset)
        if revisions:
            # Reversed so they read in the order they will be unapplied
            print "Revisions to _un_apply:"
            print indent(reversed(revisions[1:]))
            return revset
        else:
            print "Target revision is not related to the current revision"
            return False
def merge(hosts, roles, exclude, roledefs):
    """
    Merge given host and role lists into one list of deduped hosts.
    """
    # Bail out early if any requested role is undefined
    missing = [role for role in roles if role not in roledefs]
    if missing:
        abort("The following specified roles do not exist:\n%s" % (
            indent(missing)
        ))

    # Expand each role into its hosts, invoking "lazy" (callable) roledefs
    expanded = []
    for role in roles:
        definition = roledefs[role]
        if callable(definition):
            definition = definition()
        expanded += definition

    # Normalize whitespace on every host string
    cleaned = [host.strip() for host in list(hosts) + list(expanded)]

    # Unless the user disabled deduping, drop repeats and excluded hosts while
    # preserving the original ordering (set() could lose it)
    if not state.env.dedupe_hosts:
        return cleaned
    result = []
    for host in cleaned:
        if host not in result and host not in exclude:
            result.append(host)
    return result
def create_config(self):
    """
    Render this node's yabgp.ini into self.config_dir and echo it to stdout.

    Always writes the [DEFAULT] and [message] sections; adds a [bgp] section
    when a peer is configured.
    """
    # Currently, supports only single peer
    c = CmdBuffer('\n')
    c << '[DEFAULT]'
    c << 'log_dir = {0}'.format(self.SHARED_VOLUME)
    c << 'use_stderr = False'
    c << '[message]'
    c << 'write_disk = True'
    c << 'write_dir = {0}/data/bgp/'.format(self.SHARED_VOLUME)
    c << 'format = json'

    if self.peers:
        # Only the first peer is rendered (single-peer support only)
        info = next(iter(self.peers.values()))
        remote_as = info['remote_as']
        # Addresses may carry a /prefix suffix; strip it for the config
        neigh_addr = info['neigh_addr'].split('/')[0]
        local_as = info['local_as'] or self.asn
        local_addr = info['local_addr'].split('/')[0]
        c << '[bgp]'
        c << 'afi_safi = ipv4, ipv6, vpnv4, vpnv6, flowspec, evpn'
        c << 'remote_as = {0}'.format(remote_as)
        c << 'remote_addr = {0}'.format(neigh_addr)
        c << 'local_as = {0}'.format(local_as)
        c << 'local_addr = {0}'.format(local_addr)

    with open('{0}/yabgp.ini'.format(self.config_dir), 'w') as f:
        # Echo the generated config so test logs show exactly what was written
        print(colors.yellow('[{0}\'s new yabgp.ini]'.format(self.name)))
        print(colors.yellow(indent(str(c))))
        f.writelines(str(c))
def configure_web(self):
    """
    Render and upload web.ini vassal to <project>.ini.

    :return: Updated vassals
    """
    destination = self.get_config_path()
    context = self.get_context()
    ini = self.get_web_vassal()
    template = os.path.join('uwsgi', ini)
    default_templates = uwsgi.blueprint.get_default_template_root()

    # Search the default template root in addition to project templates
    with settings(template_dirs=[default_templates]):
        # Check if a specific web vassal have been created or use the default
        if template not in blueprint.get_template_loader().list_templates():
            # Upload default web vassal
            info(indent('...using default web vassal'))
            template = os.path.join('uwsgi', 'default', 'web.ini')
        uploads = blueprint.upload(template, os.path.join(destination, ini), context=context)
        if uploads:
            self.updates.extend(uploads)

        # Upload remaining (local) vassals
        user_vassals = blueprint.upload('uwsgi/', destination, context=context)  # TODO: skip subdirs
        if user_vassals:
            self.updates.extend(user_vassals)

    return self.updates
def deploy(auto_reload=True, force=False): """ Reset source to configured branch and install requirements, if needed :param bool auto_reload: Reload application providers if source has changed :param bool force: Force install of requirements :return bool: Source code has changed? """ # Reset git repo previous_commit, current_commit = update_source() code_changed = current_commit is not None and previous_commit != current_commit if use_virtualenv() and (code_changed or force): requirements = blueprint.get('requirements', 'requirements.txt') requirements_changed = False if not force: # Check if requirements has changed commit_range = '{}..{}'.format(previous_commit, current_commit) requirements_changed, _, _ = git.diff_stat(git_repository_path(), commit_range, requirements) # Install repo requirements.txt info('Install requirements {}', requirements) if requirements_changed or force: install_requirements() else: info(indent('(requirements not changed in {}...skipping)'), commit_range) if auto_reload: reload() return code_changed
def _handle_failure(message, exception=None):
    """
    Call `abort` or `warn` with the given message.

    The value of ``env.warn_only`` determines which method is called.

    If ``exception`` is given, it is inspected to get a string message, which
    is printed alongside the user-generated ``message``.
    """
    func = warn if env.warn_only else abort
    if exception is None:
        func(message)
        return
    # Figure out how to get a string out of the exception; EnvironmentError
    # subclasses, for example, "are" integers and .strerror is the string.
    # Others "are" strings themselves. May have to expand this further for
    # other error types.
    strerror = getattr(exception, 'strerror', None)
    underlying_msg = exception if strerror is None else strerror
    func("%s\n\nUnderlying exception message:\n%s" % (
        message,
        indent(underlying_msg)
    ))
def _handle_failure(message, exception=None):
    """
    Call `abort` or `warn` with the given message.

    The value of ``env.warn_only`` determines which method is called.

    If ``exception`` is given, it is inspected to get a string message, which
    is printed alongside the user-generated ``message``.
    """
    func = warn if env.warn_only else abort
    # Debug mode: attach a full traceback instead of just the message
    if output.debug:
        message += "\n\n" + format_exc()
    elif exception is not None:
        # EnvironmentError subclasses "are" integers with the text in
        # .strerror; most other exceptions "are" strings themselves. May have
        # to expand this further for other error types.
        strerror = getattr(exception, 'strerror', None)
        underlying = exception if strerror is None else strerror
        message += "\n\nUnderlying exception message:\n" + indent(underlying)
    return func(message)
def _merge(hosts, roles, exclude=()):
    """
    Merge given host and role lists into one list of deduped hosts.

    :param hosts: explicit host strings
    :param roles: role names to look up in ``state.env.roledefs``
    :param exclude: hosts to drop from the merged result
    :return: deduped list of hosts minus exclusions
    """
    # Abort if any roles don't exist
    bad_roles = [x for x in roles if x not in state.env.roledefs]
    if bad_roles:
        abort("The following specified roles do not exist:\n%s" % (
            indent(bad_roles)
        ))

    # Look up roles, turn into flat list of hosts
    role_hosts = []
    for role in roles:
        value = state.env.roledefs[role]
        # Handle "lazy" roles (callables)
        if callable(value):
            value = value()
        role_hosts += value

    # NOTE: default for ``exclude`` is an immutable tuple instead of the
    # previous mutable [] -- mutable defaults are shared between calls and
    # can leak state.
    merged_list = list(set(hosts + role_hosts))
    for exclude_host in exclude:
        if exclude_host in merged_list:
            merged_list.remove(exclude_host)

    # Return deduped combo of hosts and role_hosts
    return merged_list
def deploy(auto_reload=True, force=False): """ Reset source to configured branch and install requirements, if needed :return: Got new source? """ # Reset git repo previous_commit, current_commit = update_source() code_changed = current_commit is not None and previous_commit != current_commit if code_changed or force: requirements = blueprint.get('requirements', 'requirements.txt') requirements_changed = False if not force: # Check if requirements has changed commit_range = '{}..{}'.format(previous_commit, current_commit) requirements_changed, _, _ = git.diff_stat(git_repository_path(), commit_range, requirements) # Install repo requirements.txt info('Install requirements {}', requirements) if requirements_changed or force: install_requirements() else: info(indent('(requirements not changed in {}...skipping)'), commit_range) if auto_reload: reload() return code_changed
def maybe_install_requirements(previous_commit, current_commit, force=False, update_pip=False):
    """
    Install project requirements if they changed between two commits.

    :param previous_commit: commit hash before the source update
    :param current_commit: commit hash after the source update
    :param force: install regardless of whether requirements changed
    :param update_pip: also upgrade pip when installing
    """
    from .project import requirements_txt, git_repository_path

    installation_file = requirements_txt()
    installation_method = get_installation_method(installation_file)
    has_changed = False
    # Computed once here; previously it was redundantly recomputed inside the
    # else branch below.
    commit_range = '{}..{}'.format(previous_commit, current_commit)

    if not force:
        if installation_method == 'pip':
            # Fine-grained diff: report exactly which requirements changed
            has_changed, added, removed = diff_requirements(
                previous_commit, current_commit, installation_file)
            if has_changed:
                info('Requirements have changed, added: {}, removed: {}'.format(
                    ', '.join(added), ', '.join(removed)))
        else:
            # Fall back to checking if installation_file itself has changed
            has_changed, _, _ = git.diff_stat(
                git_repository_path(), commit_range, installation_file)

    if has_changed or force:
        install_requirements(installation_file, update_pip=update_pip)
    else:
        info(indent('(requirements not changed in {}...skipping)'), commit_range)
def _merge(hosts, roles, exclude=()):
    """
    Merge given host and role lists into one list of deduped hosts.

    :param hosts: explicit host strings
    :param roles: role names to look up in ``state.env.roledefs``
    :param exclude: hosts to skip in the merged result
    :return: ordered, deduped host list
    """
    # Abort if any roles don't exist
    bad_roles = [x for x in roles if x not in state.env.roledefs]
    if bad_roles:
        abort("The following specified roles do not exist:\n%s" % (
            indent(bad_roles)
        ))

    # Look up roles, turn into flat list of hosts
    role_hosts = []
    for role in roles:
        value = state.env.roledefs[role]
        # Handle "lazy" roles (callables)
        if callable(value):
            value = value()
        role_hosts += value

    # Return deduped combo of hosts and role_hosts, preserving order within
    # them (vs using set(), which may lose ordering) and skipping hosts to be
    # excluded.
    # NOTE: default for ``exclude`` is an immutable tuple instead of the
    # previous mutable [] -- mutable defaults are shared between calls and
    # can leak state.
    cleaned_hosts = _clean_hosts(list(hosts) + list(role_hosts))
    all_hosts = []
    for host in cleaned_hosts:
        if host not in all_hosts and host not in exclude:
            all_hosts.append(host)
    return all_hosts
def list_commands():
    """
    Print all found commands/tasks, then exit. Invoked with -l/--list.
    """
    print("Available commands:\n")
    # Want separator between name, description to be straight col
    max_len = reduce(lambda a, b: max(a, len(b)), commands.keys(), 0)
    sep = '  '
    trail = '...'
    names = sorted(commands.keys())
    for name in names:
        output = None
        # Print first line of docstring
        func = commands[name]
        if func.__doc__:
            # NOTE(review): relies on Python 2 filter() returning a
            # subscriptable list; a Python 3 filter object is not.
            lines = filter(None, func.__doc__.splitlines())
            first_line = lines[0].strip()
            # Truncate it if it's longer than N chars
            size = 75 - (max_len + len(sep) + len(trail))
            if len(first_line) > size:
                first_line = first_line[:size] + trail
            output = name.ljust(max_len) + sep + first_line
        # Or nothing (so just the name)
        else:
            output = name
        print(indent(output))
    sys.exit(0)
def migrate_diff(id=None, revset=None, silent=False):
    """
    Check for migrations needed when updating to the given revision.

    :param id: target revision; mutually exclusive with ``revset``
    :param revset: explicit revset to inspect; mutually exclusive with ``id``
    :param silent: when True, don't print the migrations found
    :return: list of changed migration files
    """
    require('code_dir')

    # Exactly one of id and revset must be given
    assert id or revset
    assert not (id and revset)

    if not revset:
        # hg ranges use '.::<id>', git ranges use '..<id>'
        revset = '.%s%s' % ('::' if get_repo_type() == 'hg' else '.', id)

    # Raw string: "\/" and "\w" are invalid escape sequences in a plain
    # string literal (DeprecationWarning, future error); regex is unchanged.
    migrations = changed_files(revset, r"\/(?P<model>\w+)\/migrations\/(?P<migration>.+)")
    if not silent and migrations:
        # Single-argument print() form works under both Python 2 and 3
        print("Found %d migrations." % len(migrations))
        print(indent(migrations))
    return migrations
def service(name, action, check_status=True, show_output=False):
    """
    Run ``service <name> <action>`` on the remote host as root.

    :param name: service name
    :param action: action passed to the service command (start/stop/...)
    :param check_status: when True, query current status first and skip the
        action if the query fails or already reports ``action``
    :param show_output: when True, always print the action command's output
    """
    c = fabric.context_managers
    # Hide fabric chatter; warn_only so non-zero exits don't abort the task
    with sudo('root'), c.settings(c.hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
        info('Service: {} {}', name, action)

        if check_status:
            output = run('service {} status'.format(name), pty=False, combine_stderr=True)
            if output.return_code != 0:
                # Status query failed -- show why and bail out
                puts(indent(magenta(output)))
                return
            elif action in output:
                # Already in the requested state; strip the leading
                # "<name> " prefix from the status line for display
                puts(indent('...has status {}'.format(magenta(output[len(name)+1:]))))
                return

        output = run('service {} {}'.format(name, action), pty=False, combine_stderr=True)
        if output.return_code != 0 or show_output:
            puts(indent(magenta(output)))
def handle_failure(cmd, warn_only):
    """Report a failed command: warn when *warn_only* is set, abort otherwise."""
    # Show callables as "name()" rather than their repr
    label = cmd.__name__ + '()' if hasattr(cmd, '__name__') else cmd
    message = 'Error running `%s`\n\n%s' % (label, indent(format_exc()))
    reporter = warn if warn_only else abort
    reporter(message)
def main():
    """ Entry point for mob, a dumb fab replacement """
    # Static registry of the tasks mob can run
    _commands = {
        'build_pybundle': build_pybundle,
        'build_venv': build_venv,
        'deploy': deploy,
        'reset_database': reset_database,
    }

    parser = OptionParser(usage=__USAGE__)
    parser.add_option('-l', '--list', action='store_true', dest='list_commands', help='list avaliable commands')
    (options, args) = parser.parse_args()
    st = Config()

    # Print list (Taken from fabric)
    if options.list_commands:
        print("Available commands:\n")
        # Want separator between name, description to be straight col
        max_len = reduce(lambda a, b: max(a, len(b)), _commands.keys(), 0)
        sep = '  '
        trail = '...'
        for name in sorted(_commands.keys()):
            output = None
            # Print first line of docstring
            func = _commands[name]
            if func.__doc__:
                # NOTE(review): relies on Python 2 filter() returning a list
                lines = filter(None, func.__doc__.splitlines())
                first_line = lines[0].strip()
                # Truncate it if it's longer than N chars
                size = 75 - (max_len + len(sep) + len(trail))
                if len(first_line) > size:
                    first_line = first_line[:size] + trail
                output = name.ljust(max_len) + sep + first_line
            # Or nothing (so just the name)
            else:
                output = name
            print(indent(output))
        sys.exit(0)

    # Make sure a command is supplied
    try:
        args[0]
    except IndexError:
        sys.stderr.write('You must specify an command!\n')
        sys.exit(1)

    # Make sure all commands are valied
    # (Python 2-only except syntax below)
    try:
        commands = [[i, _commands[i]] for i in args]
    except KeyError, e:
        sys.stderr.write('Command %s not found!\n' % e)
        sys.exit(1)
def test_indent():
    """Generator test: indent() on a plain string and on a list of strings."""
    for description, input, output in (
        ("Sanity check: 1 line string", 'Test', '    Test'),
        ("List of strings turns in to strings joined by \\n", ["Test", "Test"], '    Test\n    Test'),
    ):
        # nose uses eq_.description as the displayed name of each yielded case
        eq_.description = "indent(): %s" % description
        yield eq_, indent(input), output
        del eq_.description
def test_config(cmd, msg, hint, expect_fail=False):
    """
    Run a local shell check and print OK/ERROR with an optional hint.

    :param cmd: shell command to run locally
    :param msg: human-readable description of the check
    :param hint: remediation hint shown when the outcome is unexpected
    :param expect_fail: when True the command is expected to fail
    """
    response = green('OK   ')
    hint_tmp = ''
    # Suppress fabric output; warn_only lets us inspect failure ourselves
    with settings(hide('running', 'warnings', 'stdout', 'stderr'),\
            warn_only=True):
        result = local(cmd, capture=True)

    # The check is wrong when its failure state differs from the expectation
    if result.failed != expect_fail:
        response = red('ERROR')
        hint_tmp = '\n' + indent('HINT: %s' % hint, 6)

    print response, msg, hint_tmp
def show_log(commit_id=None):
    """
    List revisions to apply/unapply when updating to given revision.

    When no revision is given, it default to the head of current branch.

    Returns False when there is nothing to apply/unapply, otherwise the revset
    of revisions that will be applied or unapplied (this can be passed to
    `hg|git status` to see which files changed for example).
    """
    result = vcs_log(commit_id)

    # An informational message means there is nothing to do
    if 'message' in result:
        print(result['message'])
        return False

    # Show whichever direction of change applies (forwards wins if both)
    for key, header in (('forwards', "Revisions to apply:"),
                        ('backwards', "Revisions to rollback:")):
        if key in result:
            print(header)
            print(indent(result[key]))
            break

    return result['revset']
def update_source():
    """
    Update application repository with configured branch.

    :return: tuple(previous commit, current commit)
    """
    with sudo_project():
        # Get current commit
        path = git_repository_path()
        previous_commit = git.get_commit(path, short=True)

        # Update source from git (reset)
        repository = git_repository()
        current_commit = git.reset(repository['branch'], repository_path=path)

        # Report whether the reset actually moved the working tree
        if current_commit is not None and current_commit != previous_commit:
            info(indent('(new version)'))
        else:
            info(indent('(same commit)'))

    return previous_commit, current_commit
def online_deploy(id=None, silent=False):
    """
    Perform an online deploy to the target requested.

    :param id: target revision; defaults to the head of the current branch
    :param silent: when True, skip the interactive confirmation prompt
    """
    require('hosts')
    require('code_dir')

    # Show log of changes, return if nothing to do
    revset = show_log(id)
    if not revset:
        return

    # Preview the migrations the update will bring in
    migrations = migrate_diff(revset=revset, silent=True)
    if migrations:
        print colors.yellow("Will apply %d migrations:" % len(migrations))
        print indent(migrations)

    if not silent:
        request_confirm("online_deploy")

    vcs_update(id)
    migrate(silent=True)
    collectstatic()
    restart_server(silent=True)
def _merge(hosts, roles):
    """
    Merge given host and role lists into one list of deduped hosts.
    """
    # Abort if any roles don't exist
    unknown = [role for role in roles if role not in state.env.roledefs]
    if unknown:
        abort("The following specified roles do not exist:\n%s" % (indent(unknown)))

    # Look up roles, turn into flat list of hosts
    if roles:
        role_hosts = reduce(add, [state.env.roledefs[role] for role in roles])
    else:
        role_hosts = []

    # Return deduped combo of hosts and role_hosts
    return list(set(hosts + role_hosts))
def display_command(name):
    """
    Print command function's docstring, then exit. Invoked with -d/--display.
    """
    task = crawl(name, state.commands)
    # Unknown task: bail out with the list of valid names
    if task is None:
        valid = "\n".join(_normal_list(False))
        abort("Task '%s' does not appear to exist. Valid task names:\n%s" % (name, valid))

    docstring = task.__doc__
    if not docstring:
        # Nothing documented -- just print a notice and exit
        print("No detailed information available for task '%s':" % name)
        sys.exit(0)

    # Show the docstring, stripped and indented, framed by blank lines
    print("Displaying detailed information for task '%s':" % name)
    print('')
    print(indent(docstring, strip=True))
    print('')
    sys.exit(0)
def info(role=None):
    """
    Show recipe info

    :param role: role name whose parameters should be displayed
    """
    if not role:
        abort(colors.red("You need specify role name"))
    role_path = get_role_path(role)
    if not os.path.exists(role_path):
        abort(colors.red("Role <{0}> don't exists".format(role)))
    role_info = get_role_info(role)
    puts(colors.blue("Show <{0}> role params".format(role)))
    for key, value in sorted(role_info.iteritems(), key=lambda x: x[0]):
        # BUG FIX: ``indent=4`` was previously passed to str.format(), where
        # it was silently ignored; the indentation amount belongs to the
        # indent() helper (its keyword is ``spaces``).
        puts(colors.blue(indent(
            "{0}: {1}".format(key.strip("__").replace("_", " "), value),
            spaces=4)))
    puts("-" * 70)
def list_commands(docstring):
    """
    Print all found commands/tasks, then exit. Invoked with ``-l/--list.``

    If ``docstring`` is non-empty, it will be printed before the task list.
    """
    if docstring:
        # Ensure exactly one blank line between the docstring and the listing
        trailer = "\n" if not docstring.endswith("\n") else ""
        print(docstring + trailer)
    print("Available commands:\n")
    # Want separator between name, description to be straight col
    max_len = reduce(lambda a, b: max(a, len(b)), commands.keys(), 0)
    sep = '  '
    trail = '...'
    for name in _command_names():
        output = None
        # Print first line of docstring
        func = commands[name]
        # Commands flagged with __hide__ are omitted from the listing
        if hasattr(func, '__hide__'):
            continue
        name = name.replace('_', '-')
        if func.__doc__:
            # NOTE(review): relies on Python 2 filter() returning a list
            lines = filter(None, func.__doc__.splitlines())
            first_line = lines[0].strip()
            # Truncate it if it's longer than N chars
            size = 75 - (max_len + len(sep) + len(trail))
            if len(first_line) > size:
                first_line = first_line[:size] + trail
            output = name.ljust(max_len) + sep + first_line
        # Or nothing (so just the name)
        else:
            output = name
        print(indent(output))
    print
    if 'stages' in env:
        print 'Available environments:'
        print
        for stage in env.stages:
            print '  %s' % stage
    print
    call_hooks('listing.display')
    sys.exit(0)
def _create_config_zebra(self):
    """Render zebra.conf for this container's config dir and echo it."""
    c = CmdBuffer()
    c << 'hostname zebra'
    c << 'password zebra'
    # Per-interface settings come from the zebra config dict
    for name, settings in self.zebra_config.get('interfaces', {}).items():
        c << 'interface {0}'.format(name)
        for setting in settings:
            c << str(setting)
    for route in self.zebra_config.get('routes', []):
        c << str(route)
    c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
    c << 'debug zebra packet'
    c << 'debug zebra kernel'
    c << 'debug zebra rib'
    c << ''
    with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
        # Echo the generated config so test logs show exactly what was written
        print colors.yellow('[{0}\'s new zebra.conf]'.format(self.name))
        print colors.yellow(indent(str(c)))
        f.writelines(str(c))
def display_command(command):
    """
    Print command function's docstring, then exit. Invoked with -d/--display.
    """
    # Commands are registered with underscores but displayed with dashes
    command = command.replace('-', '_')
    cmd_string = command.replace('_', '-')
    if command not in commands:
        abort("Command '%s' not found, exiting." % cmd_string)
    cmd = commands[command]

    doc = cmd.__doc__
    if not doc:
        # Nothing documented -- just print a notice and exit
        print("No detailed information available for command '%s':" % cmd_string)
        sys.exit(0)

    # Show the docstring, stripped and indented, framed by blank lines
    print("Displaying detailed information for command '%s':" % cmd_string)
    print('')
    print(indent(doc, strip=True))
    print('')
    sys.exit(0)
def info(cookbook):
    """
    Show cookbook info

    :param cookbook: cookbook name
    """
    recipe_path = get_cookbook_dir(cookbook)
    if not os.path.exists(recipe_path):
        abort(colors.red("Recipe <{0}> don't exists".format(cookbook)))
    cookbook_info = get_cookbook_info(cookbook)
    puts(colors.blue("Show <{0}> recipe params".format(cookbook)))
    # Hoisted out of the loop: the skip list is invariant per call
    skip_keys = get_info_skip_list()
    for key, value in sorted(cookbook_info.iteritems(), key=lambda x: x[0]):
        if key not in skip_keys:
            # BUG FIX: ``indent=4`` was previously passed to str.format(),
            # where it was silently ignored; the indentation amount belongs
            # to the indent() helper (its keyword is ``spaces``).
            puts(
                colors.blue(
                    indent("{0}: {1}".format(key.strip("__").replace("_", " "),
                                             value),
                           spaces=4)))
    puts("-" * 70)
def _merge(hosts, roles):
    """
    Merge given host and role lists into one list of deduped hosts.
    """
    # Abort if any roles don't exist
    undefined = [r for r in roles if r not in state.env.roledefs]
    if undefined:
        abort("The following specified roles do not exist:\n%s" % (indent(undefined)))

    # Expand each role into hosts; "lazy" roles are callables producing lists
    role_hosts = []
    for role in roles:
        definition = state.env.roledefs[role]
        role_hosts += definition() if callable(definition) else definition

    # Return deduped combo of hosts and role_hosts
    return list(set(_clean_hosts(hosts + role_hosts)))
def merge(hosts, roles, exclude, roledefs):
    """
    Merge given host and role lists into one list of deduped hosts.
    """
    # Abort if any roles don't exist
    missing = [role for role in roles if role not in roledefs]
    if missing:
        abort("The following specified roles do not exist:\n%s" % (
            indent(missing)
        ))

    # Coerce strings to one-item lists
    if isinstance(hosts, six.string_types):
        hosts = [hosts]

    # Expand roles into a flat host list, handling dict-style roledefs and
    # "lazy" (callable) roledefs along the way
    role_hosts = []
    for role in roles:
        definition = roledefs[role]
        if isinstance(definition, dict):
            definition = definition['hosts']
        if callable(definition):
            definition = definition()
        role_hosts += definition

    # Strip whitespace from host strings.
    cleaned_hosts = [host.strip() for host in list(hosts) + list(role_hosts)]

    # Dedupe (preserving order, unlike set()) and drop excluded hosts --
    # unless the user has disabled deduping entirely.
    if not state.env.dedupe_hosts:
        return cleaned_hosts
    deduped = []
    for host in cleaned_hosts:
        if host not in deduped and host not in exclude:
            deduped.append(host)
    return deduped
def display_command(name, code=0):
    """
    Print command function's docstring, then exit. Invoked with -d/--display.
    """
    task = crawl(name, state.commands)
    # Dotted names are shown with spaces
    name = name.replace(".", " ")
    if task is None:
        abort("Task '%s' does not appear to exist. Valid task names:\n%s"
              % (name, "\n".join(_normal_list(False))))

    details = get_task_docstring(task)
    if details:
        # Show the docstring, stripped and indented, framed by blank lines
        print("Displaying detailed information for task '%s':" % name)
        print('')
        print(indent(details, strip=True))
        print('')
    else:
        print("No detailed information available for task '%s':" % name)
    sys.exit(code)
def _normal_list(docstrings=True):
    """
    Build the task listing as a list of indented display lines.

    :param docstrings: when True, append each task's first docstring line
    :return: list of formatted, indented lines
    """
    result = []
    task_names = _task_names(state.commands)
    # Want separator between name, description to be straight col
    max_len = reduce(lambda a, b: max(a, len(b)), task_names, 0)
    sep = '  '
    trail = '...'
    # Leave room for the trailing "..." within the terminal width
    max_width = _pty_size()[1] - 1 - len(trail)
    for name in task_names:
        docstring = _print_docstring(docstrings, name)
        if docstring:
            # NOTE(review): relies on Python 2 filter() returning a
            # subscriptable list; a Python 3 filter object is not.
            lines = filter(None, docstring.splitlines())
            first_line = lines[0].strip()
            # Truncate it if it's longer than N chars
            size = max_width - (max_len + len(sep) + len(trail))
            if len(first_line) > size:
                first_line = first_line[:size] + trail
            output = name.ljust(max_len) + sep + first_line
        # Or nothing (so just the name)
        else:
            output = name
        result.append(indent(output))
    return result
def display_command(name):
    """
    Print command function's docstring, then exit. Invoked with -d/--display.
    """
    task = crawl(name, state.commands)
    # Unknown task: abort with the list of valid names
    if task is None:
        abort("Task '%s' does not appear to exist. Valid task names:\n%s"
              % (name, "\n".join(_normal_list(False))))

    # Tasks may provide their own detail hook via __details__
    if hasattr(task, '__details__'):
        task_details = task.__details__()
    else:
        task_details = get_task_details(task)

    if not task_details:
        logging.debug("No detailed information available for task '%s':" % name)
        sys.exit(0)

    # Log the docstring, stripped and indented, framed by blank entries
    logging.debug("Displaying detailed information for task '%s':" % name)
    logging.debug('')
    logging.debug(indent(task_details, strip=True))
    logging.debug('')
    sys.exit(0)
def invalid_command_error(arguments):
    """Raise a NameError reporting the unrecognized command-line arguments."""
    joined = " ".join(arguments)
    raise NameError("Command not found:\n%s" % indent(joined))
def _error(message): print red(indent(message, spaces=4))
def deploy(id=None, silent=False, force=False, services=False, auto_nginx=True):
    """ Perform an automatic deploy to the target requested.

    NOTE: this is a cookiecutter template file — the {{ ... }} / {% ... %}
    fragments are rendered at project-generation time, not at runtime.

    :param id: VCS revision to deploy (None => latest).
    :param silent: skip the interactive confirmation prompt.
    :param force: deploy everything regardless of detected changes.
    :param services: also refresh service configuration.
    :param auto_nginx: apply nginx config changes automatically.
    """
    require('hosts')
    require('code_dir')

    if force:
        # NOTE(review): `force` is rebound to a colored banner string here;
        # every later `force or ...` then yields that string, so e.g.
        # `len(migrations)` below prints the banner's length, not a
        # migration count — confirm whether this is intended.
        force = colors.blue('FORCED DEPLOY')
        print '-' * 40
        print force
        print '-' * 40

    # Ask for sudo at the beginning so we don't fail during deployment because of wrong pass
    if not sudo('whoami'):
        abort('Failed to elevate to root')
        return

    # Show log of changes, return if nothing to do
    revset = show_log(id)
    if not revset and not force:
        return

    # See if we have any requirements changes
    requirements_changes = force or vcs.changed_files(revset, r' requirements/')
    if requirements_changes:
        print colors.yellow("Will update requirements (and do migrations)")

    # See if we have package.json changes
    package_changed = force or vcs.changed_files(revset, r' {{ cookiecutter.repo_name }}/package.json')
    if package_changed:
        print colors.yellow("Will run npm install")

    # See if we have changes in app source or static files
    app_changed = force or vcs.changed_files(revset, [r' {{ cookiecutter.repo_name }}/app',
                                                      r' {{ cookiecutter.repo_name }}/static',
                                                      r' {{ cookiecutter.repo_name }}/settings',
                                                      r'webpack'])
    if app_changed:
        print colors.yellow("Will run npm build")

    # See if we have any changes to migrations between the revisions we're applying
    migrations = force or migrate_diff(revset=revset, silent=True)
    if migrations:
        print colors.yellow("Will apply %d migrations:" % len(migrations))
        print indent(migrations)

    # See if we have any changes to crontab config
    crontab_changed = force or vcs.changed_files(revset, r'deploy/crontab.conf')
    if crontab_changed:
        print colors.yellow("Will update cron entries")

    # see if nginx conf has changed
    nginx_changed = vcs.changed_files(revset, [r' deploy/%s' % env.nginx_conf])
    if nginx_changed:
        if auto_nginx:
            print colors.yellow("Nginx configuration change detected, updating automatically")
        else:
            print colors.red("Warning: Nginx configuration change detected, also run: `fab %target% nginx_update`")
    elif force:
        print colors.yellow("Updating nginx config")

    # if services flag is set, let the user know
    if force or services:
        print colors.yellow("Will update service configuration")

    if not silent:
        request_confirm("deploy")

    # --- Point of no return: apply the planned changes in order. ---
    vcs.update(id)
    if requirements_changes:
        update_requirements()
    if migrations or requirements_changes:
        migrate(silent=True)
    if crontab_changed:
        with cd(env.code_dir):
            sudo('cp deploy/crontab.conf /etc/cron.d/{{cookiecutter.repo_name}}')
    if force or services:
        configure_services(){% if cookiecutter.project_type == 'spa' %}
        update_worker_conf(){% endif %}
    if force or (nginx_changed and auto_nginx):
        nginx_update()
    collectstatic(npm_install=package_changed, npm_build=app_changed)
    {% if cookiecutter.project_type == 'spa' %}reload_server{% else %}restart_server{% endif %}(silent=True)

    # Run deploy systemchecks
    check()
def _create_config_bgp(self):
    """Build the GoBGP configuration dict from self.peers/policies/etc. and
    write it to <config_dir>/gobgpd.conf in the selected config_format."""
    config = {
        'global': {
            'config': {
                'as': self.asn,
                'router-id': self.router_id,
            },
            'route-selection-options': {
                'config': {
                    'external-compare-router-id': True,
                },
            },
        },
        'neighbors': [],
    }

    # Multipath is only enabled with the v2 zebra API.
    if self.zebra and self.zapi_version == 2:
        config['global']['use-multiple-paths'] = {'config': {'enabled': True}}

    for peer, info in self.peers.iteritems():
        # Build the AFI-SAFI list for this neighbor based on its address
        # family and the vpn/flowspec feature flags.
        afi_safi_list = []
        if info['interface'] != '':
            # Unnumbered (interface-based) peering: enable both families.
            afi_safi_list.append(
                {'config': {'afi-safi-name': 'ipv4-unicast'}})
            afi_safi_list.append(
                {'config': {'afi-safi-name': 'ipv6-unicast'}})
        else:
            version = netaddr.IPNetwork(info['neigh_addr']).version
            if version == 4:
                afi_safi_list.append(
                    {'config': {'afi-safi-name': 'ipv4-unicast'}})
            elif version == 6:
                afi_safi_list.append(
                    {'config': {'afi-safi-name': 'ipv6-unicast'}})
            else:
                # NOTE(review): this Exception is constructed but never
                # raised — an invalid IP version is silently ignored.
                # Almost certainly should be `raise Exception(...)`.
                Exception(
                    'invalid ip address version. {0}'.format(version))

        if info['vpn']:
            afi_safi_list.append(
                {'config': {'afi-safi-name': 'l3vpn-ipv4-unicast'}})
            afi_safi_list.append(
                {'config': {'afi-safi-name': 'l3vpn-ipv6-unicast'}})
            afi_safi_list.append(
                {'config': {'afi-safi-name': 'l2vpn-evpn'}})
            afi_safi_list.append({
                'config': {
                    'afi-safi-name': 'rtc'
                },
                'route-target-membership': {
                    'config': {
                        'deferral-time': 10
                    }
                }
            })

        if info['flowspec']:
            afi_safi_list.append(
                {'config': {'afi-safi-name': 'ipv4-flowspec'}})
            afi_safi_list.append(
                {'config': {'afi-safi-name': 'l3vpn-ipv4-flowspec'}})
            afi_safi_list.append(
                {'config': {'afi-safi-name': 'ipv6-flowspec'}})
            afi_safi_list.append(
                {'config': {'afi-safi-name': 'l3vpn-ipv6-flowspec'}})

        # Exactly one of neighbor-address / neighbor-interface is set.
        neigh_addr = None
        interface = None
        if info['interface'] == '':
            neigh_addr = info['neigh_addr'].split('/')[0]
        else:
            interface = info['interface']
        n = {
            'config': {
                'neighbor-address': neigh_addr,
                'neighbor-interface': interface,
                'peer-as': peer.asn,
                'auth-password': info['passwd'],
                'vrf': info['vrf'],
                'remove-private-as': info['remove_private_as'],
            },
            'afi-safis': afi_safi_list,
            'timers': {
                'config': {
                    'connect-retry': 10,
                },
            },
            'transport': {
                'config': {},
            },
        }

        n['as-path-options'] = {'config': {}}
        if info['allow_as_in'] > 0:
            n['as-path-options']['config']['allow-own-as'] = info[
                'allow_as_in']
        if info['replace_peer_as']:
            n['as-path-options']['config']['replace-peer-as'] = info[
                'replace_peer_as']

        # Only IPv6 local addresses are pinned explicitly.
        if ':' in info['local_addr']:
            n['transport']['config']['local-address'] = info[
                'local_addr'].split('/')[0]

        if info['passive']:
            n['transport']['config']['passive-mode'] = True

        if info['is_rs_client']:
            n['route-server'] = {'config': {'route-server-client': True}}

        if info['local_as']:
            n['config']['local-as'] = info['local_as']

        if info['prefix_limit']:
            for v in afi_safi_list:
                v['prefix-limit'] = {
                    'config': {
                        'max-prefixes': info['prefix_limit'],
                        'shutdown-threshold-pct': 80
                    }
                }

        if info['graceful_restart'] is not None:
            n['graceful-restart'] = {
                'config': {
                    'enabled': True,
                    'restart-time': 20
                }
            }
            for afi_safi in afi_safi_list:
                afi_safi['mp-graceful-restart'] = {
                    'config': {
                        'enabled': True
                    }
                }
            # Long-lived graceful restart overrides the plain GR timer.
            if info['llgr'] is not None:
                n['graceful-restart']['config']['restart-time'] = 1
                n['graceful-restart']['config'][
                    'long-lived-enabled'] = True
                for afi_safi in afi_safi_list:
                    afi_safi['long-lived-graceful-restart'] = {
                        'config': {
                            'enabled': True,
                            'restart-time': 30
                        }
                    }

        if info['is_rr_client']:
            # Cluster id defaults to our router-id unless overridden.
            cluster_id = self.router_id
            if 'cluster_id' in info and info['cluster_id'] is not None:
                cluster_id = info['cluster_id']
            n['route-reflector'] = {
                'config': {
                    'route-reflector-client': True,
                    'route-reflector-cluster-id': cluster_id
                }
            }

        if info['addpath']:
            n['add-paths'] = {'config': {'receive': True, 'send-max': 16}}

        if len(info.get('default-policy', [])) + len(
                info.get('policies', [])) > 0:
            n['apply-policy'] = {'config': {}}

        for typ, p in info.get('policies', {}).iteritems():
            n['apply-policy']['config']['{0}-policy-list'.format(typ)] = [
                p['name']
            ]

        def _f(v):
            # Map a default-policy keyword onto GoBGP's enum value.
            if v == 'reject':
                return 'reject-route'
            elif v == 'accept':
                return 'accept-route'
            raise Exception('invalid default policy type {0}'.format(v))

        for typ, d in info.get('default-policy', {}).iteritems():
            n['apply-policy']['config']['default-{0}-policy'.format(
                typ)] = _f(d)

        if info['treat_as_withdraw']:
            n['error-handling'] = {'config': {'treat-as-withdraw': True}}

        config['neighbors'].append(n)

    config['defined-sets'] = {}
    if self.prefix_set:
        config['defined-sets']['prefix-sets'] = self.prefix_set
    if self.neighbor_set:
        config['defined-sets']['neighbor-sets'] = self.neighbor_set
    if self.bgp_set:
        config['defined-sets']['bgp-defined-sets'] = self.bgp_set

    policy_list = []
    for p in self.policies.itervalues():
        policy = {'name': p['name']}
        if 'statements' in p:
            policy['statements'] = p['statements']
        policy_list.append(policy)
    if len(policy_list) > 0:
        config['policy-definitions'] = policy_list

    if self.zebra:
        config['zebra'] = {
            'config': {
                'enabled': True,
                'redistribute-route-type-list': ['connect'],
                'version': self.zapi_version
            }
        }

    # Serialize in the requested format and write the file.
    with open('{0}/gobgpd.conf'.format(self.config_dir), 'w') as f:
        print colors.yellow('[{0}\'s new gobgpd.conf]'.format(self.name))
        # NOTE(review): `is` compares identity, not equality — these
        # comparisons only work because CPython happens to intern these
        # short literals; they should be `==`.
        if self.config_format is 'toml':
            raw = toml.dumps(config)
        elif self.config_format is 'yaml':
            raw = yaml.dump(config)
        elif self.config_format is 'json':
            raw = json.dumps(config)
        else:
            raise Exception('invalid config_format {0}'.format(
                self.config_format))
        print colors.yellow(indent(raw))
        f.write(raw)
def _create_config_bgp(self): c = CmdBuffer() c << 'hostname bgpd' c << 'password zebra' c << 'router bgp {0}'.format(self.asn) c << 'bgp router-id {0}'.format(self.router_id) if any(info['graceful_restart'] for info in self.peers.itervalues()): c << 'bgp graceful-restart' version = 4 for peer, info in self.peers.iteritems(): version = netaddr.IPNetwork(info['neigh_addr']).version n_addr = info['neigh_addr'].split('/')[0] if version == 6: c << 'no bgp default ipv4-unicast' c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn) if info['is_rs_client']: c << 'neighbor {0} route-server-client'.format(n_addr) for typ, p in info['policies'].iteritems(): c << 'neighbor {0} route-map {1} {2}'.format( n_addr, p['name'], typ) if info['passwd']: c << 'neighbor {0} password {1}'.format(n_addr, info['passwd']) if info['passive']: c << 'neighbor {0} passive'.format(n_addr) if version == 6: c << 'address-family ipv6 unicast' c << 'neighbor {0} activate'.format(n_addr) c << 'exit-address-family' for route in self.routes.itervalues(): if route['rf'] == 'ipv4': c << 'network {0}'.format(route['prefix']) elif route['rf'] == 'ipv6': c << 'address-family ipv6 unicast' c << 'network {0}'.format(route['prefix']) c << 'exit-address-family' else: raise Exception('unsupported route faily: {0}'.format( route['rf'])) if self.zebra: if version == 6: c << 'address-family ipv6 unicast' c << 'redistribute connected' c << 'exit-address-family' else: c << 'redistribute connected' for name, policy in self.policies.iteritems(): c << 'access-list {0} {1} {2}'.format(name, policy['type'], policy['match']) c << 'route-map {0} permit 10'.format(name) c << 'match ip address {0}'.format(name) c << 'set metric {0}'.format(policy['med']) c << 'debug bgp as4' c << 'debug bgp fsm' c << 'debug bgp updates' c << 'debug bgp events' c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME) with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f: print colors.yellow('[{0}\'s new 
bgpd.conf]'.format(self.name)) print colors.yellow(indent(str(c))) f.writelines(str(c))
def _info(message): print blue(indent(message, spaces=4))
def deploy(id=None, silent=False, force=False, auto_nginx=True):
    """ Perform an automatic deploy to the target requested.

    :param id: VCS revision to deploy (None => latest).
    :param silent: skip the interactive confirmation prompt.
    :param force: rebuild/redeploy everything regardless of detected changes.
    :param auto_nginx: apply nginx config changes automatically.
    """
    require('hosts')
    require('code_dir')

    if force:
        # NOTE(review): `force` is rebound to a colored banner string; later
        # `force or ...` expressions then carry that string, so
        # `len(migrations)` below prints the banner's length rather than a
        # migration count — confirm intended.
        force = colors.blue('FORCED DEPLOY')
        print('-' * 40)
        print(force)
        print('-' * 40)

    # Ask for sudo at the beginning so we don't fail during deployment because of wrong pass
    if not sudo('whoami'):
        abort('Failed to elevate to root')
        return

    # Show log of changes, return if nothing to do
    revset = show_log(id)
    if not revset and not force:
        return

    # Ensure default local.py file exists
    ensure_local_py_exists()

    # See if we have any requirements changes
    requirements_changes = force or vcs.changed_files(revset, r'Pipfile')
    if requirements_changes:
        print(colors.yellow("Will update requirements (and do migrations)"))

    # See if we have any changes to migrations between the revisions we're applying
    migrations = force or migrate_diff(revset=revset, silent=True)
    if migrations:
        print(colors.yellow("Will apply %d migrations:" % len(migrations)))
        print(indent(migrations))

    # See if we have any changes to letsencrypt configurations
    letsencrypt_changed = force or vcs.changed_files(revset, get_config_modified_patterns('letsencrypt'))
    if letsencrypt_changed:
        print(colors.yellow("Will update letsencrypt configurations"))

    # see if nginx conf has changed
    nginx_changed = vcs.changed_files(revset, get_config_modified_patterns('nginx'))
    if nginx_changed:
        if auto_nginx:
            print(colors.yellow("Nginx configuration change detected, updating automatically"))
        else:
            print(colors.red("Warning: Nginx configuration change detected, also run: `fab %target% nginx_update`"))
    elif force:
        print(colors.yellow("Updating nginx config"))

    if not silent:
        request_confirm("deploy")

    # --- Point of no return: apply the planned changes in order. ---
    vcs.update(id)
    ensure_docker_networks()
    docker_compose('build')
    collectstatic()

    if migrations or requirements_changes:
        migrate(silent=True)

    # Run deploy systemchecks
    check()

    docker_up(silent=True, force_recreate=force)

    # Update nginx after bringing up container
    if force or (nginx_changed and auto_nginx):
        nginx_update()

    if force or letsencrypt_changed:
        letsencrypt_update()
def upload(source, destination, context=None, user=None, group=None, jinja_env=None):
    """Render all Jinja templates under *source* and rsync them to
    *destination* on the remote host.

    Templates are rendered locally into a temp dir, uploaded to a remote
    temp dir, chown'ed to *user*/*group* (default root), then synced into
    *destination* with rsync; the list of changed remote files is returned.

    # assumes jinja_env is a configured jinja2.Environment with a loader —
    # TODO confirm against callers (the TODO below suggests it may be None).
    """
    tmp_dir = tempfile.mkdtemp()
    try:
        # TODO: Handle None template_loader
        # Filter wanted templates
        # NOTE(review): str.lstrip('./') strips any leading '.' and '/'
        # *characters*, not the literal prefix './' — e.g. '..foo' becomes
        # 'foo'. Probably should strip the prefix explicitly.
        source = source.lstrip('./')
        templates = jinja_env.loader.list_templates()
        templates = [
            template for template in templates
            if template.startswith(source)
        ]

        # No templates is found
        if not templates:
            puts(indent(magenta('No templates found')))
            return

        for template in templates:
            rel_template_path = template[len(source):]
            if os.path.sep in rel_template_path:
                # Create directories for template
                rel_template_dir = os.path.dirname(rel_template_path)
                template_dir = os.path.join(tmp_dir, rel_template_dir)
                if not os.path.exists(template_dir):
                    os.makedirs(template_dir)
            else:
                # Single template
                rel_template_path = os.path.basename(template)

            # Render template
            # NOTE(review): `context` is mutated here (adds key 'n'), so the
            # caller's dict is modified as a side effect — confirm intended.
            context = context or {}
            context['n'] = os.path.splitext(os.path.basename(template))[0]
            try:
                text = jinja_env.get_template(template).render(**context or {})
                text = text.encode('utf-8')

                # Write rendered template to local temp dir
                # NOTE(review): `file()` is Python 2 only.
                rendered_template = os.path.join(tmp_dir, rel_template_path)
                with file(rendered_template, 'w+') as f:
                    f.write(text)
                    f.write(os.linesep)  # Add newline at end removed by jinja
            except UnicodeDecodeError:
                warn('Failed to render template "{}"'.format(template))

        with silent(), abort_on_error():
            # Upload rendered templates to remote temp dir
            remote_tmp_dir = run('mktemp -d').stdout
            put(os.path.join(tmp_dir, '*'), remote_tmp_dir, use_sudo=True)

            # Set given permissions on remote before sync
            chown(remote_tmp_dir,
                  owner=user or 'root',
                  group=group or user or 'root',
                  recursive=True)

            # Clean destination
            if len(templates) > 1 or templates[0].endswith(os.path.sep):
                destination = destination.rstrip(os.path.sep) + os.path.sep

            # Sync templates from remote temp dir to remote destination,
            # then remove the remote temp dir in the same shell command.
            remote_tmp_dir = os.path.join(remote_tmp_dir, '*')
            cmd = 'rsync -rcbi --out-format="%n" {tmp_dir} {dest} && rm -r {tmp_dir}'.format(
                tmp_dir=remote_tmp_dir,
                dest=destination)
            updated = run(cmd)
            updated_files = [
                line.strip() for line in updated.stdout.split('\n') if line
            ]

            if updated_files:
                for updated_file in updated_files:
                    info(indent('Uploaded: {}'), updated_file)
                    # TODO: Handle renaming
            else:
                puts(indent('(no changes found)'))

            return updated_files
    except jinja2.TemplateNotFound as e:
        abort('Templates not found: "{}"'.format(e))
    finally:
        # Always remove the local temp render dir.
        shutil.rmtree(tmp_dir)
def deploy(id=None, silent=False, force=False, auto_nginx=True):
    """ Perform an automatic deploy to the target requested.

    NOTE: this is a cookiecutter template file — the {{ ... }} fragments are
    rendered at project-generation time, not at runtime.

    :param id: VCS revision to deploy (None => latest).
    :param silent: skip the interactive confirmation prompt.
    :param force: rebuild/redeploy everything regardless of detected changes.
    :param auto_nginx: apply nginx config changes automatically.
    """
    require('hosts')
    require('code_dir')

    if force:
        # NOTE(review): `force` is rebound to a colored banner string; later
        # `force or ...` expressions then carry that string (see the
        # `len(migrations)` print below) — confirm intended.
        force = colors.blue('FORCED DEPLOY')
        print '-' * 40
        print force
        print '-' * 40

    # Ask for sudo at the beginning so we don't fail during deployment because of wrong pass
    if not sudo('whoami'):
        abort('Failed to elevate to root')
        return

    # Show log of changes, return if nothing to do
    revset = show_log(id)
    if not revset and not force:
        return

    # See if we have any requirements changes
    requirements_changes = force or vcs.changed_files(revset, r' requirements/')
    if requirements_changes:
        print colors.yellow("Will update requirements (and do migrations)")

    # See if we have changes in app source or static files
    app_patterns = [r' {{cookiecutter.repo_name}}/app',
                    r' {{cookiecutter.repo_name}}/static',
                    r' {{cookiecutter.repo_name}}/settings',
                    r' {{cookiecutter.repo_name}}/package.json']
    app_changed = force or vcs.changed_files(revset, app_patterns)
    if app_changed:
        print colors.yellow("Will run npm build")

    # See if we have any changes to migrations between the revisions we're applying
    migrations = force or migrate_diff(revset=revset, silent=True)
    if migrations:
        print colors.yellow("Will apply %d migrations:" % len(migrations))
        print indent(migrations)

    # See if we have any changes to crontab config
    crontab_changed = force or vcs.changed_files(revset, r'deploy/crontab.conf')
    if crontab_changed:
        print colors.yellow("Will update cron entries")

    # See if we have any changes to letsencrypt configurations
    letsencrypt_changed = force or vcs.changed_files(revset, get_config_modified_patterns('letsencrypt'))
    if letsencrypt_changed:
        print colors.yellow("Will update letsencrypt configurations")

    # see if nginx conf has changed
    nginx_changed = vcs.changed_files(revset, get_config_modified_patterns('nginx'))
    if nginx_changed:
        if auto_nginx:
            print colors.yellow("Nginx configuration change detected, updating automatically")
        else:
            print colors.red("Warning: Nginx configuration change detected, also run: `fab %target% nginx_update`")
    elif force:
        print colors.yellow("Updating nginx config")

    if not silent:
        request_confirm("deploy")

    # --- Point of no return: apply the planned changes in order. ---
    vcs.update(id)
    ensure_docker_networks()
    docker_compose('build')
    collectstatic(npm_build=app_changed)

    if crontab_changed:
        with cd(env.code_dir):
            sudo('cp deploy/crontab.conf /etc/cron.d/{{cookiecutter.repo_name}}')

    if migrations or requirements_changes:
        migrate(silent=True)

    # Run deploy systemchecks
    check()

    docker_up(silent=True)

    # Update nginx after bringing up container
    if force or (nginx_changed and auto_nginx):
        nginx_update()

    if force or letsencrypt_changed:
        letsencrypt_update()
def require(*keys, **kwargs):
    """
    Check for given keys in the shared environment dict and abort if not found.

    Positional arguments should be strings signifying what env vars should be
    checked for. If any of the given arguments do not exist, Fabric will abort
    execution and print the names of the missing keys.

    The optional keyword argument ``used_for`` may be a string, which will be
    printed in the error output to inform users why this requirement is in
    place. ``used_for`` is printed as part of a string similar to::

        "Th(is|ese) variable(s) (are|is) used for %s"

    so format it appropriately.

    The optional keyword argument ``provided_by`` may be a list of functions
    or function names or a single function or function name which the user
    should be able to execute in order to set the key or keys; it will be
    included in the error output if requirements are not met.

    Note: it is assumed that the keyword arguments apply to all given keys as
    a group. If you feel the need to specify more than one ``used_for``, for
    example, you should break your logic into multiple calls to ``require()``.

    .. versionchanged:: 1.1
        Allow iterable ``provided_by`` values instead of just single values.
    """
    # If all keys exist and are non-empty, we're good, so keep going.
    # A key is "missing" if absent from env, or present but an *empty*
    # container (dict/list/tuple/set). Empty strings/0 pass this check.
    # NOTE(review): relies on Python 2's filter() returning a list; under
    # Python 3 the filter object is always truthy and the early return
    # below would never fire.
    missing_keys = filter(lambda x: x not in env
                          or (x in env and isinstance(env[x], (dict, list, tuple, set)) and not env[x]), keys)
    if not missing_keys:
        return
    # Pluralization
    if len(missing_keys) > 1:
        variable = "variables were"
        used = "These variables are"
    else:
        variable = "variable was"
        used = "This variable is"
    # Regardless of kwargs, print what was missing. (Be graceful if used outside
    # of a command.)
    if 'command' in env:
        prefix = "The command '%s' failed because the " % env.command
    else:
        prefix = "The "
    msg = "%sfollowing required environment %s not defined:\n%s" % (
        prefix, variable, indent(missing_keys)
    )
    # Print used_for if given
    if 'used_for' in kwargs:
        msg += "\n\n%s used for %s" % (used, kwargs['used_for'])
    # And print provided_by if given
    if 'provided_by' in kwargs:
        funcs = kwargs['provided_by']
        # non-iterable is given, treat it as a list of this single item
        if not hasattr(funcs, '__iter__'):
            funcs = [funcs]
        if len(funcs) > 1:
            command = "one of the following commands"
        else:
            command = "the following command"
        # Prefer function __name__, fall back to str() for plain strings.
        to_s = lambda obj: getattr(obj, '__name__', str(obj))
        provided_by = [to_s(obj) for obj in funcs]
        msg += "\n\nTry running %s prior to this one, to fix the problem:\n%s"\
            % (command, indent(provided_by))
    abort(msg)
def main():
    """
    Main command-line execution loop.

    Parses CLI options, loads the fabfile, resolves the requested commands
    and runs each one against its host list. Always exits via sys.exit().
    """
    try:
        # Parse command line options
        parser, options, arguments = parse_options()

        # Handle regular args vs -- args
        arguments = parser.largs
        remainder_arguments = parser.rargs

        # Update env with any overridden option values
        # NOTE: This needs to remain the first thing that occurs
        # post-parsing, since so many things hinge on the values in env.
        for option in env_options:
            state.env[option.dest] = getattr(options, option.dest)

        # Handle --hosts, --roles, --exclude-hosts (comma separated string =>
        # list)
        for key in ['hosts', 'roles', 'exclude_hosts']:
            if key in state.env and isinstance(state.env[key], basestring):
                state.env[key] = state.env[key].split(',')

        # Handle output control level show/hide
        update_output_levels(show=options.show, hide=options.hide)

        # Handle version number option
        if options.show_version:
            print("Fabric %s" % state.env.version)
            sys.exit(0)

        # Load settings from user settings file, into shared env dict.
        state.env.update(load_settings(state.env.rcfile))

        # Find local fabfile path or abort
        fabfile = find_fabfile()
        if not fabfile and not remainder_arguments:
            abort("""Couldn't find any fabfiles!

Remember that -f can be used to specify fabfile path, and use -h for help.""")

        # Store absolute path to fabfile in case anyone needs it
        state.env.real_fabfile = fabfile

        # Load fabfile (which calls its module-level code, including
        # tweaks to env values) and put its commands in the shared commands
        # dict
        # NOTE(review): `docstring`, `callables` and `default` are only bound
        # inside this branch; if `fabfile` is falsy, the later references to
        # `default`/`docstring` raise NameError (a sibling variant of main()
        # in this file pre-initializes `default = None`) — confirm.
        if fabfile:
            docstring, callables, default = load_fabfile(fabfile)
            state.commands.update(callables)

        # Handle case where we were called bare, i.e. just "fab", and print
        # a help message.
        actions = (options.list_commands, options.shortlist, options.display,
                   arguments, remainder_arguments, default)
        if not any(actions):
            parser.print_help()
            sys.exit(1)

        # Abort if no commands found
        if not state.commands and not remainder_arguments:
            abort("Fabfile didn't contain any commands!")

        # Now that we're settled on a fabfile, inform user.
        if state.output.debug:
            if fabfile:
                print("Using fabfile '%s'" % fabfile)
            else:
                print("No fabfile loaded -- remainder command only")

        # Shortlist is now just an alias for the "short" list format;
        # it overrides use of --list-format if somebody were to specify both
        if options.shortlist:
            options.list_format = 'short'
            options.list_commands = True

        # List available commands
        if options.list_commands:
            print("\n".join(list_commands(docstring, options.list_format)))
            sys.exit(0)

        # Handle show (command-specific help) option
        if options.display:
            display_command(options.display)

        # If user didn't specify any commands to run, show help
        if not (arguments or remainder_arguments or default):
            parser.print_help()
            sys.exit(0)  # Or should it exit with error (1)?

        # Parse arguments into commands to run (plus args/kwargs/hosts)
        commands_to_run = parse_arguments(arguments)

        # Parse remainders into a faux "command" to execute
        remainder_command = parse_remainder(remainder_arguments)

        # Figure out if any specified task names are invalid
        unknown_commands = []
        for tup in commands_to_run:
            if crawl(tup[0], state.commands) is None:
                unknown_commands.append(tup[0])

        # Abort if any unknown commands were specified
        if unknown_commands:
            abort("Command(s) not found:\n%s" \
                % indent(unknown_commands))

        # Generate remainder command and insert into commands, commands_to_run
        if remainder_command:
            r = '<remainder>'
            state.commands[r] = lambda: api.run(remainder_command)
            commands_to_run.append((r, [], {}, [], [], []))

        # Ditto for a default, if found
        if not commands_to_run and default:
            commands_to_run.append((default.name, [], {}, [], [], []))

        if state.output.debug:
            names = ", ".join(x[0] for x in commands_to_run)
            print("Commands to run: %s" % names)

        # At this point all commands must exist, so execute them in order.
        for name, args, kwargs, cli_hosts, cli_roles, cli_exclude_hosts in commands_to_run:
            # Get callable by itself
            task = crawl(name, state.commands)
            # Set current task name (used for some error messages)
            state.env.command = name
            # Set host list (also copy to env)
            state.env.all_hosts = hosts = get_hosts(
                task, cli_hosts, cli_roles, cli_exclude_hosts)
            # If hosts found, execute the function on each host in turn
            for host in hosts:
                # Preserve user
                prev_user = state.env.user
                # Split host string and apply to env dict
                username, hostname, port = interpret_host_string(host)
                # Log to stdout
                if state.output.running:
                    print("[%s] Executing task '%s'" % (host, name))
                # Actually run command
                _run_task(task, args, kwargs)
                # Put old user back
                state.env.user = prev_user
            # If no hosts found, assume local-only and run once
            if not hosts:
                _run_task(task, args, kwargs)

        # If we got here, no errors occurred, so print a final note.
        if state.output.status:
            print("\nDone.")
    except SystemExit:
        # a number of internal functions might raise this one.
        raise
    except KeyboardInterrupt:
        if state.output.status:
            print >> sys.stderr, "\nStopped."
        sys.exit(1)
    except:
        sys.excepthook(*sys.exc_info())
        # we might leave stale threads if we don't explicitly exit()
        sys.exit(1)
    finally:
        disconnect_all()
    sys.exit(0)
def create_config(self): with open('{0}/gobgpd.conf'.format(self.config_dir), 'w') as f: print colors.yellow('[{0}\'s new gobgpd.conf]'.format(self.name)) print colors.yellow(indent(self.config)) f.write(self.config)
def main(fabfile_locations=None):
    """
    Main command-line execution loop.

    :param fabfile_locations: optional extra paths passed to find_fabfile().
    Always exits via sys.exit().
    """
    try:
        # Parse command line options
        parser, options, arguments = parse_options()

        # Handle regular args vs -- args
        arguments = parser.largs
        remainder_arguments = parser.rargs

        # Allow setting of arbitrary env keys.
        # This comes *before* the "specific" env_options so that those may
        # override these ones. Specific should override generic, if somebody
        # was silly enough to specify the same key in both places.
        # E.g. "fab --set shell=foo --shell=bar" should have env.shell set to
        # 'bar', not 'foo'.
        for pair in _escape_split(',', options.env_settings):
            pair = _escape_split('=', pair)
            # "--set x" => set env.x to True
            # "--set x=" => set env.x to ""
            key = pair[0]
            value = True
            if len(pair) == 2:
                value = pair[1]
            state.env[key] = value

        # Update env with any overridden option values
        # NOTE: This needs to remain the first thing that occurs
        # post-parsing, since so many things hinge on the values in env.
        for option in env_options:
            state.env[option.dest] = getattr(options, option.dest)

        # Handle --hosts, --roles, --exclude-hosts (comma separated string =>
        # list)
        for key in ['hosts', 'roles', 'exclude_hosts']:
            if key in state.env and isinstance(state.env[key], string_types):
                state.env[key] = state.env[key].split(',')

        # Feed the env.tasks : tasks that are asked to be executed.
        state.env['tasks'] = arguments

        # Handle output control level show/hide
        update_output_levels(show=options.show, hide=options.hide)

        # Handle version number option
        if options.show_version:
            print("Fabric %s" % state.env.version)
            print("Paramiko %s" % ssh.__version__)
            sys.exit(0)

        # Load settings from user settings file, into shared env dict.
        state.env.update(load_settings(state.env.rcfile))

        # Find local fabfile path or abort
        fabfile = find_fabfile(fabfile_locations)
        if not fabfile and not remainder_arguments:
            abort("""Couldn't find any fabfiles!

Remember that -f can be used to specify fabfile path, and use -h for help.""")

        # Store absolute path to fabfile in case anyone needs it
        state.env.real_fabfile = fabfile

        # Load fabfile (which calls its module-level code, including
        # tweaks to env values) and put its commands in the shared commands
        # dict
        default = None
        if fabfile:
            docstring, callables, default = load_fabfile(fabfile)
            state.commands.update(callables)

        # Handle case where we were called bare, i.e. just "fab", and print
        # a help message.
        actions = (options.list_commands, options.shortlist, options.display,
                   arguments, remainder_arguments, default)
        if not any(actions):
            parser.print_help()
            sys.exit(1)

        # Abort if no commands found
        if not state.commands and not remainder_arguments:
            abort("Fabfile didn't contain any commands!")

        # Now that we're settled on a fabfile, inform user.
        if state.output.debug:
            if fabfile:
                print("Using fabfile '%s'" % fabfile)
            else:
                print("No fabfile loaded -- remainder command only")

        # Shortlist is now just an alias for the "short" list format;
        # it overrides use of --list-format if somebody were to specify both
        if options.shortlist:
            options.list_format = 'short'
            options.list_commands = True

        # List available commands
        if options.list_commands:
            show_commands(docstring, options.list_format)

        # Handle show (command-specific help) option
        if options.display:
            display_command(options.display)

        # If user didn't specify any commands to run, show help
        if not (arguments or remainder_arguments or default):
            parser.print_help()
            sys.exit(0)  # Or should it exit with error (1)?

        # Parse arguments into commands to run (plus args/kwargs/hosts)
        commands_to_run = parse_arguments(arguments)

        # Parse remainders into a faux "command" to execute
        remainder_command = parse_remainder(remainder_arguments)

        # Figure out if any specified task names are invalid
        unknown_commands = []
        for tup in commands_to_run:
            if crawl(tup[0], state.commands) is None:
                unknown_commands.append(tup[0])

        # Abort if any unknown commands were specified
        if unknown_commands:
            warn("Command(s) not found:\n%s" \
                % indent(unknown_commands))
            show_commands(None, options.list_format, 1)

        # Generate remainder command and insert into commands, commands_to_run
        if remainder_command:
            r = '<remainder>'
            state.commands[r] = lambda: api.run(remainder_command)
            commands_to_run.append((r, [], {}, [], [], []))

        # Ditto for a default, if found
        if not commands_to_run and default:
            commands_to_run.append((default.name, [], {}, [], [], []))

        # Initial password prompt, if requested
        if options.initial_password_prompt:
            # NOTE(review): the span below is corrupted in this source —
            # it looks like a secret-redaction artifact ("******") replaced
            # the getpass() call and the `if state.output.debug:` guard that
            # normally precede the debug print. Restore from upstream Fabric
            # before using this file; kept verbatim here.
            prompt = "Initial value for env.password: "******", ".join(x[0] for x in commands_to_run)
            print("Commands to run: %s" % names)

        # At this point all commands must exist, so execute them in order.
        for name, args, kwargs, arg_hosts, arg_roles, arg_exclude_hosts in commands_to_run:
            execute(name,
                    hosts=arg_hosts,
                    roles=arg_roles,
                    exclude_hosts=arg_exclude_hosts,
                    *args, **kwargs)

        # If we got here, no errors occurred, so print a final note.
        if state.output.status:
            print("\nDone.")
    except SystemExit:
        # a number of internal functions might raise this one.
        raise
    except KeyboardInterrupt:
        if state.output.status:
            sys.stderr.write("\nStopped.\n")
        sys.exit(1)
    except:
        sys.excepthook(*sys.exc_info())
        # we might leave stale threads if we don't explicitly exit()
        sys.exit(1)
    finally:
        disconnect_all()
    sys.exit(0)
def _success(message): print green(indent(message, spaces=4))
def main():
    """
    Main command-line execution loop.

    Parses options/arguments, loads the fabfile, resolves the commands to
    run, and executes them in order; always exits the process (0 on success,
    1 on interrupt or unhandled error).
    """
    try:
        # Parse command line options
        parser, options, arguments = parse_options()

        # Handle regular args vs -- args (everything after "--" is treated
        # as a raw remainder command, not a task name)
        arguments = parser.largs
        remainder_arguments = parser.rargs

        # Update env with any overridden option values
        # NOTE: This needs to remain the first thing that occurs
        # post-parsing, since so many things hinge on the values in env.
        for option in env_options:
            env[option.dest] = getattr(options, option.dest)

        # Handle --hosts, --roles (comma separated string => list)
        for key in ['hosts', 'roles']:
            if key in env and isinstance(env[key], str):
                env[key] = env[key].split(',')

        # Handle output control level show/hide
        update_output_levels(show=options.show, hide=options.hide)

        # Handle version number option: print and exit immediately
        if options.show_version:
            print("Fabric %s" % env.version)
            sys.exit(0)

        # Load settings from user settings file, into shared env dict.
        env.update(load_settings(env.rcfile))

        # Find local fabfile path or abort (a bare remainder command is
        # still allowed to run without any fabfile)
        fabfile = find_fabfile()
        if not fabfile and not remainder_arguments:
            abort("Couldn't find any fabfiles!")

        # Store absolute path to fabfile in case anyone needs it
        env.real_fabfile = fabfile

        # Load fabfile (which calls its module-level code, including
        # tweaks to env values) and put its commands in the shared commands
        # dict
        if fabfile:
            docstring, callables = load_fabfile(fabfile)
            commands.update(callables)

        # Autocompletion support: expose dash-separated command names, plus
        # any extra entries the fabfile registered via env.autocomplete
        autocomplete_items = [cmd.replace('_', '-') for cmd in commands]
        if 'autocomplete' in env:
            autocomplete_items += env.autocomplete
        autocomplete(parser, ListCompleter(autocomplete_items))

        # Handle hooks related options: whitespace-separated hook names are
        # accumulated into the module-level DISABLED_HOOKS / ENABLED_HOOKS
        _disable_hooks = options.disable_hooks
        _enable_hooks = options.enable_hooks
        if _disable_hooks:
            for _hook in _disable_hooks.strip().split():
                DISABLED_HOOKS.append(_hook.strip())
        if _enable_hooks:
            for _hook in _enable_hooks.strip().split():
                ENABLED_HOOKS.append(_hook.strip())

        # Handle the non-execution flow (no tasks requested): show a
        # listing or per-command help, then exit inside the called helper
        if not arguments and not remainder_arguments:
            # Non-verbose command list
            if options.shortlist:
                shortlist()
            # Handle show (command-specific help) option
            if options.display:
                display_command(options.display)
            # Else, show the list of commands and exit
            # NOTE(review): `docstring` is only bound when a fabfile was
            # loaded above -- presumably a fabfile always exists on this
            # path; confirm before relying on it.
            list_commands(docstring)

        # Now that we're settled on a fabfile, inform user.
        if output.debug:
            if fabfile:
                print("Using fabfile '%s'" % fabfile)
            else:
                print("No fabfile loaded -- remainder command only")

        # Parse arguments into commands to run (plus args/kwargs/hosts);
        # also merge any key=value env overrides given on the command line
        commands_to_run, env_update = parse_arguments(arguments)
        env.update(env_update)

        # Parse remainders into a faux "command" to execute
        remainder_command = parse_remainder(remainder_arguments)

        # Figure out if any specified task names are invalid
        unknown_commands = []
        for tup in commands_to_run:
            if tup[0] not in commands:
                unknown_commands.append(tup[0])

        # Abort if any unknown commands were specified
        if unknown_commands:
            abort("Command(s) not found:\n%s" \
                % indent(unknown_commands))

        # Generate remainder command and insert into commands, commands_to_run
        if remainder_command:
            r = '<remainder>'
            commands[r] = lambda: api.run(remainder_command)
            commands_to_run.append((r, [], {}, [], []))

        if output.debug:
            names = ", ".join(x[0] for x in commands_to_run)
            print("Commands to run: %s" % names)

        # Give hooks a chance to inspect/alter the run before execution
        call_hooks('commands.before', commands, commands_to_run)

        # Initialise context runner
        env()

        # Initialise the default stage if none are given as the first command.
        if 'stages' in env:
            if commands_to_run[0][0] not in env.stages:
                # First task is not a stage name: run the default stage first
                execute_command(
                    (env.stages[0], (), {}, None, None, None), commands
                    )
            else:
                # First task IS a stage: pop and run it before the rest
                execute_command(commands_to_run.pop(0), commands)

        # Optionally load a YAML config file (path resolved relative to the
        # fabfile's directory) into env.config as an AttributeDict
        if env.config_file:
            config_path = realpath(expanduser(env.config_file))
            config_path = join(dirname(fabfile), config_path)
            config_file = open(config_path, 'rb')
            config = load_yaml(config_file.read())
            if not config:
                env.config = AttributeDict()
            elif not isinstance(config, dict):
                abort("Invalid config file found at %s" % config_path)
            else:
                env.config = AttributeDict(config)
            config_file.close()
            call_hooks('config.loaded')

        # NOTE(review): appears unused within this function -- possibly a
        # leftover flag; confirm before removing.
        first_time_env_call = 1

        # At this point all commands must exist, so execute them in order.
        for spec in commands_to_run:
            execute_command(spec, commands)

        # If we got here, no errors occurred, so print a final note.
        if output.status:
            msg = "\nDone."
            if env.colors:
                msg = env.color_settings['finish'](msg)
            print(msg)
    except SystemExit:
        # a number of internal functions might raise this one.
        raise
    except KeyboardInterrupt:
        if output.status:
            msg = "\nStopped."
            if env.colors:
                msg = env.color_settings['finish'](msg)
            print >> sys.stderr, msg
        sys.exit(1)
    except:
        sys.excepthook(*sys.exc_info())
        # we might leave stale threads if we don't explicitly exit()
        sys.exit(1)
    finally:
        # Always run the after-hooks and close open connections, even on error
        call_hooks('commands.after')
        disconnect_all()
    sys.exit(0)
def prompt(text, key=None, default='', validate=None): """ Prompt user with ``text`` and return the input (like ``raw_input``). A single space character will be appended for convenience, but nothing else. Thus, you may want to end your prompt text with a question mark or a colon, e.g. ``prompt("What hostname?")``. If ``key`` is given, the user's input will be stored as ``env.<key>`` in addition to being returned by `prompt`. If the key already existed in ``env``, its value will be overwritten and a warning printed to the user. If ``default`` is given, it is displayed in square brackets and used if the user enters nothing (i.e. presses Enter without entering any text). ``default`` defaults to the empty string. If non-empty, a space will be appended, so that a call such as ``prompt("What hostname?", default="foo")`` would result in a prompt of ``What hostname? [foo]`` (with a trailing space after the ``[foo]``.) The optional keyword argument ``validate`` may be a callable or a string: * If a callable, it is called with the user's input, and should return the value to be stored on success. On failure, it should raise an exception with an exception message, which will be printed to the user. * If a string, the value passed to ``validate`` is used as a regular expression. It is thus recommended to use raw strings in this case. Note that the regular expression, if it is not fully matching (bounded by ``^`` and ``$``) it will be made so. In other words, the input must fully match the regex. Either way, `prompt` will re-prompt until validation passes (or the user hits ``Ctrl-C``). .. note:: `~fabric.operations.prompt` honors :ref:`env.abort_on_prompts <abort-on-prompts>` and will call `~fabric.utils.abort` instead of prompting if that flag is set to ``True``. If you want to block on user input regardless, try wrapping with `~fabric.context_managers.settings`. 
Examples:: # Simplest form: environment = prompt('Please specify target environment: ') # With default, and storing as env.dish: prompt('Specify favorite dish: ', 'dish', default='spam & eggs') # With validation, i.e. requiring integer input: prompt('Please specify process nice level: ', key='nice', validate=int) # With validation against a regular expression: release = prompt('Please supply a release name', validate=r'^\w+-\d+(\.\d+)?$') # Prompt regardless of the global abort-on-prompts setting: with settings(abort_on_prompts=False): prompt('I seriously need an answer on this! ') """ handle_prompt_abort("a user-specified prompt() call") # Store previous env value for later display, if necessary if key: previous_value = env.get(key) # Set up default display default_str = "" if default != '': default_str = " [%s] " % str(default).strip() else: default_str = " " # Construct full prompt string prompt_str = text.strip() + default_str # Loop until we pass validation value = None while value is None: # Get input value = raw_input(prompt_str) or default # Handle validation if validate: # Callable if callable(validate): # Callable validate() must raise an exception if validation # fails. try: value = validate(value) except Exception, e: # Reset value so we stay in the loop value = None print("Validation failed for the following reason:") print(indent(e.message) + "\n") # String / regex must match and will be empty if validation fails. else: # Need to transform regex into full-matching one if it's not. if not validate.startswith('^'): validate = r'^' + validate if not validate.endswith('$'): validate += r'$' result = re.findall(validate, value) if not result: print("Regular expression validation failed: '%s' does not match '%s'\n" % (value, validate)) # Reset value so we stay in the loop value = None
def _option(message): print yellow(indent(message, spaces=4))
def _create_config_bgp(self):
    """
    Render a quagga ``bgpd.conf`` from this node's peers/policies and
    write it to ``<config_dir>/bgpd.conf``, echoing the result in yellow.
    """
    c = CmdBuffer()
    # Global bgpd header
    c << 'hostname bgpd'
    c << 'password zebra'
    c << 'router bgp {0}'.format(self.asn)
    c << 'bgp router-id {0}'.format(self.router_id)
    # Enable graceful restart if any single peer requests it
    if any(info['graceful_restart'] for info in self.peers.itervalues()):
        c << 'bgp graceful-restart'

    # Optional confederation settings from the global bgpd config
    if 'global' in self.bgpd_config:
        if 'confederation' in self.bgpd_config['global']:
            conf = self.bgpd_config['global']['confederation']['config']
            c << 'bgp confederation identifier {0}'.format(conf['identifier'])
            c << 'bgp confederation peers {0}'.format(' '.join([str(i) for i in conf['member-as-list']]))

    # Per-neighbor configuration. NOTE(review): `version` is re-assigned on
    # every iteration, so after the loop it holds the LAST peer's IP version;
    # the zebra block below depends on that -- confirm this is intended.
    version = 4
    for peer, info in self.peers.iteritems():
        version = netaddr.IPNetwork(info['neigh_addr']).version
        n_addr = info['neigh_addr'].split('/')[0]
        # NOTE(review): emitted once per v6 peer, though it is a global
        # statement -- harmless duplication, presumably.
        if version == 6:
            c << 'no bgp default ipv4-unicast'

        c << 'neighbor {0} remote-as {1}'.format(n_addr, info['remote_as'])
        # For rapid convergence
        c << 'neighbor {0} advertisement-interval 1'.format(n_addr)
        if info['is_rs_client']:
            c << 'neighbor {0} route-server-client'.format(n_addr)
        # Attach any route-maps (typ is the direction, e.g. in/out)
        for typ, p in info['policies'].iteritems():
            c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'], typ)
        if info['passwd']:
            c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
        if info['passive']:
            c << 'neighbor {0} passive'.format(n_addr)
        # IPv6 neighbors must be activated under the ipv6 address-family
        if version == 6:
            c << 'address-family ipv6 unicast'
            c << 'neighbor {0} activate'.format(n_addr)
            c << 'exit-address-family'

    # Redistribute connected routes into BGP when zebra is running
    if self.zebra:
        if version == 6:
            c << 'address-family ipv6 unicast'
            c << 'redistribute connected'
            c << 'exit-address-family'
        else:
            c << 'redistribute connected'

    # One access-list + route-map per named policy
    for name, policy in self.policies.iteritems():
        c << 'access-list {0} {1} {2}'.format(name, policy['type'], policy['match'])
        c << 'route-map {0} permit 10'.format(name)
        c << 'match ip address {0}'.format(name)
        c << 'set metric {0}'.format(policy['med'])

    # Debug/logging directives for the test harness
    c << 'debug bgp as4'
    c << 'debug bgp fsm'
    c << 'debug bgp updates'
    c << 'debug bgp events'
    c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)

    with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
        print colors.yellow('[{0}\'s new bgpd.conf]'.format(self.name))
        print colors.yellow(indent(str(c)))
        f.writelines(str(c))
def deploy(id=None, silent=False, force=False): """ Perform an automatic deploy to the target requested. """ require('hosts') require('code_dir') # Ask for sudo at the begginning so we don't fail during deployment because of wrong pass if not sudo('whoami'): abort('Failed to elevate to root') return # Show log of changes, return if nothing to do revset = show_log(id) if not revset and not force: return # See if we have any requirements changes requirements_changes = force or vcs.changed_files(revset, r' requirements/') if requirements_changes: print colors.yellow("Will update requirements (and do migrations):") print indent(requirements_changes) # See if we have package.json changes package_changed = force or vcs.changed_files(revset, r' onepager/package.json') if package_changed: print colors.yellow("Will run npm install") # See if we have changes in app source or static files app_changed = force or vcs.changed_files(revset, [ r' onepager/app', r' onepager/static', r' onepager/settings', r'webpack' ]) if app_changed: print colors.yellow("Will run npm build") # See if we have any changes to migrations between the revisions we're applying migrations = force or migrate_diff(revset=revset, silent=True) if migrations: print colors.yellow("Will apply %d migrations:" % len(migrations)) print indent(migrations) # See if we have any changes to crontab config crontab_changed = force or vcs.changed_files(revset, r'deploy/crontab.conf') if crontab_changed: print colors.yellow("Will update cron entries") # see if nginx conf has changed if vcs.changed_files(revset, r' deploy/%s' % env.nginx_conf): print colors.red( "Warning: Nginx configuration change detected, also run: `fab %target% nginx_update`" ) if not silent: request_confirm("deploy") vcs.update(id) if requirements_changes: update_requirements() if migrations or requirements_changes: migrate(silent=True) if crontab_changed: with cd(env.code_dir): sudo('cp deploy/crontab.conf /etc/cron.d/onepager') 
collectstatic(npm_install=package_changed, npm_build=app_changed) restart_server(silent=True) # Run deploy systemchecks check()