def deploy(self, name, kwargs, data, line_number, in_deploy=True):
    '''
    Wraps a group of operations as a deploy, this should not be used
    directly, instead use ``pyinfra.api.deploy.deploy``.

    Args:
        name (str): deploy name, nested under any currently active deploy
        kwargs (dict): global operation kwargs for the deploy
        data: default data attached to the deploy
        line_number (int): line number the deploy was invoked from
        in_deploy (bool): value to set ``state.in_deploy`` to for the block
    '''

    # Handle nested deploy names
    if self.deploy_name:
        name = _make_name(self.deploy_name, name)

    # Store the previous values
    old_in_deploy = self.in_deploy
    old_deploy_name = self.deploy_name
    old_deploy_kwargs = self.deploy_kwargs
    old_deploy_data = self.deploy_data
    old_deploy_line_numbers = self.deploy_line_numbers
    self.in_deploy = in_deploy

    # Limit the new hosts to a subset of the old hosts if they existed
    if (
        old_deploy_kwargs
        and old_deploy_kwargs.get('hosts') is not None
    ):
        # If we have hosts - subset them based on the old hosts
        if 'hosts' in kwargs:
            kwargs['hosts'] = [
                host for host in kwargs['hosts']
                if host in old_deploy_kwargs['hosts']
            ]
        # Otherwise simply carry the previous hosts
        else:
            kwargs['hosts'] = old_deploy_kwargs['hosts']

    # Make new line numbers - note convert from and back to tuple to avoid
    # keeping deploy_line_numbers mutable.
    new_line_numbers = list(self.deploy_line_numbers or [])
    new_line_numbers.append(line_number)
    new_line_numbers = tuple(new_line_numbers)

    # Set the new values
    self.deploy_name = name
    self.deploy_kwargs = kwargs
    self.deploy_data = data
    self.deploy_line_numbers = new_line_numbers
    logger.debug('Starting deploy {0} (args={1}, data={2})'.format(
        name, kwargs, data,
    ))

    try:
        yield
    finally:
        # Restore the previous values even if the deploy body raised -
        # previously an exception would permanently leave the state
        # pointing at this (aborted) deploy.
        self.in_deploy = old_in_deploy
        self.deploy_name = old_deploy_name
        self.deploy_kwargs = old_deploy_kwargs
        self.deploy_data = old_deploy_data
        self.deploy_line_numbers = old_deploy_line_numbers
        logger.debug('Reset deploy to {0} (args={1}, data={2})'.format(
            old_deploy_name, old_deploy_kwargs, old_deploy_data,
        ))
def _get_group_data(deploy_dir):
    '''
    Load per-group data dicts from ``group_data/*.py`` under *deploy_dir*,
    keyed by group name (the filename without ``.py``).
    '''

    group_data = {}
    group_data_directory = path.join(deploy_dir, 'group_data')

    # No group_data directory means no group data at all
    if not path.exists(group_data_directory):
        return group_data

    for filename in listdir(group_data_directory):
        if not filename.endswith('.py'):
            continue

        group_data_file = path.join(group_data_directory, filename)
        group_name = path.basename(filename)[:-3]

        logger.debug(
            'Looking for group data in: {0}'.format(group_data_file))

        # Read the files locals into a dict
        attrs = exec_file(group_data_file, return_locals=True)

        # Keep only the public names as the group's data
        group_data[group_name] = {
            key: value
            for key, value in six.iteritems(attrs)
            if not key.startswith('_')
        }

    return group_data
def hosts(self, hosts):
    '''
    Context manager that temporarily limits operations to a subset of
    hosts. Deprecated - use plain ``if`` statements instead.

    Args:
        hosts: hosts (or names) to limit to; intersected with any
            already-active limit
    '''

    logger.warning((
        'Use of `State.hosts` is deprecated, '
        'please use normal `if` statements instead.'
    ))

    hosts = ensure_host_list(hosts, inventory=self.inventory)

    # Store the previous value
    old_limit_hosts = self.limit_hosts

    # Limit the new hosts to a subset of the old hosts if they existed
    if old_limit_hosts is not None:
        hosts = [
            host for host in hosts
            if host in old_limit_hosts
        ]

    # Set the new value
    self.limit_hosts = hosts
    logger.debug('Setting limit to hosts: {0}'.format(hosts))

    try:
        yield
    finally:
        # Restore the old value even if the body raised - previously an
        # exception inside the block left the limit permanently applied.
        self.limit_hosts = old_limit_hosts
        logger.debug('Reset limit to hosts: {0}'.format(old_limit_hosts))
def make_command(
    command,
    env=None,
    su_user=None,
    sudo=False,
    sudo_user=None,
    preserve_sudo_env=False,
    shell_executable=Config.SHELL,
):
    '''
    Builds a shell command with various kwargs.
    '''

    # Collect only the truthy options for the debug log line
    debug_meta = {
        key: value
        for key, value in (
            ('shell_executable', shell_executable),
            ('sudo', sudo),
            ('sudo_user', sudo_user),
            ('su_user', su_user),
            ('env', env),
        )
        if value
    }

    logger.debug('Building command ({0}): {1}'.format(
        ' '.join(
            '{0}: {1}'.format(key, value)
            for key, value in six.iteritems(debug_meta)
        ),
        command,
    ))

    # Use env & build our actual command
    if env:
        env_string = ' '.join(
            '{0}={1}'.format(key, value)
            for key, value in six.iteritems(env)
        )
        command = 'export {0}; {1}'.format(env_string, command)

    # Quote the command as a string
    command = shlex_quote(command)

    if su_user:
        # Switch user with su
        command = 'su {0} -c {1}'.format(su_user, command)
    else:
        # Otherwise just sh wrap the command
        command = '{0} -c {1}'.format(shell_executable, command)

    # Use sudo (w/user?)
    if sudo:
        sudo_bits = ['sudo', '-H']
        if preserve_sudo_env:
            sudo_bits.append('-E')
        if sudo_user:
            sudo_bits.extend(('-u', sudo_user))
        command = '{0} {1}'.format(' '.join(sudo_bits), command)

    return command
def _get_vagrant_ssh_config(queue, target):
    '''Fetch `vagrant ssh-config` output for *target* and push it onto *queue*.'''

    logger.debug('Loading SSH config for {0}'.format(target))

    config_lines = local.shell(
        'vagrant ssh-config {0}'.format(target),
        splitlines=True,
    )
    queue.put(config_lines)
def show(self):
    # Print an "unexpected internal exception" banner and the final
    # traceback frame to stderr, then dump the full trace to
    # ./pyinfra-debug.log for bug reports.
    sys.stderr.write('--> {0}:\n'.format(
        click.style(
            'An unexpected internal exception occurred',
            'red',
            bold=True,
        )))
    click.echo(err=True)

    traceback_lines = self.get_traceback_lines()
    traceback = self.get_traceback()

    # Syntax errors contain the filename/line/etc, but other exceptions
    # don't, so print the *last* call to stderr.
    if not isinstance(self.e, SyntaxError):
        sys.stderr.write(traceback_lines[-1])

    exception = self.get_exception()
    sys.stderr.write(exception)

    # Persist the complete trace + exception text to the debug log file
    with open('pyinfra-debug.log', 'w') as f:
        f.write(traceback)
        f.write(exception)

    logger.debug(traceback)
    logger.debug(exception)

    click.echo(err=True)
    click.echo('--> The full traceback has been written to {0}'.format(
        click.style('pyinfra-debug.log', bold=True),
    ), err=True)
    click.echo((
        '--> If this is unexpected please consider submitting a bug report '
        'on GitHub, for more information run `pyinfra --support`.'), err=True)
def _run_server_ops(state, host, progress=None):
    '''
    Run all ops for a single server.
    '''

    logger.debug('Running all ops on {0}'.format(host))

    for op_hash in state.get_op_order():
        op_meta = state.op_meta[op_hash]

        logger.info('--> {0} {1} on {2}'.format(
            click.style('--> Starting operation:', 'blue'),
            click.style(', '.join(op_meta['names']), bold=True),
            click.style(host.name, bold=True),
        ))

        result = _run_server_op(state, host, op_hash)

        # Trigger CLI progress if provided
        if progress:
            progress((host, op_hash))

        # A False result aborts the remaining ops for this host
        if result is False:
            raise PyinfraError('Error in operation {0} on {1}'.format(
                ', '.join(op_meta['names']), host,
            ))

        # Blank line between operations when running under the CLI
        if pyinfra.is_cli:
            print()
def _run_host_ops(state, host, progress=None):
    """
    Run all ops for a single server.
    """

    logger.debug("Running all ops on %s", host)

    for op_hash in state.get_op_order():
        op_meta = state.get_op_meta(op_hash)

        log_operation_start(op_meta)

        result = _run_host_op_with_context(state, host, op_hash)

        # Trigger CLI progress if provided
        if progress:
            progress((host, op_hash))

        # A False result aborts the remaining ops for this host
        if result is False:
            raise PyinfraError(
                "Error in operation {0} on {1}".format(
                    ", ".join(op_meta["names"]),
                    host,
                ),
            )

        # Blank line between operations when running under the CLI
        if pyinfra.is_cli:
            click.echo(err=True)
def show(self):
    # Print an "unexpected exception" banner and the final traceback frame
    # to stderr, then dump the full trace to ./pyinfra-debug.log.
    sys.stderr.write('--> {0}:\n'.format(click.style(
        'An unexpected exception occurred',
        'red',
        bold=True,
    )))
    click.echo()

    # _traceback was attached to the exception where it was first caught
    traceback = getattr(self.e, '_traceback')
    traceback_lines = format_tb(traceback)
    traceback = ''.join(traceback_lines)

    # Syntax errors contain the filename/line/etc, but other exceptions
    # don't, so print the *last* call to stderr.
    if not isinstance(self.e, SyntaxError):
        sys.stderr.write(traceback_lines[-1])

    exception = ''.join(format_exception(self.e.__class__, self.e, None))
    sys.stderr.write(exception)

    # Persist the complete trace + exception text to the debug log file
    with open('pyinfra-debug.log', 'w') as f:
        f.write(traceback)
        f.write(exception)

    logger.debug(traceback)
    logger.debug(exception)

    click.echo()
    click.echo('--> The full traceback has been written to {0}'.format(
        click.style('pyinfra-debug.log', bold=True),
    ))
def _run_server_ops(state, host, progress=None):
    '''
    Run all ops for a single server.
    '''

    logger.debug('Running all ops on {0}'.format(host))

    for op_hash in state.get_op_order():
        op_meta = state.get_op_meta(op_hash)

        _log_operation_start(op_meta)

        result = _run_server_op(state, host, op_hash)

        # Trigger CLI progress if provided
        if progress:
            progress((host, op_hash))

        # A False result aborts the remaining ops for this host
        if result is False:
            raise PyinfraError('Error in operation {0} on {1}'.format(
                ', '.join(op_meta['names']), host,
            ))

        # Blank line between operations when running under the CLI
        if pyinfra.is_cli:
            click.echo(err=True)
def run_local_process(
    command,
    stdin=None,
    timeout=None,
    print_output=False,
    print_prefix=None,
):
    # Execute *command* via the local shell, returning a tuple of
    # (exit code, combined stdout+stderr output lines).
    process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE, stdin=PIPE)

    if stdin:
        # NOTE(review): assumes write_stdin closes process.stdin when done,
        # otherwise the child could block waiting for EOF - confirm
        write_stdin(stdin, process.stdin)

    # Drain both pipes into one ordered buffer (handles timeout & printing)
    combined_output = read_buffers_into_queue(
        process.stdout,
        process.stderr,
        timeout=timeout,
        print_output=print_output,
        print_prefix=print_prefix,
    )

    logger.debug('--> Waiting for exit status...')
    process.wait()
    logger.debug('--> Command exit status: {0}'.format(process.returncode))

    # Close any open file descriptors
    process.stdout.close()
    process.stderr.close()

    return process.returncode, combined_output
def fail_hosts(self, hosts_to_fail, activated_count=None):
    """
    Flag a ``set`` of hosts as failed, error for ``config.FAIL_PERCENT``.
    """

    if not hosts_to_fail:
        return

    # Default the denominator to every host we activated this run
    if not activated_count:
        activated_count = len(self.activated_hosts)

    logger.debug("Failing hosts: %r", hosts_to_fail)

    self.failed_hosts.update(hosts_to_fail)
    self.active_hosts -= hosts_to_fail

    remaining_hosts = self.active_hosts

    # No hosts left!
    if not remaining_hosts:
        raise PyinfraError("No hosts remaining!")

    # Abort the run once too large a fraction of hosts has failed
    fail_percent = self.config.FAIL_PERCENT
    if fail_percent is not None:
        percent_failed = (1 - len(remaining_hosts) / activated_count) * 100

        if percent_failed > fail_percent:
            raise PyinfraError(
                "Over {0}% of hosts failed ({1}%)".format(
                    fail_percent,
                    int(round(percent_failed)),
                ),
            )
def make_win_command(
    command,
    env=None,
    shell_executable=Config.SHELL,
):
    '''
    Builds a windows command with various kwargs.
    '''

    # Collect only the truthy options for the debug log line
    debug_meta = {
        key: value
        for key, value in (
            ('shell_executable', shell_executable),
            ('env', env),
        )
        if value
    }

    logger.debug('Building command ({0}): {1}'.format(
        ' '.join(
            '{0}: {1}'.format(key, value)
            for key, value in six.iteritems(debug_meta)
        ),
        command,
    ))

    # Use env & build our actual command
    if env:
        env_string = ' '.join(
            '{0}={1}'.format(key, value)
            for key, value in six.iteritems(env)
        )
        # NOTE(review): `export` is POSIX shell syntax - presumably this
        # targets an sh-style shell on the Windows side; confirm upstream
        command = 'export {0}; {1}'.format(env_string, command)

    # Quote the command as a string
    command = shlex_quote(command)

    # (the original passed `command` through a no-op '{0}'.format here)
    return command
def _run_server_ops(state, host, progress=None):
    '''Run every queued operation, in order, against a single host.'''

    host_name = host.name
    logger.debug('Running all ops on {0}'.format(host_name))

    for op_hash in state.op_order:
        op_meta = state.op_meta[op_hash]
        op_names = ', '.join(op_meta['names'])

        logger.info('--> {0} {1} on {2}'.format(
            click.style('--> Starting operation:', 'blue'),
            click.style(op_names, bold=True),
            click.style(host_name, bold=True),
        ))

        result = _run_op(state, host, op_hash)

        # Trigger CLI progress if provided
        if progress:
            progress()

        # A False result aborts the remaining ops for this host
        if result is False:
            raise PyinfraError('Error in operation {0} on {1}'.format(
                op_names, host_name,
            ))

        if state.print_lines:
            print()
def fail_hosts(self, hosts_to_fail, activated_count=None):
    '''
    Flag a ``set`` of hosts as failed, error for ``config.FAIL_PERCENT``.

    Args:
        hosts_to_fail (set): hosts to remove from the active set
        activated_count (int): denominator for the failure percentage,
            defaults to ``len(self.activated_hosts)``

    Raises:
        PyinfraError: when no hosts remain, or the failed percentage
            exceeds ``config.FAIL_PERCENT``.
    '''

    if not hosts_to_fail:
        return

    activated_count = activated_count or len(self.activated_hosts)

    logger.debug('Failing hosts: {0}'.format(', '.join(
        (host.name for host in hosts_to_fail),
    )))

    # Remove the failed hosts from the inventory
    self.active_hosts -= hosts_to_fail

    # Check we're not above the fail percent
    active_hosts = self.active_hosts

    # No hosts left!
    if not active_hosts:
        raise PyinfraError('No hosts remaining!')

    if self.config.FAIL_PERCENT is not None:
        # Force true division: under Python 2 (this codebase supports six)
        # the plain `/` floors to 0 or 1, collapsing the percentage to
        # always-100% or always-0%.
        percent_failed = (
            1 - len(active_hosts) / float(activated_count)
        ) * 100

        if percent_failed > self.config.FAIL_PERCENT:
            raise PyinfraError('Over {0}% of hosts failed ({1}%)'.format(
                self.config.FAIL_PERCENT,
                int(round(percent_failed)),
            ))
def _get_group_data(dirname):
    """Collect group data dicts from ``group_data/*.py`` under *dirname*."""

    group_data = {}
    group_data_directory = path.join(dirname, "group_data")

    logger.debug("Checking possible group_data directory: %s", dirname)

    # No group_data directory means no group data at all
    if not path.exists(group_data_directory):
        return group_data

    for filename in listdir(group_data_directory):
        if not filename.endswith(".py"):
            continue

        group_data_file = path.join(group_data_directory, filename)
        group_name = path.basename(filename)[:-3]

        logger.debug("Looking for group data in: %s", group_data_file)

        # Read the files locals into a dict
        attrs = exec_file(group_data_file, return_locals=True)

        # Respect an explicit __all__, otherwise consider every name
        exported_keys = attrs.get("__all__", attrs.keys())

        group_data[group_name] = {
            key: value
            for key, value in attrs.items()
            if key in exported_keys and not key.startswith("_")
        }

    return group_data
def make_command(
    command,
    env=None,
    su_user=None,
    sudo=False,
    sudo_user=None,
    preserve_sudo_env=False,
):
    '''
    Builds a shell command with various kwargs.

    Args:
        command (str): the base command to wrap
        env (dict): environment variables to prefix onto the command
        su_user (str): user to run the command as via ``su``
        sudo (bool): whether to wrap the command with ``sudo``
        sudo_user (str): user to ``sudo -u`` to
        preserve_sudo_env (bool): pass ``-E`` to sudo to keep the caller env

    Returns:
        str: the fully wrapped shell command string
    '''

    debug_meta = {}

    for key, value in (
        ('sudo', sudo),
        ('sudo_user', sudo_user),
        ('su_user', su_user),
        ('env', env),
    ):
        if value:
            debug_meta[key] = value

    logger.debug('Building command ({0})'.format(' '.join(
        '{0}: {1}'.format(key, value)
        for key, value in six.iteritems(debug_meta)
    )))

    # Use env & build our actual command
    if env:
        env_string = ' '.join([
            '{0}={1}'.format(key, value)
            for key, value in six.iteritems(env)
        ])
        command = '{0} {1}'.format(env_string, command)

    # Escape single quotes for embedding inside the single-quoted sh -c
    # string below. A backslash is literal inside POSIX single quotes, so
    # the previous "\\'" replacement produced broken quoting for any
    # command containing a quote; the portable idiom is to close the
    # quote, emit an escaped quote, then reopen: '\''
    command = command.replace("'", "'\\''")

    # Switch user with su
    if su_user:
        command = "su {0} -c '{1}'".format(su_user, command)

    # Otherwise just sh wrap the command
    else:
        command = "sh -c '{0}'".format(command)

    # Use sudo (w/user?)
    if sudo:
        sudo_bits = ['sudo', '-H']

        if preserve_sudo_env:
            sudo_bits.append('-E')

        if sudo_user:
            sudo_bits.extend(('-u', sudo_user))

        command = '{0} {1}'.format(' '.join(sudo_bits), command)

    return command
def connect(state, host, **kwargs):
    '''
    Connect to a single host. Returns the SSH client if succesful. Stateless by
    design so can be run in parallel.
    '''
    # On any failure the error is logged via _log_connect_error and None is
    # implicitly returned.

    logger.debug('Connecting to: {0} ({1})'.format(host.name, kwargs))

    name = host.name
    # The SSH hostname may differ from the inventory name
    hostname = host.data.ssh_hostname or name

    try:
        # Create new client & connect to the host
        client = SSHClient()
        client.set_missing_host_key_policy(MissingHostKeyPolicy())
        client.connect(hostname, **kwargs)

        # Enable SSH forwarding
        session = client.get_transport().open_session()
        AgentRequestHandler(session)

        # Log
        logger.info('{0}{1}'.format(
            host.print_prefix,
            click.style('Connected', 'green'),
        ))

        return client

    except AuthenticationException as e:
        # Summarise the auth options we tried, masking the pkey object
        # behind the configured key path for readability
        auth_kwargs = {}

        for key, value in kwargs.items():
            if key in ('username', 'password'):
                auth_kwargs[key] = value
                continue

            if key == 'pkey' and value:
                auth_kwargs['key'] = host.data.ssh_key

        auth_args = ', '.join(
            '{0}={1}'.format(key, value)
            for key, value in auth_kwargs.items()
        )

        _log_connect_error(host, 'Authentication error', auth_args)

    except SSHException as e:
        _log_connect_error(host, 'SSH error', e)

    except gaierror:
        _log_connect_error(host, 'Could not resolve hostname', hostname)

    except socket_error as e:
        _log_connect_error(host, 'Could not connect', e)

    except EOFError as e:
        _log_connect_error(host, 'EOF error', e)
def _get_vagrant_ssh_config(queue, progress, target):
    """Fetch `vagrant ssh-config` output for *target*, queue it, mark progress."""

    logger.debug("Loading SSH config for %s", target)

    config_lines = local.shell(
        "vagrant ssh-config {0}".format(target),
        splitlines=True,
    )
    queue.put(config_lines)

    progress(target)
def activate_host(self, host):
    """
    Flag a host as active.
    """

    logger.debug("Activating host: %s", host)

    # Track the host in both sets: ``active_hosts`` shrinks as hosts fail
    # while ``activated_hosts`` does not, which lets us compute the failed %.
    for host_set in (self.activated_hosts, self.active_hosts):
        host_set.add(host)
def run_shell_command(
    state, host, command,
    get_pty=False,  # ignored
    timeout=None,
    stdin=None,
    success_exit_codes=None,
    print_output=False,
    print_input=False,
    return_combined_output=False,
    **command_kwargs
):
    '''
    Execute a command on the local machine.

    Args:
        state (``pyinfra.api.State`` object): state object for this command
        host (``pyinfra.api.Host`` object): the target host
        command (string): actual command to execute
        get_pty (boolean): ignored for local execution
        timeout (int): timeout for this command to complete before erroring
        stdin: data passed to the process' standard input
        success_exit_codes (list): exit codes (besides 0) treated as success
        print_output (boolean): print the command's output while running
        print_input (boolean): print the command before running it
        return_combined_output (boolean): return interleaved stdout+stderr
        **command_kwargs: forwarded to ``make_unix_command`` (e.g. ``sudo``,
            ``sudo_user``, ``env``)

    Returns:
        tuple: (success, stdout, stderr) by default, or
        (success, combined_output) when ``return_combined_output`` is set;
        stdout and stderr are both lists of strings from each buffer.
    '''

    command = make_unix_command(command, **command_kwargs)

    logger.debug('--> Running command on localhost: {0}'.format(command))

    if print_input:
        click.echo('{0}>>> {1}'.format(host.print_prefix, command))

    return_code, combined_output = run_local_process(
        command,
        stdin=stdin,
        timeout=timeout,
        print_output=print_output,
        print_prefix=host.print_prefix,
    )

    # Honour custom success exit codes when given, else 0 == success
    if success_exit_codes:
        status = return_code in success_exit_codes
    else:
        status = return_code == 0

    if return_combined_output:
        return status, combined_output

    stdout, stderr = split_combined_output(combined_output)
    return status, stdout, stderr
def get_ssh_config(user_config_file=None):
    '''
    Parse and return the user's SSH config (defaults to ``~/.ssh/config``),
    or None if the file does not exist.
    '''

    logger.debug("Loading SSH config: %s", user_config_file)

    config_filename = user_config_file
    if config_filename is None:
        config_filename = path.expanduser("~/.ssh/config")

    if not path.exists(config_filename):
        return None

    ssh_config = SSHConfig()
    with open(config_filename) as f:
        ssh_config.parse(f)
    return ssh_config
def connect(state, host):
    '''
    Connect to a single host. Returns the SSH client if succesful. Stateless by
    design so can be run in parallel.
    '''
    # On failure the relevant error is raised via _raise_connect_error.

    kwargs = _make_paramiko_kwargs(state, host)
    logger.debug('Connecting to: {0} ({1})'.format(host.name, kwargs))

    # Hostname can be provided via SSH config (alias), data, or the hosts name
    hostname = kwargs.pop(
        'hostname',
        host.data.ssh_hostname or host.name,
    )

    try:
        # Create new client & connect to the host
        client = SSHClient()
        client.set_missing_host_key_policy(MissingHostKeyPolicy())
        client.connect(hostname, **kwargs)

        # Enable SSH forwarding
        session = client.get_transport().open_session()
        AgentRequestHandler(session)

        return client

    except AuthenticationException:
        # Summarise the auth options we tried, masking the pkey object
        # behind the configured key path for readability
        auth_kwargs = {}

        for key, value in kwargs.items():
            if key in ('username', 'password'):
                auth_kwargs[key] = value
                continue

            if key == 'pkey' and value:
                auth_kwargs['key'] = host.data.ssh_key

        auth_args = ', '.join(
            '{0}={1}'.format(key, value)
            for key, value in auth_kwargs.items()
        )

        _raise_connect_error(host, 'Authentication error', auth_args)

    except SSHException as e:
        _raise_connect_error(host, 'SSH error', e)

    except gaierror:
        _raise_connect_error(host, 'Could not resolve hostname', hostname)

    except socket_error as e:
        _raise_connect_error(host, 'Could not connect', e)

    except EOFError as e:
        _raise_connect_error(host, 'EOF error', e)
def activate_host(self, host):
    '''
    Flag a host as active.
    '''

    logger.debug('Activating host: {0}'.format(host))

    # Track the host in both sets: active shrinks as hosts fail while
    # activated does not, which lets us compute the failed % later.
    for host_set in (self.activated_hosts, self.active_hosts):
        host_set.add(host)
def decorated_func(*args, **kwargs):
    # Resolve the state & host this deploy targets; three call styles are
    # supported: explicit kwargs, legacy positional args, CLI pseudo modules.

    # State & host passed in as kwargs (API, nested op, @deploy op)
    if 'state' in kwargs and 'host' in kwargs:
        state = kwargs['state']
        host = kwargs['host']

    # State & host passed in as first two arguments (LEGACY)
    elif len(args) >= 2 and isinstance(args[0], State) and isinstance(args[1], Host):
        show_state_host_arguments_warning(get_call_location())
        state = kwargs['state'] = args[0]
        host = kwargs['host'] = args[1]
        # Strip state/host off the positional args now they live in kwargs
        args_copy = list(args)
        args = args_copy[2:]

    # Finally, still no state+host? Use pseudo if we're CLI mode, or fail
    elif pyinfra.is_cli:
        state = kwargs['state'] = pseudo_state._module
        host = kwargs['host'] = pseudo_host._module

        # Pseudo modules are unset (falsy) outside a run; also reject
        # calling from inside another deploy without explicit state/host
        if not state or not host or state.in_deploy:
            raise PyinfraError((
                'Nested deploy called without state/host: {0} ({1})'
            ).format(func, get_call_location()))

    else:
        raise PyinfraError((
            'Deploy called without state/host: {0} ({1})'
        ).format(func, get_call_location()))

    # Determine where (file:line) this deploy was invoked from; nested
    # calls may pass it explicitly via the private _line_number kwarg
    line_number = kwargs.pop('_line_number', None)
    filename = 'CLI'
    if line_number is None:
        frameinfo = get_caller_frameinfo()
        line_number = frameinfo.lineno
        filename = frameinfo.filename

    logger.debug('Adding deploy, called @ {0}:{1}'.format(
        filename, line_number,
    ))

    # Pull out the global operation kwargs (sudo etc) for the whole deploy
    deploy_kwargs = pop_global_op_kwargs(state, kwargs)

    # Name the deploy
    deploy_name = getattr(func, 'deploy_name', func.__name__)
    deploy_data = getattr(func, 'deploy_data', None)

    with state.deploy(deploy_name, deploy_kwargs, deploy_data, line_number):
        # Execute the deploy, passing state and host
        func(*args, **kwargs)
def deploy(self, name, kwargs, data):
    '''
    Wraps a group of operations as a deploy, this should not be used
    directly, instead use ``pyinfra.api.deploy.deploy``.

    Args:
        name (str): deploy name, nested under any currently active deploy
        kwargs (dict): global operation kwargs for the deploy
        data: default data attached to the deploy
    '''

    # Handle nested deploy names
    if self.deploy_name:
        name = _make_name(self.deploy_name, name)

    # Store the previous values - including in_deploy, so exiting a nested
    # deploy restores the outer deploy's flag; previously it was reset to
    # False unconditionally, breaking nested deploys.
    old_in_deploy = self.in_deploy
    old_deploy_name = self.deploy_name
    old_deploy_kwargs = self.deploy_kwargs
    old_deploy_data = self.deploy_data

    self.in_deploy = True

    # Limit the new hosts to a subset of the old hosts if they existed
    if (old_deploy_kwargs and old_deploy_kwargs.get('hosts') is not None):
        # If we have hosts - subset them based on the old hosts
        if 'hosts' in kwargs:
            kwargs['hosts'] = [
                host for host in kwargs['hosts']
                if host in old_deploy_kwargs['hosts']
            ]
        # Otherwise simply carry the previous hosts
        else:
            kwargs['hosts'] = old_deploy_kwargs['hosts']

    # Set the new values
    self.deploy_name = name
    self.deploy_kwargs = kwargs
    self.deploy_data = data
    logger.debug('Starting deploy {0} (args={1}, data={2})'.format(
        name, kwargs, data,
    ))

    try:
        yield
    finally:
        # Restore the previous values even if the deploy body raised
        self.deploy_name = old_deploy_name
        self.deploy_kwargs = old_deploy_kwargs
        self.deploy_data = old_deploy_data
        self.in_deploy = old_in_deploy
        logger.debug('Reset deploy to {0} (args={1}, data={2})'.format(
            old_deploy_name, old_deploy_kwargs, old_deploy_data,
        ))
def run_shell_command(
    state, host, command,
    get_pty=False,
    timeout=None,
    stdin=None,
    success_exit_codes=None,
    print_output=False,
    print_input=False,
    return_combined_output=False,
    use_sudo_password=False,
    **command_kwargs
):
    '''
    Execute a command inside the host's configured chroot directory.

    Builds the unix command from ``command_kwargs``, wraps it in
    ``chroot <dir> sh -c '...'`` and delegates execution to
    ``run_local_shell_command``.
    '''

    if use_sudo_password:
        # Resolve the sudo password up front via the helper
        command_kwargs['use_sudo_password'] = get_sudo_password(
            state, host, use_sudo_password,
            run_shell_command=run_shell_command,
            put_file=put_file,
        )

    # The chroot target directory is attached to the host's data
    chroot_directory = host.host_data['chroot_directory']

    command = make_unix_command(command, **command_kwargs)
    # Quote so the whole inner command survives the outer sh -c wrapper
    command = QuoteString(command)

    logger.debug(
        '--> Running chroot command on ({0}):{1}'.format(
            chroot_directory,
            command,
        ),
    )

    chroot_command = StringCommand(
        'chroot', chroot_directory,
        'sh', '-c', command,
    )

    return run_local_shell_command(
        state,
        host,
        chroot_command,
        timeout=timeout,
        stdin=stdin,
        success_exit_codes=success_exit_codes,
        print_output=print_output,
        print_input=print_input,
        return_combined_output=return_combined_output,
    )
def __exit__(self, type_, value, traceback): self.state.pipelining = False # Get pipelined facts! # for name, args in six.iteritems(self.state.facts_to_pipeline): # get_facts(self.state, name, pipeline_args=args) # Actually build our ops for (host_name, func, args, kwargs) in self.state.ops_to_pipeline: logger.debug('Replaying op: {0}, args={1}, kwargs={2}'.format( func, args, kwargs)) func(self.state, self.state.inventory[host_name], *args, **kwargs)
def connect(state, host, **kwargs):
    '''
    Connect to a single host. Returns the SSH client if succesful. Stateless by
    design so can be run in parallel.
    '''
    # On any failure the error is logged and None is implicitly returned.

    logger.debug('Connecting to: {0} ({1})'.format(host.name, kwargs))

    name = host.name
    # The SSH hostname may differ from the inventory name
    hostname = host.data.ssh_hostname or name

    try:
        # Create new client & connect to the host
        client = SSHClient()
        client.set_missing_host_key_policy(MissingHostKeyPolicy())
        client.connect(hostname, **kwargs)

        # Enable SSH forwarding
        session = client.get_transport().open_session()
        AgentRequestHandler(session)

        # Log
        logger.info('{0}{1}'.format(
            host.print_prefix,
            click.style('Connected', 'green'),
        ))

        return client

    except AuthenticationException as e:
        logger.error('Auth error on: {0}, {1}'.format(name, e))

    except SSHException as e:
        logger.error('SSH error on: {0}, {1}'.format(name, e))

    except gaierror:
        # Distinguish inventory-name vs explicit SSH hostname resolution
        if hostname == name:
            logger.error('Could not resolve {0}'.format(name))
        else:
            logger.error('Could not resolve for {0} (SSH host: {1})'.format(
                name, hostname))

    except socket_error as e:
        logger.error(
            'Could not connect: {0}:{1}, {2}'.format(name, kwargs.get('port', 22), e),
        )

    except EOFError as e:
        logger.error('EOF error connecting to {0}: {1}'.format(name, e))
def _run_server_ops(state, hostname):
    '''Run every queued operation, in order, on a single host.'''

    logger.debug('Running all ops on {}'.format(hostname))

    for op_hash in state.op_order:
        op_meta = state.op_meta[op_hash]
        op_names = ', '.join(op_meta['names'])

        logger.info('{0} {1} on {2}'.format(
            colored('Starting operation:', 'blue'),
            colored(op_names, attrs=['bold']),
            colored(hostname, attrs=['bold']),
        ))

        # A False result aborts the remaining ops for this host
        if _run_op(state, hostname, op_hash) is False:
            raise PyinfraError('Error in operation {0} on {1}'.format(
                op_names, hostname,
            ))

        if state.print_lines:
            print()