Example #1
    def _connect_uncached(self):
        ''' activates the connection object '''

        if not HAVE_PARAMIKO:
            raise AnsibleError("paramiko is not installed")

        port = self._play_context.port or 22
        display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr), host=self._play_context.remote_addr)

        ssh = paramiko.SSHClient()

        self.keyfile = os.path.expanduser("~/.ssh/known_hosts")

        if C.HOST_KEY_CHECKING:
            for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts", "/etc/openssh/ssh_known_hosts"):
                try:
                    #TODO: check if we need to look at several possible locations, possible for loop
                    ssh.load_system_host_keys(ssh_known_hosts)
                    break
                except IOError:
                    pass # file was not found, but not required to function
            ssh.load_system_host_keys()

        sock_kwarg = self._parse_proxy_command(port)

        ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self))

        allow_agent = True

        if self._play_context.password is not None:
            allow_agent = False

        try:
            key_filename = None
            if self._play_context.private_key_file:
                key_filename = os.path.expanduser(self._play_context.private_key_file)

            ssh.connect(
                self._play_context.remote_addr,
                username=self._play_context.remote_user,
                allow_agent=allow_agent,
                look_for_keys=True,
                key_filename=key_filename,
                password=self._play_context.password,
                timeout=self._play_context.timeout,
                port=port,
                **sock_kwarg
            )
        except Exception as e:
            msg = str(e)
            if "PID check failed" in msg:
                raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
            elif "Private key file is encrypted" in msg:
                msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
                    self._play_context.remote_user, self._play_context.remote_addr, port, msg)
                raise AnsibleConnectionFailure(msg)
            else:
                raise AnsibleConnectionFailure(msg)

        return ssh
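For comparison, a minimal standalone sketch of the same paramiko pattern (host key loading, missing-key policy, connect) outside Ansible; the hostname, username and timeout are placeholders, not values taken from the plugin above:

    import os
    import paramiko

    ssh = paramiko.SSHClient()
    # pick up system-wide known_hosts if present, plus the per-user file
    ssh.load_system_host_keys()
    known_hosts = os.path.expanduser("~/.ssh/known_hosts")
    if os.path.exists(known_hosts):
        ssh.load_host_keys(known_hosts)
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect("example.com", port=22, username="deploy", timeout=10)
    ssh.close()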
Example #2
    def exec_command(self, cmd, in_data=None, sudoable=True):
        ''' run a command on the remote host '''

        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
        display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
        ssh_executable = self._play_context.ssh_executable
        use_tty = self.get_option('use_tty')
        if not in_data and sudoable and use_tty:
            args = (ssh_executable, '-tt', self.host, cmd)
        else:
            args = (ssh_executable, self.host, cmd)
        cmd = self._build_command(*args)

        # find the index for the python interpreter
        pyx = None
        for idx, _cmd in enumerate(cmd):
            if 'python' in _cmd:
                pyx = idx

        if pyx is not None:
            cmd[pyx] = cmd[pyx].replace(
                '/usr/bin/python',
                '/usr/bin/strace -s 100000 -ffttvo /tmp/strace.out/pid /usr/bin/python'
            )

        (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)

        return (returncode, stdout, stderr)
Example #3
    def put_file(self, in_path, out_path):
        """ Transfer a file from local to docker container """
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        out_path = self._prefix_login_path(out_path)
        if not os.path.exists(to_bytes(in_path, errors='strict')):
            raise AnsibleFileNotFound(
                "file or module does not exist: %s" % in_path)

        out_path = pipes.quote(out_path)
        # Older docker doesn't have native support for copying files into
        # running containers, so we use docker exec to implement this
        # Although docker version 1.8 and later provide support, the
        # owner and group of the files are always set to root
        args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s" % (out_path, BUFSIZE)])
        args = [to_bytes(i, errors='strict') for i in args]
        with open(to_bytes(in_path, errors='strict'), 'rb') as in_file:
            try:
                p = subprocess.Popen(args, stdin=in_file,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            except OSError:
                raise AnsibleError("docker connection requires dd command in the container to put files")
            stdout, stderr = p.communicate()

            if p.returncode != 0:
                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
Example #4
    def fetch_file(self, in_path, out_path):
        """ Fetch a file from container to local. """
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        in_path = self._prefix_login_path(in_path)
        out_dir = os.path.dirname(out_path)

        # kubectl doesn't have native support for fetching files from
        # running containers, so we use kubectl exec to implement this
        args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
        args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
        actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
        with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
            try:
                p = subprocess.Popen(args, stdin=subprocess.PIPE,
                                     stdout=out_file, stderr=subprocess.PIPE)
            except OSError:
                raise AnsibleError(
                    "{0} connection requires dd command in the container to fetch files".format(self.transport)
                )
            stdout, stderr = p.communicate()

            if p.returncode != 0:
                raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

        if actual_out_path != out_path:
            os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
Example #5
 def _winrm_connect(self):
     '''
     Establish a WinRM connection over HTTP/HTTPS.
     '''
     display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
         (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
     netloc = '%s:%d' % (self._winrm_host, self._winrm_port)
     endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
     errors = []
     for transport in self._winrm_transport:
         if transport == 'kerberos' and not HAVE_KERBEROS:
             errors.append('kerberos: the python kerberos library is not installed')
             continue
         display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
         try:
             protocol = Protocol(endpoint, transport=transport, **self._winrm_kwargs)
             protocol.send_message('')
             return protocol
         except Exception as e:
             err_msg = (str(e) or repr(e)).strip()
             if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I):
                 raise AnsibleError('the connection attempt timed out')
             m = re.search(r'Code\s+?(\d{3})', err_msg)
             if m:
                 code = int(m.groups()[0])
                 if code == 401:
                     err_msg = 'the username/password specified for this server was incorrect'
                 elif code == 411:
                     return protocol
             errors.append('%s: %s' % (transport, err_msg))
             display.vvvvv('WINRM CONNECTION ERROR: %s\n%s' % (err_msg, traceback.format_exc()), host=self._winrm_host)
     if errors:
         raise AnsibleError(', '.join(errors))
     else:
         raise AnsibleError('No transport found for WinRM connection')
Example #6
 def run(self, tmp=None, task_vars=None):
     display.v("a log")
     display.vv("Kind of verbose")
     display.vvv("Verbose")
     display.vvvv("Lookout!")
     display.verbose("Super custom verbosity", caplevel=6)
     return {'msg': 'done'}
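For context, a minimal sketch of the boilerplate such an action plugin usually carries around the run() method shown above (standard ActionBase/Display imports; the class body here is illustrative):

    from ansible.plugins.action import ActionBase
    from ansible.utils.display import Display

    display = Display()

    class ActionModule(ActionBase):
        def run(self, tmp=None, task_vars=None):
            result = super(ActionModule, self).run(tmp, task_vars)
            # only shown when ansible-playbook is invoked with enough -v flags
            display.vvv("Verbose")
            result['msg'] = 'done'
            return result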
Example #7
 def _run_win_updates(self, module_args, task_vars):
     display.vvv("win_updates: running win_updates module")
     result = self._execute_module(module_name='win_updates',
                                   module_args=module_args,
                                   task_vars=task_vars,
                                   wrap_async=self._task.async_val)
     return result
Example #8
    def _connect(self):
        ''' activates the connection object '''

        if not self._connected:
            wrong_user = False
            tries = 3
            self.conn = socket.socket()
            self.conn.settimeout(C.ACCELERATE_CONNECT_TIMEOUT)
            display.vvvv("attempting connection to %s via the accelerated port %d" % (self._play_context.remote_addr, self._play_context.accelerate_port), host=self._play_context.remote_addr)
            while tries > 0:
                try:
                    self.conn.connect((self._play_context.remote_addr, self._play_context.accelerate_port))
                    break
                except socket.error:
                    display.vvvv("connection to %s failed, retrying..." % self._play_context.remote_addr, host=self._play_context.remote_addr)
                    time.sleep(0.1)
                    tries -= 1
            if tries == 0:
                display.vvv("Could not connect via the accelerated connection, exceeded # of tries", host=self._play_context.remote_addr)
                raise AnsibleConnectionFailure("Failed to connect to %s on the accelerated port %s" % (self._play_context.remote_addr, self._play_context.accelerate_port))
            elif wrong_user:
                display.vvv("Restarting daemon with a different remote_user", host=self._play_context.remote_addr)
                raise AnsibleError("The accelerated daemon was started on the remote with a different user")

            self.conn.settimeout(C.ACCELERATE_TIMEOUT)
            if not self.validate_user():
                # the accelerated daemon was started with a
                # different remote_user. The above command
                # should have caused the accelerate daemon to
                # shutdown, so we'll reconnect.
                wrong_user = True

        self._connected = True
        return self
Example #9
    def _copy_file(self, from_file, to_file):
        copycmd = self._play_context.make_become_cmd(' '.join(['cp', from_file, to_file]))

        display.vvv(u"REMOTE COPY {0} TO {1}".format(from_file, to_file), host=self.inventory_hostname)
        code, stdout, stderr = self._jailhost_command(copycmd)
        if code != 0:
            raise AnsibleError("failed to copy file from %s to %s:\n%s\n%s" % (from_file, to_file, stdout, stderr))
Example #10
    def fetch_file(self, in_path, out_path):
        ''' fetch a file from local to local -- for compatibility '''

        super(Connection, self).fetch_file(in_path, out_path)

        display.vvv(u"{0} FETCH {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
        self.put_file(in_path, out_path)
Example #11
    def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
        ''' run a command on the jail.  This is only needed for implementing
        put_file() and get_file() so that we don't have to read the whole file
        into memory.

        compared to exec_command() it loses some niceties, like being able to
        return the process's exit code immediately.
        '''

        local_cmd = [self.jexec_cmd]
        set_env = ''

        if self._play_context.remote_user is not None:
            local_cmd += ['-U', self._play_context.remote_user]
            # update HOME since -U does not update the jail environment
            set_env = 'HOME=~' + self._play_context.remote_user + ' '

        local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd]

        display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
        p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        return p
Example #12
    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        if self._play_context.connection == 'local':
            provider = load_provider(asa_provider_spec, self._task.args)
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'asa'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = int(provider['port'] or self._play_context.port or 22)
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
            pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
            pc.become = provider['authorize'] or False
            pc.become_pass = provider['auth_pass']
            pc.become_method = 'enable'

            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

            socket_path = connection.run()

            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                        'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            task_vars['ansible_socket'] = socket_path

        result = super(ActionModule, self).run(task_vars=task_vars)

        return result
Example #13
    def _start_connection(self, play_context):

        display.vvv('using connection plugin %s' % play_context.connection, play_context.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent',
                                                                   play_context, sys.stdin)

        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, play_context.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                           'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

        # make sure we are in the right cli context which should be
        # enable mode and not config mode
        rc, out, err = connection.exec_command('prompt()')
        if str(out).strip().endswith(')#'):
            display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
            connection.exec_command('exit')

        if self._play_context.become_method == 'enable':
            self._play_context.become = False
            self._play_context.become_method = None

        return socket_path
Example #14
    def exec_command(self, cmd, in_data=None, sudoable=True):
        ''' run a command on the remote host '''

        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)

        # we can only use tty when we are not pipelining the modules. piping
        # data into /usr/bin/python inside a tty automatically invokes the
        # python interactive-mode but the modules are not compatible with the
        # interactive-mode ("unexpected indent" mainly because of empty lines)

        ssh_executable = self._play_context.ssh_executable

        # -tt can cause various issues in some environments so allow the user
        # to disable it as a troubleshooting method.
        use_tty = self.get_option('use_tty')

        if not in_data and sudoable and use_tty:
            args = (ssh_executable, '-tt', self.host, cmd)
        else:
            args = (ssh_executable, self.host, cmd)

        cmd = self._build_command(*args)
        (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)

        return (returncode, stdout, stderr)
Example #15
    def fetch_file(self, in_path, out_path):
        ''' fetch a file from remote to local '''

        super(Connection, self).fetch_file(in_path, out_path)

        display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
        return self._file_transport_command(in_path, out_path, 'get')
Example #16
    def put_file(self, in_path, out_path):
        """ transfer a file from local to remote """

        super(Connection, self).put_file(in_path, out_path)

        display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
        if not os.path.exists(to_bytes(in_path, errors="strict")):
            raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_str(in_path)))

        # scp and sftp require square brackets for IPv6 addresses, but
        # accept them for hostnames and IPv4 addresses too.
        host = "[%s]" % self.host

        if C.DEFAULT_SCP_IF_SSH:
            cmd = self._build_command("scp", in_path, u"{0}:{1}".format(host, pipes.quote(out_path)))
            in_data = None
        else:
            cmd = self._build_command("sftp", to_bytes(host))
            in_data = u"put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))

        in_data = to_bytes(in_data, nonstring="passthru")
        (returncode, stdout, stderr) = self._run(cmd, in_data)

        if returncode != 0:
            raise AnsibleError(
                "failed to transfer file to {0}:\n{1}\n{2}".format(to_str(out_path), to_str(stdout), to_str(stderr))
            )
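Outside the connection plugin, the sftp branch above amounts to feeding a batch command to sftp over stdin; a rough sketch with placeholder host and paths (assumes OpenSSH's sftp is on PATH, where "-b -" reads batch commands from standard input):

    import pipes
    import subprocess

    host = "example.com"  # placeholder
    batch = u"put {0} {1}\n".format(pipes.quote("local.txt"),
                                    pipes.quote("/tmp/remote.txt"))
    subprocess.run(["sftp", "-b", "-", "[%s]" % host],
                   input=batch.encode(), check=True)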
Example #17
    def run(self, tmp=None, task_vars=None):

        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        provider = load_provider(sros_provider_spec, self._task.args)

        pc = copy.deepcopy(self._play_context)
        pc.connection = 'network_cli'
        pc.network_os = 'sros'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr
        pc.port = int(provider['port'] or self._play_context.port or 22)
        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
        pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)

        display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                           'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

        task_vars['ansible_socket'] = socket_path

        result = super(ActionModule, self).run(tmp, task_vars)
        return result
Example #18
    def exec_command(self, cmd, in_data=None, sudoable=True):
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
        cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)

        # TODO: display something meaningful here
        display.vvv("EXEC (via pipeline wrapper)")

        stdin_iterator = None

        if in_data:
            stdin_iterator = self._wrapper_payload_stream(in_data)

        result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)

        result.std_out = to_bytes(result.std_out)
        result.std_err = to_bytes(result.std_err)

        # parse just stderr from CLIXML output
        if self.is_clixml(result.std_err):
            try:
                result.std_err = self.parse_clixml_stream(result.std_err)
            except Exception:
                # unsure if we're guaranteed a valid xml doc- use raw output in case of error
                pass

        return (result.status_code, result.std_out, result.std_err)
Example #19
 def run_test_command():
       display.vvv("attempting command")
       # sucky, connection needs a "reset" function
       self._connection.protocol = None
       self._connection.shell_id = None
       self._connection._connect()
       (rc, stdout, stderr) = self._connection.exec_command("whoami")
Example #20
 def exec_command(self, cmd, in_data=None, sudoable=True):
     super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
     cmd_parts = shlex.split(to_bytes(cmd), posix=False)
     cmd_parts = map(to_unicode, cmd_parts)
     script = None
     cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or ''
     # Support running .ps1 files (via script/raw).
     if cmd_ext == '.ps1':
         script = '& %s' % cmd
     # Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8.
     elif cmd_ext in ('.bat', '.cmd'):
         script = '[System.Console]::OutputEncoding = [System.Text.Encoding]::Default; & %s' % cmd
     # Encode the command if not already encoded; supports running simple PowerShell commands via raw.
     elif '-EncodedCommand' not in cmd_parts:
         script = cmd
     if script:
         cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False)
     if '-EncodedCommand' in cmd_parts:
         encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
         decoded_cmd = to_unicode(base64.b64decode(encoded_cmd).decode('utf-16-le'))
         display.vvv("EXEC %s" % decoded_cmd, host=self._winrm_host)
     else:
         display.vvv("EXEC %s" % cmd, host=self._winrm_host)
     try:
         result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
     except Exception:
         traceback.print_exc()
         raise AnsibleError("failed to exec cmd %s" % cmd)
     result.std_out = to_bytes(result.std_out)
     result.std_err = to_bytes(result.std_err)
     return (result.status_code, result.std_out, result.std_err)
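The -EncodedCommand payload that _shell._encode_script() builds (and that the snippet above decodes again for logging) is simply the script encoded as UTF-16LE and then base64'd; a minimal illustration, with the surrounding flag list being an assumption rather than the exact output of that helper:

    import base64

    script = u"Write-Host 'hello'"
    encoded_cmd = base64.b64encode(script.encode('utf-16-le')).decode('ascii')
    cmd_parts = ['PowerShell', '-NoProfile', '-NonInteractive',
                 '-EncodedCommand', encoded_cmd]
    # reversing it, as the display.vvv() branch above does:
    decoded_cmd = base64.b64decode(encoded_cmd).decode('utf-16-le')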
Example #21
    def run(self, terms, variables=None, **kwargs):
        ret = []

        for term in terms:
            '''
            http://docs.python.org/2/library/subprocess.html#popen-constructor
            The shell argument (which defaults to False) specifies whether to use the
            shell as the program to execute. If shell is True, it is recommended to pass
            args as a string rather than as a sequence
            https://github.com/ansible/ansible/issues/6550
            '''
            name, params = _parse_parameters(term)
            if params['regenerate']:
                try:
                    generate_password(name, params['length'], params['symbols'], True)
                    display.vvv('Generated password for %s' % name)
                except Exception as e:
                    raise AnsibleError("lookup_plugin.pass(%s) returned %s" % (term, e.message))

            try:
                password = get_password(term)
            except Exception:
                try:
                    generate_password(name, params['length'], params['symbols'])
                    display.vvv('Generated password for %s' % name)
                    password = get_password(name)
                except Exception as e:
                    raise AnsibleError("lookup_plugin.pass(%s) returned %s" % (term, e.message))
            ret.append(password)
        return ret
Example #22
    def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):

        self._metadata = None
        self._install_info = None

        self._validate_certs = not C.GALAXY_IGNORE_CERTS

        # set validate_certs
        if galaxy.options.ignore_certs:
            self._validate_certs = False
        display.vvv('Validate TLS certificates: %s' % self._validate_certs)

        self.options = galaxy.options
        self.galaxy  = galaxy

        self.name = name
        self.version = version
        self.src = src or name
        self.scm = scm

        if path is not None:
            if self.name not in path:
                path = os.path.join(path, self.name)
            self.path = path
        else:
            for role_path_dir in galaxy.roles_paths:
                role_path = os.path.join(role_path_dir, self.name)
                if os.path.exists(role_path):
                    self.path = role_path
                    break
            else:
                # use the first path by default
                self.path = os.path.join(galaxy.roles_paths[0], self.name)
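The role path lookup above relies on Python's for/else: the else branch runs only when the loop finishes without hitting break, i.e. when no existing role directory was found. A tiny isolated illustration (directory names are placeholders):

    import os

    roles_paths = ["/etc/ansible/roles", os.path.expanduser("~/.ansible/roles")]
    for role_path_dir in roles_paths:
        candidate = os.path.join(role_path_dir, "myrole")
        if os.path.exists(candidate):
            path = candidate
            break
    else:
        # no existing role directory found: fall back to the first search path
        path = os.path.join(roles_paths[0], "myrole")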
Example #23
    def exec_command(self, cmd, in_data=None, sudoable=True):
        ''' run a command on the local host '''

        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        display.debug("in local.exec_command()")

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
        executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None

        display.vvv(u"{0} EXEC {1}".format(self._play_context.remote_addr, cmd))
        # FIXME: cwd= needs to be set to the basedir of the playbook
        display.debug("opening command with Popen()")

        if isinstance(cmd, (text_type, binary_type)):
            cmd = to_bytes(cmd)
        else:
            cmd = map(to_bytes, cmd)

        p = subprocess.Popen(
            cmd,
            shell=isinstance(cmd, (text_type, binary_type)),
            executable=executable, #cwd=...
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        display.debug("done running command with Popen()")

        if self._play_context.prompt and sudoable:
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
            become_output = ''
            while not self.check_become_success(become_output) and not self.check_password_prompt(become_output):

                rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self._play_context.timeout)
                if p.stdout in rfd:
                    chunk = p.stdout.read()
                elif p.stderr in rfd:
                    chunk = p.stderr.read()
                else:
                    stdout, stderr = p.communicate()
                    raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + become_output)
                if not chunk:
                    stdout, stderr = p.communicate()
                    raise AnsibleError('privilege output closed while waiting for password prompt:\n' + become_output)
                become_output += chunk
            if not self.check_become_success(become_output):
                p.stdin.write(self._play_context.become_pass + '\n')
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        display.debug("getting output with communicate()")
        stdout, stderr = p.communicate()
        display.debug("done communicating")

        display.debug("done with local.exec_command()")
        return (p.returncode, stdout, stderr)
Example #24
 def _connect(self, port=None):
     """ Connect to the container. Nothing to do """
     super(Connection, self)._connect()
     if not self._connected:
         display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
             self.actual_user or '?'), host=self._play_context.remote_addr
         )
         self._connected = True
Example #25
    def _winrm_connect(self):
        '''
        Establish a WinRM connection over HTTP/HTTPS.
        '''
        display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
                    (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)

        winrm_host = self._winrm_host
        if HAS_IPADDRESS:
            display.vvvv("checking if winrm_host %s is an IPv6 address" % winrm_host)
            try:
                ipaddress.IPv6Address(winrm_host)
            except ipaddress.AddressValueError:
                pass
            else:
                winrm_host = "[%s]" % winrm_host

        netloc = '%s:%d' % (winrm_host, self._winrm_port)
        endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
        errors = []
        for transport in self._winrm_transport:
            if transport == 'kerberos':
                if not HAVE_KERBEROS:
                    errors.append('kerberos: the python kerberos library is not installed')
                    continue
                if self._kerb_managed:
                    self._kerb_auth(self._winrm_user, self._winrm_pass)
            display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
            try:
                winrm_kwargs = self._winrm_kwargs.copy()
                if self._winrm_connection_timeout:
                    winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout
                    winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 1
                protocol = Protocol(endpoint, transport=transport, **winrm_kwargs)

                # open the shell from connect so we know we're able to talk to the server
                if not self.shell_id:
                    self.shell_id = protocol.open_shell(codepage=65001)  # UTF-8
                    display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)

                return protocol
            except Exception as e:
                err_msg = to_text(e).strip()
                if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
                    raise AnsibleError('the connection attempt timed out')
                m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
                if m:
                    code = int(m.groups()[0])
                    if code == 401:
                        err_msg = 'the specified credentials were rejected by the server'
                    elif code == 411:
                        return protocol
                errors.append(u'%s: %s' % (transport, err_msg))
                display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
        if errors:
            raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
        else:
            raise AnsibleError('No transport found for WinRM connection')
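The IPv6 handling at the top of the method can be exercised on its own; a small sketch (the host value is a placeholder):

    import ipaddress

    host = u"fe80::1"
    try:
        ipaddress.IPv6Address(host)
    except ipaddress.AddressValueError:
        pass  # not an IPv6 literal, leave it untouched
    else:
        host = "[%s]" % host  # URL netlocs need brackets around IPv6 literals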
Example #26
    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        provider = load_provider(ce_provider_spec, self._task.args)
        transport = provider['transport'] or 'cli'

        display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)

        if transport == 'cli':
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'ce'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = int(provider['port'] or self._play_context.port or 22)
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
            # dict.update() returns None; update the provider in place and rely on
            # the unconditional provider assignment further below
            provider.update(
                host=pc.remote_addr,
                port=pc.port,
                username=pc.remote_user,
                password=pc.password
            )
            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            # make sure we are in the right cli context which should be
            # enable mode and not config mode
            conn = Connection(socket_path)
            out = conn.get_prompt()
            while to_text(out, errors='surrogate_then_replace').strip().endswith(']'):
                display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
                conn.send_command('exit')
                out = conn.get_prompt()

            task_vars['ansible_socket'] = socket_path

        # make sure a transport value is set in args
        self._task.args['transport'] = transport
        self._task.args['provider'] = provider

        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
Example #27
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
        delay = int(self._task.args.get('delay', self.DEFAULT_DELAY))
        sleep = int(self._task.args.get('sleep', self.DEFAULT_SLEEP))
        timeout = int(self._task.args.get('timeout', self.DEFAULT_TIMEOUT))

        if self._play_context.check_mode:
            display.vvv("wait_for_connection: skipping for check_mode")
            return dict(skipped=True)

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        def ping_module_test(connect_timeout):
            ''' Test ping module, if available '''
            display.vvv("wait_for_connection: attempting ping module test")
            # call connection reset between runs if it's there
            try:
                self._connection._reset()
            except AttributeError:
                pass

            # Use win_ping on winrm/powershell, else use ping
            if hasattr(self._connection, '_shell_type') and self._connection._shell_type == 'powershell':
                ping_result = self._execute_module(module_name='win_ping', module_args=dict(), task_vars=task_vars)
            else:
                ping_result = self._execute_module(module_name='ping', module_args=dict(), task_vars=task_vars)

            # Test module output
            if ping_result['ping'] != 'pong':
                raise Exception('ping test failed')

        start = datetime.now()

        if delay:
            time.sleep(delay)

        try:
            # If the connection has a transport_test method, use it first
            if hasattr(self._connection, 'transport_test'):
                self.do_until_success_or_timeout(self._connection.transport_test, timeout, connect_timeout, what_desc="connection port up", sleep=sleep)

            # Use the ping module test to determine end-to-end connectivity
            self.do_until_success_or_timeout(ping_module_test, timeout, connect_timeout, what_desc="ping module test success", sleep=sleep)

        except TimedOutException as e:
            result['failed'] = True
            result['msg'] = str(e)

        elapsed = datetime.now() - start
        result['elapsed'] = elapsed.seconds

        return result
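do_until_success_or_timeout() (used twice above) is essentially a retry loop with a deadline; a simplified approximation of that pattern, not the actual helper:

    import time
    from datetime import datetime, timedelta

    def retry_until(test_func, timeout, sleep=1):
        ''' call test_func until it stops raising or the deadline passes '''
        deadline = datetime.now() + timedelta(seconds=timeout)
        last_error = None
        while datetime.now() < deadline:
            try:
                test_func()
                return  # success
            except Exception as e:
                last_error = e
                time.sleep(sleep)
        raise Exception("timed out after %s seconds: %s" % (timeout, last_error))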
Example #28
    def exec_command(self, cmd, in_data=None, sudoable=True):
        ''' run a command on the local host '''

        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        display.debug("in local.exec_command()")

        executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None

        display.vvv(u"EXEC {0}".format(cmd), host=self._play_context.remote_addr)
        display.debug("opening command with Popen()")

        if isinstance(cmd, (text_type, binary_type)):
            cmd = to_bytes(cmd)
        else:
            cmd = map(to_bytes, cmd)

        p = subprocess.Popen(
            cmd,
            shell=isinstance(cmd, (text_type, binary_type)),
            executable=executable, #cwd=...
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        display.debug("done running command with Popen()")

        if self._play_context.prompt and sudoable:
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
            become_output = b''
            while not self.check_become_success(become_output) and not self.check_password_prompt(become_output):

                rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self._play_context.timeout)
                if p.stdout in rfd:
                    chunk = p.stdout.read()
                elif p.stderr in rfd:
                    chunk = p.stderr.read()
                else:
                    stdout, stderr = p.communicate()
                    raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
                if not chunk:
                    stdout, stderr = p.communicate()
                    raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
                become_output += chunk
            if not self.check_become_success(become_output):
                p.stdin.write(to_bytes(self._play_context.become_pass, errors='surrogate_or_strict') + b'\n')
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        display.debug("getting output with communicate()")
        stdout, stderr = p.communicate(in_data)
        display.debug("done communicating")

        display.debug("done with local.exec_command()")
        return (p.returncode, stdout, stderr)
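The fcntl calls around the become prompt above just toggle O_NONBLOCK on the child's stdout/stderr pipes; isolated, the pattern looks like this:

    import fcntl
    import os

    def set_nonblocking(fileobj, enabled=True):
        # read-modify-write the status flags of the underlying descriptor
        flags = fcntl.fcntl(fileobj, fcntl.F_GETFL)
        if enabled:
            flags |= os.O_NONBLOCK
        else:
            flags &= ~os.O_NONBLOCK
        fcntl.fcntl(fileobj, fcntl.F_SETFL, flags)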
Example #29
    def put_file(self, in_path, out_path):
        """ Place a local file located in 'in_path' inside container at 'out_path' """
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._container_id)

        real_out_path = self._mount_point + to_bytes(out_path, errors='surrogate_or_strict')
        shutil.copyfile(
            to_bytes(in_path, errors='surrogate_or_strict'),
            to_bytes(real_out_path, errors='surrogate_or_strict')
        )
Example #30
    def fetch_file(self, in_path, out_path):
        """ obtain file specified via 'in_path' from the container and place it at 'out_path' """
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._container_id)

        real_in_path = self._mount_point + to_bytes(in_path, errors='surrogate_or_strict')
        shutil.copyfile(
            to_bytes(real_in_path, errors='surrogate_or_strict'),
            to_bytes(out_path, errors='surrogate_or_strict')
        )
Example #31
    def exec_command(self, cmd, in_data=None, sudoable=True):
        ''' run a command on the remote host '''

        super(Connection, self).exec_command(cmd,
                                             in_data=in_data,
                                             sudoable=sudoable)

        if in_data:
            raise AnsibleError(
                "Internal Error: this module does not support optimized module pipelining"
            )

        bufsize = 4096

        try:
            self.ssh.get_transport().set_keepalive(5)
            chan = self.ssh.get_transport().open_session()
        except Exception as e:
            msg = "Failed to open session"
            if len(str(e)) > 0:
                msg += ": %s" % str(e)
            raise AnsibleConnectionFailure(msg)

        # sudo usually requires a PTY (cf. requiretty option), therefore
        # we give it one by default (pty=True in ansible.cfg), and we try
        # to initialise from the calling environment when sudoable is enabled
        if C.PARAMIKO_PTY and sudoable:
            chan.get_pty(term=os.getenv('TERM', 'vt100'),
                         width=int(os.getenv('COLUMNS', 0)),
                         height=int(os.getenv('LINES', 0)))

        display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr)

        cmd = to_bytes(cmd, errors='surrogate_or_strict')

        no_prompt_out = b''
        no_prompt_err = b''
        become_output = b''

        try:
            chan.exec_command(cmd)
            if self._play_context.prompt:
                passprompt = False
                become_success = False
                while not (become_success or passprompt):
                    display.debug('Waiting for Privilege Escalation input')

                    chunk = chan.recv(bufsize)
                    display.debug("chunk is: %s" % chunk)
                    if not chunk:
                        if b'unknown user' in become_output:
                            raise AnsibleError('user %s does not exist' %
                                               self._play_context.become_user)
                        else:
                            break
                            #raise AnsibleError('ssh connection closed waiting for password prompt')
                    become_output += chunk

                    # need to check every line because we might get lectured
                    # and we might get the middle of a line in a chunk
                    for l in become_output.splitlines(True):
                        if self.check_become_success(l):
                            become_success = True
                            break
                        elif self.check_password_prompt(l):
                            passprompt = True
                            break

                if passprompt:
                    if self._play_context.become and self._play_context.become_pass:
                        chan.sendall(
                            to_bytes(self._play_context.become_pass) + b'\n')
                    else:
                        raise AnsibleError(
                            "A password is reqired but none was supplied")
                else:
                    no_prompt_out += become_output
                    no_prompt_err += become_output
        except socket.timeout:
            raise AnsibleError(
                'ssh timed out waiting for privilege escalation.\n' +
                to_native(become_output))

        stdout = b''.join(chan.makefile('rb', bufsize))
        stderr = b''.join(chan.makefile_stderr('rb', bufsize))

        return (chan.recv_exit_status(), no_prompt_out + stdout,
                no_prompt_err + stderr)
Example #32
    def run(self, tmp=None, task_vars=None):

        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                'got %s' % self._play_context.connection)

        provider = self.load_provider()

        pc = copy.deepcopy(self._play_context)
        pc.connection = 'network_cli'
        pc.network_os = 'dellos10'
        pc.port = provider['port'] or self._play_context.port or 22
        pc.remote_user = provider[
            'username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.private_key_file = provider[
            'ssh_keyfile'] or self._play_context.private_key_file
        pc.timeout = provider['timeout'] or self._play_context.timeout
        pc.become = provider['authorize'] or False
        pc.become_pass = provider['auth_pass']

        # remove auth from provider arguments
        provider.pop('password', None)
        provider.pop('auth_pass', None)

        display.vvv('using connection plugin %s' % pc.connection,
                    pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get(
            'persistent', pc, sys.stdin)

        socket_path = self._get_socket_path(pc)
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)

        if not os.path.exists(socket_path):
            # start the connection if it isn't started
            rc, out, err = connection.exec_command('open_shell()')
            display.vvvv('open_shell() returned %s %s %s' % (rc, out, err))
            if rc != 0:
                return {
                    'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                    'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell',
                    'rc': rc
                }
        else:
            # make sure we are in the right cli context which should be
            # enable mode and not config mode
            rc, out, err = connection.exec_command('prompt()')
            while str(out).strip().endswith(')#'):
                display.vvvv('wrong context, sending exit to device',
                             self._play_context.remote_addr)
                connection.exec_command('exit')
                rc, out, err = connection.exec_command('prompt()')

        task_vars['ansible_socket'] = socket_path

        if self._play_context.become_method == 'enable':
            self._play_context.become = False
            self._play_context.become_method = None

        result = super(ActionModule, self).run(tmp, task_vars)
        return result
Example #33
    def run(self, tmp=None, task_vars=None):

        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                'got %s' % self._play_context.connection)

        module = module_loader._load_module_source(
            self._task.action, module_loader.find_plugin(self._task.action))

        if not getattr(module, 'USE_PERSISTENT_CONNECTION', False):
            return super(ActionModule, self).run(tmp, task_vars)

        provider = self.load_provider()

        pc = copy.deepcopy(self._play_context)
        pc.network_os = 'junos'

        pc.remote_addr = provider['host'] or self._play_context.remote_addr

        if self._task.action == 'junos_netconf':
            pc.connection = 'network_cli'
            pc.port = provider['port'] or self._play_context.port or 22
        else:
            pc.connection = 'netconf'
            pc.port = provider['port'] or self._play_context.port or 830

        pc.remote_user = provider[
            'username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.private_key_file = provider[
            'ssh_keyfile'] or self._play_context.private_key_file
        pc.timeout = provider['timeout'] or self._play_context.timeout

        display.vvv('using connection plugin %s' % pc.connection,
                    pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get(
            'persistent', pc, sys.stdin)

        socket_path = self._get_socket_path(pc)
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)

        if not os.path.exists(socket_path):
            # start the connection if it isn't started
            if pc.connection == 'netconf':
                rc, out, err = connection.exec_command('open_session()')
            else:
                rc, out, err = connection.exec_command('open_shell()')

            if rc != 0:
                return {
                    'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                    'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell',
                    'rc': rc
                }

        elif pc.connection == 'network_cli':
            # make sure we are in the right cli context which should be
            # enable mode and not config mode
            rc, out, err = connection.exec_command('prompt()')
            while str(out).strip().endswith(')#'):
                display.vvvv('wrong context, sending exit to device',
                             self._play_context.remote_addr)
                connection.exec_command('exit')
                rc, out, err = connection.exec_command('prompt()')

        task_vars['ansible_socket'] = socket_path

        return super(ActionModule, self).run(tmp, task_vars)
Example #34
    def _winrm_connect(self):
        '''
        Establish a WinRM connection over HTTP/HTTPS.
        '''
        display.vvv(
            "ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
            (self._winrm_user, self._winrm_port, self._winrm_host),
            host=self._winrm_host)

        winrm_host = self._winrm_host
        if HAS_IPADDRESS:
            display.vvvv("checking if winrm_host %s is an IPv6 address" %
                         winrm_host)
            try:
                ipaddress.IPv6Address(winrm_host)
            except ipaddress.AddressValueError:
                pass
            else:
                winrm_host = "[%s]" % winrm_host

        netloc = '%s:%d' % (winrm_host, self._winrm_port)
        endpoint = urlunsplit(
            (self._winrm_scheme, netloc, self._winrm_path, '', ''))
        errors = []
        for transport in self._winrm_transport:
            if transport == 'kerberos':
                if not HAVE_KERBEROS:
                    errors.append(
                        'kerberos: the python kerberos library is not installed'
                    )
                    continue
                if self._kerb_managed:
                    self._kerb_auth(self._winrm_user, self._winrm_pass)
            display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' %
                          (transport, endpoint),
                          host=self._winrm_host)
            try:
                protocol = Protocol(endpoint,
                                    transport=transport,
                                    **self._winrm_kwargs)

                # open the shell from connect so we know we're able to talk to the server
                if not self.shell_id:
                    self.shell_id = protocol.open_shell(
                        codepage=65001)  # UTF-8
                    display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id,
                                  host=self._winrm_host)

                return protocol
            except Exception as e:
                err_msg = to_text(e).strip()
                if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg,
                             re.I):
                    raise AnsibleError('the connection attempt timed out')
                m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
                if m:
                    code = int(m.groups()[0])
                    if code == 401:
                        err_msg = 'the specified credentials were rejected by the server'
                    elif code == 411:
                        return protocol
                errors.append(u'%s: %s' % (transport, err_msg))
                display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' %
                              (err_msg, to_text(traceback.format_exc())),
                              host=self._winrm_host)
        if errors:
            raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
        else:
            raise AnsibleError('No transport found for WinRM connection')
Example #35
    def parse_source(self, source, cache=False):
        ''' Generate or update inventory for the source provided '''

        parsed = False
        display.debug(u'Examining possible inventory source: %s' % source)

        b_source = to_bytes(source)
        # process directories as a collection of inventories
        if os.path.isdir(b_source):
            display.debug(u'Searching for inventory files in directory: %s' %
                          source)
            for i in sorted(os.listdir(b_source)):

                display.debug(u'Considering %s' % i)
                # Skip hidden files and stuff we explicitly ignore
                if IGNORED.search(i):
                    continue

                # recursively deal with directory entries
                b_fullpath = os.path.join(b_source, i)
                parsed_this_one = self.parse_source(b_fullpath, cache=cache)
                display.debug(u'parsed %s as %s' %
                              (to_text(b_fullpath), parsed_this_one))
                if not parsed:
                    parsed = parsed_this_one
        else:
            # left with strings or files, let plugins figure it out

            # set so new hosts can use for inventory_file/dir vars
            self._inventory.current_source = source

            # get inventory plugins if needed, there should always be at least one generator
            if not self._inventory_plugins:
                self._setup_inventory_plugins()

            # try source with each plugin
            failures = []
            for plugin in self._inventory_plugins:
                plugin_name = to_text(
                    getattr(plugin, '_load_name',
                            getattr(plugin, '_original_path', '')))
                display.debug(u'Attempting to use plugin %s (%s)' %
                              (plugin_name, plugin._original_path))

                # initialize and figure out if plugin wants to attempt parsing this file
                try:
                    plugin_wants = bool(plugin.verify_file(to_text(source)))
                except Exception:
                    plugin_wants = False

                if plugin_wants:
                    try:
                        # in case the plugin fails half way, we don't want partial inventory
                        plugin.parse(self._inventory,
                                     self._loader,
                                     source,
                                     cache=cache)
                        parsed = True
                        display.vvv(
                            'Parsed %s inventory source with %s plugin' %
                            (to_text(source), plugin_name))
                        break
                    except AnsibleParserError as e:
                        display.debug('%s was not parsable by %s' %
                                      (to_text(source), plugin_name))
                        failures.append({
                            'src': source,
                            'plugin': plugin_name,
                            'exc': e
                        })
                    except Exception as e:
                        display.debug('%s failed to parse %s' %
                                      (plugin_name, to_text(source)))
                        failures.append({
                            'src': source,
                            'plugin': plugin_name,
                            'exc': AnsibleError(e)
                        })
                else:
                    display.debug('%s did not meet %s requirements' %
                                  (to_text(source), plugin_name))
            else:
                if not parsed and failures:
                    # only if no plugin processed files should we show errors.
                    for fail in failures:
                        display.warning(
                            u'\n* Failed to parse %s with %s plugin: %s' %
                            (to_text(fail['src']), fail['plugin'],
                             to_text(fail['exc'])))
                        if hasattr(fail['exc'], 'tb'):
                            display.vvv(to_text(fail['exc'].tb))
                    if C.INVENTORY_ANY_UNPARSED_IS_FAILED:
                        raise AnsibleError(
                            u'Completely failed to parse inventory source %s' %
                            (to_text(source)))
        if not parsed:
            display.warning("Unable to parse %s as an inventory source" %
                            to_text(source))

        # clean up, just in case
        self._inventory.current_source = None

        return parsed
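
parse_source() above recurses into directories and otherwise asks each inventory plugin, via verify_file(), whether it wants the source before calling parse(); failures are collected and only reported when nothing parsed. A standalone sketch of that selection loop, with hypothetical stand-in parser classes:

# Standalone sketch of the plugin-selection loop in parse_source(): every
# plugin is asked via verify_file() whether it wants the source, the first
# parse() that succeeds wins, and failures are only surfaced if nothing
# parsed. The parser classes below are hypothetical stand-ins.
class IniParser:
    name = 'ini'
    def verify_file(self, path):
        return path.endswith(('.ini', '.cfg'))
    def parse(self, path):
        return {'source': path, 'format': 'ini'}

class YamlParser:
    name = 'yaml'
    def verify_file(self, path):
        return path.endswith(('.yml', '.yaml'))
    def parse(self, path):
        return {'source': path, 'format': 'yaml'}

def parse_with_plugins(source, plugins):
    failures = []
    for plugin in plugins:
        try:
            wants = bool(plugin.verify_file(source))
        except Exception:
            wants = False
        if not wants:
            continue
        try:
            return plugin.parse(source)
        except Exception as exc:
            failures.append((plugin.name, exc))
    for name, exc in failures:
        print('* Failed to parse %s with %s plugin: %s' % (source, name, exc))
    return None

if __name__ == '__main__':
    print(parse_with_plugins('hosts.yml', [IniParser(), YamlParser()]))
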
Example #36
    def format_plugin_doc(self, plugin, loader, plugin_type, search_paths):
        text = ''

        try:
            # if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
            filename = loader.find_plugin(plugin,
                                          mod_type='.py',
                                          ignore_deprecated=True,
                                          check_aliases=True)
            if filename is None:
                display.warning("%s %s not found in:\n%s\n" %
                                (plugin_type, plugin, search_paths))
                return

            if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
                return

            try:
                doc, plainexamples, returndocs, metadata = get_docstring(
                    filename,
                    fragment_loader,
                    verbose=(self.options.verbosity > 0))
            except Exception:
                display.vvv(traceback.format_exc())
                display.error(
                    "%s %s has a documentation error formatting or is missing documentation."
                    % (plugin_type, plugin),
                    wrap_text=False)
                return

            if doc is not None:

                # assign from other sections
                doc['plainexamples'] = plainexamples
                doc['returndocs'] = returndocs
                doc['metadata'] = metadata

                # generate extra data
                if plugin_type == 'module':
                    # is there corresponding action plugin?
                    if plugin in action_loader:
                        doc['action'] = True
                    else:
                        doc['action'] = False
                doc['filename'] = filename
                doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
                if 'docuri' in doc:
                    doc['docuri'] = doc[plugin_type].replace('_', '-')

                if self.options.show_snippet and plugin_type == 'module':
                    text += self.get_snippet_text(doc)
                else:
                    text += self.get_man_text(doc)

                return text
            else:
                if 'removed' in metadata['status']:
                    display.warning("%s %s has been removed\n" %
                                    (plugin_type, plugin))
                    return

                # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
                # probably a quoting issue.
                raise AnsibleError("Parsing produced an empty object.")
        except Exception as e:
            display.vvv(traceback.format_exc())
            raise AnsibleError(
                "%s %s missing documentation (or could not parse documentation): %s\n"
                % (plugin_type, plugin, str(e)))
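
format_plugin_doc() locates the plugin file, extracts its documentation, then decorates the resulting dict (examples, return docs, metadata, filename, date, action flag) before rendering it. A small sketch of that decorate-then-render step, using a plain dict and a stub renderer in place of get_man_text():

# Sketch of the decorate-then-render step in format_plugin_doc(). A plain
# dict stands in for the parsed DOCUMENTATION block and render_man_text()
# is a hypothetical stub for the real get_man_text() renderer.
import datetime

def decorate_doc(doc, plugin_type, filename, plainexamples=None,
                 returndocs=None, metadata=None, has_action=False):
    doc = dict(doc)
    doc['plainexamples'] = plainexamples
    doc['returndocs'] = returndocs
    doc['metadata'] = metadata
    if plugin_type == 'module':
        # is there a corresponding action plugin?
        doc['action'] = has_action
    doc['filename'] = filename
    doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
    return doc

def render_man_text(doc):
    # stand-in for get_man_text(): dump the non-empty keys
    return '\n'.join('%s: %s' % (k, doc[k]) for k in sorted(doc) if doc[k] is not None)

if __name__ == '__main__':
    base = {'module': 'ping', 'short_description': 'Try to connect to host'}
    print(render_man_text(decorate_doc(base, 'module', '/tmp/ping.py')))
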
Example #37
    def get_vars(self,
                 play=None,
                 host=None,
                 task=None,
                 include_hostvars=True,
                 include_delegate_to=True,
                 use_cache=True):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars
        '''

        display.debug("in VariableManager get_vars()")

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
        )

        # default for all cases
        basedirs = []
        if self.safe_basedir:  # avoid adhoc/console loading cwd
            basedirs = [self._loader.get_basedir()]

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_default_vars())

        if task:
            # set basedirs
            if C.PLAYBOOK_VARS_ROOT == 'all':  # should be default
                basedirs = task.get_search_path()
            elif C.PLAYBOOK_VARS_ROOT in (
                    'bottom', 'playbook_dir'):  # only option in 2.4.0
                basedirs = [task.get_search_path()[0]]
            elif C.PLAYBOOK_VARS_ROOT != 'top':
                # preserves default basedirs, only option pre 2.3
                raise AnsibleError('Unknown playbook vars logic: %s' %
                                   C.PLAYBOOK_VARS_ROOT)

            # if we have a task in this context, and that task has a role, make
            # sure it sees its defaults above any other roles, as we previously
            # (v1) made sure each task had a copy of its roles default vars
            if task._role is not None and (play
                                           or task.action == 'include_role'):
                all_vars = combine_vars(
                    all_vars,
                    task._role.get_default_vars(
                        dep_chain=task.get_dep_chain()))

        if host:
            # the 'all' group and the rest of the groups for a host, used below
            all_group = self._inventory.groups.get('all')
            host_groups = sort_groups(
                [g for g in host.get_groups() if g.name not in ['all']])

            def _get_plugin_vars(plugin, path, entities):
                data = {}
                try:
                    data = plugin.get_vars(self._loader, path, entities)
                except AttributeError:
                    try:
                        for entity in entities:
                            if isinstance(entity, Host):
                                data.update(plugin.get_host_vars(entity.name))
                            else:
                                data.update(plugin.get_group_vars(entity.name))
                    except AttributeError:
                        if hasattr(plugin, 'run'):
                            raise AnsibleError(
                                "Cannot use v1 type vars plugin %s from %s" %
                                (plugin._load_name, plugin._original_path))
                        else:
                            raise AnsibleError(
                                "Invalid vars plugin %s from %s" %
                                (plugin._load_name, plugin._original_path))
                return data

            # internal functions that actually do the work
            def _plugins_inventory(entities):
                ''' merges all entities by inventory source '''
                data = {}
                for inventory_dir in self._inventory._sources:
                    if ',' in inventory_dir and not os.path.exists(inventory_dir):
                        # skip host lists
                        continue
                    elif not os.path.isdir(inventory_dir):
                        # always pass the 'inventory directory'
                        inventory_dir = os.path.dirname(inventory_dir)

                    for plugin in vars_loader.all():

                        data = combine_vars(
                            data,
                            _get_plugin_vars(plugin, inventory_dir, entities))
                return data

            def _plugins_play(entities):
                ''' merges all entities adjacent to play '''
                data = {}
                for plugin in vars_loader.all():

                    for path in basedirs:
                        data = combine_vars(
                            data, _get_plugin_vars(plugin, path, entities))
                return data

            # configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
            def all_inventory():
                return all_group.get_vars()

            def all_plugins_inventory():
                return _plugins_inventory([all_group])

            def all_plugins_play():
                return _plugins_play([all_group])

            def groups_inventory():
                ''' gets group vars from inventory '''
                return get_group_vars(host_groups)

            def groups_plugins_inventory():
                ''' gets plugin sources from inventory for groups '''
                return _plugins_inventory(host_groups)

            def groups_plugins_play():
                ''' gets plugin sources from play for groups '''
                return _plugins_play(host_groups)

            def plugins_by_groups():
                '''
                    merges all plugin sources by group,
                    This should be used instead, NOT in combination with the other groups_plugins* functions
                '''
                data = {}
                for group in host_groups:
                    data[group] = combine_vars(data.get(group, {}),
                                               _plugins_inventory([group]))
                    data[group] = combine_vars(data[group],
                                               _plugins_play([group]))
                return data

            # Merge groups as per precedence config
            # only allow to call the functions we want exposed
            for entry in C.VARIABLE_PRECEDENCE:
                if entry in self._ALLOWED:
                    display.debug('Calling %s to load vars for %s' %
                                  (entry, host.name))
                    all_vars = combine_vars(all_vars, locals()[entry]())
                else:
                    display.warning(
                        'Ignoring unknown variable precedence entry: %s' %
                        (entry))

            # host vars, from inventory, inventory adjacent and play adjacent via plugins
            all_vars = combine_vars(all_vars, host.get_vars())
            all_vars = combine_vars(all_vars, _plugins_inventory([host]))
            all_vars = combine_vars(all_vars, _plugins_play([host]))

            # finally, the facts caches for this host, if it exists
            try:
                facts = self._fact_cache.get(host.name, {})
                all_vars.update(namespace_facts(facts))

                # push facts to main namespace
                if C.INJECT_FACTS_AS_VARS:
                    all_vars = combine_vars(all_vars, wrap_var(facts))
                else:
                    # always 'promote' ansible_local
                    all_vars = combine_vars(
                        all_vars,
                        wrap_var(
                            {'ansible_local': facts.get('ansible_local', {})}))
            except KeyError:
                pass

        if play:
            all_vars = combine_vars(all_vars, play.get_vars())

            vars_files = play.get_vars_files()
            try:
                for vars_file_item in vars_files:
                    # create a set of temporary vars here, which incorporate the extra
                    # and magic vars so we can properly template the vars_files entries
                    temp_vars = combine_vars(all_vars, self._extra_vars)
                    temp_vars = combine_vars(temp_vars, magic_variables)
                    templar = Templar(loader=self._loader, variables=temp_vars)

                    # we assume each item in the list is itself a list, as we
                    # support "conditional includes" for vars_files, which mimics
                    # the with_first_found mechanism.
                    vars_file_list = vars_file_item
                    if not isinstance(vars_file_list, list):
                        vars_file_list = [vars_file_list]

                    # now we iterate through the (potential) files, and break out
                    # as soon as we read one from the list. If none are found, we
                    # raise an error, which is silently ignored at this point.
                    try:
                        for vars_file in vars_file_list:
                            vars_file = templar.template(vars_file)
                            if not (isinstance(vars_file, Sequence)):
                                raise AnsibleError(
                                    "Invalid vars_files entry found: %r\n"
                                    "vars_files entries should be either a string type or "
                                    "a list of string types after template expansion"
                                    % vars_file)
                            try:
                                data = preprocess_vars(
                                    self._loader.load_from_file(vars_file,
                                                                unsafe=True))
                                if data is not None:
                                    for item in data:
                                        all_vars = combine_vars(all_vars, item)
                                break
                            except AnsibleFileNotFound:
                                # we continue on loader failures
                                continue
                            except AnsibleParserError:
                                raise
                        else:
                            # if include_delegate_to is set to False, we ignore the missing
                            # vars file here because we're working on a delegated host
                            if include_delegate_to:
                                raise AnsibleFileNotFound(
                                    "vars file %s was not found" %
                                    vars_file_item)
                    except (UndefinedError, AnsibleUndefinedVariable):
                        if host is not None and self._fact_cache.get(
                                host.name, dict()).get(
                                    'module_setup') and task is not None:
                            raise AnsibleUndefinedVariable(
                                "an undefined variable was found when attempting to template the vars_files item '%s'"
                                % vars_file_item,
                                obj=vars_file_item)
                        else:
                            # we do not have a full context here, and the missing variable could be because of that
                            # so just show a warning and continue
                            display.vvv(
                                "skipping vars_file '%s' due to an undefined variable"
                                % vars_file_item)
                            continue

                    display.vvv("Read vars_file '%s'" % vars_file_item)
            except TypeError:
                raise AnsibleParserError(
                    "Error while reading vars files - please supply a list of file names. "
                    "Got '%s' of type %s" % (vars_files, type(vars_files)))

            # By default, we now merge in all vars from all roles in the play,
            # unless the user has disabled this via a config option
            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = combine_vars(
                        all_vars, role.get_vars(include_params=False))

        # next, we merge in the vars from the role, which will specifically
        # follow the role dependency chain, and then we merge in the tasks
        # vars (which will look at parent blocks/task includes)
        if task:
            if task._role:
                all_vars = combine_vars(
                    all_vars,
                    task._role.get_vars(task.get_dep_chain(),
                                        include_params=False))
            all_vars = combine_vars(all_vars, task.get_vars())

        # next, we merge in the vars cache (include vars) and nonpersistent
        # facts cache (set_fact/register), in that order
        if host:
            # include_vars non-persistent cache
            all_vars = combine_vars(
                all_vars, self._vars_cache.get(host.get_name(), dict()))
            # fact non-persistent cache
            all_vars = combine_vars(
                all_vars,
                self._nonpersistent_fact_cache.get(host.name, dict()))

        # next, we merge in role params and task include params
        if task:
            if task._role:
                all_vars = combine_vars(
                    all_vars, task._role.get_role_params(task.get_dep_chain()))

            # special case for include tasks, where the include params
            # may be specified in the vars field for the task, which should
            # have higher precedence than the vars/np facts above
            all_vars = combine_vars(all_vars, task.get_include_params())

        # extra vars
        all_vars = combine_vars(all_vars, self._extra_vars)

        # magic variables
        all_vars = combine_vars(all_vars, magic_variables)

        # special case for the 'environment' magic variable, as someone
        # may have set it as a variable and we don't want to stomp on it
        if task:
            all_vars['environment'] = task.environment

        # if we have a task and we're delegating to another host, figure out the
        # variables for that host now so we don't have to rely on hostvars later
        if task and task.delegate_to is not None and include_delegate_to:
            all_vars['ansible_delegated_vars'] = self._get_delegated_vars(
                play, task, all_vars)

        # 'vars' magic var
        if task or play:
            # has to be copy, otherwise recursive ref
            all_vars['vars'] = all_vars.copy()

        display.debug("done with get_vars()")
        return all_vars
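
At its core, get_vars() is a long sequence of combine_vars() calls applied in ascending precedence order, so later layers override earlier ones. A minimal sketch of that precedence model; combine_vars() here is a simplified stand-in for Ansible's helper, which can also deep-merge hashes depending on configuration:

# Minimal sketch of the precedence model behind get_vars(): later layers win.
def combine_vars(a, b):
    # simplified stand-in for Ansible's combine_vars (replace strategy)
    result = dict(a)
    result.update(b)
    return result

def resolve(layers):
    all_vars = {}
    for layer in layers:            # ordered lowest -> highest precedence
        all_vars = combine_vars(all_vars, layer)
    return all_vars

if __name__ == '__main__':
    layers = [
        {'http_port': 80, 'app_env': 'default'},   # role defaults
        {'app_env': 'staging'},                    # group/host vars
        {'http_port': 8080},                       # play vars
        {'app_env': 'prod'},                       # extra vars (highest)
    ]
    print(resolve(layers))   # {'http_port': 8080, 'app_env': 'prod'}
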
Example #38
 def _connect(self):
     ''' connect to the lxc; nothing to do here '''
     display.vvv('XXX connect')
     super(Connection, self)._connect()
     #self.container_name = self.ssh._play_context.remote_addr
     self.container_name = self._play_context.ssh_extra_args  # XXX
Example #39
 def _connect(self):
     ''' connect to the chroot; nothing to do here '''
     super(Connection, self)._connect()
     if not self._connected:
         display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
         self._connected = True
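
Examples #38 and #39 show how little a connection plugin's _connect() needs to do when there is no real session to open: call the parent, set any local state, and flip the _connected flag. A generic sketch of that skeleton using plain classes instead of Ansible's ConnectionBase:

# Generic sketch of the _connect() pattern above: the base class tracks a
# _connected flag and the subclass only records local state, because there
# is no real session to establish. Plain classes stand in for ConnectionBase.
class BaseConnection(object):
    def __init__(self):
        self._connected = False

    def _connect(self):
        return self

class ChrootLikeConnection(BaseConnection):
    def __init__(self, chroot):
        super(ChrootLikeConnection, self).__init__()
        self.chroot = chroot

    def _connect(self):
        super(ChrootLikeConnection, self)._connect()
        if not self._connected:
            print('THIS IS A LOCAL CHROOT DIR: %s' % self.chroot)
            self._connected = True
        return self

if __name__ == '__main__':
    ChrootLikeConnection('/srv/chroots/buster')._connect()
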
Example #40
    def _bare_run(self, cmd, in_data, sudoable=True, checkrc=True):
        '''
        Starts the command and communicates with it until it ends.
        '''

        display_cmd = list(map(shlex_quote, map(to_text, cmd)))
        display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)),
                    host=self.host)

        # Start the given command. If we don't need to pipeline data, we can try
        # to use a pseudo-tty (ssh will have been invoked with -tt). If we are
        # pipelining data, or can't create a pty, we fall back to using plain
        # old pipes.

        p = None

        if isinstance(cmd, (text_type, binary_type)):
            cmd = to_bytes(cmd)
        else:
            cmd = list(map(to_bytes, cmd))

        if not in_data:
            try:
                # Make sure stdin is a proper pty to avoid tcgetattr errors
                master, slave = pty.openpty()
                if PY3 and self._play_context.password:
                    # pylint: disable=unexpected-keyword-arg
                    p = subprocess.Popen(cmd,
                                         stdin=slave,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE,
                                         pass_fds=self.sshpass_pipe)
                else:
                    p = subprocess.Popen(cmd,
                                         stdin=slave,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                stdin = os.fdopen(master, 'wb', 0)
                os.close(slave)
            except (OSError, IOError):
                p = None

        if not p:
            if PY3 and self._play_context.password:
                # pylint: disable=unexpected-keyword-arg
                p = subprocess.Popen(cmd,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     pass_fds=self.sshpass_pipe)
            else:
                p = subprocess.Popen(cmd,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
            stdin = p.stdin

        # If we are using SSH password authentication, write the password into
        # the pipe we opened in _build_command.

        if self._play_context.password:
            os.close(self.sshpass_pipe[0])
            try:
                os.write(self.sshpass_pipe[1],
                         to_bytes(self._play_context.password) + b'\n')
            except OSError as e:
                # Ignore broken pipe errors if the sshpass process has exited.
                if e.errno != errno.EPIPE or p.poll() is None:
                    raise
            os.close(self.sshpass_pipe[1])

        #
        # SSH state machine
        #

        # Now we read and accumulate output from the running process until it
        # exits. Depending on the circumstances, we may also need to write an
        # escalation password and/or pipelined input to the process.

        states = [
            'awaiting_prompt', 'awaiting_escalation', 'ready_to_send',
            'awaiting_exit'
        ]

        # Are we requesting privilege escalation? Right now, we may be invoked
        # to execute sftp/scp with sudoable=True, but we can request escalation
        # only when using ssh. Otherwise we can send initial data straightaway.

        state = states.index('ready_to_send')
        if to_bytes(self.get_option('ssh_executable')) in cmd and sudoable:
            if self._play_context.prompt:
                # We're requesting escalation with a password, so we have to
                # wait for a password prompt.
                state = states.index('awaiting_prompt')
                display.debug(u'Initial state: %s: %s' %
                              (states[state], self._play_context.prompt))
            elif self._play_context.become and self._play_context.success_key:
                # We're requesting escalation without a password, so we have to
                # detect success/failure before sending any initial data.
                state = states.index('awaiting_escalation')
                display.debug(u'Initial state: %s: %s' %
                              (states[state], self._play_context.success_key))

        # We store accumulated stdout and stderr output from the process here,
        # but strip any privilege escalation prompt/confirmation lines first.
        # Output is accumulated into tmp_*, complete lines are extracted into
        # an array, then checked and removed or copied to stdout or stderr. We
        # set any flags based on examining the output in self._flags.

        b_stdout = b_stderr = b''
        b_tmp_stdout = b_tmp_stderr = b''

        self._flags = dict(become_prompt=False,
                           become_success=False,
                           become_error=False,
                           become_nopasswd_error=False)

        # select timeout should be longer than the connect timeout, otherwise
        # they will race each other when we can't connect, and the connect
        # timeout usually fails
        timeout = 2 + self._play_context.timeout
        for fd in (p.stdout, p.stderr):
            fcntl.fcntl(fd, fcntl.F_SETFL,
                        fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        # TODO: bcoca would like to use SelectSelector() when the number of open
        # filehandles is low, then switch to more efficient ones when it is higher.
        # select is faster when the number of filehandles is low.
        selector = selectors.DefaultSelector()
        selector.register(p.stdout, selectors.EVENT_READ)
        selector.register(p.stderr, selectors.EVENT_READ)

        # If we can send initial data without waiting for anything, we do so
        # before we start polling
        if states[state] == 'ready_to_send' and in_data:
            self._send_initial_data(stdin, in_data)
            state += 1

        try:
            while True:
                poll = p.poll()
                events = selector.select(timeout)

                # We pay attention to timeouts only while negotiating a prompt.

                if not events:
                    # We timed out
                    if state <= states.index('awaiting_escalation'):
                        # If the process has already exited, then it's not really a
                        # timeout; we'll let the normal error handling deal with it.
                        if poll is not None:
                            break
                        self._terminate_process(p)
                        raise AnsibleError(
                            'Timeout (%ds) waiting for privilege escalation prompt: %s'
                            % (timeout, to_native(b_stdout)))

                # Read whatever output is available on stdout and stderr, and stop
                # listening to the pipe if it's been closed.

                for key, event in events:
                    if key.fileobj == p.stdout:
                        b_chunk = p.stdout.read()
                        if b_chunk == b'':
                            # stdout has been closed, stop watching it
                            selector.unregister(p.stdout)
                            # When ssh has ControlMaster (+ControlPath/Persist) enabled, the
                            # first connection goes into the background and we never see EOF
                            # on stderr. If we see EOF on stdout, lower the select timeout
                            # to reduce the time wasted selecting on stderr if we observe
                            # that the process has not yet exited after this EOF. Otherwise
                            # we may spend a long timeout period waiting for an EOF that is
                            # not going to arrive until the persisted connection closes.
                            timeout = 1
                        b_tmp_stdout += b_chunk
                        display.debug("stdout chunk (state=%s):\n>>>%s<<<\n" %
                                      (state, to_text(b_chunk)))
                    elif key.fileobj == p.stderr:
                        b_chunk = p.stderr.read()
                        if b_chunk == b'':
                            # stderr has been closed, stop watching it
                            selector.unregister(p.stderr)
                        b_tmp_stderr += b_chunk
                        display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" %
                                      (state, to_text(b_chunk)))

                # We examine the output line-by-line until we have negotiated any
                # privilege escalation prompt and subsequent success/error message.
                # Afterwards, we can accumulate output without looking at it.

                if state < states.index('ready_to_send'):
                    if b_tmp_stdout:
                        b_output, b_unprocessed = self._examine_output(
                            'stdout', states[state], b_tmp_stdout, sudoable)
                        b_stdout += b_output
                        b_tmp_stdout = b_unprocessed

                    if b_tmp_stderr:
                        b_output, b_unprocessed = self._examine_output(
                            'stderr', states[state], b_tmp_stderr, sudoable)
                        b_stderr += b_output
                        b_tmp_stderr = b_unprocessed
                else:
                    b_stdout += b_tmp_stdout
                    b_stderr += b_tmp_stderr
                    b_tmp_stdout = b_tmp_stderr = b''

                # If we see a privilege escalation prompt, we send the password.
                # (If we're expecting a prompt but the escalation succeeds, we
                # didn't need the password and can carry on regardless.)

                if states[state] == 'awaiting_prompt':
                    if self._flags['become_prompt']:
                        display.debug(
                            'Sending become_pass in response to prompt')
                        stdin.write(
                            to_bytes(self._play_context.become_pass) + b'\n')
                        # On python3 stdin is a BufferedWriter, and we don't have a guarantee
                        # that the write will happen without a flush
                        stdin.flush()
                        self._flags['become_prompt'] = False
                        state += 1
                    elif self._flags['become_success']:
                        state += 1

                # We've requested escalation (with or without a password), now we
                # wait for an error message or a successful escalation.

                if states[state] == 'awaiting_escalation':
                    if self._flags['become_success']:
                        display.vvv('Escalation succeeded')
                        self._flags['become_success'] = False
                        state += 1
                    elif self._flags['become_error']:
                        display.vvv('Escalation failed')
                        self._terminate_process(p)
                        self._flags['become_error'] = False
                        raise AnsibleError('Incorrect %s password' %
                                           self._play_context.become_method)
                    elif self._flags['become_nopasswd_error']:
                        display.vvv('Escalation requires password')
                        self._terminate_process(p)
                        self._flags['become_nopasswd_error'] = False
                        raise AnsibleError('Missing %s password' %
                                           self._play_context.become_method)
                    elif self._flags['become_prompt']:
                        # This shouldn't happen, because we should see the "Sorry,
                        # try again" message first.
                        display.vvv('Escalation prompt repeated')
                        self._terminate_process(p)
                        self._flags['become_prompt'] = False
                        raise AnsibleError('Incorrect %s password' %
                                           self._play_context.become_method)

                # Once we're sure that the privilege escalation prompt, if any, has
                # been dealt with, we can send any initial data and start waiting
                # for output.

                if states[state] == 'ready_to_send':
                    if in_data:
                        self._send_initial_data(stdin, in_data)
                    state += 1

                # Now we're awaiting_exit: has the child process exited? If it has,
                # and we've read all available output from it, we're done.

                if poll is not None:
                    if not selector.get_map() or not events:
                        break
                    # We should not see further writes to the stdout/stderr file
                    # descriptors after the process has closed, set the select
                    # timeout to gather any last writes we may have missed.
                    timeout = 0
                    continue

                # If the process has not yet exited, but we've already read EOF from
                # its stdout and stderr (and thus no longer watching any file
                # descriptors), we can just wait for it to exit.

                elif not selector.get_map():
                    p.wait()
                    break

                # Otherwise there may still be outstanding data to read.
        finally:
            selector.close()
            # close stdin after process is terminated and stdout/stderr are read
            # completely (see also issue #848)
            stdin.close()

        if C.HOST_KEY_CHECKING:
            if cmd[0] == b"sshpass" and p.returncode == 6:
                raise AnsibleError(
                    'Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support '
                    'this.  Please add this host\'s fingerprint to your known_hosts file to manage this host.'
                )

        controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
        if p.returncode != 0 and controlpersisterror:
            raise AnsibleError(
                'using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" '
                '(or ssh_args in [ssh_connection] section of the config file) before running again'
            )

        # If we find a broken pipe because of ControlPersist timeout expiring (see #16731),
        # we raise a special exception so that we can retry a connection.
        controlpersist_broken_pipe = b'mux_client_hello_exchange: write packet: Broken pipe' in b_stderr
        if p.returncode == 255 and controlpersist_broken_pipe:
            raise AnsibleControlPersistBrokenPipeError(
                'SSH Error: data could not be sent because of ControlPersist broken pipe.'
            )

        if p.returncode == 255 and in_data and checkrc:
            raise AnsibleConnectionFailure(
                'SSH Error: data could not be sent to remote host "%s". Make sure this host can be reached over ssh'
                % self.host)

        return (p.returncode, b_stdout, b_stderr)
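
The heart of _bare_run() is a selectors-based read loop over the child's stdout and stderr: both pipes are made non-blocking, registered with a selector, drained as data arrives, and unregistered at EOF. A standalone, POSIX-only sketch of just that loop, without the pty handling or the privilege-escalation state machine:

# Standalone sketch of the selectors-based read loop at the heart of
# _bare_run(): register both pipes, accumulate chunks, and unregister each
# pipe when it reaches EOF. POSIX-only (fcntl), no pty or escalation logic.
import os
import fcntl
import selectors
import subprocess

def run_and_capture(cmd, timeout=5):
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    for fd in (p.stdout, p.stderr):
        # make reads non-blocking so .read() returns whatever is available
        fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

    selector = selectors.DefaultSelector()
    selector.register(p.stdout, selectors.EVENT_READ)
    selector.register(p.stderr, selectors.EVENT_READ)

    b_stdout = b_stderr = b''
    try:
        while selector.get_map():
            for key, _ in selector.select(timeout):
                chunk = key.fileobj.read()
                if not chunk:                      # EOF: stop watching this pipe
                    selector.unregister(key.fileobj)
                elif key.fileobj is p.stdout:
                    b_stdout += chunk
                else:
                    b_stderr += chunk
    finally:
        selector.close()
    return p.wait(), b_stdout, b_stderr

if __name__ == '__main__':
    print(run_and_capture(['echo', 'hello']))
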
Example #41
 def fetch_file(self, in_path, out_path):
     super(Connection, self).fetch_file(in_path, out_path)
     in_path = self._shell._unquote(in_path)
     out_path = out_path.replace('\\', '/')
     display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path),
                 host=self._winrm_host)
     buffer_size = 2**19  # 0.5MB chunks
     makedirs_safe(os.path.dirname(out_path))
     out_file = None
     try:
         offset = 0
         while True:
             try:
                 script = '''
                     If (Test-Path -PathType Leaf "%(path)s")
                     {
                         $stream = New-Object IO.FileStream("%(path)s", [System.IO.FileMode]::Open, [System.IO.FileAccess]::Read, [IO.FileShare]::ReadWrite);
                         $stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
                         $buffer = New-Object Byte[] %(buffer_size)d;
                         $bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
                         $bytes = $buffer[0..($bytesRead-1)];
                         [System.Convert]::ToBase64String($bytes);
                         $stream.Close() | Out-Null;
                     }
                     ElseIf (Test-Path -PathType Container "%(path)s")
                     {
                         Write-Host "[DIR]";
                     }
                     Else
                     {
                         Write-Error "%(path)s does not exist";
                         Exit 1;
                     }
                 ''' % dict(buffer_size=buffer_size,
                            path=self._shell._escape(in_path),
                            offset=offset)
                 display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' %
                               (in_path, out_path, offset),
                               host=self._winrm_host)
                 cmd_parts = self._shell._encode_script(script,
                                                        as_list=True,
                                                        preserve_rc=False)
                 result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
                 if result.status_code != 0:
                     raise IOError(to_native(result.std_err))
                 if result.std_out.strip() == '[DIR]':
                     data = None
                 else:
                     data = base64.b64decode(result.std_out.strip())
                 if data is None:
                     makedirs_safe(out_path)
                     break
                 else:
                     if not out_file:
                         # If out_path is a directory and we're expecting a file, bail out now.
                         if os.path.isdir(
                                 to_bytes(out_path,
                                          errors='surrogate_or_strict')):
                             break
                         out_file = open(
                             to_bytes(out_path,
                                      errors='surrogate_or_strict'), 'wb')
                     out_file.write(data)
                     if len(data) < buffer_size:
                         break
                     offset += len(data)
             except Exception:
                 traceback.print_exc()
                 raise AnsibleError('failed to transfer file to "%s"' %
                                    out_path)
     finally:
         if out_file:
             out_file.close()
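
fetch_file() pulls the remote file in base64-encoded chunks and stops as soon as a chunk comes back shorter than the buffer size. A local-only sketch of that reassembly logic, where a plain file read stands in for the remote PowerShell snippet:

# Local-only sketch of the chunked fetch above: a "remote read" is simulated
# by seeking into a source file, each chunk travels base64-encoded, and the
# loop stops when a chunk comes back shorter than the buffer size.
import base64

def read_chunk_b64(path, offset, buffer_size):
    # stands in for the PowerShell snippet that seeks and reads remotely
    with open(path, 'rb') as f:
        f.seek(offset)
        return base64.b64encode(f.read(buffer_size))

def fetch(in_path, out_path, buffer_size=2 ** 19):
    offset = 0
    with open(out_path, 'wb') as out_file:
        while True:
            data = base64.b64decode(read_chunk_b64(in_path, offset, buffer_size))
            out_file.write(data)
            if len(data) < buffer_size:
                break
            offset += len(data)

if __name__ == '__main__':
    with open('/tmp/fetch_src.bin', 'wb') as f:
        f.write(b'x' * (2 ** 19 + 123))            # just over one chunk
    fetch('/tmp/fetch_src.bin', '/tmp/fetch_dst.bin')
    print(open('/tmp/fetch_dst.bin', 'rb').read() == open('/tmp/fetch_src.bin', 'rb').read())
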
Example #42
    def put_file(self, in_path, out_path):
        super(Connection, self).put_file(in_path, out_path)
        out_path = self._shell._unquote(out_path)
        display.vvv('PUT "%s" TO "%s"' % (in_path, out_path),
                    host=self._winrm_host)
        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound('file or module does not exist: "%s"' %
                                      in_path)

        script_template = u'''
            begin {{
                $path = '{0}'

                $DebugPreference = "Continue"
                $ErrorActionPreference = "Stop"
                Set-StrictMode -Version 2

                $fd = [System.IO.File]::Create($path)

                $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()

                $bytes = @() #initialize for empty file case
            }}
            process {{
               $bytes = [System.Convert]::FromBase64String($input)
               $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
               $fd.Write($bytes, 0, $bytes.Length)
            }}
            end {{
                $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null

                $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()

                $fd.Close()

                Write-Output "{{""sha1"":""$hash""}}"
            }}
        '''

        script = script_template.format(self._shell._escape(out_path))
        cmd_parts = self._shell._encode_script(script,
                                               as_list=True,
                                               strict_mode=False,
                                               preserve_rc=False)

        result = self._winrm_exec(cmd_parts[0],
                                  cmd_parts[1:],
                                  stdin_iterator=self._put_file_stdin_iterator(
                                      in_path, out_path))
        # TODO: improve error handling
        if result.status_code != 0:
            raise AnsibleError(to_native(result.std_err))

        put_output = json.loads(result.std_out)
        remote_sha1 = put_output.get("sha1")

        if not remote_sha1:
            raise AnsibleError("Remote sha1 was not returned")

        local_sha1 = secure_hash(in_path)

        if not remote_sha1 == local_sha1:
            raise AnsibleError(
                "Remote sha1 hash {0} does not match local hash {1}".format(
                    to_native(remote_sha1), to_native(local_sha1)))
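
The upload is verified by comparing the SHA-1 reported back by the remote PowerShell pipeline against the hash of the local file. A sketch of that check, with hashlib standing in for Ansible's secure_hash() helper:

# Sketch of the integrity check at the end of put_file(): hash the local file
# and compare it to the sha1 reported back by the remote side.
import hashlib

def sha1_of_file(path, chunk_size=2 ** 16):
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

def verify_upload(local_path, remote_sha1):
    local_sha1 = sha1_of_file(local_path)
    if remote_sha1 != local_sha1:
        raise RuntimeError('Remote sha1 hash %s does not match local hash %s'
                           % (remote_sha1, local_sha1))
    return local_sha1

if __name__ == '__main__':
    with open('/tmp/put_file_demo.txt', 'wb') as f:
        f.write(b'hello winrm\n')
    print(verify_upload('/tmp/put_file_demo.txt',
                        sha1_of_file('/tmp/put_file_demo.txt')))
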
Example #43
    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        socket_path = None

        if self._play_context.connection in ('network_cli', 'httpapi'):
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning(
                    'provider is unnecessary when using %s and will be ignored'
                    % self._play_context.connection)
                del self._task.args['provider']
            if self._task.args.get('transport'):
                display.warning(
                    'transport is unnecessary when using %s and will be ignored'
                    % self._play_context.connection)
                del self._task.args['transport']
        elif self._play_context.connection == 'local':
            provider = load_provider(eos_provider_spec, self._task.args)
            transport = provider['transport'] or 'cli'

            display.vvvv('connection transport is %s' % transport,
                         self._play_context.remote_addr)

            if transport == 'cli':
                pc = copy.deepcopy(self._play_context)
                pc.connection = 'network_cli'
                pc.network_os = 'eos'
                pc.remote_addr = provider['host'] or self._play_context.remote_addr
                pc.port = int(provider['port'] or self._play_context.port or 22)
                pc.remote_user = provider['username'] or self._play_context.connection_user
                pc.password = provider['password'] or self._play_context.password
                pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
                pc.timeout = int(provider['timeout']) if provider['timeout'] else None
                pc.become = provider['authorize'] or False
                if pc.become:
                    pc.become_method = 'enable'
                pc.become_pass = provider['auth_pass']

                display.vvv(
                    'using connection plugin %s (was local)' % pc.connection,
                    pc.remote_addr)
                connection = self._shared_loader_obj.connection_loader.get(
                    'persistent', pc, sys.stdin)

                if connection._play_context.timeout is None:
                    connection._play_context.timeout = connection.get_option(
                        'persistent_command_timeout')

                socket_path = connection.run()
                display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
                if not socket_path:
                    return {
                        'failed': True,
                        'msg': 'unable to open shell. Please see: '
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'
                    }

                task_vars['ansible_socket'] = socket_path

            else:
                self._task.args['provider'] = ActionModule.eapi_implementation(
                    provider, self._play_context)
        else:
            return {
                'failed': True,
                'msg': 'Connection type %s is not valid for this module'
                       % self._play_context.connection
            }

        if (self._play_context.connection == 'local' and transport
                == 'cli') or self._play_context.connection == 'network_cli':
            # make sure we are in the right cli context which should be
            # enable mode and not config mode
            if socket_path is None:
                socket_path = self._connection.socket_path

            conn = Connection(socket_path)
            out = conn.get_prompt()
            while '(config' in to_text(out, errors='surrogate_then_replace').strip():
                display.vvvv('wrong context, sending exit to device',
                             self._play_context.remote_addr)
                conn.send_command('abort')
                out = conn.get_prompt()

        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
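
In the 'local' branch above, every connection setting is taken from the provider dict when present and otherwise falls back to the existing play context. A sketch of that fallback merge, with plain dicts standing in for the real PlayContext object:

# Sketch of the provider fallback logic in the 'local' branch: each setting
# comes from the provider when set, otherwise from the existing play context.
# Plain dicts are used here instead of Ansible's PlayContext.
import copy

def build_connection_context(play_context, provider):
    pc = copy.deepcopy(play_context)
    pc['connection'] = 'network_cli'
    pc['network_os'] = 'eos'
    pc['remote_addr'] = provider.get('host') or play_context['remote_addr']
    pc['port'] = int(provider.get('port') or play_context.get('port') or 22)
    pc['remote_user'] = provider.get('username') or play_context.get('remote_user')
    pc['password'] = provider.get('password') or play_context.get('password')
    pc['become'] = provider.get('authorize') or False
    if pc['become']:
        pc['become_method'] = 'enable'
    return pc

if __name__ == '__main__':
    play_context = {'remote_addr': '10.0.0.1', 'remote_user': 'admin',
                    'password': None, 'port': None}
    provider = {'host': 'eos-switch-01', 'username': 'netops', 'authorize': True}
    print(build_connection_context(play_context, provider))
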
Example #44
    def parse_inventory(self, host_list):

        if isinstance(host_list, string_types):
            if "," in host_list:
                host_list = host_list.split(",")
                host_list = [h for h in host_list if h and h.strip()]

        self.parser = None

        # Always create the 'all' and 'ungrouped' groups, even if host_list is
        # empty: in this case we will subsequently add the implicit 'localhost' to it.

        ungrouped = Group('ungrouped')
        all = Group('all')
        all.add_child_group(ungrouped)
        base_groups = frozenset([all, ungrouped])

        self.groups = dict(all=all, ungrouped=ungrouped)

        if host_list is None:
            pass
        elif isinstance(host_list, list):
            for h in host_list:
                try:
                    (host, port) = parse_address(h, allow_ranges=False)
                except AnsibleError as e:
                    display.vvv(
                        "Unable to parse address from hostname, leaving unchanged: %s"
                        % to_text(e))
                    host = h
                    port = None

                new_host = Host(host, port)
                if h in C.LOCALHOST:
                    # set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
                    if self.localhost is not None:
                        display.warning(
                            "A duplicate localhost-like entry was found (%s). First found localhost was %s"
                            % (h, self.localhost.name))
                    display.vvvv("Set default localhost to %s" % h)
                    self.localhost = new_host
                all.add_host(new_host)
        elif self._loader.path_exists(host_list):
            # TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
            if self.is_directory(host_list):
                # Ensure basedir is inside the directory
                host_list = os.path.join(self.host_list, "")
                self.parser = InventoryDirectory(loader=self._loader,
                                                 groups=self.groups,
                                                 filename=host_list)
            else:
                self.parser = get_file_parser(host_list, self.groups,
                                              self._loader)
                vars_loader.add_directory(self._basedir, with_subdir=True)

            if not self.parser:
                # should never happen, but JIC
                raise AnsibleError(
                    "Unable to parse %s as an inventory source" % host_list)
        else:
            display.warning("Host file not found: %s" % to_text(host_list))

        self._vars_plugins = [x for x in vars_loader.all(self)]

        ### POST PROCESS groups and hosts after specific parser was invoked

        group_names = set()
        # set group vars from group_vars/ files and vars plugins
        for g in self.groups:
            group = self.groups[g]
            group.vars = combine_vars(group.vars,
                                      self.get_group_variables(group.name))
            self.get_group_vars(group)
            group_names.add(group.name)

        host_names = set()
        # get host vars from host_vars/ files and vars plugins
        for host in self.get_hosts(ignore_limits=True,
                                   ignore_restrictions=True):
            host.vars = combine_vars(host.vars,
                                     self.get_host_variables(host.name))
            self.get_host_vars(host)
            host_names.add(host.name)

            mygroups = host.get_groups()

            # ensure hosts are always in 'all'
            if all not in mygroups:
                all.add_host(host)

            if ungrouped in mygroups:
                # clear ungrouped of any incorrectly stored by parser
                if set(mygroups).difference(base_groups):
                    host.remove_group(ungrouped)
            else:
                # add ungrouped hosts to ungrouped
                length = len(mygroups)
                if length == 0 or (length == 1 and all in mygroups):
                    ungrouped.add_host(host)

        # warn if overloading identifier as both group and host
        for conflict in group_names.intersection(host_names):
            display.warning("Found both group and host with same name: %s" %
                            conflict)
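
When host_list arrives as a comma-separated string, it is split into individual hosts and an optional port is peeled off each one. A sketch of that handling; parse_host() is a deliberately simplified, hypothetical stand-in for Ansible's parse_address():

# Sketch of the host-list handling at the top of parse_inventory(): a
# comma-separated string becomes a list of hosts, and an optional ":port"
# suffix is split off. parse_host() is a simplified stand-in for parse_address().
def parse_host(spec):
    host, sep, port = spec.rpartition(':')
    if sep and port.isdigit():
        return host, int(port)
    return spec, None

def expand_host_list(host_list):
    if isinstance(host_list, str) and ',' in host_list:
        host_list = [h.strip() for h in host_list.split(',') if h.strip()]
    return [parse_host(h) for h in host_list]

if __name__ == '__main__':
    print(expand_host_list('web01, web02:2222, db01'))
    # [('web01', None), ('web02', 2222), ('db01', None)]
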
Example #45
    def _connect_uncached(self):
        ''' activates the connection object '''

        if not HAVE_PARAMIKO:
            raise AnsibleError("paramiko is not installed")

        port = self._play_context.port or 22
        display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" %
                    (self._play_context.remote_user, port,
                     self._play_context.remote_addr),
                    host=self._play_context.remote_addr)

        ssh = paramiko.SSHClient()

        self.keyfile = os.path.expanduser("~/.ssh/known_hosts")

        if C.HOST_KEY_CHECKING:
            for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts",
                                    "/etc/openssh/ssh_known_hosts"):
                try:
                    #TODO: check if we need to look at several possible locations, possible for loop
                    ssh.load_system_host_keys(ssh_known_hosts)
                    break
                except IOError:
                    pass  # file was not found, but not required to function
            ssh.load_system_host_keys()

        sock_kwarg = self._parse_proxy_command(port)

        ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self))

        allow_agent = True

        if self._play_context.password is not None:
            allow_agent = False

        try:
            key_filename = None
            if self._play_context.private_key_file:
                key_filename = os.path.expanduser(
                    self._play_context.private_key_file)

            ssh.connect(self._play_context.remote_addr,
                        username=self._play_context.remote_user,
                        allow_agent=allow_agent,
                        look_for_keys=C.PARAMIKO_LOOK_FOR_KEYS,
                        key_filename=key_filename,
                        password=self._play_context.password,
                        timeout=self._play_context.timeout,
                        port=port,
                        **sock_kwarg)
        except paramiko.ssh_exception.BadHostKeyException as e:
            raise AnsibleConnectionFailure('host key mismatch for %s' %
                                           e.hostname)
        except Exception as e:
            msg = str(e)
            if "PID check failed" in msg:
                raise AnsibleError(
                    "paramiko version issue, please upgrade paramiko on the machine running ansible"
                )
            elif "Private key file is encrypted" in msg:
                msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
                    self._play_context.remote_user,
                    self._play_context.remote_addr, port, msg)
                raise AnsibleConnectionFailure(msg)
            else:
                raise AnsibleConnectionFailure(msg)

        return ssh
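
A minimal paramiko sketch mirroring the connect() call above: load the system known_hosts, pick a missing-host-key policy, and connect with either a key file or a password. The host, user, and key path are placeholders, and running it requires paramiko plus a reachable SSH server:

# Minimal paramiko client setup mirroring the connect() call above. Host,
# user and key path are placeholders; AutoAddPolicy stands in for the custom
# MyAddPolicy used by the plugin.
import os
import paramiko

def connect_ssh(host, user, port=22, password=None, key_file=None, timeout=10):
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()                       # system + user known_hosts
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(
        host,
        port=port,
        username=user,
        password=password,
        key_filename=os.path.expanduser(key_file) if key_file else None,
        allow_agent=password is None,                 # skip the agent when a password is given
        look_for_keys=password is None,
        timeout=timeout,
    )
    return ssh

if __name__ == '__main__':
    client = connect_ssh('192.0.2.10', 'deploy', key_file='~/.ssh/id_rsa')
    stdin, stdout, stderr = client.exec_command('uname -a')
    print(stdout.read().decode())
    client.close()
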
Example #46
 def close(self):
     ''' terminate the connection; nothing to do here '''
     display.vvv('XXX close')
     super(Connection, self).close()
     #self.ssh.close()
     self._connected = False
Example #47
    def get_vars(self,
                 loader,
                 play=None,
                 host=None,
                 task=None,
                 include_hostvars=True,
                 include_delegate_to=True,
                 use_cache=True):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars
        '''

        display.debug("in VariableManager get_vars()")
        cache_entry = self._get_cache_entry(play=play, host=host, task=task)
        if cache_entry in VARIABLE_CACHE and use_cache:
            display.debug("vars are cached, returning them now")
            return VARIABLE_CACHE[cache_entry]

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            loader=loader,
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
        )

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_default_vars())

        # if we have a task in this context, and that task has a role, make
        # sure it sees its defaults above any other roles, as we previously
        # (v1) made sure each task had a copy of its roles default vars
        if task and task._role is not None and (play or task.action
                                                == 'include_role'):
            all_vars = combine_vars(
                all_vars,
                task._role.get_default_vars(dep_chain=task.get_dep_chain()))

        if host:
            # first we merge in vars from groups specified in the inventory (INI or script)
            all_vars = combine_vars(all_vars, host.get_group_vars())

            # next, we load any vars from group_vars files and then any vars from host_vars
            # files which may apply to this host or the groups it belongs to. We merge in the
            # special 'all' group_vars first, if they exist
            if 'all' in self._group_vars_files:
                data = preprocess_vars(self._group_vars_files['all'])
                for item in data:
                    all_vars = combine_vars(all_vars, item)

            for group in sorted(host.get_groups(),
                                key=lambda g: (g.depth, g.name)):
                if group.name in self._group_vars_files and group.name != 'all':
                    for data in self._group_vars_files[group.name]:
                        data = preprocess_vars(data)
                        for item in data:
                            all_vars = combine_vars(all_vars, item)

            # then we merge in vars from the host specified in the inventory (INI or script)
            all_vars = combine_vars(all_vars, host.get_vars())

            # then we merge in the host_vars/<hostname> file, if it exists
            host_name = host.get_name()
            if host_name in self._host_vars_files:
                for data in self._host_vars_files[host_name]:
                    data = preprocess_vars(data)
                    for item in data:
                        all_vars = combine_vars(all_vars, item)

            # finally, the fact cache for this host, if it exists
            try:
                host_facts = wrap_var(self._fact_cache.get(host.name, dict()))
                all_vars = combine_vars(all_vars, host_facts)
            except KeyError:
                pass

        if play:
            all_vars = combine_vars(all_vars, play.get_vars())

            for vars_file_item in play.get_vars_files():
                # create a set of temporary vars here, which incorporate the extra
                # and magic vars so we can properly template the vars_files entries
                temp_vars = combine_vars(all_vars, self._extra_vars)
                temp_vars = combine_vars(temp_vars, magic_variables)
                templar = Templar(loader=loader, variables=temp_vars)

                # we assume each item in the list is itself a list, as we
                # support "conditional includes" for vars_files, which mimics
                # the with_first_found mechanism.
                vars_file_list = vars_file_item
                if not isinstance(vars_file_list, list):
                    vars_file_list = [vars_file_list]

                # now we iterate through the (potential) files, and break out
                # as soon as we read one from the list. If none are found, we
                # raise an error, which is silently ignored at this point.
                try:
                    for vars_file in vars_file_list:
                        vars_file = templar.template(vars_file)
                        try:
                            data = preprocess_vars(
                                loader.load_from_file(vars_file))
                            if data is not None:
                                for item in data:
                                    all_vars = combine_vars(all_vars, item)
                            break
                        except AnsibleFileNotFound:
                            # we continue on loader failures
                            continue
                        except AnsibleParserError:
                            raise
                    else:
                        # if include_delegate_to is set to False, we ignore the missing
                        # vars file here because we're working on a delegated host
                        if include_delegate_to:
                            raise AnsibleFileNotFound(
                                "vars file %s was not found" % vars_file_item)
                except (UndefinedError, AnsibleUndefinedVariable):
                    if host is not None and self._fact_cache.get(
                            host.name,
                            dict()).get('module_setup') and task is not None:
                        raise AnsibleUndefinedVariable(
                            "an undefined variable was found when attempting to template the vars_files item '%s'"
                            % vars_file_item,
                            obj=vars_file_item)
                    else:
                        # we do not have a full context here, and the missing variable could be
                        # because of that, so just show a warning and continue
                        display.vvv(
                            "skipping vars_file '%s' due to an undefined variable"
                            % vars_file_item)
                        continue

            # By default, we now merge in all vars from all roles in the play,
            # unless the user has disabled this via a config option
            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = combine_vars(
                        all_vars, role.get_vars(include_params=False))

        # next, we merge in the vars from the role, which will specifically
        # follow the role dependency chain, and then we merge in the tasks
        # vars (which will look at parent blocks/task includes)
        if task:
            if task._role:
                all_vars = combine_vars(
                    all_vars,
                    task._role.get_vars(task.get_dep_chain(),
                                        include_params=False))
            all_vars = combine_vars(all_vars, task.get_vars())

        # next, we merge in the vars cache (include vars) and nonpersistent
        # facts cache (set_fact/register), in that order
        if host:
            all_vars = combine_vars(
                all_vars, self._vars_cache.get(host.get_name(), dict()))
            all_vars = combine_vars(
                all_vars,
                self._nonpersistent_fact_cache.get(host.name, dict()))

        # next, we merge in role params and task include params
        if task:
            if task._role:
                all_vars = combine_vars(
                    all_vars, task._role.get_role_params(task.get_dep_chain()))

            # special case for include tasks, where the include params
            # may be specified in the vars field for the task, which should
            # have higher precedence than the vars/np facts above
            all_vars = combine_vars(all_vars, task.get_include_params())

        # finally, we merge in extra vars and the magic variables
        all_vars = combine_vars(all_vars, self._extra_vars)
        all_vars = combine_vars(all_vars, magic_variables)

        # special case for the 'environment' magic variable, as someone
        # may have set it as a variable and we don't want to stomp on it
        if task:
            if 'environment' not in all_vars:
                all_vars['environment'] = task.environment
            else:
                display.warning(
                    "The variable 'environment' appears to be used already, which is also used internally for environment variables set on the task/block/play. You should use a different variable name to avoid conflicts with this internal variable"
                )

        # if we have a task and we're delegating to another host, figure out the
        # variables for that host now so we don't have to rely on hostvars later
        if task and task.delegate_to is not None and include_delegate_to:
            all_vars['ansible_delegated_vars'] = self._get_delegated_vars(
                loader, play, task, all_vars)

        #VARIABLE_CACHE[cache_entry] = all_vars
        if task or play:
            all_vars['vars'] = all_vars.copy()

        display.debug("done with get_vars()")
        return all_vars
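
The precedence order listed in the docstring above falls out of the way each source is folded into all_vars in turn: whatever is merged later overwrites earlier keys. A minimal sketch of that layering, assuming combine_vars behaves like a plain dict merge in the default 'replace' mode (the real helper can also hash-merge):

def combine_vars(a, b):
    # later source wins on key conflicts, mirroring 'replace' merge behaviour
    result = dict(a)
    result.update(b)
    return result

all_vars = {}
for source in (
        {'http_port': 80},                      # role defaults
        {'http_port': 8080, 'env': 'staging'},  # group_vars
        {'env': 'prod'},                        # host_vars
        {'http_port': 9090},                    # extra vars (highest precedence)
):
    all_vars = combine_vars(all_vars, source)

print(all_vars)  # {'http_port': 9090, 'env': 'prod'}
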
Example No. 48
    def scm_archive_role(src,
                         scm='git',
                         name=None,
                         version='HEAD',
                         keep_scm_meta=False):
        def run_scm_cmd(cmd, tempdir):
            try:
                stdout = ''
                stderr = ''
                popen = Popen(cmd, cwd=tempdir, stdout=PIPE, stderr=PIPE)
                stdout, stderr = popen.communicate()
            except Exception as e:
                ran = " ".join(cmd)
                display.debug("ran %s:" % ran)
                display.debug("\tstdout: " + stdout)
                display.debug("\tstderr: " + stderr)
                raise AnsibleError("when executing %s: %s" %
                                   (ran, to_native(e)))
            if popen.returncode != 0:
                raise AnsibleError(
                    "- command %s failed in directory %s (rc=%s)" %
                    (' '.join(cmd), tempdir, popen.returncode))

        if scm not in ['hg', 'git']:
            raise AnsibleError("- scm %s is not currently supported" % scm)

        try:
            scm_path = get_bin_path(scm)
        except (ValueError, OSError, IOError):
            raise AnsibleError(
                "could not find/use %s, it is required to continue with installing %s"
                % (scm, src))

        tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
        clone_cmd = [scm_path, 'clone', src, name]
        run_scm_cmd(clone_cmd, tempdir)

        if scm == 'git' and version:
            checkout_cmd = [scm_path, 'checkout', version]
            run_scm_cmd(checkout_cmd, os.path.join(tempdir, name))

        temp_file = tempfile.NamedTemporaryFile(delete=False,
                                                suffix='.tar',
                                                dir=C.DEFAULT_LOCAL_TMP)
        archive_cmd = None
        if keep_scm_meta:
            display.vvv('tarring %s from %s to %s' %
                        (name, tempdir, temp_file.name))
            with tarfile.open(temp_file.name, "w") as tar:
                tar.add(os.path.join(tempdir, name), arcname=name)
        elif scm == 'hg':
            archive_cmd = [scm_path, 'archive', '--prefix', "%s/" % name]
            if version:
                archive_cmd.extend(['-r', version])
            archive_cmd.append(temp_file.name)
        elif scm == 'git':
            archive_cmd = [
                scm_path, 'archive',
                '--prefix=%s/' % name,
                '--output=%s' % temp_file.name
            ]
            if version:
                archive_cmd.append(version)
            else:
                archive_cmd.append('HEAD')

        if archive_cmd is not None:
            display.vvv('archiving %s' % archive_cmd)
            run_scm_cmd(archive_cmd, os.path.join(tempdir, name))

        return temp_file.name
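
For the git case, the helper above boils down to a clone into a temporary directory, an optional checkout of the requested version, and a git archive into a tar file. A standalone sketch of that sequence, assuming git is on PATH; the src, name and version values are hypothetical placeholders:

import os
import subprocess
import tempfile

src = 'https://github.com/example/some-role.git'  # hypothetical repository
name = 'some-role'
version = 'HEAD'

tempdir = tempfile.mkdtemp()
subprocess.check_call(['git', 'clone', src, name], cwd=tempdir)
subprocess.check_call(['git', 'checkout', version], cwd=os.path.join(tempdir, name))

tar_path = os.path.join(tempdir, '%s.tar' % name)
subprocess.check_call(
    ['git', 'archive', '--prefix=%s/' % name, '--output=%s' % tar_path, version],
    cwd=os.path.join(tempdir, name))
print(tar_path)
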
Example No. 49
    def execute_install(self):
        """
        uses the args list of roles to be installed, unless -f was specified. The list of roles
        can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.
        """
        role_file = self.options.role_file

        if len(self.args) == 0 and role_file is None:
            # the user needs to specify one of either --role-file or specify a single user/role name
            raise AnsibleOptionsError(
                "- you must specify a user/role name or a roles file")

        no_deps = self.options.no_deps
        force = self.options.force

        roles_left = []
        if role_file:
            try:
                f = open(role_file, 'r')
                if role_file.endswith('.yaml') or role_file.endswith('.yml'):
                    try:
                        required_roles = yaml.safe_load(f.read())
                    except Exception as e:
                        raise AnsibleError(
                            "Unable to load data from the requirements file: %s"
                            % role_file)

                    if required_roles is None:
                        raise AnsibleError("No roles found in file: %s" %
                                           role_file)

                    for role in required_roles:
                        if "include" not in role:
                            role = RoleRequirement.role_yaml_parse(role)
                            display.vvv("found role %s in yaml file" %
                                        str(role))
                            if "name" not in role and "scm" not in role:
                                raise AnsibleError(
                                    "Must specify name or src for role")
                            roles_left.append(GalaxyRole(self.galaxy, **role))
                        else:
                            with open(role["include"]) as f_include:
                                try:
                                    roles_left += [
                                        GalaxyRole(self.galaxy, **r) for r in (
                                            RoleRequirement.role_yaml_parse(i)
                                            for i in yaml.safe_load(f_include))
                                    ]
                                except Exception as e:
                                    msg = "Unable to load data from the include requirements file: %s %s"
                                    raise AnsibleError(msg % (role_file, e))
                else:
                    raise AnsibleError("Invalid role requirements file")
                f.close()
            except (IOError, OSError) as e:
                raise AnsibleError('Unable to open %s: %s' %
                                   (role_file, str(e)))
        else:
            # roles were specified directly, so we'll just go out grab them
            # (and their dependencies, unless the user doesn't want us to).
            for rname in self.args:
                role = RoleRequirement.role_yaml_parse(rname.strip())
                roles_left.append(GalaxyRole(self.galaxy, **role))

        for role in roles_left:
            # only process roles from the roles file whose names match the given args, if any
            if role_file and self.args and role.name not in self.args:
                display.vvv('Skipping role %s' % role.name)
                continue

            display.vvv('Processing role %s ' % role.name)

            # query the galaxy API for the role data

            if role.install_info is not None:
                if role.install_info['version'] != role.version or force:
                    if force:
                        display.display(
                            '- changing role %s from %s to %s' %
                            (role.name, role.install_info['version'],
                             role.version or "unspecified"))
                        role.remove()
                    else:
                        display.warning(
                            '- %s (%s) is already installed - use --force to change version to %s'
                            % (role.name, role.install_info['version'],
                               role.version or "unspecified"))
                        continue
                else:
                    if not force:
                        display.display(
                            '- %s is already installed, skipping.' % str(role))
                        continue

            try:
                installed = role.install()
            except AnsibleError as e:
                display.warning("- %s was NOT installed successfully: %s " %
                                (role.name, str(e)))
                self.exit_without_ignore()
                continue

            # install dependencies, if we want them
            if not no_deps and installed:
                if not role.metadata:
                    display.warning(
                        "Meta file %s is empty. Skipping dependencies." %
                        role.path)
                else:
                    role_dependencies = role.metadata.get('dependencies') or []
                    for dep in role_dependencies:
                        display.debug('Installing dep %s' % dep)
                        dep_req = RoleRequirement()
                        dep_info = dep_req.role_yaml_parse(dep)
                        dep_role = GalaxyRole(self.galaxy, **dep_info)
                        if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                            # we know we can skip this, as it's not going to
                            # be found on galaxy.ansible.com
                            continue
                        if dep_role.install_info is None:
                            if dep_role not in roles_left:
                                display.display('- adding dependency: %s' %
                                                str(dep_role))
                                roles_left.append(dep_role)
                            else:
                                display.display(
                                    '- dependency %s already pending installation.'
                                    % dep_role.name)
                        else:
                            if dep_role.install_info[
                                    'version'] != dep_role.version:
                                display.warning(
                                    '- dependency %s from role %s differs from already installed version (%s), skipping'
                                    % (str(dep_role), role.name,
                                       dep_role.install_info['version']))
                            else:
                                display.display(
                                    '- dependency %s is already installed, skipping.'
                                    % dep_role.name)

            if not installed:
                display.warning("- %s was NOT installed successfully." %
                                role.name)
                self.exit_without_ignore()

        return 0
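
The dependency handling above is a work queue: roles_left is walked in order and any dependency that is not yet pending gets appended, so it is installed in the same pass. A stripped-down sketch of that pattern, using a hypothetical dependency map in place of the Galaxy role metadata:

# hypothetical stand-in for role.metadata.get('dependencies')
DEPS = {
    'acme.webserver': ['acme.common', 'acme.firewall'],
    'acme.firewall': ['acme.common'],
    'acme.common': [],
}

roles_left = ['acme.webserver']
installed = []

for role in roles_left:              # the list grows while we iterate, like roles_left above
    if role in installed:
        continue
    installed.append(role)
    for dep in DEPS.get(role, []):
        if dep not in roles_left:
            roles_left.append(dep)   # queue the dependency for installation

print(installed)  # ['acme.webserver', 'acme.common', 'acme.firewall']
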
Example No. 50
    def do_pattern_group(self, block):

        results = list()
        registers = {}

        for entry in block:
            task = entry.copy()

            name = task.pop('name', None)
            display.vvv(
                "text_parser: starting pattern_match [%s] in pattern_group" %
                name)

            register = task.pop('register', None)

            when = task.pop('when', None)
            if when is not None:
                if not self._check_conditional(when, self.ds):
                    display.warning('skipping task due to conditional check failure')
                    continue

            loop = task.pop('loop', None)
            if loop:
                loop = self.template(loop, self.ds)

            loop_var = task.pop('loop_control', {}).get('loop_var') or 'item'
            display.vvvv('text_parser: loop_var is %s' % loop_var)

            if not set(task).issubset(('pattern_group', 'pattern_match')):
                raise AnsibleError('invalid directive specified')

            if 'pattern_group' in task:
                if loop and isinstance(
                        loop, collections.Iterable) and not isinstance(
                            loop, string_types):
                    res = list()
                    for loop_item in loop:
                        self.ds[loop_var] = loop_item
                        res.append(self.do_pattern_group(
                            task['pattern_group']))
                else:
                    res = self.do_pattern_group(task['pattern_group'])

                if res:
                    results.append(res)
                if register:
                    registers[register] = res

            elif isinstance(loop, collections.Iterable) and not isinstance(
                    loop, string_types):
                loop_result = list()

                for loop_item in loop:
                    self.ds[loop_var] = loop_item
                    loop_result.append(self._process_directive(task))

                results.append(loop_result)

                if register:
                    registers[register] = loop_result

            else:
                res = self._process_directive(task)
                if res:
                    results.append(res)
                if register:
                    registers[register] = res

        return registers
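
The loop handling above binds each loop item to the configurable loop_var inside the template namespace before the nested group or directive runs, and collects the per-item results. A minimal sketch of that binding, with a hypothetical process_directive standing in for self._process_directive():

def process_directive(task, namespace):
    # hypothetical stand-in that just echoes the current loop item
    return 'matched %s' % namespace.get('item')

namespace = {}
task = {'pattern_match': {'regex': r'hostname (\S+)'}}
loop = ['line 1', 'line 2']
loop_var = 'item'

results = []
for loop_item in loop:
    namespace[loop_var] = loop_item          # expose the item under loop_var
    results.append(process_directive(task, namespace))

print(results)  # ['matched line 1', 'matched line 2']
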
Example No. 51
    def _connect(self):
        ''' connect to the lxc; nothing to do here '''
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)
            self._connected = True
Example No. 52
    def _run(self, cmd, in_data, sudoable=True, tty=False):
        '''
        Starts the command and communicates with it until it ends.
        '''

        display_cmd = map(pipes.quote, cmd[:-1]) + [cmd[-1]]
        display.vvv('SSH: EXEC {0}'.format(' '.join(display_cmd)),
                    host=self.host)

        # Start the given command.

        p = subprocess.Popen(cmd,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdin = p.stdin

        # If we are using SSH password authentication, write the password into
        # the pipe we opened in _build_command.

        if self._play_context.password:
            os.close(self.sshpass_pipe[0])
            os.write(self.sshpass_pipe[1],
                     "{0}\n".format(self._play_context.password))
            os.close(self.sshpass_pipe[1])

        ## SSH state machine
        #
        # Now we read and accumulate output from the running process until it
        # exits. Depending on the circumstances, we may also need to write an
        # escalation password and/or pipelined input to the process.

        states = [
            'awaiting_prompt', 'awaiting_escalation', 'ready_to_send',
            'awaiting_exit'
        ]

        # Are we requesting privilege escalation? Right now, we may be invoked
        # to execute sftp/scp with sudoable=True, but we can request escalation
        # only when using ssh. Otherwise we can send initial data straightaway.

        state = states.index('ready_to_send')
        if 'ssh' in cmd:
            if self._play_context.prompt:
                # We're requesting escalation with a password, so we have to
                # wait for a password prompt.
                state = states.index('awaiting_prompt')
                display.debug('Initial state: %s: %s' %
                              (states[state], self._play_context.prompt))
            elif self._play_context.become and self._play_context.success_key:
                # We're requesting escalation without a password, so we have to
                # detect success/failure before sending any initial data.
                state = states.index('awaiting_escalation')
                display.debug('Initial state: %s: %s' %
                              (states[state], self._play_context.success_key))

        # We store accumulated stdout and stderr output from the process here,
        # but strip any privilege escalation prompt/confirmation lines first.
        # Output is accumulated into tmp_*, complete lines are extracted into
        # an array, then checked and removed or copied to stdout or stderr. We
        # set any flags based on examining the output in self._flags.

        stdout = stderr = ''
        tmp_stdout = tmp_stderr = ''

        self._flags = dict(become_prompt=False,
                           become_success=False,
                           become_error=False,
                           become_nopasswd_error=False)

        # select timeout should be longer than the connect timeout, otherwise
        # they will race each other when we can't connect, and the connect
        # timeout usually fails
        timeout = 2 + self._play_context.timeout
        rpipes = [p.stdout, p.stderr]
        for fd in rpipes:
            fcntl.fcntl(fd, fcntl.F_SETFL,
                        fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        # If we can send initial data without waiting for anything, we do so
        # before we call select.

        if states[state] == 'ready_to_send' and in_data:
            self._send_initial_data(stdin, in_data, tty)
            state += 1

        while True:
            rfd, wfd, efd = select.select(rpipes, [], [], timeout)

            # We pay attention to timeouts only while negotiating a prompt.

            if not rfd:
                if state <= states.index('awaiting_escalation'):
                    # If the process has already exited, then it's not really a
                    # timeout; we'll let the normal error handling deal with it.
                    if p.poll() is not None:
                        break
                    self._terminate_process(p)
                    raise AnsibleError(
                        'Timeout (%ds) waiting for privilege escalation prompt: %s'
                        % (timeout, stdout))

            # Read whatever output is available on stdout and stderr, and stop
            # listening to the pipe if it's been closed.

            if p.stdout in rfd:
                chunk = p.stdout.read()
                if chunk == '':
                    rpipes.remove(p.stdout)
                tmp_stdout += chunk
                display.debug("stdout chunk (state=%s):\n>>>%s<<<\n" %
                              (state, chunk))

            if p.stderr in rfd:
                chunk = p.stderr.read()
                if chunk == '':
                    rpipes.remove(p.stderr)
                tmp_stderr += chunk
                display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" %
                              (state, chunk))

            # We examine the output line-by-line until we have negotiated any
            # privilege escalation prompt and subsequent success/error message.
            # Afterwards, we can accumulate output without looking at it.

            if state < states.index('ready_to_send'):
                if tmp_stdout:
                    output, unprocessed = self._examine_output(
                        'stdout', states[state], tmp_stdout, sudoable)
                    stdout += output
                    tmp_stdout = unprocessed

                if tmp_stderr:
                    output, unprocessed = self._examine_output(
                        'stderr', states[state], tmp_stderr, sudoable)
                    stderr += output
                    tmp_stderr = unprocessed
            else:
                stdout += tmp_stdout
                stderr += tmp_stderr
                tmp_stdout = tmp_stderr = ''

            # If we see a privilege escalation prompt, we send the password.
            # (If we're expecting a prompt but the escalation succeeds, we
            # didn't need the password and can carry on regardless.)

            if states[state] == 'awaiting_prompt':
                if self._flags['become_prompt']:
                    display.debug('Sending become_pass in response to prompt')
                    stdin.write(self._play_context.become_pass + '\n')
                    self._flags['become_prompt'] = False
                    state += 1
                elif self._flags['become_success']:
                    state += 1

            # We've requested escalation (with or without a password), now we
            # wait for an error message or a successful escalation.

            if states[state] == 'awaiting_escalation':
                if self._flags['become_success']:
                    display.debug('Escalation succeeded')
                    self._flags['become_success'] = False
                    state += 1
                elif self._flags['become_error']:
                    display.debug('Escalation failed')
                    self._terminate_process(p)
                    self._flags['become_error'] = False
                    raise AnsibleError('Incorrect %s password' %
                                       self._play_context.become_method)
                elif self._flags['become_nopasswd_error']:
                    display.debug('Escalation requires password')
                    self._terminate_process(p)
                    self._flags['become_nopasswd_error'] = False
                    raise AnsibleError('Missing %s password' %
                                       self._play_context.become_method)
                elif self._flags['become_prompt']:
                    # This shouldn't happen, because we should see the "Sorry,
                    # try again" message first.
                    display.debug('Escalation prompt repeated')
                    self._terminate_process(p)
                    self._flags['become_prompt'] = False
                    raise AnsibleError('Incorrect %s password' %
                                       self._play_context.become_method)

            # Once we're sure that the privilege escalation prompt, if any, has
            # been dealt with, we can send any initial data and start waiting
            # for output.

            if states[state] == 'ready_to_send':
                if in_data:
                    self._send_initial_data(stdin, in_data, tty)
                state += 1

            # Now we're awaiting_exit: has the child process exited? If it has,
            # and we've read all available output from it, we're done.

            if p.poll() is not None:
                if not rpipes or not rfd:
                    break

                # When ssh has ControlMaster (+ControlPath/Persist) enabled, the
                # first connection goes into the background and we never see EOF
                # on stderr. If we see EOF on stdout and the process has exited,
                # we're probably done. We call select again with a zero timeout,
                # just to make certain we don't miss anything that may have been
                # written to stderr between the time we called select() and when
                # we learned that the process had finished.

                if p.stdout not in rpipes:
                    timeout = 0
                    continue

            # If the process has not yet exited, but we've already read EOF from
            # its stdout and stderr (and thus removed both from rpipes), we can
            # just wait for it to exit.

            elif not rpipes:
                p.wait()
                break

            # Otherwise there may still be outstanding data to read.

        # close stdin after process is terminated and stdout/stderr are read
        # completely (see also issue #848)
        stdin.close()

        if C.HOST_KEY_CHECKING:
            if cmd[0] == "sshpass" and p.returncode == 6:
                raise AnsibleError(
                    'Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this.  Please add this host\'s fingerprint to your known_hosts file to manage this host.'
                )

        controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr
        if p.returncode != 0 and controlpersisterror:
            raise AnsibleError(
                'using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again'
            )

        if p.returncode == 255 and in_data:
            raise AnsibleConnectionFailure(
                'SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh'
            )

        return (p.returncode, stdout, stderr)
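
The escalation handling above is a small forward-only state machine: state is an index into the states list and is bumped as each phase (prompt, escalation, initial data) completes. A minimal sketch of that progression, assuming the prompt/success detection done by _examine_output() is reduced to two flags:

states = ['awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit']

# hypothetical flags standing in for self._flags
flags = {'become_prompt': True, 'become_success': False}

def advance(state):
    if states[state] == 'awaiting_prompt' and flags['become_prompt']:
        flags['become_prompt'] = False   # the become password would be written to stdin here
        flags['become_success'] = True   # pretend escalation then succeeds
        return state + 1
    if states[state] == 'awaiting_escalation' and flags['become_success']:
        return state + 1
    if states[state] == 'ready_to_send':
        return state + 1                 # any pipelined in_data would be sent here
    return state

state = states.index('awaiting_prompt')
while states[state] != 'awaiting_exit':
    state = advance(state)

print(states[state])  # awaiting_exit
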
Example No. 53
    def _execute_module(self,
                        module_name=None,
                        module_args=None,
                        tmp=None,
                        task_vars=None,
                        persist_files=False,
                        delete_remote_tmp=True,
                        wrap_async=False):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        remote_module_path = None
        args_file_path = None
        remote_files = []

        # if a module name was not specified for this execution, use the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        self._update_module_args(module_name, module_args, task_vars)

        # FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
        (module_style, shebang, module_data,
         module_path) = self._configure_module(module_name=module_name,
                                               module_args=module_args,
                                               task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" %
                               module_name)

        if not self._is_pipelining_enabled(module_style, wrap_async):

            # we might need remote tmp dir
            if not tmp or 'tmp' not in tmp:
                tmp = self._make_tmp_path()

            remote_module_filename = self._connection._shell.get_remote_filename(
                module_path)
            remote_module_path = self._connection._shell.join_path(
                tmp, remote_module_filename)

        if module_style in ('old', 'non_native_want_json', 'binary'):
            # we'll also need a temp file to hold our module arguments
            args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" %
                          remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, shlex_quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        if tmp and remote_module_path:
            remote_files = [tmp, remote_module_path]

        if args_file_path:
            remote_files.append(args_file_path)

        sudoable = True
        in_data = None
        cmd = ""

        if wrap_async:
            # configure, upload, and chmod the async_wrapper module
            (async_module_style, shebang, async_module_data,
             async_module_path) = self._configure_module(
                 module_name='async_wrapper',
                 module_args=dict(),
                 task_vars=task_vars)
            async_module_remote_filename = self._connection._shell.get_remote_filename(
                async_module_path)
            remote_async_module_path = self._connection._shell.join_path(
                tmp, async_module_remote_filename)
            self._transfer_data(remote_async_module_path, async_module_data)
            remote_files.append(remote_async_module_path)

            async_limit = self._task.async  # Python 2-era attribute; renamed to async_val in later Ansible
Example No. 54
    def run(self, tmp=None, task_vars=None):
        socket_path = None

        if self._play_context.connection == 'network_cli':
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning(
                    'provider is unnecessary when using network_cli and will be ignored'
                )
        elif self._play_context.connection == 'local':
            provider = load_provider(ios_provider_spec, self._task.args)
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'ios'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = int(provider['port'] or self._play_context.port or 22)
            pc.remote_user = provider[
                'username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider[
                'ssh_keyfile'] or self._play_context.private_key_file
            pc.timeout = int(provider['timeout']
                             or C.PERSISTENT_COMMAND_TIMEOUT)
            pc.become = provider['authorize'] or False
            if pc.become:
                pc.become_method = 'enable'
            pc.become_pass = provider['auth_pass']

            display.vvv(
                'using connection plugin %s (was local)' % pc.connection,
                pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get(
                'persistent', pc, sys.stdin)

            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {
                    'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                           'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'
                }

            task_vars['ansible_socket'] = socket_path
        else:
            return {
                'failed': True,
                'msg': 'Connection type %s is not valid for this module' %
                       self._play_context.connection
            }

        # make sure we are in the right cli context, which should be
        # enable mode and not config mode
        if socket_path is None:
            socket_path = self._connection.socket_path

        conn = Connection(socket_path)
        out = conn.get_prompt()
        while to_text(out,
                      errors='surrogate_then_replace').strip().endswith(')#'):
            display.vvvv('wrong context, sending exit to device',
                         self._play_context.remote_addr)
            conn.send_command('exit')
            out = conn.get_prompt()

        result = super(ActionModule, self).run(tmp, task_vars)
        return result
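
The local-connection branch above builds a fresh play context by letting each provider key fall back to the current play context value and finally to a constant. A small sketch of that fallback chain with hypothetical values:

provider = {'host': None, 'port': None, 'username': 'admin', 'timeout': None}

# hypothetical current play-context values
play_context = {'remote_addr': '10.0.0.1', 'port': None, 'remote_user': 'ops', 'timeout': 30}

host = provider['host'] or play_context['remote_addr']
port = int(provider['port'] or play_context['port'] or 22)
user = provider['username'] or play_context['remote_user']
timeout = int(provider['timeout'] or play_context['timeout'])

print(host, port, user, timeout)  # 10.0.0.1 22 admin 30
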
Example No. 55
    def _execute_module(self,
                        module_name=None,
                        module_args=None,
                        tmp=None,
                        task_vars=None,
                        persist_files=False,
                        delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # set check mode in the module arguments, if required
        if self._play_context.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError(
                    "check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False

        # Get the connection user for permission checks
        remote_user = task_vars.get(
            'ansible_ssh_user') or self._play_context.remote_user

        # set no log in the module arguments, if required
        module_args[
            '_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG

        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG

        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._play_context.diff

        # let module know our verbosity
        module_args['_ansible_verbosity'] = display.verbosity

        # give the module information about the ansible version
        module_args['_ansible_version'] = __version__

        # give the module information about its name
        module_args['_ansible_module_name'] = module_name

        # set the syslog facility to be used in the module
        module_args['_ansible_syslog_facility'] = task_vars.get(
            'ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)

        # let module know about filesystems that selinux treats specially
        module_args[
            '_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS

        (module_style, shebang, module_data,
         module_path) = self._configure_module(module_name=module_name,
                                               module_args=module_args,
                                               task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" %
                               module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path(remote_user)

        if tmp and (module_style != 'new' or
                    not self._connection.has_pipelining or
                    not self._play_context.pipelining or
                    C.DEFAULT_KEEP_REMOTE_FILES or
                    self._play_context.become_method == 'su'):
            remote_module_filename = self._connection._shell.get_remote_filename(
                module_path)
            remote_module_path = self._connection._shell.join_path(
                tmp, remote_module_filename)
            if module_style in ('old', 'non_native_want_json', 'binary'):
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" %
                          remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, pipes.quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        remote_files = None

        if args_file_path:
            remote_files = tmp, remote_module_path, args_file_path
        elif remote_module_path:
            remote_files = tmp, remote_module_path

        # Fix permissions of the tmp path and tmp files.  This should be
        # called after all files have been transferred.
        if remote_files:
            self._fixup_perms2(remote_files, remote_user)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(
            environment_string,
            shebang,
            cmd,
            arg_path=args_file_path,
            rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd,
                                              sudoable=sudoable,
                                              in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                tmp_rm_cmd = self._connection._shell.remove(tmp, recurse=True)
                tmp_rm_res = self._low_level_execute_command(tmp_rm_cmd,
                                                             sudoable=False)
                tmp_rm_data = self._parse_returned_data(tmp_rm_res)
                if tmp_rm_data.get('rc', 0) != 0:
                    display.warning(
                        'Error deleting remote temporary files (rc: {0}, stderr: {1})'
                        .format(
                            tmp_rm_res.get('rc'),
                            tmp_rm_res.get('stderr',
                                           'No error string available.')))

        # parse the main result
        data = self._parse_returned_data(res)

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" %
                      (module_name, module_args))
        return data
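
The conditional above decides between pipelining, where the module source is sent over stdin as in_data, and uploading the module into the remote temp directory and executing it from there. A condensed sketch of that decision, mirroring the same inputs; the remote path shown is a hypothetical placeholder:

def plan_execution(module_style, has_pipelining, pipelining_enabled,
                   keep_remote_files, become_method):
    # pipelining only applies to 'new'-style modules and is skipped when remote
    # files must be kept or when escalating with 'su'
    can_pipeline = (module_style == 'new'
                    and has_pipelining
                    and pipelining_enabled
                    and not keep_remote_files
                    and become_method != 'su')
    if can_pipeline:
        return {'in_data': '<module source>', 'cmd': ''}
    return {'in_data': None, 'cmd': '/home/user/.ansible/tmp/ansible-tmp-1234/the_module'}

print(plan_execution('new', True, True, False, 'sudo'))   # pipelined
print(plan_execution('old', True, True, False, 'sudo'))   # uploaded to remote tmp
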
Example No. 56
    def run(self, tmp=None, task_vars=None):
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                'got %s' % self._play_context.connection)

        provider = self.load_provider()
        transport = provider['transport'] or 'cli'

        display.vvv('transport is %s' % transport,
                    self._play_context.remote_addr)

        if transport == 'cli':
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'eos'
            pc.remote_user = provider[
                'username'] or self._play_context.connection_user
            pc.password = provider[
                'password'] or self._play_context.password
            pc.become = provider['authorize'] or False
            pc.become_pass = provider['auth_pass']

            connection = self._shared_loader_obj.connection_loader.get(
                'persistent', pc, sys.stdin)

            socket_path = self._get_socket_path(pc)
            if not os.path.exists(socket_path):
                # start the connection if it isn't started
                rc, out, err = connection.exec_command('open_shell()')
                if rc != 0:
                    return {
                        'failed': True,
                        'msg': 'unable to open shell',
                        'rc': rc
                    }

            task_vars['ansible_socket'] = socket_path

        else:
            if provider['host'] is None:
                self._task.args['host'] = self._play_context.remote_addr
            if provider['username'] is None:
                self._task.args[
                    'username'] = self._play_context.connection_user
            if provider['password'] is None:
                self._task.args['password'] = self._play_context.password
            if provider['timeout'] is None:
                self._task.args['timeout'] = self._play_context.timeout
            if task_vars.get('eapi_use_ssl'):
                self._task.args['use_ssl'] = task_vars['eapi_use_ssl']
            if task_vars.get('eapi_validate_certs'):
                self._task.args['validate_certs'] = task_vars[
                    'eapi_validate_certs']

        if self._play_context.become_method == 'enable':
            self._play_context.become = False
            self._play_context.become_method = None

        result = super(ActionModule, self).run(tmp, task_vars)

        if transport == 'cli':
            display.vvv('closing cli shell connection',
                        self._play_context.remote_addr)
            rc, out, err = connection.exec_command('close_shell()')

        return result
Example No. 57
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        try:
            source_dir = self._task.args.get('dir')
            source_file = self._task.args.get('file')
            content = self._task.args['content']
        except KeyError as exc:
            return {
                'failed': True,
                'msg': 'missing required argument: %s' % exc
            }

        if not source_dir and not source_file:
            return {
                'failed': True,
                'msg': 'one of `dir` or `file` must be specified'
            }
        elif source_dir and source_file:
            return {
                'failed': True,
                'msg': '`dir` and `file` are mutually exclusive arguments'
            }

        if source_dir:
            sources = self.get_files(to_list(source_dir))
        else:
            sources = to_list(source_file)

        facts = {}

        self.template = template_loader.get('json_template', self._templar)

        for src in sources:
            if not os.path.exists(src) or not os.path.isfile(src):
                raise AnsibleError("src [%s] is either missing or invalid" %
                                   src)

            tasks = self._loader.load_from_file(src)

            self.ds = {'content': content}
            self.ds.update(task_vars)

            for task in tasks:
                name = task.pop('name', None)
                display.vvvv('processing directive: %s' % name)

                register = task.pop('register', None)

                export = task.pop('export', False)
                export_as = task.pop('export_as', 'list')
                if export_as not in self.VALID_EXPORT_AS:
                    raise AnsibleError('invalid value for export_as, got %s' %
                                       export_as)

                if 'export_facts' in task:
                    task['set_vars'] = task.pop('export_facts')
                    export = True
                elif 'set_vars' not in task:
                    if export and not register:
                        display.warning(
                            'entry will not be exported due to missing register option'
                        )

                when = task.pop('when', None)
                if when is not None:
                    if not self._check_conditional(when, self.ds):
                        display.vvv(
                            'text_parser: skipping task [%s] due to conditional check'
                            % name)
                        continue

                loop = task.pop('loop', None)
                loop_var = task.pop('loop_control',
                                    {}).get('loop_var') or 'item'

                if loop is not None:
                    loop = self.template(loop, self.ds)
                    if not loop:
                        display.vvv(
                            'text_parser: loop option was defined but no loop data found'
                        )
                    res = list()

                    if loop:
                        # loop is a hash so break out key and value
                        if isinstance(loop, collections.Mapping):
                            for loop_key, loop_value in iteritems(loop):
                                self.ds[loop_var] = {
                                    'key': loop_key,
                                    'value': loop_value
                                }
                                resp = self._process_directive(task)
                                res.append(resp)

                        # loop is either a list or a string
                        else:
                            for loop_item in loop:
                                self.ds[loop_var] = loop_item
                                resp = self._process_directive(task)
                                res.append(resp)

                        if 'set_vars' in task:
                            if register:
                                self.ds[register] = res
                                if export:
                                    facts[register] = res
                            else:
                                self.ds.update(res)
                                if export:
                                    facts.update(res)
                        elif register:
                            self.ds[register] = res
                            if export:
                                if export_as in ('dict', 'hash', 'object'):
                                    if register not in facts:
                                        facts[register] = {}
                                    for item in res:
                                        facts[register] = self.rec_update(
                                            facts[register], item)
                                else:
                                    facts[register] = res
                else:
                    res = self._process_directive(task)
                    if 'set_vars' in task:
                        if register:
                            self.ds[register] = res
                            if export:
                                facts[register] = res
                        else:
                            self.ds.update(res)
                            if export:
                                facts.update(res)
                    elif res and register:
                        self.ds[register] = res
                        if export:
                            if register:
                                facts[register] = res
                            else:
                                for r in to_list(res):
                                    for k, v in iteritems(r):
                                        facts.update({to_text(k): v})

        result.update({'ansible_facts': facts, 'included': sources})

        return result
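
When export_as is 'dict' (or 'hash'/'object'), each looped result is folded into the exported fact with self.rec_update() instead of being appended to a list. A minimal sketch of that merge, assuming rec_update is a straightforward recursive dict update:

def rec_update(base, other):
    # merge 'other' into 'base'; nested dicts are merged, everything else is overwritten
    for key, value in other.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            base[key] = rec_update(base[key], value)
        else:
            base[key] = value
    return base

facts = {}
loop_results = [
    {'interfaces': {'Ethernet1': {'state': 'up'}}},
    {'interfaces': {'Ethernet2': {'state': 'down'}}},
]
for item in loop_results:
    facts = rec_update(facts, item)

print(facts)
# {'interfaces': {'Ethernet1': {'state': 'up'}, 'Ethernet2': {'state': 'down'}}}
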
Example No. 58
    def close(self):
        display.vvv('closing connection', host=self._play_context.remote_addr)
        self.close_shell()
        super(Connection, self).close()
Example no. 59
    def _connect(self):
        ''' connect to the jail; nothing to do here '''
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
            self._connected = True
Example no. 60
    def _execute_module(self,
                        module_name=None,
                        module_args=None,
                        tmp=None,
                        task_vars=None,
                        persist_files=False,
                        delete_remote_tmp=True,
                        wrap_async=False):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        remote_module_path = None
        args_file_path = None
        remote_files = []

        # if a module name was not specified for this execution, use the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        self._update_module_args(module_name, module_args, task_vars)

        # FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
        (module_style, shebang, module_data,
         module_path) = self._configure_module(module_name=module_name,
                                               module_args=module_args,
                                               task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" %
                               module_name)

        if not self._is_pipelining_enabled(module_style, wrap_async):

            # we might need remote tmp dir
            if not tmp or 'tmp' not in tmp:
                tmp = self._make_tmp_path()

            remote_module_filename = self._connection._shell.get_remote_filename(
                module_path)
            remote_module_path = self._connection._shell.join_path(
                tmp, remote_module_filename)

        if module_style in ('old', 'non_native_want_json', 'binary'):
            # we'll also need a temp file to hold our module arguments
            args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" %
                          remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, shlex_quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        if tmp and remote_module_path:
            remote_files = [tmp, remote_module_path]

        if args_file_path:
            remote_files.append(args_file_path)

        sudoable = True
        in_data = None
        cmd = ""

        if wrap_async:
            # configure, upload, and chmod the async_wrapper module
            (async_module_style, shebang, async_module_data,
             async_module_path) = self._configure_module(
                 module_name='async_wrapper',
                 module_args=dict(),
                 task_vars=task_vars)
            async_module_remote_filename = self._connection._shell.get_remote_filename(
                async_module_path)
            remote_async_module_path = self._connection._shell.join_path(
                tmp, async_module_remote_filename)
            self._transfer_data(remote_async_module_path, async_module_data)
            remote_files.append(remote_async_module_path)

            async_limit = self._task.async_val
            async_jid = str(random.randint(0, 999999999999))

            # call the interpreter for async_wrapper directly
            # this permits use of a script for an interpreter on non-Linux platforms
            # TODO: re-implement async_wrapper as a regular module to avoid this special case
            interpreter = shebang.replace('#!', '').strip()
            async_cmd = [
                interpreter, remote_async_module_path, async_jid, async_limit,
                remote_module_path
            ]

            if environment_string:
                async_cmd.insert(0, environment_string)

            if args_file_path:
                async_cmd.append(args_file_path)
            else:
                # maintain a fixed number of positional parameters for async_wrapper
                async_cmd.append('_')

            if not self._should_remove_tmp_path(tmp):
                async_cmd.append("-preserve_tmp")

            cmd = " ".join(to_text(x) for x in async_cmd)

        else:
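            # pipelining feeds the module source to the interpreter over stdin;
            # otherwise we execute the module file that was copied to the remote tmp dir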

            if self._is_pipelining_enabled(module_style):
                in_data = module_data
            else:
                cmd = remote_module_path

            rm_tmp = None

            if self._should_remove_tmp_path(
                    tmp) and not persist_files and delete_remote_tmp:
                if not self._play_context.become or self._play_context.become_user == 'root':
                    # not sudoing or sudoing to root, so can cleanup files in the same step
                    rm_tmp = tmp

            cmd = self._connection._shell.build_module_command(
                environment_string,
                shebang,
                cmd,
                arg_path=args_file_path,
                rm_tmp=rm_tmp).strip()

        # Fix permissions of the tmp path and tmp files. This should be called after all files have been transferred.
        if remote_files:
            # remove none/empty
            remote_files = [x for x in remote_files if x]
            self._fixup_perms2(remote_files, self._play_context.remote_user)

        # actually execute
        res = self._low_level_execute_command(cmd,
                                              sudoable=sudoable,
                                              in_data=in_data)

        # parse the main result
        data = self._parse_returned_data(res)

        # NOTE: INTERNAL KEYS ONLY ACCESSIBLE HERE
        # get internal info before cleaning
        tmpdir_delete = (not data.pop("_ansible_suppress_tmpdir_delete", False)
                         and wrap_async)

        # remove internal keys
        remove_internal_keys(data)

        # cleanup tmp?
        if (self._play_context.become
                and self._play_context.become_user != 'root'
            ) and not persist_files and delete_remote_tmp or tmpdir_delete:
            self._remove_tmp_path(tmp)

        # FIXME: for backwards compat, figure out if still makes sense
        if wrap_async:
            data['changed'] = True

        # pre-split stdout/stderr into lines if needed
        if 'stdout' in data and 'stdout_lines' not in data:
            # if the value is 'False', a default won't catch it.
            txt = data.get('stdout', None) or u''
            data['stdout_lines'] = txt.splitlines()
        if 'stderr' in data and 'stderr_lines' not in data:
            # if the value is 'False', a default won't catch it.
            txt = data.get('stderr', None) or u''
            data['stderr_lines'] = txt.splitlines()

        display.debug("done with _execute_module (%s, %s)" %
                      (module_name, module_args))
        return data
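
Old-style modules above receive their arguments as a single shell-quoted k=v line written to a remote args file, while non_native_want_json and binary modules receive the same arguments as JSON. Below is a minimal sketch of both serializations, assuming a plain dict of arguments; serialize_module_args is a hypothetical helper, not part of Ansible.

# Hypothetical helper mirroring the two argument serializations used above.
import json
try:
    from shlex import quote as shlex_quote   # Python 3
except ImportError:
    from pipes import quote as shlex_quote   # Python 2 fallback

def serialize_module_args(module_args, module_style):
    if module_style == 'old':
        # old-style modules parse a space-separated k=v string
        return ' '.join('%s=%s' % (k, shlex_quote(str(v)))
                        for k, v in module_args.items())
    # non_native_want_json and binary modules read a JSON document
    return json.dumps(module_args)

# usage (key order follows the dict):
#   serialize_module_args({'name': 'httpd', 'state': 'started'}, 'old')
#   -> "name=httpd state=started"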