Example #1
    def _save_ssh_host_keys(self, filename):
        '''
        not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
        don't complain about it :)
        '''

        if not self._any_keys_added():
            return False

        path = os.path.expanduser("~/.ssh")
        makedirs_safe(path)

        with open(filename, 'w') as f:

            for hostname, keys in iteritems(self.ssh._host_keys):

                for keytype, key in iteritems(keys):

                    # first pass: write only the keys that were already known (not added during this run)
                    added_this_time = getattr(key,
                                              '_added_by_assible_this_time',
                                              False)
                    if not added_this_time:
                        f.write("%s %s %s\n" %
                                (hostname, keytype, key.get_base64()))

            # second pass: append the keys added during this run at the bottom of the file
            for hostname, keys in iteritems(self.ssh._host_keys):

                for keytype, key in iteritems(keys):
                    added_this_time = getattr(key,
                                              '_added_by_assible_this_time',
                                              False)
                    if added_this_time:
                        f.write("%s %s %s\n" %
                                (hostname, keytype, key.get_base64()))
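
All of the snippets on this page funnel directory creation through the same makedirs_safe helper, which creates a directory tree without failing when a concurrent process creates it first. A minimal sketch of what such a helper presumably does, using only the standard library (this is an illustration, not the actual implementation behind these examples):

import os


def makedirs_safe_sketch(path, mode=None):
    """Create path (and parents), tolerating a concurrent creator."""
    b_path = os.path.expanduser(path)
    if not os.path.exists(b_path):
        try:
            if mode is not None:
                os.makedirs(b_path, mode)
            else:
                os.makedirs(b_path)
        except OSError:
            # another process may have created it between the check and the call
            if not os.path.isdir(b_path):
                raise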
Example #2
def _get_lock(b_path):
    """Get the lock for writing password file."""
    first_process = False
    b_pathdir = os.path.dirname(b_path)
    lockfile_name = to_bytes("%s.assible_lockfile" %
                             hashlib.sha1(b_path).hexdigest())
    lockfile = os.path.join(b_pathdir, lockfile_name)
    if not os.path.exists(lockfile) and b_path != to_bytes('/dev/null'):
        try:
            makedirs_safe(b_pathdir, mode=0o700)
            fd = os.open(lockfile, os.O_CREAT | os.O_EXCL)
            os.close(fd)
            first_process = True
        except OSError as e:
            if e.strerror != 'File exists':
                raise

    counter = 0
    # if the lock is held by another process, wait until it is released
    while os.path.exists(lockfile) and not first_process:
        time.sleep(2**counter)
        if counter >= 2:
            raise AssibleError(
                "Password lookup cannot get the lock in 7 seconds, aborting... "
                "This may be caused by a stale lockfile; "
                "you can manually remove it from the controller machine at %s and try again"
                % lockfile)
        counter += 1
    return first_process, lockfile
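
_get_lock only creates the lockfile and reports whether the current process won the race; the caller is still responsible for removing the lockfile once the password file has been written, so that waiting processes can proceed. A minimal usage sketch, assuming a hypothetical _release_lock helper (the names and pairing here are illustrative, not the module's actual API):

def _release_lock(lockfile):
    # hypothetical counterpart to _get_lock: drop the lockfile so waiters stop polling
    if os.path.exists(lockfile):
        os.remove(lockfile)


def _locked_write(b_path, content):
    # illustrative only: the process that created the lockfile writes the password
    # file and then removes the lock; later processes block in _get_lock until the
    # lock disappears and can simply read the already-written file
    first_process, lockfile = _get_lock(b_path)
    try:
        if first_process:
            _write_password_file(b_path, content)
    finally:
        if first_process:
            _release_lock(lockfile)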
Example #3
def _write_password_file(b_path, content):
    b_pathdir = os.path.dirname(b_path)
    makedirs_safe(b_pathdir, mode=0o700)

    with open(b_path, 'wb') as f:
        os.chmod(b_path, 0o600)
        b_content = to_bytes(content, errors='surrogate_or_strict') + b'\n'
        f.write(b_content)
Example #4
    def _generate_retry_inventory(self, retry_path, replay_hosts):
        '''
        Called when a playbook run fails. It generates an inventory which allows
        re-running on ONLY the failed hosts.  This may duplicate some variable
        information in group_vars/host_vars but that is ok, and expected.
        '''
        try:
            makedirs_safe(os.path.dirname(retry_path))
            with open(retry_path, 'w') as fd:
                for x in replay_hosts:
                    fd.write("%s\n" % x)
        except Exception as e:
            display.warning("Could not create retry file '%s'.\n\t%s" %
                            (retry_path, to_text(e)))
            return False

        return True
Example #5
    def write_tree_file(self, hostname, buf):
        ''' write something into treedir/hostname '''

        buf = to_bytes(buf)
        try:
            makedirs_safe(self.tree)
        except (OSError, IOError) as e:
            self._display.warning(
                u"Unable to access or create the configured directory (%s): %s"
                % (to_text(self.tree), to_text(e)))

        try:
            path = to_bytes(os.path.join(self.tree, hostname))
            with open(path, 'wb+') as fd:
                fd.write(buf)
        except (OSError, IOError) as e:
            self._display.warning(u"Unable to write to %s's file: %s" %
                                  (hostname, to_text(e)))
Example #6
    def run(self, tmp=None, task_vars=None):
        ''' handler for fetch operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        try:
            if self._play_context.check_mode:
                raise AssibleActionSkip(
                    'check mode not (yet) supported for this module')

            source = self._task.args.get('src', None)
            original_dest = dest = self._task.args.get('dest', None)
            flat = boolean(self._task.args.get('flat'), strict=False)
            fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False)
            validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False)

            msg = ''
            # validate source and dest are strings FIXME: use basic.py and module specs
            if not isinstance(source, string_types):
                msg = "Invalid type supplied for source option, it must be a string"

            if not isinstance(dest, string_types):
                msg = "Invalid type supplied for dest option, it must be a string"

            if source is None or dest is None:
                msg = "src and dest are required"

            if msg:
                raise AssibleActionFail(msg)

            source = self._connection._shell.join_path(source)
            source = self._remote_expand_user(source)

            remote_checksum = None
            if not self._connection.become:
                # calculate checksum for the remote file, don't bother if using become as slurp will be used
                # Force remote_checksum to follow symlinks because fetch always follows symlinks
                remote_checksum = self._remote_checksum(source,
                                                        all_vars=task_vars,
                                                        follow=True)

            # use slurp if permissions are lacking or privilege escalation is needed
            remote_data = None
            if remote_checksum in ('1', '2', None):
                slurpres = self._execute_module(
                    module_name='assible.legacy.slurp',
                    module_args=dict(src=source),
                    task_vars=task_vars)
                if slurpres.get('failed'):
                    if not fail_on_missing and (
                            slurpres.get('msg').startswith('file not found')
                            or remote_checksum == '1'):
                        result['msg'] = "the remote file does not exist, not transferring, ignored"
                        result['file'] = source
                        result['changed'] = False
                    else:
                        result.update(slurpres)
                    return result
                else:
                    if slurpres['encoding'] == 'base64':
                        remote_data = base64.b64decode(slurpres['content'])
                    if remote_data is not None:
                        remote_checksum = checksum_s(remote_data)

            # calculate the destination name
            if os.path.sep not in self._connection._shell.join_path('a', ''):
                source = self._connection._shell._unquote(source)
                source_local = source.replace('\\', '/')
            else:
                source_local = source

            # ensure we only use file name, avoid relative paths
            if not is_subpath(dest, original_dest):
                # TODO: ? dest = os.path.expanduser(dest.replace(('../','')))
                raise AssibleActionFail(
                    "Detected directory traversal, expected to be contained in '%s' but got '%s'"
                    % (original_dest, dest))

            if flat:
                if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
                    raise AssibleActionFail(
                        "dest is an existing directory, use a trailing slash if you want to fetch src into that directory")
                if dest.endswith(os.sep):
                    # if the path ends with "/", we'll use the source filename as the
                    # destination filename
                    base = os.path.basename(source_local)
                    dest = os.path.join(dest, base)
                if not dest.startswith("/"):
                    # if dest does not start with "/", we'll assume a relative path
                    dest = self._loader.path_dwim(dest)
            else:
                # files are saved in dest dir, with a subdir for each host, then the filename
                if 'inventory_hostname' in task_vars:
                    target_name = task_vars['inventory_hostname']
                else:
                    target_name = self._play_context.remote_addr
                dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name,
                                     source_local)

            if remote_checksum in ('0', '1', '2', '3', '4', '5'):
                result['changed'] = False
                result['file'] = source
                if remote_checksum == '0':
                    result['msg'] = "unable to calculate the checksum of the remote file"
                elif remote_checksum == '1':
                    result['msg'] = "the remote file does not exist"
                elif remote_checksum == '2':
                    result['msg'] = "no read permission on remote file"
                elif remote_checksum == '3':
                    result['msg'] = "remote file is a directory, fetch cannot work on directories"
                elif remote_checksum == '4':
                    result['msg'] = "python isn't present on the system. Unable to compute checksum"
                elif remote_checksum == '5':
                    result['msg'] = "stdlib json was not found on the remote machine. Only the raw module can work without those installed"
                # Historically, these don't fail because you may want to transfer
                # a log file that possibly MAY exist but keep going to fetch other
                # log files. Today, this is better achieved by adding
                # ignore_errors or failed_when to the task. Control the behaviour
                # via fail_on_missing
                if fail_on_missing:
                    result['failed'] = True
                    del result['changed']
                else:
                    result['msg'] += ", not transferring, ignored"
                return result

            dest = os.path.normpath(dest)

            # calculate checksum for the local file
            local_checksum = checksum(dest)

            if remote_checksum != local_checksum:
                # create the containing directories, if needed
                makedirs_safe(os.path.dirname(dest))

                # fetch the file and check for changes
                if remote_data is None:
                    self._connection.fetch_file(source, dest)
                else:
                    try:
                        with open(to_bytes(dest, errors='surrogate_or_strict'), 'wb') as f:
                            f.write(remote_data)
                    except (IOError, OSError) as e:
                        raise AssibleActionFail(
                            "Failed to fetch the file: %s" % e)
                new_checksum = secure_hash(dest)
                # For backwards compatibility. We'll return None on FIPS enabled systems
                try:
                    new_md5 = md5(dest)
                except ValueError:
                    new_md5 = None

                if validate_checksum and new_checksum != remote_checksum:
                    result.update(
                        dict(failed=True,
                             md5sum=new_md5,
                             msg="checksum mismatch",
                             file=source,
                             dest=dest,
                             remote_md5sum=None,
                             checksum=new_checksum,
                             remote_checksum=remote_checksum))
                else:
                    result.update({
                        'changed': True,
                        'md5sum': new_md5,
                        'dest': dest,
                        'remote_md5sum': None,
                        'checksum': new_checksum,
                        'remote_checksum': remote_checksum
                    })
            else:
                # For backwards compatibility. We'll return None on FIPS enabled systems
                try:
                    local_md5 = md5(dest)
                except ValueError:
                    local_md5 = None
                result.update(
                    dict(changed=False,
                         md5sum=local_md5,
                         file=source,
                         dest=dest,
                         checksum=local_checksum))

        finally:
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
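
Stripped of the action-plugin plumbing, the core of the fetch flow above is: checksum the remote file, checksum the local destination, and only transfer (then re-validate) when they differ. A minimal local-only sketch of that idea, using the standard library rather than the checksum helpers in the example (the function names here are illustrative):

import hashlib
import os
import shutil


def _sha1_of(path):
    # stand-in for the checksum()/secure_hash() helpers used above
    h = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            h.update(chunk)
    return h.hexdigest()


def fetch_if_changed(source, dest):
    """Copy source to dest only when their checksums differ; return True if copied."""
    remote_checksum = _sha1_of(source)
    local_checksum = _sha1_of(dest) if os.path.exists(dest) else None
    if remote_checksum == local_checksum:
        return False
    os.makedirs(os.path.dirname(dest) or '.', exist_ok=True)
    shutil.copyfile(source, dest)
    # re-validate the copy, mirroring validate_checksum in the plugin
    if _sha1_of(dest) != remote_checksum:
        raise RuntimeError("checksum mismatch after transfer")
    return True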
Example #7
def ensure_type(value, value_type, origin=None):
    ''' return a configuration variable with casting
    :arg value: The value to ensure correct typing of
    :kwarg value_type: The type of the value.  This can be any of the following strings:
        :boolean: sets the value to a True or False value
        :bool: Same as 'boolean'
        :integer: Sets the value to an integer or raises a ValueError
        :int: Same as 'integer'
        :float: Sets the value to a float or raises a ValueError
        :list: Treats the value as a comma separated list.  Split the value
            and return it as a python list.
        :none: Sets the value to None
        :path: Expands any environment variables and tildes in the value.
        :tmppath: Create a unique temporary directory inside of the directory
            specified by value and return its path.
        :temppath: Same as 'tmppath'
        :tmp: Same as 'tmppath'
        :pathlist: Treat the value as a typical PATH string.  (On POSIX, this
            means colon separated strings.)  Split the value and then expand
            each part for environment variables and tildes.
        :pathspec: Treat the value as a PATH string.  Expands any environment
            variables and tildes in the value.
        :dict: Ensures the value is a dictionary
        :dictionary: Same as 'dict'
        :str: Sets the value to string types.
        :string: Same as 'str'
    '''

    errmsg = ''
    basedir = None
    if origin and os.path.isabs(origin) and os.path.exists(to_bytes(origin)):
        basedir = origin

    if value_type:
        value_type = value_type.lower()

    if value is not None:
        if value_type in ('boolean', 'bool'):
            value = boolean(value, strict=False)

        elif value_type in ('integer', 'int'):
            value = int(value)

        elif value_type == 'float':
            value = float(value)

        elif value_type == 'list':
            if isinstance(value, string_types):
                value = [x.strip() for x in value.split(',')]
            elif not isinstance(value, Sequence):
                errmsg = 'list'

        elif value_type == 'none':
            if value == "None":
                value = None

            if value is not None:
                errmsg = 'None'

        elif value_type == 'path':
            if isinstance(value, string_types):
                value = resolve_path(value, basedir=basedir)
            else:
                errmsg = 'path'

        elif value_type in ('tmp', 'temppath', 'tmppath'):
            if isinstance(value, string_types):
                value = resolve_path(value, basedir=basedir)
                if not os.path.exists(value):
                    makedirs_safe(value, 0o700)
                prefix = 'assible-local-%s' % os.getpid()
                value = tempfile.mkdtemp(prefix=prefix, dir=value)
                atexit.register(cleanup_tmp_file, value, warn=True)
            else:
                errmsg = 'temppath'

        elif value_type == 'pathspec':
            if isinstance(value, string_types):
                value = value.split(os.pathsep)

            if isinstance(value, Sequence):
                value = [resolve_path(x, basedir=basedir) for x in value]
            else:
                errmsg = 'pathspec'

        elif value_type == 'pathlist':
            if isinstance(value, string_types):
                value = [x.strip() for x in value.split(',')]

            if isinstance(value, Sequence):
                value = [resolve_path(x, basedir=basedir) for x in value]
            else:
                errmsg = 'pathlist'

        elif value_type in ('dict', 'dictionary'):
            if not isinstance(value, Mapping):
                errmsg = 'dictionary'

        elif value_type in ('str', 'string'):
            if isinstance(value, (string_types, AssibleVaultEncryptedUnicode, bool, int, float, complex)):
                value = unquote(to_text(value, errors='surrogate_or_strict'))
            else:
                errmsg = 'string'

        # defaults to string type
        elif isinstance(value, (string_types, AssibleVaultEncryptedUnicode)):
            value = unquote(to_text(value, errors='surrogate_or_strict'))

        if errmsg:
            raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))

    return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
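
A few illustrative calls showing how the casting above behaves (outcomes follow from the code in this example, not from a separate API reference):

ensure_type('yes', 'bool')               # -> True via boolean(..., strict=False)
ensure_type('a, b ,c', 'list')           # -> ['a', 'b', 'c'], split on commas and stripped
ensure_type('~/.assible/tmp', 'path')    # tilde and env vars expanded via resolve_path()

# a value of the wrong shape raises ValueError, e.g. a dict passed as 'list'
try:
    ensure_type({'not': 'a list'}, 'list')
except ValueError as e:
    print(e)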
Example #8
    def close(self):
        ''' terminate the connection '''

        cache_key = self._cache_key()
        SSH_CONNECTION_CACHE.pop(cache_key, None)
        SFTP_CONNECTION_CACHE.pop(cache_key, None)

        if hasattr(self, 'sftp'):
            if self.sftp is not None:
                self.sftp.close()

        if self.get_option('host_key_checking') and self.get_option(
                'record_host_keys') and self._any_keys_added():

            # add any new SSH host keys -- warning -- this could be slow
            # (This doesn't acquire the connection lock because it needs
            # to exclude only other known_hosts writers, not connections
            # that are starting up.)
            lockfile = self.keyfile.replace("known_hosts", ".known_hosts.lock")
            dirname = os.path.dirname(self.keyfile)
            makedirs_safe(dirname)

            KEY_LOCK = open(lockfile, 'w')
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)

            try:
                # just in case any were added recently

                self.ssh.load_system_host_keys()
                self.ssh._host_keys.update(self.ssh._system_host_keys)

                # gather information about the current key file, so
                # we can ensure the new file has the correct mode/owner

                key_dir = os.path.dirname(self.keyfile)
                if os.path.exists(self.keyfile):
                    key_stat = os.stat(self.keyfile)
                    mode = key_stat.st_mode
                    uid = key_stat.st_uid
                    gid = key_stat.st_gid
                else:
                    mode = 33188  # 0o100644: regular file, rw for owner, r for group/other
                    uid = os.getuid()
                    gid = os.getgid()

                # Save the new keys to a temporary file and move it into place
                # rather than rewriting the file. We set delete=False because
                # the file will be moved into place rather than cleaned up.

                tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir,
                                                          delete=False)
                os.chmod(tmp_keyfile.name, mode & 0o7777)
                os.chown(tmp_keyfile.name, uid, gid)

                self._save_ssh_host_keys(tmp_keyfile.name)
                tmp_keyfile.close()

                os.rename(tmp_keyfile.name, self.keyfile)

            except Exception:

                # unable to save keys, including scenario when key was invalid
                # and caught earlier
                traceback.print_exc()
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)

        self.ssh.close()
        self._connected = False
Example #9
def main():
    """ Called to initiate the connect to the remote device
    """
    rc = 0
    result = {}
    messages = list()
    socket_path = None

    # Need stdin as a byte stream
    if PY3:
        stdin = sys.stdin.buffer
    else:
        stdin = sys.stdin

    # Note: update the below log capture code after Display.display() is refactored.
    saved_stdout = sys.stdout
    sys.stdout = StringIO()

    try:
        # read the play context data via stdin, which means depickling it
        vars_data = read_stream(stdin)
        init_data = read_stream(stdin)

        if PY3:
            pc_data = cPickle.loads(init_data, encoding='bytes')
            variables = cPickle.loads(vars_data, encoding='bytes')
        else:
            pc_data = cPickle.loads(init_data)
            variables = cPickle.loads(vars_data)

        play_context = PlayContext()
        play_context.deserialize(pc_data)
        display.verbosity = play_context.verbosity

    except Exception as e:
        rc = 1
        result.update({
            'error': to_text(e),
            'exception': traceback.format_exc()
        })

    if rc == 0:
        ssh = connection_loader.get('ssh', class_only=True)
        assible_playbook_pid = sys.argv[1]
        task_uuid = sys.argv[2]
        cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user, play_context.connection, assible_playbook_pid)
        # create the persistent connection dir if need be and create the paths
        # which we will be using later
        tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
        makedirs_safe(tmp_path)

        socket_path = unfrackpath(cp % dict(directory=tmp_path))
        lock_path = unfrackpath("%s/.assible_pc_lock_%s" % os.path.split(socket_path))

        with file_lock(lock_path):
            if not os.path.exists(socket_path):
                messages.append(('vvvv', 'local domain socket does not exist, starting it'))
                original_path = os.getcwd()
                r, w = os.pipe()
                pid = fork_process()

                if pid == 0:
                    try:
                        os.close(r)
                        wfd = os.fdopen(w, 'w')
                        process = ConnectionProcess(wfd, play_context, socket_path, original_path, task_uuid, assible_playbook_pid)
                        process.start(variables)
                    except Exception:
                        messages.append(('error', traceback.format_exc()))
                        rc = 1

                    if rc == 0:
                        process.run()
                    else:
                        process.shutdown()

                    sys.exit(rc)

                else:
                    os.close(w)
                    rfd = os.fdopen(r, 'r')
                    data = json.loads(rfd.read(), cls=AssibleJSONDecoder)
                    messages.extend(data.pop('messages'))
                    result.update(data)

            else:
                messages.append(('vvvv', 'found existing local domain socket, using it!'))
                conn = Connection(socket_path)
                conn.set_options(var_options=variables)
                pc_data = to_text(init_data)
                try:
                    conn.update_play_context(pc_data)
                    conn.set_check_prompt(task_uuid)
                except Exception as exc:
                    # Only network_cli has update_play_context and set_check_prompt,
                    # so a missing method is not fatal, e.g. for netconf
                    if isinstance(exc, ConnectionError) and getattr(exc, 'code', None) == -32601:
                        pass
                    else:
                        result.update({
                            'error': to_text(exc),
                            'exception': traceback.format_exc()
                        })

    if socket_path and os.path.exists(socket_path):
        messages.extend(Connection(socket_path).pop_messages())
    messages.append(('vvvv', sys.stdout.getvalue()))
    result.update({
        'messages': messages,
        'socket_path': socket_path
    })

    sys.stdout = saved_stdout
    if 'exception' in result:
        rc = 1
        sys.stderr.write(json.dumps(result, cls=AssibleJSONEncoder))
    else:
        rc = 0
        sys.stdout.write(json.dumps(result, cls=AssibleJSONEncoder))

    sys.exit(rc)
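
One detail in main() that is easy to read past: the lock file for the persistent connection is placed next to the domain socket by splitting the socket path into its directory and basename and re-joining them around an .assible_pc_lock_ prefix. A small standalone illustration of that string manipulation (the path is made up):

import os

socket_path = '/home/user/.assible/pc/abc123def456'
lock_path = "%s/.assible_pc_lock_%s" % os.path.split(socket_path)
print(lock_path)  # /home/user/.assible/pc/.assible_pc_lock_abc123def456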