Example #1
def main():
    module = AssibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            regexp=dict(type='str', aliases=['regex']),
            line=dict(type='str', aliases=['value']),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
            backrefs=dict(type='bool', default=False),
            create=dict(type='bool', default=False),
            backup=dict(type='bool', default=False),
            firstmatch=dict(type='bool', default=False),
            validate=dict(type='str'),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        add_file_common_args=True,
        supports_check_mode=True,
    )

    params = module.params
    create = params['create']
    backup = params['backup']
    backrefs = params['backrefs']
    path = params['path']
    firstmatch = params['firstmatch']
    regexp = params['regexp']
    line = params['line']

    if regexp == '':
        module.warn(
            "The regular expression is an empty string, which will match every line in the file. "
            "This may have unintended consequences, such as replacing the last line in the file rather than appending. "
            "If this is desired, use '^' to match every line in the file and avoid this warning.")

    b_path = to_bytes(path, errors='surrogate_or_strict')
    if os.path.isdir(b_path):
        module.fail_json(rc=256, msg='Path %s is a directory!' % path)

    if params['state'] == 'present':
        if backrefs and regexp is None:
            module.fail_json(msg='regexp is required with backrefs=true')

        if line is None:
            module.fail_json(msg='line is required with state=present')

        # Deal with the insertafter default value manually, to avoid errors
        # because of the mutually_exclusive mechanism.
        ins_bef, ins_aft = params['insertbefore'], params['insertafter']
        if ins_bef is None and ins_aft is None:
            ins_aft = 'EOF'

        present(module, path, regexp, line,
                ins_aft, ins_bef, create, backup, backrefs, firstmatch)
    else:
        if regexp is None and line is None:
            module.fail_json(msg='one of line or regexp is required with state=absent')

        absent(module, path, regexp, line, backup)
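
The main() above delegates the actual file edits to present() and absent() helpers that are not part of this listing. The sketch below only restates their call signatures as used above; the bodies are placeholders, not the module's real implementation.

def present(module, path, regexp, line, insertafter, insertbefore,
            create, backup, backrefs, firstmatch):
    # Ensure 'line' is present in 'path', honouring regexp, insertafter/insertbefore,
    # backrefs and firstmatch, then report back via module.exit_json(...).
    pass


def absent(module, path, regexp, line, backup):
    # Remove lines matching 'regexp' (or equal to 'line') from 'path',
    # then report back via module.exit_json(...).
    pass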
Example #2
class AssibleDockerClient(Client):
    def __init__(self,
                 argument_spec=None,
                 supports_check_mode=False,
                 mutually_exclusive=None,
                 required_together=None,
                 required_if=None,
                 min_docker_version=MIN_DOCKER_VERSION,
                 min_docker_api_version=None,
                 option_minimal_versions=None,
                 option_minimal_versions_ignore_params=None,
                 fail_results=None):

        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = dict()
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
            self.arg_spec = merged_arg_spec

        mutually_exclusive_params = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AssibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if)

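        # docker-py was renamed to 'docker' with its 2.0.0 release, so requiring
        # SDK version 2.0.0 or newer effectively requires the 'docker' package.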
        NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >=
                            LooseVersion('2.0.0'))

        self.docker_py_version = LooseVersion(docker_version)

        if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
            self.fail(
                "Cannot have both the docker-py and docker python modules (old and new version of Docker "
                "SDK for Python) installed together as they use the same namespace and cause a corrupt "
                "installation. Please uninstall both packages, and re-install only the docker-py or docker "
                "python module (for %s's Python %s). It is recommended to install the docker module if no "
                "support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
                "can leave the other module in a broken state." %
                (platform.node(), sys.executable))

        if not HAS_DOCKER_PY:
            if NEEDS_DOCKER_PY2:
                msg = missing_required_lib("Docker SDK for Python: docker")
                msg = msg + ", for example via `pip install docker`. The error was: %s"
            else:
                msg = missing_required_lib(
                    "Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)"
                )
                msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
            self.fail(msg % HAS_DOCKER_ERROR)

        if self.docker_py_version < LooseVersion(min_docker_version):
            msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
            if not NEEDS_DOCKER_PY2:
                # The minimal required version is < 2.0 (and the current version as well).
                # Advertise docker (instead of docker-py) for non-Python-2.6 users.
                msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
            elif docker_version < LooseVersion('2.0'):
                msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
            else:
                msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
            self.fail(msg % (docker_version, platform.node(), sys.executable,
                             min_docker_version))

        self.debug = self.module.params.get('debug')
        self.check_mode = self.module.check_mode
        self._connect_params = get_connect_params(self.auth_params,
                                                  fail_function=self.fail)

        try:
            super(AssibleDockerClient, self).__init__(**self._connect_params)
            self.docker_api_version_str = self.version()['ApiVersion']
        except APIError as exc:
            self.fail("Docker API error: %s" % exc)
        except Exception as exc:
            self.fail("Error connecting: %s" % exc)

        self.docker_api_version = LooseVersion(self.docker_api_version_str)
        if min_docker_api_version is not None:
            if self.docker_api_version < LooseVersion(min_docker_api_version):
                self.fail(
                    'Docker API version is %s. Minimum version required is %s.'
                    % (self.docker_api_version_str, min_docker_api_version))

        if option_minimal_versions is not None:
            self._get_minimal_versions(option_minimal_versions,
                                       option_minimal_versions_ignore_params)

    def log(self, msg, pretty_print=False):
        pass
        # if self.debug:
        #     log_file = open('docker.log', 'a')
        #     if pretty_print:
        #         log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
        #         log_file.write(u'\n')
        #     else:
        #         log_file.write(msg + u'\n')

    def fail(self, msg, **kwargs):
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    @staticmethod
    def _get_value(param_name, param_value, env_variable, default_value):
        if param_value is not None:
            # take module parameter value
            if param_value in BOOLEANS_TRUE:
                return True
            if param_value in BOOLEANS_FALSE:
                return False
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                if param_name == 'cert_path':
                    return os.path.join(env_value, 'cert.pem')
                if param_name == 'cacert_path':
                    return os.path.join(env_value, 'ca.pem')
                if param_name == 'key_path':
                    return os.path.join(env_value, 'key.pem')
                if env_value in BOOLEANS_TRUE:
                    return True
                if env_value in BOOLEANS_FALSE:
                    return False
                return env_value

        # take the default
        return default_value

    @property
    def auth_params(self):
        # Get authentication credentials.
        # Precedence: module parameters-> environment variables-> defaults.

        self.log('Getting credentials')

        params = dict()
        for key in DOCKER_COMMON_ARGS:
            params[key] = self.module.params.get(key)

        if self.module.params.get('use_tls'):
            # support use_tls option in docker_image.py. This will be deprecated.
            use_tls = self.module.params.get('use_tls')
            if use_tls == 'encrypt':
                params['tls'] = True
            if use_tls == 'verify':
                params['validate_certs'] = True

        result = dict(
            docker_host=self._get_value('docker_host', params['docker_host'],
                                        'DOCKER_HOST', DEFAULT_DOCKER_HOST),
            tls_hostname=self._get_value('tls_hostname',
                                         params['tls_hostname'],
                                         'DOCKER_TLS_HOSTNAME',
                                         DEFAULT_TLS_HOSTNAME),
            api_version=self._get_value('api_version', params['api_version'],
                                        'DOCKER_API_VERSION', 'auto'),
            cacert_path=self._get_value('cacert_path', params['ca_cert'],
                                        'DOCKER_CERT_PATH', None),
            cert_path=self._get_value('cert_path', params['client_cert'],
                                      'DOCKER_CERT_PATH', None),
            key_path=self._get_value('key_path', params['client_key'],
                                     'DOCKER_CERT_PATH', None),
            ssl_version=self._get_value('ssl_version', params['ssl_version'],
                                        'DOCKER_SSL_VERSION', None),
            tls=self._get_value('tls', params['tls'], 'DOCKER_TLS',
                                DEFAULT_TLS),
            tls_verify=self._get_value('tls_verify', params['validate_certs'],
                                       'DOCKER_TLS_VERIFY',
                                       DEFAULT_TLS_VERIFY),
            timeout=self._get_value('timeout', params['timeout'],
                                    'DOCKER_TIMEOUT', DEFAULT_TIMEOUT_SECONDS),
        )

        update_tls_hostname(result)

        return result

    def _handle_ssl_error(self, error):
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            self.fail(
                "You asked for verification that Docker daemons certificate's hostname matches %s. "
                "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
                "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
                "setting the `tls` parameter to true." %
                (self.auth_params['tls_hostname'], match.group(1),
                 match.group(1)))
        self.fail("SSL Exception: %s" % (error))

    def _get_minimal_versions(self,
                              option_minimal_versions,
                              ignore_params=None):
        self.option_minimal_versions = dict()
        for option in self.module.argument_spec:
            if ignore_params is not None:
                if option in ignore_params:
                    continue
            self.option_minimal_versions[option] = dict()
        self.option_minimal_versions.update(option_minimal_versions)

        for option, data in self.option_minimal_versions.items():
            # Test whether option is supported, and store result
            support_docker_py = True
            support_docker_api = True
            if 'docker_py_version' in data:
                support_docker_py = self.docker_py_version >= LooseVersion(
                    data['docker_py_version'])
            if 'docker_api_version' in data:
                support_docker_api = self.docker_api_version >= LooseVersion(
                    data['docker_api_version'])
            data['supported'] = support_docker_py and support_docker_api
            # Fail if option is not supported but used
            if not data['supported']:
                # Test whether option is specified
                if 'detect_usage' in data:
                    used = data['detect_usage'](self)
                else:
                    used = self.module.params.get(option) is not None
                    if used and 'default' in self.module.argument_spec[option]:
                        used = (self.module.params[option] !=
                                self.module.argument_spec[option]['default'])
                if used:
                    # If the option is used, compose error message.
                    if 'usage_msg' in data:
                        usg = data['usage_msg']
                    else:
                        usg = 'set %s option' % (option, )
                    if not support_docker_api:
                        msg = 'Docker API version is %s. Minimum version required is %s to %s.'
                        msg = msg % (self.docker_api_version_str,
                                     data['docker_api_version'], usg)
                    elif not support_docker_py:
                        msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
                        if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
                            msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
                        elif self.docker_py_version < LooseVersion('2.0.0'):
                            msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
                        else:
                            msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
                        msg = msg % (docker_version, platform.node(),
                                     sys.executable, data['docker_py_version'],
                                     usg)
                    else:
                        # should not happen
                        msg = 'Cannot %s with your configuration.' % (usg, )
                    self.fail(msg)

    def get_container_by_id(self, container_id):
        try:
            self.log("Inspecting container Id %s" % container_id)
            result = self.inspect_container(container=container_id)
            self.log("Completed container inspection")
            return result
        except NotFound as dummy:
            return None
        except Exception as exc:
            self.fail("Error inspecting container: %s" % exc)

    def get_container(self, name=None):
        '''
        Lookup a container and return the inspection results.
        '''
        if name is None:
            return None

        search_name = name
        if not name.startswith('/'):
            search_name = '/' + name

        result = None
        try:
            for container in self.containers(all=True):
                self.log("testing container: %s" % (container['Names']))
                if isinstance(container['Names'],
                              list) and search_name in container['Names']:
                    result = container
                    break
                if container['Id'].startswith(name):
                    result = container
                    break
                if container['Id'] == name:
                    result = container
                    break
        except SSLError as exc:
            self._handle_ssl_error(exc)
        except Exception as exc:
            self.fail("Error retrieving container list: %s" % exc)

        if result is None:
            return None

        return self.get_container_by_id(result['Id'])

    def get_network(self, name=None, network_id=None):
        '''
        Lookup a network and return the inspection results.
        '''
        if name is None and network_id is None:
            return None

        result = None

        if network_id is None:
            try:
                for network in self.networks():
                    self.log("testing network: %s" % (network['Name']))
                    if name == network['Name']:
                        result = network
                        break
                    if network['Id'].startswith(name):
                        result = network
                        break
            except SSLError as exc:
                self._handle_ssl_error(exc)
            except Exception as exc:
                self.fail("Error retrieving network list: %s" % exc)

        if result is not None:
            network_id = result['Id']

        if network_id is not None:
            try:
                self.log("Inspecting network Id %s" % network_id)
                result = self.inspect_network(network_id)
                self.log("Completed network inspection")
            except NotFound as dummy:
                return None
            except Exception as exc:
                self.fail("Error inspecting network: %s" % exc)

        return result

    def find_image(self, name, tag):
        '''
        Lookup an image (by name and tag) and return the inspection results.
        '''
        if not name:
            return None

        self.log("Find image %s:%s" % (name, tag))
        images = self._image_lookup(name, tag)
        if not images:
            # With API <= 1.20, images pulled from Docker Hub can be reported as 'docker.io/<name>'
            registry, repo_name = auth.resolve_repository_name(name)
            if registry == 'docker.io':
                # If docker.io is explicitly there in name, the image
                # isn't found in some cases (#41509)
                self.log("Check for docker.io image: %s" % repo_name)
                images = self._image_lookup(repo_name, tag)
                if not images and repo_name.startswith('library/'):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len('library/'):]
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)
                if not images:
                    # Last case: if docker.io wasn't there, it can be that
                    # the image wasn't found either (#15586)
                    lookup = "%s/%s" % (registry, repo_name)
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail("Registry returned more than one result for %s:%s" %
                      (name, tag))

        if len(images) == 1:
            try:
                inspection = self.inspect_image(images[0]['Id'])
            except Exception as exc:
                self.fail("Error inspecting image %s:%s - %s" %
                          (name, tag, str(exc)))
            return inspection

        self.log("Image %s:%s not found." % (name, tag))
        return None

    def find_image_by_id(self, image_id):
        '''
        Lookup an image (by ID) and return the inspection results.
        '''
        if not image_id:
            return None

        self.log("Find image %s (by ID)" % image_id)
        try:
            inspection = self.inspect_image(image_id)
        except Exception as exc:
            self.fail("Error inspecting image ID %s - %s" %
                      (image_id, str(exc)))
        return inspection

    def _image_lookup(self, name, tag):
        '''
        Including a tag in the name parameter sent to the Docker SDK for Python images method
        does not work consistently. Instead, get the result set for name and manually check
        if the tag exists.
        '''
        try:
            response = self.images(name=name)
        except Exception as exc:
            self.fail("Error searching for image %s - %s" % (name, str(exc)))
        images = response
        if tag:
            lookup = "%s:%s" % (name, tag)
            lookup_digest = "%s@%s" % (name, tag)
            images = []
            for image in response:
                tags = image.get('RepoTags')
                digests = image.get('RepoDigests')
                if (tags and lookup in tags) or (digests
                                                 and lookup_digest in digests):
                    images = [image]
                    break
        return images

    def pull_image(self, name, tag="latest"):
        '''
        Pull an image
        '''
        self.log("Pulling image %s:%s" % (name, tag))
        old_tag = self.find_image(name, tag)
        try:
            for line in self.pull(name, tag=tag, stream=True, decode=True):
                self.log(line, pretty_print=True)
                if line.get('error'):
                    if line.get('errorDetail'):
                        error_detail = line.get('errorDetail')
                        self.fail("Error pulling %s - code: %s message: %s" %
                                  (name, error_detail.get('code'),
                                   error_detail.get('message')))
                    else:
                        self.fail("Error pulling %s - %s" %
                                  (name, line.get('error')))
        except Exception as exc:
            self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))

        new_tag = self.find_image(name, tag)

        return new_tag, old_tag == new_tag

    def report_warnings(self, result, warnings_key=None):
        '''
        Checks result of client operation for warnings, and if present, outputs them.

        warnings_key should be a list of keys used to crawl the result dictionary.
        For example, if warnings_key == ['a', 'b'], the function will consider
        result['a']['b'] if these keys exist. If the result is a non-empty string, it
        will be reported as a warning. If the result is a list, every entry will be
        reported as a warning.

        In most cases (if warnings are returned at all), warnings_key should be
        ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
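
        For example, with the default warnings_key, a result of
        {'Warnings': ['low disk space']} produces a single module warning.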
        '''
        if warnings_key is None:
            warnings_key = ['Warnings']
        for key in warnings_key:
            if not isinstance(result, Mapping):
                return
            result = result.get(key)
        if isinstance(result, Sequence):
            for warning in result:
                self.module.warn('Docker warning: {0}'.format(warning))
        elif isinstance(result, string_types) and result:
            self.module.warn('Docker warning: {0}'.format(result))

    def inspect_distribution(self, image, **kwargs):
        '''
        Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
        since prior versions did not support accessing private repositories.
        '''
        if self.docker_py_version < LooseVersion('4.0.0'):
            registry = auth.resolve_repository_name(image)[0]
            header = auth.get_config_header(self, registry)
            if header:
                return self._result(
                    self._get(self._url('/distribution/{0}/json', image),
                              headers={'X-Registry-Auth': header}),
                    json=True)
        return super(AssibleDockerClient,
                     self).inspect_distribution(image, **kwargs)
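
A minimal sketch of how a module would typically drive this client, using only constructor arguments and methods that appear above; the 'container' option and the surrounding main() are invented for illustration and are not part of the listing.

def main():
    argument_spec = dict(
        container=dict(type='str', required=True),   # hypothetical option
    )

    client = AssibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_api_version='1.25',
    )

    container = client.get_container(client.module.params['container'])
    if container is None:
        client.fail('Container not found')

    client.module.exit_json(changed=False, container=container)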
Example #3
def main():
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        database=dict(required=True, aliases=['db', 'login_db']),
        state=dict(default='present', choices=['present', 'absent']),
        privs=dict(required=False, aliases=['priv']),
        type=dict(default='table',
                  choices=[
                      'table',
                      'sequence',
                      'function',
                      'database',
                      'schema',
                      'language',
                      'tablespace',
                      'group',
                      'default_privs',
                      'foreign_data_wrapper',
                      'foreign_server',
                      'type',
                  ]),
        objs=dict(required=False, aliases=['obj']),
        schema=dict(required=False),
        roles=dict(required=True, aliases=['role']),
        session_role=dict(required=False),
        target_roles=dict(required=False),
        grant_option=dict(required=False,
                          type='bool',
                          aliases=['admin_option']),
        host=dict(default='', aliases=['login_host']),
        unix_socket=dict(default='', aliases=['login_unix_socket']),
        login=dict(default='postgres', aliases=['login_user']),
        password=dict(default='', aliases=['login_password'], no_log=True),
        fail_on_role=dict(type='bool', default=True),
    )

    module = AssibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    fail_on_role = module.params['fail_on_role']

    # Create type object as namespace for module params
    p = type('Params', (), module.params)
    # param "schema": default, allowed depends on param "type"
    if p.type in ['table', 'sequence', 'function', 'type', 'default_privs']:
        p.schema = p.schema or 'public'
    elif p.schema:
        module.fail_json(msg='Argument "schema" is not allowed '
                         'for type "%s".' % p.type)

    # param "objs": default, required depends on param "type"
    if p.type == 'database':
        p.objs = p.objs or p.database
    elif not p.objs:
        module.fail_json(msg='Argument "objs" is required '
                         'for type "%s".' % p.type)

    # param "privs": allowed, required depends on param "type"
    if p.type == 'group':
        if p.privs:
            module.fail_json(msg='Argument "privs" is not allowed '
                             'for type "group".')
    elif not p.privs:
        module.fail_json(msg='Argument "privs" is required '
                         'for type "%s".' % p.type)

    # Connect to Database
    if not psycopg2:
        module.fail_json(msg=missing_required_lib('psycopg2'),
                         exception=PSYCOPG2_IMP_ERR)
    try:
        conn = Connection(p, module)
    except psycopg2.Error as e:
        module.fail_json(msg='Could not connect to database: %s' %
                         to_native(e),
                         exception=traceback.format_exc())
    except TypeError as e:
        if 'sslrootcert' in e.args[0]:
            module.fail_json(
                msg='Postgresql server must be at least version 8.4 to support sslrootcert'
            )
        module.fail_json(msg="unable to connect to database: %s" %
                         to_native(e),
                         exception=traceback.format_exc())
    except ValueError as e:
        # We raise this when the psycopg library is too old
        module.fail_json(msg=to_native(e))

    if p.session_role:
        try:
            conn.cursor.execute('SET ROLE "%s"' % p.session_role)
        except Exception as e:
            module.fail_json(msg="Could not switch to role %s: %s" %
                             (p.session_role, to_native(e)),
                             exception=traceback.format_exc())

    try:
        # privs
        if p.privs:
            privs = frozenset(pr.upper() for pr in p.privs.split(','))
            if not privs.issubset(VALID_PRIVS):
                module.fail_json(msg='Invalid privileges specified: %s' %
                                 privs.difference(VALID_PRIVS))
        else:
            privs = None
        # objs:
        if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_tables_in_schema(p.schema)
        elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_sequences_in_schema(p.schema)
        elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_functions_in_schema(p.schema)
        elif p.type == 'default_privs':
            if p.objs == 'ALL_DEFAULT':
                objs = frozenset(VALID_DEFAULT_OBJS.keys())
            else:
                objs = frozenset(obj.upper() for obj in p.objs.split(','))
                if not objs.issubset(VALID_DEFAULT_OBJS):
                    module.fail_json(
                        msg='Invalid Object set specified: %s' %
                        objs.difference(VALID_DEFAULT_OBJS.keys()))
            # Again, do we have valid privs specified for object type:
            valid_objects_for_priv = frozenset(
                obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
            if not valid_objects_for_priv == objs:
                module.fail_json(
                    msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format(
                        valid_objects_for_priv, objs))
        else:
            objs = p.objs.split(',')

            # function signatures are encoded using ':' to separate args
            if p.type == 'function':
                objs = [obj.replace(':', ',') for obj in objs]

        # roles
        if p.roles == 'PUBLIC':
            roles = 'PUBLIC'
        else:
            roles = p.roles.split(',')

            if len(roles) == 1 and not role_exists(module, conn.cursor,
                                                   roles[0]):
                if fail_on_role:
                    module.fail_json(msg="Role '%s' does not exist" %
                                     roles[0].strip())
                else:
                    module.warn("Role '%s' does not exist, nothing to do" %
                                roles[0].strip())

                module.exit_json(changed=False)

        # check if target_roles is set with type: default_privs
        if p.target_roles and not p.type == 'default_privs':
            module.warn(
                '"target_roles" will be ignored. '
                'Argument "type: default_privs" is required for usage of "target_roles".'
            )

        # target roles
        if p.target_roles:
            target_roles = p.target_roles.split(',')
        else:
            target_roles = None

        changed = conn.manipulate_privs(
            obj_type=p.type,
            privs=privs,
            objs=objs,
            roles=roles,
            target_roles=target_roles,
            state=p.state,
            grant_option=p.grant_option,
            schema_qualifier=p.schema,
            fail_on_role=fail_on_role,
        )

    except Error as e:
        conn.rollback()
        module.fail_json(msg=e.message, exception=traceback.format_exc())

    except psycopg2.Error as e:
        conn.rollback()
        module.fail_json(msg=to_native(e.message))

    if module.check_mode:
        conn.rollback()
    else:
        conn.commit()
    module.exit_json(changed=changed, queries=executed_queries)
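
The p = type('Params', (), module.params) line above turns the params dict into a throwaway class whose attributes mirror the dict keys, so the rest of main() can write p.type instead of module.params['type']. A self-contained illustration with made-up values:

params = {'type': 'table', 'schema': None, 'database': 'acme'}
p = type('Params', (), params)   # dict keys become class attributes

assert p.type == 'table'
assert p.schema is None
assert p.database == 'acme'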
Example #4
def main():

    module = AssibleModule(
        argument_spec=dict(
            name=dict(required=True, type='str', aliases=['service']),
            state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
            enabled=dict(type='bool'),
            sleep=dict(type='int', default=1),
            pattern=dict(type='str'),
            arguments=dict(type='str', aliases=['args']),
            runlevels=dict(type='list', elements='str'),
            daemonize=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled']],
    )

    name = module.params['name']
    action = module.params['state']
    enabled = module.params['enabled']
    runlevels = module.params['runlevels']
    pattern = module.params['pattern']
    sleep_for = module.params['sleep']
    rc = 0
    out = err = ''
    result = {
        'name': name,
        'changed': False,
        'status': {}
    }

    # ensure service exists, get script name
    fail_if_missing(module, sysv_exists(name), name)
    script = get_sysv_script(name)

    # locate binaries for service management
    paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
    binaries = ['chkconfig', 'update-rc.d', 'insserv', 'service']

    # Keeps track of the service status for various runlevels because we can
    # operate on multiple runlevels at once
    runlevel_status = {}

    location = {}
    for binary in binaries:
        location[binary] = module.get_bin_path(binary, opt_dirs=paths)

    # figure out enable status
    if runlevels:
        for rl in runlevels:
            runlevel_status.setdefault(rl, {})
            runlevel_status[rl]["enabled"] = sysv_is_enabled(name, runlevel=rl)
    else:
        runlevel_status["enabled"] = sysv_is_enabled(name)

    # figure out started status, everyone does it different!
    is_started = False
    worked = False

    # user knows other methods fail and supplied pattern
    if pattern:
        worked = is_started = get_ps(module, pattern)
    else:
        if location.get('service'):
            # standard tool that has been 'de-standardized' by reimplementation in other OSes/distros
            cmd = '%s %s status' % (location['service'], name)
        elif script:
            # maybe script implements status (not LSB)
            cmd = '%s status' % script
        else:
            module.fail_json(msg="Unable to determine service status")

        (rc, out, err) = module.run_command(cmd)
        if not rc == -1:
            # special case
            if name == 'iptables' and "ACCEPT" in out:
                worked = True
                is_started = True

            # check output messages, messy but sadly more reliable than rc
            if not worked and out.count('\n') <= 1:

                cleanout = out.lower().replace(name.lower(), '')

                for stopped in ['stop', 'is dead ', 'dead but ', 'could not access pid file', 'inactive']:
                    if stopped in cleanout:
                        worked = True
                        break

                if not worked:
                    for started_status in ['run', 'start', 'active']:
                        if started_status in cleanout and "not " not in cleanout:
                            is_started = True
                            worked = True
                            break

            # hope rc is not lying to us, use often used 'bad' returns
            if not worked and rc in [1, 2, 3, 4, 69]:
                worked = True

        if not worked:
            # hail mary
            if rc == 0:
                is_started = True
                worked = True
            # ps for luck, can only assure positive match
            elif get_ps(module, name):
                is_started = True
                worked = True
                module.warn("Used ps output to match service name and determine it is up, this is very unreliable")

    if not worked:
        module.warn("Unable to determine if service is up, assuming it is down")

    ###########################################################################
    # BEGIN: Enable/Disable
    result['status'].setdefault('enabled', {})
    result['status']['enabled']['changed'] = False
    result['status']['enabled']['rc'] = None
    result['status']['enabled']['stdout'] = None
    result['status']['enabled']['stderr'] = None
    if runlevels:
        result['status']['enabled']['runlevels'] = runlevels
        for rl in runlevels:
            if enabled != runlevel_status[rl]["enabled"]:
                result['changed'] = True
                result['status']['enabled']['changed'] = True

        if not module.check_mode and result['changed']:
            # Perform enable/disable here
            if enabled:
                if location.get('update-rc.d'):
                    (rc, out, err) = module.run_command("%s %s enable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
                elif location.get('chkconfig'):
                    (rc, out, err) = module.run_command("%s --level %s %s on" % (location['chkconfig'], ''.join(runlevels), name))
            else:
                if location.get('update-rc.d'):
                    (rc, out, err) = module.run_command("%s %s disable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
                elif location.get('chkconfig'):
                    (rc, out, err) = module.run_command("%s --level %s %s off" % (location['chkconfig'], ''.join(runlevels), name))
    else:
        if enabled is not None and enabled != runlevel_status["enabled"]:
            result['changed'] = True
            result['status']['enabled']['changed'] = True

        if not module.check_mode and result['changed']:
            # Perform enable/disable here
            if enabled:
                if location.get('update-rc.d'):
                    (rc, out, err) = module.run_command("%s %s defaults" % (location['update-rc.d'], name))
                elif location.get('chkconfig'):
                    (rc, out, err) = module.run_command("%s %s on" % (location['chkconfig'], name))
            else:
                if location.get('update-rc.d'):
                    (rc, out, err) = module.run_command("%s %s disable" % (location['update-rc.d'], name))
                elif location.get('chkconfig'):
                    (rc, out, err) = module.run_command("%s %s off" % (location['chkconfig'], name))

    # Assigned above, might be useful if something goes sideways
    if not module.check_mode and result['status']['enabled']['changed']:
        result['status']['enabled']['rc'] = rc
        result['status']['enabled']['stdout'] = out
        result['status']['enabled']['stderr'] = err
        rc, out, err = None, None, None

        if "illegal runlevel specified" in result['status']['enabled']['stderr']:
            module.fail_json(msg="Illegal runlevel specified for enable operation on service %s" % name, **result)
    # END: Enable/Disable
    ###########################################################################

    ###########################################################################
    # BEGIN: state
    result['status'].setdefault(module.params['state'], {})
    result['status'][module.params['state']]['changed'] = False
    result['status'][module.params['state']]['rc'] = None
    result['status'][module.params['state']]['stdout'] = None
    result['status'][module.params['state']]['stderr'] = None
    if action:
        action = re.sub(r'p?ed$', '', action.lower())
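        # e.g. 'started' -> 'start', 'stopped' -> 'stop', 'restarted' -> 'restart', 'reloaded' -> 'reload'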

        def runme(doit):

            args = module.params['arguments']
            cmd = "%s %s %s" % (script, doit, "" if args is None else args)

            # how to run
            if module.params['daemonize']:
                (rc, out, err) = daemonize(module, cmd)
            else:
                (rc, out, err) = module.run_command(cmd)
            # FIXME: ERRORS

            if rc != 0:
                module.fail_json(msg="Failed to %s service: %s" % (action, name), rc=rc, stdout=out, stderr=err)

            return (rc, out, err)

        if action == 'restart':
            result['changed'] = True
            result['status'][module.params['state']]['changed'] = True
            if not module.check_mode:

                # cannot rely on existing 'restart' in init script
                for dothis in ['stop', 'start']:
                    (rc, out, err) = runme(dothis)
                    if sleep_for:
                        sleep(sleep_for)

        elif is_started != (action == 'start'):
            result['changed'] = True
            result['status'][module.params['state']]['changed'] = True
            if not module.check_mode:
                rc, out, err = runme(action)

        elif is_started == (action == 'stop'):
            result['changed'] = True
            result['status'][module.params['state']]['changed'] = True
            if not module.check_mode:
                rc, out, err = runme(action)

        if not module.check_mode and result['status'][module.params['state']]['changed']:
            result['status'][module.params['state']]['rc'] = rc
            result['status'][module.params['state']]['stdout'] = out
            result['status'][module.params['state']]['stderr'] = err
            rc, out, err = None, None, None
    # END: state
    ###########################################################################

    module.exit_json(**result)
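
The started-status heuristic above strips the service name out of the status output and then looks for stop/start keywords; a standalone rerun of that check (the sample output string is invented):

name = 'crond'
out = 'crond is stopped'

cleanout = out.lower().replace(name.lower(), '')

stopped = any(marker in cleanout
              for marker in ['stop', 'is dead ', 'dead but ',
                             'could not access pid file', 'inactive'])
print(stopped)   # True -> the service is treated as not started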
Example #5
def main():
    """ main entry point for module execution
    """
    backup_spec = dict(filename=dict(), dir_path=dict(type="path"))
    argument_spec = dict(
        src=dict(type="path"),
        lines=dict(aliases=["commands"], type="list"),
        parents=dict(type="list"),
        before=dict(type="list"),
        after=dict(type="list"),
        match=dict(default="line", choices=["line", "strict", "exact",
                                            "none"]),
        replace=dict(default="line", choices=["line", "block"]),
        multiline_delimiter=dict(default="@"),
        running_config=dict(aliases=["config"]),
        intended_config=dict(),
        defaults=dict(type="bool", default=False),
        backup=dict(type="bool", default=False),
        backup_options=dict(type="dict", options=backup_spec),
        save_when=dict(choices=["always", "never", "modified", "changed"],
                       default="never"),
        diff_against=dict(choices=["startup", "intended", "running"]),
        diff_ignore_lines=dict(type="list"),
    )

    argument_spec.update(ios_argument_spec)

    mutually_exclusive = [("lines", "src"), ("parents", "src")]

    required_if = [
        ("match", "strict", ["lines"]),
        ("match", "exact", ["lines"]),
        ("replace", "block", ["lines"]),
        ("diff_against", "intended", ["intended_config"]),
    ]

    module = AssibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=mutually_exclusive,
        required_if=required_if,
        supports_check_mode=True,
    )

    result = {"changed": False}

    warnings = list()
    check_args(module, warnings)
    result["warnings"] = warnings

    diff_ignore_lines = module.params["diff_ignore_lines"]
    config = None
    contents = None
    flags = get_defaults_flag(module) if module.params["defaults"] else []
    connection = get_connection(module)

    if module.params["backup"] or (module._diff and
                                   module.params["diff_against"] == "running"):
        contents = get_config(module, flags=flags)
        config = NetworkConfig(indent=1, contents=contents)
        if module.params["backup"]:
            result["__backup__"] = contents

    if any((module.params["lines"], module.params["src"])):
        match = module.params["match"]
        replace = module.params["replace"]
        path = module.params["parents"]

        candidate = get_candidate_config(module)
        running = get_running_config(module, contents, flags=flags)
        try:
            response = connection.get_diff(
                candidate=candidate,
                running=running,
                diff_match=match,
                diff_ignore_lines=diff_ignore_lines,
                path=path,
                diff_replace=replace,
            )
        except ConnectionError as exc:
            module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))

        config_diff = response["config_diff"]
        banner_diff = response["banner_diff"]

        if config_diff or banner_diff:
            commands = config_diff.split("\n")

            if module.params["before"]:
                commands[:0] = module.params["before"]

            if module.params["after"]:
                commands.extend(module.params["after"])

            result["commands"] = commands
            result["updates"] = commands
            result["banners"] = banner_diff

            # send the configuration commands to the device and merge
            # them with the current running config
            if not module.check_mode:
                if commands:
                    edit_config_or_macro(connection, commands)
                if banner_diff:
                    connection.edit_banner(
                        candidate=json.dumps(banner_diff),
                        multiline_delimiter=module.params["multiline_delimiter"],
                    )

            result["changed"] = True

    running_config = module.params["running_config"]
    startup_config = None

    if module.params["save_when"] == "always":
        save_config(module, result)
    elif module.params["save_when"] == "modified":
        output = run_commands(module,
                              ["show running-config", "show startup-config"])

        running_config = NetworkConfig(indent=1,
                                       contents=output[0],
                                       ignore_lines=diff_ignore_lines)
        startup_config = NetworkConfig(indent=1,
                                       contents=output[1],
                                       ignore_lines=diff_ignore_lines)

        if running_config.sha1 != startup_config.sha1:
            save_config(module, result)
    elif module.params["save_when"] == "changed" and result["changed"]:
        save_config(module, result)

    if module._diff:
        if not running_config:
            output = run_commands(module, "show running-config")
            contents = output[0]
        else:
            contents = running_config

        # recreate the object in order to process diff_ignore_lines
        running_config = NetworkConfig(indent=1,
                                       contents=contents,
                                       ignore_lines=diff_ignore_lines)

        if module.params["diff_against"] == "running":
            if module.check_mode:
                module.warn(
                    "unable to perform diff against running-config due to check mode"
                )
                contents = None
            else:
                contents = config.config_text

        elif module.params["diff_against"] == "startup":
            if not startup_config:
                output = run_commands(module, "show startup-config")
                contents = output[0]
            else:
                contents = startup_config.config_text

        elif module.params["diff_against"] == "intended":
            contents = module.params["intended_config"]

        if contents is not None:
            base_config = NetworkConfig(indent=1,
                                        contents=contents,
                                        ignore_lines=diff_ignore_lines)

            if running_config.sha1 != base_config.sha1:
                if module.params["diff_against"] == "intended":
                    before = running_config
                    after = base_config
                elif module.params["diff_against"] in ("startup", "running"):
                    before = base_config
                    after = running_config

                result.update({
                    "changed": True,
                    "diff": {
                        "before": str(before),
                        "after": str(after)
                    },
                })

    module.exit_json(**result)
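
The save_when=modified branch above decides whether to save purely by comparing the sha1 of the running and startup configurations. A minimal sketch of that comparison, reusing the NetworkConfig constructor arguments shown in the module (the import path is an assumption for illustration):

from assible.module_utils.network.common.config import NetworkConfig  # assumed path

running = NetworkConfig(indent=1, contents='hostname router-a\n')
startup = NetworkConfig(indent=1, contents='hostname router-b\n')

if running.sha1 != startup.sha1:
    print('configs differ -> save_config(module, result) would be called')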
Example #6
def main():

    # the command module is the one assible module that does not take key=value args
    # hence don't copy this one if you are looking to build others!
    module = AssibleModule(
        argument_spec=dict(
            _raw_params=dict(),
            _uses_shell=dict(type='bool', default=False),
            argv=dict(type='list', elements='str'),
            chdir=dict(type='path'),
            executable=dict(),
            creates=dict(type='path'),
            removes=dict(type='path'),
            # The default for this really comes from the action plugin
            warn=dict(type='bool', default=False, removed_in_version='2.14', removed_from_collection='assible.builtin'),
            stdin=dict(required=False),
            stdin_add_newline=dict(type='bool', default=True),
            strip_empty_ends=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )
    shell = module.params['_uses_shell']
    chdir = module.params['chdir']
    executable = module.params['executable']
    args = module.params['_raw_params']
    argv = module.params['argv']
    creates = module.params['creates']
    removes = module.params['removes']
    warn = module.params['warn']
    stdin = module.params['stdin']
    stdin_add_newline = module.params['stdin_add_newline']
    strip = module.params['strip_empty_ends']

    if not shell and executable:
        module.warn("As of Assible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
        executable = None

    if (not args or args.strip() == '') and not argv:
        module.fail_json(rc=256, msg="no command given")

    if args and argv:
        module.fail_json(rc=256, msg="only command or argv can be given, not both")

    if not shell and args:
        args = shlex.split(args)

    args = args or argv

    # All args must be strings
    if is_iterable(args, include_strings=False):
        args = [to_native(arg, errors='surrogate_or_strict', nonstring='simplerepr') for arg in args]

    if chdir:
        try:
            chdir = to_bytes(os.path.abspath(chdir), errors='surrogate_or_strict')
        except ValueError as e:
            module.fail_json(msg='Unable to use supplied chdir: %s' % to_text(e))

        try:
            os.chdir(chdir)
        except (IOError, OSError) as e:
            module.fail_json(msg='Unable to change directory before execution: %s' % to_text(e))

    if creates:
        # do not run the command if the line contains creates=filename
        # and the filename already exists.  This allows idempotence
        # of command executions.
        if glob.glob(creates):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s exists" % creates,
                changed=False,
                rc=0
            )

    if removes:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist.  This allows idempotence
        # of command executions.
        if not glob.glob(removes):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s does not exist" % removes,
                changed=False,
                rc=0
            )

    if warn:
        check_command(module, args)

    startd = datetime.datetime.now()

    if not module.check_mode:
        rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None, data=stdin, binary_data=(not stdin_add_newline))
    elif creates or removes:
        rc = 0
        out = err = b'Command would have run if not in check mode'
    else:
        module.exit_json(msg="skipped, running in check mode", skipped=True)

    endd = datetime.datetime.now()
    delta = endd - startd

    if strip:
        out = out.rstrip(b"\r\n")
        err = err.rstrip(b"\r\n")

    result = dict(
        cmd=args,
        stdout=out,
        stderr=err,
        rc=rc,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        changed=True,
    )

    if rc != 0:
        module.fail_json(msg='non-zero return code', **result)

    module.exit_json(**result)
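
When _uses_shell is false, the free-form command string is tokenised with shlex.split() before execution, which is why shell features such as redirection are not interpreted. A quick standard-library illustration:

import shlex

shlex.split('echo "hello world" > /tmp/out')
# -> ['echo', 'hello world', '>', '/tmp/out']   (the '>' stays a literal argument)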
Example #7
def main():
    # initialize
    module = AssibleModule(
        argument_spec=dict(
            name=dict(type='str', aliases=['service', 'unit']),
            state=dict(type='str',
                       choices=['reloaded', 'restarted', 'started',
                                'stopped']),
            enabled=dict(type='bool'),
            force=dict(type='bool'),
            masked=dict(type='bool'),
            daemon_reload=dict(type='bool',
                               default=False,
                               aliases=['daemon-reload']),
            daemon_reexec=dict(type='bool',
                               default=False,
                               aliases=['daemon-reexec']),
            scope=dict(type='str',
                       default='system',
                       choices=['system', 'user', 'global']),
            no_block=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_one_of=[[
            'state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec'
        ]],
        required_by=dict(
            state=('name', ),
            enabled=('name', ),
            masked=('name', ),
        ),
    )

    unit = module.params['name']
    if unit is not None:
        for globpattern in (r"*", r"?", r"["):
            if globpattern in unit:
                module.fail_json(
                    msg="This module does not currently support using glob patterns, "
                        "found '%s' in service name: %s" % (globpattern, unit))

    systemctl = module.get_bin_path('systemctl', True)

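    # default XDG_RUNTIME_DIR if unset; 'systemctl --user' relies on it to find the session bus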
    if os.getenv('XDG_RUNTIME_DIR') is None:
        os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
    ''' Set CLI options depending on params '''
    # if scope is 'system' or None, we can ignore as there is no extra switch.
    # The other choices match the corresponding switch
    if module.params['scope'] != 'system':
        systemctl += " --%s" % module.params['scope']

    if module.params['no_block']:
        systemctl += " --no-block"

    if module.params['force']:
        systemctl += " --force"

    rc = 0
    out = err = ''
    result = dict(
        name=unit,
        changed=False,
        status=dict(),
    )

    # Run daemon-reload first, if requested
    if module.params['daemon_reload'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reload: %s' %
                             (rc, err))

    # Run daemon-reexec
    if module.params['daemon_reexec'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reexec: %s' %
                             (rc, err))

    if unit:
        found = False
        is_initd = sysv_exists(unit)
        is_systemd = False

        # check service data, cannot error out on rc as it changes across versions, assume not found
        (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))

        if rc == 0 and not (request_was_ignored(out)
                            or request_was_ignored(err)):
            # load return of systemctl show into dictionary for easy access and return
            if out:
                result['status'] = parse_systemctl_show(
                    to_native(out).split('\n'))

                is_systemd = ('LoadState' in result['status']
                              and result['status']['LoadState'] != 'not-found')

                is_masked = ('LoadState' in result['status']
                             and result['status']['LoadState'] == 'masked')

                # Check for loading error
                if is_systemd and not is_masked and 'LoadError' in result['status']:
                    module.fail_json(msg="Error loading unit file '%s': %s" %
                                     (unit, result['status']['LoadError']))
        else:
            # list taken from man systemctl(1) for systemd 244
            valid_enabled_states = [
                "enabled", "enabled-runtime", "linked", "linked-runtime",
                "masked", "masked-runtime", "static", "indirect", "disabled",
                "generated", "transient"
            ]

            (rc, out, err) = module.run_command("%s is-enabled '%s'" %
                                                (systemctl, unit))
            if out.strip() in valid_enabled_states:
                is_systemd = True
            else:
                # fall back to list-unit-files, as show does not work on some systems (chroot);
                # not used as the primary check since it skips some services (like those using init.d) and requires .service/etc. notation
                (rc, out, err) = module.run_command("%s list-unit-files '%s'" %
                                                    (systemctl, unit))
                if rc == 0:
                    is_systemd = True
                else:
                    # Check for systemctl command
                    module.run_command(systemctl, check_rc=True)

        # Does service exist?
        found = is_systemd or is_initd
        if is_initd and not is_systemd:
            module.warn(
                'The service (%s) is actually an init script but the system is managed by systemd'
                % unit)

        # mask/unmask the service, if requested, can operate on services before they are installed
        if module.params['masked'] is not None:
            # state is not masked unless systemd affirms otherwise
            (rc, out, err) = module.run_command("%s is-enabled '%s'" %
                                                (systemctl, unit))
            masked = out.strip() == "masked"

            if masked != module.params['masked']:
                result['changed'] = True
                if module.params['masked']:
                    action = 'mask'
                else:
                    action = 'unmask'

                if not module.check_mode:
                    (rc, out, err) = module.run_command(
                        "%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        # some versions of systemd CAN mask/unmask non-existing services; we only fail on missing if they don't
                        fail_if_missing(module, found, unit, msg='host')

        # Enable/disable service startup at boot if requested
        if module.params['enabled'] is not None:

            if module.params['enabled']:
                action = 'enable'
            else:
                action = 'disable'

            fail_if_missing(module, found, unit, msg='host')

            # do we need to enable the service?
            enabled = False
            (rc, out, err) = module.run_command("%s is-enabled '%s'" %
                                                (systemctl, unit))

            # check the systemctl result, or whether it is an init script
            if rc == 0:
                enabled = True
            elif rc == 1:
                # If this is not a user- or global-scope service and both an init script and a unit file exist, stdout should say enabled/disabled; otherwise rely on the return code.
                if module.params['scope'] == 'system' and \
                        is_initd and \
                        not out.strip().endswith('disabled') and \
                        sysv_is_enabled(unit):
                    enabled = True

            # default to current state
            result['enabled'] = enabled

            # Change enable/disable if needed
            if enabled != module.params['enabled']:
                result['changed'] = True
                if not module.check_mode:
                    (rc, out, err) = module.run_command(
                        "%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        module.fail_json(msg="Unable to %s service %s: %s" %
                                         (action, unit, out + err))

                result['enabled'] = not enabled

        # set service state if requested
        if module.params['state'] is not None:
            fail_if_missing(module, found, unit, msg="host")

            # default to desired state
            result['state'] = module.params['state']

            # What is current service state?
            if 'ActiveState' in result['status']:
                action = None
                if module.params['state'] == 'started':
                    if not is_running_service(result['status']):
                        action = 'start'
                elif module.params['state'] == 'stopped':
                    if is_running_service(result['status']) or \
                            is_deactivating_service(result['status']):
                        action = 'stop'
                else:
                    if not is_running_service(result['status']):
                        action = 'start'
                    else:
                        action = module.params['state'][:-2]  # remove 'ed' from restarted/reloaded
                    result['state'] = 'started'

                if action:
                    result['changed'] = True
                    if not module.check_mode:
                        (rc, out, err) = module.run_command(
                            "%s %s '%s'" % (systemctl, action, unit))
                        if rc != 0:
                            module.fail_json(
                                msg="Unable to %s service %s: %s" %
                                (action, unit, err))
            # check for chroot
            elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
                module.warn(
                    "Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working."
                )
            else:
                # this should not happen?
                module.fail_json(msg="Service is in unknown state",
                                 status=result['status'])

    module.exit_json(**result)
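The snippet above relies on a parse_systemctl_show() helper that is not shown here. Below is a minimal sketch of the idea, assuming simple `Key=Value` lines from `systemctl show`; the name is illustrative and the real helper also copes with multi-line values such as ExecStart={ ... }.

def parse_systemctl_show_sketch(lines):
    # Turn "Key=Value" lines from `systemctl show` into a dict; lines without
    # an '=' are skipped. Multi-line values are intentionally not handled here.
    parsed = {}
    for line in lines:
        if '=' not in line:
            continue
        key, _, value = line.partition('=')
        parsed[key] = value
    return parsed

# parse_systemctl_show_sketch(['LoadState=loaded', 'ActiveState=active'])
# -> {'LoadState': 'loaded', 'ActiveState': 'active'}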
Exemplo n.º 8
0
def main():

    global module

    module = AssibleModule(
        # not checking because of daisy chain to file module
        argument_spec=dict(
            src=dict(type='path'),
            # used to handle 'dest is a directory' via template, a slight hack
            _original_basename=dict(type='str'),
            content=dict(type='str', no_log=True),
            dest=dict(type='path', required=True),
            backup=dict(type='bool', default=False),
            force=dict(type='bool', default=True, aliases=['thirsty']),
            validate=dict(type='str'),
            directory_mode=dict(type='raw'),
            remote_src=dict(type='bool'),
            local_follow=dict(type='bool'),
            checksum=dict(type='str'),
            follow=dict(type='bool', default=False),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )

    if module.params.get('thirsty'):
        module.deprecate(
            'The alias "thirsty" has been deprecated and will be removed, use "force" instead',
            version='2.13',
            collection_name='assible.builtin')

    src = module.params['src']
    b_src = to_bytes(src, errors='surrogate_or_strict')
    dest = module.params['dest']
    # Make sure we always have a directory component for later processing
    if os.path.sep not in dest:
        dest = '.{0}{1}'.format(os.path.sep, dest)
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    backup = module.params['backup']
    force = module.params['force']
    _original_basename = module.params.get('_original_basename', None)
    validate = module.params.get('validate', None)
    follow = module.params['follow']
    local_follow = module.params['local_follow']
    mode = module.params['mode']
    owner = module.params['owner']
    group = module.params['group']
    remote_src = module.params['remote_src']
    checksum = module.params['checksum']

    if not os.path.exists(b_src):
        module.fail_json(msg="Source %s not found" % (src))
    if not os.access(b_src, os.R_OK):
        module.fail_json(msg="Source %s not readable" % (src))

    # Preserve is usually handled in the action plugin but mode + remote_src has to be done on the
    # remote host
    if module.params['mode'] == 'preserve':
        module.params['mode'] = '0%03o' % stat.S_IMODE(os.stat(b_src).st_mode)
    mode = module.params['mode']

    checksum_dest = None

    if os.path.isfile(src):
        checksum_src = module.sha1(src)
    else:
        checksum_src = None

    # Backwards compat only.  This will be None in FIPS mode
    try:
        if os.path.isfile(src):
            md5sum_src = module.md5(src)
        else:
            md5sum_src = None
    except ValueError:
        md5sum_src = None

    changed = False

    if checksum and checksum_src != checksum:
        module.fail_json(
            msg='Copied file does not match the expected checksum. Transfer failed.',
            checksum=checksum_src,
            expected_checksum=checksum)

    # Special handling for recursive copy - create intermediate dirs
    if dest.endswith(os.sep):
        if _original_basename:
            dest = os.path.join(dest, _original_basename)
        b_dest = to_bytes(dest, errors='surrogate_or_strict')
        dirname = os.path.dirname(dest)
        b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
        if not os.path.exists(b_dirname):
            try:
                (pre_existing_dir,
                 new_directory_list) = split_pre_existing_dir(dirname)
            except AssibleModuleError as e:
                # the exception carries a results dict; append context before failing
                e.results['msg'] += ' Could not copy to {0}'.format(dest)
                module.fail_json(**e.results)

            os.makedirs(b_dirname)
            directory_args = module.load_file_common_arguments(module.params)
            directory_mode = module.params["directory_mode"]
            if directory_mode is not None:
                directory_args['mode'] = directory_mode
            else:
                directory_args['mode'] = None
            adjust_recursive_directory_permissions(pre_existing_dir,
                                                   new_directory_list, module,
                                                   directory_args, changed)

    if os.path.isdir(b_dest):
        basename = os.path.basename(src)
        if _original_basename:
            basename = _original_basename
        dest = os.path.join(dest, basename)
        b_dest = to_bytes(dest, errors='surrogate_or_strict')

    if os.path.exists(b_dest):
        if os.path.islink(b_dest) and follow:
            b_dest = os.path.realpath(b_dest)
            dest = to_native(b_dest, errors='surrogate_or_strict')
        if not force:
            module.exit_json(msg="file already exists",
                             src=src,
                             dest=dest,
                             changed=False)
        if os.access(b_dest, os.R_OK) and os.path.isfile(b_dest):
            checksum_dest = module.sha1(dest)
    else:
        if not os.path.exists(os.path.dirname(b_dest)):
            try:
                # os.path.exists() can return false in some
                # circumstances where the directory does not have
                # the execute bit for the current user set, in
                # which case the stat() call will raise an OSError
                os.stat(os.path.dirname(b_dest))
            except OSError as e:
                if "permission denied" in to_native(e).lower():
                    module.fail_json(
                        msg="Destination directory %s is not accessible" %
                        (os.path.dirname(dest)))
            module.fail_json(msg="Destination directory %s does not exist" %
                             (os.path.dirname(dest)))

    if not os.access(os.path.dirname(b_dest),
                     os.W_OK) and not module.params['unsafe_writes']:
        module.fail_json(msg="Destination %s not writable" %
                         (os.path.dirname(dest)))

    backup_file = None
    if checksum_src != checksum_dest or os.path.islink(b_dest):
        if not module.check_mode:
            try:
                if backup:
                    if os.path.exists(b_dest):
                        backup_file = module.backup_local(dest)
                # allow for conversion from symlink.
                if os.path.islink(b_dest):
                    os.unlink(b_dest)
                    open(b_dest, 'w').close()
                if validate:
                    # if we have a mode, make sure we set it on the temporary
                    # file source as some validations may require it
                    if mode is not None:
                        module.set_mode_if_different(src, mode, False)
                    if owner is not None:
                        module.set_owner_if_different(src, owner, False)
                    if group is not None:
                        module.set_group_if_different(src, group, False)
                    if "%s" not in validate:
                        module.fail_json(msg="validate must contain %%s: %s" %
                                         (validate))
                    (rc, out, err) = module.run_command(validate % src)
                    if rc != 0:
                        module.fail_json(msg="failed to validate",
                                         exit_status=rc,
                                         stdout=out,
                                         stderr=err)
                b_mysrc = b_src
                if remote_src and os.path.isfile(b_src):
                    _, b_mysrc = tempfile.mkstemp(dir=os.path.dirname(b_dest))

                    shutil.copyfile(b_src, b_mysrc)
                    try:
                        shutil.copystat(b_src, b_mysrc)
                    except OSError as err:
                        if err.errno == errno.ENOSYS and mode == "preserve":
                            module.warn("Unable to copy stats {0}".format(
                                to_native(b_src)))
                        else:
                            raise

                # might be needed below, to decide whether to clear ACLs after the move
                if PY3 and hasattr(os, 'listxattr'):
                    try:
                        src_has_acls = 'system.posix_acl_access' in os.listxattr(
                            src)
                    except Exception as e:
                        # assume unwanted ACLs by default
                        src_has_acls = True

                module.atomic_move(
                    b_mysrc,
                    dest,
                    unsafe_writes=module.params['unsafe_writes'])

                if PY3 and hasattr(os, 'listxattr') and \
                        platform.system() == 'Linux' and not remote_src:
                    # atomic_move used above to copy src into dest might, in some cases,
                    # use shutil.copy2 which in turn uses shutil.copystat.
                    # Since Python 3.3, shutil.copystat copies file extended attributes:
                    # https://docs.python.org/3/library/shutil.html#shutil.copystat
                    # os.listxattr (along with others) was added to handle the operation.

                    # This means that on Python 3 we are copying the extended attributes which includes
                    # the ACLs on some systems - further limited to Linux as the documentation above claims
                    # that the extended attributes are copied only on Linux. Also, os.listxattr is only
                    # available on Linux.

                    # If not remote_src, then the file was copied from the controller. In that
                    # case, any filesystem ACLs are artifacts of the copy rather than preservation
                    # of existing attributes. Get rid of them:

                    if src_has_acls:
                        # FIXME If dest has any default ACLs, they are not applied to src now because
                        # they were overridden by copystat. Should/can we do anything about this?
                        # 'system.posix_acl_default' in os.listxattr(os.path.dirname(b_dest))

                        try:
                            clear_facls(dest)
                        except ValueError as e:
                            if 'setfacl' in to_native(e):
                                # No setfacl so we're okay.  The controller couldn't have set a facl
                                # without the setfacl command
                                pass
                            else:
                                raise
                        except RuntimeError as e:
                            # setfacl failed.
                            if 'Operation not supported' in to_native(e):
                                # The file system does not support ACLs.
                                pass
                            else:
                                raise

            except (IOError, OSError):
                module.fail_json(msg="failed to copy: %s to %s" % (src, dest),
                                 traceback=traceback.format_exc())
        changed = True
    else:
        changed = False

    # If neither has a checksum, both src and dest are directories.
    if checksum_src is None and checksum_dest is None:
        if remote_src and os.path.isdir(module.params['src']):
            b_src = to_bytes(module.params['src'],
                             errors='surrogate_or_strict')
            b_dest = to_bytes(module.params['dest'],
                              errors='surrogate_or_strict')

            if src.endswith(os.path.sep) and os.path.isdir(
                    module.params['dest']):
                diff_files_changed = copy_diff_files(b_src, b_dest, module)
                left_only_changed = copy_left_only(b_src, b_dest, module)
                common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
                owner_group_changed = chown_recursive(b_dest, module)
                if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
                    changed = True

            if src.endswith(
                    os.path.sep) and not os.path.exists(module.params['dest']):
                b_basename = to_bytes(os.path.basename(src),
                                      errors='surrogate_or_strict')
                b_dest = to_bytes(os.path.join(b_dest, b_basename),
                                  errors='surrogate_or_strict')
                b_src = to_bytes(os.path.join(module.params['src'], ""),
                                 errors='surrogate_or_strict')
                if not module.check_mode:
                    shutil.copytree(b_src, b_dest, symlinks=not (local_follow))
                chown_recursive(dest, module)
                changed = True

            if not src.endswith(os.path.sep) and os.path.isdir(
                    module.params['dest']):
                b_basename = to_bytes(os.path.basename(src),
                                      errors='surrogate_or_strict')
                b_dest = to_bytes(os.path.join(b_dest, b_basename),
                                  errors='surrogate_or_strict')
                b_src = to_bytes(os.path.join(module.params['src'], ""),
                                 errors='surrogate_or_strict')
                if not module.check_mode and not os.path.exists(b_dest):
                    shutil.copytree(b_src, b_dest, symlinks=not (local_follow))
                    changed = True
                    chown_recursive(dest, module)
                if module.check_mode and not os.path.exists(b_dest):
                    changed = True
                if os.path.exists(b_dest):
                    diff_files_changed = copy_diff_files(b_src, b_dest, module)
                    left_only_changed = copy_left_only(b_src, b_dest, module)
                    common_dirs_changed = copy_common_dirs(
                        b_src, b_dest, module)
                    owner_group_changed = chown_recursive(b_dest, module)
                    if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
                        changed = True

            if not src.endswith(os.path.sep) and not os.path.exists(
                    module.params['dest']):
                b_basename = to_bytes(os.path.basename(module.params['src']),
                                      errors='surrogate_or_strict')
                b_dest = to_bytes(os.path.join(b_dest, b_basename),
                                  errors='surrogate_or_strict')
                if not module.check_mode and not os.path.exists(b_dest):
                    os.makedirs(b_dest)
                    b_src = to_bytes(os.path.join(module.params['src'], ""),
                                     errors='surrogate_or_strict')
                    diff_files_changed = copy_diff_files(b_src, b_dest, module)
                    left_only_changed = copy_left_only(b_src, b_dest, module)
                    common_dirs_changed = copy_common_dirs(
                        b_src, b_dest, module)
                    owner_group_changed = chown_recursive(b_dest, module)
                    if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
                        changed = True
                if module.check_mode and not os.path.exists(b_dest):
                    changed = True

    res_args = dict(dest=dest,
                    src=src,
                    md5sum=md5sum_src,
                    checksum=checksum_src,
                    changed=changed)
    if backup_file:
        res_args['backup_file'] = backup_file

    if not module.check_mode:
        file_args = module.load_file_common_arguments(module.params, path=dest)
        res_args['changed'] = module.set_fs_attributes_if_different(
            file_args, res_args['changed'])

    module.exit_json(**res_args)
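The copy above is driven by a checksum comparison: nothing is moved unless the SHA-1 of the source differs from the destination (or the destination is a symlink). A self-contained sketch of that check follows; the helper name is illustrative (the snippet itself uses module.sha1()).

import hashlib

def sha1_of(path, chunk_size=65536):
    # Stream the file in chunks so large files do not have to fit in memory.
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

# changed = sha1_of('/tmp/src') != sha1_of('/tmp/dest')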
Exemplo n.º 9
0
def main():
    module = AssibleModule(
        argument_spec=dict(
            state=dict(
                type='str',
                default='present',
                choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
            update_cache=dict(type='bool', aliases=['update-cache']),
            update_cache_retries=dict(type='int', default=5),
            update_cache_retry_max_delay=dict(type='int', default=12),
            cache_valid_time=dict(type='int', default=0),
            purge=dict(type='bool', default=False),
            package=dict(type='list', elements='str', aliases=['pkg', 'name']),
            deb=dict(type='path'),
            default_release=dict(type='str', aliases=['default-release']),
            install_recommends=dict(type='bool',
                                    aliases=['install-recommends']),
            force=dict(type='bool', default=False),
            upgrade=dict(type='str',
                         choices=['dist', 'full', 'no', 'safe', 'yes'],
                         default='no'),
            dpkg_options=dict(type='str', default=DPKG_OPTIONS),
            autoremove=dict(type='bool', default=False),
            autoclean=dict(type='bool', default=False),
            fail_on_autoremove=dict(type='bool', default=False),
            policy_rc_d=dict(type='int', default=None),
            only_upgrade=dict(type='bool', default=False),
            force_apt_get=dict(type='bool', default=False),
            allow_unauthenticated=dict(type='bool',
                                       default=False,
                                       aliases=['allow-unauthenticated']),
        ),
        mutually_exclusive=[['deb', 'package', 'upgrade']],
        required_one_of=[[
            'autoremove', 'deb', 'package', 'update_cache', 'upgrade'
        ]],
        supports_check_mode=True,
    )

    module.run_command_environ_update = APT_ENV_VARS

    if not HAS_PYTHON_APT:
        if module.check_mode:
            module.fail_json(
                msg="%s must be installed to use check mode. "
                "If run normally this module can auto-install it." %
                PYTHON_APT)
        try:
            # We skip the cache update when auto-installing the dependency
            # if the user explicitly declared update_cache=no.
            if module.params.get('update_cache') is False:
                module.warn(
                    "Auto-installing missing dependency without updating cache: %s"
                    % PYTHON_APT)
            else:
                module.warn(
                    "Updating cache and auto-installing missing dependency: %s"
                    % PYTHON_APT)
                module.run_command(['apt-get', 'update'], check_rc=True)

            module.run_command([
                'apt-get', 'install', '--no-install-recommends', PYTHON_APT,
                '-y', '-q'
            ],
                               check_rc=True)
            global apt, apt_pkg
            import apt
            import apt.debfile
            import apt_pkg
        except ImportError:
            module.fail_json(
                msg="Could not import python modules: apt, apt_pkg. "
                "Please install %s package." % PYTHON_APT)

    global APTITUDE_CMD
    APTITUDE_CMD = module.get_bin_path("aptitude", False)
    global APT_GET_CMD
    APT_GET_CMD = module.get_bin_path("apt-get")

    p = module.params

    if p['upgrade'] == 'no':
        p['upgrade'] = None

    use_apt_get = p['force_apt_get']

    if not use_apt_get and not APTITUDE_CMD:
        use_apt_get = True

    updated_cache = False
    updated_cache_time = 0
    install_recommends = p['install_recommends']
    allow_unauthenticated = p['allow_unauthenticated']
    dpkg_options = expand_dpkg_options(p['dpkg_options'])
    autoremove = p['autoremove']
    fail_on_autoremove = p['fail_on_autoremove']
    autoclean = p['autoclean']

    # Get the cache object
    cache = get_cache(module)

    try:
        if p['default_release']:
            try:
                apt_pkg.config['APT::Default-Release'] = p['default_release']
            except AttributeError:
                apt_pkg.Config['APT::Default-Release'] = p['default_release']
            # reopen cache w/ modified config
            cache.open(progress=None)

        mtimestamp, updated_cache_time = get_updated_cache_time()
        # cache_valid_time defaults to 0, which updates the cache whenever
        # needed if `update_cache` was set to true
        updated_cache = False
        if p['update_cache'] or p['cache_valid_time']:
            now = datetime.datetime.now()
            tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
            if not mtimestamp + tdelta >= now:
                # Retry to update the cache with exponential backoff
                err = ''
                update_cache_retries = module.params.get(
                    'update_cache_retries')
                update_cache_retry_max_delay = module.params.get(
                    'update_cache_retry_max_delay')
                randomize = random.randint(0, 1000) / 1000.0

                for retry in range(update_cache_retries):
                    try:
                        cache.update()
                        break
                    except apt.cache.FetchFailedException as e:
                        err = to_native(e)

                    # Use exponential backoff plus a little bit of randomness
                    delay = 2**retry + randomize
                    if delay > update_cache_retry_max_delay:
                        delay = update_cache_retry_max_delay + randomize
                    time.sleep(delay)
                else:
                    module.fail_json(msg='Failed to update apt cache: %s' %
                                     (err if err else 'unknown reason'))

                cache.open(progress=None)
                mtimestamp, post_cache_update_time = get_updated_cache_time()
                if updated_cache_time != post_cache_update_time:
                    updated_cache = True
                updated_cache_time = post_cache_update_time

            # If there is nothing else to do, exit. This sets changed based
            # on whether the cache was updated.
            if not p['package'] and not p['upgrade'] and not p['deb']:
                module.exit_json(changed=updated_cache,
                                 cache_updated=updated_cache,
                                 cache_update_time=updated_cache_time)

        force_yes = p['force']

        if p['upgrade']:
            upgrade(module, p['upgrade'], force_yes, p['default_release'],
                    use_apt_get, dpkg_options, autoremove, fail_on_autoremove,
                    allow_unauthenticated)

        if p['deb']:
            if p['state'] != 'present':
                module.fail_json(msg="deb only supports state=present")
            if '://' in p['deb']:
                p['deb'] = fetch_file(module, p['deb'])
            install_deb(module,
                        p['deb'],
                        cache,
                        install_recommends=install_recommends,
                        allow_unauthenticated=allow_unauthenticated,
                        force=force_yes,
                        fail_on_autoremove=fail_on_autoremove,
                        dpkg_options=p['dpkg_options'])

        unfiltered_packages = p['package'] or ()
        packages = [
            package.strip() for package in unfiltered_packages
            if package != '*'
        ]
        all_installed = '*' in unfiltered_packages
        latest = p['state'] == 'latest'

        if latest and all_installed:
            if packages:
                module.fail_json(
                    msg='unable to install additional packages when upgrading all installed packages')
            upgrade(module, 'yes', force_yes, p['default_release'],
                    use_apt_get, dpkg_options, autoremove, fail_on_autoremove,
                    allow_unauthenticated)

        if packages:
            for package in packages:
                if package.count('=') > 1:
                    module.fail_json(msg="invalid package spec: %s" % package)
                if latest and '=' in package:
                    module.fail_json(
                        msg='version number inconsistent with state=latest: %s'
                        % package)

        if not packages:
            if autoclean:
                cleanup(module,
                        p['purge'],
                        force=force_yes,
                        operation='autoclean',
                        dpkg_options=dpkg_options)
            if autoremove:
                cleanup(module,
                        p['purge'],
                        force=force_yes,
                        operation='autoremove',
                        dpkg_options=dpkg_options)

        if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
            state_upgrade = False
            state_builddep = False
            state_fixed = False
            if p['state'] == 'latest':
                state_upgrade = True
            if p['state'] == 'build-dep':
                state_builddep = True
            if p['state'] == 'fixed':
                state_fixed = True

            success, retvals = install(
                module,
                packages,
                cache,
                upgrade=state_upgrade,
                default_release=p['default_release'],
                install_recommends=install_recommends,
                force=force_yes,
                dpkg_options=dpkg_options,
                build_dep=state_builddep,
                fixed=state_fixed,
                autoremove=autoremove,
                fail_on_autoremove=fail_on_autoremove,
                only_upgrade=p['only_upgrade'],
                allow_unauthenticated=allow_unauthenticated)

            # Store whether the cache has been updated
            retvals['cache_updated'] = updated_cache
            # Store when the cache was last updated
            retvals['cache_update_time'] = updated_cache_time

            if success:
                module.exit_json(**retvals)
            else:
                module.fail_json(**retvals)
        elif p['state'] == 'absent':
            remove(module,
                   packages,
                   cache,
                   p['purge'],
                   force=force_yes,
                   dpkg_options=dpkg_options,
                   autoremove=autoremove)

    except apt.cache.LockFailedException as lockFailedException:
        module.fail_json(msg="Failed to lock apt for exclusive operation: %s" %
                         lockFailedException)
    except apt.cache.FetchFailedException as fetchFailedException:
        module.fail_json(msg="Could not fetch updated apt files: %s" %
                         fetchFailedException)
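The cache update above retries with exponential backoff plus a little jitter. A standalone sketch of the same pattern is shown below; the function and parameter names are placeholders, with the retry/delay defaults taken from the argument spec.

import random
import time

def retry_with_backoff(fetch, retries=5, max_delay=12):
    # fetch is any zero-argument callable that raises on failure.
    jitter = random.randint(0, 1000) / 1000.0
    last_err = None
    for attempt in range(retries):
        try:
            return fetch()
        except Exception as err:
            last_err = err
        # 1s, 2s, 4s, ... capped at max_delay, plus a little randomness
        time.sleep(min(2 ** attempt + jitter, max_delay + jitter))
    raise last_err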
Exemplo n.º 10
0
def main():
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        name=dict(type='str', required=True),
        db=dict(type='str', aliases=['login_db']),
        value=dict(type='str'),
        reset=dict(type='bool'),
        session_role=dict(type='str'),
    )
    module = AssibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    name = module.params["name"]
    value = module.params["value"]
    reset = module.params["reset"]

    # Allow passing values like 1mb instead of 1MB, etc.:
    if value:
        for unit in POSSIBLE_SIZE_UNITS:
            if value[:-2].isdigit() and unit in value[-2:]:
                value = value.upper()

    if value and reset:
        module.fail_json(
            msg="%s: value and reset params are mutually exclusive" % name)

    if not value and not reset:
        module.fail_json(
            msg="%s: at least one of value or reset param must be specified" %
            name)

    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    kw = {}
    # Check server version (needs 9.4 or later):
    ver = db_connection.server_version
    if ver < PG_REQ_VER:
        module.warn("PostgreSQL is %s version but %s or later is required" %
                    (ver, PG_REQ_VER))
        kw = dict(
            changed=False,
            restart_required=False,
            value_pretty="",
            prev_val_pretty="",
            value={
                "value": "",
                "unit": ""
            },
        )
        kw['name'] = name
        db_connection.close()
        module.exit_json(**kw)

    # Set default returned values:
    restart_required = False
    changed = False
    kw['name'] = name
    kw['restart_required'] = False

    # Get info about param state:
    res = param_get(cursor, module, name)
    current_value = res[0]
    raw_val = res[1]
    unit = res[2]
    boot_val = res[3]
    context = res[4]

    if value == 'True':
        value = 'on'
    elif value == 'False':
        value = 'off'

    kw['prev_val_pretty'] = current_value
    kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
    kw['context'] = context

    # Do job
    if context == "internal":
        module.fail_json(
            msg="%s: cannot be changed (internal context). See "
            "https://www.postgresql.org/docs/current/runtime-config-preset.html"
            % name)

    if context == "postmaster":
        restart_required = True

    # If check_mode, just compare and exit:
    if module.check_mode:
        if pretty_to_bytes(value) == pretty_to_bytes(current_value):
            kw['changed'] = False

        else:
            kw['value_pretty'] = value
            kw['changed'] = True

        # In any case, return the current raw value in check_mode:
        kw['value'] = dict(
            value=raw_val,
            unit=unit,
        )
        kw['restart_required'] = restart_required
        module.exit_json(**kw)

    # Set param:
    if value and value != current_value:
        changed = param_set(cursor, module, name, value, context)

        kw['value_pretty'] = value

    # Reset param:
    elif reset:
        if raw_val == boot_val:
            # nothing to change, exit:
            kw['value'] = dict(
                value=raw_val,
                unit=unit,
            )
            module.exit_json(**kw)

        changed = param_set(cursor, module, name, boot_val, context)

    if restart_required:
        module.warn("Restart of PostgreSQL is required for setting %s" % name)

    cursor.close()
    db_connection.close()

    # Reconnect and recheck current value:
    if context in ('sighup', 'superuser-backend', 'backend', 'superuser',
                   'user'):
        db_connection = connect_to_db(module, conn_params, autocommit=True)
        cursor = db_connection.cursor(cursor_factory=DictCursor)

        res = param_get(cursor, module, name)
        # f_ means 'final'
        f_value = res[0]
        f_raw_val = res[1]

        if raw_val == f_raw_val:
            changed = False

        else:
            changed = True

        kw['value_pretty'] = f_value
        kw['value'] = dict(
            value=f_raw_val,
            unit=unit,
        )

        cursor.close()
        db_connection.close()

    kw['changed'] = changed
    kw['restart_required'] = restart_required
    module.exit_json(**kw)
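check_mode above compares values through pretty_to_bytes() so that, for example, '1024kB' and '1MB' count as equal. The sketch below illustrates that normalization under the assumption of 1024-based PostgreSQL size units; it is not the module's actual helper.

SIZE_UNITS = [('TB', 1024 ** 4), ('GB', 1024 ** 3), ('MB', 1024 ** 2), ('kB', 1024), ('B', 1)]

def pretty_to_bytes_sketch(value):
    # Longest suffixes are checked first so 'MB' is not mistaken for 'B'.
    for unit, factor in SIZE_UNITS:
        if value.endswith(unit) and value[:-len(unit)].strip().isdigit():
            return int(value[:-len(unit)].strip()) * factor
    return value  # non-size settings are compared as plain strings

# pretty_to_bytes_sketch('1024kB') == pretty_to_bytes_sketch('1MB')  # True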
Exemplo n.º 11
0
def main():
    argument_spec = url_argument_spec()
    argument_spec.update(
        dest=dict(type='path'),
        url_username=dict(type='str', aliases=['user']),
        url_password=dict(type='str', aliases=['password'], no_log=True),
        body=dict(type='raw'),
        body_format=dict(type='str', default='raw', choices=['form-urlencoded', 'json', 'raw', 'form-multipart']),
        src=dict(type='path'),
        method=dict(type='str', default='GET'),
        return_content=dict(type='bool', default=False),
        follow_redirects=dict(type='str', default='safe', choices=['all', 'no', 'none', 'safe', 'urllib2', 'yes']),
        creates=dict(type='path'),
        removes=dict(type='path'),
        status_code=dict(type='list', elements='int', default=[200]),
        timeout=dict(type='int', default=30),
        headers=dict(type='dict', default={}),
        unix_socket=dict(type='path'),
        remote_src=dict(type='bool', default=False),
    )

    module = AssibleModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        mutually_exclusive=[['body', 'src']],
    )

    if module.params.get('thirsty'):
        module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead',
                         version='2.13', collection_name='assible.builtin')

    url = module.params['url']
    body = module.params['body']
    body_format = module.params['body_format'].lower()
    method = module.params['method'].upper()
    dest = module.params['dest']
    return_content = module.params['return_content']
    creates = module.params['creates']
    removes = module.params['removes']
    status_code = [int(x) for x in list(module.params['status_code'])]
    socket_timeout = module.params['timeout']

    dict_headers = module.params['headers']

    if not re.match('^[A-Z]+$', method):
        module.fail_json(msg="Parameter 'method' needs to be a single word in uppercase, like GET or POST.")

    if body_format == 'json':
        # Encode the body unless it's a string, in which case assume it is pre-formatted JSON
        if not isinstance(body, string_types):
            body = json.dumps(body)
        if 'content-type' not in [header.lower() for header in dict_headers]:
            dict_headers['Content-Type'] = 'application/json'
    elif body_format == 'form-urlencoded':
        if not isinstance(body, string_types):
            try:
                body = form_urlencoded(body)
            except ValueError as e:
                module.fail_json(msg='failed to parse body as form_urlencoded: %s' % to_native(e), elapsed=0)
        if 'content-type' not in [header.lower() for header in dict_headers]:
            dict_headers['Content-Type'] = 'application/x-www-form-urlencoded'
    elif body_format == 'form-multipart':
        try:
            content_type, body = prepare_multipart(body)
        except (TypeError, ValueError) as e:
            module.fail_json(msg='failed to parse body as form-multipart: %s' % to_native(e))
        dict_headers['Content-Type'] = content_type

    if creates is not None:
        # do not make the request if the file given in `creates` already
        # exists. This allows idempotent uri executions.
        if os.path.exists(creates):
            module.exit_json(stdout="skipped, since '%s' exists" % creates, changed=False)

    if removes is not None:
        # do not make the request if the file given in `removes` does not
        # exist. This allows idempotent uri executions.
        if not os.path.exists(removes):
            module.exit_json(stdout="skipped, since '%s' does not exist" % removes, changed=False)

    # Make the request
    start = datetime.datetime.utcnow()
    resp, content, dest = uri(module, url, dest, body, body_format, method,
                              dict_headers, socket_timeout)
    resp['elapsed'] = (datetime.datetime.utcnow() - start).seconds
    resp['status'] = int(resp['status'])
    resp['changed'] = False

    # Write the file out if requested
    if dest is not None:
        if resp['status'] in status_code and resp['status'] != 304:
            write_file(module, url, dest, content, resp)
            # allow file attribute changes
            resp['changed'] = True
            module.params['path'] = dest
            file_args = module.load_file_common_arguments(module.params, path=dest)
            resp['changed'] = module.set_fs_attributes_if_different(file_args, resp['changed'])
        resp['path'] = dest

    # Transmogrify the headers, replacing '-' with '_', since variables don't
    # work with dashes.
    # In python3, the headers are title cased.  Lowercase them to be
    # compatible with the python2 behaviour.
    uresp = {}
    for key, value in iteritems(resp):
        ukey = key.replace("-", "_").lower()
        uresp[ukey] = value

    if 'location' in uresp:
        uresp['location'] = absolute_location(url, uresp['location'])

    # Default content_encoding to try
    content_encoding = 'utf-8'
    if 'content_type' in uresp:
        # Handle multiple Content-Type headers
        charsets = []
        content_types = []
        for value in uresp['content_type'].split(','):
            ct, params = cgi.parse_header(value)
            if ct not in content_types:
                content_types.append(ct)
            if 'charset' in params:
                if params['charset'] not in charsets:
                    charsets.append(params['charset'])

        if content_types:
            content_type = content_types[0]
            if len(content_types) > 1:
                module.warn(
                    'Received multiple conflicting Content-Type values (%s), using %s' % (', '.join(content_types), content_type)
                )
        if charsets:
            content_encoding = charsets[0]
            if len(charsets) > 1:
                module.warn(
                    'Received multiple conflicting charset values (%s), using %s' % (', '.join(charsets), content_encoding)
                )

        u_content = to_text(content, encoding=content_encoding)
        if any(candidate in content_type for candidate in JSON_CANDIDATES):
            try:
                js = json.loads(u_content)
                uresp['json'] = js
            except Exception:
                if PY2:
                    sys.exc_clear()  # Avoid false positive traceback in fail_json() on Python 2
    else:
        u_content = to_text(content, encoding=content_encoding)

    if module.no_log_values:
        uresp = sanitize_keys(uresp, module.no_log_values, NO_MODIFY_KEYS)

    if resp['status'] not in status_code:
        uresp['msg'] = 'Status code was %s and not %s: %s' % (resp['status'], status_code, uresp.get('msg', ''))
        if return_content:
            module.fail_json(content=u_content, **uresp)
        else:
            module.fail_json(**uresp)
    elif return_content:
        module.exit_json(content=u_content, **uresp)
    else:
        module.exit_json(**uresp)
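The response post-processing above lowercases header names and turns dashes into underscores so the values can be used as variables, and pulls the charset out of Content-Type before decoding the body. A small sketch of both steps, using cgi.parse_header just as the snippet does; the helper name is illustrative.

import cgi

def normalize_headers_sketch(headers):
    # 'Content-Type' -> 'content_type', matching the uresp keys built above.
    return {key.replace('-', '_').lower(): value for key, value in headers.items()}

content_type, params = cgi.parse_header('application/json; charset=utf-8')
# content_type == 'application/json', params == {'charset': 'utf-8'}
encoding = params.get('charset', 'utf-8')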
Exemplo n.º 12
0
def main():

    # get supported pkg managers
    PKG_MANAGERS = get_all_pkg_managers()
    PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]

    # start work
    global module
    module = AssibleModule(
        argument_spec=dict(
            manager={'type': 'list', 'elements': 'str', 'default': ['auto']},
            strategy={'choices': ['first', 'all'], 'default': 'first'},
        ),
        supports_check_mode=True,
    )
    packages = {}
    results = {'assible_facts': {}}
    managers = [x.lower() for x in module.params['manager']]
    strategy = module.params['strategy']

    if 'auto' in managers:
        # keep the user's order; we dedupe below
        managers.extend(PKG_MANAGER_NAMES)
        managers.remove('auto')

    unsupported = set(managers).difference(PKG_MANAGER_NAMES)
    if unsupported:
        if 'auto' in module.params['manager']:
            msg = 'Could not auto detect a usable package manager, check warnings for details.'
        else:
            msg = 'Unsupported package managers requested: %s' % (
                ', '.join(unsupported))
        module.fail_json(msg=msg)

    found = 0
    seen = set()
    for pkgmgr in managers:

        if found and strategy == 'first':
            break

        # dedupe as per above
        if pkgmgr in seen:
            continue
        seen.add(pkgmgr)
        try:
            try:
                # manager throws exception on init (calls self.test) if not usable.
                manager = PKG_MANAGERS[pkgmgr]()
                if manager.is_available():
                    found += 1
                    packages.update(manager.get_packages())

            except Exception as e:
                if pkgmgr in module.params['manager']:
                    module.warn(
                        'Requested package manager %s was not usable by this module: %s'
                        % (pkgmgr, to_text(e)))
                continue

        except Exception as e:
            if pkgmgr in module.params['manager']:
                module.warn('Failed to retrieve packages with %s: %s' %
                            (pkgmgr, to_text(e)))

    if found == 0:
        msg = (
            'Could not detect a supported package manager from the following list: %s, '
            'or the required Python library is not installed. Check warnings for details.'
            % managers)
        module.fail_json(msg=msg)

    # Set the facts, this will override the facts in assible_facts that might exist from previous runs
    # when using operating system level or distribution package managers
    results['assible_facts']['packages'] = packages

    module.exit_json(**results)
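The manager loop above expands 'auto' into every known package manager, keeps the user's order, dedupes, and stops after the first candidate when strategy is 'first'. A reduced sketch of that selection logic follows, with placeholder names; the real loop additionally instantiates each manager and checks is_available() before counting it as found.

def select_managers_sketch(requested, known, strategy='first'):
    # requested: manager names from the user (may contain 'auto');
    # known: all supported manager names.
    managers = list(requested)
    if 'auto' in managers:
        managers.extend(known)
        managers.remove('auto')
    seen, chosen = set(), []
    for name in managers:
        if name in seen or name not in known:
            continue
        seen.add(name)
        chosen.append(name)
        if strategy == 'first':
            break
    return chosen

# select_managers_sketch(['auto'], ['apt', 'rpm', 'pip'])   -> ['apt']
# select_managers_sketch(['auto'], ['apt', 'rpm'], 'all')   -> ['apt', 'rpm']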