Example #1
    def get_config(self):
        """
        Reads the settings from the gce.ini file.

        Populates a ConfigParser object with defaults and
        attempts to read an .ini-style configuration from the filename
        specified in GCE_INI_PATH. If the environment variable is
        not present, the filename defaults to gce.ini in the same
        directory as this script.
        """
        gce_ini_default_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "gce.ini")
        gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)

        # Create a ConfigParser.
        # This provides empty defaults to each key, so that environment
        # variable configuration (as opposed to INI configuration) is able
        # to work.
        config = configparser.ConfigParser(
            defaults={
                'gce_service_account_email_address': '',
                'gce_service_account_pem_file_path': '',
                'gce_project_id': '',
                'gce_zone': '',
                'libcloud_secrets': '',
                'instance_tags': '',
                'inventory_ip_type': '',
                'cache_path': '~/.ansible/tmp',
                'cache_max_age': '300'
            })
        if 'gce' not in config.sections():
            config.add_section('gce')
        if 'inventory' not in config.sections():
            config.add_section('inventory')
        if 'cache' not in config.sections():
            config.add_section('cache')

        config.read(gce_ini_path)

        #########
        # Section added for processing ini settings
        #########

        # Set the instance_states filter based on config file options
        self.instance_states = []
        if config.has_option('gce', 'instance_states'):
            states = config.get('gce', 'instance_states')
            # Ignore if instance_states is an empty string.
            if states:
                self.instance_states = states.split(',')

        # Set the instance_tags filter, env var overrides config from file
        # and cli param overrides all
        if self.args.instance_tags:
            self.instance_tags = self.args.instance_tags
        else:
            self.instance_tags = os.environ.get(
                'GCE_INSTANCE_TAGS', config.get('gce', 'instance_tags'))
        if self.instance_tags:
            self.instance_tags = self.instance_tags.split(',')

        # Caching
        cache_path = config.get('cache', 'cache_path')
        cache_max_age = config.getint('cache', 'cache_max_age')
        # TODO(supertom): support project-specific caches
        cache_name = 'ansible-gce.cache'
        self.cache = CloudInventoryCache(cache_path=cache_path,
                                         cache_max_age=cache_max_age,
                                         cache_name=cache_name)
        return config
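A note on the defaults= pattern above: those values land in ConfigParser's DEFAULT section, so every section falls back to them when the .ini file omits a key. A minimal standalone sketch (not part of the inventory script):

import configparser

config = configparser.ConfigParser(defaults={'cache_max_age': '300'})
config.add_section('cache')
# [cache] has no explicit cache_max_age, so the DEFAULT value is used
print(config.getint('cache', 'cache_max_age'))  # 300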
Example #2
    def read_settings(self):
        ''' Reads the settings from the packet_net.ini file '''
        if six.PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()

        _ini_path_raw = os.environ.get('PACKET_NET_INI_PATH')

        if _ini_path_raw:
            packet_ini_path = os.path.expanduser(
                os.path.expandvars(_ini_path_raw))
        else:
            packet_ini_path = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini')
        config.read(packet_ini_path)

        # items per page
        self.items_per_page = 999
        if config.has_option(ini_section, 'items_per_page'):
            self.items_per_page = config.getint(ini_section, 'items_per_page')

        # Instance states to be gathered in inventory. Default is all of them.
        packet_valid_device_states = [
            'active', 'inactive', 'queued', 'provisioning'
        ]
        self.packet_device_states = []
        if config.has_option(ini_section, 'device_states'):
            for device_state in config.get(ini_section,
                                           'device_states').split(','):
                device_state = device_state.strip()
                if device_state not in packet_valid_device_states:
                    continue
                self.packet_device_states.append(device_state)
        else:
            self.packet_device_states = packet_valid_device_states

        # Cache related
        cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        self.cache_path_cache = cache_dir + "/ansible-packet.cache"
        self.cache_path_index = cache_dir + "/ansible-packet.index"
        self.cache_max_age = config.getint(ini_section, 'cache_max_age')

        # Configure nested groups instead of flat namespace.
        if config.has_option(ini_section, 'nested_groups'):
            self.nested_groups = config.getboolean(ini_section,
                                                   'nested_groups')
        else:
            self.nested_groups = False

        # Replace dash or not in group names
        if config.has_option(ini_section, 'replace_dash_in_groups'):
            self.replace_dash_in_groups = config.getboolean(
                ini_section, 'replace_dash_in_groups')
        else:
            self.replace_dash_in_groups = True

        # Configure which groups should be created.
        group_by_options = [
            'group_by_device_id',
            'group_by_hostname',
            'group_by_facility',
            'group_by_project',
            'group_by_operating_system',
            'group_by_plan_type',
            'group_by_tags',
            'group_by_tag_none',
        ]
        for option in group_by_options:
            if config.has_option(ini_section, option):
                setattr(self, option, config.getboolean(ini_section, option))
            else:
                setattr(self, option, True)

        # Do we need to just include hosts that match a pattern?
        try:
            pattern_include = config.get(ini_section, 'pattern_include')
            if pattern_include and len(pattern_include) > 0:
                self.pattern_include = re.compile(pattern_include)
            else:
                self.pattern_include = None
        except configparser.NoOptionError:
            self.pattern_include = None

        # Do we need to exclude hosts that match a pattern?
        try:
            pattern_exclude = config.get(ini_section, 'pattern_exclude')
            if pattern_exclude and len(pattern_exclude) > 0:
                self.pattern_exclude = re.compile(pattern_exclude)
            else:
                self.pattern_exclude = None
        except configparser.NoOptionError:
            self.pattern_exclude = None

        # Projects
        self.projects = []
        configProjects = config.get(ini_section, 'projects')
        configProjects_exclude = config.get(ini_section, 'projects_exclude')
        if configProjects == 'all':
            # Split the exclude option so the check matches whole project
            # names rather than substrings
            excluded = [p.strip() for p in configProjects_exclude.split(',')]
            for projectInfo in self.get_projects():
                if projectInfo.name not in excluded:
                    self.projects.append(projectInfo.name)
        else:
            self.projects = configProjects.split(",")
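On Python 3, the has_option/else dance used throughout this example can be collapsed with the fallback= keyword argument; a minimal sketch (the 'packet' section name is assumed to match this script's ini_section):

import configparser

config = configparser.ConfigParser()
config.read_string(u'[packet]\n')
# Equivalent to the has_option() check plus an else branch
nested_groups = config.getboolean('packet', 'nested_groups', fallback=False)
items_per_page = config.getint('packet', 'items_per_page', fallback=999)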
Example #3
def main():
    """This module examines certificates (in various forms) which compose
an OpenShift Container Platform cluster
    """

    module = AnsibleModule(
        argument_spec=dict(
            config_base=dict(
                required=False,
                default="/etc/origin",
                type='str'),
            warning_days=dict(
                required=False,
                default=30,
                type='int'),
            show_all=dict(
                required=False,
                default=False,
                type='bool')
        ),
        supports_check_mode=True,
    )

    # Basic scaffolding for OpenShift specific certs
    openshift_base_config_path = os.path.realpath(module.params['config_base'])
    openshift_master_config_path = os.path.join(openshift_base_config_path,
                                                "master", "master-config.yaml")
    openshift_node_config_path = os.path.join(openshift_base_config_path,
                                              "node", "node-config.yaml")
    openshift_cert_check_paths = [
        openshift_master_config_path,
        openshift_node_config_path,
    ]

    # Paths for Kubeconfigs. Additional kubeconfigs are conditionally
    # checked later in the code
    master_kube_configs = ['admin', 'openshift-master',
                           'openshift-node', 'openshift-router',
                           'openshift-registry']

    kubeconfig_paths = []
    for m_kube_config in master_kube_configs:
        kubeconfig_paths.append(
            os.path.join(openshift_base_config_path, "master", m_kube_config + ".kubeconfig")
        )

    # Validate some paths we have the ability to do ahead of time
    openshift_cert_check_paths = filter_paths(openshift_cert_check_paths)
    kubeconfig_paths = filter_paths(kubeconfig_paths)

    # etcd, where do you hide your certs? Used when parsing etcd.conf
    etcd_cert_params = [
        "ETCD_TRUSTED_CA_FILE",
        "ETCD_CERT_FILE",
        "ETCD_PEER_TRUSTED_CA_FILE",
        "ETCD_PEER_CERT_FILE",
    ]

    # Expiry checking stuff
    now = datetime.datetime.now()
    # TODO: catch exception for invalid input and return a fail_json
    warning_days = int(module.params['warning_days'])
    expire_window = datetime.timedelta(days=warning_days)

    # Module stuff
    #
    # The results of our cert checking to return from the task call
    check_results = {}
    check_results['meta'] = {}
    check_results['meta']['warning_days'] = warning_days
    check_results['meta']['checked_at_time'] = str(now)
    check_results['meta']['warn_before_date'] = str(now + expire_window)
    check_results['meta']['show_all'] = str(module.params['show_all'])
    # All the analyzed certs accumulate here
    ocp_certs = []

    ######################################################################
    # Sure, why not? Let's enable check mode.
    if module.check_mode:
        check_results['ocp_certs'] = []
        module.exit_json(
            check_results=check_results,
            msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'],
            rc=0,
            changed=False
        )

    ######################################################################
    # Check for OpenShift Container Platform specific certs
    ######################################################################
    for os_cert in filter_paths(openshift_cert_check_paths):
        # Open up that config file and locate the cert and CA
        with io.open(os_cert, 'r', encoding='utf-8') as fp:
            cert_meta = {}
            cfg = yaml.safe_load(fp)
            # cert files are specified in parsed `fp` as relative to the path
            # of the original config file. 'master-config.yaml' with certFile
            # = 'foo.crt' implies that 'foo.crt' is in the same
            # directory. certFile = '../foo.crt' is in the parent directory.
            cfg_path = os.path.dirname(fp.name)
            cert_meta['certFile'] = os.path.join(cfg_path, cfg['servingInfo']['certFile'])
            cert_meta['clientCA'] = os.path.join(cfg_path, cfg['servingInfo']['clientCA'])

        ######################################################################
        # Load the certificate and the CA, parse their expiration dates into
        # datetime objects so we can manipulate them later
        for v in cert_meta.values():
            with io.open(v, 'r', encoding='utf-8') as fp:
                cert = fp.read()
                (cert_subject,
                 cert_expiry_date,
                 time_remaining,
                 cert_serial) = load_and_handle_cert(cert, now, ans_module=module)

                expire_check_result = {
                    'cert_cn': cert_subject,
                    'path': fp.name,
                    'expiry': cert_expiry_date,
                    'days_remaining': time_remaining.days,
                    'health': None,
                    'serial': cert_serial
                }

                classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs)

    ######################################################################
    # /Check for OpenShift Container Platform specific certs
    ######################################################################

    ######################################################################
    # Check service Kubeconfigs
    ######################################################################
    kubeconfigs = []

    # There may be additional kubeconfigs to check, but their naming
    # is less predictable than the ones we've already assembled.

    try:
        # Try to read the standard 'node-config.yaml' file to check if
        # this host is a node.
        with io.open(openshift_node_config_path, 'r', encoding='utf-8') as fp:
            cfg = yaml.safe_load(fp)

        # OK, the config file exists, therefore this is a
        # node. Nodes have their own kubeconfig files to
        # communicate with the master API. Let's read the relative
        # path to that file from the node config.
        node_masterKubeConfig = cfg['masterKubeConfig']
        # As before, the path to the 'masterKubeConfig' file is
        # relative to `fp`
        cfg_path = os.path.dirname(fp.name)
        node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig)

        with io.open(node_kubeconfig, 'r', encoding='utf-8') as fp:
            # Read in the nodes kubeconfig file and grab the good stuff
            cfg = yaml.safe_load(fp)

        c = cfg['users'][0]['user']['client-certificate-data']
        (cert_subject,
         cert_expiry_date,
         time_remaining,
         cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)

        expire_check_result = {
            'cert_cn': cert_subject,
            'path': fp.name,
            'expiry': cert_expiry_date,
            'days_remaining': time_remaining.days,
            'health': None,
            'serial': cert_serial
        }

        classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
    except IOError:
        # This is not a node
        pass

    for kube in filter_paths(kubeconfig_paths):
        with io.open(kube, 'r', encoding='utf-8') as fp:
            # TODO: Maybe consider catching exceptions here?
            cfg = yaml.safe_load(fp)

        # Per conversation, "the kubeconfigs you care about:
        # admin, router, registry should all be single
        # value". Following that advice we only grab the data for
        # the user at index 0 in the 'users' list. There should
        # not be more than one user.
        c = cfg['users'][0]['user']['client-certificate-data']
        (cert_subject,
         cert_expiry_date,
         time_remaining,
         cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)

        expire_check_result = {
            'cert_cn': cert_subject,
            'path': fp.name,
            'expiry': cert_expiry_date,
            'days_remaining': time_remaining.days,
            'health': None,
            'serial': cert_serial
        }

        classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)

    ######################################################################
    # /Check service Kubeconfigs
    ######################################################################

    ######################################################################
    # Check etcd certs
    #
    # Two things to check: 'external' etcd, and embedded etcd.
    ######################################################################
    # FIRST: The 'external' etcd
    #
    # Some values may be duplicated; use a set so they are de-duplicated
    etcd_certs_to_check = set()
    etcd_certs = []
    etcd_cert_params.append('dne')
    try:
        with io.open('/etc/etcd/etcd.conf', 'r', encoding='utf-8') as fp:
            # Add dummy header section.
            config = io.StringIO()
            config.write(u'[ETCD]\n')
            config.write(fp.read().replace('%', '%%'))
            config.seek(0, os.SEEK_SET)

            etcd_config = configparser.ConfigParser()
            etcd_config.readfp(config)

        for param in etcd_cert_params:
            try:
                etcd_certs_to_check.add(etcd_config.get('ETCD', param))
            except configparser.NoOptionError:
                # That parameter does not exist, oh well...
                pass
    except IOError:
        # No etcd to see here, move along
        pass

    for etcd_cert in filter_paths(etcd_certs_to_check):
        with io.open(etcd_cert, 'r', encoding='utf-8') as fp:
            c = fp.read()
            (cert_subject,
             cert_expiry_date,
             time_remaining,
             cert_serial) = load_and_handle_cert(c, now, ans_module=module)

            expire_check_result = {
                'cert_cn': cert_subject,
                'path': fp.name,
                'expiry': cert_expiry_date,
                'days_remaining': time_remaining.days,
                'health': None,
                'serial': cert_serial
            }

            classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)

    ######################################################################
    # /Check etcd certs
    ######################################################################

    ######################################################################
    # Check router/registry certs
    #
    # These are saved as secrets in etcd. That means that we can not
    # simply read a file to grab the data. Instead we're going to
    # subprocess out to the 'oc get' command. On non-masters this
    # command will fail, that is expected so we catch that exception.
    ######################################################################
    router_certs = []
    registry_certs = []

    ######################################################################
    # First the router certs
    try:
        router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(),
                                              stdout=subprocess.PIPE)
        router_ds = yaml.safe_load(router_secrets_raw.communicate()[0])
        router_c = router_ds['data']['tls.crt']
        router_path = router_ds['metadata']['selfLink']
    except TypeError:
        # YAML couldn't load the result, this is not a master
        pass
    except OSError:
        # The OC command doesn't exist here. Move along.
        pass
    else:
        (cert_subject,
         cert_expiry_date,
         time_remaining,
         cert_serial) = load_and_handle_cert(router_c, now, base64decode=True, ans_module=module)

        expire_check_result = {
            'cert_cn': cert_subject,
            'path': router_path,
            'expiry': cert_expiry_date,
            'days_remaining': time_remaining.days,
            'health': None,
            'serial': cert_serial
        }

        classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs)

    ######################################################################
    # Now for registry
    try:
        registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(),
                                                stdout=subprocess.PIPE)
        registry_ds = yaml.safe_load(registry_secrets_raw.communicate()[0])
        registry_c = registry_ds['data']['registry.crt']
        registry_path = registry_ds['metadata']['selfLink']
    except TypeError:
        # YAML couldn't load the result, this is not a master
        pass
    except OSError:
        # The OC command doesn't exist here. Move along.
        pass
    else:
        (cert_subject,
         cert_expiry_date,
         time_remaining,
         cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True, ans_module=module)

        expire_check_result = {
            'cert_cn': cert_subject,
            'path': registry_path,
            'expiry': cert_expiry_date,
            'days_remaining': time_remaining.days,
            'health': None,
            'serial': cert_serial
        }

        classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs)

    ######################################################################
    # /Check router/registry certs
    ######################################################################

    res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs)

    msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format(
        count=res['total'],
        exp=res['expired'],
        warn=res['warning'],
        ok=res['ok'],
        window=int(module.params['warning_days']),
    )

    # By default we only return detailed information about expired or
    # warning certificates. If show_all is true then we will print all
    # the certificates examined.
    if not module.params['show_all']:
        check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']]
        check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']]
        check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']]
        check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']]
        check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']]
    else:
        check_results['ocp_certs'] = ocp_certs
        check_results['kubeconfigs'] = kubeconfigs
        check_results['etcd'] = etcd_certs
        check_results['registry'] = registry_certs
        check_results['router'] = router_certs

    # Sort the final results to report in order of ascending safety
    # time. That is to say, the certificates which will expire sooner
    # will be at the front of the list and certificates which will
    # expire later are at the end. Router and registry certs should be
    # limited to just 1 result, so don't bother sorting those.
    def cert_key(item):
        ''' return the days_remaining key '''
        return item['days_remaining']

    check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key)
    check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key)
    check_results['etcd'] = sorted(check_results['etcd'], key=cert_key)

    # This module will never change anything, but we might want to
    # change the return code parameter if there is some catastrophic
    # error we noticed earlier
    module.exit_json(
        check_results=check_results,
        summary=res,
        msg=msg,
        rc=0,
        changed=False
    )
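The etcd.conf handling in this example leans on a handy trick: configparser refuses sectionless files, so a dummy [ETCD] header is prepended (with '%' doubled to survive interpolation) before parsing the key=value pairs. On Python 3 the same idea is shorter with read_string; a sketch:

import configparser

raw = u'ETCD_CERT_FILE=/etc/etcd/server.crt\nETCD_TRUSTED_CA_FILE=/etc/etcd/ca.crt\n'
etcd_config = configparser.ConfigParser()
etcd_config.read_string(u'[ETCD]\n' + raw.replace('%', '%%'))
# option names are lowercased by optionxform, but get() lowercases the query too
print(etcd_config.get('ETCD', 'ETCD_CERT_FILE'))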
Example #4
def parse_from_mysql_config_file(cnf):
    cp = configparser.ConfigParser()
    cp.read(cnf)
    return cp
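Typical usage of this helper is pulling client credentials out of a MySQL option file; a hedged sketch (the path and keys are illustrative, and the fallback= keyword assumes Python 3):

import os

cp = parse_from_mysql_config_file(os.path.expanduser('~/.my.cnf'))
if cp.has_section('client'):
    user = cp.get('client', 'user', fallback=None)
    password = cp.get('client', 'password', fallback=None)
# note: a my.cnf containing bare flags such as skip-networking would need
# the parser to be built with allow_no_value=True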
Example #5
File: ini.py  Project: vladdou/ansible
    def run(self, terms, variables=None, **kwargs):

        self.set_options(var_options=variables, direct=kwargs)
        paramvals = self.get_options()

        self.cp = configparser.ConfigParser()
        if paramvals['case_sensitive']:
            self.cp.optionxform = to_native

        ret = []
        for term in terms:

            key = term
            # parameters specified?
            if '=' in term or ' ' in term.strip():
                self._deprecate_inline_kv()
                params = _parse_params(term, paramvals)
                try:
                    updated_key = False
                    for param in params:
                        if '=' in param:
                            name, value = param.split('=', 1)
                            if name not in paramvals:
                                raise AnsibleLookupError(
                                    '%s is not a valid option.' % name)
                            paramvals[name] = value
                        elif key == term:
                            # only take first, this format never supported multiple keys inline
                            key = param
                            updated_key = True
                except ValueError as e:
                    # bad params passed
                    raise AnsibleLookupError(
                        "Could not use '%s' from '%s': %s" %
                        (param, params, to_native(e)),
                        orig_exc=e)
                if not updated_key:
                    # only options were passed in the inline string; there is
                    # no key to look up
                    raise AnsibleOptionsError(
                        "No key to lookup was provided as first term within string inline options: %s"
                        % term)

            # TODO: look to use cache to avoid redoing this for every term if they use same file
            # Retrieve file path
            path = self.find_file_in_search_path(variables, 'files',
                                                 paramvals['file'])

            # Create StringIO later used to parse ini
            config = StringIO()
            # Special case for java properties
            if paramvals['type'] == "properties":
                config.write(u'[java_properties]\n')
                paramvals['section'] = 'java_properties'

            # Open file using encoding
            contents, show_data = self._loader._get_file_contents(path)
            contents = to_text(contents,
                               errors='surrogate_or_strict',
                               encoding=paramvals['encoding'])
            config.write(contents)
            config.seek(0, os.SEEK_SET)

            try:
                self.cp.readfp(config)
            except configparser.DuplicateOptionError as doe:
                raise AnsibleLookupError(
                    "Duplicate option in '{file}': {error}".format(
                        file=paramvals['file'], error=to_native(doe)))

            try:
                var = self.get_value(key, paramvals['section'],
                                     paramvals['default'], paramvals['re'])
            except configparser.NoSectionError:
                raise AnsibleLookupError(
                    "No section '{section}' in {file}".format(
                        section=paramvals['section'], file=paramvals['file']))
            if var is not None:
                if isinstance(var, MutableSequence):
                    for v in var:
                        ret.append(v)
                else:
                    ret.append(var)
        return ret
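The DuplicateOptionError handling above exists because Python 3's ConfigParser is strict by default; passing strict=False keeps the last value for a repeated option instead of raising. A sketch:

import configparser

text = u'[global]\nkey = 1\nkey = 2\n'
try:
    configparser.ConfigParser().read_string(text)
except configparser.DuplicateOptionError as doe:
    print('strict parse failed: %s' % doe)

cp = configparser.ConfigParser(strict=False)
cp.read_string(text)
print(cp.get('global', 'key'))  # '2' -- the last assignment wins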
Example #6
def read_config(cfg_file):
    cfg_parser = configparser.ConfigParser(allow_no_value=True)
    cfg_parser.optionxform = lambda option: option  # preserve as case-sensitive
    cfg_parser.read(cfg_file)
    return cfg_parser
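read_config overrides optionxform because ConfigParser lowercases option names by default (optionxform is effectively str.lower); the identity function keeps keys case-sensitive. A sketch of the difference:

import configparser

text = u'[Main]\nMixedCase = 1\n'
cp = configparser.ConfigParser()
cp.read_string(text)
print(cp.options('Main'))  # ['mixedcase'] -- lowercased by default

cs = configparser.ConfigParser()
cs.optionxform = lambda option: option  # preserve case
cs.read_string(text)
print(cs.options('Main'))  # ['MixedCase']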
Example #7
    def run(self, terms, variables=None, **kwargs):

        self.cp = configparser.ConfigParser()

        ret = []
        for term in terms:
            params = _parse_params(term)
            key = params[0]

            paramvals = {
                'file': 'ansible.ini',
                're': False,
                'default': None,
                'section': "global",
                'type': "ini",
                'encoding': 'utf-8',
            }

            # parameters specified?
            try:
                for param in params[1:]:
                    name, value = param.split('=', 1)
                    if name not in paramvals:
                        raise AnsibleAssertionError('%s not in paramvals' %
                                                    name)
                    paramvals[name] = value
            except (ValueError, AssertionError) as e:
                raise AnsibleError(e)

            # Retrieve file path
            path = self.find_file_in_search_path(variables, 'files',
                                                 paramvals['file'])

            # Create StringIO later used to parse ini
            config = StringIO()
            # Special case for java properties
            if paramvals['type'] == "properties":
                config.write(u'[java_properties]\n')
                paramvals['section'] = 'java_properties'

            # Open file using encoding
            contents, show_data = self._loader._get_file_contents(path)
            contents = to_text(contents,
                               errors='surrogate_or_strict',
                               encoding=paramvals['encoding'])
            config.write(contents)
            config.seek(0, os.SEEK_SET)

            try:
                self.cp.readfp(config)
            except configparser.DuplicateOptionError as doe:
                raise AnsibleLookupError(
                    "Duplicate option in '{file}': {error}".format(
                        file=paramvals['file'], error=to_native(doe)))

            try:
                var = self.get_value(key, paramvals['section'],
                                     paramvals['default'], paramvals['re'])
            except configparser.NoSectionError:
                raise AnsibleLookupError(
                    "No section '{section}' in {file}".format(
                        section=paramvals['section'], file=paramvals['file']))
            if var is not None:
                if isinstance(var, MutableSequence):
                    for v in var:
                        ret.append(v)
                else:
                    ret.append(var)
        return ret
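Both lookup variants call cp.readfp(); that method was deprecated in Python 3.2 in favor of read_file (read_string for in-memory text) and removed in Python 3.12. A standalone sketch of the replacements:

import configparser
from io import StringIO

cp = configparser.ConfigParser()
buf = StringIO(u'[global]\nkey = value\n')
cp.read_file(buf)  # replaces the deprecated cp.readfp(buf)
# or skip the buffer entirely:
cp.read_string(u'[global]\nkey = value\n')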
Example #8
    def read_settings(self):
        """Reads the settings from the foreman.ini file"""

        config = ConfigParser.ConfigParser()
        config.read(self.config_paths)

        # Foreman API related
        try:
            self.foreman_url = config.get('foreman', 'url')
            self.foreman_user = config.get('foreman', 'user')
            self.foreman_pw = config.get('foreman', 'password', raw=True)
            self.foreman_ssl_verify = config.getboolean(
                'foreman', 'ssl_verify')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e:
            print("Error parsing configuration: %s" % e, file=sys.stderr)
            return False

        # Ansible related
        try:
            group_patterns = config.get('ansible', 'group_patterns')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            group_patterns = "[]"

        self.group_patterns = json.loads(group_patterns)

        try:
            self.group_prefix = config.get('ansible', 'group_prefix')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.group_prefix = "foreman_"

        try:
            self.want_facts = config.getboolean('ansible', 'want_facts')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.want_facts = True

        try:
            self.want_hostcollections = config.getboolean(
                'ansible', 'want_hostcollections')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.want_hostcollections = False

        try:
            self.want_ansible_ssh_host = config.getboolean(
                'ansible', 'want_ansible_ssh_host')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.want_ansible_ssh_host = False

        # Do we want parameters to be interpreted if possible as JSON? (no by default)
        try:
            self.rich_params = config.getboolean('ansible', 'rich_params')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.rich_params = False

        try:
            self.host_filters = config.get('foreman', 'host_filters')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.host_filters = None

        # Cache related
        try:
            cache_path = os.path.expanduser(config.get('cache', 'path'))
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            cache_path = '.'
        (script, ext) = os.path.splitext(os.path.basename(__file__))
        self.cache_path_cache = cache_path + "/%s.cache" % script
        self.cache_path_inventory = cache_path + "/%s.index" % script
        self.cache_path_params = cache_path + "/%s.params" % script
        self.cache_path_facts = cache_path + "/%s.facts" % script
        self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script
        try:
            self.cache_max_age = config.getint('cache', 'max_age')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.cache_max_age = 60
        try:
            self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts')
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.scan_new_hosts = False

        return True
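Note the raw=True on the password lookup above: by default ConfigParser applies %-interpolation, so a literal '%' in a password raises InterpolationSyntaxError unless the value is read raw. A sketch:

import configparser

cp = configparser.ConfigParser()
cp.read_string(u'[foreman]\npassword = p%ss\n')
try:
    cp.get('foreman', 'password')
except configparser.InterpolationSyntaxError as e:
    print('interpolation failed: %s' % e)
print(cp.get('foreman', 'password', raw=True))  # p%ss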
Example #9
    def read_settings(self):
        ''' Reads the settings from the vmware_inventory.ini file '''

        scriptbasename = os.path.basename(__file__).replace('.py', '')

        defaults = {
            'vmware': {
                'server': '',
                'port': 443,
                'username': '',
                'password': '',
                'validate_certs': True,
                'ini_path': os.path.join(os.path.dirname(__file__),
                                         '%s.ini' % scriptbasename),
                'cache_name': 'ansible-vmware',
                'cache_path': '~/.ansible/tmp',
                'cache_max_age': 3600,
                'max_object_level': 1,
                'skip_keys': 'declaredalarmstate,'
                             'disabledmethod,'
                             'dynamicproperty,'
                             'dynamictype,'
                             'environmentbrowser,'
                             'managedby,'
                             'parent,'
                             'childtype,'
                             'resourceconfig',
                'alias_pattern': '{{ config.name + "_" + config.uuid }}',
                'host_pattern': '{{ guest.ipaddress }}',
                'host_filters': '{{ runtime.powerstate == "poweredOn" }}',
                'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests" }}',
                'lower_var_keys': True,
                'custom_field_group_prefix': 'vmware_tag_',
                'groupby_custom_field_excludes': '',
                'groupby_custom_field': False
            }
        }

        if PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()

        # where is the config?
        vmware_ini_path = os.environ.get('VMWARE_INI_PATH',
                                         defaults['vmware']['ini_path'])
        vmware_ini_path = os.path.expanduser(
            os.path.expandvars(vmware_ini_path))
        config.read(vmware_ini_path)

        if 'vmware' not in config.sections():
            config.add_section('vmware')

        # apply defaults
        for k, v in defaults['vmware'].items():
            if not config.has_option('vmware', k):
                config.set('vmware', k, str(v))

        # where is the cache?
        self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
        if self.cache_dir and not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)

        # set the cache filename and max age
        cache_name = config.get('vmware', 'cache_name')
        self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
        self.debugl('cache path is %s' % self.cache_path_cache)
        self.cache_max_age = config.getint('vmware', 'cache_max_age')

        # mark the connection info
        self.server = os.environ.get('VMWARE_SERVER',
                                     config.get('vmware', 'server'))
        self.debugl('server is %s' % self.server)
        self.port = int(
            os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
        self.username = os.environ.get('VMWARE_USERNAME',
                                       config.get('vmware', 'username'))
        self.debugl('username is %s' % self.username)
        self.password = os.environ.get(
            'VMWARE_PASSWORD', config.get('vmware', 'password', raw=True))
        self.validate_certs = os.environ.get(
            'VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs'))
        if self.validate_certs in ['no', 'false', 'False', False]:
            self.validate_certs = False

        self.debugl('cert validation is %s' % self.validate_certs)

        # behavior control
        self.maxlevel = int(config.get('vmware', 'max_object_level'))
        self.debugl('max object level is %s' % self.maxlevel)
        self.lowerkeys = config.get('vmware', 'lower_var_keys')
        if not isinstance(self.lowerkeys, bool):
            if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
                self.lowerkeys = True
            else:
                self.lowerkeys = False
        self.debugl('lower keys is %s' % self.lowerkeys)
        self.skip_keys = list(config.get('vmware', 'skip_keys').split(','))
        self.debugl('skip keys is %s' % self.skip_keys)
        temp_host_filters = list(
            config.get('vmware', 'host_filters').split('}},'))
        for host_filter in temp_host_filters:
            host_filter = host_filter.rstrip()
            if host_filter != "":
                if not host_filter.endswith("}}"):
                    host_filter += "}}"
                self.host_filters.append(host_filter)
        self.debugl('host filters are %s' % self.host_filters)

        temp_groupby_patterns = list(
            config.get('vmware', 'groupby_patterns').split('}},'))
        for groupby_pattern in temp_groupby_patterns:
            groupby_pattern = groupby_pattern.rstrip()
            if groupby_pattern != "":
                if not groupby_pattern.endswith("}}"):
                    groupby_pattern += "}}"
                self.groupby_patterns.append(groupby_pattern)
        self.debugl('groupby patterns are %s' % self.groupby_patterns)
        temp_groupby_custom_field_excludes = config.get(
            'vmware', 'groupby_custom_field_excludes')
        self.groupby_custom_field_excludes = [
            x.strip('"') for x in [
                y.strip("'")
                for y in temp_groupby_custom_field_excludes.split(",")
            ]
        ]
        self.debugl('groupby exclude strings are %s' %
                    self.groupby_custom_field_excludes)

        # Special feature to disable the brute force serialization of the
        # virtual machine objects. The key name for these properties does not
        # matter because the values are just items for a larger list.
        if config.has_section('properties'):
            self.guest_props = []
            for prop in config.items('properties'):
                self.guest_props.append(prop[1])

        # save the config
        self.config = config
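A side note on the str(v) in the defaults-applying loop above: it is required because, on Python 3, ConfigParser.set() rejects non-string values with a TypeError, so the ints and booleans in the defaults dict must be stringified first. A sketch:

import configparser

cp = configparser.ConfigParser()
cp.add_section('vmware')
try:
    cp.set('vmware', 'port', 443)
except TypeError as e:
    print('set() needs a string: %s' % e)
cp.set('vmware', 'port', str(443))
print(cp.getint('vmware', 'port'))  # 443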
Example #10
def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=False),
            repo=dict(required=False),
            state=dict(choices=['present', 'absent'], default='present'),
            runrefresh=dict(required=False, default=False, type='bool'),
            description=dict(required=False),
            disable_gpg_check=dict(required=False, default=False, type='bool'),
            autorefresh=dict(required=False,
                             default=True,
                             type='bool',
                             aliases=['refresh']),
            priority=dict(required=False, type='int'),
            enabled=dict(required=False, default=True, type='bool'),
            overwrite_multiple=dict(required=False, default=False,
                                    type='bool'),
            auto_import_keys=dict(required=False, default=False, type='bool'),
        ),
        supports_check_mode=False,
        required_one_of=[['state', 'runrefresh']],
    )

    repo = module.params['repo']
    alias = module.params['name']
    state = module.params['state']
    overwrite_multiple = module.params['overwrite_multiple']
    auto_import_keys = module.params['auto_import_keys']
    runrefresh = module.params['runrefresh']

    zypper_version = get_zypper_version(module)
    warnings = []  # collect warning messages for final output

    repodata = {
        'url': repo,
        'alias': alias,
        'name': module.params['description'],
        'priority': module.params['priority'],
    }
    # rewrite bools in the language that zypper lr -x provides for easier comparison
    if module.params['enabled']:
        repodata['enabled'] = '1'
    else:
        repodata['enabled'] = '0'
    if module.params['disable_gpg_check']:
        repodata['gpgcheck'] = '0'
    else:
        repodata['gpgcheck'] = '1'
    if module.params['autorefresh']:
        repodata['autorefresh'] = '1'
    else:
        repodata['autorefresh'] = '0'

    def exit_unchanged():
        module.exit_json(changed=False, repodata=repodata, state=state)

    # Check run-time module parameters
    if repo == '*' or alias == '*':
        if runrefresh:
            runrefreshrepo(module, auto_import_keys)
            module.exit_json(changed=False, runrefresh=True)
        else:
            module.fail_json(
                msg='repo=* can only be used with the runrefresh option.')

    if state == 'present' and not repo:
        module.fail_json(msg='Module option state=present requires repo')
    if state == 'absent' and not repo and not alias:
        module.fail_json(
            msg='Alias or repo parameter required when state=absent')

    if repo and repo.endswith('.repo'):
        if alias:
            module.fail_json(
                msg="Incompatible option: 'name'. Do not use name when adding .repo files")
    else:
        if not alias and state == "present":
            module.fail_json(msg='Name required when adding non-repo files.')

    # Download / Open and parse .repo file to ensure idempotency
    if repo and repo.endswith('.repo'):
        if repo.startswith(('http://', 'https://')):
            response, info = fetch_url(module=module, url=repo, force=True)
            if not response or info['status'] != 200:
                module.fail_json(
                    msg='Error downloading .repo file from provided URL')
            repofile_text = to_text(response.read(),
                                    errors='surrogate_or_strict')
        else:
            try:
                with open(repo, encoding='utf-8') as file:
                    repofile_text = file.read()
            except IOError:
                module.fail_json(
                    msg='Error opening .repo file from provided path')

        repofile = configparser.ConfigParser()
        try:
            repofile.readfp(StringIO(repofile_text))
        except configparser.Error:
            module.fail_json(
                msg='Invalid format, .repo file could not be parsed')

        # No support for .repo file with zero or more than one repository
        if len(repofile.sections()) != 1:
            err = "Invalid format, .repo file contains %s repositories, expected 1" % len(
                repofile.sections())
            module.fail_json(msg=err)

        section = repofile.sections()[0]
        repofile_items = dict(repofile.items(section))
        # Only proceed if at least baseurl is available
        if 'baseurl' not in repofile_items:
            module.fail_json(msg='No baseurl found in .repo file')

        # Set alias (name) and url based on values from .repo file
        alias = section
        repodata['alias'] = section
        repodata['url'] = repofile_items['baseurl']

        # If gpgkey is part of the .repo file, auto import key
        if 'gpgkey' in repofile_items:
            auto_import_keys = True

        # Map additional values, if available
        if 'name' in repofile_items:
            repodata['name'] = repofile_items['name']
        if 'enabled' in repofile_items:
            repodata['enabled'] = repofile_items['enabled']
        if 'autorefresh' in repofile_items:
            repodata['autorefresh'] = repofile_items['autorefresh']
        if 'gpgcheck' in repofile_items:
            repodata['gpgcheck'] = repofile_items['gpgcheck']

    exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)

    if alias:
        shortname = alias
    else:
        shortname = repo

    if state == 'present':
        if exists and not mod:
            if runrefresh:
                runrefreshrepo(module, auto_import_keys, shortname)
            exit_unchanged()
        rc, stdout, stderr = addmodify_repo(module, repodata, old_repos,
                                            zypper_version, warnings)
        if rc == 0 and (runrefresh or auto_import_keys):
            runrefreshrepo(module, auto_import_keys, shortname)
    elif state == 'absent':
        if not exists:
            exit_unchanged()
        rc, stdout, stderr = remove_repo(module, shortname)

    if rc == 0:
        module.exit_json(changed=True,
                         repodata=repodata,
                         state=state,
                         warnings=warnings)
    else:
        module.fail_json(msg="Zypper failed with rc %s" % rc,
                         rc=rc,
                         stdout=stdout,
                         stderr=stderr,
                         repodata=repodata,
                         state=state,
                         warnings=warnings)
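The .repo validation above can be reproduced compactly with read_string; a minimal sketch of the single-section and baseurl checks, using made-up repo text:

import configparser

repofile_text = u'[myrepo]\nname=My Repo\nbaseurl=https://example.com/repo\nenabled=1\n'
repofile = configparser.ConfigParser()
repofile.read_string(repofile_text)

assert len(repofile.sections()) == 1, 'expected exactly one repository'
section = repofile.sections()[0]
repofile_items = dict(repofile.items(section))
assert 'baseurl' in repofile_items, 'no baseurl found in .repo file'
print(section, repofile_items['baseurl'])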
Example #11
    def __init__(self):
        super(CallbackModule, self).__init__()

        if not HAS_SSL:
            self._display.warning(
                "Unable to import ssl module. Will send over port 80.")

        if not HAS_CERTIFI:
            self.disabled = True
            self._display.warning(
                'The `certifi` python module is not installed. '
                'Disabling the Logentries callback plugin.')

        if not HAS_FLATDICT:
            self.disabled = True
            self._display.warning(
                'The `flatdict` python module is not installed. '
                'Disabling the Logentries callback plugin.')

        config_path = os.path.abspath(os.path.dirname(__file__))
        config = configparser.ConfigParser()
        try:
            with open(os.path.join(config_path, 'logentries.ini')) as fp:
                config.readfp(fp)
            if config.has_option('logentries', 'api'):
                self.api_uri = config.get('logentries', 'api')
            if config.has_option('logentries', 'port'):
                self.api_port = config.getint('logentries', 'port')
            if config.has_option('logentries', 'tls_port'):
                self.api_tls_port = config.getint('logentries', 'tls_port')
            if config.has_option('logentries', 'use_tls'):
                self.use_tls = config.getboolean('logentries', 'use_tls')
            if config.has_option('logentries', 'token'):
                self.token = config.get('logentries', 'token')
            if config.has_option('logentries', 'flatten'):
                self.flatten = config.getboolean('logentries', 'flatten')

        except (IOError, configparser.Error, ValueError):
            self.api_uri = os.getenv('LOGENTRIES_API')
            if self.api_uri is None:
                self.api_uri = 'data.logentries.com'

            try:
                self.api_port = int(os.getenv('LOGENTRIES_PORT'))
            except TypeError:
                # LOGENTRIES_PORT unset: int(None) raises TypeError
                self.api_port = 80

            try:
                self.api_tls_port = int(os.getenv('LOGENTRIES_TLS_PORT'))
            except TypeError:
                # LOGENTRIES_TLS_PORT unset: int(None) raises TypeError
                self.api_tls_port = 443

            # this just needs to be set to use TLS
            self.use_tls = os.getenv('LOGENTRIES_USE_TLS')
            if self.use_tls is None:
                self.use_tls = False
            elif self.use_tls.lower() in ['yes', 'true']:
                self.use_tls = True

            self.token = os.getenv('LOGENTRIES_ANSIBLE_TOKEN')
            if self.token is None:
                self.disabled = True
                self._display.warning(
                    'Logentries token could not be loaded. The logentries token can be provided using the `LOGENTRIES_TOKEN` environment '
                    'variable')

            self.flatten = os.getenv('LOGENTRIES_FLATTEN')
            if self.flatten is None:
                self.flatten = False
            elif self.flatten.lower() in ['yes', 'true']:
                self.flatten = True

        self.verbose = False
        self.timeout = 10
        self.le_jobid = str(uuid.uuid4())

        if self.use_tls:
            self._appender = TLSSocketAppender(verbose=self.verbose,
                                               LE_API=self.api_uri,
                                               LE_TLS_PORT=self.api_tls_port)
        else:
            self._appender = PlainTextSocketAppender(verbose=self.verbose,
                                                     LE_API=self.api_uri,
                                                     LE_PORT=self.api_port)
        self._appender.reopen_connection()
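The int(os.getenv(...)) fallbacks above work only because int(None) raises TypeError; passing a default string to os.getenv expresses the same intent directly. A sketch:

import os

api_port = int(os.getenv('LOGENTRIES_PORT', '80'))
api_tls_port = int(os.getenv('LOGENTRIES_TLS_PORT', '443'))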
Example #12
    if cache_available(config):
        inv = get_cache('scaleway_ansible_inventory.json', config)
    else:
        inv = generate_inv_from_api(config)

    save_cache(inv, config)
    return json.dumps(inv)


if __name__ == '__main__':
    inventory = {}

    # Read config
    if six.PY3:
        config = configparser.ConfigParser()
    else:
        config = configparser.SafeConfigParser()
    for configfilename in [
            os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini',
            'scaleway.ini'
    ]:
        if os.path.exists(configfilename):
            config.read(configfilename)
            break

    if cache_available(config):
        inventory = get_cache('scaleway_ansible_inventory.json', config)
    else:
        inventory = get_inventory(config)
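The candidate-file loop above stops at the first existing file. ConfigParser.read() also accepts a list of filenames, silently skipping the missing ones and returning the list it actually parsed, though that variant reads every existing file rather than only the first. A sketch (filenames illustrative):

import configparser

config = configparser.ConfigParser()
read_files = config.read(['scaleway_inventory.ini', 'scaleway.ini'])
print('loaded:', read_files)  # only the files that existed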
Example #13
    def collect(self, module=None, collected_facts=None):
        local_facts = {}
        local_facts['local'] = {}

        if not module:
            return local_facts

        fact_path = module.params.get('fact_path', None)

        if not fact_path or not os.path.exists(fact_path):
            return local_facts

        local = {}
        # go over .fact files, run executables, read rest, skip bad with warning and note
        for fn in sorted(glob.glob(fact_path + '/*.fact')):
            # use filename for key where it will sit under local facts
            fact_base = os.path.basename(fn).replace('.fact', '')
            if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
                failed = None
                try:
                    # run it
                    rc, out, err = module.run_command(fn)
                    if rc != 0:
                        failed = 'Failure executing fact script (%s), rc: %s, err: %s' % (
                            fn, rc, err)
                except (IOError, OSError) as e:
                    failed = 'Could not execute fact script (%s): %s' % (
                        fn, to_text(e))

                if failed is not None:
                    local[fact_base] = failed
                    module.warn(failed)
                    continue
            else:
                # ignores exceptions and returns empty
                out = get_file_content(fn, default='')

            try:
                # ensure we have unicode
                out = to_text(out, errors='surrogate_or_strict')
            except UnicodeError:
                fact = 'error loading fact - output of running "%s" was not utf-8' % fn
                local[fact_base] = fact
                module.warn(fact)
                continue

            # try to read it as json first
            try:
                fact = json.loads(out)
            except ValueError:
                # if that fails read it with ConfigParser
                cp = configparser.ConfigParser()
                try:
                    cp.readfp(StringIO(out))
                except configparser.Error:
                    fact = "error loading facts as JSON or ini - please check content: %s" % fn
                    module.warn(fact)
                else:
                    fact = {}
                    for sect in cp.sections():
                        if sect not in fact:
                            fact[sect] = {}
                        for opt in cp.options(sect):
                            val = cp.get(sect, opt)
                            fact[sect][opt] = val
            except Exception as e:
                fact = "Failed to convert (%s) to JSON: %s" % (fn, to_text(e))
                module.warn(fact)

            local[fact_base] = fact

        local_facts['local'] = local
        return local_facts
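The section/option copy loop at the end can also be written as a comprehension; a sketch of the same ConfigParser-to-dict conversion:

import configparser

cp = configparser.ConfigParser()
cp.read_string(u'[general]\nanswer = 42\n')
fact = {sect: dict(cp.items(sect)) for sect in cp.sections()}
print(fact)  # {'general': {'answer': '42'}}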