Example 1
def ext_pillar(
        minion_id,  # pylint: disable=W0613
        pillar,  # pylint: disable=W0613
        config_file):
    '''
    Execute LDAP searches and return the aggregated data
    '''
    if os.path.isfile(config_file):
        import salt.utils.yaml
        try:
            #open(config_file, 'r') as raw_config:
            config = _render_template(config_file) or {}
            opts = salt.utils.yaml.safe_load(config) or {}
            opts['conf_file'] = config_file
        except Exception as err:
            import salt.log
            msg = 'Error parsing configuration file: {0} - {1}'.format(
                config_file, err)
            if salt.log.is_console_configured():
                log.warning(msg)
            else:
                print(msg)
    else:
        log.debug('Missing configuration file: %s', config_file)

    data = {}
    for source in opts['search_order']:
        config = opts[source]
        result = _do_search(config)
        log.debug('source %s got result %s', source, result)
        if result:
            data = _result_to_dict(data, result, config, source)
    return data
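
Note: all of these snippets call a module-level `log` object that is defined outside the functions shown. A minimal sketch of the conventional setup (assumed from standard Salt and stdlib logging practice, not shown in any snippet here):

import logging

log = logging.getLogger(__name__)  # module-level logger behind the log.debug()/log.warning() calls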
Example 2
def ec2_tags():

  try:
    aws = __opts__['ec2_tags']['aws']
    REGION = aws['region']
    AWS_ACCESS_KEY = aws['access_key']
    AWS_SECRET_KEY = aws['secret_key']
  except KeyError:
    log.warning("ec2_tags: aws configuration required in minion and/or minion config for grain to work")
    return None

  # Connect to EC2 and parse the Roles tags for this instance
  conn = boto.ec2.connect_to_region(REGION,
                                    aws_access_key_id=AWS_ACCESS_KEY,
                                    aws_secret_access_key=AWS_SECRET_KEY)

  instance_id = _get_instance_id()

  tags = {}
  try:
    reservation = conn.get_all_instances(instance_ids=[ instance_id ])[0]
    instance = reservation.instances[0]
    tags = instance.tags
  except IndexError as e:
    log.error("Couldn't find information about current instance: %s", e)
    return None

  return tags
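
For context, the `__opts__['ec2_tags']['aws']` lookup above implies minion options shaped roughly as follows. This is a hedged sketch: the key names come from the snippet, the values are placeholders.

# Illustrative shape of the minion options this grain expects (placeholder values)
__opts__ = {
    'ec2_tags': {
        'aws': {
            'region': 'us-east-1',
            'access_key': '<AWS access key>',
            'secret_key': '<AWS secret key>',
        },
    },
}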
Example 3
def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               config_file):
    '''
    Execute LDAP searches and return the aggregated data
    '''
    if os.path.isfile(config_file):
        try:
            #open(config_file, 'r') as raw_config:
            config = _render_template(config_file) or {}
            opts = yaml.safe_load(config) or {}
            opts['conf_file'] = config_file
        except Exception as err:
            import salt.log
            msg = 'Error parsing configuration file: {0} - {1}'
            if salt.log.is_console_configured():
                log.warning(msg.format(config_file, err))
            else:
                print(msg.format(config_file, err))
    else:
        log.debug('Missing configuration file: {0}'.format(config_file))

    data = {}
    for source in opts['search_order']:
        config = opts[source]
        result = _do_search(config)
        print('source {0} got result {1}'.format(source, result))
        if result:
            data = _result_to_dict(data, result, config, source)
    return data
Example 4
 def verify_env(self):
     '''
     Verify that salt-ssh is ready to run
     '''
     if not salt.utils.which('sshpass'):
         log.warning('Warning:  sshpass is not present, so password-based '
                     'authentication is not available.')
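
The `salt.utils.which('sshpass')` call is a PATH lookup. A rough standard-library stand-in for the same check (an approximation, not Salt's implementation):

import shutil

if shutil.which('sshpass') is None:
    # Same condition as above: the sshpass binary is not on PATH
    print('sshpass is not present; password-based authentication is not available')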
Example 5
 def _get_cached_minion_data(self, *minion_ids):
     # Return two separate dicts of cached grains and pillar data of the
     # minions
     grains = {minion_id: {} for minion_id in minion_ids}
     pillars = grains.copy()
     if not self.opts.get("minion_data_cache", False):
         log.debug("Skipping cached data because minion_data_cache is not "
                   "enabled.")
         return grains, pillars
     if not minion_ids:
         minion_ids = self.cache.list("minions")
     for minion_id in minion_ids:
         if not salt.utils.verify.valid_id(self.opts, minion_id):
             continue
         mdata = self.cache.fetch("minions/{}".format(minion_id), "data")
         if not isinstance(mdata, dict):
             log.warning(
                 "cache.fetch should always return a dict. ReturnedType: %s, MinionId: %s",
                 type(mdata).__name__,
                 minion_id,
             )
             continue
         if "grains" in mdata:
             grains[minion_id] = mdata["grains"]
         if "pillar" in mdata:
             pillars[minion_id] = mdata["pillar"]
     return grains, pillars
Example 6
 def _get_cached_minion_data(self, *minion_ids):
     # Return two separate dicts of cached grains and pillar data of the
     # minions
     grains = dict([(minion_id, {}) for minion_id in minion_ids])
     pillars = grains.copy()
     if not self.opts.get('minion_data_cache', False):
         log.debug('Skipping cached data because minion_data_cache is not '
                   'enabled.')
         return grains, pillars
     if not minion_ids:
         minion_ids = self.cache.list('minions')
     for minion_id in minion_ids:
         if not salt.utils.verify.valid_id(self.opts, minion_id):
             continue
         mdata = self.cache.fetch('minions/{0}'.format(minion_id), 'data')
         if not isinstance(mdata, dict):
             log.warning(
                 'cache.fetch should always return a dict. ReturnedType: %s, MinionId: %s',
                 type(mdata).__name__, minion_id)
             continue
         if 'grains' in mdata:
             grains[minion_id] = mdata['grains']
         if 'pillar' in mdata:
             pillars[minion_id] = mdata['pillar']
     return grains, pillars
Example 7
def is_pid_healthy(pid):
    '''
    This is a health check that will confirm the PID is running
    and executed by salt.

    If psutil is available:
        * all architectures are checked

    If psutil is not available:
        * Linux/Solaris/etc: archs with `/proc/cmdline` available are checked
        * AIX/Windows: assume PID is healthy and return True
    '''
    if HAS_PSUTIL:
        try:
            proc = psutil.Process(pid)
        except psutil.NoSuchProcess:
            log.warning("PID %s is no longer running.", pid)
            return False
        return any(['salt' in cmd for cmd in proc.cmdline()])

    if salt.utils.platform.is_aix() or salt.utils.platform.is_windows():
        return True

    if not salt.utils.process.os_is_running(pid):
        log.warning("PID %s is no longer running.", pid)
        return False

    cmdline_file = os.path.join('/proc', str(pid), 'cmdline')
    try:
        with salt.utils.files.fopen(cmdline_file, 'rb') as fp_:
            return b'salt' in fp_.read()
    except (OSError, IOError) as err:
        log.error("There was a problem reading proc file: %s", err)
        return False
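
A brief usage sketch for the health check above (the PID chosen here is only illustrative):

import os

pid = os.getpid()  # any PID of interest; the current process is just an example
if is_pid_healthy(pid):
    print('{0} looks like a running salt process'.format(pid))
else:
    print('{0} is stale or not a salt process'.format(pid))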
Example 8
 def __init__(self, opts):
     salt.minion.SMinion.__init__(self, opts)
     self.collectors = salt.ext.monitor.loader.collectors(opts)
     if "monitor" in self.opts:
         parser = salt.ext.monitor.parsers.get_parser(self)
         self.tasks = parser.parse()
     else:
         log.warning("monitor not configured in /etc/salt/monitor")
         self.tasks = []
Example 9
 def __init__(self, opts):
     salt.minion.SMinion.__init__(self, opts)
     self.collectors = salt.ext.monitor.loader.collectors(opts)
     if 'monitor' in self.opts:
         parser = salt.ext.monitor.parsers.get_parser(self)
         self.tasks = parser.parse()
     else:
         log.warning('monitor not configured in /etc/salt/monitor')
         self.tasks = []
Example 10
    def _thread_multi_return(class_, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if not minion_instance:
            minion_instance = class_(opts)
        ret = {
            'return': {},
            'success': {},
        }
        for ind in range(0, len(data['fun'])):
            for index in range(0, len(data['arg'][ind])):
                try:
                    arg = eval(data['arg'][ind][index])
                    if isinstance(arg, bool):
                        data['arg'][ind][index] = str(data['arg'][ind][index])
                    elif isinstance(arg, (dict, int, list, string_types)):
                        data['arg'][ind][index] = arg
                    else:
                        data['arg'][ind][index] = str(data['arg'][ind][index])
                except Exception:
                    pass

            ret['success'][data['fun'][ind]] = False
            try:
                func = minion_instance.functions[data['fun'][ind]]
                args, kwargs = detect_kwargs(func, data['arg'][ind], data)
                ret['return'][data['fun'][ind]] = func(*args, **kwargs)
                ret['success'][data['fun'][ind]] = True
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning(
                    'The minion function caused an exception: {0}'.format(
                    exc
                    )
                )
                ret['return'][data['fun'][ind]] = trb
            ret['jid'] = data['jid']
        minion_instance._return_pub(ret)
        if data['ret']:
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job {0} {1}'.format(
                        data['jid'],
                        exc
                        )
                    )
Example 11
def get_elb_lbs():
    """
    Returns a dictionary of load balancer names as keys
    each with their respective attributes
    """

    # attributes to extract from the load balancer boto objects
    # this could possibly be a named argument too
    extract_attrs = ['scheme', 'dns_name', 'vpc_id', 'name', 'security_groups']

    try:
        instance_metadata = boto.utils.get_instance_metadata(timeout=5, num_retries=2)
    except Exception as e:
        log.exception("Error getting ELB names: {}".format(e))
        return {'custom_grain_error': True}

    # Setup the lbs grain
    lbs_grain = {'lbs': {}}

    # Collect details about this instance
    vpc_id = instance_metadata['network']['interfaces']['macs'].values()[0]['vpc-id']
    region = instance_metadata['placement']['availability-zone'][:-1]

    # Collect load balancers of this instance (in the same vpc)
    try:
        elb_connection = boto.ec2.elb.connect_to_region(region)

        # find load balancers by vpc_id
        all_lbs = [lb for lb in elb_connection.get_all_load_balancers()
                   if lb.vpc_id == vpc_id]
        log.debug('all lbs before filtering by instance id: {}'.format(all_lbs))

        # further filter the load balancers by instance id
        lbs = [lb for lb in all_lbs for inst in lb.instances
               if inst.id == instance_metadata['instance-id']]
        # initialise and populate the output of load balancers
        out = {}
        [out.update({l.name: {}}) for l in lbs]
        [out[l.name].update({attr: getattr(l, attr, None)})
         for attr in extract_attrs for l in lbs]

        if not out:
            # This loglevel could perhaps be adjusted to something more visible
            log.warning("No ELBs found for this instance, this is unusual, "
                        "but we will not break highstate")

        lbs_grain['lbs'] = out

    except Exception as e:
        # This prints a user-friendly error with stacktrace
        log.exception("Error getting ELB names: {}".format(e))
        return {'custom_grain_error': True}

    return lbs_grain
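
For reference, a successful run of the function above returns a grain shaped roughly like this; the attribute names come from `extract_attrs`, the values are placeholders:

# Illustrative return value (placeholder data)
lbs_grain = {
    'lbs': {
        'example-elb': {
            'scheme': 'internet-facing',
            'dns_name': 'example-elb-123456789.us-east-1.elb.amazonaws.com',
            'vpc_id': 'vpc-0123abcd',
            'name': 'example-elb',
            'security_groups': ['sg-0123abcd'],
        },
    },
}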
Example 12
 def _get_live_minion_pillar(self, minion_id=None, minion_grains=None):
     # Returns a dict of pillar data for one minion
     if minion_id is None:
         return {}
     if not minion_grains:
         log.warning('Cannot get pillar data for %s: no grains supplied.',
                     minion_id)
         return {}
     log.debug('Getting live pillar for %s', minion_id)
     pillar = salt.pillar.Pillar(self.opts, minion_grains, minion_id,
                                 self.saltenv, self.opts['ext_pillar'])
     log.debug('Compiling pillar for %s', minion_id)
     ret = pillar.compile_pillar()
     return ret
Example 13
    def _thread_multi_return(class_, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if not minion_instance:
            minion_instance = class_(opts)
        ret = {
            'return': {},
            'success': {},
        }
        for ind in range(0, len(data['fun'])):
            for index in range(0, len(data['arg'][ind])):
                try:
                    arg = eval(data['arg'][ind][index])
                    if isinstance(arg, bool):
                        data['arg'][ind][index] = str(data['arg'][ind][index])
                    elif isinstance(arg, (dict, int, list, string_types)):
                        data['arg'][ind][index] = arg
                    else:
                        data['arg'][ind][index] = str(data['arg'][ind][index])
                except Exception:
                    pass

            ret['success'][data['fun'][ind]] = False
            try:
                func = minion_instance.functions[data['fun'][ind]]
                args, kwargs = detect_kwargs(func, data['arg'][ind], data)
                ret['return'][data['fun'][ind]] = func(*args, **kwargs)
                ret['success'][data['fun'][ind]] = True
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning(
                    'The minion function caused an exception: {0}'.format(exc))
                ret['return'][data['fun'][ind]] = trb
            ret['jid'] = data['jid']
        minion_instance._return_pub(ret)
        if data['ret']:
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(returner)](
                        ret)
                except Exception as exc:
                    log.error('The return failed for job {0} {1}'.format(
                        data['jid'], exc))
Example 14
def _read_proc_file(path, opts):
    '''
    Return a dict of JID metadata, or None
    '''
    serial = salt.payload.Serial(opts)
    with salt.utils.files.fopen(path, 'rb') as fp_:
        buf = fp_.read()
        fp_.close()
        if buf:
            data = serial.loads(buf)
        else:
            # Proc file is empty, remove
            try:
                os.remove(path)
            except IOError:
                log.debug('Unable to remove proc file %s.', path)
            return None
    if not isinstance(data, dict):
        # Invalid serial object
        return None
    if not salt.utils.process.os_is_running(data['pid']):
        # The process is no longer running, clear out the file and
        # continue
        try:
            os.remove(path)
        except IOError:
            log.debug('Unable to remove proc file %s.', path)
        return None

    if not _check_cmdline(data):
        pid = data.get('pid')
        if pid:
            log.warning(
                'PID %s exists but does not appear to be a salt process.', pid
            )
        try:
            os.remove(path)
        except IOError:
            log.debug('Unable to remove proc file %s.', path)
        return None
    return data
Example 15
 def _get_live_minion_pillar(self, minion_id=None, minion_grains=None):
     # Returns a dict of pillar data for one minion
     if minion_id is None:
         return {}
     if not minion_grains:
         log.warning(
             'Cannot get pillar data for {0}: no grains supplied.'.format(
                 minion_id
             )
         )
         return {}
     log.debug('Getting live pillar for {0}'.format(minion_id))
     pillar = salt.pillar.Pillar(
                         self.opts,
                         minion_grains,
                         minion_id,
                         self.saltenv,
                         self.opts['ext_pillar'])
     log.debug('Compiling pillar for {0}'.format(minion_id))
     ret = pillar.compile_pillar()
     return ret
Example 16
def ext_pillar(
    minion_id, pillar, config_file  # pylint: disable=W0613
):
    """
    Execute LDAP searches and return the aggregated data
    """
    config_template = None
    try:
        config_template = _render_template(config_file)
    except jinja2.exceptions.TemplateNotFound:
        log.debug("pillar_ldap: missing configuration file %s", config_file)
    except Exception:  # pylint: disable=broad-except
        log.debug(
            "pillar_ldap: failed to render template for %s", config_file, exc_info=True
        )

    if not config_template:
        # We don't have a config file
        return {}

    import salt.utils.yaml

    try:
        opts = salt.utils.yaml.safe_load(config_template) or {}
        opts["conf_file"] = config_file
    except Exception as err:  # pylint: disable=broad-except
        import salt.log

        msg = "pillar_ldap: error parsing configuration file: {} - {}".format(
            config_file, err
        )
        if salt.log.is_console_configured():
            log.warning(msg)
        else:
            print(msg)
        return {}
    else:
        if not isinstance(opts, dict):
            log.warning(
                "pillar_ldap: %s is invalidly formatted, must be a YAML "
                "dictionary. See the documentation for more information.",
                config_file,
            )
            return {}

    if "search_order" not in opts:
        log.warning(
            "pillar_ldap: search_order missing from configuration. See the "
            "documentation for more information."
        )
        return {}

    data = {}
    for source in opts["search_order"]:
        config = opts[source]
        result = _do_search(config)
        log.debug("source %s got result %s", source, result)
        if result:
            data = _result_to_dict(data, result, config, source)
    return data
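
After `salt.utils.yaml.safe_load()`, the function above expects `opts` to be a dict whose `search_order` list names other top-level entries, each passed to `_do_search()`. A hedged sketch of that shape (the source names and their contents are hypothetical):

# Illustrative shape of opts; each named source dict is handed to _do_search()
opts = {
    'search_order': ['users', 'hosts'],
    'users': {},  # per-source LDAP search options (not detailed in the snippet)
    'hosts': {},
}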
Example 17
def read_proc_file(path, opts):
    '''
    Return a dict of JID metadata, or None
    '''
    serial = salt.payload.Serial(opts)
    with salt.utils.files.fopen(path, 'rb') as fp_:
        try:
            data = serial.load(fp_)
        except Exception as err:
            # need to add serial exception here
            # Could not read proc file
            log.warning("Issue deserializing data: %s", err)
            return None

    if not isinstance(data, dict):
        # Invalid serial object
        log.warning("Data is not a dict: %s", data)
        return None

    pid = data.get('pid', None)
    if not pid:
        # No pid, not a salt proc file
        log.warning("No PID found in data")
        return None

    return data
Example 18
def ext_pillar(
        minion_id,  # pylint: disable=W0613
        pillar,  # pylint: disable=W0613
        config_file):
    '''
    Execute LDAP searches and return the aggregated data
    '''
    config_template = None
    try:
        config_template = _render_template(config_file)
    except jinja2.exceptions.TemplateNotFound:
        log.debug('pillar_ldap: missing configuration file %s', config_file)
    except Exception:
        log.debug('pillar_ldap: failed to render template for %s',
                  config_file,
                  exc_info=True)

    if not config_template:
        # We don't have a config file
        return {}

    import salt.utils.yaml
    try:
        opts = salt.utils.yaml.safe_load(config_template) or {}
        opts['conf_file'] = config_file
    except Exception as err:
        import salt.log
        msg = 'pillar_ldap: error parsing configuration file: {0} - {1}'.format(
            config_file, err)
        if salt.log.is_console_configured():
            log.warning(msg)
        else:
            print(msg)
        return {}
    else:
        if not isinstance(opts, dict):
            log.warning(
                'pillar_ldap: %s is invalidly formatted, must be a YAML '
                'dictionary. See the documentation for more information.',
                config_file)
            return {}

    if 'search_order' not in opts:
        log.warning(
            'pillar_ldap: search_order missing from configuration. See the '
            'documentation for more information.')
        return {}

    data = {}
    for source in opts['search_order']:
        config = opts[source]
        result = _do_search(config)
        log.debug('source %s got result %s', source, result)
        if result:
            data = _result_to_dict(data, result, config, source)
    return data
Example 19
def get_id():
    """
    Guess the id of the minion.

    - Check /etc/hostname for a value other than localhost
    - If socket.getfqdn() returns us something other than localhost, use it
    - Check /etc/hosts for something that isn't localhost that maps to 127.*
    - Look for a routeable / public IP
    - A private IP is better than a loopback IP
    - localhost may be better than killing the minion

    Returns two values: the detected ID, and a boolean value noting whether or
    not an IP address is being used for the ID.
    """

    log.debug("Guessing ID. The id can be explicitly in set {0}".format(os.path.join(syspaths.CONFIG_DIR, "minion")))

    # Check /etc/hostname
    try:
        with salt.utils.fopen("/etc/hostname") as hfl:
            name = hfl.read().strip()
        if re.search(r"\s", name):
            log.warning(
                "Whitespace character detected in /etc/hostname. " "This file should not contain any whitespace."
            )
        else:
            if name != "localhost":
                return name, False
    except Exception:
        pass

    # Nothing in /etc/hostname or /etc/hostname not found
    fqdn = socket.getfqdn()
    if fqdn != "localhost":
        log.info("Found minion id from getfqdn(): {0}".format(fqdn))
        return fqdn, False

    # Can /etc/hosts help us?
    try:
        with salt.utils.fopen("/etc/hosts") as hfl:
            for line in hfl:
                names = line.split()
                ip_ = names.pop(0)
                if ip_.startswith("127."):
                    for name in names:
                        if name != "localhost":
                            log.info("Found minion id in hosts file: {0}".format(name))
                            return name, False
    except Exception:
        pass

    # Can Windows 'hosts' file help?
    try:
        windir = os.getenv("WINDIR")
        with salt.utils.fopen(windir + "\\system32\\drivers\\etc\\hosts") as hfl:
            for line in hfl:
                # skip commented or blank lines
                if line[0] == "#" or len(line) <= 1:
                    continue
                # process lines looking for '127.' in first column
                try:
                    entry = line.split()
                    if entry[0].startswith("127."):
                        for name in entry[1:]:  # try each name in the row
                            if name != "localhost":
                                log.info("Found minion id in hosts file: {0}".format(name))
                                return name, False
                except IndexError:
                    pass  # could not split line (malformed entry?)
    except Exception:
        pass

    # What IP addresses do we have?
    ip_addresses = [
        salt.utils.network.IPv4Address(addr)
        for addr in salt.utils.network.ip_addrs(include_loopback=True)
        if not addr.startswith("127.")
    ]

    for addr in ip_addresses:
        if not addr.is_private:
            log.info("Using public ip address for id: {0}".format(addr))
            return str(addr), True

    if ip_addresses:
        addr = ip_addresses.pop(0)
        log.info("Using private ip address for id: {0}".format(addr))
        return str(addr), True

    log.error("No id found, falling back to localhost")
    return "localhost", False
Example 20
    def _thread_return(class_, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if not minion_instance:
            minion_instance = class_(opts)
        if opts['multiprocessing']:
            fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
            sdata = {'pid': os.getpid()}
            sdata.update(data)
            with salt.utils.fopen(fn_, 'w+') as fp_:
                fp_.write(minion_instance.serial.dumps(sdata))
        ret = {}
        for ind in range(0, len(data['arg'])):
            try:
                arg = eval(data['arg'][ind])
                if isinstance(arg, bool):
                    data['arg'][ind] = str(data['arg'][ind])
                elif isinstance(arg, (dict, int, list, string_types)):
                    data['arg'][ind] = arg
                else:
                    data['arg'][ind] = str(data['arg'][ind])
            except Exception:
                pass

        function_name = data['fun']
        if function_name in minion_instance.functions:
            ret['success'] = False
            try:
                func = minion_instance.functions[data['fun']]
                args, kwargs = detect_kwargs(func, data['arg'], data)
                ret['return'] = func(*args, **kwargs)
                ret['success'] = True
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found: {1}'
                log.debug(msg.format(function_name, str(exc)))
                ret['return'] = msg.format(function_name, str(exc))
            except CommandExecutionError as exc:
                msg = 'A command in {0} had a problem: {1}'
                log.error(msg.format(function_name, str(exc)))
                ret['return'] = 'ERROR: {0}'.format(str(exc))
            except SaltInvocationError as exc:
                msg = 'Problem executing "{0}": {1}'
                log.error(msg.format(function_name, str(exc)))
                ret['return'] = 'ERROR executing {0}: {1}'.format(
                    function_name, exc
                )
            except Exception:
                trb = traceback.format_exc()
                msg = 'The minion function caused an exception: {0}'
                log.warning(msg.format(trb))
                ret['return'] = trb
        else:
            ret['return'] = '"{0}" is not available.'.format(function_name)

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        minion_instance._return_pub(ret)
        if data['ret']:
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job {0} {1}'.format(
                        data['jid'],
                        exc
                        )
                    )
Example 21
def is_pid_healthy(pid):
    '''
    This is a health check that will confirm the PID is running
    and executed by salt.

    If psutil is available:
        * all architectures are checked

    If psutil is not available:
        * Linux/Solaris/etc: archs with `/proc/cmdline` available are checked
        * AIX/Windows: assume PID is healthy and return True
    '''
    if HAS_PSUTIL:
        try:
            proc = psutil.Process(pid)
        except psutil.NoSuchProcess:
            log.warning("PID %s is no longer running.", pid)
            return False
        return any(['salt' in cmd for cmd in proc.cmdline()])

    if salt.utils.platform.is_aix() or salt.utils.platform.is_windows():
        return True

    if not salt.utils.process.os_is_running(pid):
        log.warning("PID %s is no longer running.", pid)
        return False

    cmdline_file = os.path.join('/proc', str(pid), 'cmdline')
    try:
        with salt.utils.files.fopen(cmdline_file, 'rb') as fp_:
            return b'salt' in fp_.read()
    except (OSError, IOError) as err:
        log.error("There was a problem reading proc file: %s", err)
        return False
Example 22
def master_config(path):
    '''
    Reads in the master configuration file and sets up default options
    '''
    opts = {
        'interface': '0.0.0.0',
        'publish_port': '4505',
        'auth_mode': 1,
        'user': '******',
        'worker_threads': 5,
        'sock_dir': '/var/run/salt/master',
        'ret_port': '4506',
        'timeout': 5,
        'keep_jobs': 24,
        'root_dir': '/',
        'pki_dir': '/etc/salt/pki/master',
        'cachedir': '/var/cache/salt/master',
        'file_roots': {
            'base': ['/srv/salt'],
        },
        'master_roots': {
            'base': ['/srv/salt-master'],
        },
        'pillar_roots': {
            'base': ['/srv/pillar'],
        },
        'ext_pillar': [],
        # NOTE: pillar version changed to 2 by default in 0.10.6
        'pillar_version': 2,
        'pillar_opts': True,
        'syndic_master': '',
        'runner_dirs': [],
        'client_acl': {},
        'external_auth': {},
        'token_expire': 720,
        'file_buffer_size': 1048576,
        'file_ignore_regex': None,
        'file_ignore_glob': None,
        'max_open_files': 100000,
        'hash_type': 'md5',
        'conf_file': path,
        'open_mode': False,
        'auto_accept': False,
        'renderer': 'yaml_jinja',
        'failhard': False,
        'state_top': 'top.sls',
        'master_tops': {},
        'external_nodes': '',
        'order_masters': False,
        'job_cache': True,
        'ext_job_cache': '',
        'master_ext_job_cache': '',
        'minion_data_cache': True,
        'log_file': '/var/log/salt/master',
        'log_level': None,
        'log_level_logfile': None,
        'log_datefmt': __dflt_log_datefmt,
        'log_fmt_console': __dflt_log_fmt_console,
        'log_fmt_logfile': __dflt_log_fmt_logfile,
        'log_granular_levels': {},
        'pidfile': '/var/run/salt-master.pid',
        'cluster_masters': [],
        'cluster_mode': 'paranoid',
        'range_server': 'range:80',
        'reactors': [],
        'serial': 'msgpack',
        'state_verbose': True,
        'state_output': 'full',
        'search': '',
        'search_index_interval': 3600,
        'nodegroups': {},
        'cython_enable': False,
        'key_logfile': '/var/log/salt/key',
        'verify_env': True,
        'permissive_pki_access': False,
        'default_include': 'master.d/*.conf',
    }

    if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
        opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')

    load_config(opts, path, 'SALT_MASTER_CONFIG')

    default_include = opts.get('default_include', [])
    include = opts.get('include', [])

    opts = include_config(default_include, opts, path, verbose=False)
    opts = include_config(include, opts, path, verbose=True)

    opts['aes'] = salt.crypt.Crypticle.generate_key_string()

    opts['extension_modules'] = (opts.get('extension_modules')
                                 or os.path.join(opts['cachedir'], 'extmods'))
    opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
    # Prepend root_dir to other paths
    prepend_root_dir(opts, [
        'pki_dir', 'cachedir', 'log_file', 'pidfile', 'sock_dir',
        'key_logfile', 'extension_modules', 'autosign_file', 'token_dir'
    ])

    # Enabling open mode requires that the value be set to True, and
    # nothing else!
    opts['open_mode'] = opts['open_mode'] is True
    opts['auto_accept'] = opts['auto_accept'] is True
    opts['file_roots'] = _validate_file_roots(opts['file_roots'])

    if opts['file_ignore_regex']:
        # If file_ignore_regex was given, make sure it's wrapped in a list.
        # Only keep valid regex entries for improved performance later on.
        if isinstance(opts['file_ignore_regex'], str):
            ignore_regex = [opts['file_ignore_regex']]
        elif isinstance(opts['file_ignore_regex'], list):
            ignore_regex = opts['file_ignore_regex']

        opts['file_ignore_regex'] = []
        for r in ignore_regex:
            try:
                # Can't store compiled regex itself in opts (breaks serialization)
                re.compile(r)
                opts['file_ignore_regex'].append(r)
            except Exception:
                log.warning(
                    'Unable to parse file_ignore_regex. Skipping: {0}'.format(
                        r))

    if opts['file_ignore_glob']:
        # If file_ignore_glob was given, make sure it's wrapped in a list.
        if isinstance(opts['file_ignore_glob'], str):
            opts['file_ignore_glob'] = [opts['file_ignore_glob']]

    return opts
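
A brief usage sketch (the path is the conventional default location, not taken from the snippet):

opts = master_config('/etc/salt/master')
print(opts['interface'], opts['ret_port'])  # '0.0.0.0' '4506' unless overridden in the file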
Example 23
    def _thread_return(class_, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if not minion_instance:
            minion_instance = class_(opts)
        if opts['multiprocessing']:
            fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
            sdata = {'pid': os.getpid()}
            sdata.update(data)
            with salt.utils.fopen(fn_, 'w+') as fp_:
                fp_.write(minion_instance.serial.dumps(sdata))
        ret = {}
        for ind in range(0, len(data['arg'])):
            try:
                arg = eval(data['arg'][ind])
                if isinstance(arg, bool):
                    data['arg'][ind] = str(data['arg'][ind])
                elif isinstance(arg, (dict, int, list, string_types)):
                    data['arg'][ind] = arg
                else:
                    data['arg'][ind] = str(data['arg'][ind])
            except Exception:
                pass

        function_name = data['fun']
        if function_name in minion_instance.functions:
            ret['success'] = False
            try:
                func = minion_instance.functions[data['fun']]
                args, kwargs = detect_kwargs(func, data['arg'], data)
                ret['return'] = func(*args, **kwargs)
                ret['success'] = True
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found: {1}'
                log.debug(msg.format(function_name, str(exc)))
                ret['return'] = msg.format(function_name, str(exc))
            except CommandExecutionError as exc:
                msg = 'A command in {0} had a problem: {1}'
                log.error(msg.format(function_name, str(exc)))
                ret['return'] = 'ERROR: {0}'.format(str(exc))
            except SaltInvocationError as exc:
                msg = 'Problem executing "{0}": {1}'
                log.error(msg.format(function_name, str(exc)))
                ret['return'] = 'ERROR executing {0}: {1}'.format(
                    function_name, exc)
            except Exception:
                trb = traceback.format_exc()
                msg = 'The minion function caused an exception: {0}'
                log.warning(msg.format(trb))
                ret['return'] = trb
        else:
            ret['return'] = '"{0}" is not available.'.format(function_name)

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        minion_instance._return_pub(ret)
        if data['ret']:
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(returner)](
                        ret)
                except Exception as exc:
                    log.error('The return failed for job {0} {1}'.format(
                        data['jid'], exc))
Example 24
def master_config(path):
    '''
    Reads in the master configuration file and sets up default options
    '''
    opts = {'interface': '0.0.0.0',
            'publish_port': '4505',
            'user': '******',
            'worker_threads': 5,
            'sock_dir': '/var/run/salt/master',
            'ret_port': '4506',
            'timeout': 5,
            'keep_jobs': 24,
            'root_dir': '/',
            'pki_dir': '/etc/salt/pki/master',
            'cachedir': '/var/cache/salt/master',
            'file_roots': {
                'base': ['/srv/salt'],
                },
            'master_roots': {
                'base': ['/srv/salt-master'],
                },
            'pillar_roots': {
                'base': ['/srv/pillar'],
                },
            'ext_pillar': [],
            # NOTE: pillar version changed to 2 by default in 0.10.6
            'pillar_version': 2,
            'pillar_opts': True,
            'syndic_master': '',
            'runner_dirs': [],
            'client_acl': {},
            'external_auth': {},
            'token_expire': 720,
            'file_buffer_size': 1048576,
            'file_ignore_regex': None,
            'file_ignore_glob': None,
            'max_open_files': 100000,
            'hash_type': 'md5',
            'conf_file': path,
            'open_mode': False,
            'auto_accept': False,
            'renderer': 'yaml_jinja',
            'failhard': False,
            'state_top': 'top.sls',
            'master_tops': {},
            'external_nodes': '',
            'order_masters': False,
            'job_cache': True,
            'ext_job_cache': '',
            'master_ext_job_cache': '',
            'minion_data_cache': True,
            'log_file': '/var/log/salt/master',
            'log_level': None,
            'log_level_logfile': None,
            'log_datefmt': __dflt_log_datefmt,
            'log_fmt_console': __dflt_log_fmt_console,
            'log_fmt_logfile': __dflt_log_fmt_logfile,
            'log_granular_levels': {},
            'pidfile': '/var/run/salt-master.pid',
            'cluster_masters': [],
            'cluster_mode': 'paranoid',
            'range_server': 'range:80',
            'reactors': [],
            'serial': 'msgpack',
            'state_verbose': True,
            'state_output': 'full',
            'search': '',
            'search_index_interval': 3600,
            'nodegroups': {},
            'cython_enable': False,
            'key_logfile': '/var/log/salt/key',
            'verify_env': True,
            'permissive_pki_access': False,
            'default_include': 'master.d/*.conf',
    }

    if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
        opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')

    load_config(opts, path, 'SALT_MASTER_CONFIG')

    default_include = opts.get('default_include', [])
    include = opts.get('include', [])

    opts = include_config(default_include, opts, path, verbose=False)
    opts = include_config(include, opts, path, verbose=True)

    opts['aes'] = salt.crypt.Crypticle.generate_key_string()

    opts['extension_modules'] = (
            opts.get('extension_modules') or
            os.path.join(opts['cachedir'], 'extmods')
            )
    opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
    # Prepend root_dir to other paths
    prepend_root_dir(opts, ['pki_dir', 'cachedir', 'log_file',
                            'sock_dir', 'key_logfile', 'extension_modules',
                            'autosign_file', 'token_dir'])

    # Enabling open mode requires that the value be set to True, and
    # nothing else!
    opts['open_mode'] = opts['open_mode'] is True
    opts['auto_accept'] = opts['auto_accept'] is True
    opts['file_roots'] = _validate_file_roots(opts['file_roots'])

    if opts['file_ignore_regex']:
        # If file_ignore_regex was given, make sure it's wrapped in a list.
        # Only keep valid regex entries for improved performance later on.
        if isinstance(opts['file_ignore_regex'], str):
            ignore_regex = [ opts['file_ignore_regex'] ]
        elif isinstance(opts['file_ignore_regex'], list):
            ignore_regex = opts['file_ignore_regex']

        opts['file_ignore_regex'] = []
        for r in ignore_regex:
            try:
                # Can't store compiled regex itself in opts (breaks serialization)
                re.compile(r)
                opts['file_ignore_regex'].append(r)
            except Exception:
                log.warning('Unable to parse file_ignore_regex. Skipping: {0}'.format(r))

    if opts['file_ignore_glob']:
        # If file_ignore_glob was given, make sure it's wrapped in a list.
        if isinstance(opts['file_ignore_glob'], str):
            opts['file_ignore_glob'] = [ opts['file_ignore_glob'] ]

    return opts
Example 25
File: config.py Project: herlo/salt
def apply_master_config(overrides=None, defaults=None):
    '''
    Returns master configurations dict.
    '''
    if defaults is None:
        defaults = DEFAULT_MASTER_OPTS

    opts = defaults.copy()
    if overrides:
        opts.update(overrides)

    if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
        opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')

    opts['aes'] = salt.crypt.Crypticle.generate_key_string()

    opts['extension_modules'] = (
        opts.get('extension_modules') or
        os.path.join(opts['cachedir'], 'extmods')
    )
    opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')

    # Prepend root_dir to other paths
    prepend_root_dirs = [
        'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
        'autosign_file', 'token_dir'
    ]

    # These can be set to syslog, so they are not actual paths on the system
    for config_key in ('log_file', 'key_logfile'):
        log_setting = opts.get(config_key, '')
        if log_setting is None:
            continue

        if urlparse.urlparse(log_setting).scheme == '':
            prepend_root_dirs.append(config_key)

    prepend_root_dir(opts, prepend_root_dirs)

    # Enabling open mode requires that the value be set to True, and
    # nothing else!
    opts['open_mode'] = opts['open_mode'] is True
    opts['auto_accept'] = opts['auto_accept'] is True
    opts['file_roots'] = _validate_file_roots(opts)

    if opts['file_ignore_regex']:
        # If file_ignore_regex was given, make sure it's wrapped in a list.
        # Only keep valid regex entries for improved performance later on.
        if isinstance(opts['file_ignore_regex'], str):
            ignore_regex = [opts['file_ignore_regex']]
        elif isinstance(opts['file_ignore_regex'], list):
            ignore_regex = opts['file_ignore_regex']

        opts['file_ignore_regex'] = []
        for regex in ignore_regex:
            try:
                # Can't store compiled regex itself in opts (breaks
                # serialization)
                re.compile(regex)
                opts['file_ignore_regex'].append(regex)
            except Exception:
                log.warning(
                    'Unable to parse file_ignore_regex. Skipping: {0}'.format(
                        regex
                    )
                )

    if opts['file_ignore_glob']:
        # If file_ignore_glob was given, make sure it's wrapped in a list.
        if isinstance(opts['file_ignore_glob'], str):
            opts['file_ignore_glob'] = [opts['file_ignore_glob']]

    # Let's make sure `worker_threads` does not drop below 3 which has proven
    # to make `salt.modules.publish` not work under the test-suite.
    if opts['worker_threads'] < 3 and opts.get('peer', None):
        log.warning(
            'The \'worker_threads\' setting on {0!r} cannot be lower than 3. '
            'Resetting it to the default value of 3.'.format(
                opts['conf_file']
            )
        )
        opts['worker_threads'] = 3
    return opts
Example 26
def get_elb_lbs():
    """
    Returns a dictionary of load balancer names as keys
    each with their respective attributes
    """

    # attributes to extract from the load balancer boto objects
    # this could possibly be a named argument too
    extract_attrs = ['scheme', 'dns_name', 'vpc_id', 'name', 'security_groups']

    try:
        instance_metadata = boto.utils.get_instance_metadata(timeout=5,
                                                             num_retries=2)
    except Exception as e:
        log.exception("Error getting ELB names: {}".format(e))
        return {'custom_grain_error': True}

    # Setup the lbs grain
    lbs_grain = {'lbs': {}}

    # Collect details about this instance
    vpc_id = instance_metadata['network']['interfaces']['macs'].values(
    )[0]['vpc-id']
    region = instance_metadata['placement']['availability-zone'][:-1]

    # Collect load balancers of this instance (in the same vpc)
    try:
        elb_connection = boto.ec2.elb.connect_to_region(region)

        # find load balancers by vpc_id
        all_lbs = [
            lb for lb in elb_connection.get_all_load_balancers()
            if lb.vpc_id == vpc_id
        ]
        log.debug(
            'all lbs before filtering by instance id: {}'.format(all_lbs))

        # further filter the load balancers by instance id
        lbs = [
            lb for lb in all_lbs for inst in lb.instances
            if inst.id == instance_metadata['instance-id']
        ]
        # initialise and populate the output of load balancers
        out = {}
        [out.update({l.name: {}}) for l in lbs]
        [
            out[l.name].update({attr: getattr(l, attr, None)})
            for attr in extract_attrs for l in lbs
        ]

        if not out:
            # This loglevel could perhaps be adjusted to something more visible
            log.warning("No ELBs found for this instance, this is unusual, "
                        "but we will not break highstate")

        lbs_grain['lbs'] = out

    except Exception as e:
        # This prints a user-friendly error with stacktrace
        log.exception("Error getting ELB names: {}".format(e))
        return {'custom_grain_error': True}

    return lbs_grain
Example 27
def apply_master_config(overrides=None, defaults=None):
    """
    Returns master configurations dict.
    """
    if defaults is None:
        defaults = DEFAULT_MASTER_OPTS

    opts = defaults.copy()
    if overrides:
        opts.update(overrides)

    if len(opts["sock_dir"]) > len(opts["cachedir"]) + 10:
        opts["sock_dir"] = os.path.join(opts["cachedir"], ".salt-unix")

    opts["aes"] = salt.crypt.Crypticle.generate_key_string()

    opts["extension_modules"] = opts.get("extension_modules") or os.path.join(opts["cachedir"], "extmods")
    opts["token_dir"] = os.path.join(opts["cachedir"], "tokens")

    # Prepend root_dir to other paths
    prepend_root_dirs = [
        "pki_dir",
        "cachedir",
        "pidfile",
        "sock_dir",
        "extension_modules",
        "autosign_file",
        "token_dir",
    ]

    # These can be set to syslog, so they are not actual paths on the system
    for config_key in ("log_file", "key_logfile"):
        log_setting = opts.get(config_key, "")
        if log_setting is None:
            continue

        if urlparse.urlparse(log_setting).scheme == "":
            prepend_root_dirs.append(config_key)

    prepend_root_dir(opts, prepend_root_dirs)

    # Enabling open mode requires that the value be set to True, and
    # nothing else!
    opts["open_mode"] = opts["open_mode"] is True
    opts["auto_accept"] = opts["auto_accept"] is True
    opts["file_roots"] = _validate_file_roots(opts)

    if opts["file_ignore_regex"]:
        # If file_ignore_regex was given, make sure it's wrapped in a list.
        # Only keep valid regex entries for improved performance later on.
        if isinstance(opts["file_ignore_regex"], str):
            ignore_regex = [opts["file_ignore_regex"]]
        elif isinstance(opts["file_ignore_regex"], list):
            ignore_regex = opts["file_ignore_regex"]

        opts["file_ignore_regex"] = []
        for regex in ignore_regex:
            try:
                # Can't store compiled regex itself in opts (breaks
                # serialization)
                re.compile(regex)
                opts["file_ignore_regex"].append(regex)
            except Exception:
                log.warning("Unable to parse file_ignore_regex. Skipping: {0}".format(regex))

    if opts["file_ignore_glob"]:
        # If file_ignore_glob was given, make sure it's wrapped in a list.
        if isinstance(opts["file_ignore_glob"], str):
            opts["file_ignore_glob"] = [opts["file_ignore_glob"]]

    # Let's make sure `worker_threads` does not drop below 3 which has proven
    # to make `salt.modules.publish` not work under the test-suite.
    if opts["worker_threads"] < 3 and opts.get("peer", None):
        log.warning(
            "The 'worker_threads' setting on {0!r} cannot be lower than 3. "
            "Resetting it to the default value of 3.".format(opts["conf_file"])
        )
        opts["worker_threads"] = 3
    return opts
Example 28
def apply_master_config(overrides=None, defaults=None):
    '''
    Returns master configurations dict.
    '''
    if defaults is None:
        defaults = DEFAULT_MASTER_OPTS

    opts = defaults.copy()
    if overrides:
        opts.update(overrides)

    if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
        opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')

    opts['aes'] = salt.crypt.Crypticle.generate_key_string()

    opts['extension_modules'] = (opts.get('extension_modules')
                                 or os.path.join(opts['cachedir'], 'extmods'))
    opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')

    # Prepend root_dir to other paths
    prepend_root_dirs = [
        'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
        'autosign_file', 'token_dir'
    ]

    # These can be set to syslog, so they are not actual paths on the system
    for config_key in ('log_file', 'key_logfile'):
        log_setting = opts.get(config_key, '')
        if log_setting is None:
            continue

        if urlparse.urlparse(log_setting).scheme == '':
            prepend_root_dirs.append(config_key)

    prepend_root_dir(opts, prepend_root_dirs)

    # Enabling open mode requires that the value be set to True, and
    # nothing else!
    opts['open_mode'] = opts['open_mode'] is True
    opts['auto_accept'] = opts['auto_accept'] is True
    opts['file_roots'] = _validate_file_roots(opts)

    if opts['file_ignore_regex']:
        # If file_ignore_regex was given, make sure it's wrapped in a list.
        # Only keep valid regex entries for improved performance later on.
        if isinstance(opts['file_ignore_regex'], str):
            ignore_regex = [opts['file_ignore_regex']]
        elif isinstance(opts['file_ignore_regex'], list):
            ignore_regex = opts['file_ignore_regex']

        opts['file_ignore_regex'] = []
        for regex in ignore_regex:
            try:
                # Can't store compiled regex itself in opts (breaks
                # serialization)
                re.compile(regex)
                opts['file_ignore_regex'].append(regex)
            except Exception:
                log.warning(
                    'Unable to parse file_ignore_regex. Skipping: {0}'.format(
                        regex))

    if opts['file_ignore_glob']:
        # If file_ignore_glob was given, make sure it's wrapped in a list.
        if isinstance(opts['file_ignore_glob'], str):
            opts['file_ignore_glob'] = [opts['file_ignore_glob']]

    # Let's make sure `worker_threads` does not drop below 3 which has proven
    # to make `salt.modules.publish` not work under the test-suite.
    if opts['worker_threads'] < 3 and opts.get('peer', None):
        log.warning(
            'The \'worker_threads\' setting on {0!r} cannot be lower than 3. '
            'Resetting it to the default value of 3.'.format(
                opts['conf_file']))
        opts['worker_threads'] = 3
    return opts
Example 29
def get_id():
    '''
    Guess the id of the minion.

    - Check /etc/hostname for a value other than localhost
    - If socket.getfqdn() returns us something other than localhost, use it
    - Check /etc/hosts for something that isn't localhost that maps to 127.*
    - Look for a routeable / public IP
    - A private IP is better than a loopback IP
    - localhost may be better than killing the minion

    Returns two values: the detected ID, and a boolean value noting whether or
    not an IP address is being used for the ID.
    '''

    log.debug('Guessing ID. The id can be explicitly set in {0}'.format(
        os.path.join(syspaths.CONFIG_DIR, 'minion')))

    # Check /etc/hostname
    try:
        with salt.utils.fopen('/etc/hostname') as hfl:
            name = hfl.read().strip()
        if re.search(r'\s', name):
            log.warning('Whitespace character detected in /etc/hostname. '
                        'This file should not contain any whitespace.')
        else:
            if name != 'localhost':
                return name, False
    except Exception:
        pass

    # Nothing in /etc/hostname or /etc/hostname not found
    fqdn = socket.getfqdn()
    if fqdn != 'localhost':
        log.info('Found minion id from getfqdn(): {0}'.format(fqdn))
        return fqdn, False

    # Can /etc/hosts help us?
    try:
        with salt.utils.fopen('/etc/hosts') as hfl:
            for line in hfl:
                names = line.split()
                ip_ = names.pop(0)
                if ip_.startswith('127.'):
                    for name in names:
                        if name != 'localhost':
                            log.info(
                                'Found minion id in hosts file: {0}'.format(
                                    name))
                            return name, False
    except Exception:
        pass

    # Can Windows 'hosts' file help?
    try:
        windir = os.getenv("WINDIR")
        with salt.utils.fopen(windir +
                              '\\system32\\drivers\\etc\\hosts') as hfl:
            for line in hfl:
                # skip commented or blank lines
                if line[0] == '#' or len(line) <= 1:
                    continue
                # process lines looking for '127.' in first column
                try:
                    entry = line.split()
                    if entry[0].startswith('127.'):
                        for name in entry[1:]:  # try each name in the row
                            if name != 'localhost':
                                log.info('Found minion id in hosts file: {0}'.
                                         format(name))
                                return name, False
                except IndexError:
                    pass  # could not split line (malformed entry?)
    except Exception:
        pass

    # What IP addresses do we have?
    ip_addresses = [
        salt.utils.network.IPv4Address(addr)
        for addr in salt.utils.network.ip_addrs(include_loopback=True)
        if not addr.startswith('127.')
    ]

    for addr in ip_addresses:
        if not addr.is_private:
            log.info('Using public ip address for id: {0}'.format(addr))
            return str(addr), True

    if ip_addresses:
        addr = ip_addresses.pop(0)
        log.info('Using private ip address for id: {0}'.format(addr))
        return str(addr), True

    log.error('No id found, falling back to localhost')
    return 'localhost', False