Example #1
def load_config(opts, path, env_var):
    '''
    Attempts to update ``opts`` dict by parsing either the file described by
    ``path`` or the environment variable described by ``env_var`` as YAML.
    '''
    if not path or not os.path.isfile(path):
        path = os.environ.get(env_var, path)
    # If the configuration file is missing, attempt to copy the template,
    # after removing the first header line.
    if not os.path.isfile(path):
        template = '{0}.template'.format(path)
        if os.path.isfile(template):
            with open(path, 'w') as out:
                with open(template, 'r') as f:
                    f.readline()  # skip first line
                    out.write(f.read())

    if os.path.isfile(path):
        try:
            opts.update(_read_conf_file(path))
            opts['conf_file'] = path
        except Exception as e:
            import salt.log
            msg = 'Error parsing configuration file: {0} - {1}'
            if salt.log.is_console_configured():
                log.warn(msg.format(path, e))
            else:
                print(msg.format(path, e))
    else:
        log.debug('Missing configuration file: {0}'.format(path))
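
A self-contained sketch of the template-copy step above, using a temporary directory and invented file contents so it can run anywhere:

    import os
    import tempfile

    tmpdir = tempfile.mkdtemp()
    path = os.path.join(tmpdir, 'minion')
    template = '{0}.template'.format(path)
    with open(template, 'w') as f:
        f.write('# header line that gets dropped\nlog_level: debug\n')
    if not os.path.isfile(path) and os.path.isfile(template):
        with open(path, 'w') as out:
            with open(template, 'r') as f:
                f.readline()  # skip the first (header) line
                out.write(f.read())
    with open(path) as f:
        print(f.read())  # prints: log_level: debug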
Example #2
 def grain_match(self, tgt):
     '''
     Reads in the grains glob match
     '''
     log.debug('grains target: {0}'.format(tgt))
     comps = tgt.rsplit(':', 1)
     if len(comps) != 2:
         log.error('Got insufficient arguments for grains match '
                   'statement from master')
         return False
     match = self._traverse_dict(self.opts['grains'], comps[0])
     if match == {}:
         log.error('Targeted grain "{0}" not found'.format(comps[0]))
         return False
     if isinstance(match, dict):
         log.error('Targeted grain "{0}" must correspond to a list, '
                   'string, or numeric value'.format(comps[0]))
         return False
     if isinstance(match, list):
         # We are matching a single component to a single list member
         for member in match:
             if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
                 return True
         return False
     return bool(fnmatch.fnmatch(str(match).lower(), comps[1].lower()))
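
The same fnmatch-based matching in a standalone form, with an invented grains dict and target (nested keys would go through _traverse_dict in the real code):

    import fnmatch

    grains = {'os': 'Ubuntu', 'roles': ['web', 'db']}
    tgt = 'roles:web*'
    key, pattern = tgt.rsplit(':', 1)
    value = grains.get(key, {})
    if isinstance(value, list):
        # match the pattern against each list member
        matched = any(fnmatch.fnmatch(str(m).lower(), pattern.lower()) for m in value)
    else:
        matched = bool(fnmatch.fnmatch(str(value).lower(), pattern.lower()))
    print(matched)  # True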
Example #3
 def start(self):
     log.debug("starting monitor with {} task{}".format(len(self.tasks), "" if len(self.tasks) == 1 else "s"))
     if self.tasks:
         for task in self.tasks:
             threading.Thread(target=task.run).start()
     else:
         log.error("no monitor tasks to run")
Example #4
 def _handle_aes(self, load):
     '''
     Takes the aes encrypted load, decrypts it and runs the encapsulated
     instructions
     '''
     try:
         data = self.crypticle.loads(load)
     except AuthenticationError:
         self.authenticate()
         data = self.crypticle.loads(load)
     # Verify that the publication is valid
     if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
        or 'arg' not in data:
         return
     # Verify that the publication applies to this minion
     if 'tgt_type' in data:
         if not getattr(self.matcher,
                        '{0}_match'.format(data['tgt_type']))(data['tgt']):
             return
     else:
         if not self.matcher.glob_match(data['tgt']):
             return
     # If the minion does not have the function, don't execute,
     # this prevents minions that could not load a minion module
     # from returning a predictable exception
     #if data['fun'] not in self.functions:
     #    return
     if 'user' in data:
         log.info(('User {0[user]} Executing command {0[fun]} with jid '
                   '{0[jid]}'.format(data)))
     else:
         log.info(('Executing command {0[fun]} with jid {0[jid]}'
                   .format(data)))
     log.debug('Command details {0}'.format(data))
     self._handle_decoded_payload(data)
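
The getattr-based matcher dispatch above can be shown in isolation; the class and payload below are invented for this sketch, not taken from Salt:

    class Matcher(object):
        def glob_match(self, tgt):
            return tgt == '*'

        def list_match(self, tgt):
            return 'minion1' in tgt.split(',')

    matcher = Matcher()
    data = {'tgt': 'minion1,minion2', 'tgt_type': 'list'}
    # pick the matcher method by name, defaulting to glob_match
    matched = getattr(matcher, '{0}_match'.format(data.get('tgt_type', 'glob')))(data['tgt'])
    print(matched)  # True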
Example #5
    def stop(self,
             signal,
             frame):
        '''
        we override stop() to break our main loop
        and have a pretty log message
        '''
        log.info("received signal {0}".format(signal))

        # if we have running workers, run through all and join() the ones
        # that have finished. if we still have running workers after that,
        # wait 5 secs for the rest and then exit. Maybe we should improve
        # this a little bit more
        if( len(self.running_workers) > 0 ):
            clean_workers = []

            for count in range(0, 2):
                for worker in self.running_workers:
                    if worker.isAlive():
                        clean_workers.append(worker)
                    else:
                        worker.join()
                        log.debug("joined worker #{0}".format(worker.getName()))

                if( len(clean_workers) > 0 ):
                    log.info("waiting 5secs for remaining workers..")
                    time.sleep(5)
                else:
                    break

        log.info("salt-eventsd has shut down")

        # leave the cleanup to the supers stop
        super(SaltEventsDaemon, self).stop(signal, frame)
Example #6
def _dism(action,
          image=None):
    '''
    Run a DISM servicing command on the given image.
    '''
    command='dism {0} {1}'.format(
        '/Image:{0}'.format(image) if image else '/Online',
        action
    )
    ret = {'action': action,
           'image': image,
           'result': True,
           'message': ''}
    output = __salt__['cmd.run'](command, ignore_retcode=True)
    if not re.search('The operation completed successfully.', output):
        ret['result'] = False
        ret['message'] = re.search(
            r'(Error: \d+\r?\n\r?\n([^\r\n]+\r?\n)+\r?\nThe DISM log file can be found at [^\r\n]+\r?\n)',
            output,
            re.MULTILINE
        ).group(1)
        log.exception('DISM command \'{0}\' on image {1} failed: {2}'.format(action, image, ret['message']))
    else:
        ret['message'] = output
        log.debug('DISM command \'{0}\' on image {1} completed with the following output: {2}'.format(action, image, output))
    return ret
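
How the DISM command string is assembled, shown standalone with made-up values:

    action = '/Get-Features'
    image = r'C:\mount\offline'
    command = 'dism {0} {1}'.format(
        '/Image:{0}'.format(image) if image else '/Online',
        action
    )
    print(command)  # dism /Image:C:\mount\offline /Get-Features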
Example #7
 def __init__(self,
              tgt='',
              expr_form='glob',
              env=None,
              use_cached_grains=True,
              use_cached_pillar=True,
              grains_fallback=True,
              pillar_fallback=True,
              opts=None):
     log.debug('New instance of {0} created.'.format(
         self.__class__.__name__))
     if opts is None:
         log.error('{0}: Missing master opts init arg.'.format(
             self.__class__.__name__))
         raise SaltException('{0}: Missing master opts init arg.'.format(
             self.__class__.__name__))
     else:
         self.opts = opts
     self.tgt = tgt
     self.expr_form = expr_form
     self.env = env
     self.use_cached_grains = use_cached_grains
     self.use_cached_pillar = use_cached_pillar
     self.grains_fallback = grains_fallback
     self.pillar_fallback = pillar_fallback
     log.debug('Init settings: tgt: \"{0}\", expr_form: \"{1}\", env: \"{2}\", use_cached_grains: {3}, use_cached_pillar: {4}, grains_fallback: {5}, pillar_fallback: {6}'.format(tgt, expr_form, env, use_cached_grains, use_cached_pillar, grains_fallback, pillar_fallback))
Example #8
 def _get_cached_minion_data(self, *minion_ids):
     # Return two separate dicts of cached grains and pillar data of the
     # minions
     grains = dict([(minion_id, {}) for minion_id in minion_ids])
     pillars = grains.copy()
     if not self.opts.get('minion_data_cache', False):
         log.debug('Skipping cached data because minion_data_cache is not '
                   'enabled.')
         return grains, pillars
     mdir = os.path.join(self.opts['cachedir'], 'minions')
     try:
         for minion_id in minion_ids:
             if not salt.utils.verify.valid_id(self.opts, minion_id):
                 continue
             path = os.path.join(mdir, minion_id, 'data.p')
             if os.path.isfile(path):
                 with salt.utils.fopen(path, 'rb') as fp_:
                     mdata = self.serial.loads(fp_.read())
                     if mdata.get('grains', False):
                         grains[minion_id] = mdata['grains']
                     if mdata.get('pillar', False):
                         pillars[minion_id] = mdata['pillar']
     except (OSError, IOError):
         return grains, pillars
     return grains, pillars
Example #9
 def _get_cached_minion_data(self, *minion_ids):
     # Return two separate dicts of cached grains and pillar data of the minions
     grains = dict([(minion_id, {}) for minion_id in minion_ids])
     pillars = grains.copy()
     if not self.opts.get('minion_data_cache', False):
         log.debug('Skipping cached data because minion_data_cache is not enabled.')
         return grains, pillars
     serial = salt.payload.Serial(self.opts)
     mdir = os.path.join(self.opts['cachedir'], 'minions')
     # salt.utils.verify.valid_id has changed in git development to require opts arg
     valid_id_args = inspect.getargspec(salt.utils.verify.valid_id).args
     log.debug('salt.utils.verify.valid_id accepts args: {0}'.format(valid_id_args))
     try:
         for minion_id in minion_ids:
             if 'opts' in valid_id_args:
                 if not salt.utils.verify.valid_id(self.opts, minion_id):
                     continue
             else:
                 if not salt.utils.verify.valid_id(minion_id):
                     continue
             path = os.path.join(mdir, minion_id, 'data.p')
             if os.path.isfile(path):
                 with salt.utils.fopen(path) as fp_:
                     mdata = serial.loads(fp_.read())
                     if mdata.get('grains', False):
                         grains[minion_id] = mdata['grains']
                     if mdata.get('pillar', False):
                         pillars[minion_id] = mdata['pillar']
     except (OSError, IOError):
         return grains, pillars
     return grains, pillars
Example #10
    def __init__(self, parser):
        self.parser = parser

        log.debug("TODO: load more manager here")
        self.manager = LibvirtManager()
        self.image = ImageService()
        self.flavor = FlavorService()
Example #11
def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               config_file):
    '''
    Execute LDAP searches and return the aggregated data
    '''
    if os.path.isfile(config_file):
        try:
            #open(config_file, 'r') as raw_config:
            config = _render_template(config_file) or {}
            opts = yaml.safe_load(config) or {}
            opts['conf_file'] = config_file
        except Exception as err:
            import salt.log
            msg = 'Error parsing configuration file: {0} - {1}'
            if salt.log.is_console_configured():
                log.warn(msg.format(config_file, err))
            else:
                print(msg.format(config_file, err))
    else:
        log.debug('Missing configuration file: {0}'.format(config_file))

    data = {}
    for source in opts['search_order']:
        config = opts[source]
        result = _do_search(config)
        print('source {0} got result {1}'.format(source, result))
        if result:
            data = _result_to_dict(data, result, config)
    return data
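
For reference, the YAML parsing step in isolation (requires PyYAML; the configuration text and path are invented):

    import yaml

    config = "search_order:\n  - people\npeople:\n  filter: '(objectClass=person)'\n"
    opts = yaml.safe_load(config) or {}
    opts['conf_file'] = '/srv/pillar/ldap.yaml'  # invented path
    print(opts['search_order'])  # ['people']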
Example #12
    def __init__(self, opts, log_queue=None):
        '''
        starts the timer and inits the cache itself
        '''
        super(ConnectedCache, self).__init__(log_queue=log_queue)
        log.debug('ConCache initializing...')

        # the possible settings for the cache
        self.opts = opts

        # the actual cached minion ids
        self.minions = []

        self.cache_sock = os.path.join(self.opts['sock_dir'], 'con_cache.ipc')
        self.update_sock = os.path.join(self.opts['sock_dir'], 'con_upd.ipc')
        self.upd_t_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc')
        self.cleanup()

        # the timer provides 1-second intervals to the loop in run()
        # to make the cache system most responsive, we do not use a loop-
        # delay which makes it hard to get 1-second intervals without a timer
        self.timer_stop = Event()
        self.timer = CacheTimer(self.opts, self.timer_stop)
        self.timer.start()
        self.running = True
Example #13
    def get_minion_grains(self):
        '''
        Get grains data for the targeted minions, either by fetching the
        cached minion data on the master, or by fetching the grains
        directly on the minion.

        By default, this function tries hard to get the grains data:
            - Try to get the cached minion grains if the master
                has minion_data_cache: True
            - If the grains data for the minion is cached, use it.
            - If there is no cached grains data for a minion,
                then try to get the minion grains directly from the minion.
        '''
        minion_grains = {}
        minion_ids = self._tgt_to_list()
        if any(arg for arg in [self.use_cached_grains, self.grains_fallback]):
            log.debug('Getting cached minion data.')
            cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(*minion_ids)
        else:
            cached_minion_grains = {}
        log.debug('Getting minion grain data for: {0}'.format(minion_ids))
        minion_grains = self._get_minion_grains(
                                        *minion_ids,
                                        cached_grains=cached_minion_grains)
        return minion_grains
Example #14
 def immutable_encoder(obj):
     log.debug('IMMUTABLE OBJ: {0}'.format(obj))
     if isinstance(obj, immutabletypes.ImmutableDict):
         return dict(obj)
     if isinstance(obj, immutabletypes.ImmutableList):
         return list(obj)
     if isinstance(obj, immutabletypes.ImmutableSet):
         return set(obj)
Example #15
 def run(self):
     '''
     Gather currently connected minions and update the cache
     '''
     new_mins = list(salt.utils.minions.CkMinions(self.opts).connected_ids())
     cc = cache_cli(self.opts)
     cc.get_cached()
     cc.put_cache([new_mins])
     log.debug('ConCache CacheWorker update finished')
Example #16
 def get_cached_mine_data(self):
     '''
     Get cached mine data for the targeted minions.
     '''
     mine_data = {}
     minion_ids = self._tgt_to_list()
     log.debug('Getting cached mine data for: {0}'.format(minion_ids))
     mine_data = self._get_cached_mine_data(*minion_ids)
     return mine_data
Example #17
def _linux_gpu_data():
    '''
    num_gpus: int
    gpus:
      - vendor: nvidia|amd|ati|...
        model: string
    '''
    # dominant gpu vendors to search for (MUST be lowercase for matching below)
    known_vendors = ['nvidia', 'amd', 'ati', 'intel']

    devs = []
    try:
        lspci_out = __salt__['cmd.run']('lspci -vmm')

        cur_dev = {}
        error = False
        for line in lspci_out.splitlines():
            # check for record-separating empty lines
            if line == '':
                if cur_dev.get('Class', '') == 'VGA compatible controller':
                    devs.append(cur_dev)
                # XXX; may also need to search for "3D controller"
                cur_dev = {}
                continue
            if re.match(r'^\w+:\s+.*', line):
                key, val = line.split(':', 1)
                cur_dev[key.strip()] = val.strip()
            else:
                error = True
                log.debug('Unexpected lspci output: \'{0}\''.format(line))

        if error:
            log.warn(
                'Error loading grains, unexpected linux_gpu_data output, '
                'check that you have a valid shell configured and '
                'permissions to run lspci command'
            )
    except OSError:
        pass

    gpus = []
    for gpu in devs:
        vendor_strings = gpu['Vendor'].lower().split()
        # default vendor to 'unknown', overwrite if we match a known one
        vendor = 'unknown'
        for name in known_vendors:
            # search for an 'expected' vendor name in the list of strings
            if name in vendor_strings:
                vendor = name
                break
        gpus.append({'vendor': vendor, 'model': gpu['Device']})

    grains = {}
    grains['num_gpus'] = len(gpus)
    grains['gpus'] = gpus
    return grains
Example #18
 def _tgt_to_list(self):
     # Return a list of minion ids that match the target and expr_form
     minion_ids = []
     ckminions = salt.utils.minions.CkMinions(self.opts)
     minion_ids = ckminions.check_minions(self.tgt, self.expr_form)
     if len(minion_ids) == 0:
         log.debug('No minions matched for tgt="{0}" and expr_form="{1}"'.format(self.tgt, self.expr_form))
         return {}
     log.debug('Matching minions for tgt="{0}" and expr_form="{1}": {2}'.format(self.tgt, self.expr_form, minion_ids))
     return minion_ids
Example #19
 def _get_live_minion_grains(self, minion_ids):
     # Returns a dict of grains fetched directly from the minions
     log.debug('Getting live grains for minions: "{0}"'.format(minion_ids))
     client = salt.client.get_local_client(self.opts['conf_file'])
     ret = client.cmd(
                      ','.join(minion_ids),
                     'grains.items',
                     timeout=self.opts['timeout'],
                     expr_form='list')
     return ret
Example #20
 def renew(self):
     '''
     compares the current minion list against the ips
     connected on the master publisher port and updates
     the minion list accordingly
     '''
     log.debug('ConCache renewing minion cache')
     new_mins = list(salt.utils.minions.CkMinions(self.opts).connected_ids())
     self.minions = new_mins
     log.debug('ConCache received {0} minion ids'.format(len(new_mins)))
Example #21
def _dmidecode_data(regex_dict):
    '''
    Parse the output of dmidecode in a generic fashion that can
    be used for the multiple system types which have dmidecode.
    '''
    ret = {}

    if 'proxyminion' in __opts__:
        return {}

    # No use running if dmidecode/smbios isn't in the path
    if salt.utils.which('dmidecode'):
        out = __salt__['cmd.run']('dmidecode')
    elif salt.utils.which('smbios'):
        out = __salt__['cmd.run']('smbios')
    else:
        log.debug(
            'The `dmidecode` binary is not available on the system. GPU '
            'grains will not be available.'
        )
        return ret

    for section in regex_dict:
        section_found = False

        # Look at every line for the right section
        for line in out.splitlines():
            if not line:
                continue
            # We've found it, woohoo!
            if re.match(section, line):
                section_found = True
                continue
            if not section_found:
                continue

            # Now that a section has been found, find the data
            for item in regex_dict[section]:
                # Examples:
                #    Product Name: 64639SU
                #    Version: 7LETC1WW (2.21 )
                regex = re.compile(r'\s+{0}\s+(.*)$'.format(item))
                grain = regex_dict[section][item]
                # Skip to the next iteration if this grain
                # has been found in the dmidecode output.
                if grain in ret:
                    continue

                match = regex.match(line)

                # Finally, add the matched data to the grains returned
                if match:
                    ret[grain] = match.group(1).strip()
    return ret
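
A standalone look at the per-item regex, using a line invented from the comment above and assuming the regex_dict item keys include the trailing colon:

    import re

    line = '        Product Name: 64639SU'
    item = 'Product Name:'
    regex = re.compile(r'\s+{0}\s+(.*)$'.format(item))
    match = regex.match(line)
    print(match.group(1).strip())  # 64639SU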
Example #22
    def __connect(self):
        '''
        Connect to the Jabber server.
        '''
        log.debug('connecting to {}:{}'.format(*self.server_addr))
        if not self.connect(self.server_addr):
            raise JabberError('connect failed: {}:{}'
                                .format(*self.server_addr))

        # Process Jabber messages forever in a background thread
        self.process(block=False)
Example #23
def get_elb_lbs():
    """
    Returns a dictionary of load balancer names as keys
    each with their respective attributes
    """

    # attributes to extract from the load balancer boto objects
    # this could possibly be a named argument too
    extract_attrs = ['scheme', 'dns_name', 'vpc_id', 'name', 'security_groups']

    try:
        instance_metadata = boto.utils.get_instance_metadata(timeout=5, num_retries=2)
    except Exception as e:
        log.exception("Error getting ELB names: {}".format(e))
        return {'custom_grain_error': True}

    # Setup the lbs grain
    lbs_grain = {'lbs': {}}

    # Collect details about this instance
    vpc_id = instance_metadata['network']['interfaces']['macs'].values()[0]['vpc-id']
    region = instance_metadata['placement']['availability-zone'][:-1]

    # Collect load balancers of this instance (in the same vpc)
    try:
        elb_connection = boto.ec2.elb.connect_to_region(region)

        # find load balancers by vpc_id
        all_lbs = [lb for lb in elb_connection.get_all_load_balancers()
                   if lb.vpc_id == vpc_id]
        log.debug('all lbs before filtering by instance id: {}'.format(all_lbs))

        # further filter the load balancers by instance id
        lbs = [lb for lb in all_lbs for inst in lb.instances
               if inst.id == instance_metadata['instance-id']]
        # initialise and populate the output of load balancers
        out = {}
        [out.update({l.name: {}}) for l in lbs]
        [out[l.name].update({attr: getattr(l, attr, None)})
         for attr in extract_attrs for l in lbs]

        if not out:
            # This loglevel could perhaps be adjusted to something more visible
            log.warning("No ELBs found for this instance, this is unusual, "
                        "but we will not break highstate")

        lbs_grain['lbs'] = out

    except Exception as e:
        # This prints a user-friendly error with stacktrace
        log.exception("Error getting ELB names: {}".format(e))
        return {'custom_grain_error': True}

    return lbs_grain
Example #24
 def secure(self):
     '''
     secure the sockets for root-only access
     '''
     log.debug('ConCache securing sockets')
     if os.path.exists(self.cache_sock):
         os.chmod(self.cache_sock, 0o600)
     if os.path.exists(self.update_sock):
         os.chmod(self.update_sock, 0o600)
     if os.path.exists(self.upd_t_sock):
         os.chmod(self.upd_t_sock, 0o600)
Example #25
 def cleanup(self):
     '''
     remove sockets on shutdown
     '''
     log.debug('ConCache cleaning up')
     if os.path.exists(self.cache_sock):
         os.remove(self.cache_sock)
     if os.path.exists(self.update_sock):
         os.remove(self.update_sock)
     if os.path.exists(self.upd_t_sock):
         os.remove(self.upd_t_sock)
Example #26
def get_id():
    '''
    Guess the id of the minion.

    - If socket.getfqdn() returns us something other than localhost, use it
    - Check /etc/hosts for something that isn't localhost that maps to 127.*
    - Look for a routeable / public IP
    - A private IP is better than a loopback IP
    - localhost may be better than killing the minion
    '''

    log.debug('Guessing ID. The id can be explicitly set in {0}'
              .format('/etc/salt/minion'))
    fqdn = socket.getfqdn()
    if 'localhost' != fqdn:
        log.info('Found minion id from getfqdn(): {0}'.format(fqdn))
        return fqdn, False

    # Can /etc/hosts help us?
    try:
        # TODO Add Windows host file support
        with open('/etc/hosts') as f:
            line = f.readline()
            while line:
                names = line.split()
                ip = names.pop(0)
                if ip.startswith('127.'):
                    for name in names:
                        if name != 'localhost':
                            log.info('Found minion id in hosts file: {0}'
                                     .format(name))
                            return name, False
                line = f.readline()
    except Exception:
        pass

    # What IP addresses do we have?
    ip_addresses = [salt.utils.socket_util.IPv4Address(a) for a
                    in salt.utils.socket_util.ip4_addrs()
                    if not a.startswith('127.')]

    for a in ip_addresses:
        if not a.is_private:
            log.info('Using public ip address for id: {0}'.format(a))
            return str(a), True

    if ip_addresses:
        a = ip_addresses.pop(0)
        log.info('Using private ip address for id: {0}'.format(a))
        return str(a), True

    log.error('No id found, falling back to localhost')
    return 'localhost', False
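
The hosts-file scan in isolation, with an invented hosts line:

    line = '127.0.1.1   myhost.example.com myhost'
    names = line.split()
    ip = names.pop(0)
    candidates = []
    if ip.startswith('127.'):
        # keep any 127.* name that is not plain "localhost"
        candidates = [name for name in names if name != 'localhost']
    print(candidates)  # ['myhost.example.com', 'myhost']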
Example #27
def launch_server():
    """
    Start the salt master here
    """

    hostname = get_fqhostname()
    ip = host_to_ip(hostname)
    log.debug("Hellostack start server %s" % ip)

    master_opts = salt.config.client_config("/etc/salt/master")
    master = salt.master.Master(master_opts)
    master.start()
Example #28
 def _parse_subscriber(self, subscriber):
     '''
     Parse the subscriber string into the structure needed by _deliver().
     '''
     log.debug('add recipient: %s', subscriber)
     recipient = Recipient(subscriber,
                           max_msgs=self.max_msgs,
                           max_age=self.max_age,
                           state=UNKNOWN,
                           pending=self.pending)
     self.recipients[recipient.addr] = recipient
     return recipient
Example #29
 def compound_match(self, tgt):
     '''
     Runs the compound target check
     '''
     if not isinstance(tgt, string_types):
         log.debug('Compound target received that is not a string')
         return False
     ref = {'G': 'grain',
            'P': 'grain_pcre',
            'X': 'exsel',
            'I': 'pillar',
            'L': 'list',
            'S': 'ipcidr',
            'E': 'pcre'}
     if HAS_RANGE:
         ref['R'] = 'range'
     results = []
     opers = ['and', 'or', 'not', '(', ')']
     tokens = re.findall(r'[^\s()]+|[()]', tgt)
     for match in tokens:
         # Try to match tokens from the compound target, first by using
         # the 'G, X, I, L, S, E' matcher types, then by hostname glob.
         if '@' in match and match[1] == '@':
             comps = match.split('@')
             matcher = ref.get(comps[0])
             if not matcher:
                 # If an unknown matcher is called at any time, fail out
                 return False
             results.append(
                 str(
                     getattr(self, '{0}_match'.format(matcher))(
                         '@'.join(comps[1:])
                     )
                 )
             )
         elif match in opers:
             # We didn't match a target, so append a boolean operator or
             # subexpression
             if match == 'not':
                 if results[-1] == 'and':
                     pass
                 elif results[-1] == 'or':
                     pass
                 else:
                     results.append('and')
             results.append(match)
         else:
             # The match is not explicitly defined, evaluate it as a glob
             results.append(str(self.glob_match(match)))
     return eval(' '.join(results))
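
The tokenizer above splits a compound target into matcher tokens, operators, and parentheses; here is a standalone run against an invented target:

    import re

    tgt = 'webserv* and ( G@os:Debian or E@web-dc1-srv.* )'
    tokens = re.findall(r'[^\s()]+|[()]', tgt)
    print(tokens)
    # ['webserv*', 'and', '(', 'G@os:Debian', 'or', 'E@web-dc1-srv.*', ')']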
Example #30
 def deliver(self, alert):
     '''
     Deliver an alert sent from a minion.
     '''
     severity = alert.get('severity')
     if severity is not None:
         alert['severity'] = severity.lower()
         alert['SEVERITY'] = severity.upper()
     epoch_time = alert.get('time', time.time())
     alert['time'] = time.strftime(self.timeformat,
                                   time.localtime(epoch_time))
     alert['verb'] = self.verbs.get(alert.get('verb', DEFAULT_VERB))
     log.debug('deliver: %s', alert)
     for agent in self.agents.values():
         agent.deliver(alert)
Example #31
def collector(hostname, cmd, result):
    '''
    Collect data in a mongo database.
    '''
    conn = pymongo.Connection(
        __opts__['mongo.host'],
        __opts__['mongo.port'],
    )
    db = conn[__opts__['mongo.db']]

    user = __opts__.get('mongo.user')
    password = __opts__.get('mongo.password')
    if user and password:
        db.authenticate(user, password)

    collection = db[hostname]
    back = _escape_dot(result)
    log.debug(back)
    collection.insert({
        'utctime': datetime.datetime.utcnow(),
        'cmd': cmd,
        'result': back
    })
Example #32
 def _get_minion_grains(self, *minion_ids, **kwargs):
     # Get the minion grains either from cache or from a direct query
     # on the minion. By default try to use cached grains first, then
     # fall back to querying the minion directly.
     ret = {}
     cached_grains = kwargs.get('cached_grains', {})
     cret = {}
     lret = {}
     if self.use_cached_grains:
         cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_grains) if mcache])
         missed_minions = [minion_id for minion_id in minion_ids if minion_id not in cret]
         log.debug('Missed cached minion grains for: %s', missed_minions)
         if self.grains_fallback:
             lret = self._get_live_minion_grains(missed_minions)
         ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
     else:
         lret = self._get_live_minion_grains(minion_ids)
         missed_minions = [minion_id for minion_id in minion_ids if minion_id not in lret]
         log.debug('Missed live minion grains for: %s', missed_minions)
         if self.grains_fallback:
             cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_grains) if mcache])
         ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
     return ret
Example #33
 def _get_minion_pillar(self, *minion_ids, **kwargs):
     # Get the minion pillar either from cache or from a direct query
     # on the minion. By default, try to use the cached pillar first, then
     # fall back to rendering pillar on demand with the supplied grains.
     ret = {}
     grains = kwargs.get('grains', {})
     cached_pillar = kwargs.get('cached_pillar', {})
     cret = {}
     lret = {}
     if self.use_cached_pillar:
         cret = dict([(minion_id, mcache) for (minion_id, mcache) in cached_pillar.iteritems() if mcache])
         missed_minions = [minion_id for minion_id in minion_ids if minion_id not in cret]
         log.debug('Missed cached minion pillars for: {0}'.format(missed_minions))
         if self.pillar_fallback:
             lret = dict([(minion_id, self._get_live_minion_pillar(minion_id, grains.get(minion_id, {}))) for minion_id in missed_minions])
         ret = dict(dict([(minion_id, {}) for minion_id in minion_ids]).items() + lret.items() + cret.items())
     else:
         lret = dict([(minion_id, self._get_live_minion_pillar(minion_id, grains.get(minion_id, {}))) for minion_id in minion_ids])
         missed_minions = [minion_id for minion_id in minion_ids if minion_id not in lret]
         log.debug('Missed live minion pillars for: {0}'.format(missed_minions))
         if self.pillar_fallback:
             cret = dict([(minion_id, mcache) for (minion_id, mcache) in cached_pillar.iteritems() if mcache])
         ret = dict(dict([(minion_id, {}) for minion_id in minion_ids]).items() + lret.items() + cret.items())
     return ret
Example #34
 def _get_minion_pillar(self, *minion_ids, **kwargs):
     # Get the minion pillar either from cache or from a direct query
     # on the minion. By default, try to use the cached pillar first, then
     # fall back to rendering pillar on demand with the supplied grains.
     ret = {}
     grains = kwargs.get('grains', {})
     cached_pillar = kwargs.get('cached_pillar', {})
     cret = {}
     lret = {}
     if self.use_cached_pillar:
         cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_pillar) if mcache])
         missed_minions = [minion_id for minion_id in minion_ids if minion_id not in cret]
         log.debug('Missed cached minion pillars for: %s', missed_minions)
         if self.pillar_fallback and missed_minions:
             lret = dict([(minion_id, self._get_live_minion_pillar(minion_id, grains.get(minion_id, {}))) for minion_id in missed_minions])
         ret = {key: value for key, value in [(minion_id, {}) for minion_id in minion_ids] + list(six.iteritems(cret)) + list(six.iteritems(lret))}
     else:
         lret = dict([(minion_id, self._get_live_minion_pillar(minion_id, grains.get(minion_id, {}))) for minion_id in minion_ids])
         missed_minions = [minion_id for minion_id in minion_ids if minion_id not in lret]
         log.debug('Missed live minion pillars for: %s', missed_minions)
         if self.pillar_fallback and missed_minions:
             cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_pillar) if mcache])
         ret = {key: value for key, value in [(minion_id, {}) for minion_id in minion_ids] + list(six.iteritems(cret)) + list(six.iteritems(lret))}
     return ret
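
The merges in Examples #32 to #34 rely on later entries winning when a dict is built from concatenated item lists, so cached or live data overrides the empty per-minion defaults; a minimal illustration with invented minion ids:

    defaults = {'minion1': {}, 'minion2': {}}
    live = {'minion1': {'os': 'Ubuntu'}}
    cached = {'minion2': {'os': 'Debian'}}
    merged = dict(list(defaults.items()) + list(live.items()) + list(cached.items()))
    print(merged)  # {'minion1': {'os': 'Ubuntu'}, 'minion2': {'os': 'Debian'}}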
Example #35
    def stop(self, signal, frame):
        '''
        We override stop() to break our main loop
        and have a pretty log message
        '''
        log.info("Received signal {0}".format(signal))

        # if we have running workers, run through all and join() the ones
        # that have finished. if we still have running workers after that,
        # wait 5 secs for the rest and then exit. Maybe we should improve
        # this a little bit more
        if len(self.running_workers) > 0:
            clean_workers = []

            for count in range(0, 2):
                for worker in self.running_workers:
                    if worker.isAlive():
                        clean_workers.append(worker)
                    else:
                        worker.join()
                        log.debug("Joined worker #{0}".format(
                            worker.getName()))

                if len(clean_workers) > 0:
                    log.info("Waiting 5secs for remaining workers..")
                    time.sleep(5)
                else:
                    break

        log.info("salt-eventsd has shut down")

        # leave the cleanup to the supers stop
        try:
            super(SaltEventsDaemon, self).stop(signal, frame)
        except (IOError, OSError):
            os._exit(0)
Example #36
def load_config(opts, path, env_var):
    '''
    Attempts to update ``opts`` dict by parsing either the file described by
    ``path`` or the environment variable described by ``env_var`` as YAML.
    '''
    if path is None:
        # When the passed path is None, we just want the configuration
        # defaults, not actually loading the whole configuration.
        return opts

    if not path or not os.path.isfile(path):
        path = os.environ.get(env_var, path)
    # If the configuration file is missing, attempt to copy the template,
    # after removing the first header line.
    if not os.path.isfile(path):
        template = '{0}.template'.format(path)
        if os.path.isfile(template):
            import salt.utils  # TODO: Need to re-import, need to find out why
            with salt.utils.fopen(path, 'w') as out:
                with salt.utils.fopen(template, 'r') as f:
                    f.readline()  # skip first line
                    out.write(f.read())

    if os.path.isfile(path):
        try:
            opts.update(_read_conf_file(path))
            opts['conf_file'] = path
        except Exception as e:
            import salt.log
            msg = 'Error parsing configuration file: {0} - {1}'
            if salt.log.is_console_configured():
                log.warn(msg.format(path, e))
            else:
                print(msg.format(path, e))
    else:
        log.debug('Missing configuration file: {0}'.format(path))
Example #37
    def get_minion_pillar(self):
        """
        Get pillar data for the targeted minions, either by fetching the
        cached minion data on the master, or by compiling the minion's
        pillar data on the master.

        For runner modules that need access to minion pillar data, this
        function should be used instead of getting the pillar data by
        executing the pillar module on the minions.

        By default, this function tries hard to get the pillar data:
            - Try to get the cached minion grains and pillar if the
                master has minion_data_cache: True
            - If the pillar data for the minion is cached, use it.
            - If there is no cached grains/pillar data for a minion,
                then try to get the minion grains directly from the minion.
            - Use the minion grains to compile the pillar directly from the
                master using salt.pillar.Pillar
        """
        minion_pillars = {}
        minion_grains = {}
        minion_ids = self._tgt_to_list()
        if self.tgt and not minion_ids:
            return {}
        if any(
            arg
            for arg in [
                self.use_cached_grains,
                self.use_cached_pillar,
                self.grains_fallback,
                self.pillar_fallback,
            ]
        ):
            log.debug("Getting cached minion data")
            cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(
                *minion_ids
            )
        else:
            cached_minion_grains = {}
            cached_minion_pillars = {}
        log.debug("Getting minion grain data for: %s", minion_ids)
        minion_grains = self._get_minion_grains(
            *minion_ids, cached_grains=cached_minion_grains
        )
        log.debug("Getting minion pillar data for: %s", minion_ids)
        minion_pillars = self._get_minion_pillar(
            *minion_ids, grains=minion_grains, cached_pillar=cached_minion_pillars
        )
        return minion_pillars
Example #38
def _read_proc_file(path, opts):
    '''
    Return a dict of JID metadata, or None
    '''
    serial = salt.payload.Serial(opts)
    with salt.utils.files.fopen(path, 'rb') as fp_:
        buf = fp_.read()
        fp_.close()
        if buf:
            data = serial.loads(buf)
        else:
            # Proc file is empty, remove
            try:
                os.remove(path)
            except IOError:
                log.debug('Unable to remove proc file %s.', path)
            return None
    if not isinstance(data, dict):
        # Invalid serial object
        return None
    if not salt.utils.process.os_is_running(data['pid']):
        # The process is no longer running, clear out the file and
        # continue
        try:
            os.remove(path)
        except IOError:
            log.debug('Unable to remove proc file %s.', path)
        return None

    if not _check_cmdline(data):
        pid = data.get('pid')
        if pid:
            log.warning(
                'PID %s exists but does not appear to be a salt process.', pid
            )
        try:
            os.remove(path)
        except IOError:
            log.debug('Unable to remove proc file %s.', path)
        return None
    return data
Example #39
    A single monitor task.
    '''
    def __init__(self, taskid, pyexe, context, scheduler=None):
        self.taskid    = taskid
        self.code      = pyexe
        self.context   = context
        self.scheduler = scheduler

    def run(self):
        log.trace('start thread for %s', self.taskid)
        minion = self.context.get('id')
        collector = self.context.get('collector')
        while True:
            try:
                exec(self.code, self.context)
            except Exception as ex:
                log.error("can't execute %s: %s", self.taskid, ex, exc_info=ex)
            if collector:
                jid = datetime.datetime.strftime(
                             datetime.datetime.now(), 'M%Y%m%d%H%M%S%f')
                try:
                    collector(minion, self.context['cmd'], self.context['result'])
                except Exception as ex:
                    log.error('monitor error: %s', self.taskid, exc_info=ex)
            if self.scheduler is None:
                break
            duration = self.scheduler.next()
            log.trace('%s: sleep %s seconds', self.taskid, duration)
            time.sleep(duration)
        log.debug('thread exit: %s', self.taskid)
Example #40
    def loads(self, msg, encoding=None, raw=False):
        """
        Run the correct loads serialization format

        :param encoding: Useful for Python 3 support. If the msgpack data
                         was encoded using "use_bin_type=True", this will
                         differentiate between the 'bytes' type and the
                         'str' type by decoding contents with 'str' type
                         to what the encoding was set as. Recommended
                         encoding is 'utf-8' when using Python 3.
                         If the msgpack data was not encoded using
                         "use_bin_type=True", it will try to decode
                         all 'bytes' and 'str' data (the distinction has
                         been lost in this case) to what the encoding is
                         set as. In this case, it will fail if any of
                         the contents cannot be converted.
        """
        try:

            def ext_type_decoder(code, data):
                if code == 78:
                    data = salt.utils.stringutils.to_unicode(data)
                    return datetime.datetime.strptime(data,
                                                      "%Y%m%dT%H:%M:%S.%f")
                return data

            gc.disable()  # performance optimization for msgpack
            loads_kwargs = {"use_list": True, "ext_hook": ext_type_decoder}
            if salt.utils.msgpack.version >= (0, 4, 0):
                # msgpack only supports 'encoding' starting in 0.4.0.
                # Due to this, if we don't need it, don't pass it at all so
                # that under Python 2 we can still work with older versions
                # of msgpack.
                if salt.utils.msgpack.version >= (0, 5, 2):
                    if encoding is None:
                        loads_kwargs["raw"] = True
                    else:
                        loads_kwargs["raw"] = False
                else:
                    loads_kwargs["encoding"] = encoding
                try:
                    ret = salt.utils.msgpack.unpackb(msg, **loads_kwargs)
                except UnicodeDecodeError:
                    # msg contains binary data
                    loads_kwargs.pop("raw", None)
                    loads_kwargs.pop("encoding", None)
                    ret = salt.utils.msgpack.loads(msg, **loads_kwargs)
            else:
                ret = salt.utils.msgpack.loads(msg, **loads_kwargs)
            if encoding is None and not raw:
                ret = salt.transport.frame.decode_embedded_strs(ret)
        except Exception as exc:  # pylint: disable=broad-except
            log.critical(
                "Could not deserialize msgpack message. This often happens "
                "when trying to read a file not in binary mode. "
                "To see message payload, enable debug logging and retry. "
                "Exception: %s",
                exc,
            )
            log.debug("Msgpack deserialization failure on message: %s", msg)
            gc.collect()

            exc_msg = "Could not deserialize msgpack message. See log for more info."
            raise SaltDeserializationError(exc_msg) from exc
        finally:
            gc.enable()
        return ret
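
A standalone round trip for the datetime ext type handled by ext_type_decoder above (requires the msgpack package; ext code 78 and the timestamp format are taken from the snippet):

    import datetime
    import msgpack

    stamp = datetime.datetime(2023, 1, 2, 3, 4, 5, 678900)
    packed = msgpack.packb(
        msgpack.ExtType(78, stamp.strftime('%Y%m%dT%H:%M:%S.%f').encode()))

    def ext_type_decoder(code, data):
        if code == 78:
            return datetime.datetime.strptime(data.decode(), '%Y%m%dT%H:%M:%S.%f')
        return msgpack.ExtType(code, data)

    print(msgpack.unpackb(packed, ext_hook=ext_type_decoder, raw=False))
    # 2023-01-02 03:04:05.678900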
Example #41
    def _thread_return(class_, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if not minion_instance:
            minion_instance = class_(opts)
        if opts['multiprocessing']:
            fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
            sdata = {'pid': os.getpid()}
            sdata.update(data)
            with salt.utils.fopen(fn_, 'w+') as fp_:
                fp_.write(minion_instance.serial.dumps(sdata))
        ret = {}
        for ind in range(0, len(data['arg'])):
            try:
                arg = eval(data['arg'][ind])
                if isinstance(arg, bool):
                    data['arg'][ind] = str(data['arg'][ind])
                elif isinstance(arg, (dict, int, list, string_types)):
                    data['arg'][ind] = arg
                else:
                    data['arg'][ind] = str(data['arg'][ind])
            except Exception:
                pass

        function_name = data['fun']
        if function_name in minion_instance.functions:
            ret['success'] = False
            try:
                func = minion_instance.functions[data['fun']]
                args, kwargs = detect_kwargs(func, data['arg'], data)
                ret['return'] = func(*args, **kwargs)
                ret['success'] = True
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found: {1}'
                log.debug(msg.format(function_name, str(exc)))
                ret['return'] = msg.format(function_name, str(exc))
            except CommandExecutionError as exc:
                msg = 'A command in {0} had a problem: {1}'
                log.error(msg.format(function_name, str(exc)))
                ret['return'] = 'ERROR: {0}'.format(str(exc))
            except SaltInvocationError as exc:
                msg = 'Problem executing "{0}": {1}'
                log.error(msg.format(function_name, str(exc)))
                ret['return'] = 'ERROR executing {0}: {1}'.format(
                    function_name, exc)
            except Exception:
                trb = traceback.format_exc()
                msg = 'The minion function caused an exception: {0}'
                log.warning(msg.format(trb))
                ret['return'] = trb
        else:
            ret['return'] = '"{0}" is not available.'.format(function_name)

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        minion_instance._return_pub(ret)
        if data['ret']:
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(returner)](
                        ret)
                except Exception as exc:
                    log.error('The return failed for job {0} {1}'.format(
                        data['jid'], exc))
Example #42
def _linux_gpu_data():
    '''
    num_gpus: int
    gpus:
      - vendor: nvidia|amd|ati|...
        model: string
    '''
    lspci = salt.utils.which('lspci')
    if not lspci:
        log.info(
            'The `lspci` binary is not available on the system. GPU grains '
            'will not be available.')
        return {}

    elif __opts__.get('enable_gpu_grains', None) is False:
        log.info(
            'Skipping lspci call because enable_gpu_grains was set to False '
            'in the config. GPU grains will not be available.')
        return {}

    # dominant gpu vendors to search for (MUST be lowercase for matching below)
    known_vendors = ['nvidia', 'amd', 'ati', 'intel']

    devs = []
    try:
        lspci_out = __salt__['cmd.run']('lspci -vmm')

        cur_dev = {}
        error = False
        # Add a blank element to the lspci_out.splitlines() list,
    # otherwise the last device is not evaluated as a cur_dev and is ignored.
        lspci_list = lspci_out.splitlines()
        lspci_list.append('')
        for line in lspci_list:
            # check for record-separating empty lines
            if line == '':
                if cur_dev.get('Class', '') == 'VGA compatible controller':
                    devs.append(cur_dev)
                # XXX; may also need to search for "3D controller"
                cur_dev = {}
                continue
            if re.match(r'^\w+:\s+.*', line):
                key, val = line.split(':', 1)
                cur_dev[key.strip()] = val.strip()
            else:
                error = True
                log.debug('Unexpected lspci output: \'{0}\''.format(line))

        if error:
            log.warn('Error loading grains, unexpected linux_gpu_data output, '
                     'check that you have a valid shell configured and '
                     'permissions to run lspci command')
    except OSError:
        pass

    gpus = []
    for gpu in devs:
        vendor_strings = gpu['Vendor'].lower().split()
        # default vendor to 'unknown', overwrite if we match a known one
        vendor = 'unknown'
        for name in known_vendors:
            # search for an 'expected' vendor name in the list of strings
            if name in vendor_strings:
                vendor = name
                break
        gpus.append({'vendor': vendor, 'model': gpu['Device']})

    grains = {}
    grains['num_gpus'] = len(gpus)
    grains['gpus'] = gpus
    return grains
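
The lspci -vmm record parsing can be exercised standalone with invented output (note the appended blank line, as in Example #42):

    import re

    lspci_out = ('Slot:\t01:00.0\n'
                 'Class:\tVGA compatible controller\n'
                 'Vendor:\tNVIDIA Corporation\n'
                 'Device:\tGK208B [GeForce GT 710]\n'
                 '\n'
                 'Slot:\t02:00.0\n'
                 'Class:\tEthernet controller\n'
                 'Vendor:\tIntel Corporation\n'
                 'Device:\tI210 Gigabit Network Connection\n')

    devs = []
    cur_dev = {}
    for line in lspci_out.splitlines() + ['']:
        if line == '':
            # blank line ends a record; keep only VGA controllers
            if cur_dev.get('Class', '') == 'VGA compatible controller':
                devs.append(cur_dev)
            cur_dev = {}
            continue
        if re.match(r'^\w+:\s+.*', line):
            key, val = line.split(':', 1)
            cur_dev[key.strip()] = val.strip()

    print([d['Device'] for d in devs])  # ['GK208B [GeForce GT 710]']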
Example #43
    def cmd_block(self, is_retry=False):
        '''
        Prepare the pre-check command to send to the subsystem
        '''
        # 1. execute SHIM + command
        # 2. check if SHIM returns a master request or if it completed
        # 3. handle any master request
        # 4. re-execute SHIM + command
        # 5. split SHIM results from command results
        # 6. return command results

        log.debug('Performing shimmed, blocking command as follows:\n{0}'.format(' '.join(self.argv)))
        cmd_str = self._cmd_str()
        stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)

        log.debug('STDOUT {1}\n{0}'.format(stdout, self.target['host']))
        log.debug('STDERR {1}\n{0}'.format(stderr, self.target['host']))
        log.debug('RETCODE {1}: {0}'.format(retcode, self.target['host']))

        error = self.categorize_shim_errors(stdout, stderr, retcode)
        if error:
            if error == 'Undefined SHIM state':
                self.deploy()
                stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
                if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                    # If RSTR is not seen in both stdout and stderr then there
                    # was a thin deployment problem.
                    return 'ERROR: Failure deploying thin: {0}'.format(stdout), stderr, retcode
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
            else:
                return 'ERROR: {0}'.format(error), stderr, retcode

        # FIXME: this discards output from ssh_shim if the shim succeeds.  It should
        # always save the shim output regardless of shim success or failure.
        if re.search(RSTR_RE, stdout):
            stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
        else:
            # This is actually an error state prior to the shim but let it fall through
            pass

        if re.search(RSTR_RE, stderr):
            # Found RSTR in stderr, which means the SHIM completed and the
            # remaining output is only from salt.
            stderr = re.split(RSTR_RE, stderr, 1)[1].strip()

        else:
            # RSTR was found in stdout but not stderr - which means there
            # is a SHIM command for the master.
            shim_command = re.split(r'\r?\n', stdout, 1)[0].strip()
            if 'deploy' == shim_command and retcode == salt.exitcodes.EX_THIN_DEPLOY:
                self.deploy()
                stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
                if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                    # If RSTR is not seen in both stdout and stderr then there
                    # was a thin deployment problem.
                    return 'ERROR: Failure deploying thin: {0}'.format(stdout), stderr, retcode
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()

        return stdout, stderr, retcode
Example #44
def load_config(path, env_var, default_path=None):
    '''
    Returns configuration dict from parsing either the file described by
    ``path`` or the environment variable described by ``env_var`` as YAML.
    '''
    if path is None:
        # When the passed path is None, we just want the configuration
        # defaults, not actually loading the whole configuration.
        return {}

    if default_path is None:
        # This is most likely not being used from salt, i.e., could be salt-cloud
        # or salt-api which have not yet migrated to the new default_path
        # argument. Let's issue a warning message that the environ vars won't
        # work.
        import inspect
        previous_frame = inspect.getframeinfo(inspect.currentframe().f_back)
        log.warning(
            'The function \'{0}()\' defined in {1!r} is not yet using the '
            'new \'default_path\' argument to `salt.config.load_config()`. '
            'As such, the {2!r} environment variable will be ignored'.format(
                previous_frame.function, previous_frame.filename, env_var))
        # In this case, maintain old behaviour
        default_path = DEFAULT_MASTER_OPTS['conf_file']

    # Default to the environment variable path, if it exists
    env_path = os.environ.get(env_var, path)
    if not env_path or not os.path.isfile(env_path):
        env_path = path
    # If non-default path from `-c`, use that over the env variable
    if path != default_path:
        env_path = path

    path = env_path

    # If the configuration file is missing, attempt to copy the template,
    # after removing the first header line.
    if not os.path.isfile(path):
        template = '{0}.template'.format(path)
        if os.path.isfile(template):
            import salt.utils  # TODO: Need to re-import, need to find out why
            log.debug('Writing {0} based on {1}'.format(path, template))
            with salt.utils.fopen(path, 'w') as out:
                with salt.utils.fopen(template, 'r') as ifile:
                    ifile.readline()  # skip first line
                    out.write(ifile.read())

    if os.path.isfile(path):
        try:
            opts = _read_conf_file(path)
            opts['conf_file'] = path
            return opts
        except Exception as err:
            import salt.log
            msg = 'Error parsing configuration file: {0} - {1}'
            if salt.log.is_console_configured():
                log.warn(msg.format(path, err))
            else:
                print(msg.format(path, err))
    else:
        log.debug('Missing configuration file: {0}'.format(path))

    return {}
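
A standalone sketch of the path-selection precedence above (the environment variable name and paths are invented for illustration):

    import os

    default_path = '/etc/salt/master'
    path = '/home/user/custom-master.conf'       # e.g. an explicitly passed path
    env_var = 'EXAMPLE_MASTER_CONFIG'

    env_path = os.environ.get(env_var, path)
    if not env_path or not os.path.isfile(env_path):
        env_path = path
    if path != default_path:                      # an explicit path overrides the env var
        env_path = path
    print(env_path)  # /home/user/custom-master.conf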
Example #45
    def run(self):
        '''
        Main loop of the FSCache, checks schedule, retrieves result-data
        from the workers and answer requests with data from the cache
        '''
        context = zmq.Context()
        # the socket for incoming cache requests
        creq_in = context.socket(zmq.REP)
        creq_in.setsockopt(zmq.LINGER, 100)
        creq_in.bind('ipc:///' + self.cache_sock)

        # the socket for incoming cache-updates from workers
        cupd_in = context.socket(zmq.REP)
        cupd_in.setsockopt(zmq.LINGER, 100)
        cupd_in.bind('ipc:///' + self.update_sock)

        # wait for the timer to bind to its socket
        log.debug('wait 2 secs for the timer')
        time.sleep(2)

        # the socket for the timer-event
        timer_in = context.socket(zmq.PULL)
        timer_in.setsockopt(zmq.LINGER, 100)
        timer_in.connect('ipc:///' + self.upd_t_sock)

        poller = zmq.Poller()
        poller.register(creq_in, zmq.POLLIN)
        poller.register(cupd_in, zmq.POLLIN)
        poller.register(timer_in, zmq.POLLIN)

        # our serializer
        serial = salt.payload.Serial(self.opts.get('serial', ''))

        # register a signal handler
        signal.signal(signal.SIGINT, self.signal_handler)

        # secure the sockets from the world
        self.secure()

        log.info('FSCache started')
        log.debug('FSCache started')

        while self.running:

            # we check for new events with the poller
            try:
                socks = dict(poller.poll())
            except KeyboardInterrupt:
                self.stop()
                continue
            except zmq.ZMQError as zmq_err:
                log.error('FSCache ZeroMQ error occurred')
                log.exception(zmq_err)
                self.stop()
                continue

            # check for next cache-request
            if socks.get(creq_in) == zmq.POLLIN:
                msg = serial.loads(creq_in.recv())
                log.debug('Received request: {0}'.format(msg))

                # we only accept requests as lists [req_id, <path>]
                if isinstance(msg, list):
                    # for now only one item is assumed to be requested
                    msgid, file_n = msg[:]
                    log.debug('Looking for {0}:{1}'.format(msgid, file_n))

                    fdata = self.path_data.get(file_n, None)

                    if fdata is not None:
                        log.debug('Cache HIT')
                    else:
                        log.debug('Cache MISS')

                    # simulate slow caches
                    #randsleep = random.randint(0,3)
                    #time.sleep(randsleep)

                    # Send reply back to client
                    reply = serial.dumps([msgid, fdata])
                    creq_in.send(reply)

                # wrong format, item not cached
                else:
                    reply = serial.dumps([None, None])
                    creq_in.send(reply)

            # check for next cache-update from workers
            elif socks.get(cupd_in) == zmq.POLLIN:
                new_c_data = serial.loads(cupd_in.recv())
                # tell the worker to exit
                cupd_in.send(serial.dumps('OK'))

                # check if the returned data is usable
                if not isinstance(new_c_data, dict):
                    log.error('Worker returned unusable result')
                    del new_c_data
                    continue

                # the workers will return differing data:
                # 1. '{'file1': <data1>, 'file2': <data2>,...}' - a cache update
                # 2. '{search-path: None}' -  job was not run, pre-checks failed
                # 3. '{}' - no files found, check the pattern if defined?
                # 4. anything else is considered malformed

                if len(new_c_data) == 0:
                    log.debug('Got empty update from worker')
                elif next(iter(new_c_data.values())) is not None:
                    log.debug('Got cache update with {0} item(s)'.format(
                        len(new_c_data)))
                    self.path_data.update(new_c_data)
                else:
                    log.debug('Got malformed result dict from worker')

                log.info('{0} entries in cache'.format(len(self.path_data)))

            # check for next timer-event to start new jobs
            elif socks.get(timer_in) == zmq.POLLIN:
                sec_event = serial.loads(timer_in.recv())

                log.debug('Timer event: #{0}'.format(sec_event))

                # loop through the jobs and start if a jobs ival matches
                for item in self.jobs:
                    if sec_event in self.jobs[item]['ival']:
                        self.run_job(item)
        self.stop()
        creq_in.close()
        cupd_in.close()
        timer_in.close()
        context.term()
        log.debug('Shutting down')
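
The comment block in run() above lists the four shapes a worker result can take. As a minimal, standalone restatement of that classification (the function name and the labels are made up for illustration; this is not salt code):

def classify_worker_result(result):
    '''
    Mirror the update handling above: a dict of path -> data is a cache
    update, {search-path: None} means the job was skipped, {} means no
    files were found, and anything else is treated as malformed.
    '''
    if not isinstance(result, dict):
        return 'malformed'
    if len(result) == 0:
        return 'empty'
    if next(iter(result.values())) is None:
        return 'skipped'
    return 'update'

# the four shapes described in the comment block above
assert classify_worker_result({'file1': 'data1'}) == 'update'
assert classify_worker_result({'/some/path': None}) == 'skipped'
assert classify_worker_result({}) == 'empty'
assert classify_worker_result(['not', 'a', 'dict']) == 'malformed'
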
Beispiel #46
0
    def listen(self):
        '''
        the main event loop where we receive the events and
        start the workers that dump our data into the database
        '''
        # log on to saltstacks event-bus
        event = salt.utils.event.SaltEvent(self.node,
                                           self.sock_dir)

        # we store our events in a list; we don't really care about ordering
        # or what kind of data is put in there. all of that is handled by the
        # sql-template configured in the configfile
        event_queue = []

        # start our dump_timer
        self.ev_timer.start()

        # this is for logline chronology so the timer-message always comes
        # _before_ the actual startup-message of the listening loop below :-)
        time.sleep(1)

        log.info("entering main event loop")
        log.info("listening on: {0}".format(event.puburi))

        # read everything we can get our hands on
        while True:

            # the zmq-socket does not like ^C very much, make the error
            # a little more graceful. alright, alright, ignore the damn thing,
            # we're exiting anyways...
            try:
                ret = event.get_event(full=True)
            except zmq.ZMQError:
                # make sure 'ret' is defined before the check below
                ret = None

            if ret is None:
                continue

            # if the timer has expired, we may have not received enough
            # events in the queue to reach event_limit, in that case we dump
            # the data anyway to have it in the database
            if self.ev_timer_ev:
                if (len(self.running_workers) < self.max_workers) and \
                   (len(event_queue) > 0):

                    self._init_worker(event_queue)

                    # reset our queue to prevent duplicate entries
                    del event_queue[:]

                    # we reset the ev_timer_ev flag at the end of the loop
                    # so we can update the stats that are logged

            # filter only the events we're interested in. all events have a tag
            # we can filter them by. we match with a precompiled regex
            if 'tag' in ret:

                # filter out events with an empty tag. those are special
                if ret['tag'] != '':

                    # run through our configured events and try to match the
                    # current event's tag against the ones we're interested in
                    for key in self.event_map:
                        if self.event_map[key]['tag'].match(ret['tag']):
                            log.debug("matching on {0}:{1}".format(key,
                                                                   ret['tag']))

                            prio = self.event_map[key].get('prio', 0)

                            # push prio1-events directly into a worker
                            if prio > 0:
                                log.debug('Prio1 event found, pushing immediately!')
                                self.events_han += 1
                                self._init_worker([ret])
                            else:
                                event_queue.append(ret)
                                self.events_han += 1

            # once we reach the event_limit, start a worker that
            # writes that data in to the database
            if len(event_queue) >= self.event_limit:

                # only start a worker if not too many workers are running
                if len(self.running_workers) < self.max_workers:
                    self._init_worker(event_queue)
                    # reset the timer
                    self.ev_timer.reset()

                    # reset our queue to prevent duplicate entries
                    del event_queue[:]

                else:
                    # FIXME: we need to handle this situation somehow if
                    # too many workers are running. just flush the events?
                    # there really is no sane way except queueing more and more
                    # until some sort of limit is reached and we care more about
                    # our saltmaster than about the collected events!
                    log.critical("too many workers running, loosing data!!!")
                   
            # a list for the workers that are still running
            clean_workers = []

            # run through all the workers and join() the ones
            # that have finished dumping their data and keep
            # the running ones on our list
            for worker in self.running_workers:
                if worker.is_alive():
                    clean_workers.append(worker)
                else:
                    worker.join()
                    log.debug("joined worker #{0}".format(worker.getName()))
                    self.threads_join += 1

            # get rid of the old reference and set a new one
            # FIXME: is this really necessary?
            del self.running_workers

            self.running_workers = clean_workers
            self.events_rec += 1

            # we update the stats whenever 'events received % state_upd == 0'
            # or if we received a timer event from our ResetTimer
            if (self.events_rec % self.state_upd) == 0:
                self._write_state()
            elif self.ev_timer_ev:
                self._write_state()
                self.ev_timer_ev = False

        log.info("listen loop ended...")                
Beispiel #47
0
    def loads(self, msg, encoding=None, raw=False):
        '''
        Run the correct loads serialization format

        :param encoding: Useful for Python 3 support. If the msgpack data
                         was encoded using "use_bin_type=True", this will
                         differentiate between the 'bytes' type and the
                         'str' type by decoding contents with 'str' type
                         to what the encoding was set as. Recommended
                         encoding is 'utf-8' when using Python 3.
                         If the msgpack data was not encoded using
                         "use_bin_type=True", it will try to decode
                         all 'bytes' and 'str' data (the distinction has
                         been lost in this case) to what the encoding is
                         set as. In this case, it will fail if any of
                         the contents cannot be converted.
        '''
        try:

            def ext_type_decoder(code, data):
                if code == 78:
                    data = salt.utils.stringutils.to_unicode(data)
                    return datetime.datetime.strptime(data,
                                                      '%Y%m%dT%H:%M:%S.%f')
                return data

            gc.disable()  # performance optimization for msgpack
            if msgpack.version >= (0, 4, 0):
                # msgpack only supports 'encoding' starting in 0.4.0.
                # Due to this, if we don't need it, don't pass it at all so
                # that under Python 2 we can still work with older versions
                # of msgpack.
                try:
                    ret = salt.utils.msgpack.loads(msg,
                                                   use_list=True,
                                                   ext_hook=ext_type_decoder,
                                                   encoding=encoding,
                                                   _msgpack_module=msgpack)
                except UnicodeDecodeError:
                    # msg contains binary data
                    ret = msgpack.loads(msg,
                                        use_list=True,
                                        ext_hook=ext_type_decoder)
            else:
                ret = salt.utils.msgpack.loads(msg,
                                               use_list=True,
                                               ext_hook=ext_type_decoder,
                                               _msgpack_module=msgpack)
            if six.PY3 and encoding is None and not raw:
                ret = salt.transport.frame.decode_embedded_strs(ret)
        except Exception as exc:
            log.critical(
                'Could not deserialize msgpack message. This often happens '
                'when trying to read a file not in binary mode. '
                'To see message payload, enable debug logging and retry. '
                'Exception: %s', exc)
            log.debug('Msgpack deserialization failure on message: %s', msg)
            gc.collect()
            raise
        finally:
            gc.enable()
        return ret
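
The decoder above maps msgpack ext type 78 back to a datetime using the '%Y%m%dT%H:%M:%S.%f' format. Below is a minimal roundtrip sketch of that ext_hook mechanism with plain msgpack; the encoder side is an assumption inferred from the decoder's format string, and it assumes a msgpack release that supports use_bin_type/raw:

import datetime

import msgpack


def dt_encoder(obj):
    # encode datetimes as ext type 78, matching the decoder in loads() above
    if isinstance(obj, datetime.datetime):
        stamp = obj.strftime('%Y%m%dT%H:%M:%S.%f')
        return msgpack.ExtType(78, stamp.encode('utf-8'))
    raise TypeError('cannot serialize {0!r}'.format(obj))


def dt_decoder(code, data):
    if code == 78:
        return datetime.datetime.strptime(data.decode('utf-8'),
                                          '%Y%m%dT%H:%M:%S.%f')
    return msgpack.ExtType(code, data)


stamp = datetime.datetime(2020, 1, 2, 3, 4, 5, 678900)
packed = msgpack.packb(stamp, default=dt_encoder, use_bin_type=True)
assert msgpack.unpackb(packed, ext_hook=dt_decoder, raw=False) == stamp
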
Beispiel #48
0
    def cmd_block(self, is_retry=False):
        '''
        Prepare the pre-check command to send to the subsystem

        1. execute SHIM + command
        2. check if SHIM returns a master request or if it completed
        3. handle any master request
        4. re-execute SHIM + command
        5. split SHIM results from command results
        6. return command results
        '''
        self.argv = _convert_args(self.argv)
        log.debug('Performing shimmed, blocking command as follows:\n{0}'.format(' '.join(self.argv)))
        cmd_str = self._cmd_str()
        stdout, stderr, retcode = self.shim_cmd(cmd_str)

        log.trace('STDOUT {1}\n{0}'.format(stdout, self.target['host']))
        log.trace('STDERR {1}\n{0}'.format(stderr, self.target['host']))
        log.debug('RETCODE {1}: {0}'.format(retcode, self.target['host']))

        error = self.categorize_shim_errors(stdout, stderr, retcode)
        if error:
            if error == 'Undefined SHIM state':
                self.deploy()
                stdout, stderr, retcode = self.shim_cmd(cmd_str)
                if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                    # If RSTR is not seen in both stdout and stderr then there
                    # was a thin deployment problem.
                    return 'ERROR: Failure deploying thin, undefined state: {0}'.format(stdout), stderr, retcode
                while re.search(RSTR_RE, stdout):
                    stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                while re.search(RSTR_RE, stderr):
                    stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
            else:
                return 'ERROR: {0}'.format(error), stderr, retcode

        # FIXME: this discards output from ssh_shim if the shim succeeds.  It should
        # always save the shim output regardless of shim success or failure.
        while re.search(RSTR_RE, stdout):
            stdout = re.split(RSTR_RE, stdout, 1)[1].strip()

        if re.search(RSTR_RE, stderr):
            # Found RSTR in stderr, which means the SHIM completed and the
            # remaining output is only from salt.
            while re.search(RSTR_RE, stderr):
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()

        else:
            # RSTR was found in stdout but not stderr - which means there
            # is a SHIM command for the master.
            shim_command = re.split(r'\r?\n', stdout, 1)[0].strip()
            log.debug('SHIM retcode({0}) and command: {1}'.format(retcode, shim_command))
            if 'deploy' == shim_command and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY:
                self.deploy()
                stdout, stderr, retcode = self.shim_cmd(cmd_str)
                if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                    if not self.tty:
                        # If RSTR is not seen in both stdout and stderr then there
                        # was a thin deployment problem.
                        log.error('ERROR: Failure deploying thin, retrying: {0}\n{1}'.format(stdout, stderr))
                        return self.cmd_block()
                    elif not re.search(RSTR_RE, stdout):
                        # If RSTR is not seen in stdout with tty, then there
                        # was a thin deployment problem.
                        log.error('ERROR: Failure deploying thin, retrying: {0}\n{1}'.format(stdout, stderr))
                while re.search(RSTR_RE, stdout):
                    stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                if self.tty:
                    stderr = ''
                else:
                    while re.search(RSTR_RE, stderr):
                        stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
            elif 'ext_mods' == shim_command:
                self.deploy_ext()
                stdout, stderr, retcode = self.shim_cmd(cmd_str)
                if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                    # If RSTR is not seen in both stdout and stderr then there
                    # was a thin deployment problem.
                    return 'ERROR: Failure deploying ext_mods: {0}'.format(stdout), stderr, retcode
                while re.search(RSTR_RE, stdout):
                    stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                while re.search(RSTR_RE, stderr):
                    stderr = re.split(RSTR_RE, stderr, 1)[1].strip()

        return stdout, stderr, retcode
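
cmd_block() repeatedly strips everything up to and including the RSTR delimiter from the shim output. A standalone sketch of that loop; the RSTR value and regex here are illustrative placeholders, not the ones salt-ssh actually uses:

import re

RSTR = 'SALT_SHIM_DELIMITER'            # hypothetical delimiter value
RSTR_RE = re.compile(r'{0}(?:;|\r?\n)?'.format(RSTR))


def strip_shim_output(stream):
    '''
    Drop everything up to and including the last delimiter, mirroring the
    'while re.search(...): re.split(...)' loops in cmd_block() above.
    '''
    while re.search(RSTR_RE, stream):
        stream = re.split(RSTR_RE, stream, 1)[1].strip()
    return stream


raw = 'shim noise\n' + RSTR + '\ncommand output'
assert strip_shim_output(raw) == 'command output'
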
Beispiel #49
0
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the minion
        '''
        log.info('{0} is starting as user \'{1}\''.format(
            self.__class__.__name__, getpass.getuser()))
        log.debug('Minion "{0}" trying to tune in'.format(self.opts['id']))
        self.context = zmq.Context()

        # Prepare the minion event system
        #
        # Start with the publish socket
        id_hash = hashlib.md5(self.opts['id']).hexdigest()
        epub_sock_path = os.path.join(
            self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash))
        epull_sock_path = os.path.join(
            self.opts['sock_dir'], 'minion_event_{0}_pull.ipc'.format(id_hash))
        self.epub_sock = self.context.socket(zmq.PUB)
        if self.opts.get('ipc_mode', '') == 'tcp':
            epub_uri = 'tcp://127.0.0.1:{0}'.format(self.opts['tcp_pub_port'])
            epull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pull_port'])
        else:
            epub_uri = 'ipc://{0}'.format(epub_sock_path)
            salt.utils.check_ipc_path_max_len(epub_uri)
            epull_uri = 'ipc://{0}'.format(epull_sock_path)
            salt.utils.check_ipc_path_max_len(epull_uri)
        log.debug('{0} PUB socket URI: {1}'.format(self.__class__.__name__,
                                                   epub_uri))
        log.debug('{0} PULL socket URI: {1}'.format(self.__class__.__name__,
                                                    epull_uri))

        # Create the pull socket
        self.epull_sock = self.context.socket(zmq.PULL)
        # Bind the event sockets
        self.epub_sock.bind(epub_uri)
        self.epull_sock.bind(epull_uri)
        # Restrict access to the sockets
        if not self.opts.get('ipc_mode', '') == 'tcp':
            os.chmod(epub_sock_path, 448)  # 448 == 0o700, owner-only access
            os.chmod(epull_sock_path, 448)

        self.poller = zmq.Poller()
        self.epoller = zmq.Poller()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.setsockopt(zmq.SUBSCRIBE, '')
        self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
        if hasattr(zmq, 'RECONNECT_IVL_MAX'):
            self.socket.setsockopt(zmq.RECONNECT_IVL_MAX,
                                   self.opts['recon_max'])
        if hasattr(zmq, 'TCP_KEEPALIVE'):
            self.socket.setsockopt(zmq.TCP_KEEPALIVE,
                                   self.opts['tcp_keepalive'])
            self.socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE,
                                   self.opts['tcp_keepalive_idle'])
            self.socket.setsockopt(zmq.TCP_KEEPALIVE_CNT,
                                   self.opts['tcp_keepalive_cnt'])
            self.socket.setsockopt(zmq.TCP_KEEPALIVE_INTVL,
                                   self.opts['tcp_keepalive_intvl'])
        if hasattr(zmq, 'IPV4ONLY'):
            self.socket.setsockopt(
                zmq.IPV4ONLY,
                int(not int(self.opts.get('ipv6_enable', False))))
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        self.epoller.register(self.epull_sock, zmq.POLLIN)
        # Send an event to the master that the minion is live
        self._fire_master(
            'Minion {0} started at {1}'.format(self.opts['id'],
                                               time.asctime()), 'minion_start')

        if self.opts['multiprocessing'] and not salt.utils.is_windows():
            signal.signal(signal.SIGCHLD, self.handle_sigchld)
        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        # On first startup execute a state run if configured to do so
        self._state_run()

        while True:
            try:
                self.schedule.eval()
                socks = dict(
                    self.poller.poll(self.opts['loop_interval'] * 1000))
                if self.socket in socks and socks[self.socket] == zmq.POLLIN:
                    payload = self.serial.loads(self.socket.recv())
                    self._handle_payload(payload)
                time.sleep(0.05)
                # Clean up the minion processes which have been executed and
                # have finished
                # Check if modules and grains need to be refreshed
                self.passive_refresh()
                # Check the event system
                if self.epoller.poll(1):
                    try:
                        package = self.epull_sock.recv(zmq.NOBLOCK)
                        self.epub_sock.send(package)
                    except Exception:
                        pass
            except zmq.ZMQError:
                # This is thrown by the interrupt caused by python handling
                # the SIGCHLD. This is a safe error and we just start the
                # poll again
                continue
            except Exception:
                log.critical(traceback.format_exc())
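
tune_in() is built around a zmq SUB socket plus a Poller. A minimal sketch of that pattern in isolation; the URI mentioned in the docstring is only an example of a typical master publish address, not something this snippet depends on:

import zmq


def subscribe_once(pub_uri, timeout_ms=1000):
    '''
    Connect a SUB socket to a publisher, poll once and return the first
    payload (or None on timeout). pub_uri is whatever the master publishes
    on, e.g. 'tcp://127.0.0.1:4505' in a typical default setup.
    '''
    context = zmq.Context()
    sock = context.socket(zmq.SUB)
    sock.setsockopt(zmq.SUBSCRIBE, b'')     # subscribe to everything
    sock.setsockopt(zmq.LINGER, 0)
    sock.connect(pub_uri)

    poller = zmq.Poller()
    poller.register(sock, zmq.POLLIN)
    try:
        socks = dict(poller.poll(timeout_ms))
        if socks.get(sock) == zmq.POLLIN:
            return sock.recv()
        return None
    finally:
        sock.close()
        context.term()
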
Beispiel #50
0
    def clear_cached_minion_data(self,
                                 clear_pillar=False,
                                 clear_grains=False,
                                 clear_mine=False,
                                 clear_mine_func=None):
        '''
        Clear the cached data/files for the targeted minions.
        '''
        clear_what = []
        if clear_pillar:
            clear_what.append('pillar')
        if clear_grains:
            clear_what.append('grains')
        if clear_mine:
            clear_what.append('mine')
        if clear_mine_func is not None:
            clear_what.append('mine_func: \'{0}\''.format(clear_mine_func))
        if not clear_what:
            log.debug('No cached data types specified for clearing.')
            return False

        minion_ids = self._tgt_to_list()
        log.debug('Clearing cached %s data for: %s', ', '.join(clear_what),
                  minion_ids)
        if clear_pillar == clear_grains:
            # clear_pillar and clear_grains are both True or both False.
            # This means we don't deal with pillar/grains caches at all.
            grains = {}
            pillars = {}
        else:
            # Unless both clear_pillar and clear_grains are True, we need
            # to read in the pillar/grains data since they are both stored
            # in the same file, 'data.p'
            grains, pillars = self._get_cached_minion_data(*minion_ids)
        try:
            c_minions = self.cache.list('minions')
            for minion_id in minion_ids:
                if not salt.utils.verify.valid_id(self.opts, minion_id):
                    continue

                if minion_id not in c_minions:
                    # Cache bank for this minion does not exist. Nothing to do.
                    continue
                bank = 'minions/{0}'.format(minion_id)
                minion_pillar = pillars.pop(minion_id, False)
                minion_grains = grains.pop(minion_id, False)
                if ((clear_pillar and clear_grains)
                        or (clear_pillar and not minion_grains)
                        or (clear_grains and not minion_pillar)):
                    # Not saving pillar or grains, so just delete the cache file
                    self.cache.flush(bank, 'data')
                elif clear_pillar and minion_grains:
                    self.cache.store(bank, 'data', {'grains': minion_grains})
                elif clear_grains and minion_pillar:
                    self.cache.store(bank, 'data', {'pillar': minion_pillar})
                if clear_mine:
                    # Delete the whole mine file
                    self.cache.flush(bank, 'mine')
                elif clear_mine_func is not None:
                    # Delete a specific function from the mine file
                    mine_data = self.cache.fetch(bank, 'mine')
                    if isinstance(mine_data, dict):
                        if mine_data.pop(clear_mine_func, False):
                            self.cache.store(bank, 'mine', mine_data)
        except (OSError, IOError):
            return True
        return True
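
The branching on clear_pillar/clear_grains above decides whether the combined 'data' cache entry is flushed, rewritten with only grains, or rewritten with only pillar. A small standalone restatement of that decision (function name and return labels are illustrative):

def data_cache_action(clear_pillar, clear_grains, have_pillar, have_grains):
    '''
    Reproduce the branch logic above: decide what happens to the combined
    'data' cache entry for one minion.
    '''
    if (clear_pillar and clear_grains) \
            or (clear_pillar and not have_grains) \
            or (clear_grains and not have_pillar):
        return 'flush'
    if clear_pillar and have_grains:
        return 'store grains only'
    if clear_grains and have_pillar:
        return 'store pillar only'
    return 'untouched'

assert data_cache_action(True, True, True, True) == 'flush'
assert data_cache_action(True, False, True, True) == 'store grains only'
assert data_cache_action(False, True, True, True) == 'store pillar only'
assert data_cache_action(False, False, True, True) == 'untouched'
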
Beispiel #51
0
    def clear_cached_minion_data(self,
                                 clear_pillar=False,
                                 clear_grains=False,
                                 clear_mine=False,
                                 clear_mine_func=None):
        '''
        Clear the cached data/files for the targeted minions.
        '''
        clear_what = []
        if clear_pillar:
            clear_what.append('pillar')
        if clear_grains:
            clear_what.append('grains')
        if clear_mine:
            clear_what.append('mine')
        if clear_mine_func is not None:
            clear_what.append('mine_func: {0!r}'.format(clear_mine_func))
        if not clear_what:
            log.debug('No cached data types specified for clearing.')
            return False

        minion_ids = self._tgt_to_list()
        log.debug('Clearing cached {0} data for: {1}'.format(
            ', '.join(clear_what),
            minion_ids))
        if clear_pillar == clear_grains:
            # clear_pillar and clear_grains are both True or both False.
            # This means we don't deal with pillar/grains caches at all.
            grains = {}
            pillars = {}
        else:
            # Unless both clear_pillar and clear_grains are True, we need
            # to read in the pillar/grains data since they are both stored
            # in the same file, 'data.p'
            grains, pillars = self._get_cached_minion_data(*minion_ids)
        try:
            for minion_id in minion_ids:
                if not salt.utils.verify.valid_id(self.opts, minion_id):
                    continue
                cdir = os.path.join(self.opts['cachedir'], 'minions', minion_id)
                if not os.path.isdir(cdir):
                    # Cache dir for this minion does not exist. Nothing to do.
                    continue
                data_file = os.path.join(cdir, 'data.p')
                mine_file = os.path.join(cdir, 'mine.p')
                minion_pillar = pillars.pop(minion_id, False)
                minion_grains = grains.pop(minion_id, False)
                if ((clear_pillar and clear_grains) or
                    (clear_pillar and not minion_grains) or
                    (clear_grains and not minion_pillar)):
                    # Not saving pillar or grains, so just delete the cache file
                    os.remove(data_file)
                elif clear_pillar and minion_grains:
                    tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
                    os.close(tmpfh)
                    with salt.utils.fopen(tmpfname, 'w+b') as fp_:
                        fp_.write(self.serial.dumps({'grains': minion_grains}))
                    os.rename(tmpfname, data_file)
                elif clear_grains and minion_pillar:
                    tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
                    os.close(tmpfh)
                    with salt.utils.fopen(tmpfname, 'w+b') as fp_:
                        fp_.write(self.serial.dumps({'pillar': minion_pillar}))
                    os.rename(tmpfname, data_file)
                if clear_mine:
                    # Delete the whole mine file
                    os.remove(mine_file)
                elif clear_mine_func is not None:
                    # Delete a specific function from the mine file
                    with salt.utils.fopen(mine_file, 'rb') as fp_:
                        mine_data = self.serial.loads(fp_.read())
                    if isinstance(mine_data, dict):
                        if mine_data.pop(clear_mine_func, False):
                            tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
                            os.close(tmpfh)
                            with salt.utils.fopen(tmpfname, 'w+b') as fp_:
                                fp_.write(self.serial.dumps(mine_data))
                            os.rename(tmpfname, mine_file)
        except (OSError, IOError):
            return True
        return True
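
This older variant writes cache files via tempfile.mkstemp() followed by os.rename(), so a reader never sees a half-written file. A generic sketch of that pattern, assuming POSIX rename semantics; the path in the usage comment is illustrative:

import os
import tempfile


def atomic_write(path, payload):
    '''
    Write payload to a temporary file in the same directory and rename it
    over the target, so readers never see a half-written cache file. This
    is the mkstemp()/os.rename() pattern the example above repeats.
    '''
    dirname = os.path.dirname(path) or '.'
    tmpfh, tmpfname = tempfile.mkstemp(dir=dirname)
    os.close(tmpfh)
    with open(tmpfname, 'w+b') as fp_:
        fp_.write(payload)
    os.rename(tmpfname, path)

# usage: atomic_write('/var/cache/salt/master/minions/web01/data.p', blob)
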
Beispiel #52
0
    def run(self):
        '''
        Main loop of the ConCache, starts updates in intervals and
        answers requests from the MWorkers
        '''
        context = zmq.Context()
        # the socket for incoming cache requests
        creq_in = context.socket(zmq.REP)
        creq_in.setsockopt(zmq.LINGER, 100)
        creq_in.bind('ipc://' + self.cache_sock)

        # the socket for incoming cache-updates from workers
        cupd_in = context.socket(zmq.SUB)
        cupd_in.setsockopt(zmq.SUBSCRIBE, b'')
        cupd_in.setsockopt(zmq.LINGER, 100)
        cupd_in.bind('ipc://' + self.update_sock)

        # the socket for the timer-event
        timer_in = context.socket(zmq.SUB)
        timer_in.setsockopt(zmq.SUBSCRIBE, b'')
        timer_in.setsockopt(zmq.LINGER, 100)
        timer_in.connect('ipc://' + self.upd_t_sock)

        poller = zmq.Poller()
        poller.register(creq_in, zmq.POLLIN)
        poller.register(cupd_in, zmq.POLLIN)
        poller.register(timer_in, zmq.POLLIN)

        # our serializer
        serial = salt.payload.Serial(self.opts.get('serial', ''))

        # register a signal handler
        signal.signal(signal.SIGINT, self.signal_handler)

        # secure the sockets from the world
        self.secure()

        log.info('ConCache started')

        while self.running:

            # we check for new events with the poller
            try:
                socks = dict(poller.poll(1))
            except KeyboardInterrupt:
                self.stop()
                continue
            except zmq.ZMQError as zmq_err:
                log.error('ConCache ZeroMQ-Error occurred')
                log.exception(zmq_err)
                self.stop()
                continue

            # check for next cache-request
            if socks.get(creq_in) == zmq.POLLIN:
                msg = serial.loads(creq_in.recv())
                log.debug('ConCache Received request: %s', msg)

                # requests for the minion list are sent as strings
                if isinstance(msg, six.string_types):
                    if msg == 'minions':
                        # Send reply back to client
                        reply = serial.dumps(self.minions)
                        creq_in.send(reply)

            # check for next cache-update from workers
            if socks.get(cupd_in) == zmq.POLLIN:
                new_c_data = serial.loads(cupd_in.recv())
                # tell the worker to exit
                #cupd_in.send(serial.dumps('ACK'))

                # check if the returned data is usable
                if not isinstance(new_c_data, list):
                    log.error('ConCache Worker returned unusable result')
                    del new_c_data
                    continue

                # the cache will receive lists of minions
                # 1. if the list only has 1 item, it's from an MWorker, so we append it
                # 2. if the list contains another list, it's from a CacheWorker and
                #    the currently cached minions are replaced with that list
                # 3. anything else is considered malformed

                try:

                    if not new_c_data:
                        log.debug('ConCache Got empty update from worker')
                        continue

                    data = new_c_data[0]

                    if isinstance(data, six.string_types):
                        if data not in self.minions:
                            log.debug('ConCache Adding minion %s to cache',
                                      new_c_data[0])
                            self.minions.append(data)

                    elif isinstance(data, list):
                        log.debug('ConCache Replacing minion list from worker')
                        self.minions = data

                except IndexError:
                    log.debug('ConCache Got malformed result from worker')
                    del new_c_data

                log.info('ConCache %s entries in cache', len(self.minions))

            # check for next timer-event to start new jobs
            if socks.get(timer_in) == zmq.POLLIN:
                sec_event = serial.loads(timer_in.recv())

                # update the list every 30 seconds
                if int(sec_event % 30) == 0:
                    cw = CacheWorker(self.opts)
                    cw.start()

        self.stop()
        creq_in.close()
        cupd_in.close()
        timer_in.close()
        context.term()
        log.debug('ConCache Shutting down')
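
A client of the ConCache above sends the string 'minions' to the REP socket and gets back the serialized minion list. A hedged sketch of such a client using plain msgpack over a zmq REQ socket; it assumes the cache side uses the default msgpack serializer, and the socket path mentioned in the docstring is purely illustrative:

import msgpack
import zmq


def query_cached_minions(cache_sock_path, timeout_ms=3000):
    '''
    Ask the ConCache REP socket for its minion list. cache_sock_path is an
    ipc socket path such as '/var/run/salt/master/concache.ipc'
    (illustrative); returns None on timeout.
    '''
    context = zmq.Context()
    creq = context.socket(zmq.REQ)
    creq.setsockopt(zmq.LINGER, 100)
    creq.connect('ipc://' + cache_sock_path)
    try:
        creq.send(msgpack.packb('minions', use_bin_type=True))
        poller = zmq.Poller()
        poller.register(creq, zmq.POLLIN)
        if poller.poll(timeout_ms):
            return msgpack.unpackb(creq.recv(), raw=False)
        return None
    finally:
        creq.close()
        context.term()
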
Beispiel #53
0
def get_id():
    '''
    Guess the id of the minion.

    - Check /etc/hostname for a value other than localhost
    - If socket.getfqdn() returns us something other than localhost, use it
    - Check /etc/hosts for something that isn't localhost that maps to 127.*
    - Look for a routeable / public IP
    - A private IP is better than a loopback IP
    - localhost may be better than killing the minion

    Returns two values: the detected ID, and a boolean value noting whether or
    not an IP address is being used for the ID.
    '''

    log.debug('Guessing ID. The id can be explicitly set in {0}'.format(
        os.path.join(syspaths.CONFIG_DIR, 'minion')))

    # Check /etc/hostname
    try:
        with salt.utils.fopen('/etc/hostname') as hfl:
            name = hfl.read().strip()
        if re.search(r'\s', name):
            log.warning('Whitespace character detected in /etc/hostname. '
                        'This file should not contain any whitespace.')
        else:
            if name != 'localhost':
                return name, False
    except Exception:
        pass

    # Nothing usable in /etc/hostname, or the file was not found
    fqdn = socket.getfqdn()
    if fqdn != 'localhost':
        log.info('Found minion id from getfqdn(): {0}'.format(fqdn))
        return fqdn, False

    # Can /etc/hosts help us?
    try:
        with salt.utils.fopen('/etc/hosts') as hfl:
            for line in hfl:
                names = line.split()
                ip_ = names.pop(0)
                if ip_.startswith('127.'):
                    for name in names:
                        if name != 'localhost':
                            log.info(
                                'Found minion id in hosts file: {0}'.format(
                                    name))
                            return name, False
    except Exception:
        pass

    # Can Windows 'hosts' file help?
    try:
        windir = os.getenv("WINDIR")
        with salt.utils.fopen(windir +
                              '\\system32\\drivers\\etc\\hosts') as hfl:
            for line in hfl:
                # skip commented or blank lines
                if line[0] == '#' or len(line) <= 1:
                    continue
                # process lines looking for '127.' in first column
                try:
                    entry = line.split()
                    if entry[0].startswith('127.'):
                        for name in entry[1:]:  # try each name in the row
                            if name != 'localhost':
                                log.info('Found minion id in hosts file: {0}'.
                                         format(name))
                                return name, False
                except IndexError:
                    pass  # could not split line (malformed entry?)
    except Exception:
        pass

    # What IP addresses do we have?
    ip_addresses = [
        salt.utils.network.IPv4Address(addr)
        for addr in salt.utils.network.ip_addrs(include_loopback=True)
        if not addr.startswith('127.')
    ]

    for addr in ip_addresses:
        if not addr.is_private:
            log.info('Using public ip address for id: {0}'.format(addr))
            return str(addr), True

    if ip_addresses:
        addr = ip_addresses.pop(0)
        log.info('Using private ip address for id: {0}'.format(addr))
        return str(addr), True

    log.error('No id found, falling back to localhost')
    return 'localhost', False
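
The /etc/hosts step of get_id() looks for the first non-localhost name on a 127.* line. As an isolated sketch of just that heuristic (a hypothetical helper, not part of salt):

def name_from_hosts(hosts_path='/etc/hosts'):
    '''
    Return the first non-localhost name that maps to a 127.* address,
    mirroring the /etc/hosts step of get_id() above; None if nothing fits.
    '''
    try:
        with open(hosts_path) as hfl:
            for line in hfl:
                fields = line.split()
                if not fields or fields[0].startswith('#'):
                    continue
                if fields[0].startswith('127.'):
                    for name in fields[1:]:
                        if name != 'localhost':
                            return name
    except (IOError, OSError):
        pass
    return None
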
def get_elb_lbs():
    """
    Returns a dictionary of load balancer names as keys
    each with their respective attributes
    """

    # attributes to extract from the load balancer boto objects
    # this could possibly be a named argument too
    extract_attrs = ['scheme', 'dns_name', 'vpc_id', 'name', 'security_groups']

    try:
        instance_metadata = boto.utils.get_instance_metadata(timeout=5,
                                                             num_retries=2)
    except Exception as e:
        log.exception("Error getting ELB names: {}".format(e))
        return {'custom_grain_error': True}

    # Setup the lbs grain
    lbs_grain = {'lbs': {}}

    # Collect details about this instance
    vpc_id = list(
        instance_metadata['network']['interfaces']['macs'].values()
    )[0]['vpc-id']
    region = instance_metadata['placement']['availability-zone'][:-1]

    # Collect load balancers of this instance (in the same vpc)
    try:
        elb_connection = boto.ec2.elb.connect_to_region(region)

        # find load balancers by vpc_id
        all_lbs = [
            lb for lb in elb_connection.get_all_load_balancers()
            if lb.vpc_id == vpc_id
        ]
        log.debug(
            'all lbs before filtering by instance id: {}'.format(all_lbs))

        # further filter the load balancers by instance id
        lbs = [
            lb for lb in all_lbs for inst in lb.instances
            if inst.id == instance_metadata['instance-id']
        ]
        # initialise and populate the output of load balancers
        out = {}
        for lb in lbs:
            out[lb.name] = {
                attr: getattr(lb, attr, None) for attr in extract_attrs
            }

        if not out:
            # This loglevel could perhaps be adjusted to something more visible
            log.warning("No ELBs found for this instance, this is unusual, "
                        "but we will not break highstate")

        lbs_grain['lbs'] = out

    except Exception as e:
        # This prints a user-friendly error with stacktrace
        log.exception("Error getting ELB names: {}".format(e))
        return {'custom_grain_error': True}

    return lbs_grain
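
For reference, the grain returned by get_elb_lbs() on success has roughly this shape, built from the extract_attrs list above (all values below are made up for illustration):

expected_shape = {
    'lbs': {
        'my-app-elb': {
            'scheme': 'internet-facing',
            'dns_name': 'my-app-elb-123456.eu-west-1.elb.amazonaws.com',
            'vpc_id': 'vpc-0abc1234',
            'name': 'my-app-elb',
            'security_groups': ['sg-0def5678'],
        },
    },
}
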