Example #1
 def start(self):
     log.debug("starting monitor with {} task{}".format(len(self.tasks), "" if len(self.tasks) == 1 else "s"))
     if self.tasks:
         for task in self.tasks:
             threading.Thread(target=task.run).start()
     else:
         log.error("no monitor tasks to run")
Example #2
def ec2_tags():
    boto_version = StrictVersion(boto.__version__)
    required_boto_version = StrictVersion('2.8.0')
    if boto_version < required_boto_version:
        log.error("Installed boto version %s < %s, can't find ec2_tags",
                  boto_version, required_boto_version)
        return None

    if not _on_ec2():
        log.info("Not an EC2 instance, skipping")
        return None

    instance_id, region = _get_instance_info()
    credentials = _get_credentials()

    # Connect to EC2 and parse the Roles tags for this instance
    try:
        conn = boto.ec2.connect_to_region(
            region,
            aws_access_key_id=credentials['access_key'],
            aws_secret_access_key=credentials['secret_key'],
        )
    except Exception as e:
        log.error("Could not get AWS connection: %s", e)
        return None
Example #3
def daemonize():
    '''
    Daemonize a process
    '''
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError as exc:
        log.error(
            'fork #1 failed: {0} ({1})'.format(exc.errno, exc.strerror)
        )
        sys.exit(1)

    # decouple from parent environment
    os.chdir('/')
    os.setsid()
    os.umask(0o022)  # 18 decimal: clear group/other write permission bits

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as exc:
        log.error(
            'fork #2 failed: {0} ({1})'.format(
                exc.errno, exc.strerror
            )
        )
        sys.exit(1)
Example #4
def dns_check(addr, safe=False):
    '''
    Return the ip resolved by dns, but do not exit on failure, only raise an
    exception.
    '''
    try:
        socket.inet_aton(addr)
    except socket.error:
        # Not a valid IP address, check DNS
        try:
            addr = socket.gethostbyname(addr)
        except socket.gaierror:
            err = ('This master address: \'{0}\' was previously resolvable but '
                  'now fails to resolve! The previously resolved ip addr '
                  'will continue to be used').format(addr)
            if safe:
                import salt.log
                if salt.log.is_console_configured():
                    # If logging is not configured it also means that either
                    # the master or minion instance calling this hasn't even
                    # started running
                    log.error(err)
                raise SaltClientError
            else:
                err = err.format(addr)
                sys.stderr.write(err)
                sys.exit(42)
    return addr
Example #5
def load_agents(config):
    '''
    Load the agents specified in /etc/salt/alert from the
    salt.ext.alert.agents package.  Each module must define a
    load_agents() function that accepts the parsed YAML configuration
    for the agents.
    '''
    ignore_modules = ['alert.time', 'alert.subscriptions', 'alert.verbs']
    agents = {}
    for key, value in config.items():
        if key.startswith('alert.') and key not in ignore_modules:
            modname = AGENTS_MODULE + '._' + key[6:]
            log.trace('load %s', modname)
            try:
                mod = __import__(modname, fromlist=[AGENTS_MODULE])
            except ImportError as ex:
                log.trace('not an agent module: %s', modname, exc_info=ex)
                continue
            try:
                new_agents = mod.load_agents(value)
            except AttributeError as ex:
                log.error('not an agent module: %s', modname, exc_info=ex)
                continue
            common = set(agents.keys()) & set(new_agents.keys())
            if len(common) != 0:
                raise ValueError(
                        'agent name(s) collide in config: {}'.format(
                        ', '.join(["'{}'".format(x) for x in common])))
            agents.update(new_agents)
            log.trace('loaded alert agent(s): %s', new_agents.keys())
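The docstring in Example #5 describes a plug-in contract: every alert.* module under AGENTS_MODULE must expose its own load_agents() that accepts that agent's parsed YAML section and returns named agent objects. A minimal sketch of such a module, with hypothetical names that are not taken from salt.ext.alert, could look like this:

import logging

log = logging.getLogger(__name__)


class LogAgent(object):
    '''Toy agent that only logs alerts; stands in for a real delivery agent.'''

    def __init__(self, level='error'):
        self.level = level

    def deliver(self, alert):
        getattr(log, self.level)('alert: %s', alert)


def load_agents(config):
    '''Return a dict mapping agent names to agent objects for this module.'''
    return {name: LogAgent(**settings) for name, settings in config.items()}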
Example #6
 def __init__(self,
              tgt='',
              expr_form='glob',
              env=None,
              use_cached_grains=True,
              use_cached_pillar=True,
              grains_fallback=True,
              pillar_fallback=True,
              opts=None):
     log.debug('New instance of {0} created.'.format(
         self.__class__.__name__))
     if opts is None:
         log.error('{0}: Missing master opts init arg.'.format(
             self.__class__.__name__))
         raise SaltException('{0}: Missing master opts init arg.'.format(
             self.__class__.__name__))
     else:
         self.opts = opts
     self.tgt = tgt
     self.expr_form = expr_form
     self.env = env
     self.use_cached_grains = use_cached_grains
     self.use_cached_pillar = use_cached_pillar
     self.grains_fallback = grains_fallback
     self.pillar_fallback = pillar_fallback
     log.debug(
         'Init settings: tgt: "{0}", expr_form: "{1}", env: "{2}", '
         'use_cached_grains: {3}, use_cached_pillar: {4}, '
         'grains_fallback: {5}, pillar_fallback: {6}'.format(
             tgt, expr_form, env, use_cached_grains, use_cached_pillar,
             grains_fallback, pillar_fallback
         )
     )
Example #7
def main():
    """
    Start computer service.

    .. code-block:: bash

        hs-computer --server|--node|--all
    """

    salt.log.setup_console_logger()

    parser = option_parser()
    shell = Shell(parser)

    try:
        shell.exec_command()
    except Exception as err:
        log.error("shell command failed: {0!r}".format(
            err)
        )
    except KeyboardInterrupt:
        raise SystemExit('\nExiting gracefully on Ctrl-c')
    finally:
        # TODO: need cleanup
        log.trace("TODO cleanup work")

    return
Example #8
def ec2_tags():
  log = logging.getLogger(__name__)

  # ************* REQUEST VALUES *************
  instanceid = _get_instance_id()
  method = 'GET'
  service = 'ec2'
  region = _get_region()
  host = 'ec2.'+region+'.amazonaws.com'
  endpoint = 'https://ec2.'+region+'.amazonaws.com'
  params = [('Action','DescribeTags')]
  params.append( ('Filter.1.Name','resource-id') )
  params.append( ('Filter.1.Value.1',instanceid) )
  params.append( ('Version','2015-04-15') )
  request_parameters = urllib.urlencode(params)

  creds = _get_role_credentials()

  access_key = creds['AccessKeyId']
  secret_key = creds['SecretAccessKey']
  token = creds['Token']

  if access_key is None or secret_key is None or token is None:
      log.error('No role credentials found.')
      return None

  # Create a date for headers and the credential string
  t = datetime.datetime.utcnow()
  amzdate = t.strftime('%Y%m%dT%H%M%SZ')
  datestamp = t.strftime('%Y%m%d') # Date w/o time, used in credential scope

  # Calculate AWS Signature V4
  canonical_uri = '/' 
  canonical_querystring = request_parameters
  canonical_headers = 'host:' + host + '\n' + 'x-amz-date:' + amzdate + '\n' + 'x-amz-security-token:' + token + '\n'
  signed_headers = 'host;x-amz-date;x-amz-security-token'
  payload_hash = hashlib.sha256('').hexdigest()
  canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash

  algorithm = 'AWS4-HMAC-SHA256'
  credential_scope = datestamp + '/' + region + '/' + service + '/' + 'aws4_request'
  string_to_sign = algorithm + '\n' +  amzdate + '\n' +  credential_scope + '\n' +  hashlib.sha256(canonical_request).hexdigest()

  signing_key = _getSignatureKey(secret_key, datestamp, region, service)
  signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'), hashlib.sha256).hexdigest()

  authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + credential_scope + ', ' +  'SignedHeaders=' + signed_headers + ', ' + 'Signature=' + signature

  request_url = endpoint + '?' + canonical_querystring

  r = urllib2.Request(request_url)
  r.add_header('x-amz-date',amzdate)
  r.add_header('Authorization',authorization_header)
  r.add_header('x-amz-security-token',token)
  try:
    result = urllib2.urlopen(r)
  except Exception as e:
    log.error('Could not complete EC2 API request: %s', e)
    return None
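Example #8 depends on a _getSignatureKey() helper that is not shown. A sketch of what it presumably does, following the AWS Signature Version 4 key-derivation chain (an assumption based on the call site, not the elided helper itself):

import hashlib
import hmac


def _sign(key, msg):
    # One HMAC-SHA256 step of the SigV4 key derivation.
    return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()


def _getSignatureKey(key, datestamp, region, service):
    # Derive the signing key: secret -> date -> region -> service -> aws4_request.
    k_date = _sign(('AWS4' + key).encode('utf-8'), datestamp)
    k_region = _sign(k_date, region)
    k_service = _sign(k_region, service)
    return _sign(k_service, 'aws4_request')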
Example #9
 def _match(target, pattern, regex_match=False):
     if regex_match:
         try:
             return re.match(pattern.lower(), str(target).lower())
         except Exception:
             log.error('Invalid regex \'{0}\' in match'.format(pattern))
             return False
     else:
         return fnmatch.fnmatch(str(target).lower(), pattern.lower())
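A few hypothetical calls illustrate the two branches of Example #9, assuming _match() is available at module level as shown:

_match('Web01.example.com', 'web*')                         # glob branch, case-insensitive -> True
_match('web01.example.com', r'^web\d+', regex_match=True)   # regex branch -> match object (truthy)
_match('web01', '[unterminated', regex_match=True)          # invalid regex -> error logged, False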
Example #10
    def _thread_multi_return(class_, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if not minion_instance:
            minion_instance = class_(opts)
        ret = {
            'return': {},
            'success': {},
        }
        for ind in range(0, len(data['fun'])):
            for index in range(0, len(data['arg'][ind])):
                try:
                    arg = eval(data['arg'][ind][index])
                    if isinstance(arg, bool):
                        data['arg'][ind][index] = str(data['arg'][ind][index])
                    elif isinstance(arg, (dict, int, list, string_types)):
                        data['arg'][ind][index] = arg
                    else:
                        data['arg'][ind][index] = str(data['arg'][ind][index])
                except Exception:
                    pass

            ret['success'][data['fun'][ind]] = False
            try:
                func = minion_instance.functions[data['fun'][ind]]
                args, kwargs = detect_kwargs(func, data['arg'][ind], data)
                ret['return'][data['fun'][ind]] = func(*args, **kwargs)
                ret['success'][data['fun'][ind]] = True
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning(
                    'The minion function caused an exception: {0}'.format(
                    exc
                    )
                )
                ret['return'][data['fun'][ind]] = trb
            ret['jid'] = data['jid']
        minion_instance._return_pub(ret)
        if data['ret']:
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job {0} {1}'.format(
                        data['jid'],
                        exc
                        )
                    )
Example #11
 def do_image(self):
     try:
         name = "{0}_image".format(self.CRUD_MAP[self.args.mode])
         function = getattr(self.image, name)
         function()
     except Exception as err:
         log.error('call function {0} failed: {1!r}'.format(
             name, err)
         )
         raise RunTimeFailture("call do_image failure")
Example #12
File: config.py Project: herlo/salt
def get_id():
    '''
    Guess the id of the minion.

    - If socket.getfqdn() returns us something other than localhost, use it
    - Check /etc/hosts for something that isn't localhost that maps to 127.*
    - Look for a routeable / public IP
    - A private IP is better than a loopback IP
    - localhost may be better than killing the minion
    '''

    log.debug('Guessing ID. The id can be explicitly set in {0}'
              .format('/etc/salt/minion'))
    fqdn = socket.getfqdn()
    if 'localhost' != fqdn:
        log.info('Found minion id from getfqdn(): {0}'.format(fqdn))
        return fqdn, False

    # Can /etc/hosts help us?
    try:
        # TODO Add Windows host file support
        with open('/etc/hosts') as f:
            line = f.readline()
            while line:
                names = line.split()
                ip = names.pop(0)
                if ip.startswith('127.'):
                    for name in names:
                        if name != 'localhost':
                            log.info('Found minion id in hosts file: {0}'
                                     .format(name))
                            return name, False
                line = f.readline()
    except Exception:
        pass

    # What IP addresses do we have?
    ip_addresses = [salt.utils.socket_util.IPv4Address(a) for a
                    in salt.utils.socket_util.ip4_addrs()
                    if not a.startswith('127.')]

    for a in ip_addresses:
        if not a.is_private:
            log.info('Using public ip address for id: {0}'.format(a))
            return str(a), True

    if ip_addresses:
        a = ip_addresses.pop(0)
        log.info('Using private ip address for id: {0}'.format(a))
        return str(a), True

    log.error('No id found, falling back to localhost')
    return 'localhost', False
Example #13
    def __init__(self,
                 tgt='',
                 tgt_type='glob',
                 saltenv=None,
                 use_cached_grains=True,
                 use_cached_pillar=True,
                 grains_fallback=True,
                 pillar_fallback=True,
                 opts=None,
                 expr_form=None):

        # remember to remove the expr_form argument from this function when
        # performing the cleanup on this deprecation.
        if expr_form is not None:
            salt.utils.warn_until(
                'Fluorine',
                'the target type should be passed using the \'tgt_type\' '
                'argument instead of \'expr_form\'. Support for using '
                '\'expr_form\' will be removed in Salt Fluorine.'
            )
            tgt_type = expr_form

        log.debug('New instance of {0} created.'.format(
            self.__class__.__name__))
        if opts is None:
            log.error('{0}: Missing master opts init arg.'.format(
                self.__class__.__name__))
            raise SaltException('{0}: Missing master opts init arg.'.format(
                self.__class__.__name__))
        else:
            self.opts = opts
        self.serial = salt.payload.Serial(self.opts)
        self.tgt = tgt
        self.tgt_type = tgt_type
        self.saltenv = saltenv
        self.use_cached_grains = use_cached_grains
        self.use_cached_pillar = use_cached_pillar
        self.grains_fallback = grains_fallback
        self.pillar_fallback = pillar_fallback
        self.cache = salt.cache.Cache(opts)
        log.debug(
            'Init settings: tgt: \'{0}\', tgt_type: \'{1}\', saltenv: \'{2}\', '
            'use_cached_grains: {3}, use_cached_pillar: {4}, '
            'grains_fallback: {5}, pillar_fallback: {6}'.format(
                tgt, tgt_type, saltenv, use_cached_grains, use_cached_pillar,
                grains_fallback, pillar_fallback
            )
        )
Example #14
 def run(self):
     log.trace('start thread for %s', self.taskid)
     minion = self.context.get('id')
     collector = self.context.get('collector')
     while True:
         try:
             exec(self.code, self.context)
         except Exception as ex:
             log.error("can't execute %s: %s", self.taskid, ex, exc_info=ex)
         if collector:
             jid = datetime.datetime.strftime(
                          datetime.datetime.now(), 'M%Y%m%d%H%M%S%f')
             try:
                 collector(minion, self.context['cmd'], self.context['result'])
             except Exception as ex:
                 log.error('monitor error: %s', self.taskid, exc_info=ex)
Example #15
def _pswrapper(cmdlet, **kwargs):
    '''
    Wrap calls to PowerShell cmdlets.

    The results of the cmdlet, if successful, are serialized into a
    JSON document by Powershell and deserialized into the appropriate
    Python object by Salt.
    '''
    cmd = []

    ## On older versions of Windows Server, assume AD FS 2.0.  This
    ## requires manually importing the AD FS PowerShell snapin prior
    ## to executing related cmdlets.
    if __grains__['osrelease'] in ['2008Server', '2008ServerR2']:
        cmd.append('Add-PSSnapin Microsoft.Adfs.PowerShell;')

    cmd.append(cmdlet)

    ## Loop through kwargs, which get translated verbatim into cmdlet
    ## parameters.
    for k, v in kwargs.items():
        if k.find('__pub') >= 0:
            ## filter out special kwargs like '/__pub_fun'
            pass
        elif v == True or v == False:
            cmd.append('-{0}:${1}'.format(k, v))
        elif type(v) is dict and 'username' in v:
            ## assume dicts that contain a 'username' key should get
            ## transformed into PSCredential objects
            cmd.append('-{0} ({1})'.format(k, _pscredential(v['username'], v['password'])))
        elif type(v) is int:
            cmd.append('-{0} {1}'.format(k, v))
        else:
            cmd.append('-{0} "{1}"'.format(k, v))
    cmd.append('| ConvertTo-Json -Compress -Depth 32')

    ## TODO: replace with cmd.powershell in a future Salt release
    response = __salt__['cmd.run'](
        " ".join(cmd),
        shell='powershell',
        python_shell=True)
    try:
        return json.loads(response)
    except Exception:
        log.error("Error converting PowerShell JSON return", exc_info=True)
        return {}
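To make the kwargs-to-parameter translation in Example #15 concrete, the fragment below re-runs just the translation loop on hypothetical kwargs (the cmdlet and parameter names are invented for illustration):

kwargs = {'Name': 'claims-app', 'Enabled': True, 'Depth': 3, '__pub_fun': 'adfs.get'}
cmd = ['Get-AdfsRelyingPartyTrust']
for k, v in kwargs.items():
    if k.find('__pub') >= 0:
        pass                                    # special Salt kwargs are dropped
    elif isinstance(v, bool):
        cmd.append('-{0}:${1}'.format(k, v))    # -Enabled:$True
    elif type(v) is int:
        cmd.append('-{0} {1}'.format(k, v))     # -Depth 3
    else:
        cmd.append('-{0} "{1}"'.format(k, v))   # -Name "claims-app"
cmd.append('| ConvertTo-Json -Compress -Depth 32')
print(' '.join(cmd))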
Example #16
    def __init__(self,
                 tgt='',
                 expr_form='glob',
                 saltenv=None,
                 use_cached_grains=True,
                 use_cached_pillar=True,
                 grains_fallback=True,
                 pillar_fallback=True,
                 opts=None,
                 env=None):
        if env is not None:
            salt.utils.warn_until(
                'Boron',
                'Passing a salt environment should be done using \'saltenv\' '
                'not \'env\'. This functionality will be removed in Salt '
                'Boron.'
            )
            # Backwards compatibility
            saltenv = env

        log.debug('New instance of {0} created.'.format(
            self.__class__.__name__))
        if opts is None:
            log.error('{0}: Missing master opts init arg.'.format(
                self.__class__.__name__))
            raise SaltException('{0}: Missing master opts init arg.'.format(
                self.__class__.__name__))
        else:
            self.opts = opts
        self.serial = salt.payload.Serial(self.opts)
        self.tgt = tgt
        self.expr_form = expr_form
        self.saltenv = saltenv
        self.use_cached_grains = use_cached_grains
        self.use_cached_pillar = use_cached_pillar
        self.grains_fallback = grains_fallback
        self.pillar_fallback = pillar_fallback
        log.debug(
            'Init settings: tgt: {0!r}, expr_form: {1!r}, saltenv: {2!r}, '
            'use_cached_grains: {3}, use_cached_pillar: {4}, '
            'grains_fallback: {5}, pillar_fallback: {6}'.format(
                tgt, expr_form, saltenv, use_cached_grains, use_cached_pillar,
                grains_fallback, pillar_fallback
            )
        )
Example #17
 def __message(self, event):
     '''
     Handle 'service-unavailable' error by requeueing message and
     temporarily suspending message sending.
     '''
     if event['type'] == 'error':
         addr = event['from'].bare
         condition = event['error'].get_condition()
         log.error('%s: %s', addr, condition)
         if condition == 'service-unavailable':
             recipient = self.recipients.get(addr)
             if recipient:
                 msg = event.get('body')
                 log.debug('resend to %s: %s', addr, msg)
                 recipient.readd_msg(msg)
                 self.service_down = True
                 self.schedule('service-down',
                               self.retry_service_wait,
                               self.__retry_service)
Example #18
 def _expand_tasks(self, parsed_yaml):
     '''
     Assemble compiled code from the configuration described by
     python dictionaries and lists.
     '''
     results = []
     for tasknum, taskdict in enumerate(parsed_yaml, 1):
         try:
             log.trace(taskdict)
             taskid = taskdict.get('id', 'monitor-{}'.format(tasknum))
             pysrc = self._expand_task(taskid, taskdict)
             log.trace("generated '%s' task source:\n%s", taskid, pysrc)
             pyexe = compile(pysrc, '<monitor-config>', 'exec')
             scheduler = self._expand_scheduler(taskdict)
             results.append(MonitorTask(taskid, pyexe, self.context, scheduler))
         except ValueError as ex:
             log.error('ignore monitor command #{} {!r}: {}'.format(
                 tasknum, taskdict.get('run', '<unknown>'), ex))
Example #19
def highstate(test=None, **kwargs):
    '''
    Retrieve the state data from the salt master for this minion and execute it

    CLI Example:

    .. code-block:: bash

        salt '*' state.highstate

        salt '*' state.highstate exclude=sls_to_exclude
        salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
    '''
    __opts__['grains'] = __grains__
    st_ = salt.client.ssh.state.SSHHighState(__opts__, __pillar__, __salt__)
    chunks = st_.compile_low_chunks()
    file_refs = salt.client.ssh.state.lowstate_file_refs(chunks, kwargs.get('extra_filerefs', ''))
    trans_tar = salt.client.ssh.state.prep_trans_tar(
            __opts__,
            chunks,
            file_refs,
            __pillar__)
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
    cmd = 'state.pkg /tmp/.salt/salt_state.tgz test={0} pkg_sum={1} hash_type={2}'.format(
            test,
            trans_tar_sum,
            __opts__['hash_type'])
    single = salt.client.ssh.Single(
            __opts__,
            cmd,
            **__salt__.kwargs)
    single.shell.send(
            trans_tar,
            '/tmp/.salt/salt_state.tgz')
    stdout, stderr, _ = single.cmd_block()
    try:
        stdout = json.loads(stdout, object_hook=salt.utils.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: {0}".format(stdout))
        log.error(str(e))
Example #20
def request(mods=None, **kwargs):
    '''
    .. versionadded:: 2017.7.3

    Request that the local admin execute a state run via
    `salt-call state.run_request`
    All arguments match state.apply

    CLI Example:

    .. code-block:: bash

        salt '*' state.request
        salt '*' state.request test
        salt '*' state.request test,pkgs
    '''
    kwargs['test'] = True
    ret = apply_(mods, **kwargs)
    notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
    serial = salt.payload.Serial(__opts__)
    req = check_request()
    req.update({
        kwargs.get('name', 'default'): {
            'test_run': ret,
            'mods': mods,
            'kwargs': kwargs
        }
    })
    with salt.utils.files.set_umask(0o077):
        try:
            if salt.utils.platform.is_windows():
                # Make sure cache file isn't read-only
                __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
            with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
                serial.dump(req, fp_)
        except (IOError, OSError):
            log.error(
                'Unable to write state request file %s. Check permission.',
                notify_path)
    return ret
Example #21
 def grain_match(self, tgt):
     '''
     Reads in the grains glob match
     '''
     log.debug('grains target: {0}'.format(tgt))
     comps = tgt.rsplit(':', 1)
     if len(comps) != 2:
         log.error('Got insufficient arguments for grains match '
                   'statement from master')
         return False
     match = self._traverse_dict(self.opts['grains'], comps[0])
     if match == {}:
         log.error('Targeted grain "{0}" not found'.format(comps[0]))
         return False
     if isinstance(match, dict):
         log.error('Targeted grain "{0}" must correspond to a list, '
                   'string, or numeric value'.format(comps[0]))
         return False
     if isinstance(match, list):
         # We are matching a single component to a single list member
         for member in match:
             if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
                 return True
         return False
     return bool(fnmatch.fnmatch(str(match).lower(), comps[1].lower()))
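A worked illustration of the split-and-glob logic in Example #21, using a made-up grains dictionary (nested grains would go through _traverse_dict with a colon-delimited path):

import fnmatch

grains = {'os': 'Ubuntu', 'roles': ['web', 'db']}

key, expr = 'os:Ubu*'.rsplit(':', 1)          # 'os', 'Ubu*'
print(fnmatch.fnmatch(str(grains[key]).lower(), expr.lower()))   # True

key, expr = 'roles:w*'.rsplit(':', 1)         # list-valued grain: any member may match
print(any(fnmatch.fnmatch(str(m).lower(), expr.lower()) for m in grains[key]))  # True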
Example #22
    def __init__(
        self,
        tgt="",
        tgt_type="glob",
        saltenv=None,
        use_cached_grains=True,
        use_cached_pillar=True,
        grains_fallback=True,
        pillar_fallback=True,
        opts=None,
    ):

        log.debug("New instance of %s created.", self.__class__.__name__)
        if opts is None:
            log.error("%s: Missing master opts init arg.",
                      self.__class__.__name__)
            raise SaltException("{}: Missing master opts init arg.".format(
                self.__class__.__name__))
        else:
            self.opts = opts
        self.tgt = tgt
        self.tgt_type = tgt_type
        self.saltenv = saltenv
        self.use_cached_grains = use_cached_grains
        self.use_cached_pillar = use_cached_pillar
        self.grains_fallback = grains_fallback
        self.pillar_fallback = pillar_fallback
        self.cache = salt.cache.factory(opts)
        log.debug(
            "Init settings: tgt: '%s', tgt_type: '%s', saltenv: '%s', "
            "use_cached_grains: %s, use_cached_pillar: %s, "
            "grains_fallback: %s, pillar_fallback: %s",
            tgt,
            tgt_type,
            saltenv,
            use_cached_grains,
            use_cached_pillar,
            grains_fallback,
            pillar_fallback,
        )
Example #23
 def grain_match(self, tgt):
     '''
     Reads in the grains glob match
     '''
     log.debug('grains target: {0}'.format(tgt))
     comps = tgt.rsplit(':', 1)
     if len(comps) != 2:
         log.error('Got insufficient arguments for grains match '
                   'statement from master')
         return False
     match = self._traverse_dict(self.opts['grains'], comps[0])
     if match == {}:
         log.error('Targeted grain "{0}" not found'.format(comps[0]))
         return False
     if isinstance(match, dict):
         log.error('Targeted grain "{0}" must correspond to a list, '
                   'string, or numeric value'.format(comps[0]))
         return False
     if isinstance(match, list):
         # We are matching a single component to a single list member
         for member in match:
             if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
                 return True
         return False
     return bool(fnmatch.fnmatch(str(match).lower(), comps[1].lower()))
Example #24
def ec2_tags():

    boto_version = StrictVersion(boto.__version__)
    required_boto_version = StrictVersion('2.8.0')
    if boto_version < required_boto_version:
        log.error("%s: installed boto version %s < %s, can't find ec2_tags",
                __name__, boto_version, required_boto_version)
        return None

    if not _on_ec2():
        log.info("%s: not an EC2 instance, skipping", __name__)
        return None

    (instance_id, region) = _get_instance_info()
    credentials = _get_credentials()
    if not credentials:
        log.error("%s: no AWS credentials found, see documentation for how to provide them.", __name__)
        return None

    # Connect to EC2 and parse the Roles tags for this instance
    conn = boto.ec2.connect_to_region(region,
            aws_access_key_id=credentials['access_key'],
            aws_secret_access_key=credentials['secret_key'])

    tags = {}
    try:
        reservation = conn.get_all_instances(instance_ids=[ instance_id ])[0]
        instance = reservation.instances[0]
        tags = instance.tags
    except IndexError as e:
        log.error("Couldn't retrieve instance information: %s", e)
        return None
Example #25
def clear_request(name=None):
    """
    .. versionadded:: 2017.7.3

    Clear out the state execution request without executing it

    CLI Example:

    .. code-block:: bash

        salt '*' state.clear_request
    """
    notify_path = os.path.join(__opts__["cachedir"], "req_state.p")
    serial = salt.payload.Serial(__opts__)
    if not os.path.isfile(notify_path):
        return True
    if not name:
        try:
            os.remove(notify_path)
        except (IOError, OSError):
            pass
    else:
        req = check_request()
        if name in req:
            req.pop(name)
        else:
            return False
        with salt.utils.files.set_umask(0o077):
            try:
                if salt.utils.platform.is_windows():
                    # Make sure cache file isn't read-only
                    __salt__["cmd.run"]('attrib -R "{0}"'.format(notify_path))
                with salt.utils.files.fopen(notify_path, "w+b") as fp_:
                    serial.dump(req, fp_)
            except (IOError, OSError):
                log.error(
                    "Unable to write state request file %s. Check permission.",
                    notify_path,
                )
    return True
Example #26
def request(mods=None, **kwargs):
    """
    .. versionadded:: 2017.7.3

    Request that the local admin execute a state run via
    `salt-call state.run_request`
    All arguments match state.apply

    CLI Example:

    .. code-block:: bash

        salt '*' state.request
        salt '*' state.request test
        salt '*' state.request test,pkgs
    """
    kwargs["test"] = True
    ret = apply_(mods, **kwargs)
    notify_path = os.path.join(__opts__["cachedir"], "req_state.p")
    serial = salt.payload.Serial(__opts__)
    req = check_request()
    req.update({
        kwargs.get("name", "default"): {
            "test_run": ret,
            "mods": mods,
            "kwargs": kwargs,
        }
    })
    with salt.utils.files.set_umask(0o077):
        try:
            if salt.utils.platform.is_windows():
                # Make sure cache file isn't read-only
                __salt__["cmd.run"]('attrib -R "{0}"'.format(notify_path))
            with salt.utils.files.fopen(notify_path, "w+b") as fp_:
                serial.dump(req, fp_)
        except (IOError, OSError):
            log.error(
                "Unable to write state request file %s. Check permission.",
                notify_path)
    return ret
Example #27
def ec2_tags():

    boto_version = StrictVersion(boto.__version__)
    required_boto_version = StrictVersion('2.8.0')
    if boto_version < required_boto_version:
        log.error("%s: installed boto version %s < %s, can't find ec2_tags",
                  __name__, boto_version, required_boto_version)
        return None

    if not _on_ec2():
        log.info("%s: not an EC2 instance, skipping", __name__)
        return None

    (instance_id, region) = _get_instance_info()
    credentials = _get_credentials()
    if not credentials:
        log.error(
            "%s: no AWS credentials found, see documentation for how to provide them.",
            __name__)
        return None

    # Connect to EC2 and parse the Roles tags for this instance
    conn = boto.ec2.connect_to_region(
        region,
        aws_access_key_id=credentials['access_key'],
        aws_secret_access_key=credentials['secret_key'])

    tags = {}
    try:
        reservation = conn.get_all_instances(instance_ids=[instance_id])[0]
        instance = reservation.instances[0]
        tags = instance.tags
    except IndexError as e:
        log.error("Couldn't retrieve instance information: %s", e)
        return None
Example #28
def clear_request(name=None):
    '''
    .. versionadded:: 2017.7.3

    Clear out the state execution request without executing it

    CLI Example:

    .. code-block:: bash

        salt '*' state.clear_request
    '''
    notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
    serial = salt.payload.Serial(__opts__)
    if not os.path.isfile(notify_path):
        return True
    if not name:
        try:
            os.remove(notify_path)
        except (IOError, OSError):
            pass
    else:
        req = check_request()
        if name in req:
            req.pop(name)
        else:
            return False
        cumask = os.umask(0o77)
        try:
            if salt.utils.platform.is_windows():
                # Make sure cache file isn't read-only
                __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
            with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
                serial.dump(req, fp_)
        except (IOError, OSError):
            msg = 'Unable to write state request file {0}. Check permission.'
            log.error(msg.format(notify_path))
        os.umask(cumask)
    return True
Example #29
 def grain_pcre_match(self, tgt):
     '''
     Matches a grain based on regex
     '''
     comps = tgt.split(':')
     if len(comps) < 2:
         log.error('Got insufficient arguments for grains from master')
         return False
     if comps[0] not in self.opts['grains']:
         log.error('Got unknown grain from master: {0}'.format(comps[0]))
         return False
     if isinstance(self.opts['grains'][comps[0]], list):
         # We are matching a single component to a single list member
         for member in self.opts['grains'][comps[0]]:
             if re.match(comps[1].lower(), str(member).lower()):
                 return True
         return False
     return bool(
         re.match(
             comps[1].lower(),
             str(self.opts['grains'][comps[0]]).lower()
         )
     )
Example #30
    def exec_command(self):
        """dispatch command"""

        if not isinstance(self.parser, ArgumentParser):
            raise InvalidParam("Not parser found")

        self.args = self.parser.parse_args()
        print(self.args)
        salt.log.set_logger_level(__name__, self.args.log_level)
        log.debug("command argument: %r" % self.args)

        if self.args.opt not in self.support_commands:
            raise InvalidParam("Not support sub-command, see help")

        # dispatch command
        try:
            func = getattr(self, "do_%s" % self.args.opt)
            func()
        except Exception as err:
            log.error('call sub-command:{0} failed: {1!r}'.format(
                self.args.opt, err)
            )
            raise RunTimeFailture("call do_func failture")
Example #31
 def confirm_top(self, match, data, nodegroups=None):
     '''
     Takes the data passed to a top file environment and determines if the
     data matches this minion
     '''
     matcher = 'glob'
     if not data:
         log.error('Received bad data when setting the match from the top '
                   'file')
         return False
     for item in data:
         if isinstance(item, dict):
             if 'match' in item:
                 matcher = item['match']
     if hasattr(self, matcher + '_match'):
         funcname = '{0}_match'.format(matcher)
         if matcher == 'nodegroup':
             return getattr(self, funcname)(match, nodegroups)
         return getattr(self, funcname)(match)
     else:
         log.error('Attempting to match with unknown matcher: {0}'.format(
             matcher))
         return False
Example #32
def compare_versions(ver1='', oper='==', ver2='', cmp_func=None):
    '''
    Compares two version numbers. Accepts a custom function to perform the
    cmp-style version comparison, otherwise uses version_cmp().
    '''
    cmp_map = {'<': (-1,), '<=': (-1, 0), '==': (0,),
               '>=': (0, 1), '>': (1,)}
    if oper not in ['!='] + list(cmp_map.keys()):
        log.error('Invalid operator "{0}" for version '
                  'comparison'.format(oper))
        return False

    if cmp_func is None:
        cmp_func = version_cmp

    cmp_result = cmp_func(ver1, ver2)
    if cmp_result is None:
        return False

    if oper == '!=':
        return cmp_result not in cmp_map['==']
    else:
        return cmp_result in cmp_map[oper]
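A quick usage sketch of the operator map in Example #32, with a deliberately naive cmp_func standing in for Salt's version_cmp() (which handles far more version formats):

def _simple_cmp(a, b):
    # Dotted-integer comparison returning -1, 0, or 1, like old-style cmp().
    ta = tuple(int(p) for p in a.split('.'))
    tb = tuple(int(p) for p in b.split('.'))
    return (ta > tb) - (ta < tb)

compare_versions('1.2.3', '<', '1.10.0', cmp_func=_simple_cmp)   # True: -1 is in cmp_map['<']
compare_versions('2.0', '>=', '2.0', cmp_func=_simple_cmp)       # True: 0 is in cmp_map['>=']
compare_versions('2.0', '!=', '2.0', cmp_func=_simple_cmp)       # False: 0 is in cmp_map['==']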
Example #33
 def confirm_top(self, match, data, nodegroups=None):
     '''
     Takes the data passed to a top file environment and determines if the
     data matches this minion
     '''
     matcher = 'glob'
     if not data:
         log.error('Received bad data when setting the match from the top '
                   'file')
         return False
     for item in data:
         if isinstance(item, dict):
             if 'match' in item:
                 matcher = item['match']
     if hasattr(self, matcher + '_match'):
         funcname = '{0}_match'.format(matcher)
         if matcher == 'nodegroup':
             return getattr(self, funcname)(match, nodegroups)
         return getattr(self, funcname)(match)
     else:
         log.error('Attempting to match with unknown matcher: {0}'.format(
             matcher
         ))
         return False
Example #34
 def _deliver(self, addrs, alert):
     '''
     Deliver the alert to the specified addresses.
     This method should only be called by Agent.deliver().
     addrs = a list of "To:" recipients.  Each recipient can be a
             plain email address, e.g. "*****@*****.**", or a real name
             plus an address, e.g. "Super Duper <*****@*****.**>".
     alert = the alert dict used to expand ${var}s in message subject
             and body
     '''
     if len(addrs) == 0:
         return
     full_addrs = [addr[0] for addr in addrs]
     email_addrs = [addr[1] for addr in addrs]
     msg = email.mime.text.MIMEText(self.body.safe_substitute(alert))
     msg['Subject'] = self.subject.safe_substitute(alert)
     msg['From'] = self.sender
     msg['To'] = ', '.join(full_addrs)
     msgstr = msg.as_string()
     log.trace('send email:\n%s', msgstr)
     log.trace('email: connect to %s port %s', self.server, self.port)
     try:
         s = smtplib.SMTP(self.server, self.port)
         s.ehlo()
         if s.has_extn('STARTTLS'):
             log.trace('email: start tls')
             s.starttls()
         if self.user and self.password:
             log.trace('email: login as %s', self.user)
             s.login(self.user, self.password)
         log.trace('email: send message to %s', email_addrs)
         s.sendmail(self.user, email_addrs, msgstr)
         log.trace('email: disconnect')
         s.quit()
     except smtplib.SMTPException as ex:
         log.error('failed to send email alert:\n%s', msgstr, exc_info=ex)
Example #35
 def _deliver(self, addrs, alert):
     """
     Deliver the alert to the specified addresses.
     This method should only be called by Agent.deliver().
     addrs = a list of "To:" recipients.  Each recipient can be a
             plain email address, e.g. "*****@*****.**", or a real name
             plus an address, e.g. "Super Duper <*****@*****.**>".
     alert = the alert dict used to expand ${var}s in message subject
             and body
     """
     if len(addrs) == 0:
         return
     full_addrs = [addr[0] for addr in addrs]
     email_addrs = [addr[1] for addr in addrs]
     msg = email.mime.text.MIMEText(self.body.safe_substitute(alert))
     msg["Subject"] = self.subject.safe_substitute(alert)
     msg["From"] = self.sender
     msg["To"] = ", ".join(full_addrs)
     msgstr = msg.as_string()
     log.trace("send email:\n%s", msgstr)
     log.trace("email: connect to %s port %s", self.server, self.port)
     try:
         s = smtplib.SMTP(self.server, self.port)
         s.ehlo()
         if s.has_extn("STARTTLS"):
             log.trace("email: start tls")
             s.starttls()
         if self.user and self.password:
             log.trace("email: login as %s", self.user)
             s.login(self.user, self.password)
         log.trace("email: send message to %s", email_addrs)
         s.sendmail(self.user, email_addrs, msgstr)
         log.trace("email: disconnect")
         s.quit()
     except smtplib.SMTPException as ex:
         log.error("failed to send email alert:\n%s", msgstr, exc_info=ex)
Example #36
 def _load_subscriptions(self, config, agents):
     '''
     Load the alert subscriptions from /etc/salt/alert.
     '''
     subscriptions = config.get('alert.subscriptions')
     if not subscriptions:
         log.error('alert.subscriptions missing or empty in config')
         return
     for pattern, subscribers in subscriptions.items():
         regex = re.compile(pattern)
         if isinstance(subscribers, str):
             subscribers = [subscribers]
         for subscriber in subscribers:
             if ':' in subscriber:
                 protocol, addr = subscriber.split(':', 1)
             else:
                 protocol = DEFAULT_PROTOCOL
                 addr = subscriber
             agent = agents.get(protocol)
             if not agent:
                 log.error('ignore subscriber "%s": unknown protocol "%s"',
                           subscriber, protocol)
                 continue
             agent.add_subscriber(regex, addr)
Example #37
def dns_check(addr, safe=False, ipv6=False):
    '''
    Return the ip resolved by dns, but do not exit on failure, only raise an
    exception. Obeys system preference for IPv4/6 address resolution.
    '''
    error = False
    try:
        hostnames = socket.getaddrinfo(addr, None, socket.AF_UNSPEC,
                                       socket.SOCK_STREAM)
        if not hostnames:
            error = True
        else:
            addr = False
            for h in hostnames:
                if h[0] == socket.AF_INET or (h[0] == socket.AF_INET6
                                              and ipv6):
                    addr = ip_bracket(h[4][0])
                    break
            if not addr:
                error = True
    except socket.error:
        error = True

    if error:
        err = ('This master address: \'{0}\' was previously resolvable '
               'but now fails to resolve! The previously resolved ip addr '
               'will continue to be used').format(addr)
        if safe:
            if salt.log.is_console_configured():
                # If logging is not configured it also means that either
                # the master or minion instance calling this hasn't even
                # started running
                log.error(err)
            raise SaltClientError()
        raise SaltSystemExit(code=42, msg=err)
    return addr
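The ip_bracket() helper used in Example #37 is not shown; a minimal sketch of what it presumably does (an assumption based on the call site, not the actual Salt implementation):

def ip_bracket(addr):
    # Wrap IPv6 literals in brackets so they can be joined with a port;
    # IPv4 addresses pass through unchanged.
    return '[{0}]'.format(addr) if ':' in addr else addr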
Example #38
def dns_check(addr, safe=False, ipv6=False):
    '''
    Return the ip resolved by dns, but do not exit on failure, only raise an
    exception. Obeys system preference for IPv4/6 address resolution.
    '''
    error = False
    try:
        hostnames = socket.getaddrinfo(
            addr, None, socket.AF_UNSPEC, socket.SOCK_STREAM
        )
        if not hostnames:
            error = True
        else:
            addr = False
            for h in hostnames:
                if h[0] == socket.AF_INET or (h[0] == socket.AF_INET6 and ipv6):
                    addr = ip_bracket(h[4][0])
                    break
            if not addr:
                error = True
    except socket.error:
        error = True

    if error:
        err = ('This master address: \'{0}\' was previously resolvable '
               'but now fails to resolve! The previously resolved ip addr '
               'will continue to be used').format(addr)
        if safe:
            if salt.log.is_console_configured():
                # If logging is not configured it also means that either
                # the master or minion instance calling this hasn't even
                # started running
                log.error(err)
            raise SaltClientError()
        raise SaltSystemExit(code=42, msg=err)
    return addr
Example #39
def ec2_tags():

    boto_version = StrictVersion(boto.__version__)
    required_boto_version = StrictVersion('2.8.0')
    if boto_version < required_boto_version:
        log.error("%s: installed boto version %s < %s, can't find ec2_tags",
                  __name__, boto_version, required_boto_version)
        return None

    if not _on_ec2():
        log.info("%s: not an EC2 instance, skipping", __name__)
        return None

    (instance_id, region) = _get_instance_info()
    credentials = _get_credentials()

    # Connect to EC2 and parse the Roles tags for this instance
    try:
        conn = boto.ec2.connect_to_region(
            region,
            aws_access_key_id=credentials['access_key'],
            aws_secret_access_key=credentials['secret_key'])
    except Exception:
        if not (credentials['access_key'] and credentials['secret_key']):
            log.error(
                "%s: no AWS credentials found, see documentation for how to provide them.",
                __name__)
            return None
        else:
            log.error(
                "%s: invalid AWS credentials found, see documentation for how to provide them.",
                __name__)
            return None

    tags = {}
    try:
        _tags = conn.get_all_tags(filters={
            'resource-type': 'instance',
            'resource-id': instance_id
        })
        for tag in _tags:
            tags[tag.name] = tag.value
    except IndexError as e:
        log.error("Couldn't retrieve instance information: %s", e)
        return None
Example #40
def ec2_tags():

    boto_version = StrictVersion(boto.__version__)
    required_boto_version = StrictVersion('2.8.0')
    if boto_version < required_boto_version:
        log.error("%s: installed boto version %s < %s, can't find ec2_tags",
                __name__, boto_version, required_boto_version)
        return None

    if not _on_ec2():
        log.info("%s: not an EC2 instance, skipping", __name__)
        return None

    (instance_id, region) = _get_instance_info()
    credentials = _get_credentials()

    # Connect to EC2 and parse the Roles tags for this instance
    try:
        conn = boto.ec2.connect_to_region(region,
                aws_access_key_id=credentials['access_key'],
                aws_secret_access_key=credentials['secret_key'])
    except Exception:
        if not (credentials['access_key'] and credentials['secret_key']):
            log.error("%s: no AWS credentials found, see documentation for how to provide them.", __name__)
            return None
        else:
            log.error("%s: invalid AWS credentials found, see documentation for how to provide them.", __name__)
            return None

    tags = {}
    try:
        _tags = conn.get_all_tags(filters={'resource-type': 'instance',
                'resource-id': instance_id})
        for tag in _tags:
            tags[tag.name] = tag.value
    except IndexError as e:
        log.error("Couldn't retrieve instance information: %s", e)
        return None
Example #41
def ec2_tags():
    boto_version = StrictVersion(boto.__version__)
    required_boto_version = StrictVersion('2.8.0')
    if boto_version < required_boto_version:
        log.error("Installed boto version %s < %s, can't find ec2_tags",
                  boto_version, required_boto_version)
        return None

    if not _on_ec2():
        log.info("Not an EC2 instance, skipping")
        return None

    instance_id, region = _get_instance_info()
    credentials = _get_credentials()

    # Connect to EC2 and parse the Roles tags for this instance
    try:
        conn = boto.ec2.connect_to_region(
            region,
            aws_access_key_id=credentials['access_key'],
            aws_secret_access_key=credentials['secret_key'],
        )
    except Exception as e:
        log.error("Could not get AWS connection: %s", e)
        return None

    ec2_tags = {}
    try:
        tags = conn.get_all_tags(filters={'resource-type': 'instance',
                                          'resource-id': instance_id})
        for tag in tags:
            ec2_tags[tag.name] = tag.value
    except Exception as e:
        log.error("Couldn't retrieve instance tags: %s", e)
        return None

    ret = dict(ec2_tags=ec2_tags)

    # Provide ec2_tags_roles functionality
    if 'Roles' in ec2_tags:
        ret['ec2_roles'] = ec2_tags['Roles'].split(',')

    return ret
Example #42
def high(data, **kwargs):
    '''
    Execute the compound calls stored in a single set of high data
    This function is mostly intended for testing the state system

    CLI Example:

    .. code-block:: bash

        salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
    '''
    __pillar__.update(kwargs.get('pillar', {}))
    st_kwargs = __salt__.kwargs
    __opts__['grains'] = __grains__
    st_ = salt.client.ssh.state.SSHHighState(
            __opts__,
            __pillar__,
            __salt__,
            __context__['fileclient'])
    chunks = st_.state.compile_high_data(data)
    file_refs = salt.client.ssh.state.lowstate_file_refs(
            chunks,
            _merge_extra_filerefs(
                kwargs.get('extra_filerefs', ''),
                __opts__.get('extra_filerefs', '')
                )
            )
    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(
            __context__['fileclient'],
            chunks,
            file_refs,
            __pillar__,
            id_=st_kwargs['id_'])
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
    cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
            __opts__['thin_dir'],
            trans_tar_sum,
            __opts__['hash_type'])
    single = salt.client.ssh.Single(
            __opts__,
            cmd,
            fsclient=__context__['fileclient'],
            **st_kwargs)
    single.shell.send(
            trans_tar,
            '{0}/salt_state.tgz'.format(__opts__['thin_dir']))
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except (OSError, IOError):
        pass

    # Read in the JSON data and return the data structure
    try:
        return json.loads(stdout, object_hook=salt.utils.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
Example #43
def top(topfn, test=None, **kwargs):
    """
    Execute a specific top file instead of the default

    CLI Example:

    .. code-block:: bash

        salt '*' state.top reverse_top.sls
        salt '*' state.top reverse_top.sls exclude=sls_to_exclude
        salt '*' state.top reverse_top.sls exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
    """
    __pillar__.update(kwargs.get("pillar", {}))
    st_kwargs = __salt__.kwargs
    __opts__["grains"] = __grains__.value()
    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    if salt.utils.args.test_mode(test=test, **kwargs):
        opts["test"] = True
    else:
        opts["test"] = __opts__.get("test", None)
    st_ = salt.client.ssh.state.SSHHighState(opts, __pillar__, __salt__,
                                             __context__["fileclient"])
    st_.opts["state_top"] = os.path.join("salt://", topfn)
    st_.push_active()
    chunks = st_.compile_low_chunks()
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get("extra_filerefs", ""),
                              opts.get("extra_filerefs", "")),
    )

    roster = salt.roster.Roster(opts, opts.get("roster", "flat"))
    roster_grains = roster.opts["grains"]

    # Create the tar containing the state pkg and relevant files.
    _cleanup_slsmod_low_data(chunks)
    trans_tar = salt.client.ssh.state.prep_trans_tar(
        __context__["fileclient"],
        chunks,
        file_refs,
        __pillar__,
        st_kwargs["id_"],
        roster_grains,
    )
    trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts["hash_type"])
    cmd = "state.pkg {}/salt_state.tgz test={} pkg_sum={} hash_type={}".format(
        opts["thin_dir"], test, trans_tar_sum, opts["hash_type"])
    single = salt.client.ssh.Single(opts,
                                    cmd,
                                    fsclient=__context__["fileclient"],
                                    minion_opts=__salt__.minion_opts,
                                    **st_kwargs)
    single.shell.send(trans_tar, "{}/salt_state.tgz".format(opts["thin_dir"]))
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except OSError:
        pass

    # Read in the JSON data and return the data structure
    try:
        return salt.utils.json.loads(stdout)
    except Exception as e:  # pylint: disable=broad-except
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
Example #44
def get_id():
    """
    Guess the id of the minion.

    - Check /etc/hostname for a value other than localhost
    - If socket.getfqdn() returns us something other than localhost, use it
    - Check /etc/hosts for something that isn't localhost that maps to 127.*
    - Look for a routeable / public IP
    - A private IP is better than a loopback IP
    - localhost may be better than killing the minion

    Returns two values: the detected ID, and a boolean value noting whether or
    not an IP address is being used for the ID.
    """

    log.debug("Guessing ID. The id can be explicitly in set {0}".format(os.path.join(syspaths.CONFIG_DIR, "minion")))

    # Check /etc/hostname
    try:
        with salt.utils.fopen("/etc/hostname") as hfl:
            name = hfl.read().strip()
        if re.search(r"\s", name):
            log.warning(
                "Whitespace character detected in /etc/hostname. " "This file should not contain any whitespace."
            )
        else:
            if name != "localhost":
                return name, False
    except Exception:
        pass

    # Nothing in /etc/hostname or /etc/hostname not found
    fqdn = socket.getfqdn()
    if fqdn != "localhost":
        log.info("Found minion id from getfqdn(): {0}".format(fqdn))
        return fqdn, False

    # Can /etc/hosts help us?
    try:
        with salt.utils.fopen("/etc/hosts") as hfl:
            for line in hfl:
                names = line.split()
                ip_ = names.pop(0)
                if ip_.startswith("127."):
                    for name in names:
                        if name != "localhost":
                            log.info("Found minion id in hosts file: {0}".format(name))
                            return name, False
    except Exception:
        pass

    # Can Windows 'hosts' file help?
    try:
        windir = os.getenv("WINDIR")
        with salt.utils.fopen(windir + "\\system32\\drivers\\etc\\hosts") as hfl:
            for line in hfl:
                # skip commented or blank lines
                if line[0] == "#" or len(line) <= 1:
                    continue
                # process lines looking for '127.' in first column
                try:
                    entry = line.split()
                    if entry[0].startswith("127."):
                        for name in entry[1:]:  # try each name in the row
                            if name != "localhost":
                                log.info("Found minion id in hosts file: {0}".format(name))
                                return name, False
                except IndexError:
                    pass  # could not split line (malformed entry?)
    except Exception:
        pass

    # What IP addresses do we have?
    ip_addresses = [
        salt.utils.network.IPv4Address(addr)
        for addr in salt.utils.network.ip_addrs(include_loopback=True)
        if not addr.startswith("127.")
    ]

    for addr in ip_addresses:
        if not addr.is_private:
            log.info("Using public ip address for id: {0}".format(addr))
            return str(addr), True

    if ip_addresses:
        addr = ip_addresses.pop(0)
        log.info("Using private ip address for id: {0}".format(addr))
        return str(addr), True

    log.error("No id found, falling back to localhost")
    return "localhost", False
Example #45
0
def low(data, **kwargs):
    '''
    Execute a single low data call
    This function is mostly intended for testing the state system

    CLI Example:

    .. code-block:: bash

        salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
    '''
    st_kwargs = __salt__.kwargs
    __opts__['grains'] = __grains__
    chunks = [data]
    st_ = salt.client.ssh.state.SSHHighState(__opts__, __pillar__, __salt__,
                                             __context__['fileclient'])
    for chunk in chunks:
        chunk['__id__'] = chunk['name'] if not chunk.get(
            '__id__') else chunk['__id__']
    err = st_.state.verify_data(data)
    if err:
        return err
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get('extra_filerefs', ''),
                              __opts__.get('extra_filerefs', '')))
    roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
    roster_grains = roster.opts['grains']

    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(
        __opts__, __context__['fileclient'], chunks, file_refs, __pillar__,
        st_kwargs['id_'], roster_grains)
    trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar,
                                                  __opts__['hash_type'])
    cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
        __opts__['thin_dir'], trans_tar_sum, __opts__['hash_type'])
    single = salt.client.ssh.Single(__opts__,
                                    cmd,
                                    fsclient=__context__['fileclient'],
                                    minion_opts=__salt__.minion_opts,
                                    **st_kwargs)
    single.shell.send(trans_tar,
                      '{0}/salt_state.tgz'.format(__opts__['thin_dir']))
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except (OSError, IOError):
        pass

    # Read in the JSON data and return the data structure
    try:
        return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
Example #46
0
def highstate(test=None, **kwargs):
    """
    Retrieve the state data from the salt master for this minion and execute it

    CLI Example:

    .. code-block:: bash

        salt '*' state.highstate

        salt '*' state.highstate exclude=sls_to_exclude
        salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
    """
    __pillar__.update(kwargs.get("pillar", {}))
    st_kwargs = __salt__.kwargs
    __opts__["grains"] = __grains__.value()
    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    st_ = salt.client.ssh.state.SSHHighState(opts, __pillar__.value(),
                                             __salt__.value(),
                                             __context__["fileclient"])
    st_.push_active()
    chunks = st_.compile_low_chunks()
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get("extra_filerefs", ""),
                              opts.get("extra_filerefs", "")),
    )
    # Check for errors
    for chunk in chunks:
        if not isinstance(chunk, dict):
            __context__["retcode"] = 1
            return chunks

    roster = salt.roster.Roster(opts, opts.get("roster", "flat"))
    roster_grains = roster.opts["grains"]

    # Create the tar containing the state pkg and relevant files.
    _cleanup_slsmod_low_data(chunks)
    trans_tar = salt.client.ssh.state.prep_trans_tar(
        __context__["fileclient"],
        chunks,
        file_refs,
        __pillar__.value(),
        st_kwargs["id_"],
        roster_grains,
    )
    trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts["hash_type"])
    cmd = "state.pkg {}/salt_state.tgz test={} pkg_sum={} hash_type={}".format(
        opts["thin_dir"], test, trans_tar_sum, opts["hash_type"])
    single = salt.client.ssh.Single(opts,
                                    cmd,
                                    fsclient=__context__["fileclient"],
                                    minion_opts=__salt__.minion_opts,
                                    **st_kwargs)
    single.shell.send(trans_tar, "{}/salt_state.tgz".format(opts["thin_dir"]))
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except OSError:
        pass

    # Read in the JSON data and return the data structure
    try:
        return salt.utils.json.loads(stdout)
    except Exception as e:  # pylint: disable=broad-except
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
Example #47
0
def single(fun, name, test=None, **kwargs):
    '''
    .. versionadded:: 2015.5.0

    Execute a single state function with the named kwargs, returns False if
    insufficient data is sent to the command

    By default, the values of the kwargs will be parsed as YAML. So, you can
    specify list values, or lists of single-entry key-value maps, as you
    would in a YAML salt file. Alternatively, JSON format of keyword values
    is also supported.

    CLI Example:

    .. code-block:: bash

        salt '*' state.single pkg.installed name=vim

    '''
    st_kwargs = __salt__.kwargs
    __opts__['grains'] = __grains__

    # state.fun -> [state, fun]
    comps = fun.split('.')
    if len(comps) < 2:
        __context__['retcode'] = 1
        return 'Invalid function passed'

    # Create the low chunk, using kwargs as a base
    kwargs.update({
        'state': comps[0],
        'fun': comps[1],
        '__id__': name,
        'name': name
    })

    opts = copy.deepcopy(__opts__)

    # Set test mode
    if salt.utils.test_mode(test=test, **kwargs):
        opts['test'] = True
    else:
        opts['test'] = __opts__.get('test', None)

    # Get the override pillar data
    __pillar__.update(kwargs.get('pillar', {}))

    # Create the State environment
    st_ = salt.client.ssh.state.SSHState(__opts__, __pillar__)

    # Verify the low chunk
    err = st_.verify_data(kwargs)
    if err:
        __context__['retcode'] = 1
        return err

    # Must be a list of low-chunks
    chunks = [kwargs]

    # Retrieve file refs for the state run, so we can copy relevant files down
    # to the minion before executing the state
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get('extra_filerefs', ''),
                              __opts__.get('extra_filerefs', '')))

    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(__context__['fileclient'],
                                                     chunks, file_refs,
                                                     __pillar__,
                                                     st_kwargs['id_'])

    # Create a hash so we can verify the tar on the target system
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])

    # We use state.pkg to execute the "state package"
    cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
        __opts__['thin_dir'], test, trans_tar_sum, __opts__['hash_type'])

    # Create a salt-ssh Single object to actually do the ssh work
    single = salt.client.ssh.Single(__opts__,
                                    cmd,
                                    fsclient=__context__['fileclient'],
                                    minion_opts=__salt__.minion_opts,
                                    **st_kwargs)

    # Copy the tar down
    single.shell.send(trans_tar,
                      '{0}/salt_state.tgz'.format(__opts__['thin_dir']))

    # Run the state.pkg command on the target
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except (OSError, IOError):
        pass

    # Read in the JSON data and return the data structure
    try:
        return json.loads(stdout, object_hook=salt.utils.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
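The low-chunk construction near the top of single() is plain dictionary work: split "state.fun" into its two components and merge them, plus the name, into the caller's kwargs. A small illustrative sketch of just that step (the helper name is hypothetical, not part of the Salt API):

def build_low_chunk(fun, name, **kwargs):
    """Turn e.g. 'pkg.installed' plus a name and kwargs into one low chunk."""
    comps = fun.split(".")
    if len(comps) < 2:
        raise ValueError("Invalid function passed")
    kwargs.update({
        "state": comps[0],   # e.g. 'pkg'
        "fun": comps[1],     # e.g. 'installed'
        "__id__": name,
        "name": name,
    })
    return kwargs


# build_low_chunk("pkg.installed", "vim") ->
# {'state': 'pkg', 'fun': 'installed', '__id__': 'vim', 'name': 'vim'}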
Example #48
0
def highstate(test=None, **kwargs):
    '''
    Retrieve the state data from the salt master for this minion and execute it

    CLI Example:

    .. code-block:: bash

        salt '*' state.highstate

        salt '*' state.highstate exclude=sls_to_exclude
        salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
    '''
    __pillar__.update(kwargs.get('pillar', {}))
    st_kwargs = __salt__.kwargs
    __opts__['grains'] = __grains__
    st_ = salt.client.ssh.state.SSHHighState(__opts__, __pillar__, __salt__,
                                             __context__['fileclient'])
    st_.push_active()
    chunks = st_.compile_low_chunks()
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get('extra_filerefs', ''),
                              __opts__.get('extra_filerefs', '')))
    # Check for errors
    for chunk in chunks:
        if not isinstance(chunk, dict):
            __context__['retcode'] = 1
            return chunks
    # Create the tar containing the state pkg and relevant files.
    _cleanup_slsmod_low_data(chunks)
    trans_tar = salt.client.ssh.state.prep_trans_tar(__context__['fileclient'],
                                                     chunks, file_refs,
                                                     __pillar__,
                                                     st_kwargs['id_'])
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
    cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
        __opts__['thin_dir'], test, trans_tar_sum, __opts__['hash_type'])
    single = salt.client.ssh.Single(__opts__,
                                    cmd,
                                    fsclient=__context__['fileclient'],
                                    minion_opts=__salt__.minion_opts,
                                    **st_kwargs)
    single.shell.send(trans_tar,
                      '{0}/salt_state.tgz'.format(__opts__['thin_dir']))
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except (OSError, IOError):
        pass

    # Read in the JSON data and return the data structure
    try:
        return json.loads(stdout, object_hook=salt.utils.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
Example #49
0
def single(fun, name, test=None, **kwargs):
    """
    .. versionadded:: 2015.5.0

    Execute a single state function with the named kwargs, returns False if
    insufficient data is sent to the command

    By default, the values of the kwargs will be parsed as YAML. So, you can
    specify list values, or lists of single-entry key-value maps, as you
    would in a YAML salt file. Alternatively, JSON format of keyword values
    is also supported.

    CLI Example:

    .. code-block:: bash

        salt '*' state.single pkg.installed name=vim

    """
    st_kwargs = __salt__.kwargs
    __opts__["grains"] = __grains__.value()

    # state.fun -> [state, fun]
    comps = fun.split(".")
    if len(comps) < 2:
        __context__["retcode"] = 1
        return "Invalid function passed"

    # Create the low chunk, using kwargs as a base
    kwargs.update({
        "state": comps[0],
        "fun": comps[1],
        "__id__": name,
        "name": name
    })

    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)

    # Set test mode
    if salt.utils.args.test_mode(test=test, **kwargs):
        opts["test"] = True
    else:
        opts["test"] = __opts__.get("test", None)

    # Get the override pillar data
    __pillar__.update(kwargs.get("pillar", {}))

    # Create the State environment
    st_ = salt.client.ssh.state.SSHState(opts, __pillar__)

    # Verify the low chunk
    err = st_.verify_data(kwargs)
    if err:
        __context__["retcode"] = 1
        return err

    # Must be a list of low-chunks
    chunks = [kwargs]

    # Retrieve file refs for the state run, so we can copy relevant files down
    # to the minion before executing the state
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get("extra_filerefs", ""),
                              opts.get("extra_filerefs", "")),
    )

    roster = salt.roster.Roster(opts, opts.get("roster", "flat"))
    roster_grains = roster.opts["grains"]

    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(
        __context__["fileclient"],
        chunks,
        file_refs,
        __pillar__.value(),
        st_kwargs["id_"],
        roster_grains,
    )

    # Create a hash so we can verify the tar on the target system
    trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts["hash_type"])

    # We use state.pkg to execute the "state package"
    cmd = "state.pkg {}/salt_state.tgz test={} pkg_sum={} hash_type={}".format(
        opts["thin_dir"], test, trans_tar_sum, opts["hash_type"])

    # Create a salt-ssh Single object to actually do the ssh work
    single = salt.client.ssh.Single(opts,
                                    cmd,
                                    fsclient=__context__["fileclient"],
                                    minion_opts=__salt__.minion_opts,
                                    **st_kwargs)

    # Copy the tar down
    single.shell.send(trans_tar, "{}/salt_state.tgz".format(opts["thin_dir"]))

    # Run the state.pkg command on the target
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except OSError:
        pass

    # Read in the JSON data and return the data structure
    try:
        return salt.utils.json.loads(stdout)
    except Exception as e:  # pylint: disable=broad-except
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
Example #50
0
    def run(self):
        '''
        Main loop of the ConCache; starts updates at intervals and
        answers requests from the MWorkers
        '''
        context = zmq.Context()
        # the socket for incoming cache requests
        creq_in = context.socket(zmq.REP)
        creq_in.setsockopt(zmq.LINGER, 100)
        creq_in.bind('ipc://' + self.cache_sock)

        # the socket for incoming cache-updates from workers
        cupd_in = context.socket(zmq.SUB)
        cupd_in.setsockopt(zmq.SUBSCRIBE, '')
        cupd_in.setsockopt(zmq.LINGER, 100)
        cupd_in.bind('ipc://' + self.update_sock)

        # the socket for the timer-event
        timer_in = context.socket(zmq.SUB)
        timer_in.setsockopt(zmq.SUBSCRIBE, '')
        timer_in.setsockopt(zmq.LINGER, 100)
        timer_in.connect('ipc://' + self.upd_t_sock)

        poller = zmq.Poller()
        poller.register(creq_in, zmq.POLLIN)
        poller.register(cupd_in, zmq.POLLIN)
        poller.register(timer_in, zmq.POLLIN)

        # our serializer
        serial = salt.payload.Serial(self.opts.get('serial', ''))

        # register a signal handler
        signal.signal(signal.SIGINT, self.signal_handler)

        # secure the sockets from the world
        self.secure()

        log.info('ConCache started')

        while self.running:

            # we check for new events with the poller
            try:
                socks = dict(poller.poll(1))
            except KeyboardInterrupt:
                self.stop()
            except zmq.ZMQError as zmq_err:
                log.error('ConCache ZeroMQ-Error occurred')
                log.exception(zmq_err)
                self.stop()

            # check for next cache-request
            if socks.get(creq_in) == zmq.POLLIN:
                msg = serial.loads(creq_in.recv())
                log.debug('ConCache Received request: {0}'.format(msg))

                # requests for the minion list are sent as strings
                if isinstance(msg, str):
                    if msg == 'minions':
                        # Send reply back to client
                        reply = serial.dumps(self.minions)
                        creq_in.send(reply)

            # check for next cache-update from workers
            if socks.get(cupd_in) == zmq.POLLIN:
                new_c_data = serial.loads(cupd_in.recv())
                # tell the worker to exit
                #cupd_in.send(serial.dumps('ACK'))

                # check if the returned data is usable
                if not isinstance(new_c_data, list):
                    log.error('ConCache Worker returned unusable result')
                    del new_c_data
                    continue

                # the cache will receive lists of minions
                # 1. if the list has only one item, it's from an MWorker and we append it
                # 2. if the list contains another list, it's from a CacheWorker and
                #    the currently cached minions are replaced with that list
                # 3. anything else is considered malformed

                try:

                    if len(new_c_data) == 0:
                        log.debug('ConCache Got empty update from worker')
                        continue

                    data = new_c_data[0]

                    if isinstance(data, str):
                        if data not in self.minions:
                            log.debug('ConCache Adding minion {0} to cache'.format(new_c_data[0]))
                            self.minions.append(data)

                    elif isinstance(data, list):
                        log.debug('ConCache Replacing minion list from worker')
                        self.minions = data

                except IndexError:
                    log.debug('ConCache Got malformed result from worker')
                    del new_c_data

                log.info('ConCache {0} entries in cache'.format(len(self.minions)))

            # check for next timer-event to start new jobs
            if socks.get(timer_in) == zmq.POLLIN:
                sec_event = serial.loads(timer_in.recv())

                # update the list every 30 seconds
                if int(sec_event % 30) == 0:
                    cw = CacheWorker(self.opts)
                    cw.start()

        self.stop()
        creq_in.close()
        cupd_in.close()
        timer_in.close()
        context.term()
        log.debug('ConCache Shutting down')
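The run() loop above follows the standard pyzmq Poller pattern: register several sockets, poll with a short timeout, then dispatch on whichever socket reports POLLIN. A minimal runnable sketch of the same pattern with a single REP socket (the inproc endpoint name is arbitrary, chosen only so the example is self-contained):

import zmq

context = zmq.Context()

# REP socket answering requests, playing the role of creq_in above
rep = context.socket(zmq.REP)
rep.bind("inproc://concache-demo")

# A REQ client in the same process so the sketch runs end to end
req = context.socket(zmq.REQ)
req.connect("inproc://concache-demo")
req.send(b"minions")

poller = zmq.Poller()
poller.register(rep, zmq.POLLIN)

socks = dict(poller.poll(1000))      # timeout in milliseconds
if socks.get(rep) == zmq.POLLIN:
    msg = rep.recv()                 # b"minions"
    rep.send(b"cached minion list would go here")

print(req.recv())

req.close()
rep.close()
context.term()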
Example #51
0
def sls(mods, saltenv="base", test=None, exclude=None, **kwargs):
    """
    Create the seed file for a state.sls run
    """
    st_kwargs = __salt__.kwargs
    __opts__["grains"] = __grains__.value()
    __pillar__.update(kwargs.get("pillar", {}))
    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    st_ = salt.client.ssh.state.SSHHighState(opts, __pillar__.value(),
                                             __salt__.value(),
                                             __context__["fileclient"])
    st_.push_active()
    mods = _parse_mods(mods)
    high_data, errors = st_.render_highstate({saltenv: mods},
                                             context=__context__.value())
    if exclude:
        if isinstance(exclude, str):
            exclude = exclude.split(",")
        if "__exclude__" in high_data:
            high_data["__exclude__"].extend(exclude)
        else:
            high_data["__exclude__"] = exclude
    high_data, ext_errors = st_.state.reconcile_extend(high_data)
    errors += ext_errors
    errors += st_.state.verify_high(high_data)
    if errors:
        return errors
    high_data, req_in_errors = st_.state.requisite_in(high_data)
    errors += req_in_errors
    high_data = st_.state.apply_exclude(high_data)
    # Verify that the high data is structurally sound
    if errors:
        return errors
    # Compile and verify the raw chunks
    chunks = st_.state.compile_high_data(high_data)
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get("extra_filerefs", ""),
                              opts.get("extra_filerefs", "")),
    )

    roster = salt.roster.Roster(opts, opts.get("roster", "flat"))
    roster_grains = roster.opts["grains"]

    # Create the tar containing the state pkg and relevant files.
    _cleanup_slsmod_low_data(chunks)
    trans_tar = salt.client.ssh.state.prep_trans_tar(
        __context__["fileclient"],
        chunks,
        file_refs,
        __pillar__.value(),
        st_kwargs["id_"],
        roster_grains,
    )
    trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts["hash_type"])
    cmd = "state.pkg {}/salt_state.tgz test={} pkg_sum={} hash_type={}".format(
        opts["thin_dir"], test, trans_tar_sum, opts["hash_type"])
    single = salt.client.ssh.Single(opts,
                                    cmd,
                                    fsclient=__context__["fileclient"],
                                    minion_opts=__salt__.minion_opts,
                                    **st_kwargs)
    single.shell.send(trans_tar, "{}/salt_state.tgz".format(opts["thin_dir"]))
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except OSError:
        pass

    # Read in the JSON data and return the data structure
    try:
        return salt.utils.json.loads(stdout)
    except Exception as e:  # pylint: disable=broad-except
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
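The exclude handling near the top of sls() is a small, self-contained transformation: a comma-separated string becomes a list and is merged into any existing __exclude__ entry of the rendered high data. A sketch of just that step (hypothetical helper, not a Salt function):

def merge_exclude(high_data, exclude):
    """Merge an exclude spec (string or list) into high_data['__exclude__']."""
    if isinstance(exclude, str):
        exclude = exclude.split(",")     # "sls_a,sls_b" -> ["sls_a", "sls_b"]
    if "__exclude__" in high_data:
        high_data["__exclude__"].extend(exclude)
    else:
        high_data["__exclude__"] = exclude
    return high_data


# merge_exclude({}, "sls_a,sls_b") -> {'__exclude__': ['sls_a', 'sls_b']}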
Example #52
0
def sls(mods, saltenv='base', test=None, exclude=None, env=None, **kwargs):
    '''
    Create the seed file for a state.sls run
    '''
    st_kwargs = __salt__.kwargs
    __opts__['grains'] = __grains__
    if env is not None:
        salt.utils.warn_until(
            'Boron',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'env\'. This functionality will be removed in Salt Boron.')
        # Backwards compatibility
        saltenv = env

    __pillar__.update(kwargs.get('pillar', {}))
    st_ = salt.client.ssh.state.SSHHighState(__opts__, __pillar__, __salt__,
                                             __context__['fileclient'])
    if isinstance(mods, str):
        mods = mods.split(',')
    high_data, errors = st_.render_highstate({saltenv: mods})
    if exclude:
        if isinstance(exclude, str):
            exclude = exclude.split(',')
        if '__exclude__' in high_data:
            high_data['__exclude__'].extend(exclude)
        else:
            high_data['__exclude__'] = exclude
    high_data, ext_errors = st_.state.reconcile_extend(high_data)
    errors += ext_errors
    errors += st_.state.verify_high(high_data)
    if errors:
        return errors
    high_data, req_in_errors = st_.state.requisite_in(high_data)
    errors += req_in_errors
    high_data = st_.state.apply_exclude(high_data)
    # Verify that the high data is structurally sound
    if errors:
        return errors
    # Compile and verify the raw chunks
    chunks = st_.state.compile_high_data(high_data)
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get('extra_filerefs', ''),
                              __opts__.get('extra_filerefs', '')))
    trans_tar = salt.client.ssh.state.prep_trans_tar(__context__['fileclient'],
                                                     chunks, file_refs,
                                                     __pillar__)
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
    cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
        __opts__['thin_dir'], test, trans_tar_sum, __opts__['hash_type'])
    single = salt.client.ssh.Single(__opts__,
                                    cmd,
                                    fsclient=__context__['fileclient'],
                                    **st_kwargs)
    single.shell.send(trans_tar,
                      '{0}/salt_state.tgz'.format(__opts__['thin_dir']))
    stdout, stderr, _ = single.cmd_block()
    # Clean up our tar
    try:
        os.remove(trans_tar)
    except (OSError, IOError):
        pass

    # Read in the JSON data and return the data structure
    try:
        return json.loads(stdout, object_hook=salt.utils.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
Example #53
0
def low(data, **kwargs):
    """
    Execute a single low data call
    This function is mostly intended for testing the state system

    CLI Example:

    .. code-block:: bash

        salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
    """
    st_kwargs = __salt__.kwargs
    __opts__["grains"] = __grains__.value()
    chunks = [data]
    st_ = salt.client.ssh.state.SSHHighState(__opts__, __pillar__.value(),
                                             __salt__.value(),
                                             __context__["fileclient"])
    for chunk in chunks:
        chunk["__id__"] = chunk["name"] if not chunk.get(
            "__id__") else chunk["__id__"]
    err = st_.state.verify_data(data)
    if err:
        return err
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get("extra_filerefs", ""),
                              __opts__.get("extra_filerefs", "")),
    )
    roster = salt.roster.Roster(__opts__, __opts__.get("roster", "flat"))
    roster_grains = roster.opts["grains"]

    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(
        __context__["fileclient"],
        chunks,
        file_refs,
        __pillar__.value(),
        st_kwargs["id_"],
        roster_grains,
    )
    trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar,
                                                  __opts__["hash_type"])
    cmd = "state.pkg {}/salt_state.tgz pkg_sum={} hash_type={}".format(
        __opts__["thin_dir"], trans_tar_sum, __opts__["hash_type"])
    single = salt.client.ssh.Single(__opts__,
                                    cmd,
                                    fsclient=__context__["fileclient"],
                                    minion_opts=__salt__.minion_opts,
                                    **st_kwargs)
    single.shell.send(trans_tar,
                      "{}/salt_state.tgz".format(__opts__["thin_dir"]))
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except OSError:
        pass

    # Read in the JSON data and return the data structure
    try:
        return salt.utils.json.loads(stdout)
    except Exception as e:  # pylint: disable=broad-except
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
Example #54
0
    # Create a salt-ssh Single object to actually do the ssh work
    single = salt.client.ssh.Single(
        opts,
        cmd,
        fsclient=__context__["fileclient"],
        minion_opts=__salt__.minion_opts,
        **st_kwargs
    )

    # Copy the tar down
    single.shell.send(trans_tar, "{0}/salt_state.tgz".format(opts["thin_dir"]))

    # Run the state.pkg command on the target
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except (OSError, IOError):
        pass

    # Read in the JSON data and return the data structure
    try:
        return salt.utils.json.loads(stdout)
    except Exception as e:  # pylint: disable=broad-except
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(six.text_type(e))

    # If for some reason the json load fails, return the stdout
    return stdout
Example #55
0
def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
    '''
    Create the seed file for a state.sls run
    '''
    st_kwargs = __salt__.kwargs
    __opts__['grains'] = __grains__
    __pillar__.update(kwargs.get('pillar', {}))
    st_ = salt.client.ssh.state.SSHHighState(__opts__, __pillar__, __salt__,
                                             __context__['fileclient'])
    st_.push_active()
    if isinstance(mods, str):
        mods = mods.split(',')
    high_data, errors = st_.render_highstate({saltenv: mods})
    if exclude:
        if isinstance(exclude, str):
            exclude = exclude.split(',')
        if '__exclude__' in high_data:
            high_data['__exclude__'].extend(exclude)
        else:
            high_data['__exclude__'] = exclude
    high_data, ext_errors = st_.state.reconcile_extend(high_data)
    errors += ext_errors
    errors += st_.state.verify_high(high_data)
    if errors:
        return errors
    high_data, req_in_errors = st_.state.requisite_in(high_data)
    errors += req_in_errors
    high_data = st_.state.apply_exclude(high_data)
    # Verify that the high data is structurally sound
    if errors:
        return errors
    # Compile and verify the raw chunks
    chunks = st_.state.compile_high_data(high_data)
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get('extra_filerefs', ''),
                              __opts__.get('extra_filerefs', '')))
    # Create the tar containing the state pkg and relevant files.
    _cleanup_slsmod_low_data(chunks)
    trans_tar = salt.client.ssh.state.prep_trans_tar(__context__['fileclient'],
                                                     chunks, file_refs,
                                                     __pillar__,
                                                     st_kwargs['id_'])
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
    cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
        __opts__['thin_dir'], test, trans_tar_sum, __opts__['hash_type'])
    single = salt.client.ssh.Single(__opts__,
                                    cmd,
                                    fsclient=__context__['fileclient'],
                                    minion_opts=__salt__.minion_opts,
                                    **st_kwargs)
    single.shell.send(trans_tar,
                      '{0}/salt_state.tgz'.format(__opts__['thin_dir']))
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except (OSError, IOError):
        pass

    # Read in the JSON data and return the data structure
    try:
        return json.loads(stdout, object_hook=salt.utils.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
Example #56
0
    def run(self):
        '''
        Main loop of the ConCache; starts updates at intervals and
        answers requests from the MWorkers
        '''
        context = zmq.Context()
        # the socket for incoming cache requests
        creq_in = context.socket(zmq.REP)
        creq_in.setsockopt(zmq.LINGER, 100)
        creq_in.bind('ipc://' + self.cache_sock)

        # the socket for incoming cache-updates from workers
        cupd_in = context.socket(zmq.SUB)
        cupd_in.setsockopt(zmq.SUBSCRIBE, b'')
        cupd_in.setsockopt(zmq.LINGER, 100)
        cupd_in.bind('ipc://' + self.update_sock)

        # the socket for the timer-event
        timer_in = context.socket(zmq.SUB)
        timer_in.setsockopt(zmq.SUBSCRIBE, b'')
        timer_in.setsockopt(zmq.LINGER, 100)
        timer_in.connect('ipc://' + self.upd_t_sock)

        poller = zmq.Poller()
        poller.register(creq_in, zmq.POLLIN)
        poller.register(cupd_in, zmq.POLLIN)
        poller.register(timer_in, zmq.POLLIN)

        # our serializer
        serial = salt.payload.Serial(self.opts.get('serial', ''))

        # register a signal handler
        signal.signal(signal.SIGINT, self.signal_handler)

        # secure the sockets from the world
        self.secure()

        log.info('ConCache started')

        while self.running:

            # we check for new events with the poller
            try:
                socks = dict(poller.poll(1))
            except KeyboardInterrupt:
                self.stop()
            except zmq.ZMQError as zmq_err:
                log.error('ConCache ZeroMQ-Error occurred')
                log.exception(zmq_err)
                self.stop()

            # check for next cache-request
            if socks.get(creq_in) == zmq.POLLIN:
                msg = serial.loads(creq_in.recv())
                log.debug('ConCache Received request: %s', msg)

                # requests for the minion list are sent as strings
                if isinstance(msg, six.string_types):
                    if msg == 'minions':
                        # Send reply back to client
                        reply = serial.dumps(self.minions)
                        creq_in.send(reply)

            # check for next cache-update from workers
            if socks.get(cupd_in) == zmq.POLLIN:
                new_c_data = serial.loads(cupd_in.recv())
                # tell the worker to exit
                #cupd_in.send(serial.dumps('ACK'))

                # check if the returned data is usable
                if not isinstance(new_c_data, list):
                    log.error('ConCache Worker returned unusable result')
                    del new_c_data
                    continue

                # the cache will receive lists of minions
                # 1. if the list has only one item, it's from an MWorker and we append it
                # 2. if the list contains another list, it's from a CacheWorker and
                #    the currently cached minions are replaced with that list
                # 3. anything else is considered malformed

                try:

                    if not new_c_data:
                        log.debug('ConCache Got empty update from worker')
                        continue

                    data = new_c_data[0]

                    if isinstance(data, six.string_types):
                        if data not in self.minions:
                            log.debug('ConCache Adding minion %s to cache',
                                      new_c_data[0])
                            self.minions.append(data)

                    elif isinstance(data, list):
                        log.debug('ConCache Replacing minion list from worker')
                        self.minions = data

                except IndexError:
                    log.debug('ConCache Got malformed result from worker')
                    del new_c_data

                log.info('ConCache %s entries in cache', len(self.minions))

            # check for next timer-event to start new jobs
            if socks.get(timer_in) == zmq.POLLIN:
                sec_event = serial.loads(timer_in.recv())

                # update the list every 30 seconds
                if int(sec_event % 30) == 0:
                    cw = CacheWorker(self.opts)
                    cw.start()

        self.stop()
        creq_in.close()
        cupd_in.close()
        timer_in.close()
        context.term()
        log.debug('ConCache Shutting down')
Example #57
0
def top(topfn, test=None, **kwargs):
    '''
    Execute a specific top file instead of the default

    CLI Example:

    .. code-block:: bash

        salt '*' state.top reverse_top.sls
        salt '*' state.top reverse_top.sls exclude=sls_to_exclude
        salt '*' state.top reverse_top.sls exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
    '''
    __pillar__.update(kwargs.get('pillar', {}))
    st_kwargs = __salt__.kwargs
    __opts__['grains'] = __grains__
    if salt.utils.test_mode(test=test, **kwargs):
        __opts__['test'] = True
    else:
        __opts__['test'] = __opts__.get('test', None)
    st_ = salt.client.ssh.state.SSHHighState(__opts__, __pillar__, __salt__,
                                             __context__['fileclient'])
    st_.opts['state_top'] = os.path.join('salt://', topfn)
    st_.push_active()
    chunks = st_.compile_low_chunks()
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get('extra_filerefs', ''),
                              __opts__.get('extra_filerefs', '')))
    # Create the tar containing the state pkg and relevant files.
    _cleanup_slsmod_low_data(chunks)
    trans_tar = salt.client.ssh.state.prep_trans_tar(__context__['fileclient'],
                                                     chunks, file_refs,
                                                     __pillar__,
                                                     st_kwargs['id_'])
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
    cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
        __opts__['thin_dir'], test, trans_tar_sum, __opts__['hash_type'])
    single = salt.client.ssh.Single(__opts__,
                                    cmd,
                                    fsclient=__context__['fileclient'],
                                    minion_opts=__salt__.minion_opts,
                                    **st_kwargs)
    single.shell.send(trans_tar,
                      '{0}/salt_state.tgz'.format(__opts__['thin_dir']))
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except (OSError, IOError):
        pass

    # Read in the JSON data and return the data structure
    try:
        return json.loads(stdout, object_hook=salt.utils.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout
Example #58
0
    def handle_ssh(self, mine=False):
        '''
        Spin up the needed threads or processes and execute the subsequent
        routines
        '''
        que = multiprocessing.Queue()
        running = {}
        target_iter = self.targets.__iter__()
        returned = set()
        rets = set()
        init = False
        if not self.targets:
            raise salt.exceptions.SaltClientError(
                'No matching targets found in roster.')
        while True:
            if len(running) < self.opts.get('ssh_max_procs', 25) and not init:
                try:
                    host = next(target_iter)
                except StopIteration:
                    init = True
                    continue
                for default in self.defaults:
                    if default not in self.targets[host]:
                        self.targets[host][default] = self.defaults[default]
                args = (
                    que,
                    self.opts,
                    host,
                    self.targets[host],
                    mine,
                )
                routine = multiprocessing.Process(target=self.handle_routine,
                                                  args=args)
                routine.start()
                running[host] = {'thread': routine}
                continue
            ret = {}
            try:
                ret = que.get(False)
                if 'id' in ret:
                    returned.add(ret['id'])
                    yield {ret['id']: ret['ret']}
            except Exception:
                pass
            for host in running:
                if not running[host]['thread'].is_alive():
                    if host not in returned:
                        # Try to get any returns that came through since we
                        # last checked
                        try:
                            while True:
                                ret = que.get(False)
                                if 'id' in ret:
                                    returned.add(ret['id'])
                                    yield {ret['id']: ret['ret']}
                        except Exception:
                            pass

                        if host not in returned:
                            error = ('Target \'{0}\' did not return any data, '
                                     'probably due to an error.').format(host)
                            ret = {'id': host, 'ret': error}
                            log.error(error)
                            yield {ret['id']: ret['ret']}
                    running[host]['thread'].join()
                    rets.add(host)
            for host in rets:
                if host in running:
                    running.pop(host)
            if len(rets) >= len(self.targets):
                break
            # Sleep when limit or all threads started
            if len(running) >= self.opts.get('ssh_max_procs', 25) or len(
                    self.targets) >= len(running):
                time.sleep(0.1)
Example #59
0
    def run(self, jid=None):
        '''
        Execute the overall routine, print results via outputters
        '''
        fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
        jid = self.returners[fstr](
            passed_jid=jid or self.opts.get('jid', None))

        # Save the invocation information
        argv = self.opts['argv']

        if self.opts.get('raw_shell', False):
            fun = 'ssh._raw'
            args = argv
        else:
            fun = argv[0] if argv else ''
            args = argv[1:]

        job_load = {
            'jid': jid,
            'tgt_type': self.tgt_type,
            'tgt': self.opts['tgt'],
            'user': self.opts['user'],
            'fun': fun,
            'arg': args,
        }

        # save load to the master job cache
        try:
            if isinstance(jid, bytes):
                jid = jid.decode('utf-8')
            self.returners['{0}.save_load'.format(
                self.opts['master_job_cache'])](jid, job_load)
        except Exception as exc:
            log.exception(exc)
            log.error('Could not save load with returner {0}: {1}'.format(
                self.opts['master_job_cache'], exc))

        if self.opts.get('verbose'):
            msg = 'Executing job with jid {0}'.format(jid)
            print(msg)
            print('-' * len(msg) + '\n')
            print('')
        sret = {}
        outputter = self.opts.get('output', 'nested')
        final_exit = 0
        for ret in self.handle_ssh():
            host = next(six.iterkeys(ret))
            if isinstance(ret[host], dict):
                host_ret = ret[host].get('retcode', 0)
                if host_ret != 0:
                    final_exit = 1
            else:
                # Error on host
                final_exit = 1

            self.cache_job(jid, host, ret[host], fun)
            ret = self.key_deploy(host, ret)
            if not isinstance(ret[host], dict):
                p_data = {host: ret[host]}
            elif 'return' not in ret[host]:
                p_data = ret
            else:
                outputter = ret[host].get('out',
                                          self.opts.get('output', 'nested'))
                p_data = {host: ret[host].get('return', {})}
            if self.opts.get('static'):
                sret.update(p_data)
            else:
                salt.output.display_output(p_data, outputter, self.opts)
            if self.event:
                self.event.fire_event(
                    ret, salt.utils.event.tagify([jid, 'ret', host], 'job'))
        if self.opts.get('static'):
            salt.output.display_output(sret, outputter, self.opts)
        if final_exit:
            sys.exit(salt.defaults.exitcodes.EX_AGGREGATE)
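run() folds the per-host results into one aggregate exit status: any non-dict return (an error string) or any non-zero retcode marks the whole job as failed. A sketch of that aggregation in isolation (illustrative only):

def aggregate_exit(results):
    """Return 1 if any host errored or reported a non-zero retcode, else 0."""
    final_exit = 0
    for host, ret in results.items():
        if not isinstance(ret, dict):
            final_exit = 1               # error string instead of a result dict
        elif ret.get("retcode", 0) != 0:
            final_exit = 1
    return final_exit


# aggregate_exit({"web01": {"retcode": 0}, "db01": "timed out"}) -> 1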
Example #60
0
def high(data, **kwargs):
    """
    Execute the compound calls stored in a single set of high data
    This function is mostly intended for testing the state system

    CLI Example:

    .. code-block:: bash

        salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
    """
    __pillar__.update(kwargs.get("pillar", {}))
    st_kwargs = __salt__.kwargs
    __opts__["grains"] = __grains__.value()
    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    st_ = salt.client.ssh.state.SSHHighState(opts, __pillar__.value(),
                                             __salt__.value(),
                                             __context__["fileclient"])
    st_.push_active()
    chunks = st_.state.compile_high_data(data)
    file_refs = salt.client.ssh.state.lowstate_file_refs(
        chunks,
        _merge_extra_filerefs(kwargs.get("extra_filerefs", ""),
                              opts.get("extra_filerefs", "")),
    )

    roster = salt.roster.Roster(opts, opts.get("roster", "flat"))
    roster_grains = roster.opts["grains"]

    # Create the tar containing the state pkg and relevant files.
    _cleanup_slsmod_low_data(chunks)
    trans_tar = salt.client.ssh.state.prep_trans_tar(
        __context__["fileclient"],
        chunks,
        file_refs,
        __pillar__.value(),
        st_kwargs["id_"],
        roster_grains,
    )
    trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts["hash_type"])
    cmd = "state.pkg {}/salt_state.tgz pkg_sum={} hash_type={}".format(
        opts["thin_dir"], trans_tar_sum, opts["hash_type"])
    single = salt.client.ssh.Single(opts,
                                    cmd,
                                    fsclient=__context__["fileclient"],
                                    minion_opts=__salt__.minion_opts,
                                    **st_kwargs)
    single.shell.send(trans_tar, "{}/salt_state.tgz".format(opts["thin_dir"]))
    stdout, stderr, _ = single.cmd_block()

    # Clean up our tar
    try:
        os.remove(trans_tar)
    except OSError:
        pass

    # Read in the JSON data and return the data structure
    try:
        return salt.utils.json.loads(stdout)
    except Exception as e:  # pylint: disable=broad-except
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))

    # If for some reason the json load fails, return the stdout
    return stdout