Example #1
    def _write_state(self):
        '''
        Write the current status to the defined status file.
        This includes the current pid, events received/handled
        and threads created/joined.
        '''
        try:
            # write the info to the specified log
            statf = open(self.state_file, 'w')
            statf.writelines(simplejson.dumps({'events_received':self.events_rec,
                                               'events_handled':self.events_han,
                                               'threads_created':self.threads_cre,
                                               'threads_joined':self.threads_join}
                                             ))
            # if we have the same pid as the pidfile, we are the running daemon
            # and also print the current counters to the logfile with 'info'
            if os.getpid() == self.pid:
                log.info("running with pid {0}".format(self.pid))
                log.info("events (han/recv): {0}/{1}".format(self.events_han,
                                                             self.events_rec))
                log.info("threads (cre/joi):{0}/{1}".format(self.threads_cre,
                                                            self.threads_join))


            statf.write("\n")
            statf.close()
            sys.stdout.flush()
        except IOError as ioerr:
            log.critical("Failed to write state to {0}".format(self.state_file))
            log.exception(ioerr)
        except OSError as oserr:
            log.critical("Failed to write state to {0}".format(self.state_file))
            log.exception(oserr)
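
For reference, a minimal standalone sketch of the JSON payload this method serializes; the counter values are made up, in the daemon they come from self.events_rec and friends:

import simplejson

# illustrative counters only
state = {'events_received': 1052, 'events_handled': 1048,
         'threads_created': 210, 'threads_joined': 208}
print(simplejson.dumps(state))   # one JSON object, same shape as the state file contents

Example #2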
def _dism(action,
          image=None):
    '''
    Run a DISM servicing command on the given image.
    '''
    command = 'dism {0} {1}'.format(
        '/Image:{0}'.format(image) if image else '/Online',
        action
    )
    ret = {'action': action,
           'image': image,
           'result': True,
           'message': ''}
    output = __salt__['cmd.run'](command, ignore_retcode=True)
    if not re.search('The operation completed successfully.', output):
        ret['result'] = False
        # use a raw string for the error pattern and guard against a
        # non-matching error block so .group() is never called on None
        error_match = re.search(
            r'(Error: \d+\r?\n\r?\n([^\r\n]+\r?\n)+\r?\nThe DISM log file can be found at [^\r\n]+\r?\n)',
            output,
            re.MULTILINE
        )
        ret['message'] = error_match.group(1) if error_match else output
        log.error('DISM command \'{0}\' on image {1} failed: {2}'.format(action, image, ret['message']))
    else:
        ret['message'] = output
        log.debug('DISM command \'{0}\' on image {1} completed with the following output: {2}'.format(action, image, output))
    return ret
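
A usage sketch showing how the command string is assembled; the feature name and mount path below are only examples, not values taken from the module:

# Hypothetical calls, illustrating the /Online vs /Image: switch selection:
_dism('/Get-Features')
#   runs: dism /Online /Get-Features
_dism('/Enable-Feature /FeatureName:NetFx3', image='C:\\mount\\windows')
#   runs: dism /Image:C:\mount\windows /Enable-Feature /FeatureName:NetFx3
# Both return a dict of the form
#   {'action': ..., 'image': ..., 'result': True/False, 'message': <DISM output or error block>}

Example #3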
def get_elb_lbs():
    """
    Returns a dictionary of load balancer names as keys
    each with their respective attributes
    """

    # attributes to extract from the load balancer boto objects
    # this could possibly be a named argument too
    extract_attrs = ['scheme', 'dns_name', 'vpc_id', 'name', 'security_groups']

    try:
        instance_metadata = boto.utils.get_instance_metadata(timeout=5, num_retries=2)
    except Exception as e:
        log.exception("Error getting ELB names: {}".format(e))
        return {'custom_grain_error': True}

    # Setup the lbs grain
    lbs_grain = {'lbs': {}}

    # Collect details about this instance
    macs = instance_metadata['network']['interfaces']['macs']
    # .values() is a view on Python 3, so materialise it before indexing
    vpc_id = list(macs.values())[0]['vpc-id']
    region = instance_metadata['placement']['availability-zone'][:-1]

    # Collect load balancers of this instance (in the same vpc)
    try:
        elb_connection = boto.ec2.elb.connect_to_region(region)

        # find load balancers by vpc_id
        all_lbs = [lb for lb in elb_connection.get_all_load_balancers()
                   if lb.vpc_id == vpc_id]
        log.debug('all lbs before filtering by instance id: {}'.format(all_lbs))

        # further filter the load balancers by instance id
        lbs = [lb for lb in all_lbs for inst in lb.instances
               if inst.id == instance_metadata['instance-id']]
        # initialise and populate the output of load balancers
        out = {}
        for lb in lbs:
            out[lb.name] = {attr: getattr(lb, attr, None)
                            for attr in extract_attrs}

        if not out:
            # This loglevel could perhaps be adjusted to something more visible
            log.warning("No ELBs found for this instance, this is unusual, "
                        "but we will not break highstate")

        lbs_grain['lbs'] = out

    except Exception as e:
        # This prints a user-friendly error with stacktrace
        log.exception("Error getting ELB names: {}".format(e))
        return {'custom_grain_error': True}

    return lbs_grain
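
For clarity, an illustrative example of the grain this function returns for an instance behind a single load balancer; all names and ids here are made up:

example_grain = {
    'lbs': {
        'my-elb': {
            'scheme': 'internet-facing',
            'dns_name': 'my-elb-1234567890.eu-west-1.elb.amazonaws.com',
            'vpc_id': 'vpc-0abc1234',
            'name': 'my-elb',
            'security_groups': ['sg-0123abcd'],
        }
    }
}
# on any boto or metadata error the function returns {'custom_grain_error': True} instead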
Example #4
def version_cmp(pkg1, pkg2):
    '''
    Compares two version strings using distutils.version.LooseVersion. This is
    a fallback for providers which don't have a version comparison utility
    built into them.  Return -1 if version1 < version2, 0 if version1 ==
    version2, and 1 if version1 > version2. Return None if there was a problem
    making the comparison.
    '''
    try:
        if distutils.version.LooseVersion(pkg1) < \
                distutils.version.LooseVersion(pkg2):
            return -1
        elif distutils.version.LooseVersion(pkg1) == \
                distutils.version.LooseVersion(pkg2):
            return 0
        elif distutils.version.LooseVersion(pkg1) > \
                distutils.version.LooseVersion(pkg2):
            return 1
    except Exception as e:
        log.exception(e)
    return None
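
A few illustrative comparisons (the version strings are made up); LooseVersion compares release segments numerically, so '1.2.10' sorts after '1.2.2', which a plain string comparison would get wrong:

version_cmp('1.2.2', '1.2.10')   # -1, '10' is compared numerically, not lexically
version_cmp('2.0', '2.0')        # 0
version_cmp('2.0.1', '2.0')      # 1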
Example #5
    'cmd.run_all': salt.modules.cmdmod._run_all_quiet
}

log = logging.getLogger(__name__)

HAS_WMI = False
if salt.utils.is_windows():
    # attempt to import the python wmi module
    # the Windows minion uses WMI for some of its grains
    try:
        import wmi
        import salt.utils.winapi
        HAS_WMI = True
    except ImportError:
        log.exception(
            'Unable to import Python wmi module, some core grains '
            'will be missing'
        )


def _windows_cpudata():
    '''
    Return some CPU information on Windows minions
    '''
    # Provides:
    #   num_cpus
    #   cpu_model
    grains = {}
    if 'NUMBER_OF_PROCESSORS' in os.environ:
        # Cast to int so that the logic isn't broken when used as a
        # conditional in templating. Also follows _linux_cpudata()
        try:
Example #6
    def run(self):
        '''
        Main loop of the ConCache, starts updates in intervals and
        answers requests from the MWorkers
        '''
        context = zmq.Context()
        # the socket for incoming cache requests
        creq_in = context.socket(zmq.REP)
        creq_in.setsockopt(zmq.LINGER, 100)
        creq_in.bind('ipc://' + self.cache_sock)

        # the socket for incoming cache-updates from workers
        cupd_in = context.socket(zmq.SUB)
        cupd_in.setsockopt(zmq.SUBSCRIBE, '')
        cupd_in.setsockopt(zmq.LINGER, 100)
        cupd_in.bind('ipc://' + self.update_sock)

        # the socket for the timer-event
        timer_in = context.socket(zmq.SUB)
        timer_in.setsockopt(zmq.SUBSCRIBE, '')
        timer_in.setsockopt(zmq.LINGER, 100)
        timer_in.connect('ipc://' + self.upd_t_sock)

        poller = zmq.Poller()
        poller.register(creq_in, zmq.POLLIN)
        poller.register(cupd_in, zmq.POLLIN)
        poller.register(timer_in, zmq.POLLIN)

        # our serializer
        serial = salt.payload.Serial(self.opts.get('serial', ''))

        # register a signal handler
        signal.signal(signal.SIGINT, self.signal_handler)

        # secure the sockets from the world
        self.secure()

        log.info('ConCache started')

        while self.running:

            # we check for new events with the poller
            try:
                socks = dict(poller.poll(1))
            except KeyboardInterrupt:
                self.stop()
                # skip the rest of the iteration, 'socks' may be undefined
                continue
            except zmq.ZMQError as zmq_err:
                log.error('ConCache ZeroMQ-Error occurred')
                log.exception(zmq_err)
                self.stop()
                continue

            # check for next cache-request
            if socks.get(creq_in) == zmq.POLLIN:
                msg = serial.loads(creq_in.recv())
                log.debug('ConCache Received request: {0}'.format(msg))

                # requests to the minion list are sent as strings
                if isinstance(msg, str):
                    if msg == 'minions':
                        # Send reply back to client
                        reply = serial.dumps(self.minions)
                        creq_in.send(reply)

            # check for next cache-update from workers
            if socks.get(cupd_in) == zmq.POLLIN:
                new_c_data = serial.loads(cupd_in.recv())
                # tell the worker to exit
                #cupd_in.send(serial.dumps('ACK'))

                # check if the returned data is usable
                if not isinstance(new_c_data, list):
                    log.error('ConCache Worker returned unusable result')
                    del new_c_data
                    continue

                # the cache will receive lists of minions
                # 1. if the list only has 1 item, it's from an MWorker, we append it
                # 2. if the list contains another list, it's from a CacheWorker and
                #    the currently cached minions are replaced with that list
                # 3. anything else is considered malformed

                try:

                    if len(new_c_data) == 0:
                        log.debug('ConCache Got empty update from worker')
                        continue

                    data = new_c_data[0]

                    if isinstance(data, str):
                        if data not in self.minions:
                            log.debug('ConCache Adding minion {0} to cache'.format(new_c_data[0]))
                            self.minions.append(data)

                    elif isinstance(data, list):
                        log.debug('ConCache Replacing minion list from worker')
                        self.minions = data

                except IndexError:
                    log.debug('ConCache Got malformed result dict from worker')
                    del new_c_data

                log.info('ConCache {0} entries in cache'.format(len(self.minions)))

            # check for next timer-event to start new jobs
            if socks.get(timer_in) == zmq.POLLIN:
                sec_event = serial.loads(timer_in.recv())

                # update the list every 30 seconds
                if int(sec_event % 30) == 0:
                    cw = CacheWorker(self.opts)
                    cw.start()

        self.stop()
        creq_in.close()
        cupd_in.close()
        timer_in.close()
        context.term()
        log.debug('ConCache Shutting down')
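
A minimal client-side sketch of querying this loop over the REQ/REP cache socket; the socket path and serializer options are assumptions, not values taken from the class:

import zmq
import salt.payload

serial = salt.payload.Serial({})                           # assumed default serializer options
ctx = zmq.Context()
creq = ctx.socket(zmq.REQ)
creq.connect('ipc:///var/run/salt/master/concache.ipc')    # hypothetical cache_sock path
creq.send(serial.dumps('minions'))                         # the only request the loop answers
cached_minions = serial.loads(creq.recv())                 # list of minion ids from the cache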
Example #7
    'cmd.run': salt.modules.cmdmod._run_quiet,
    'cmd.run_all': salt.modules.cmdmod._run_all_quiet
}

log = logging.getLogger(__name__)

HAS_WMI = False
if salt.utils.is_windows():
    # attempt to import the python wmi module
    # the Windows minion uses WMI for some of its grains
    try:
        import wmi
        import salt.utils.winapi
        HAS_WMI = True
    except ImportError:
        log.exception("Unable to import Python wmi module, some core grains "
                      "will be missing")


def _windows_cpudata():
    '''
    Return some CPU information on Windows minions
    '''
    # Provides:
    #   num_cpus
    #   cpu_model
    grains = {}
    if 'NUMBER_OF_PROCESSORS' in os.environ:
        # Cast to int so that the logic isn't broken when used as a
        # conditional in templating. Also follows _linux_cpudata()
        try:
            grains['num_cpus'] = int(os.environ['NUMBER_OF_PROCESSORS'])
Example #8
    def run(self, jid=None):
        '''
        Execute the overall routine, print results via outputters
        '''
        fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
        jid = self.returners[fstr](passed_jid=jid or self.opts.get('jid', None))

        # Save the invocation information
        argv = self.opts['argv']

        if self.opts.get('raw_shell', False):
            fun = 'ssh._raw'
            args = argv
        else:
            fun = argv[0] if argv else ''
            args = argv[1:]

        job_load = {
            'jid': jid,
            'tgt_type': self.tgt_type,
            'tgt': self.opts['tgt'],
            'user': self.opts['user'],
            'fun': fun,
            'arg': args,
            }

        # save load to the master job cache
        try:
            if isinstance(jid, bytes):
                jid = jid.decode('utf-8')
            self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load)
        except Exception as exc:
            log.exception(exc)
            log.error('Could not save load with returner {0}: {1}'.format(self.opts['master_job_cache'], exc))

        if self.opts.get('verbose'):
            msg = 'Executing job with jid {0}'.format(jid)
            print(msg)
            print('-' * len(msg) + '\n')
            print('')
        sret = {}
        outputter = self.opts.get('output', 'nested')
        final_exit = 0
        for ret in self.handle_ssh():
            host = next(six.iterkeys(ret))
            if isinstance(ret[host], dict):
                host_ret = ret[host].get('retcode', 0)
                if host_ret != 0:
                    final_exit = 1
            else:
                # Error on host
                final_exit = 1

            self.cache_job(jid, host, ret[host], fun)
            ret = self.key_deploy(host, ret)
            if not isinstance(ret[host], dict):
                p_data = {host: ret[host]}
            elif 'return' not in ret[host]:
                p_data = ret
            else:
                outputter = ret[host].get('out', self.opts.get('output', 'nested'))
                p_data = {host: ret[host].get('return', {})}
            if self.opts.get('static'):
                sret.update(p_data)
            else:
                salt.output.display_output(
                        p_data,
                        outputter,
                        self.opts)
            if self.event:
                self.event.fire_event(
                        ret,
                        salt.utils.event.tagify(
                            [jid, 'ret', host],
                            'job'))
        if self.opts.get('static'):
            salt.output.display_output(
                    sret,
                    outputter,
                    self.opts)
        if final_exit:
            sys.exit(salt.defaults.exitcodes.EX_AGGREGATE)
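
For orientation, what the fired event tag looks like; the jid and host below are invented values:

# salt.utils.event.tagify(['20170101123045123456', 'ret', 'web01'], 'job')
# evaluates to 'salt/job/20170101123045123456/ret/web01', the tag under which
# each per-host return is published on the master event bus.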
Example #9
    def _write_state(self):
        '''
        Write the current status to the defined status file.
        This includes the current pid, events received/handled
        and threads created/joined.
        '''
        ev_hdl_per_s = float(self.events_han - self.stat_hdl_count) / float(self.state_timer_intrvl)
        ev_tot_per_s = float(self.events_rec - self.stat_rec_count) / float(self.state_timer_intrvl)

        if self.config['stat_worker']:
            stat_data = {
                'events_rec': self.events_rec,
                'events_hdl': self.events_han,
                'events_hdl_sec': round(ev_hdl_per_s, 2),
                'events_tot_sec': round(ev_tot_per_s, 2),
                'threads_created': self.threads_cre,
                'threads_joined': self.threads_join
            }

            self.threads_cre += 1

            st_worker = SaltEventsdWorker(
                stat_data,
                self.threads_cre,
                None,
                self.backends,
                **self.opts
            )
            st_worker.start()

            try:
                self.running_workers.append(st_worker)
            except AttributeError:
                log.error('self is missing running_workers')
                try:
                    log.info(self)
                    log.info(dir(self))
                except Exception:
                    log.error('Failed to dump dir(self)')

        try:
            # write the info to the specified log
            statf = open(self.state_file, 'w')
            statf.writelines(
                json.dumps({
                    'events_rec': self.events_rec,
                    'events_hdl': self.events_han,
                    'events_hdl_sec': round(ev_hdl_per_s, 2),
                    'events_tot_sec': round(ev_tot_per_s, 2),
                    'threads_created': self.threads_cre,
                    'threads_joined': self.threads_join
                })
            )

            # if we have the same pid as the pidfile, we are the running daemon
            # and also print the current counters to the logfile with 'info'
            if os.getpid() == self.pid:
                log.info("Running with pid {0}".format(self.pid))
                log.info("Events (han/recv): {0}/{1}".format(
                    self.events_han,
                    self.events_rec,
                ))
                log.info("Threads (cre/joi):{0}/{1}".format(
                    self.threads_cre,
                    self.threads_join,
                ))

            statf.write("\n")
            statf.close()
            sys.stdout.flush()
        except IOError as ioerr:
            log.critical("Failed to write state to {0}".format(self.state_file))
            log.exception(ioerr)
        except OSError as oserr:
            log.critical("Failed to write state to {0}".format(self.state_file))
            log.exception(oserr)
        self.stat_rec_count = self.events_rec
        self.stat_hdl_count = self.events_han
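
A quick worked example of the per-second rates computed at the top of this method; the interval and counter values are illustrative:

# With state_timer_intrvl = 30 seconds, a previous stat_hdl_count of 1000 and
# events_han now at 1600:
#     ev_hdl_per_s = (1600 - 1000) / 30.0 = 20.0
# which is what lands in 'events_hdl_sec' (rounded to two decimals).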
Example #10
    def run(self, jid=None):
        '''
        Execute the overall routine, print results via outputters
        '''
        fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
        jid = self.returners[fstr](
            passed_jid=jid or self.opts.get('jid', None))

        # Save the invocation information
        argv = self.opts['argv']

        if self.opts.get('raw_shell', False):
            fun = 'ssh._raw'
            args = argv
        else:
            fun = argv[0] if argv else ''
            args = argv[1:]

        job_load = {
            'jid': jid,
            'tgt_type': self.tgt_type,
            'tgt': self.opts['tgt'],
            'user': self.opts['user'],
            'fun': fun,
            'arg': args,
        }

        # save load to the master job cache
        try:
            if isinstance(jid, bytes):
                jid = jid.decode('utf-8')
            if self.opts['master_job_cache'] == 'local_cache':
                self.returners['{0}.save_load'.format(
                    self.opts['master_job_cache'])](
                        jid, job_load, minions=self.targets.keys())
            else:
                self.returners['{0}.save_load'.format(
                    self.opts['master_job_cache'])](jid, job_load)
        except Exception as exc:
            log.exception(exc)
            log.error('Could not save load with returner {0}: {1}'.format(
                self.opts['master_job_cache'], exc))

        if self.opts.get('verbose'):
            msg = 'Executing job with jid {0}'.format(jid)
            print(msg)
            print('-' * len(msg) + '\n')
            print('')
        sret = {}
        outputter = self.opts.get('output', 'nested')
        final_exit = 0
        for ret in self.handle_ssh():
            host = next(six.iterkeys(ret))
            if isinstance(ret[host], dict):
                host_ret = ret[host].get('retcode', 0)
                if host_ret != 0:
                    final_exit = 1
            else:
                # Error on host
                final_exit = 1

            self.cache_job(jid, host, ret[host], fun)
            ret = self.key_deploy(host, ret)

            if isinstance(ret[host], dict) and ret[host].get(
                    'stderr', '').startswith('ssh:'):
                ret[host] = ret[host]['stderr']

            if not isinstance(ret[host], dict):
                p_data = {host: ret[host]}
            elif 'return' not in ret[host]:
                p_data = ret
            else:
                outputter = ret[host].get('out',
                                          self.opts.get('output', 'nested'))
                p_data = {host: ret[host].get('return', {})}
            if self.opts.get('static'):
                sret.update(p_data)
            else:
                salt.output.display_output(p_data, outputter, self.opts)
            if self.event:
                self.event.fire_event(
                    ret, salt.utils.event.tagify([jid, 'ret', host], 'job'))
        if self.opts.get('static'):
            salt.output.display_output(sret, outputter, self.opts)
        if final_exit:
            sys.exit(salt.defaults.exitcodes.EX_AGGREGATE)
Example #11
    def run(self):
        '''
        Main loop of the ConCache, starts updates in intervals and
        answers requests from the MWorkers
        '''
        context = zmq.Context()
        # the socket for incoming cache requests
        creq_in = context.socket(zmq.REP)
        creq_in.setsockopt(zmq.LINGER, 100)
        creq_in.bind('ipc://' + self.cache_sock)

        # the socket for incoming cache-updates from workers
        cupd_in = context.socket(zmq.SUB)
        cupd_in.setsockopt(zmq.SUBSCRIBE, b'')
        cupd_in.setsockopt(zmq.LINGER, 100)
        cupd_in.bind('ipc://' + self.update_sock)

        # the socket for the timer-event
        timer_in = context.socket(zmq.SUB)
        timer_in.setsockopt(zmq.SUBSCRIBE, b'')
        timer_in.setsockopt(zmq.LINGER, 100)
        timer_in.connect('ipc://' + self.upd_t_sock)

        poller = zmq.Poller()
        poller.register(creq_in, zmq.POLLIN)
        poller.register(cupd_in, zmq.POLLIN)
        poller.register(timer_in, zmq.POLLIN)

        # our serializer
        serial = salt.payload.Serial(self.opts.get('serial', ''))

        # register a signal handler
        signal.signal(signal.SIGINT, self.signal_handler)

        # secure the sockets from the world
        self.secure()

        log.info('ConCache started')

        while self.running:

            # we check for new events with the poller
            try:
                socks = dict(poller.poll(1))
            except KeyboardInterrupt:
                self.stop()
                # skip the rest of the iteration, 'socks' may be undefined
                continue
            except zmq.ZMQError as zmq_err:
                log.error('ConCache ZeroMQ-Error occurred')
                log.exception(zmq_err)
                self.stop()
                continue

            # check for next cache-request
            if socks.get(creq_in) == zmq.POLLIN:
                msg = serial.loads(creq_in.recv())
                log.debug('ConCache Received request: %s', msg)

                # requests to the minion list are sent as strings
                if isinstance(msg, six.string_types):
                    if msg == 'minions':
                        # Send reply back to client
                        reply = serial.dumps(self.minions)
                        creq_in.send(reply)

            # check for next cache-update from workers
            if socks.get(cupd_in) == zmq.POLLIN:
                new_c_data = serial.loads(cupd_in.recv())
                # tell the worker to exit
                #cupd_in.send(serial.dumps('ACK'))

                # check if the returned data is usable
                if not isinstance(new_c_data, list):
                    log.error('ConCache Worker returned unusable result')
                    del new_c_data
                    continue

                # the cache will receive lists of minions
                # 1. if the list only has 1 item, it's from an MWorker, we append it
                # 2. if the list contains another list, it's from a CacheWorker and
                #    the currently cached minions are replaced with that list
                # 3. anything else is considered malformed

                try:

                    if not new_c_data:
                        log.debug('ConCache Got empty update from worker')
                        continue

                    data = new_c_data[0]

                    if isinstance(data, six.string_types):
                        if data not in self.minions:
                            log.debug('ConCache Adding minion %s to cache',
                                      new_c_data[0])
                            self.minions.append(data)

                    elif isinstance(data, list):
                        log.debug('ConCache Replacing minion list from worker')
                        self.minions = data

                except IndexError:
                    log.debug('ConCache Got malformed result dict from worker')
                    del new_c_data

                log.info('ConCache %s entries in cache', len(self.minions))

            # check for next timer-event to start new jobs
            if socks.get(timer_in) == zmq.POLLIN:
                sec_event = serial.loads(timer_in.recv())

                # update the list every 30 seconds
                if int(sec_event % 30) == 0:
                    cw = CacheWorker(self.opts)
                    cw.start()

        self.stop()
        creq_in.close()
        cupd_in.close()
        timer_in.close()
        context.term()
        log.debug('ConCache Shutting down')
Example #12
    'cmd.run': salt.modules.cmdmod._run_quiet,
    'cmd.run_all': salt.modules.cmdmod._run_all_quiet
}

log = logging.getLogger(__name__)

HAS_WMI = False
if salt.utils.is_windows():
    # attempt to import the python wmi module
    # the Windows minion uses WMI for some of its grains
    try:
        import wmi
        import salt.utils.winapi
        HAS_WMI = True
    except ImportError:
        log.exception('Unable to import Python wmi module, some core grains '
                      'will be missing')


def _windows_cpudata():
    '''
    Return some CPU information on Windows minions
    '''
    # Provides:
    #   num_cpus
    #   cpu_model
    grains = {}
    if 'NUMBER_OF_PROCESSORS' in os.environ:
        # Cast to int so that the logic isn't broken when used as a
        # conditional in templating. Also follows _linux_cpudata()
        try:
            grains['num_cpus'] = int(os.environ['NUMBER_OF_PROCESSORS'])
Example #13
__salt__ = {
    'cmd.run': salt.modules.cmdmod._run_quiet,
    'cmd.run_all': salt.modules.cmdmod._run_all_quiet
}

log = logging.getLogger(__name__)

has_wmi = False
if sys.platform.startswith('win'):
    # attempt to import the python wmi module
    # the Windows minion uses WMI for some of its grains
    try:
        import wmi
        has_wmi = True
    except ImportError:
        log.exception("Unable to import Python wmi module, some core grains "
                      "will be missing")


def _windows_cpudata():
    '''
    Return CPU information for Windows systems
    '''
    # Provides:
    #   cpuarch
    #   cpu_model
    grains = {}
    if 'NUMBER_OF_PROCESSORS' in os.environ:
        # Cast to int so that the logic isn't broken when used as a
        # conditional in templating. Also follows _linux_cpudata()
        try:
            grains['num_cpus'] = int(os.environ['NUMBER_OF_PROCESSORS'])