Example #1
    def query(self, **kwargs):

        pgi = self.find_single_process_group(pgi_name('Windows System'))
        pgi_id = pgi.group_instance_id

        # <hostname> and <queuename> are placeholders for the real MSMQ host and queue names
        queue1 = winstats.get_perf_data(r'\MSMQ Queue(<hostname>\private$\<queuename>)\Messages in Queue', fmts='double', delay=100)
        queue2 = winstats.get_perf_data(r'\MSMQ Queue(<hostname>\private$\<queuename>)\Messages in Queue', fmts='double', delay=100)

        self.results_builder.relative(key='<queuename>', value=queue1[0], entity_id=pgi_id)
        self.results_builder.relative(key='<queuename>', value=queue2[0], entity_id=pgi_id)
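For reference, winstats.get_perf_data takes a PDH counter path (or a list of them) and returns a tuple of sampled values. A minimal standalone sketch with a hypothetical host and queue name (MYHOST and myqueue are illustration only):

import winstats

# MYHOST and myqueue are hypothetical; substitute a real machine and private queue
counter = r'\MSMQ Queue(MYHOST\private$\myqueue)\Messages in Queue'
depth = winstats.get_perf_data(counter, fmts='double', delay=100)
print('messages in queue: %s' % depth[0])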
Example #2
    def take_sample(self):
        self.sample_count += 1

        processor_count = multiprocessing.cpu_count()

        # take all data with one call
        counters = [r'\System\Processor Queue Length']
        fmts = ['double']
        for processor in range(processor_count):
            counters.append(r'\Processor(' + str(processor) + r')\% Processor Time')
            fmts.append('double')
        delay = int((self.period_seconds * 1000)/2)  # take the "second snapshot" in the middle of the period
        sample = winstats.get_perf_data(counters, fmts=fmts, delay=delay)

        mops.logger.log.debug('winstats.get_perf_data:%s' % str(sample))

        # when Processor Queue Length (PQL) is large, it usually indicates an opportunity for more cores
        self.processor_queue_length_sum += sample[0]
        pql_avg = float(self.processor_queue_length_sum)/float(self.sample_count)
        pql_severity = ratio_to_severity_level(pql_avg, 1.0, 0.5)
        self.metrics['Average PQL'] = {'value': str(pql_avg), 'severity': pql_severity}

        # Sum processor activity across all processors - this is relative to 'how much of a single processor' is being
        # used.  Note that this is scaled so 1.0 means we're using one processor's worth, 2.0 is 2 processors, etc.
        self.processor_sum_sum += sum(sample[1:])/100.0
        processor_sum_avg = float(self.processor_sum_sum)/float(self.sample_count)
        self.metrics['Average Load (number of processors)'] = {'value': '{:.3}'.format(processor_sum_avg)}

        # Determine how often one processor is saturated.  Note that due to core migration, even if a single
        # thread is running continually it will appear across all cores (unless it is affinitized).
        self.processor_max_sum += max(sample[1:])
        proc_avg_max = (float(self.processor_max_sum)/float(self.sample_count))/100.0
        proc_avg_max_severity = ratio_to_severity_level(proc_avg_max, 0.85, 0.75)  # thresholds as fractions to match proc_avg_max (percent-scale 85.0/75.0 could never trigger)
        self.metrics['Average Max Processor Load'] = {'value': '{:.1%}'.format(proc_avg_max), 'severity': proc_avg_max_severity}
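ratio_to_severity_level is a project helper the snippet does not include; the following is a plausible sketch only, assuming it compares a value against an upper and a lower threshold:

def ratio_to_severity_level(value, high, low):
    # assumed semantics: at or above `high` is severe, at or above `low` is a warning
    if value >= high:
        return 'high'
    elif value >= low:
        return 'medium'
    return 'low'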
Example #3
def collect():
    """
    collect iis metric
    """
    tags = ""
    for k, v in g.TAGS.items():
        t = k + "=" + v
        tags = tags + t + ","

    timestamp = int(time.time())
    hostname = g.HOSTNAME
    data = []
    step = 60

    for counter, metric, vtype in COUNTERS:
        try:
            counter_value = winstats.get_perf_data(counter, delay=100)
        except Exception as e:
            logging.debug(e)
            continue
        value = counter_value[0]

        metric_dict = {
            'metric': 'iis.{}'.format(metric),
            'endpoint': hostname,
            'timestamp': timestamp,
            'step': step,
            'value': value,
            'counterType': vtype,
            'tags': tags
        }
        logging.debug("%s: %s" % (metric_dict['metric'], value))
        data.append(metric_dict)

    for srv, metric in SERVICES:
        try:
            is_alive = get_service_alive_info(srv)
        except Exception as e:
            logging.debug(e)
            continue
        metric_dict = {
            'metric': metric,
            'endpoint': hostname,
            'timestamp': timestamp,
            'step': step,
            'value': is_alive,
            'counterType': 'GAUGE',
            'tags': tags
        }
        logging.debug("isalive: %s" % is_alive)
        logging.debug("metrics: %s" % metric_dict['metric'])
        data.append(metric_dict)

    try:
        result = send_data_to_transfer(data)
    except Exception as e:
        logging.error(e, exc_info=True)
    else:
        logging.info(result)
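COUNTERS and SERVICES are module-level tables the collector above iterates over but does not define. A sketch of their likely shape for the IIS case; the exact entries here are assumptions, not the project's configuration:

COUNTERS = [
    # (PDH counter path, metric name suffix, counter type)
    (r'\Web Service(_Total)\Current Connections', 'current_connections', 'GAUGE'),
    (r'\Web Service(_Total)\Bytes Received/sec', 'bytes_received_per_sec', 'GAUGE'),
]

SERVICES = [
    # (Windows service name, metric name)
    ('W3SVC', 'iis.service.alive'),
]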
Example #4
def collect():
    """
    collect sqlserver metric
    """
    logging.debug("enter sqlserver collect")
    timestamp = int(time.time())
    hostname = g.HOSTNAME
    tags = ''
    data = []
    step = 60

    for counter, metric, vtype in COUNTERS:
        try:
            counter_value = winstats.get_perf_data(counter, delay=100)
        except Exception as e:
            logging.debug(e)
            continue
        value = counter_value[0]

        metric_dict = {
            'metric': 'sqlserver.{}'.format(metric),
            'endpoint': hostname,
            'timestamp': timestamp,
            'step': step,
            'value': value,
            'counterType': vtype,
            'tags': tags
        }
        logging.debug("metric: %s, value: %s" % (metric_dict['metric'], value))
        data.append(metric_dict)

    for srv, metric in SERVICES:
        try:
            is_alive = get_service_alive_info(srv)
        except Exception as e:
            logging.debug(e)
            continue
        metric_dict = {
            'metric': metric,
            'endpoint': hostname,
            'timestamp': timestamp,
            'step': step,
            'value': is_alive,
            'counterType': 'GAUGE',
            'tags': tags
        }
        logging.debug("metric: %s, value: %s" %
                      (metric_dict['metric'], is_alive))
        data.append(metric_dict)

    try:
        result = send_data_to_transfer(data)
    except Exception as e:
        logging.error(e)
    else:
        logging.info(result)
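get_service_alive_info is likewise external to both collectors. One way it might be implemented with pywin32 (a sketch under that assumption, not necessarily the project's code):

import win32service
import win32serviceutil

def get_service_alive_info(service_name):
    # 1 if the Windows service is currently running, else 0
    status = win32serviceutil.QueryServiceStatus(service_name)[1]
    return 1 if status == win32service.SERVICE_RUNNING else 0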
Example #5
    def take_sample(self):
        self.sample_count += 1

        processor_count = multiprocessing.cpu_count()

        # take all data with one call
        counters = [r'\System\Processor Queue Length']
        fmts = ['double']
        for processor in range(processor_count):
            counters.append(r'\Processor(' + str(processor) + r')\% Processor Time')
            fmts.append('double')
        delay = int((self.period_seconds * 1000)/2)  # take the "second snapshot" in the middle of the period
        if platform.system() == 'Windows':
            sample = winstats.get_perf_data(counters, fmts=fmts, delay=delay)

            mops.logger.log.debug('winstats.get_perf_data:%s' % str(sample))

            # when Processor Queue Length (PQL) is large, it usually indicates an opportunity for more cores
            self.processor_queue_length_sum += sample[0]
            pql_avg = float(self.processor_queue_length_sum)/float(self.sample_count)
            pql_severity = ratio_to_severity_level(pql_avg, 1.0, 0.5)
            self.metrics['Average PQL'] = {'value': str(pql_avg), 'severity': pql_severity}

            # Sum processor activity across all processors - this is relative to 'how much of a single processor' is being
            # used.  Note that this is scaled so 1.0 means we're using one processor's worth, 2.0 is 2 processors, etc.
            self.processor_sum_sum += sum(sample[1:])/100.0
            processor_sum_avg = float(self.processor_sum_sum)/float(self.sample_count)
            self.metrics['Average Load (number of processors)'] = {'value': '{:.3}'.format(processor_sum_avg)}

            # Determine how often one processor is saturated.  Note that due to core migration, even if a single
            # thread is running continually it will appear across all cores (unless it is affinitized).
            self.processor_max_sum += max(sample[1:])
            proc_avg_max = (float(self.processor_max_sum)/float(self.sample_count))/100.0
            proc_avg_max_severity = ratio_to_severity_level(proc_avg_max, 0.85, 0.75)  # thresholds as fractions to match proc_avg_max (percent-scale 85.0/75.0 could never trigger)
            self.metrics['Average Max Processor Load'] = {'value': '{:.1%}'.format(proc_avg_max), 'severity': proc_avg_max_severity}
        else:
            mops.logger.log.info('platform %s not yet fully supported' % platform.system())
Example #6
def getCPUStats():
    usage = winstats.get_perf_data(r'\Processor(_Total)\% Processor Time',
                                   fmts='double',
                                   delay=100)
    usage_percent = '%.2f%%' % usage[0]
    validateUsage(usage_percent, 'CPU Utilization at ' + usage_percent)
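validateUsage is not shown in the snippet; a minimal sketch, assuming it parses the formatted percentage back out and reports when a threshold is crossed:

def validateUsage(usage_percent, message, threshold=90.0):
    # hypothetical helper: strip the trailing '%' and compare against the threshold
    value = float(usage_percent.rstrip('%'))
    if value >= threshold:
        print('WARNING: ' + message)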
Example #7
import winstats
from openrgb import OpenRGBClient
from openrgb.utils import RGBColor, DeviceType

# configuration -- none of these are defined in the original snippet; values assumed for illustration
ledstripsize = 14      # number of LEDs in the zone
minperf = 0.0          # lowest expected '% Processor Performance' reading
maxperf = 200.0        # highest expected reading (turbo boost can push it past 100)

red = RGBColor(255, 0, 0)
blue = RGBColor(0, 0, 255)
zonenumber = 0

cli = OpenRGBClient()
print(cli)
mobo = cli.get_devices_by_type(DeviceType.MOTHERBOARD)[0]
print(mobo)
mobo.set_mode('direct')
mobo.zones[zonenumber].resize(ledstripsize)
print(mobo.zones[zonenumber])

myLeds = mobo.zones[zonenumber].leds
for i in myLeds:
    i.set_color(red)

step = ((maxperf - minperf) / len(myLeds))

while True:
    procperf = winstats.get_perf_data(
        r'\Processor Information(_Total)\% Processor Performance',
        fmts='double',
        delay=1000)
    j = int((procperf[0] - minperf) / step)
    print(j, procperf)
    for i in range(len(myLeds)):
        if i <= j:
            myLeds[i].set_color(red)
        else:
            myLeds[i].set_color(blue)
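Note that '% Processor Performance' can exceed 100 on CPUs with turbo boost, so the computed index j may run past the strip; the loop above tolerates that, coloring every LED red. A small helper makes the clamped mapping explicit (hypothetical, not from the source):

def perf_to_led_count(perf, minperf, maxperf, led_count):
    # map a reading onto 0..led_count, clamping out-of-range values
    step = (maxperf - minperf) / led_count
    return max(0, min(led_count, int((perf - minperf) / step)))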
Example #8
def get_meminfo(opts):
    ''' Returns a dictionary holding the current memory info,
        divided by the output unit.
    '''
    meminfo = MemInfo()
    outunit = opts.outunit
    mstat = get_mem_info()  # from winstats
    pinf = get_perf_info()
    try:
        pgpcnt = get_perf_data(r'\Paging File(_Total)\% Usage',
                               'double')[0] / 100
    except WindowsError:
        pgpcnt = 0

    totl = mstat.TotalPhys
    meminfo.memtotal = totl / float(outunit)
    used = totl * mstat.MemoryLoad / 100.0  # percent, more reliable
    meminfo.used = used / float(outunit)
    left = totl - used

    # Cached
    cache = pinf.SystemCacheBytes
    if cache > left and version >= win7ver:
        # Win7 RTM bug :/ this cache number is bogus
        free = get_perf_data(r'\Memory\Free & Zero Page List Bytes', 'long')[0]
        cache = left - free
        meminfo.memfree = free / float(outunit)
    else:
        meminfo.memfree = (totl - used - cache) / float(outunit)
    meminfo.buffers = 0

    meminfo.cached = cache / float(outunit)

    # SWAP  these numbers are actually commit charge, not swap; fix
    #       should not contain RAM :/
    swpt = abs(mstat.TotalPageFile - totl)
    # these nums aren't quite right either, use perfmon instead :/
    swpu = swpt * pgpcnt
    swpf = swpt - swpu

    meminfo.swaptotal = swpt / float(outunit)
    meminfo.swapfree = swpf / float(outunit)
    meminfo.swapused = swpu / float(outunit)
    meminfo.swapcached = 0  # A linux stat for compat

    if opts.debug:
        import locale
        fmt = lambda val: locale.format_string('%d', val, True)  # locale.format was removed in Python 3.12
        print()
        print('TotalPhys:', fmt(totl))
        print('AvailPhys:', fmt(mstat.AvailPhys))
        print('MemoryLoad:', fmt(mstat.MemoryLoad))
        print()
        print('used:', fmt(used))
        print('left:', fmt(left))
        if 'free' in locals():
            print('PDH Free:', fmt(free))
        print('SystemCacheBytes:', fmt(pinf.SystemCacheBytes))
        print()
        print('TotalPageFile:', fmt(mstat.TotalPageFile))
        print('AvailPageFile:', fmt(mstat.AvailPageFile))
        print('TotalPageFile fixed:', fmt(swpt))
        print('AvailPageFile fixed:', fmt(swpf))

    return meminfo
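get_meminfo expects an options object exposing outunit and debug. A minimal usage sketch; the attribute names come from the function body above, the values are assumptions:

from types import SimpleNamespace

opts = SimpleNamespace(outunit=2 ** 20, debug=False)  # report values in MiB
info = get_meminfo(opts)
print('total: %.1f MiB  used: %.1f MiB  free: %.1f MiB'
      % (info.memtotal, info.used, info.memfree))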