def take_action(self, args):
    """Count zombie resources per resource type.

    Returns a ``(column_names, row_values)`` pair of iterables in the
    shape a cliff ``Lister``-style command expects: the resource-type
    names and their matching counts, in first-seen order.
    """
    from collections import Counter

    resources = self.app.cloud_obj.resources.list_zombie_resources_by_tenants()
    # Counter preserves first-seen insertion order (dicts are ordered
    # since 3.7), so the previous toolz.countby + OrderedDict dance is
    # unnecessary.
    counts = Counter(res.resource_type for res in resources)
    return tuple(counts), tuple(counts.values())
def list_metrics(self):
    """Build openstack instance-count metrics.

    Groups the instances returned by ``self.list_instances()`` by their
    ``state`` attribute and wraps each count in a ``BaseMetricscls``.
    States that do not occur in the listing count as 0.

    Returns:
        list: ``BaseMetricscls`` entries -- total first, then the
        running/stopped/paused/error counts.
    """
    from collections import Counter

    from ext_cloud.BaseCloud.BaseResources.BaseMetrics import BaseMetricscls

    instances = self.list_instances()
    # Counter replaces toolz.countby; .get(..., 0) replaces the
    # repeated "d[k] if k in d else 0" ternaries.
    by_state = Counter(instance.state for instance in instances)

    metrics = [BaseMetricscls('openstack.instances.total', len(instances))]
    for state in ('running', 'stopped', 'paused', 'error'):
        metrics.append(BaseMetricscls('openstack.instances.%s' % state,
                                      by_state.get(state.upper(), 0)))
    return metrics
def workers(s):
    """Per-host summary of the scheduler's workers.

    Returns a dict mapping each host to a copy of its info dict,
    augmented with a ``'processing'`` task tally (keyed by task-name
    prefix) and with ``'last-seen'`` rewritten as seconds-ago.

    Examples
    --------
    >>> workers(my_scheduler)  # doctest: +SKIP
    {'127.0.0.1': {'cores': 3,
                   'cpu': 0.0,
                   'last-seen': 0.003068,
                   'processing': {'inc': 2, 'add': 1},
                   'memory': 16701911040,
                   'memory_percent': 85,
                   ...}}
    """
    # Worker addresses per host, "host:port" for every advertised port.
    addr_lists = {host: ['%s:%s' % (host, port) for port in info['ports']]
                  for host, info in s.host_info.items()}
    # Count in-flight tasks per host, grouped by task-name prefix.
    tasks_by_host = {host: countby(key_split,
                                   concat(s.processing[addr] for addr in addrs))
                     for host, addrs in addr_lists.items()}
    now = time()
    summary = {}
    for host, raw in s.host_info.items():
        entry = raw.copy()
        entry['processing'] = tasks_by_host[host]
        entry['ports'] = list(entry['ports'])
        if 'last-seen' in entry:
            entry['last-seen'] = now - entry['last-seen']
        summary[host] = entry
    return summary
def test_countby():
    """countby tallies elements by the value the key function computes."""
    assert countby(even, [1, 2, 3]) == {False: 2, True: 1}
    assert countby(len, ['cat', 'dog', 'mouse']) == {5: 1, 3: 2}
def test_countby():
    """Tally by predicate, by len, and by an index-based (non-callable) key."""
    assert countby(iseven, [1, 2, 3]) == {False: 2, True: 1}
    assert countby(len, ['cat', 'dog', 'mouse']) == {5: 1, 3: 2}
    assert countby(0, ('ab', 'ac', 'bc')) == {'b': 1, 'a': 2}
from csv import DictReader, DictWriter
from toolz import compose, pluck, countby
import sys
from collections import Counter

# Collect UFO sightings whose city could not be geocoded and write a
# per-(state, city) tally, most frequent first, to bad_cities.csv.
with open('data/processed/nuforc_reports.csv', 'r') as reports_file, \
        open('bad_cities.csv', 'w') as out_file:
    sightings = DictReader(reports_file)
    bad_cities_file = DictWriter(out_file, fieldnames=['city', 'state', 'count'])
    bad_cities_file.writeheader()

    # A sighting is incomplete when either coordinate is missing/empty.
    bad_cities = [
        {"city": sighting["city"], "state": sighting["state"]}
        for sighting in sightings
        if not sighting['city_latitude'] or not sighting['city_longitude']
    ]

    # Counter over (state, city) tuples replaces
    # toolz.countby(['state', 'city'], ...); most_common() gives the
    # same stable descending-count order as the manual sorted() call.
    bad_cities_counts = Counter((s['state'], s['city']) for s in bad_cities)

    total = 0
    for (state, city), count in bad_cities_counts.most_common():
        total += count
        bad_cities_file.writerow({'state': state, 'city': city, 'count': count})

print("Total number of incomplete geocodes: {}".format(total))
uptime_seconds, idle_seconds = f.readline().split() idle_seconds = float(idle_seconds)/cpu_count print "%s.cpu.uptime %f %d" % (hostname,float(uptime_seconds), now) print "%s.cpu.idletime %f %d" % (hostname,idle_seconds, now) #memory stats mem_stats =psutil.virtual_memory() print "%s.memory.total %d %d" % (hostname,mem_stats.total, now) print "%s.memory.used %d %d" % (hostname,mem_stats.used, now) print "%s.memory.free %d %d" % (hostname,mem_stats.free, now) print "%s.memory.cached %d %d" % (hostname,mem_stats.cached, now) #process stats proc_stats = psutil.get_process_list() proc_dict=collections.defaultdict(int, toolz.countby(lambda x:x.status, proc_stats)) print "%s.process.total %d %d" % (hostname,len(proc_stats), now) print "%s.process.running %d %d" % (hostname,proc_dict['running'], now) print "%s.process.sleeping %d %d" % (hostname,proc_dict['sleeping'], now) print "%s.process.stopped %d %d" % (hostname,proc_dict['stopped'], now) #disk stats disk_stats = psutil.disk_io_counters(perdisk=True) for key in disk_stats: print "%s.disk.%s.reads %d %d" % (hostname,key,disk_stats[key].read_count, now) print "%s.disk.%s.writes %d %d" % (hostname,key,disk_stats[key].write_count, now)
def test_countby():
    """countby groups by key and reports group sizes."""
    assert countby(iseven, [1, 2, 3]) == {False: 2, True: 1}
    assert countby(len, ['cat', 'dog', 'mouse']) == {5: 1, 3: 2}
#!/usr/bin/python
"""Nagios-style check: exit 1 (printing a summary) if zombie cloud resources exist."""
from ext_cloud import get_ext_cloud
import sys
import warnings
from collections import Counter

warnings.filterwarnings("ignore")

cloud_obj = get_ext_cloud("openstack")
resources = cloud_obj.resources.list_zombie_resources()

# Count zombies per resource type; stdlib Counter replaces toolz.countby.
dic = Counter(res.resource_type for res in resources)

# BUG FIX: the original tested "len(dic) is 0" -- identity comparison
# against an int literal, which is not guaranteed equality semantics.
if not dic:
    sys.exit(0)
print(dict(dic))
sys.exit(1)
# Emits Graphite-plaintext-style "metric value timestamp" lines.
# NOTE(review): hostname, now, cpu_count and uptime_seconds/idle_seconds
# are bound earlier in the script (outside this chunk) -- confirm there.

# idle time is summed across CPUs; normalise to a single-core figure
idle_seconds = float(idle_seconds)/cpu_count
print "%s.cpu.uptime %f %d" % (hostname,float(uptime_seconds), now)
print "%s.cpu.idletime %f %d" % (hostname,idle_seconds, now)
#memory stats
mem_stats =psutil.virtual_memory()
print "%s.memory.total %d %d" % (hostname,mem_stats.total, now)
print "%s.memory.used %d %d" % (hostname,mem_stats.used, now)
print "%s.memory.free %d %d" % (hostname,mem_stats.free, now)
print "%s.memory.cached %d %d" % (hostname,mem_stats.cached, now)
#process stats
# Tally processes by status string ('running', 'sleeping', ...);
# defaultdict(int) makes absent statuses read as 0 below.
proc_stats = list(psutil.process_iter())
proc_dict=collections.defaultdict(int, toolz.countby(lambda x:x.status(), proc_stats))
print "%s.process.total %d %d" % (hostname,len(proc_stats), now)
print "%s.process.running %d %d" % (hostname,proc_dict['running'], now)
print "%s.process.sleeping %d %d" % (hostname,proc_dict['sleeping'], now)
print "%s.process.stopped %d %d" % (hostname,proc_dict['stopped'], now)
#disk stats
# Per-device I/O counters; pseudo-devices are skipped by name prefix.
disk_stats = psutil.disk_io_counters(perdisk=True)
for key in disk_stats:
    #skip ram disks
    if key[:3] == 'ram':
        continue
    # skip cdrom devices
    if key[:2] == 'sr':
        continue
    print "%s.disk.%s.reads %d %d" % (hostname,key,disk_stats[key].read_count, now)