def start(self): super(Service, self).start() # pylint: disable=protected-access sysinv_conf = self.conf._namespace._normalized[0]['DEFAULT'] url = "rabbit://{user}:{password}@{host}:{port}"\ "".format(user=sysinv_conf['rabbit_userid'][0], password=sysinv_conf['rabbit_password'][0], host=utils.ipv6_bracketed(sysinv_conf['rabbit_host'][0]), port=sysinv_conf['rabbit_port'][0]) transport = messaging.get_transport(self.conf, url=url) self.sysinv_conductor = messaging.RPCClient( transport, messaging.Target(topic=constants.SYSINV_CONDUCTOR_TOPIC)) self.ceph_api = wrapper.CephWrapper( endpoint='http://localhost:{}'.format(constants.CEPH_MGR_PORT)) # Get initial config from sysinv and send it to # services that need it before starting them self.rpc_server = messaging.get_rpc_server( transport, messaging.Target(topic=constants.CEPH_MANAGER_TOPIC, server=self.conf.sysinv_api_bind_ip), [RpcEndpoint(self)], executor='eventlet') self.rpc_server.start() eventlet.spawn_n(self.monitor.run)
def activity(_):
    """Return current cluster I/O, recovery, and capacity figures as JSON.

    Rate fields are included only when the cluster's pgmap reports them;
    Used/Total byte counts are always included.
    """
    client = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)
    _resp, status = client.status(body='json')
    pg_map = status['output']['pgmap']

    # (response label, pgmap key) pairs, in response order.
    rate_fields = (
        ('Read', 'read_bytes_sec'),
        ('Write', 'write_bytes_sec'),
        ('Ops', 'op_per_sec'),
        ('Recovering_Objects', 'recovering_objects_per_sec'),
        ('Recovery_Speed', 'recovering_bytes_per_sec'),
        ('Recovering_Keys', 'recovering_keys_per_sec'),
    )
    result = {label: pg_map[key]
              for label, key in rate_fields if key in pg_map}

    # Free size
    result['Used'] = pg_map.get('bytes_used')
    result['Total'] = pg_map.get('bytes_total')
    return JsonResponse(result)
def start(self):
    """Start the service: RPC plumbing, caching-tier initial config,
    the monitor greenthread and a periodic target_max_bytes updater.
    """
    super(Service, self).start()
    transport = messaging.get_transport(self.conf)
    # RPC client used to call the sysinv conductor.
    self.sysinv_conductor = messaging.RPCClient(
        transport,
        messaging.Target(topic=constants.SYSINV_CONDUCTOR_TOPIC))
    # REST client for the local ceph REST API.
    self.ceph_api = wrapper.CephWrapper(
        endpoint='http://localhost:5001/api/v0.1/')
    # Get initial config from sysinv and send it to
    # services that need it before starting them
    config = self.get_caching_tier_config()
    self.monitor.setup(config)
    self.rpc_server = messaging.get_rpc_server(
        transport,
        messaging.Target(topic=constants.CEPH_MANAGER_TOPIC,
                         server=self.conf.sysinv_api_bind_ip),
        [RpcEndpoint(self)],
        executor='eventlet')
    self.rpc_server.start()
    self.cache_tiering.set_initial_config(config)
    # Run the cluster monitor loop in an eventlet greenthread.
    eventlet.spawn_n(self.monitor.run)
    # Refresh ceph target_max_bytes every 300 seconds.
    # NOTE(review): `periodic` is not kept on self; presumably the
    # looping call keeps itself alive once started -- confirm.
    periodic = loopingcall.FixedIntervalLoopingCall(
        self.update_ceph_target_max_bytes)
    periodic.start(interval=300)
def osd_details(request, osd_num):
    """Render the detail page for a single OSD.

    Looks the OSD up by its numeric id in ``osd dump`` and ``osd perf``
    output.  The template context is ``locals()``, so the local variable
    names here are part of the template contract and are kept unchanged.

    Raises IndexError if the OSD id is unknown to the cluster.
    """
    ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)
    osd_num = int(osd_num)
    # BUGFIX: variable was misspelled `reponse`; also wrap filter() in
    # list() so the [0] subscript works on Python 3, where filter()
    # returns an iterator rather than a list.
    response, osd_dump = ceph.osd_dump(body='json')
    osd_disk_details = list(filter(lambda x: x['osd'] == osd_num,
                                   osd_dump['output']['osds']))[0]
    response, osd_perf = ceph.osd_perf(body='json')
    osd_disk_perf = list(filter(lambda x: x['id'] == osd_num,
                                osd_perf['output']['osd_perf_infos']))[0]
    return render_to_response('osd_details.html', locals())
def start(self):
    """Bring the service up: sysinv RPC client, local Ceph REST client,
    our own RPC server, and the monitor greenthread.
    """
    super(Service, self).start()

    transport = messaging.get_transport(self.conf)

    # Client side: handle for calling the sysinv conductor.
    conductor_target = messaging.Target(
        topic=constants.SYSINV_CONDUCTOR_TOPIC)
    self.sysinv_conductor = messaging.RPCClient(transport, conductor_target)

    # REST wrapper around the local Ceph endpoint.
    self.ceph_api = wrapper.CephWrapper(endpoint='https://localhost:5001')

    # Get initial config from sysinv and send it to
    # services that need it before starting them
    server_target = messaging.Target(
        topic=constants.CEPH_MANAGER_TOPIC,
        server=self.conf.sysinv_api_bind_ip)
    self.rpc_server = messaging.get_rpc_server(
        transport, server_target, [RpcEndpoint(self)], executor='eventlet')
    self.rpc_server.start()

    # Run the cluster monitor loop in an eventlet greenthread.
    eventlet.spawn_n(self.monitor.run)
def osd_details(request, osd_num):
    """Render the detail page for a single OSD, including the short
    hostname resolved from the OSD's public address.

    Raises IndexError if the OSD id is unknown to the cluster.
    """
    import socket

    ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)
    osd_num = int(osd_num)
    # BUGFIX: variable was misspelled `reponse`; also wrap filter() in
    # list() so the [0] subscript works on Python 3, where filter()
    # returns an iterator rather than a list.
    response, osd_dump = ceph.osd_dump(body='json')
    osd_disk_details = list(filter(lambda x: x['osd'] == osd_num,
                                   osd_dump['output']['osds']))[0]
    # Reverse-resolve the OSD's public address to a short hostname.
    # NOTE(review): split(":")[0] assumes an IPv4 "addr:port" form; an
    # IPv6 public_addr would be truncated incorrectly -- confirm.
    osd_disk_details["name"] = socket.gethostbyaddr(
        osd_disk_details["public_addr"].split(":")[0]
    )[0].split(".")[0]
    response, osd_perf = ceph.osd_perf(body='json')
    osd_disk_perf = list(filter(lambda x: x['id'] == osd_num,
                                osd_perf['output']['osd_perf_infos']))[0]
    return render_to_response('osd_details.html',
                              {'osd_disk': {'details': osd_disk_details,
                                            'perf': osd_disk_perf}})
def __init__(self):
    """Set up the Ceph REST client and the default storage tier name."""
    self._ceph_api = ceph.CephWrapper(endpoint='http://localhost:5001')
    default_name = constants.SB_TIER_DEFAULT_NAMES[
        constants.SB_TIER_TYPE_CEPH]
    self._default_tier = default_name
def cephwrapper():
    """Build and return a fresh CephWrapper client."""
    client = wrapper.CephWrapper()
    return client
def __init__(self):
    """Set up the Ceph REST client (on the mgr port) and the default
    storage tier name.
    """
    mgr_endpoint = 'http://localhost:{}'.format(constants.CEPH_MGR_PORT)
    self._ceph_api = ceph.CephWrapper(endpoint=mgr_endpoint)
    self._default_tier = constants.SB_TIER_DEFAULT_NAMES[
        constants.SB_TIER_TYPE_CEPH]
def home(request):
    """Main dashboard: overall cluster health and status.

    Builds a ``response`` dict with monitor / pg / osd ok-warn-crit
    buckets plus capacity figures, and renders ``dashboard.html`` (or
    returns the dict as JSON when the request has a ``json`` GET param).
    """
    response = {}
    ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)
    cresp, response['cluster_health'] = ceph.health(body='json')
    sresp, cluster_status = ceph.status(body='json')

    # Monitors: mons present in the health report go to ok/warn; any
    # mon in the monmap but missing from the report counts as critical.
    all_mons = cluster_status['output']['monmap']['mons']
    up_mons = (cluster_status['output']['health']['health']
               ['health_services'][0]['mons'])
    total_mon_count = len(all_mons)
    response['mons'] = {'ok': 0, 'warn': 0, 'crit': 0}
    for mon in up_mons:
        if mon['health'] == "HEALTH_OK":
            response['mons']['ok'] += 1
        else:
            response['mons']['warn'] += 1
    # BUGFIX: the remainder must subtract ok+warn.  Previously it
    # subtracted ok+crit (crit still 0 at this point), so every WARN
    # monitor was also counted as critical.
    response['mons']['crit'] = total_mon_count - (
        response['mons']['ok'] + response['mons']['warn']
    )

    # Get a rough estimate of cluster free space. Is this accurate ?
    bytes_total = cluster_status['output']['pgmap']['bytes_total']
    bytes_used = cluster_status['output']['pgmap']['bytes_used']

    def filesize(value):
        """Scale *value* bytes; returns (scale_index, scaled, suffix).

        NOTE(review): the '1 Byte' / '%d Bytes' branches return a plain
        string that would break the 3-tuple unpacking below; they are
        only reachable for clusters smaller than 1 MiB.  `suffixes` is
        presumably humanize.filesize.suffixes -- confirm the import.
        """
        value = float(value)
        if value == 1:
            return '1 Byte'
        elif value < 1024 ** 2:
            return '%d Bytes' % value
        for i, s in enumerate(suffixes['decimal']):
            unit = 1024 ** (i + 2)
            if value < unit * 1024:
                return i, (1024 * value / unit), s
        return i, (1024 * value / unit), s

    (response['scale'], response['data_avail'],
     response['data_scale']) = filesize(bytes_total)
    response['data_used'] = round(
        float(bytes_used) / (1024.0 ** (response['scale'] + 1)), 1)

    # pgs: bucket placement-group states into ok/warn/crit.
    pg_statuses = cluster_status['output']['pgmap']
    response['pg'] = {'ok': 0, 'warn': 0, 'crit': 0}
    # pg states
    pg_warn_status = re.compile("(creating|degraded|replay|splitting|"
                                "scrubbing|repair|recovering|backfill"
                                "|wait-backfill|remapped)")
    pg_crit_status = re.compile(
        "(down|inconsistent|incomplete|stale|peering)")
    for state in pg_statuses['pgs_by_state']:
        if state['state_name'] == "active+clean":
            response['pg']['ok'] += state['count']
        elif pg_warn_status.search(state['state_name']):
            response['pg']['warn'] += state['count']
        elif pg_crit_status.search(state['state_name']):
            response['pg']['crit'] += state['count']

    # pg statuses: raw per-state counters for the template.
    response['pg']['stat'] = dict()
    for state in pg_statuses['pgs_by_state']:
        response['pg']['stat'][state['state_name']] = state['count']

    # osds: up+in is ok, down+out is critical, mixed is a warning.
    dresp, osd_dump = ceph.osd_dump(body='json')
    response['osd'] = {'state': osd_dump['output']['osds'],
                       'ok': 0, 'warn': 0, 'crit': 0}
    for osd_status in response['osd']['state']:
        if osd_status["in"] and osd_status["up"]:
            response['osd']['ok'] += 1
        elif osd_status["in"] == 0 and osd_status["up"] == 0:
            response['osd']['crit'] += 1
        else:
            response['osd']['warn'] += 1

    if 'json' in request.GET:
        return JsonResponse(response)
    else:
        return render_to_response('dashboard.html', response)
from rest_framework.decorators import api_view
from rest_framework.reverse import reverse
from rest_framework.response import Response
from django.conf import settings
from cephclient import wrapper
from humanize import filesize
from collections import Counter
import json
import re

""" API Endpoint that grabs the cluster data """

# Module-level Ceph REST client shared by the views in this module.
get_data = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)

""" Master API view that has all sublinks """


@api_view(['GET'])
def api(request, format=None):
    """Root API view: hyperlinks to the cluster sub-endpoints."""
    return Response({
        "clusters-url": reverse("clusters", request=request),
        "health-url": reverse("cluster-health", request=request),
        "status-url": reverse("cluster-status", request=request)
    })


""" Master clusters view that just has hyperlinks to the subviews for now
def __init__(self):
    """Create the Ceph REST API client used by this object."""
    api_endpoint = 'http://localhost:5001/api/v0.1/'
    self._ceph_api = ceph.CephWrapper(endpoint=api_endpoint)
def home(request):
    """ Main dashboard, Overall cluster health and status """
    ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)
    cresp, cluster_health = ceph.health(body='json')
    sresp, cluster_status = ceph.status(body='json')

    # Monitors: mons in the timechecks report go to ok/warn; whatever
    # remains of the monmap is counted as critical.
    all_mons = cluster_status['output']['monmap']['mons']
    up_mons = cluster_status['output']['health']['timechecks']['mons']
    total_mon_count = len(all_mons)
    mons_ok = 0
    mons_warn = 0
    mons_crit = 0
    for mon in up_mons:
        if mon['health'] == "HEALTH_OK":
            mons_ok += 1
        else:
            mons_warn += 1
    mons_crit = total_mon_count - (mons_ok + mons_warn)

    # Activity: throughput/recovery rates from the pgmap; byte rates
    # are humanized, counters are passed through as-is.
    pgmap = cluster_status['output']['pgmap']
    activities = {}
    if 'read_bytes_sec' in pgmap:
        activities['Read'] = filesize.naturalsize(pgmap.get('read_bytes_sec'))
    if 'write_bytes_sec' in pgmap:
        activities['Write'] = filesize.naturalsize(
            pgmap.get('write_bytes_sec'))
    if 'op_per_sec' in pgmap:
        activities['Ops'] = pgmap.get('op_per_sec')
    if 'recovering_objects_per_sec' in pgmap:
        activities['Recovering Objects'] = pgmap.get(
            'recovering_objects_per_sec')
    if 'recovering_bytes_per_sec' in pgmap:
        activities['Recovery Speed'] = filesize.naturalsize(
            pgmap.get('recovering_bytes_per_sec'))
    if 'recovering_keys_per_sec' in pgmap:
        activities['Recovering Keys'] = pgmap.get('recovering_keys_per_sec')

    # Get a rough estimate of cluster free space. Is this accurate ?
    presp, pg_stat = ceph.pg_stat(body='json')
    bytes_total = cluster_status['output']['pgmap']['bytes_total']
    bytes_used = cluster_status['output']['pgmap']['bytes_used']
    # NOTE(review): naturalsize() emits decimal (1000-based) suffixes
    # by default, yet data_used divides by powers of 1024 -- the two
    # scales look inconsistent; confirm intended units.
    data_avail, data_scale = filesize.naturalsize(bytes_total).split()
    scale = filesize.suffixes['decimal'].index(data_scale) + 1
    data_used = round(float(bytes_used) / pow(1024, scale), 1)

    # pgs: bucket placement-group states into ok/warn/crit.
    pg_statuses = cluster_status['output']['pgmap']
    pg_ok = 0
    pg_warn = 0
    pg_crit = 0
    # pg states
    pg_warn_status = re.compile(
        "(creating|degraded|replay|splitting|scrubbing|repair|recovering|backfill|wait-backfill|remapped)"
    )
    pg_crit_status = re.compile("(down|inconsistent|incomplete|stale|peering)")
    for state in pg_statuses['pgs_by_state']:
        if state['state_name'] == "active+clean":
            pg_ok = pg_ok + state['count']
        elif pg_warn_status.search(state['state_name']):
            pg_warn = pg_warn + state['count']
        elif pg_crit_status.search(state['state_name']):
            pg_crit = pg_crit + state['count']

    # pg statuses: raw per-state counters for the template.
    pg_states = dict()
    for state in pg_statuses['pgs_by_state']:
        pg_states[state['state_name']] = state['count']

    # osds: classify each OSD by its reported state flags.
    dresp, osd_dump = ceph.osd_dump(body='json')
    osd_state = osd_dump['output']['osds']
    osds_ok = 0
    osds_warn = 0
    osds_crit = 0
    # Possible states are: exists, up, autoout, new, ???
    osd_up = re.compile("(?=.*exists)(?=.*up)")
    osd_down = re.compile("(?=.*exists)(?=.*autoout)")
    for osd_status in osd_state:
        if osd_up.search(str(osd_status['state'])):
            osds_ok += 1
        elif osd_down.search(str(osd_status['state'])):
            osds_warn += 1
        else:
            osds_crit += 1

    # The template context is locals(): every local name above is part
    # of the template contract, so do not rename them.
    return render_to_response('dashboard.html', locals())