def get_storage():
    """Return a JSON summary of storage-group fullness counts.

    Buckets every storage group into full / near-full / normal using
    the thresholds from the cluster settings, plus when the status was
    last updated.
    """
    # get the threshold for storage group (stored as strings).
    settings = vsmapi.get_setting_dict(None)
    _sgs = vsmapi.storage_group_status(None)
    _num = 0
    _num_normal = 0
    _num_near_full = 0
    _num_full = 0
    for _sg in _sgs:
        # Treat zero/unknown total as 1 to avoid dividing by zero.
        _sg.capacity_total = 1 if not _sg.capacity_total else _sg.capacity_total
        capacity_percent_used = 0 if not _sg.capacity_total else _sg.capacity_used * 100 / _sg.capacity_total
        if capacity_percent_used > float(settings["storage_group_full_threshold"]):
            _num_full += 1
        elif capacity_percent_used > float(settings["storage_group_near_full_threshold"]):
            _num_near_full += 1
        else:
            _num_normal += 1
        _num += 1
    Storage_dict = {
        "nearfull": _num_near_full,
        "full": _num_full,
        "normal": _num_normal,
        # FIX: guard against an empty status list instead of raising
        # IndexError on _sgs[0].
        "update": get_time_delta(_sgs[0].updated_at) if _sgs else ""
    }
    storagedata = json.dumps(Storage_dict)
    return storagedata
def get_summary(self):
    """Return a SortedDict summarising storage-group fullness for display."""
    _sgs = vsmapi.storage_group_status(self.request,)
    # Hard-coded fallback thresholds (percent used).
    _cfg = {
        "storage_group_near_full_threshold": 65,
        "storage_group_full_threshold": 85,
    }
    _num = 0
    _num_near_full = 0
    _num_full = 0
    for _sg in _sgs:
        # Treat zero/unknown total as 1 to avoid dividing by zero.
        _sg.capacity_total = 1 if not _sg.capacity_total else _sg.capacity_total
        capacity_percent_used = 0 if not _sg.capacity_total else _sg.capacity_used * 100 / _sg.capacity_total
        if capacity_percent_used < _cfg["storage_group_near_full_threshold"]:
            pass  # normal groups are not counted separately here
        elif capacity_percent_used < _cfg["storage_group_full_threshold"]:
            _num_near_full += 1
        else:
            _num_full += 1
        _num += 1
    data = SortedDict()
    data["Total Storage Groups"] = _num
    data["Storage Groups Near Full"] = _num_near_full
    data["Storage Groups Full"] = _num_full
    try:
        data["Last Updated"] = get_time_delta(_sgs[0].updated_at)
    except Exception:
        # FIX: was a bare except (also caught SystemExit/KeyboardInterrupt);
        # keep best-effort behaviour: no timestamp when _sgs is empty.
        pass
    return data
def get_OSD(): in_up = 0 in_down = 0 out_up = 0 out_down = 0 osd_summary = vsmapi.osd_summary(None) _osd_status = vsmapi.osd_status(None) for _osd in _osd_status: print _osd.state if _osd.state == "In-Up": in_up=in_up+1 elif _osd.state == "In-Down": in_dowm=in_down+1 elif _osd.state == "Out-Up": out_up=out_up+1 elif _osd.state == "Out-Down": out_down=out_down+1 OSD_dict = {"epoch":osd_summary.epoch ,"update":get_time_delta(osd_summary.updated_at) ,"in_up":in_up ,"in_down":in_down ,"out_up":out_up ,"out_down":out_down } OSDdata = json.dumps(OSD_dict) return OSDdata
def get_MDS():
    """Serialize the MDS map summary to JSON (pool/metadata shown as "--")."""
    summary = vsmapi.mds_summary(None)
    epoch = summary.epoch
    up = summary.num_up_mdses
    num_in = summary.num_in_mdses
    failed = summary.num_failed_mdses
    stopped = summary.num_stopped_mdses
    # Use the timestamp of the last reported MDS, if any.
    last_update = ""
    for entry in vsmapi.mds_status(None):
        last_update = get_time_delta(entry.updated_at)
    payload = {
        "epoch": epoch,
        "update": last_update,
        "Up": up,
        "In": num_in,
        "Failed": failed,
        "Stopped": stopped,
        "PoolData": "--",
        "MetaData": "--",
    }
    return json.dumps(payload)
def get_OSD(): in_up = 0 in_down = 0 out_up = 0 out_down = 0 osd_summary = vsmapi.osd_summary(None) _osd_status = vsmapi.osd_status(None) for _osd in _osd_status: print _osd.state if _osd.state == "In-Up": in_up = in_up + 1 elif _osd.state == "In-Down": in_dowm = in_down + 1 elif _osd.state == "Out-Up": out_up = out_up + 1 elif _osd.state == "Out-Down": out_down = out_down + 1 OSD_dict = { "epoch": osd_summary.epoch, "update": get_time_delta(osd_summary.updated_at), "in_up": in_up, "in_down": in_down, "out_up": out_up, "out_down": out_down } OSDdata = json.dumps(OSD_dict) return OSDdata
def get_MDS():
    """Serialize the MDS map summary (including pool info) to JSON."""
    summary = vsmapi.mds_summary(None)
    epoch = summary.epoch
    up = summary.num_up_mdses
    num_in = summary.num_in_mdses
    failed = summary.num_failed_mdses
    stopped = summary.num_stopped_mdses
    data_pools = summary.data_pools
    metadata_pool = summary.metadata_pool
    # Use the timestamp of the last reported MDS, if any.
    last_update = ""
    for entry in vsmapi.mds_status(None):
        last_update = get_time_delta(entry.updated_at)
    payload = {
        "epoch": epoch,
        "update": last_update,
        "Up": up,
        "In": num_in,
        "Failed": failed,
        "Stopped": stopped,
        "PoolData": data_pools,
        "MetaData": metadata_pool,
    }
    return json.dumps(payload)
def get_storage():
    """Return a JSON summary of storage groups bucketed by fullness."""
    groups = vsmapi.storage_group_status(None)
    # Hard-coded percent-used thresholds.
    thresholds = {
        "nearfull_threshold": 65,
        "full_threshold": 85,
    }
    total = 0
    normal = 0
    near_full = 0
    full = 0
    for group in groups:
        # Treat zero/unknown total as 1 to avoid dividing by zero.
        group.capacity_total = group.capacity_total or 1
        percent_used = 0 if not group.capacity_total else group.capacity_used * 100 / group.capacity_total
        if percent_used < thresholds["nearfull_threshold"]:
            normal += 1
        elif percent_used < thresholds["full_threshold"]:
            near_full += 1
        else:
            full += 1
        total += 1
    summary = {
        "nearfull": near_full,
        "full": full,
        "normal": normal,
        "update": get_time_delta(groups[0].updated_at),
    }
    return json.dumps(summary)
def get_storage():
    """Return a JSON summary of storage-group fullness counts.

    Uses the near-full / full thresholds from the cluster settings.
    """
    # get the threshold for storage group
    settings = vsmapi.get_setting_dict(None)
    _sgs = vsmapi.storage_group_status(None)
    _num = 0
    _num_normal = 0
    _num_near_full = 0
    _num_full = 0
    for _sg in _sgs:
        # Treat zero/unknown total as 1 to avoid dividing by zero.
        _sg.capacity_total = 1 if not _sg.capacity_total else _sg.capacity_total
        capacity_percent_used = 0 if not _sg.capacity_total else _sg.capacity_used * 100 / _sg.capacity_total
        # BUG FIX: settings values are strings; under Python 2 an
        # int < str comparison never compares numerically. Convert with
        # float() (consistent with the other get_storage variant).
        if capacity_percent_used < float(settings["storage_group_near_full_threshold"]):
            _num_normal += 1
        elif capacity_percent_used < float(settings["storage_group_full_threshold"]):
            _num_near_full += 1
        else:
            _num_full += 1
        _num += 1
    Storage_dict = {
        "nearfull": _num_near_full,
        "full": _num_full,
        "normal": _num_normal,
        "update": get_time_delta(_sgs[0].updated_at)
    }
    storagedata = json.dumps(Storage_dict)
    return storagedata
def get_data(self): _sgs = [] #_sgs= vsmapi.get_sg_list(self.request,) try: _sgs = vsmapi.storage_group_status(self.request,) if _sgs: logging.debug("resp body in view: %s" % _sgs) settings = vsmapi.get_setting_dict(self.request) sg_near_full_threshold = settings['storage_group_near_full_threshold'] sg_full_threshold = settings['storage_group_full_threshold'] except: exceptions.handle(self.request, _('Unable to retrieve sever list. ')) storage_group_status = [] for _sg in _sgs: sg = {"id": _sg.id, "name": _sg.name, "friendly_name", _sg.friendly_name "attached_pools": _sg.attached_pools, "capacity_total": 0 if not _sg.capacity_total else round(_sg.capacity_total * 1.0 / 1024 / 1024, 1), "capacity_used": 0 if not _sg.capacity_used else round(_sg.capacity_used * 1.0 / 1024 / 1024, 1), "capacity_avail": 0 if not _sg.capacity_avail else round(_sg.capacity_avail * 1.0 / 1024 / 1024, 1), "capacity_percent_used": 0 if not _sg.capacity_total else _sg.capacity_used * 10000 / _sg.capacity_total / 100.0, "largest_node_capacity_used": 0 if not _sg.largest_node_capacity_used else round(_sg.largest_node_capacity_used * 1.0 / 1024 / 1024, 1), "status": _sg.status, "updated_at": get_time_delta(_sg.updated_at), } if sg['capacity_percent_used'] >= int(sg_full_threshold):
def get_data(self):
    """Fetch monitor status rows, converting KB figures to MB for display."""
    _monitor_status = []
    #_monitors= vsmapi.get_monitor_list(self.request,)
    try:
        _monitor_status = vsmapi.monitor_status(self.request,)
        LOG.info("MONITOR STATUS: %s " % _monitor_status)
        if _monitor_status:
            logging.debug("resp body in view: %s" % _monitor_status)
    except:
        exceptions.handle(self.request, _('Unable to retrieve mon list. '))
    rows = []
    for mon in _monitor_status:
        rows.append({
            "id": mon.id,
            "name": mon.name,
            "address": mon.address,
            "health": mon.health,
            "details": mon.details,
            "skew": mon.skew,
            "latency": mon.latency,
            # KB -> MB; 0 when the monitor reported no figure.
            "mb_total": int(mon.kb_total / 1024) if mon.kb_total else 0,
            "mb_used": int(mon.kb_used / 1024) if mon.kb_used else 0,
            "mb_avail": int(mon.kb_avail / 1024) if mon.kb_avail else 0,
            "percent_avail": mon.avail_percent,
            "updated_at": get_time_delta(mon.updated_at),
        })
    return rows
def get_data(self):
    """Fetch RBD pool status rows (sizes reported in MB)."""
    page_limit = utils.get_page_size(self.request)
    sort_dir = "asc"
    marker = self.request.GET.get('marker', "")
    _rbd_status = []
    try:
        _rbd_status = vsmapi.rbd_pool_status(self.request, paginate_opts={
            "limit": page_limit,
            "sort_dir": sort_dir,
            "marker": marker,
        })
        if _rbd_status:
            logging.debug("resp body in view: %s" % _rbd_status)
    except:
        exceptions.handle(self.request, _('Unable to retrieve sever list. '))
    rows = []
    for item in _rbd_status:
        rows.append({
            "id": item.id,
            "pool": item.pool,
            "image_name": item.image_name,
            # Bytes -> MB (integer division, matching the display).
            "size": item.size / (1024 * 1024),
            "objects": item.objects,
            "order": item.order,
            "format": item.format,
            "updated_at": get_time_delta(item.updated_at),
        })
    return rows
def get_OSD():
    """Return a JSON summary of OSD states and capacity-fullness counts."""
    # Full / near-full thresholds (percent used) come from settings.
    settings = vsmapi.get_setting_dict(None)
    near_full_limit = int(settings['disk_near_full_threshold'])
    full_limit = int(settings['disk_full_threshold'])
    in_up = in_down = out_up = out_down = 0
    available_count = near_full_count = full_count = 0
    summary = vsmapi.osd_summary(None)
    tracked_states = ["In-Up", "In-Down", "Out-Up", "Out-Down", "Out-Down-Autoout"]
    for osd in vsmapi.osd_status(None):
        device = osd.device
        # KB -> MB; 0 when the device reports no figure.
        avail_mb = int(device['avail_capacity_kb'] / 1024) if device['avail_capacity_kb'] else 0
        used_mb = int(device['used_capacity_kb'] / 1024) if device['used_capacity_kb'] else 0
        total_mb = int(device['total_capacity_kb'] / 1024) if device['total_capacity_kb'] else 0
        if total_mb and osd.state in tracked_states:
            percent_used = round(used_mb * 1.0 / total_mb * 100, 2)
            if percent_used >= full_limit:
                full_count += 1
            elif percent_used >= near_full_limit:
                near_full_count += 1
            else:
                available_count += 1
        if osd.state == "In-Up":
            in_up += 1
        elif osd.state == "In-Down":
            in_down += 1
        elif osd.state == "Out-Up":
            out_up += 1
        elif osd.state in ("Out-Down", "Out-Down-Autoout"):
            out_down += 1
    payload = {
        "epoch": summary.epoch,
        "update": get_time_delta(summary.updated_at),
        "in_up": in_up,
        "in_down": in_down,
        "out_up": out_up,
        "out_down": out_down,
        "capacity_full_count": full_count,
        "capacity_near_full_count": near_full_count,
        "capacity_available_count": available_count,
    }
    return json.dumps(payload)
def get_monitor():
    """Return a JSON summary of the monitor map (selected index fixed at 1)."""
    summary = vsmapi.monitor_summary(None)
    epoch = summary.monmap_epoch
    updated = get_time_delta(summary.updated_at)
    # Quorum arrives as a space-separated string of ranks.
    ranks = summary.quorum.split(" ")
    # monitors = monitor_summary.monitors
    payload = {
        "epoch": epoch,
        "update": updated,
        "quorum": ranks,
        # NOTE: the selected-monitor index is hard-coded here.
        "selMonitor": 1,
    }
    return json.dumps(payload)
def monitor():
    """Return the monitor map summary as a plain dict."""
    summary = vsmapi.monitor_summary(None)
    return {
        "monmap_epoch": summary.monmap_epoch,
        "monitors": summary.monitors,
        "election_epoch": summary.election_epoch,
        "quorum": summary.quorum,
        "update": get_time_delta(summary.updated_at),
    }
def osd():
    """Return the OSD map summary as a plain dict."""
    summary = vsmapi.osd_summary(None)
    return {
        "epoch": summary.epoch,
        "total": summary.num_osds,
        "up": summary.num_up_osds,
        "in": summary.num_in_osds,
        "update": get_time_delta(summary.updated_at),
    }
def get_OSD():
    """Return a JSON summary of OSD states and capacity-fullness counts."""
    #get the full or near full threshold
    settings = vsmapi.get_setting_dict(None)
    disk_near_full_threshold = int(settings['disk_near_full_threshold'])
    disk_full_threshold = int(settings['disk_full_threshold'])
    in_up = 0
    in_down = 0
    out_up = 0
    out_down = 0
    available_count = 0
    near_full_count = 0
    full_count = 0
    osd_summary = vsmapi.osd_summary(None)
    _osd_status = vsmapi.osd_status(None)
    for _osd in _osd_status:
        # KB -> MB; 0 when the device reports no figure.
        _osd_capacity_avaliable = 0 if not _osd.device['avail_capacity_kb'] \
            else int(_osd.device['avail_capacity_kb'] / 1024)
        _osd_capacity_used = 0 if not _osd.device['used_capacity_kb'] \
            else int(_osd.device['used_capacity_kb'] / 1024)
        _osd_capacity_total = 0 if not _osd.device['total_capacity_kb'] \
            else int(_osd.device['total_capacity_kb'] / 1024)
        if _osd_capacity_total:
            _osd_capacity_status = round(_osd_capacity_used * 1.0 / _osd_capacity_total * 100, 2)
            if _osd_capacity_status >= disk_full_threshold:
                full_count = full_count + 1
            elif _osd_capacity_status >= disk_near_full_threshold:
                near_full_count = near_full_count + 1
            else:
                available_count = available_count + 1
        if _osd.state == "In-Up":
            in_up = in_up + 1
        elif _osd.state == "In-Down":
            # BUG FIX: previously incremented a typo name ("in_dowm"),
            # so In-Down OSDs were never counted.
            in_down = in_down + 1
        elif _osd.state == "Out-Up":
            out_up = out_up + 1
        elif _osd.state == "Out-Down":
            out_down = out_down + 1
    OSD_dict = {
        "epoch": osd_summary.epoch,
        "update": get_time_delta(osd_summary.updated_at),
        "in_up": in_up,
        "in_down": in_down,
        "out_up": out_up,
        "out_down": out_down,
        "capacity_full_count": full_count,
        "capacity_near_full_count": near_full_count,
        "capacity_available_count": available_count
    }
    OSDdata = json.dumps(OSD_dict)
    return OSDdata
def get_summary(self):
    """Return a SortedDict summarising the monitor map for display."""
    monitor_summary = vsmapi.monitor_summary(self.request)
    # FIX: this is debug tracing, not an error condition — log at debug
    # level (consistent with the other monitor get_summary variant).
    LOG.debug("dir monitor_summary:%s" % (dir(monitor_summary)))
    data = SortedDict()
    data["Monmap Epoch"] = monitor_summary.monmap_epoch
    data["Monitors"] = monitor_summary.monitors
    data["Election epoch"] = monitor_summary.election_epoch
    data["Quorum"] = monitor_summary.quorum
    data["Last Updated"] = get_time_delta(monitor_summary.updated_at)
    return data
def get_summary(self):
    """Return a SortedDict of placement-group totals broken out by state."""
    pg_summary = vsmapi.placement_group_summary(self.request)
    LOG.info('pg_summary:%s' % pg_summary)
    data = SortedDict()
    data["PGmap Version"] = pg_summary.version
    data["Total PGs"] = pg_summary.num_pgs
    # One row per PG state, e.g. "PGs active+clean".
    for state in pg_summary.pgs_by_state:
        data["PGs " + state['state_name']] = state['count']
    data["Last Updated"] = get_time_delta(pg_summary.updated_at)
    return data
def get_datasource(page_index, keyword):
    """Return one page of filtered OSD rows plus pagination metadata.

    `keyword` is matched against OSD name, server name, zone name and
    state; `page_index` selects which page of the filtered results to
    return.
    """
    paginate_opts = {
        "limit": 10000,
        "marker": 0,
        "sort_keys": 'id',
        "sort_dir": 'asc',
        "osd_name": keyword,
        "server_name": keyword,
        "zone_name": keyword,
        "state": keyword
    }
    #get the datasource
    datasource = vsmapi.osd_status_sort_and_filter(None, paginate_opts)
    #get the paginate
    paginate = calculate_paginate(page_index, len(datasource))
    #organize the data
    osd_data = {"osd_list": [], "paginate": paginate}
    index = 0
    for item in datasource:
        # BUG FIX: pagination metadata was computed but every row was
        # returned; skip rows outside the requested page (consistent
        # with the other get_datasource variant).
        index += 1
        if index <= paginate['data_start_index'] or index > paginate['data_end_index']:
            continue
        capacity_total = 0 if not item.device['total_capacity_kb'] else int(
            item.device['total_capacity_kb'] / 1024)
        capacity_used = 0 if not item.device['used_capacity_kb'] else int(
            item.device['used_capacity_kb'] / 1024)
        capacity_avail = 0 if not item.device['avail_capacity_kb'] else int(
            item.device['avail_capacity_kb'] / 1024)
        capacity_percent_used = 0 if not item.device[
            'total_capacity_kb'] else item.device[
            'used_capacity_kb'] * 100 / item.device['total_capacity_kb']
        osd = {
            "id": item.id,
            "osd_name": item.osd_name,
            "vsm_status": item.operation_status,
            "osd_state": item.state,
            "crush_weight": item.weight,
            "capacity_total": capacity_total,
            "capacity_used": capacity_used,
            "capacity_avail": capacity_avail,
            "capacity_percent_used": capacity_percent_used,
            "server": item.service['host'],
            "storage_group": item.storage_group['name'],
            "zone": item.zone,
            "updated_at": get_time_delta(item.updated_at),
            "deviceInfo": "",
            "page_index": paginate["page_index"],
            "page_count": paginate["page_count"],
            "pager_index": paginate["pager_index"],
            "pager_count": paginate["pager_count"],
        }
        osd_data["osd_list"].append(osd)
    return osd_data
def get_data(self):
    """Fetch OSD status rows with device capacities converted KB -> MB."""
    default_limit = 10000
    default_sort_dir = "asc"
    default_sort_keys = ['osd_name']
    marker = self.request.GET.get('marker', "")
    LOG.info("CEPH_LOG VSM OSD SUMMARY:%s" % vsmapi.osd_summary(self.request))
    # FIX: initialise before the try block so a failed API call doesn't
    # raise NameError in the loop below.
    _osd_status = []
    try:
        _osd_status = vsmapi.osd_status(self.request, paginate_opts={
            "limit": default_limit,
            "sort_dir": default_sort_dir,
            "marker": marker,
        })
        if _osd_status:
            logging.debug("resp body in view: %s" % _osd_status)
    except:
        exceptions.handle(self.request, _('Unable to retrieve osd list. '))
    osd_status = []
    for _osd in _osd_status:
        LOG.info("DEVICE:%s" % _osd.device.keys())
        osd = {
            "id": _osd.id,
            "osd_name": _osd.osd_name,
            "vsm_status": _osd.operation_status,
            "osd_state": _osd.state,
            "crush_weight": _osd.weight,
            # KB -> MB; 0 when the device reports no figure.
            "capacity_total": 0 if not _osd.device['total_capacity_kb']
                else int(_osd.device['total_capacity_kb'] / 1024),
            "capacity_used": 0 if not _osd.device['used_capacity_kb']
                else int(_osd.device['used_capacity_kb'] / 1024),
            "capacity_avail": 0 if not _osd.device['avail_capacity_kb']
                else int(_osd.device['avail_capacity_kb'] / 1024),
            "capacity_percent_used": 0 if not _osd.device['total_capacity_kb']
                else _osd.device['used_capacity_kb'] * 100 / _osd.device['total_capacity_kb'],
            "server": _osd.service['host'],
            "storage_group": _osd.storage_group['name'],
            "zone": _osd.zone,
            "updated_at": get_time_delta(_osd.updated_at),
        }
        osd_status.append(osd)
    return osd_status
def get_summary(self):
    """Return a SortedDict summarising the monitor map for display."""
    summary = vsmapi.monitor_summary(self.request)
    LOG.debug("dir monitor_summary:%s" % (dir(summary)))
    data = SortedDict()
    data["Monmap Epoch"] = summary.monmap_epoch
    data["Monitors"] = summary.monitors
    data["Election epoch"] = summary.election_epoch
    data["Quorum"] = summary.quorum
    data["Last Updated"] = get_time_delta(summary.updated_at)
    return data
def get_datasource(page_index, keyword):
    """Return one page of filtered OSD rows plus pagination metadata."""
    opts = {
        "limit": 10000,
        "marker": 0,
        "sort_keys": 'id',
        "sort_dir": 'asc',
        "osd_name": keyword,
        "server_name": keyword,
        "zone_name": keyword,
        "state": keyword,
    }
    # Fetch the filtered/sorted rows, then compute page boundaries.
    rows = vsmapi.osd_status_sort_and_filter(None, opts)
    paginate = calculate_paginate(page_index, len(rows))
    result = {"osd_list": [], "paginate": paginate}
    for position, item in enumerate(rows, 1):
        # Only rows inside (data_start_index, data_end_index] belong to
        # the requested page.
        if position <= paginate['data_start_index'] or position > paginate['data_end_index']:
            continue
        device = item.device
        total_mb = int(device['total_capacity_kb'] / 1024) if device['total_capacity_kb'] else 0
        used_mb = int(device['used_capacity_kb'] / 1024) if device['used_capacity_kb'] else 0
        avail_mb = int(device['avail_capacity_kb'] / 1024) if device['avail_capacity_kb'] else 0
        percent_used = device['used_capacity_kb'] * 100 / device['total_capacity_kb'] if device['total_capacity_kb'] else 0
        result["osd_list"].append({
            "id": item.id,
            "osd_name": item.osd_name,
            "vsm_status": item.operation_status,
            "osd_state": item.state,
            "crush_weight": item.weight,
            "capacity_total": total_mb,
            "capacity_used": used_mb,
            "capacity_avail": avail_mb,
            "capacity_percent_used": percent_used,
            "server": item.service['host'],
            "storage_group": item.storage_group['name'],
            "zone": item.zone,
            "updated_at": get_time_delta(item.updated_at),
            "deviceInfo": "",
            "page_index": paginate["page_index"],
            "page_count": paginate["page_count"],
            "pager_index": paginate["pager_index"],
            "pager_count": paginate["pager_count"],
        })
    return result
def get_data(self):
    """Fetch OSD status rows with device capacities converted KB -> MB."""
    default_limit = 10000
    default_sort_dir = "asc"
    default_sort_keys = ['osd_name']
    marker = self.request.GET.get('marker', "")
    # FIX: these are debug traces, not error conditions — log at debug
    # level (consistent with the LOG.info/LOG.debug use in the sibling
    # get_data variant).
    LOG.debug("<CEPH_LOG VSM OSD SUMMARY")
    LOG.debug(vsmapi.osd_summary(self.request))
    LOG.debug(">CEPH_LOG VSM OSD SUMMARY")
    LOG.debug(vsmapi.osd_status(self.request))
    LOG.debug("CEPH_LOG VSM OSD SUMMARY")
    # FIX: initialise before the try block so a failed API call doesn't
    # raise NameError in the loop below.
    _osd_status = []
    try:
        _osd_status = vsmapi.osd_status(self.request, paginate_opts={
            "limit": default_limit,
            "sort_dir": default_sort_dir,
            "marker": marker,
        })
        if _osd_status:
            logging.debug("resp body in view: %s" % _osd_status)
    except:
        exceptions.handle(self.request, _('Unable to retrieve osd list. '))
    osd_status = []
    for _osd in _osd_status:
        LOG.debug("DEVICE")
        LOG.debug(_osd.device.keys())
        LOG.debug(">DEVICE")
        osd = {
            "id": _osd.id,
            "osd_name": _osd.osd_name,
            "vsm_status": _osd.operation_status,
            "osd_state": _osd.state,
            "crush_weight": _osd.weight,
            # KB -> MB; 0 when the device reports no figure.
            "capacity_total": 0 if not _osd.device['total_capacity_kb']
                else int(_osd.device['total_capacity_kb'] / 1024),
            "capacity_used": 0 if not _osd.device['used_capacity_kb']
                else int(_osd.device['used_capacity_kb'] / 1024),
            "capacity_avail": 0 if not _osd.device['avail_capacity_kb']
                else int(_osd.device['avail_capacity_kb'] / 1024),
            "capacity_percent_used": 0 if not _osd.device['total_capacity_kb']
                else _osd.device['used_capacity_kb'] * 100 / _osd.device['total_capacity_kb'],
            "server": _osd.service['host'],
            "storage_group": _osd.storage_group['name'],
            "zone": _osd.zone,
            "updated_at": get_time_delta(_osd.updated_at),
        }
        osd_status.append(osd)
    return osd_status
def get_monitor():
    """Return a JSON summary of the monitor map, marking the quorum leader."""
    summary = vsmapi.monitor_summary(None)
    epoch = summary.monmap_epoch
    updated = get_time_delta(summary.updated_at)
    # Quorum arrives as a space-separated string of monitor ranks.
    ranks = summary.quorum.split(" ")
    # Index of the leader's rank within the quorum list drives the UI
    # selection.
    leader_index = ranks.index(summary.quorum_leader_rank)
    #monitors = monitor_summary.monitors
    payload = {
        "epoch": epoch,
        "update": updated,
        "quorum": ranks,
        "selMonitor": leader_index,
    }
    return json.dumps(payload)
def get_summary(self):
    """Return a SortedDict summarising the OSD map for display."""
    summary = vsmapi.osd_summary(self.request)
    LOG.debug("dir osd_summary:%s" % (dir(summary)))
    data = SortedDict()
    data["Osdmap Epoch"] = summary.epoch
    data["Total OSDs"] = summary.num_osds
    data["OSDs up"] = summary.num_up_osds
    data["OSDs in"] = summary.num_in_osds
    #data["Near Full"] = osd_summary.nearfull
    #data["Full"] = osd_summary.full
    data["Last Updated"] = get_time_delta(summary.updated_at)
    return data
def get_summary(self):
    """Return a SortedDict summarising the OSD map for display."""
    osd_summary = vsmapi.osd_summary(self.request)
    # FIX: this is debug tracing, not an error condition — log at debug
    # level (consistent with the other osd get_summary variant).
    LOG.debug("dir osd_summary:%s" % (dir(osd_summary)))
    data = SortedDict()
    data["Osdmap Epoch"] = osd_summary.epoch
    data["Total OSDs"] = osd_summary.num_osds
    data["OSDs up"] = osd_summary.num_up_osds
    data["OSDs in"] = osd_summary.num_in_osds
    #data["Near Full"] = osd_summary.nearfull
    #data["Full"] = osd_summary.full
    data["Last Updated"] = get_time_delta(osd_summary.updated_at)
    return data
def get_summary(self):
    """Return PG totals split into active+clean vs everything else."""
    pg_summary = vsmapi.placement_group_summary(self.request)
    clean = 0
    not_clean = 0
    for state in pg_summary.pgs_by_state:
        if state['state_name'] == "active+clean":
            clean += state['count']
        else:
            not_clean += state['count']
    data = SortedDict()
    data["PGmap Version"] = pg_summary.version
    data["Total PGs"] = pg_summary.num_pgs
    data["PGs active+clean"] = clean
    data["PGs not active+clean"] = not_clean
    data["Last Updated"] = get_time_delta(pg_summary.updated_at)
    return data
def pg():
    """Return the placement-group map summary as a plain dict."""
    summary = vsmapi.placement_group_summary(None)
    # Per-state breakdown is intentionally omitted here (see the
    # commented-out code in the repository history).
    return {
        "pgmap_version": summary.version,
        "total_pgs": summary.num_pgs,
        "update": get_time_delta(summary.updated_at),
    }
def get_PG():
    """Return a JSON summary of PGs split into active+clean vs the rest."""
    summary = vsmapi.placement_group_summary(None)
    version = summary.version
    updated = get_time_delta(summary.updated_at)
    total = summary.num_pgs
    clean = 0
    not_clean = 0
    for state in summary.pgs_by_state:
        if state['state_name'] == "active+clean":
            clean += state['count']
        else:
            not_clean += state['count']
    payload = {
        "version": version,
        "update": updated,
        "total": total,
        "active_clean": clean,
        "not_active_clean": not_clean,
    }
    return json.dumps(payload)
def get_version():
    """Return JSON with the VSM version, Ceph version and cluster uptime.

    The Ceph/uptime lookup is best-effort: on any API failure those
    fields are returned as empty strings.
    """
    ceph_version = ''
    up_time = ''
    try:
        vsm_summary = vsmapi.vsm_summary(None)
        if vsm_summary is not None:
            up_time = get_time_delta(vsm_summary.created_at)
            ceph_version = vsm_summary.ceph_version
    except Exception:
        # FIX: was a bare except, which also swallowed SystemExit /
        # KeyboardInterrupt; keep the deliberate best-effort behaviour
        # for ordinary errors only.
        pass
    vsm_version = get_vsm_version()
    vsm_version = {
        "version": vsm_version,
        "update": up_time,
        "ceph_version": ceph_version,
    }
    version_data = json.dumps(vsm_version)
    return version_data
def get_data(self):
    """Fetch monitor status rows, converting KB figures to MB for display."""
    raw = []
    #_monitors= vsmapi.get_monitor_list(self.request,)
    try:
        raw = vsmapi.monitor_status(self.request, )
        LOG.info("MONITOR STATUS: %s " % raw)
        if raw:
            logging.debug("resp body in view: %s" % raw)
    except:
        exceptions.handle(self.request, _('Unable to retrieve mon list. '))
    rows = []
    for mon in raw:
        rows.append({
            "id": mon.id,
            "name": mon.name,
            "address": mon.address,
            "health": mon.health,
            "details": mon.details,
            "skew": mon.skew,
            "latency": mon.latency,
            # KB -> MB; 0 when the monitor reported no figure.
            "mb_total": int(mon.kb_total / 1024) if mon.kb_total else 0,
            "mb_used": int(mon.kb_used / 1024) if mon.kb_used else 0,
            "mb_avail": int(mon.kb_avail / 1024) if mon.kb_avail else 0,
            "percent_avail": mon.avail_percent,
            "updated_at": get_time_delta(mon.updated_at),
        })
    return rows
def get_data(self):
    """Fetch MDS status rows for display."""
    _servers = []
    #_servers= vsmapi.get_server_list(self.request,)
    # FIX: initialise before the try block so a failed API call doesn't
    # raise NameError in the loop below (consistent with the other
    # mds get_data variant).
    _mds_status = []
    try:
        _mds_summary = vsmapi.mds_summary(self.request)
        _mds_status = vsmapi.mds_status(self.request)
    except:
        exceptions.handle(self.request, _('Unable to retrieve sever list. '))
    mds_status = []
    for _mds in _mds_status:
        mds = {
            "gid": _mds.gid,
            "id": _mds.id,
            "name": _mds.name,
            "state": _mds.state,
            "address": _mds.address,
            "updated_at": get_time_delta(_mds.updated_at),
        }
        mds_status.append(mds)
    return mds_status
def get_data(self):
    """Fetch pool status rows sorted by pool id."""
    #_pool.= vsmapi.get_pool.list(self.request,)
    _pool_status = []
    try:
        _pool_status = vsmapi.pool_status(self.request)
        if _pool_status:
            logging.debug("resp body in view: %s" % _pool_status)
    except:
        exceptions.handle(self.request, _('Unable to retrieve sever list. '))
    rows = []
    for item in _pool_status:
        rows.append({
            "id": item.poolId,
            "name": item.name,
            "tag": item.tag,
            "storage_group": item.storageGroup,
            "size": item.size,
            "pg_count": item.pgNum,
            "pgp_count": item.pgpNum,
            "create_by": item.createdBy,
            # Bytes -> KB, bumped by one when non-zero.
            "kb_used": (item.num_bytes / 1024 + 1) if item.num_bytes else 0,
            "objects": item.num_objects,
            "clones": item.num_object_clones,
            "degraded": item.num_objects_degraded,
            "unfound": item.num_objects_unfound,
            "read_ops": item.num_read,
            "read_kb": item.num_read_kb,
            "write_ops": item.num_write,
            "write_kb": item.num_write_kb,
            "client_read_b": item.read_bytes_sec,
            "client_write_b": item.write_bytes_sec,
            "client_ops": item.op_per_sec,
            "status": item.status,
            "updated_at": get_time_delta(item.updated_at),
        })
    # Present pools in ascending id order.
    return sorted(rows, key=lambda row: row['id'])
def get_data(self):
    """Fetch MDS status rows for display."""
    _servers = []
    #_servers= vsmapi.get_server_list(self.request,)
    raw = []
    try:
        raw = vsmapi.mds_status(self.request)
    except:
        exceptions.handle(self.request, _('Unable to retrieve sever list. '))
    return [
        {
            "gid": mds.gid,
            "id": mds.id,
            "name": mds.name,
            "state": mds.state,
            "address": mds.address,
            "updated_at": get_time_delta(mds.updated_at),
        }
        for mds in raw
    ]
def get_data(self):
    """Fetch one page of OSD status rows plus embedded pager metadata."""
    default_limit = 10000
    default_sort_dir = "asc"
    default_sort_keys = ['osd_name']
    marker = self.request.GET.get('marker', "")
    # FIX: initialise before the try block so a failed API call doesn't
    # raise NameError in the pagination code below.
    _osd_status = []
    try:
        _osd_status = vsmapi.osd_status(self.request, paginate_opts={
            "limit": default_limit,
            "sort_dir": default_sort_dir,
            "marker": marker,
        })
        if _osd_status:
            logging.debug("resp body in view: %s" % _osd_status)
    except:
        exceptions.handle(self.request, _('Unable to retrieve osd list. '))
    # Page arithmetic: 20 rows per page, pager groups of 10 pages.
    page_index = int(self.request.GET.get('pageIndex', 1))
    page_size = 20
    page_count = int(len(_osd_status) / page_size)
    page_mod = len(_osd_status) % page_size
    if page_mod > 0:
        page_count = page_count + 1
    pager_size = 10
    pager_count = int(page_count / (pager_size))
    pager_index = int(page_index / (pager_size))
    if page_count % pager_size > 0:
        pager_count = pager_count + 1
    if page_index % pager_size > 0:
        pager_index = pager_index + 1
    dataStartIndex = (page_index - 1) * page_size
    dataEndIndex = dataStartIndex + page_size
    _osd_status = _osd_status[dataStartIndex:dataEndIndex]
    osd_status = []
    for _osd in _osd_status:
        LOG.info("DEVICE:%s" % _osd.device.keys())
        osd = {
            "id": _osd.id,
            "osd_name": _osd.osd_name,
            "vsm_status": _osd.operation_status,
            "osd_state": _osd.state,
            "crush_weight": _osd.weight,
            # KB -> MB; 0 when the device reports no figure.
            "capacity_total": 0 if not _osd.device['total_capacity_kb']
                else int(_osd.device['total_capacity_kb'] / 1024),
            "capacity_used": 0 if not _osd.device['used_capacity_kb']
                else int(_osd.device['used_capacity_kb'] / 1024),
            "capacity_avail": 0 if not _osd.device['avail_capacity_kb']
                else int(_osd.device['avail_capacity_kb'] / 1024),
            "capacity_percent_used": 0 if not _osd.device['total_capacity_kb']
                else _osd.device['used_capacity_kb'] * 100 / _osd.device['total_capacity_kb'],
            "server": _osd.service['host'],
            "storage_group": _osd.storage_group['name'],
            "zone": _osd.zone,
            "updated_at": get_time_delta(_osd.updated_at),
            "pageCount": page_count,
            "pageIndex": page_index,
            "pagerCount": pager_count,
            "pagerIndex": pager_index,
            "deviceInfo": ""
        }
        osd_status.append(osd)
    return osd_status
def get_data(self):
    """Fetch one page of OSD status rows plus embedded pager metadata."""
    marker = self.request.GET.get('marker', "")
    raw = ""
    try:
        raw = vsmapi.osd_status(self.request, paginate_opts={
            "limit": 10000,
            "sort_dir": "asc",
            "marker": marker,
        })
        if raw:
            logging.debug("resp body in view: %s" % raw)
    except:
        exceptions.handle(self.request, _('Unable to retrieve osd list. '))
    # Page arithmetic: 20 rows per page, pager groups of 10 pages.
    page_index = int(self.request.GET.get('pageIndex', 1))
    page_size = 20
    pager_size = 10
    page_count = int(len(raw) / page_size)
    if len(raw) % page_size > 0:
        page_count += 1
    pager_count = int(page_count / pager_size)
    pager_index = int(page_index / pager_size)
    if page_count % pager_size > 0:
        pager_count += 1
    if page_index % pager_size > 0:
        pager_index += 1
    start = (page_index - 1) * page_size
    page_rows = raw[start:start + page_size]
    result = []
    for osd in page_rows:
        LOG.info("DEVICE:%s" % osd.device.keys())
        device = osd.device
        result.append({
            "id": osd.id,
            "osd_name": osd.osd_name,
            "vsm_status": osd.operation_status,
            "osd_state": osd.state,
            "crush_weight": osd.weight,
            # KB -> MB; 0 when the device reports no figure.
            "capacity_total": int(device['total_capacity_kb'] / 1024) if device['total_capacity_kb'] else 0,
            "capacity_used": int(device['used_capacity_kb'] / 1024) if device['used_capacity_kb'] else 0,
            "capacity_avail": int(device['avail_capacity_kb'] / 1024) if device['avail_capacity_kb'] else 0,
            "capacity_percent_used": device['used_capacity_kb'] * 100 / device['total_capacity_kb'] if device['total_capacity_kb'] else 0,
            "server": osd.service['host'],
            "storage_group": osd.storage_group['name'],
            "zone": osd.zone,
            "updated_at": get_time_delta(osd.updated_at),
            "pageCount": page_count,
            "pageIndex": page_index,
            "pagerCount": pager_count,
            "pagerIndex": pager_index,
            "deviceInfo": "",
        })
    return result