def _get_disk_type_list(self):
    """Return the storage_class value of every storage group.

    May contain duplicates; returns [] when there are no groups.
    """
    group_list = db.storage_group_get_all(self._context)
    # Comprehension replaces the manual append loop; `or []` keeps the
    # original behavior of returning [] for an empty/None result.
    return [item['storage_class'] for item in group_list or []]
def _judge_drive_ext_threshold(self, context):
    """Grow pool pg/pgp counts when a storage group has doubled its OSDs.

    For every storage group whose OSD count has reached twice the recorded
    drive_extended_threshold, recompute pg_num for each pool on the group's
    ruleset and, if larger, push the new pg/pgp numbers to the active
    monitor.  The threshold is then re-baselined to the current OSD count
    so the same growth is not acted on twice.
    """
    storage_groups = db.storage_group_get_all(context)
    active_monitor = self._get_active_monitor(context)
    pool_state_dirty = False
    for group in storage_groups:
        osd_num = db.osd_state_count_by_storage_group_id(context,
                                                         group['id'])
        threshold_crossed = False
        if osd_num >= 2 * group['drive_extended_threshold']:
            for pool in db.pool_get_by_ruleset(context, group['rule_id']):
                pg_num = self._compute_pg_num(context, osd_num,
                                              pool['size'])
                if pg_num > pool['pg_num']:
                    pool_state_dirty = True
                    threshold_crossed = True
                    self._agent_rpcapi.set_pool_pg_pgp_num(
                        context, active_monitor['host'],
                        pool['name'], pg_num, pg_num)
        if threshold_crossed:
            # Record the new baseline OSD count in the DB.
            db.storage_group_update(
                context, group['id'],
                {'drive_extended_threshold': osd_num})
            LOG.info("update storage_group drive extended threshold")
    if pool_state_dirty:
        # At least one pool changed; refresh pool state via the monitor.
        LOG.info("update_pool_state")
        self._agent_rpcapi.update_pool_state(context,
                                             active_monitor['host'])
def get_mapping(self, context):
    """Return a mapping of storage-group rule_id -> friendly_name."""
    LOG.info('get_mapping in conductor manager')
    groups = db.storage_group_get_all(context)
    if not groups:
        return {}
    # Build the mapping in one pass; later groups with the same rule_id
    # overwrite earlier ones, exactly as the original loop did.
    return dict((g['rule_id'], g['friendly_name']) for g in groups)
def _update_drive_ext_threshold(self, context):
    """Reset each storage group's drive_extended_threshold to its current OSD count."""
    for group in db.storage_group_get_all(context):
        drive_num = db.osd_state_count_by_storage_group_id(context,
                                                           group['id'])
        db.storage_group_update(context, group['id'],
                                {'drive_extended_threshold': drive_num})
def _get_storage_group_and_class_list(self):
    """Return (storage_group_list, storage_class_list).

    storage_group_list is a list of dicts with the keys friendly_name,
    name, rule_id and storage_class; storage_class_list is the parallel
    list of each group's storage_class (may contain duplicates).
    """
    # Renamed from `list`, which shadowed the builtin.
    groups = db.storage_group_get_all(self._context)
    storage_group_list = []
    storage_class_list = []
    for item in groups or []:
        storage_group_list.append({
            'friendly_name': item['friendly_name'],
            'name': item['name'],
            'rule_id': item['rule_id'],
            'storage_class': item['storage_class'],
        })
        storage_class_list.append(item['storage_class'])
    return storage_group_list, storage_class_list
def get_storage_group_list(self, context):
    """Return {group_id: group_name} for "IN" groups that own at least one OSD."""
    LOG.info('get_storage_group_list in conductor manager')
    storage_group_list = db.storage_group_get_all(context)
    storage_group_list = [x for x in storage_group_list
                          if x.status == "IN"]
    # Count OSDs per storage group in a single pass instead of re-scanning
    # the whole OSD list for every group (was O(groups * osds)).
    osd_counts = {}
    for osd in db.osd_get_all(context):
        gid = osd['storage_group_id']
        osd_counts[gid] = osd_counts.get(gid, 0) + 1
    group_list = {}
    for group in storage_group_list:
        if osd_counts.get(group['id'], 0) > 0:
            group_list[group['id']] = group['name']
    return group_list
def host_storage_groups_devices(self, context, init_node_id):
    """Annotate each device of the node with its matching storage group.

    Matching is by storage_class; when several groups share a class the
    last one in the list wins (no early break), as before.
    """
    # TODO all code refered is used as type = 'storage'
    # What type means?
    # Need to put the implementation into DB or manager.
    LOG.info(' conductor api:host_storage_groups_devices()')
    devices = self.host_devices_by_init_node_id(context, init_node_id)
    groups = db.storage_group_get_all(context)
    for device in devices:
        for group in groups:
            if group['storage_class'] == device['storage_class']:
                device['storage_group_id'] = group['id']
                # TODO should we use storage_group_name instead?
                device['storage_group'] = group['name']
                device['storage_group_name'] = group['name']
    return devices
def get_storage_group_list(self, context):
    """Return {group_id: group_name} for "IN" groups, one entry per distinct name.

    When several groups share a name only the first one encountered is kept.
    """
    LOG.info('get_storage_group_list in conductor manager')
    storage_group_list = db.storage_group_get_all(context)
    storage_group_list = [x for x in storage_group_list
                          if x.status == "IN"]
    # Removed the large block of commented-out per-OSD counting code, and
    # replaced the O(n) list membership test with a set.
    group_list = {}
    seen_names = set()
    for group in storage_group_list:
        if group['name'] not in seen_names:
            seen_names.add(group['name'])
            group_list[group['id']] = group['name']
    return group_list
def ceph_node_info(self, context, init_node_id):
    """Describe every OSD hosted on the given init node.

    Returns a list of dicts with osd_state_id, osd_state_name,
    storage_group_name, host and zone.
    """
    init_node = db.init_node_get_by_id(context, init_node_id)
    zone = db.zone_get_by_id(context, init_node['zone_id'])
    storage_class_list = db.\
        device_get_distinct_storage_class_by_service_id(
            context, init_node['service_id'])
    storage_group_list = db.storage_group_get_all(context)
    # Renamed from `list`/`dict`, which shadowed builtins.
    matched_groups = []
    for storage_class in storage_class_list:
        for storage_group in storage_group_list:
            if storage_class == storage_group['storage_class']:
                matched_groups.append({
                    'storage_group_id': storage_group['id'],
                    'storage_group_name': storage_group['name'],
                })
                break
        # BUG FIX: the original appended an empty dict when no group
        # matched the class, which raised KeyError('storage_group_id')
        # below; unmatched classes are now simply skipped.
    final_list = []
    for item in matched_groups:
        osd_state_list = db.\
            osd_state_get_by_service_id_and_storage_group_id(
                context, init_node['service_id'],
                item['storage_group_id'])
        if not osd_state_list:
            continue
        for osd_state in osd_state_list:
            final_list.append({
                'osd_state_id': osd_state['id'],
                'osd_state_name': osd_state['osd_name'],
                'storage_group_name': item['storage_group_name'],
                'host': init_node['host'],
                'zone': zone['name'],
            })
    return final_list
def storage_group_get_all(self, context):
    """Thin pass-through to the DB layer: return all storage groups."""
    return db.storage_group_get_all(context)
class ConductorManager(manager.Manager):
    """Chooses a host to create storages."""

    RPC_API_VERSION = '1.2'

    def __init__(self, service_name=None, *args, **kwargs):
        #if not scheduler_driver:
        #    scheduler_driver = FLAGS.scheduler_driver
        #self.driver = importutils.import_object(scheduler_driver)
        super(ConductorManager, self).__init__(*args, **kwargs)

    def init_host(self):
        """Hook called when the service host starts; nothing to do yet."""
        LOG.info('init_host in manager ')

    def test_service(self, context):
        """Round-trip check that the conductor RPC path works."""
        LOG.info(' test_service in conductor')
        return {'key': 'test_server_in_conductor'}

    def check_poolname(self, context, poolname):
        """Return 1 if a pool with this name already exists, else 0."""
        pool_list = db.pool_get_all(context)
        if pool_list:
            for pool in pool_list:
                if pool['name'] == poolname:
                    return 1
        return 0

    def create_storage_pool(self, context, body):
        """Create a pool record; cluster_id is hard-coded to 1 for now."""
        #TO BE DONE
        body['cluster_id'] = 1
        res = db.pool_create(context, body)
        return res

    def update_storage_pool(self, context, pool_id, values):
        """Update a pool by id."""
        return db.pool_update(context, pool_id, values)

    def update_storage_pool_by_name(self, context, pool_name, cluster_id,
                                    values):
        """Update a pool looked up by (name, cluster_id)."""
        return db.pool_update_by_name(context, pool_name, cluster_id,
                                      values)

    def get_osd_num(self, context, group_id):
        """Count non-deleted OSDs belonging to the given storage group."""
        osds = db.osd_get_all(context)
        osd_num = 0
        for osd in osds:
            # Explicit == False kept: None/other falsy values must not match.
            if osd['deleted'] == False and \
                    str(osd['storage_group_id']) == str(group_id):
                osd_num = osd_num + 1
        return osd_num

    def list_storage_pool(self, context):
        """Return all pools keyed by their id."""
        LOG.info('list_storage_pool in conductor manager')
        pool_list = db.pool_get_all(context)
        pool_list_dict = {}
        if pool_list:
            for pool in pool_list:
                pool_list_dict[pool['id']] = pool
        return pool_list_dict

    def destroy_storage_pool(self, context, pool_name):
        """Delete a pool by name; no-op on an empty name."""
        if pool_name:
            db.pool_destroy(context, pool_name)

    def get_storage_group_list(self, context):
        """Return {group_id: name} for "IN" groups that own at least one OSD."""
        LOG.info('get_storage_group_list in conductor manager')
        storage_group_list = db.storage_group_get_all(context)
        storage_group_list = [x for x in storage_group_list
                              if x.status == "IN"]
        osds = db.osd_get_all(context)
        group_list = {}
        if storage_group_list:
            for group in storage_group_list:
                osd_num = 0
                for osd in osds:
                    if osd['storage_group_id'] == group['id']:
                        osd_num = osd_num + 1
                if osd_num > 0:
                    group_list[group['id']] = group['name']
        return group_list

    def get_server_list(self, context):
        """Return all init nodes; on a cluster ERROR, stamp it as their status."""
        LOG.info('get_server_list in conductor manager')
        server_list = db.init_node_get_all(context)
        ret = self._set_error(context)
        # `ret` is loop-invariant, so test it once instead of per node.
        if ret:
            for ser in server_list:
                ser['status'] = ret
        return server_list

    def _set_error(self, context):
        """Return the cluster's ERROR health line, or None when healthy."""
        summary = db.summary_get_by_cluster_id_and_type(context, 1,
                                                        'cluster')
        if summary:
            sum_data = json.loads(summary['summary_data'])
            h_list = sum_data.get('health_list')
            # Guard against a missing/empty health_list: the original
            # called len(None) and raised TypeError in that case.
            if h_list and h_list[0].find('ERROR') != -1:
                return h_list[0]
        return None

    def ceph_error(self, context):
        """Public wrapper around _set_error."""
        return self._set_error(context)

    def get_cluster_list(self, context):
        """Return all init nodes; mark them 'unavailable' on cluster ERROR."""
        LOG.info('get_server_list in conductor manager')
        cluster_list = db.init_node_get_all(context)
        ret = self._set_error(context)
        if ret:
            for ser in cluster_list:
                ser['status'] = 'unavailable'
        return cluster_list

    def get_server(self, context, id):
        """Return one init node; mark it 'unavailable' on cluster ERROR."""
        LOG.info('get_server_list in conductor manager')
        server = db.init_node_get(context, id)
        ret = self._set_error(context)
        if ret:
            server['status'] = 'unavailable'
        LOG.info("CEPH_LOG log server %s" % server)
        return server

    def get_zone_list(self, context):
        """Return all zones."""
        LOG.info('get_zone_list in conductor manager')
        zone_list = db.zone_get_all(context)
        LOG.info("CEPH_LOG log server_list %s" % zone_list)
        return zone_list

    def get_mapping(self, context):
        """Return a mapping of storage-group rule_id -> friendly_name."""
        LOG.info('get_mapping in conductor manager')
        storage_group_list = db.storage_group_get_all(context)
        mapping = {}
        if storage_group_list:
            for group in storage_group_list:
                mapping[group['rule_id']] = group['friendly_name']
        return mapping

    def get_ruleset_id(self, context, group_id):
        """Return the crush rule id of a storage group."""
        LOG.info("Get ruleset id via storage_group id.")
        storage_group = db.storage_group_get(context, group_id)
        return storage_group['rule_id']

    def count_hosts_by_storage_group_id(self, context, storage_group_id):
        """Count distinct service ids (hosts) with OSDs in this group."""
        return db.osd_state_count_service_id_by_storage_group_id(
            context, storage_group_id)

    def init_node_get_by_host(self, context, host):
        """Get init node by host name."""
        return db.init_node_get_by_host(context, host)

    def init_node_get_by_cluster_id(self, context, cluster_id):
        """Get init node by cluster id."""
        return db.init_node_get_by_cluster_id(context, cluster_id)

    def init_node_get_cluster_nodes(self, context, init_node_id):
        """Get cluster nodes by id"""
        return db.init_node_get_cluster_nodes(context, init_node_id)

    #init_node
    def init_node_get_by_id_and_type(self, context, id, type):
        """Get init node by id and type."""
        init_node = db.init_node_get_by_id_and_type(context, id, type)
        return init_node

    def init_node_get_by_id(self, context, id):
        """Get init node by id."""
        init_node = db.init_node_get_by_id(context, id)
        return init_node

    def init_node_create(self, context, values):
        """Create an init node record."""
        return db.init_node_create(context, values)

    def init_node_update(self, context, id, values):
        """Update an init node record."""
        return db.init_node_update(context, id, values)

    def init_node_get_by_primary_public_ip(self, context,
                                           primary_public_ip):
        """Get init node by primary public IP."""
        return db.init_node_get_by_primary_public_ip(context,
                                                     primary_public_ip)

    def init_node_get_by_secondary_public_ip(self, context,
                                             secondary_public_ip):
        """Get init node by secondary public IP."""
        return db.init_node_get_by_secondary_public_ip(
            context, secondary_public_ip)

    def init_node_get_by_cluster_ip(self, context, cluster_ip):
        """Get init node by cluster IP."""
        return db.init_node_get_by_cluster_ip(context, cluster_ip)

    def init_node_update_status_by_id(self, context, init_node_id, status):
        """ConductorManager update the status of init node."""
        return db.init_node_update_status_by_id(context, init_node_id,
                                                status)

    #osd_state
    def osd_get(self, context, osd_id):
        """Get an OSD by id."""
        return db.osd_get(context, osd_id)

    def osd_delete(self, context, osd_id):
        """Soft-delete an OSD by id."""
        return db.osd_delete(context, osd_id)

    def osd_remove(self, context, osd_id):
        """Remove an OSD by id."""
        return db.osd_remove(context, osd_id)

    def osd_state_get_all(self, context, limit=None, marker=None,
                          sort_keys=None, sort_dir=None):
        """Return OSD states with optional pagination/sorting."""
        all_osd = db.osd_state_get_all(context, limit, marker, sort_keys,
                                       sort_dir)
        return all_osd

    def osd_state_get_by_name(self, context, name):
        """Get an OSD state by osd name."""
        return db.osd_state_get_by_name(context, name)

    def osd_state_create(self, context, values):
        """Create an OSD state, or revive/update an existing record.

        If a record with the same (osd_name, service_id, cluster_id)
        exists it is un-deleted and updated instead of inserted.
        """
        LOG.info('ADD_OSD values = %s' % values)
        result = db.osd_state_get_by_osd_name_and_service_id_and_cluster_id(
            context, values['osd_name'], values['service_id'],
            values['cluster_id'])
        LOG.info('ADD_OSD result = %s' % result)
        if not result:
            LOG.info('ADD_OSD result is None')
            return db.osd_state_create(context, values)
        else:
            LOG.info('ADD_OSD result is ok')
            values['id'] = result['id']
            values['deleted'] = 0
            return db.osd_state_update(context, values['id'], values)

    def osd_state_update(self, context, values):
        """Update the OSD state matching values['osd_name'], if any.

        Returns None implicitly when no matching record exists.
        """
        osd_ref = db.osd_state_get_by_name(context, values['osd_name'])
        if osd_ref:
            osd_state = db.osd_state_update(context, osd_ref['id'], values)
            return osd_state

    def osd_state_update_or_create(self, context, values, create=None):
        """Update the OSD state by name, creating it when absent.

        NOTE: when no record exists `create` is forced to True, so the
        `create is None` and final `else` branches below are currently
        unreachable; they are kept for interface compatibility.
        """
        osd_ref = db.osd_state_get_by_name(context, values['osd_name'])
        if osd_ref:
            osd_state = db.osd_state_update(context, osd_ref['id'], values)
            return osd_state
        else:
            create = True
        if create is None:
            osd_state = db.osd_state_update_or_create(context, values)
        elif create == True:
            osd_state = db.osd_state_create(context, values)
        else:
            osd_state_ref = db.osd_state_get_by_name(context,
                                                     values['osd_name'])
            if osd_state_ref:
                values['id'] = osd_state_ref.id
                osd_state = db.osd_state_update(context, values['id'],
                                                values)
            else:
                return None
        return osd_state

    def osd_state_count_by_init_node_id(self, context, init_node_id):
        """Count OSD states attached to an init node."""
        return db.osd_state_count_by_init_node_id(context, init_node_id)

    def osd_state_get_by_service_id_and_storage_group_id(self, context,
                                                         service_id,
                                                         storage_group_id):
        """Get OSD states by (service_id, storage_group_id)."""
        return db.osd_state_get_by_service_id_and_storage_group_id(
            context, service_id, storage_group_id)

    def osd_state_get_by_service_id(self, context, service_id):
        """Get OSD states by service id."""
        return db.osd_state_get_by_service_id(context, service_id)

    def osd_state_get_by_osd_name_and_service_id_and_cluster_id(
            self, context, osd_name, service_id, cluster_id):
        """Get the OSD state matching (osd_name, service_id, cluster_id)."""
        return db.osd_state_get_by_osd_name_and_service_id_and_cluster_id(
            context, osd_name, service_id, cluster_id)

    #device
    def device_get_all(self, context):
        """Return all devices; on cluster ERROR overwrite their states with it."""
        error = self._set_error(context)
        all_devices = db.device_get_all(context)
        if not error:
            return all_devices
        for dev in all_devices:
            dev['state'] = error
            dev['journal_state'] = error
        return all_devices

    def device_get_by_hostname(self, context, hostname):
        """Return the devices of the node with this hostname, or None."""
        init_node = db.init_node_get_by_hostname(context, hostname)
        if init_node:
            service_id = init_node['service_id']
            device_list = db.device_get_by_service_id(context, service_id)
            if device_list:
                return device_list
            else:
                return None

    def device_create(self, context, values):
        """Create a device record."""
        return db.device_create(context, values)

    def device_update_or_create(self, context, values, create=None):
        """Update or create a device depending on the `create` flag.

        create=None -> upsert; create=True -> insert; otherwise update
        the record identified by values['id'].
        """
        if create is None:
            device = db.device_update_or_create(context, values)
        elif create is True:
            device = db.device_create(context, values)
        else:
            device = db.device_update(context, values['id'], values)
        return device

    def device_get_all_by_service_id(self, context, service_id):
        """Return all devices of a service."""
        return db.device_get_all_by_service_id(context, service_id)

    def device_get_distinct_storage_class_by_service_id(self, context,
                                                        service_id):
        """Return the distinct storage classes present on a service."""
        return db.device_get_distinct_storage_class_by_service_id(
            context, service_id)

    def device_get_by_name_and_journal_and_service_id(self, context,
                                                      name, journal,
                                                      service_id):
        """Return the device matching (name, journal, service_id)."""
        return db.device_get_by_name_and_journal_and_service_id(
            context, name, journal, service_id)

    #storage_group
    def storage_group_get_all(self, context):
        """Return all storage groups."""
        return db.storage_group_get_all(context)

    def create_storage_group(self, context, values):
        """Create a storage group unless its name already exists.

        Returns True on success, False on empty values or duplicate name.
        """
        if values is None:
            LOG.warn("Error: Empty values")
            try:
                raise exception.GetNoneError
            # Py3-compatible `as` syntax (was the Python-2-only
            # `except exception.GetNoneError, e:`).
            except exception.GetNoneError as e:
                LOG.error("%s:%s", e.code, e.message)
            return False
        res = db.storage_group_get_all(context)
        name_list = []
        for item in res:
            name_list.append(item['name'])
        if values['name'] not in name_list:
            db.storage_group_create(context, values)
        else:
            LOG.info('Warnning: name exists in table %s' % values['name'])
            return False
        return True