def get_by_name(self, req):
    """Get one vsm setting by name."""
    context = req.environ['vsm.context']
    search_opts = {}
    search_opts.update(req.GET)
    LOG.debug('search options %s' % search_opts)

    vsm_name = search_opts.pop('name', None)
    if not vsm_name:
        raise exc.HTTPBadRequest(
            explanation=_('Invalid request: vsm name is required.'))

    try:
        utils.check_string_length(vsm_name, 'name',
                                  min_length=1, max_length=255)
        setting = db.vsm_settings_get_by_name(context, vsm_name)
    except db_exc.DBError as e:
        raise exc.HTTPServerError(explanation=e.message)
    except exception.InvalidInput as e:
        raise exc.HTTPBadRequest(explanation=e.message)

    if not setting:
        raise exc.HTTPNotFound(
            explanation=_('The vsm setting (%s) does not exist.') % vsm_name)

    return self._view_builder.basic(req, setting)
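# Illustrative only: a minimal sketch of driving get_by_name() above from a test
# with a webob request. SettingsController and make_test_context() are placeholder
# names for this sketch, not identifiers from this module; the 'vsm.context'
# environ key and the ?name= query parameter are taken from the handler above.
def _example_get_setting_by_name(setting_name):
    import webob

    req = webob.Request.blank('/settings/get_by_name?name=%s' % setting_name)
    req.environ['vsm.context'] = make_test_context()  # assumed test helper
    return SettingsController().get_by_name(req)      # assumed controller class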
def settings_get_by_name(self, context, name):
    return db.vsm_settings_get_by_name(context, name)
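# Illustrative only: a minimal sketch (not part of the original module) showing how
# the helper above can be used to read the 'osd_pool_default_size' setting that
# create() below falls back to when a storage group reports size == 0. Settings
# rows expose their payload via the `value` attribute, as in create().
def _example_default_pool_size(self, context):
    setting = self.settings_get_by_name(context, 'osd_pool_default_size')
    return int(setting.value)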
def create(self, req, body=None):
    """Create a storage pool."""
    LOG.info(body)
    # Sample request body:
    # {'pool':
    #     {'replicationFactor': 3,
    #      'name': 'test',
    #      'enablePoolQuota': False,
    #      'storageGroupId': '1',
    #      'replicatedStorageGroupId': '1',
    #      'clusterId': '0',
    #      'tag': 'abc',
    #      'createdBy': 'VSM',
    #      'ecProfileId': '1',
    #      'ecFailureDomain': 'osd',
    #      'poolQuota': 0
    #     }
    # }
    if not self.is_valid_body(body, 'pool'):
        raise exc.HTTPUnprocessableEntity()

    context = req.environ['vsm.context']
    pool_dict = body['pool']

    for key in ('name', 'createdBy', 'storageGroupName'):
        if key not in pool_dict:
            msg = _("%s is not defined in pool") % key
            raise exc.HTTPBadRequest(explanation=msg)

    name = pool_dict['name'].strip()
    created_by = pool_dict['createdBy'].strip()
    storage_group_name = pool_dict['storageGroupName']
    tag = pool_dict['tag'].strip()
    cluster_id = pool_dict['clusterId']
    try:
        cluster_id = int(str(cluster_id))
    except ValueError:
        msg = _('cluster_id must be an integer value')
        raise exc.HTTPBadRequest(explanation=msg)

    storage_group = db.storage_group_get_by_name(context, storage_group_name)
    rule_id = storage_group['rule_id']
    storage_group_id = storage_group['id']

    # Fall back to the cluster-wide osd_pool_default_size setting when the
    # storage group does not report a replica size of its own.
    size = int(db.get_size_by_storage_group_name(context, storage_group_name))
    if size == 0:
        pool_default_size = db.vsm_settings_get_by_name(
            context, 'osd_pool_default_size')
        size = int(pool_default_size.value)
    #LOG.info('size=====%s' % size)
    #osd_num = 2  #TODO self.scheduler_api.get_osd_num_from_crushmap_by_rule(context, rule_id)

    is_ec_pool = pool_dict.get('ecProfileId')
    if is_ec_pool:
        # erasure coded pool
        body_info = {
            'name': name,
            'cluster_id': cluster_id,
            'storage_group_id': storage_group_id,
            'storage_group_name': storage_group_name,
            'ec_profile_id': pool_dict['ecProfileId'],
            'ec_ruleset_root': storage_group['name'],
            'ec_failure_domain': pool_dict['ecFailureDomain'],
            'created_by': created_by,
            'tag': tag,
        }
    else:
        # replicated pool
        crush_ruleset = rule_id  #self.conductor_api.get_ruleset_id(context, storage_group_id)
        if crush_ruleset < 0:
            msg = _('crush_ruleset must be a non-negative integer value')
            raise exc.HTTPBadRequest(explanation=msg)

        #size = pool_dict['replicationFactor']
        #replica_storage_group_id = pool_dict['replicatedStorageGroupId']
        #try:
        #    size = int(str(size))
        #    if size < 1:
        #        msg = _('size must be > 1')
        #        raise exc.HTTPBadRequest(explanation=msg)
        #
        #    host_num = self.conductor_api.count_hosts_by_storage_group_id(context, storage_group_id)
        #    LOG.info("storage_group_id:%s,host_num:%s", storage_group_id, host_num)
        #    if size > host_num:
        #        msg = "The replication factor must be less than or equal to the number of storage nodes in the specific storage group in cluster!"
        #        return {'message': msg}
        #except ValueError:
        #    msg = _('size must be an integer value')
        #    raise exc.HTTPBadRequest(explanation=msg)

        #pg_num = self._compute_pg_num(context, osd_num, size)
        #vsm_id = str(uuid.uuid1()).split('-')[0]

        # pg_num is currently fixed at 64 unless the request caps it via
        # auto_growth_pg (see the sizing sketch after this method).
        pg_num = 64
        auto_growth_pg = pool_dict.get("auto_growth_pg", 0)
        if auto_growth_pg and 0 < int(auto_growth_pg) < pg_num:
            pg_num = int(auto_growth_pg)
        #self._compute_pg_num(context, osd_num, size)
        body_info = {
            'name': name,  #+ "-vsm" + vsm_id,
            'cluster_id': cluster_id,
            'storage_group_id': storage_group_id,
            'storage_group_name': storage_group_name,
            'pool_type': 'replicated',
            'crush_ruleset': crush_ruleset,
            'pg_num': pg_num,
            'pgp_num': pg_num,
            'size': size,
            'min_size': size,
            'created_by': created_by,
            'tag': tag,
        }

    body_info.update({
        "quota": pool_dict.get("poolQuota"),
        "enable_quota": pool_dict.get("enablePoolQuota"),
        "max_pg_num_per_osd": pool_dict.get("max_pg_num_per_osd") or 100,
        "auto_growth_pg": pool_dict.get("auto_growth_pg") or 0,
    })
    #LOG.info('body_info=====%s' % body_info)
    return self.scheduler_api.create_storage_pool(context, body_info)
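# Illustrative only: create() hard-codes pg_num = 64 and leaves the
# self._compute_pg_num(context, osd_num, size) call commented out. A helper
# following the common Ceph sizing guideline (target roughly 100 placement groups
# per OSD, divided by the replica count and rounded up to a power of two) could
# look like the sketch below; this implementation is an assumption, not the
# original VSM logic.
def _compute_pg_num_sketch(osd_num, size, pgs_per_osd=100, minimum=64):
    if osd_num <= 0 or size <= 0:
        return minimum
    target = osd_num * pgs_per_osd // size
    pg_num = 1
    while pg_num < target:
        pg_num *= 2
    return max(pg_num, minimum)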