def drop_pool(self, params={}):
    rc = msg_pds.ResponseCode()
    rc.retcode = msg_pds.RC_SUCCESS
    pool_name = str(params['pool_name'])
    disk_id = params['disk_id']
    disk_part = params['disk_part']
    # Delete the pool. A "not added in PAL" error means it is already gone,
    # so that case is not treated as a failure.
    e, res = apipal.Pool().del_pool(pool_name)
    if e and res.find("not added in PAL") == -1:
        rc.retcode = msg_ios.RC_IOS_PAL_POOL_DROP_FAILED
        rc.message = "pal pool drop failed:%s" % res
        return rc, ''
    time.sleep(1)
    # Remove the backing disk from PAL.
    part = common.GetPartInfo(disk_id, disk_part)
    if part is not None:
        e, res = apipal.Disk().del_disk(part['DEVNAME'])
        if e and res.find("not added in PAL") == -1:
            logger.run.error("Drop pal disk failed,%s:%s:%s" %
                             (part['DEVNAME'], e, res))
    return rc, ''

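# Usage sketch for drop_pool, documenting the expected params keys (the
# concrete values below are hypothetical):
#
#   rc, _ = self.drop_pool({'pool_name': 'pool0',
#                           'disk_id': 'disk-0001',
#                           'disk_part': 1})
#   if rc.retcode != msg_pds.RC_SUCCESS:
#       logger.run.error(rc.message)
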
def GetPoolExportInfoByName(pool_name):
    e, pool_list = apipal.Pool().get_pool_list()
    if e:
        return None
    # Collect the pool's basic export info.
    pool_export_info = msg_pds.PoolExportInfo()
    for pool in pool_list:
        if pool.name() == pool_name:
            pool_export_info.pool_name = pool.name()
            pool_export_info.state_str = pool.state_str()
            pool_export_info.state = pool.state()
            pool_export_info.size = pool.cache_per_disk()
            pool_export_info.extent = pool.extent_sectors()
            pool_export_info.bucket = pool.bucket_sectors()
            pool_export_info.sippet = pool.sippet_sectors()
            pool_export_info.max_size = 0
            pool_export_info.dev_name.append(pool.get_disks()[0])
            pool_export_info.is_variable = True
            if apipal.POOL_LENGTH_FIXED == pool.len_mode():
                pool_export_info.is_variable = False
            if apipal.testBit(pool.state(), apipal.POOL_STATE_MIGRATING):
                pool_export_info.state_exp = "MIGRATING"
            context = pyudev.Context()
            # Compute the largest pool size the disk backing this pool can
            # provide. Best effort: on any failure max_size stays 0.
            try:
                attr = {}
                attr['extent'] = pool_export_info.extent
                attr['bucket'] = pool_export_info.bucket
                attr['sippet'] = pool_export_info.sippet
                blk_size = int(
                    pyudev.Devices.from_device_file(
                        context,
                        pool_export_info.dev_name[0]).attributes.get('size'))
                len_mode = apipal.POOL_LENGTH_FIXED
                if pool_export_info.is_variable:
                    len_mode = apipal.POOL_LENGTH_VARIABLE
                e, max_size = apipal.Pool().calc_max_pool_size(
                    blk_size, attr, len_mode)
                if not e:
                    pool_export_info.max_size = max_size
            except Exception:
                pass
            return pool_export_info
    return None

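# Usage sketch for GetPoolExportInfoByName (assumes a pool named "pool0"
# exists; the name is hypothetical):
#
#   info = GetPoolExportInfoByName('pool0')
#   if info is not None:
#       print(info.pool_name, info.state_str, info.max_size)
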
def SyncPoolDirtyThresh(self):
    pool_name_to_info = {}
    for pool_info in self.pool_infos:
        pool_name_to_info[pool_info.pool_name] = pool_info
    # Pools currently recognized by PAL.
    palpool = apipal.Pool()
    e, pal_pools = palpool.get_pool_list()
    if e:
        logger.run.error("Get pal pool list failed:%s" % pal_pools)
        return
    pal_pools_name = [pool.name() for pool in pal_pools]
    for pal_pool_name in pal_pools_name:
        if pal_pool_name not in pool_name_to_info:
            continue
        pool_info = pool_name_to_info[pal_pool_name]
        e, state = apipal.Pool().get_pool_stat(pal_pool_name)
        if e:
            logger.run.error("Get pal pool %s state failed:%s" %
                             (pal_pool_name, state))
            continue
        # Sync level: push the configured value if PAL has drifted.
        if state['sync_level'] != pool_info.sync_level:
            e, res = apipal.Pool().set_sync_level(pal_pool_name,
                                                  pool_info.sync_level)
            if e:
                logger.run.error("Set pool %s sync level failed:%s" %
                                 (pal_pool_name, res))
        # Dirty thresholds: only rewrite them when either bound drifts more
        # than 5 points from the configured value.
        if pool_info.HasField('dirty_thresh') and (
                abs(int(state['p_lower_thresh']) -
                    pool_info.dirty_thresh.lower) > 5 or
                abs(int(state['p_upper_thresh']) -
                    pool_info.dirty_thresh.upper) > 5):
            lower = pool_info.dirty_thresh.lower
            upper = pool_info.dirty_thresh.upper
            logger.run.info("Start set pool %s dirty thresh to %s:%s" %
                            (pal_pool_name, lower, upper))
            e, res = apipal.Pool().set_dirty_thresh(pal_pool_name, lower,
                                                    upper)
            if e:
                logger.run.error("Set pool %s dirty thresh failed:%s" %
                                 (pal_pool_name, res))
    return self.SyncTargetSkipThresh()

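# Worked example of the +/-5 drift tolerance above (illustrative numbers):
# with configured lower/upper of 30/70, reported p_lower_thresh/p_upper_thresh
# of 33/68 is within tolerance and left alone, while 30/76 exceeds the upper
# bound's tolerance and triggers set_dirty_thresh.
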
def SetPoolCacheModel(self):
    pool_name = self.request_body.pool_info.pool_name
    is_stop_through = self.request_body.is_stop_through
    assert (self.request_body.pool_cache_model ==
            msg_pds.POOL_CACHE_MODEL_WRITETHROUGH)
    # Switch the pool cache model from writeback to writethrough.
    e, res = apipal.Pool().wb2wt(pool_name, not is_stop_through)
    if e:
        self.response.rc.retcode = msg_ios.RC_IOS_PAL_POOL_CONFIG_FAILED
        self.response.rc.message = "set pool cache model failed:%s" % res
        self.SendResponse(self.response)
        return MS_FINISH
    self.response.rc.retcode = msg_pds.RC_SUCCESS
    self.SendResponse(self.response)
    return MS_FINISH

def INIT(self, request):
    self.response = MakeResponse(msg_ios.POOL_RESIZE_RESPONSE, request)
    self.request = request
    self.request_body = request.body.Extensions[msg_ios.pool_resize_request]
    if not g.is_ready:
        self.response.rc.retcode = msg_ios.RC_IOS_SERVICE_IS_NOT_READY
        self.response.rc.message = "IOS service is not ready"
        self.SendResponse(self.response)
        return MS_FINISH
    # First check whether the pool is migrating.
    e, pool = apipal.Pool().get_by_name(self.request_body.pool_info.pool_name)
    if e:
        logger.run.error("Can't find pool by name:%s" %
                         self.request_body.pool_info.pool_name)
        self.response.rc.retcode = msg_ios.RC_IOS_PAL_POOL_RESIZE_FAILED
        # On error the second return value carries the error text.
        self.response.rc.message = "pal pool resize failed:%s" % pool
        self.SendResponse(self.response)
        return MS_FINISH
    if apipal.testBit(pool.state(), apipal.POOL_STATE_MIGRATING):
        self.response.rc.retcode = msg_ios.RC_IOS_PAL_POOL_IS_MIGRATING
        self.response.rc.message = "pal pool is migrating, not support resize"
        self.SendResponse(self.response)
        return MS_FINISH
    e, res = apipal.Pool().resize_pool(self.request_body.pool_info.pool_name,
                                       self.request_body.size)
    if e:
        self.response.rc.retcode = msg_ios.RC_IOS_PAL_POOL_RESIZE_FAILED
        self.response.rc.message = "pal pool resize failed:%s" % res
        self.SendResponse(self.response)
        return MS_FINISH
    self.response.rc.retcode = msg_pds.RC_SUCCESS
    self.SendResponse(self.response)
    return MS_FINISH

def SetPoolSyncLevel(self):
    g.last_modify_time = int(time.time())
    pool_name = self.request_body.pool_info.pool_name
    sync_level = self.request_body.sync_level
    e, res = apipal.Pool().set_sync_level(pool_name, sync_level)
    if e:
        self.response.rc.retcode = msg_ios.RC_IOS_PAL_POOL_CONFIG_FAILED
        self.response.rc.message = "set pool sync level failed:%s" % res
        self.SendResponse(self.response)
        return MS_FINISH
    self.response.rc.retcode = msg_pds.RC_SUCCESS
    self.SendResponse(self.response)
    return MS_FINISH

def SetDirtyThresh(self):
    g.last_modify_time = int(time.time())
    pool_name = self.request_body.pool_info.pool_name
    lower = self.request_body.dirty_thresh.lower
    upper = self.request_body.dirty_thresh.upper
    e, res = apipal.Pool().set_dirty_thresh(pool_name, lower, upper)
    if e:
        self.response.rc.retcode = msg_ios.RC_IOS_PAL_POOL_CONFIG_FAILED
        self.response.rc.message = "set pool dirty thresh failed:%s" % res
        self.SendResponse(self.response)
        return MS_FINISH
    self.response.rc.retcode = msg_pds.RC_SUCCESS
    self.SendResponse(self.response)
    return MS_FINISH

def PreparePal(self):
    # Before syncing PAL, refresh the target ids and check the driver
    # modules.
    pal_ids = []
    pal_ids.extend(
        [palcache_info.pal_id for palcache_info in self.palcache_infos])
    pal_ids.extend(
        [palraw_info.pal_id for palraw_info in self.palraw_infos])
    pal_ids.extend(
        [palpmt_info.pal_id for palpmt_info in self.palpmt_infos])
    common.CheckDriverConfigure(pal_ids)
    palpool = apipal.Pool()
    paldisk = apipal.Disk()
    paltarget = apipal.Target()
    # Disks currently recognized by PAL.
    e, pal_disks = paldisk.get_disk_list()
    if e:
        logger.run.error("Get pal disk list failed:%s" % pal_disks)
        return
    pal_disks_path_name = [disk.path_name() for disk in pal_disks]
    # Targets currently recognized by PAL.
    e, pal_targets = paltarget.get_target_list()
    if e:
        logger.run.error("Get pal target list failed:%s" % pal_targets)
        return
    pal_targets_name = [target.name() for target in pal_targets]
    # Pools currently recognized by PAL.
    e, pal_pools = palpool.get_pool_list()
    if e:
        logger.run.error("Get pal pool list failed:%s" % pal_pools)
        return
    pal_pools_name = [pool.name() for pool in pal_pools]
    pool_id_to_info = {}
    todo_load_pool_disk = []
    todo_load_cache_disk = []
    todo_load_raw_disk = []
    todo_del_target = []
    todo_del_pool = []
    # Collect stale targets that are no longer in the configuration.
    cfg_target_name = []
    cfg_target_name.extend([
        palcache_info.palcache_name for palcache_info in self.palcache_infos
    ])
    cfg_target_name.extend(
        [palraw_info.palraw_name for palraw_info in self.palraw_infos])
    cfg_target_name.extend(
        [palpmt_info.palpmt_name for palpmt_info in self.palpmt_infos])
    for target_name in pal_targets_name:
        if target_name not in cfg_target_name:
            todo_del_target.append(target_name)
    # Collect stale pools that are no longer in the configuration.
    for pool_name in pal_pools_name:
        if pool_name not in [
                pool_info.pool_name for pool_info in self.pool_infos
        ]:
            todo_del_pool.append(pool_name)
    # Clean up the stale targets.
    for target_name in todo_del_target:
        logger.run.info("Auto del target %s" % target_name)
        e, res = paltarget.del_target(target_name)
        if e:
            logger.run.warning("Auto del target failed:%s" % res)
    # Clean up the stale pools.
    for pool_name in todo_del_pool:
        logger.run.info("Auto del pool %s" % pool_name)
        e, res = palpool.del_pool(pool_name)
        if e:
            logger.run.warning("Auto del pool failed:%s" % res)
    # Collect pool disks that have not been loaded yet.
    for pool_info in self.pool_infos:
        pool_id_to_info[pool_info.pool_id] = pool_info
        assert (len(pool_info.pool_disk_infos) == 1)
        # If the pool is already is_invalid or is_disable, do not try to
        # load its disk.
        if pool_info.is_invalid or pool_info.is_disable:
            logger.run.info(
                "Skip check pool %s disk for is invalid or is disable" %
                pool_info.pool_name)
            continue
        pool_disk_info = pool_info.pool_disk_infos[0]
        part = common.GetPartInfo(pool_disk_info.disk_id,
                                  pool_disk_info.disk_part, self.disk_list)
        if part is None:
            logger.run.warning("Pal pool %s disk miss" % pool_info.pool_name)
            continue
        if part['DEVNAME'] not in pal_disks_path_name:
            todo_load_pool_disk.append(part['DEVNAME'])
    # Collect palcache disks that have not been loaded yet.
    for palcache_info in self.palcache_infos:
        part = common.GetPartInfo(palcache_info.disk_id,
                                  palcache_info.disk_part, self.disk_list)
        if part is None:
            continue
        if part['DEVNAME'] not in pal_disks_path_name:
            todo_load_cache_disk.append({
                "dev_name": part['DEVNAME'],
                "pool_id": palcache_info.pool_id
            })
    # Collect palraw disks that have not been loaded yet.
    for palraw_info in self.palraw_infos:
        part = common.GetPartInfo(palraw_info.disk_id, palraw_info.disk_part,
                                  self.disk_list)
        if part is None:
            continue
        if part['DEVNAME'] not in pal_disks_path_name:
            todo_load_raw_disk.append({"dev_name": part['DEVNAME']})
    # Load every pool disk that still needs loading.
    for dev_name in todo_load_pool_disk:
        logger.run.info("Auto load pool disk %s" % dev_name)
        e, res = paldisk.load_disk(dev_name)
        if e:
            logger.run.error("Load pool disk failed %s:%s" % (dev_name, res))
    # Re-fetch the pools PAL currently recognizes and record the ones still
    # in the loading state.
    e, pal_pools = palpool.get_pool_list()
    if e:
        logger.run.error("Get pal pool list failed:%s" % pal_pools)
        return
    loading_pal_pools_id = [
        pool.uuid() for pool in pal_pools
        if apipal.testBit(pool.state(), apipal.POOL_STATE_LOADING)
    ]
    for todo_info in todo_load_cache_disk:
        dev_name = todo_info['dev_name']
        pool_id = todo_info['pool_id']
        # If the pool behind this target is marked disabled, load the disk
        # in raw mode.
        if pool_id in pool_id_to_info and \
                pool_id_to_info[pool_id].is_disable:
            logger.run.info(
                "Start load disk %s by raw for disable pool %s" %
                (dev_name, pool_id_to_info[pool_id].pool_name))
            e, res = paldisk.load_disk(dev_name, 'raw')
            if e:
                logger.run.error("Load cache disk failed %s:%s" %
                                 (dev_name, res))
            continue
        # If the pool behind this target is marked for rebuild, load the
        # disk in cache mode.
        if pool_id in pool_id_to_info and \
                pool_id_to_info[pool_id].is_rebuild and \
                not pool_id_to_info[pool_id].is_invalid:
            logger.run.info("Start load disk %s by rebuild pool %s" %
                            (dev_name, pool_id_to_info[pool_id].pool_name))
            e, res = paldisk.load_disk(dev_name, 'cache',
                                       pool_id_to_info[pool_id].pool_name)
            if e:
                logger.run.error("Load cache disk failed %s:%s" %
                                 (dev_name, res))
            continue
        # Load the remaining target disks, but only those whose pool is in
        # the loading state.
        if pool_id not in loading_pal_pools_id:
            logger.run.info("Skip load disk %s by not loading pool %s" %
                            (dev_name, pool_id))
            continue
        logger.run.info("Auto load cache disk %s" % dev_name)
        e, res = paldisk.load_disk(dev_name)
        if e:
            logger.run.error("Load cache disk failed %s:%s" %
                             (dev_name, res))
    for todo_info in todo_load_raw_disk:
        dev_name = todo_info['dev_name']
        logger.run.info("Auto load raw disk %s" % dev_name)
        e, res = paldisk.load_disk(dev_name)
        if e:
            logger.run.error("Load raw disk failed %s:%s" % (dev_name, res))
    return self.SyncPoolDirtyThresh()

def INIT(self):
    if not g.is_ready:
        return MS_FINISH
    if g.platform['sys_mode'] == "database":
        return MS_FINISH
    e, pool_list = apipal.Pool().get_pool_list()
    if e:
        logger.run.error('Get pool list failed %s:%s' % (e, pool_list))
        return MS_FINISH
    # Collect each pool's basic info.
    mds_request = MakeRequest(msg_mds.HEARTBEAT_POOL_LIST_REQUEST)
    for pool in pool_list:
        pool_export_info = msg_pds.PoolExportInfo()
        pool_export_info.pool_name = pool.name()
        pool_export_info.state_str = pool.state_str()
        pool_export_info.state = pool.state()
        pool_export_info.size = pool.cache_per_disk()
        pool_export_info.extent = pool.extent_sectors()
        pool_export_info.bucket = pool.bucket_sectors()
        pool_export_info.sippet = pool.sippet_sectors()
        pool_export_info.max_size = 0
        pool_export_info.dev_name.append(pool.get_disks()[0])
        pool_export_info.is_variable = True
        if apipal.POOL_LENGTH_FIXED == pool.len_mode():
            pool_export_info.is_variable = False
        if apipal.testBit(pool.state(), apipal.POOL_STATE_MIGRATING):
            pool_export_info.state_exp = "MIGRATING"
        _pool_export_info = mds_request.body.Extensions[
            msg_mds.heartbeat_pool_list_request].pool_export_infos.add()
        _pool_export_info.CopyFrom(pool_export_info)
    # Collect each pool's statistics.
    for pool_export_info in mds_request.body.Extensions[
            msg_mds.heartbeat_pool_list_request].pool_export_infos:
        e, state = apipal.Pool().get_pool_stat(pool_export_info.pool_name)
        if e:
            logger.run.error('Get pool state failed %s:%s' %
                             (e, pool_export_info.pool_name))
            continue
        pool_export_info.valid = state['valid']
        pool_export_info.p_valid = state['p_valid']
        pool_export_info.dirty = state['dirty']
        pool_export_info.p_dirty = state['p_dirty']
        pool_export_info.error = state['error']
        pool_export_info.p_lower_thresh = state['p_lower_thresh']
        pool_export_info.lower_thresh = state['lower_thresh']
        pool_export_info.p_upper_thresh = state['p_upper_thresh']
        pool_export_info.upper_thresh = state['upper_thresh']
    context = pyudev.Context()
    # Compute the largest pool size the disk backing each pool can provide.
    for pool_export_info in mds_request.body.Extensions[
            msg_mds.heartbeat_pool_list_request].pool_export_infos:
        try:
            attr = {}
            attr['extent'] = pool_export_info.extent
            attr['bucket'] = pool_export_info.bucket
            attr['sippet'] = pool_export_info.sippet
            blk_size = int(
                pyudev.Devices.from_device_file(
                    context,
                    pool_export_info.dev_name[0]).attributes.get('size'))
            len_mode = apipal.POOL_LENGTH_FIXED
            if pool_export_info.is_variable:
                len_mode = apipal.POOL_LENGTH_VARIABLE
            e, max_size = apipal.Pool().calc_max_pool_size(
                blk_size, attr, len_mode)
            if e:
                continue
            pool_export_info.max_size = max_size
        except Exception:
            # Best effort: leave max_size at 0 if the device can't be
            # queried.
            pass
    self.SendRequest(g.mds_service.listen_ip, g.mds_service.listen_port,
                     mds_request, self.Entry_HeartBeatPoolList)
    return MS_CONTINUE
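
# Sketch of the max_size computation above (illustrative values; the sysfs
# 'size' attribute of a block device is its length in 512-byte sectors):
#
#   blk_size = 1953525168  # ~1 TB disk
#   attr = {'extent': pool_export_info.extent,
#           'bucket': pool_export_info.bucket,
#           'sippet': pool_export_info.sippet}
#   e, max_size = apipal.Pool().calc_max_pool_size(
#       blk_size, attr, apipal.POOL_LENGTH_VARIABLE)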