myParser.add_argument("--force", action='store_true', dest="force", default=False, help="force creation, even if Volume does not fit in given project") parseDefaultConfig(myParser) VD=VolumeLLA() VlD=VLDbLLA() FsD=FileSystemLLA() VolumeType = afs.defaultConfig.VolumeType if afs.defaultConfig.VolumeName[-9:] == ".readonly" : VolumeName = afs.defaultConfig.VolumeName[:-9] VolumeType = "RO" else : VolumeName = afs.defaultConfig.VolumeName VolSize=parseHumanWriteableSize(afs.defaultConfig.VolumeSize)/1024 PS=ProjectService() # we get a list of Projects, sorted by the Nesting Level. # only get the most specific one PrjObj=PS.getProjectsByVolumeName(VolumeName)[0] if afs.defaultConfig.ProjectName != None : thisPrjObj=PS.getProjectByName(afs.defaultConfig.ProjectName) if thisPrjObj == None : myParser.error("Project \"%s\" does not exist" % afs.defaultConfig.ProjectName) if thisPrjObj.id != PrjObj.id : if not afs.defaultConfig.force : myParser.error("VolumeName \"%s\" not matched by given Project \"%s\"\n" % (VolumeName,afs.defaultConfig.ProjectName)) sys.exit(1) PrjObj = thisPrjObj if not PrjObj :
Commands.add_argument('--importPrj', action='store_true')
Commands.add_argument('--addPrj', action='store_true')
Commands.add_argument('--rmPrj', action='store_true')
Commands.add_argument('--modifyPrj', action='store_true')
Commands.add_argument('--showFSList', action='store_true')
Commands.add_argument('--updateVolumeMappings', action='store_true')
Commands.add_argument('--showServerSpread', action='store_true')
Commands.add_argument('--updateServerSpread', action='store_true')
Commands.add_argument('--showStorageUsage', action='store_true')
parseDefaultConfig(myParser)

CS=CellService()
VS=OSDVolService()
FsS=OSDFsService()
PS=ProjectService()
DBM=DBManager()
CellInfo=CS.getCellInfo(cached=True)
CellInfo.FileServers=CellInfo.FileServers
PrjObj = Project()
defaultDict=PrjObj.getDict()

#
# dump/import :
# for serverpartitions, translate for dump into human readable.
# also dump raw server-uuids.
# for import only consider human readable,
# but fail if we find no UUID for the hostname.
#
if afs.defaultConfig.dumpPrj == True:
class TestProjectServiceCachedMethods(unittest.TestCase):
    """
    Tests ProjectService getter Methods
    """

    @classmethod
    def setUpClass(self):
        """
        setup ProjectService
        """
        sys.stderr.write("\nsetUpClass\n")
        self.PrjMng = ProjectService()
        self.test_config = ConfigParser()
        self.test_config.read(afs.CONFIG.setup)
        self.ProjectName = self.test_config.get("ProjectService", "ProjectName")
        self.ProjectDescription = self.test_config.get("ProjectService", "ProjectDescription")
        self.ProjectServerPart = tuple(self.test_config.get("ProjectService", "ServerPart").split(","))
        sys.stderr.write("ProjectName: %s\n" % self.ProjectName)
        if self.PrjMng.get_project_by_name(self.ProjectName) != None:
            sys.stderr.write("Test-project of this name already exists!\n")
            sys.stderr.write("Clean up the mess yourself.\n")
            sys.exit(1)
        self.Prj = self.PrjMng.create_project(self.ProjectName, self.ProjectDescription)
        return

    @classmethod
    def tearDownClass(self):
        """
        cleanup
        """
        sys.stderr.write("\ntearDownClass\n")
        #self.PrjMng.delete_project(self.Prj)
        return

    def test_get_project_by_name(self):
        Prj = self.PrjMng.get_project_by_name(self.ProjectName)
        #sys.stderr.write("\nPrj=%s\n" % Prj)
        self.assertTrue(type(Prj) != type(None))

    def test_add_remove_server_partitions(self):
        Prj = self.PrjMng.add_server_partition(self.Prj, self.ProjectServerPart, "RW")
        rw_serverparts, ro_serverparts = self.PrjMng.get_server_partitions(Prj)
        self.assertEqual(rw_serverparts, [self.ProjectServerPart])
        Prj = self.PrjMng.remove_server_partition(self.Prj, self.ProjectServerPart, "RW")
        rw_serverparts, ro_serverparts = self.PrjMng.get_server_partitions(Prj)
        self.assertEqual(rw_serverparts, [])

    def test_set_remove_parent(self):
        parent_project = self.PrjMng.create_project("parent_of_%s" % self.ProjectName, "test-parent")
        #sys.stderr.write("\nparent.db_id=%s\n" % parent_project.db_id)
        Prj = self.PrjMng.set_parent(self.Prj, parent_project)
        #sys.stderr.write("\nparent_db_id=%s\n" % Prj.parent_db_id)
        other_parent = self.PrjMng.get_parent(self.Prj)
        self.assertEqual(parent_project.db_id, other_parent.db_id)
        self.PrjMng.remove_parent(Prj)
        other_parent = self.PrjMng.get_parent(self.Prj)
        self.assertEqual(other_parent, None)

    def test_add_remove_location(self):
        Prj = self.PrjMng.add_location(self.Prj, self.ProjectName, "RW")
        self.assertEqual(Prj.rw_locations, [self.ProjectName])
        Prj = self.PrjMng.remove_location(Prj, self.ProjectName, "RW")
        self.assertEqual(Prj.rw_locations, [])

    def test_set_owner(self):
        Prj = self.PrjMng.set_owner(self.Prj, self.ProjectName)
        other_prj = self.PrjMng.get_project_by_name(self.Prj.name)
        self.assertEqual(other_prj.owner, self.ProjectName)
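# A minimal sketch of the "[ProjectService]" section these tests read from the
# file referenced by afs.CONFIG.setup; all values are placeholders, not taken
# from the source:
#
#   [ProjectService]
#   ProjectName = unittest_project
#   ProjectDescription = scratch project created by the ProjectService tests
#   ServerPart = afsfs01.example.org,a
#
# ServerPart is split on "," and turned into a (server, partition) tuple by
# setUpClass above.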
Commands.add_argument('--rmPrj', action='store_true')
Commands.add_argument('--modifyPrj', action='store_true')
Commands.add_argument('--showFSList', action='store_true',
    help='display list of all fileservers')
Commands.add_argument('--updateVolumeMappings', action='store_true',
    help='update volume <-> Project mapping')
Commands.add_argument('--showServerSpread', action='store_true',
    help='show all servers having volumes of a project')
Commands.add_argument('--showVolumes', action='store_true',
    help='show volumes of a project on a specific or all servers')
Commands.add_argument('--updateServerSpread', action='store_true',
    help='update the serverspread table in DB')
Commands.add_argument('--showStorageUsage', action='store_true',
    help='show how much storage (online, OSD-online, OSD-offline) is used by this project')
Commands.add_argument('--showProjectsOnServer', action='store_true',
    help='show which Projects have volumes on a server')
parseDefaultConfig(myParser)

CS=CellService()
VS=OSDVolService()
FsS=OSDFsService()
PS=ProjectService()
DBM=DBManager()
CellInfo=CS.getCellInfo(cached=True)
CellInfo.FileServers=CellInfo.FileServers
PrjObj = Project()
defaultDict=PrjObj.getDict()

#
# dump/import :
# for serverpartitions, translate for dump into human readable.
# also dump raw server-uuids.
# for import only consider human readable,
# but fail if we find no UUID for the hostname.
#
if afs.defaultConfig.dumpPrj == True:
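# A minimal sketch of the import-side translation described in the comment
# block above: server/partition pairs arrive as human-readable hostnames and
# have to be resolved to fileserver UUIDs, failing hard when no UUID is known
# for a hostname. resolve_serverpartitions is a hypothetical helper; getFSUUID
# is the lookup used elsewhere in this code base.
def resolve_serverpartitions(cellname, serverpartitions):
    resolved = []
    for hostname, partition in serverpartitions:
        uuid = afs.LookupUtil[cellname].getFSUUID(hostname)
        if uuid is None:
            raise LookupError("no fileserver UUID known for hostname '%s'" % hostname)
        resolved.append((uuid, partition))
    return resolved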
from afs.model.Volume import Volume
from afs.model.Partition import Partition
from afs.model.ExtendedPartitionAttributes import ExtPartAttr
from afs.dao.UbikPeerDAO import UbikPeerDAO
from afs.dao.OSDVolumeDAO import OSDVolumeDAO

myParser=argparse.ArgumentParser(parents=[afs.argParser], add_help=False)
myParser.add_argument("--prj", dest="ProjectName", help="Name of Project")
myParser.add_argument("--volname", dest="VolumeName", required=True, help="Name of Volume")
myParser.add_argument("--voltype", dest="VolumeType", default="RW", help="Type of Volume")
myParser.add_argument("--force", action='store_true', dest="force", default=False,
    help="force creation, even if Volume does not fit in given project")
parseDefaultConfig(myParser)

PS=ProjectService()
# we get a list of Projects, sorted by the Nesting Level.
# only get the most specific one
PrjObj=PS.getProjectsByVolumeName(afs.defaultConfig.VolumeName)[0]
if afs.defaultConfig.ProjectName != None:
    thisPrjObj=PS.getProjectByName(afs.defaultConfig.ProjectName)
    if thisPrjObj == None:
        myParser.error("Project \"%s\" does not exist" % afs.defaultConfig.ProjectName)
    if thisPrjObj.id != PrjObj.id:
        if not afs.defaultConfig.force:
            myParser.error("VolumeName \"%s\" not matched by given Project \"%s\"\n"
                % (afs.defaultConfig.VolumeName, afs.defaultConfig.ProjectName))
            sys.exit(1)
        PrjObj = thisPrjObj

# use the project determined above; thisPrjObj is only defined when --prj was given
print(PS.getNewVolumeLocation(PrjObj.name, afs.defaultConfig.VolumeType))
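# A minimal usage sketch of the project-based placement shown above; the helper
# name pick_location is hypothetical, only getProjectsByVolumeName() and
# getNewVolumeLocation() are taken from this script:
def pick_location(project_service, volume_name, volume_type="RW"):
    """Suggest a location for a new volume from its most specific project."""
    # getProjectsByVolumeName returns projects sorted by nesting level,
    # the most specific one first
    projects = project_service.getProjectsByVolumeName(volume_name)
    if not projects:
        return None
    return project_service.getNewVolumeLocation(projects[0].name, volume_type)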
group.add_argument("--ignorerx", dest="ignoreRX", action="append", help="regEx for volumenames to ignore. All volumes not matching this will be moved.") group.add_argument("--onlyrx", dest="onlyRX", action="append", help="regEx for volumenames to include all volumes not matching this will be ignored.") group.add_argument("--ignoreproject", dest="ignoreProjects", action="append", help="ignore volumes of given project.") group.add_argument("--onlyproject", dest="onlyProjects", action="append", help="only move volumes of given project.") myParser.add_argument("--dryrun",action="store_true", help="Just print out what would be done, but don't do it.") myParser.add_argument("--maxnum", default = 0, type=int, help="max number of Volumes to move.") myParser.add_argument("--untilfree", default = "0", help="move until # is free on spart.") myParser.add_argument("--rwvols", dest="moveRWVols", default=False, action="store_true", help="move rwvols with their accompanying ROs.") myParser.add_argument("--solitaryrovols", dest="moveSolitaryROVols", default=False, action="store_true", help="move solitary rovols.") myParser.add_argument("--minsize", dest="minVolumeUsage", default="0", help="only move volumes with minimalsize of") myParser.add_argument("--osdvolumes", dest="moveOSDVOlumes", default=False, action="store_true", help="also move OSD-Volumes") parseDefaultConfig(myParser) FS=OSDFsService() PS=ProjectService() VS=OSDVolService() VD=VolumeLLA() VlD=VLDbLLA() if not afs.defaultConfig.moveRWVols and not afs.defaultConfig.moveSolitaryROVols : sys.stderr.write("If you want to nmake me do anything, specify --rwvols and/or --solitaryrovols\n") sys.exit(1) if afs.defaultConfig.ignoreRX != None : ignoreRX=[] for rx in afs.defaultConfig.ignoreRX : try : ignoreRX.append(re.compile(rx)) except : sys.stderr.write("Cannot compile regular expression: '%s'\n" % rx)
class CellService(BaseService):
    """
    Provides Service about a Cell's global information.
    The cellname is set in the configuration passed to the constructor.
    Thus one instance works only for one cell.
    """

    def __init__(self, conf=None):
        BaseService.__init__(self, conf, DAOList=["fs", "bnode", "vl", "vol", "rx", "ubik", "dns"])
        self.FS=FsService()
        self.PS=ProjectService()
        return

    def getCellInfo(self, cellname="", _user="", cached=False):
        """
        return full Cellobject.
        """
        if cellname == "":
            cellname = self._CFG.CELL_NAME
        self.Logger.debug("Using cellname : %s " % cellname)
        if cached:
            cell=self.DBManager.getFromCache(Cell, Name=cellname)
            if cell == None:
                self.Logger.info("getCellInfo: Cannot get cached Cell. Returning None.")
                return cell
            self.Logger.debug("getCellInfo: Cell.udate=%s" % cell.udate)
            # update Sums etc. from DB_CACHE
            cell.Name=cellname
            self.Logger.debug("getCellInfo: Cell.FileServers=%s" % cell.FileServers)
            cell.numRW = cell.numRO = cell.numBK = cell.numOffline = 0
            numVolDict=self.bulk_getNumVolumes()
            for f in cell.FileServers:
                self.Logger.debug("getCellInfo: f=%s" % f)
                uuid = afs.LookupUtil[self._CFG.CELL_NAME].getFSUUID(f)
                if numVolDict.has_key(uuid):
                    cell.numRW += numVolDict[uuid].get("RW", 0)
                    cell.numRO += numVolDict[uuid].get("RO", 0)
                    cell.numBK += numVolDict[uuid].get("BK", 0)
            cell.numOffline = -1
            cell.numUsers, cell.numGroups = self.getPTInfo(cached=True)
            cell.allocated, cell.allocated_stale = self.getAllocated()
            cell.size, cell.used, cell.free = self.getUsage(cached=True)
            cell.Projects=[]
            # Projects are in DB_CACHE only
            for p in self.PS.getProjectList():
                cell.Projects.append(p.name)
            self.Logger.debug("Cell=%s" % cell)
            return cell

        # refresh whole new CellObj
        cell=Cell()
        cell.Name=cellname
        cell.FileServers=self.getFileServers()
        cell.DBServers=self.getDBServers()
        cell.PTDBSyncSite, cell.PTDBVersion, cell.PTDBState = self.getUbikDBInfo(cell.DBServers[0], 7002)
        cell.VLDBSyncSite, cell.VLDBVersion, cell.VLDBState = self.getUbikDBInfo(cell.DBServers[0], 7003)
        cell.numRW = cell.numRO = cell.numBK = cell.numOffline = 0
        for f in cell.FileServers:
            numRW, numRO, numBK, numOffline = self.FS.getNumVolumes(name_or_ip=f, cached=True)
            cell.numRW += numRW
            cell.numRO += numRO
            cell.numBK += numBK
            cell.numOffline += numOffline
        cell.numUsers, cell.numGroups = self.getPTInfo()
        cell.size, cell.used, cell.free = self.getUsage()
        # some information is only available if DB_CACHE is used.
        cell.allocated, cell.allocated_stale = -1, -1
        cell.Projects=[]
        # Projects are in DB_CACHE only
        if self._CFG.DB_CACHE:
            for p in self.PS.getProjectList():
                cell.Projects.append(p.name)
            cell.allocated, cell.allocated_stale = self.getAllocated()
        self.Logger.debug("Cell=%s" % cell)
        self.DBManager.setIntoCache(Cell, cell, Name=self._CFG.CELL_NAME)
        return cell

    def refreshLiveData(self, cellname=""):
        """
        update livedata for the cell :
        partition free and used space, DBVersions, list of Servers
        """
        if cellname == "":
            cellname = self._CFG.CELL_NAME
        cell=Cell()
        cell.FileServers=self.getFileServers()
        cell.DBServers=self.getDBServers()
        cell.PTDBSyncSite, cell.PTDBVersion, cell.PTDBState = self.getUbikDBInfo(cell.DBServers[0], 7002)
        cell.VLDBSyncSite, cell.VLDBVersion, cell.VLDBState = self.getUbikDBInfo(cell.DBServers[0], 7003)
        cell.size, cell.used, cell.free = self.getUsage()
        return True

    ###############################################
    # Internal helper Section
    ###############################################

    def getFileServers(self, _user="", cached=False):
        """
        Return FileServers as a list of hostnames, one for each fileserver
        """
        FileServers=[]
        if cached:
            for fs in self.DBManager.getFromCache(FileServer, mustBeunique=False):
                FileServers.append(fs.servernames[0])
            return FileServers
        self.Logger.debug("refreshing FileServers from live system")
        for na in self._vlDAO.getFsServList(_cfg=self._CFG, _user=_user, noresolve=True):
            DNSInfo=afs.LookupUtil[self._CFG.CELL_NAME].getDNSInfo(na['name_or_ip'])
            FileServers.append(DNSInfo['names'][0])
        self.Logger.debug("returning %s" % FileServers)
        return FileServers

    def getDBServers(self, _user="", cached=False):
        """
        return a list of DB-Server hostnames
        """
        DBServers=[]
        if cached:
            for na in self.DBManager.getFromCache(DBServer, mustBeUnique=False):
                DBServers.append(na.servernames[0])
            return DBServers
        # we need to bootstrap ourselves now from nothing but the Cellname,
        # just a list of simple hostname dicts
        DBServList=[]
        # try DNS SRV records from afsdb
        try:
            DBServList=self._dnsDAO.getDBServList(_cfg=self._CFG)
        except:
            pass
        if len(DBServList) == 0:
            # get one fileserver and from that one the DBServList
            # we need to make sure to get the IP
            for f in self._vlDAO.getFsServList(_cfg=self._CFG, _user=_user, noresolve=True):
                if f["name_or_ip"] in self._CFG.ignoreIPList:
                    continue
                DBServList = self._bnodeDAO.getDBServList(f["name_or_ip"], _cfg=self._CFG, _user=_user)
        # canonicalize DBServList
        for na in DBServList:
            DNSInfo=afs.LookupUtil[self._CFG.CELL_NAME].getDNSInfo(na)
            DBServers.append(DNSInfo['names'][0])
        self.Logger.debug("returning %s" % DBServers)
        return DBServers

    def getUbikDBInfo(self, name_or_ip, Port, _user=""):
        """
        return a (SyncSite, DBVersion, DBState) tuple for the database accessible on Port
        """
        shortInfo = self._ubikDAO.getShortInfo(name_or_ip, Port, _cfg=self._CFG, _user=_user)
        # we get DBState only from the SyncSite
        if not shortInfo["isSyncSite"]:
            shortInfo = self._ubikDAO.getShortInfo(shortInfo["SyncSite"], Port, _cfg=self._CFG, _user=_user)
        return (shortInfo["SyncSite"], shortInfo["SyncSiteDBVersion"], shortInfo["DBState"])

    def getUsage(self, cached=False):
        """
        Get Partition info of all Fileservers
        """
        size = used = free = 0
        return size, used, free

    def getAllocated(self):
        """
        Get sum of all given quotas of all Volumes
        DBCache only.
""" allocated = allocated_stale= 0 return allocated,allocated_stale def getPTInfo(self,cached=False) : """ Get all sum of all users and groups defined in PTDB """ numUsers = numGroups = 0 return numUsers,numGroups def bulk_getNumVolumes(self) : """ returns all volume count for all servers from DB """ self.Logger.debug("bulk_getNumVolumes:") resDict={} conn = self._CFG.DB_ENGINE.connect() transa = conn.begin() for t in ["RW","RO","BK"] : rawsql='SELECT TF.uuid, COUNT(TV.vid) FROM tbl_volume AS TV JOIN tbl_fileserver AS TF on TV.serv_uuid = TF.uuid WHERE TV.type="%s" GROUP BY TF.uuid;' % t for uuid,count in conn.execute(rawsql).fetchall() : if not resDict.has_key(uuid) : resDict[uuid]={"RW" : 0,"RO" : 0, "BK" : 0} resDict[uuid][t]=count transa.commit() conn.close() self.Logger.debug("bulk_getNumVolumes: returning %s" % resDict) return resDict