Example no. 1
0
class TestFsServiceMethods(unittest.TestCase):
    """
    Unit tests for FsService: restart times and fileserver objects.
    """

    def setUp(self):
        """
        Read the test configuration, point afs at the configured cell
        and create the FsService instance plus test fixtures.
        """
        self.TestCfg = ConfigParser()
        self.TestCfg.read(options.setup)
        self.Cell = self.TestCfg.get("general", "Cell")
        afs.defaultConfig.AFSCell = self.Cell
        self.User = self.TestCfg.get("general", "User")
        self.Pass = self.TestCfg.get("general", "Pass")
        self.FsMng = FsService()
        self.FsName = self.TestCfg.get("FsService", "FS")
        self.FsPartitions = sorted(self.TestCfg.get("FsService", "Partitions").split(","))
        if afs.defaultConfig.DB_CACHE:
            from sqlalchemy.orm import sessionmaker
            self.DbSession = sessionmaker(bind=afs.defaultConfig.DB_ENGINE)
        return

    def test_getRestartTimes(self):
        """getRestartTimes reports the expected general/binary restart times."""
        times = self.FsMng.getRestartTimes(self.FsName)
        self.assertEqual("never", times["general"])
        self.assertEqual("5:00 am", times["binary"])
        return

    def test_setRestartTimes(self):
        """setRestartTimes returns None for both the general and binary slots."""
        for slot in ("general", "binary"):
            result = self.FsMng.setRestartTimes(self.FsName, "never", slot)
            self.assertEqual(None, result)
        return

    def test_getServerObj(self):
        """getFileServer returns a server whose partitions match the config."""
        server = self.FsMng.getFileServer(self.FsName)
        partition_names = sorted(p["name"] for p in server.parts)
        self.assertEqual(self.FsPartitions, partition_names)
        return
Example no. 2
0
 def setUp(self):
     """
     Read the test configuration, point afs at the configured cell and
     build the FsService instance plus the fixtures the tests rely on.
     """
     cfg = ConfigParser()
     cfg.read(options.setup)
     self.TestCfg = cfg
     self.Cell = cfg.get("general", "Cell")
     afs.defaultConfig.AFSCell = self.Cell
     self.User = cfg.get("general", "User")
     self.Pass = cfg.get("general", "Pass")
     self.FsMng = FsService()
     self.FsName = cfg.get("FsService", "FS")
     self.FsPartitions = sorted(cfg.get("FsService", "Partitions").split(","))
     if afs.defaultConfig.DB_CACHE:
         from sqlalchemy.orm import sessionmaker
         self.DbSession = sessionmaker(bind=afs.defaultConfig.DB_ENGINE)
     return
Example no. 3
0
 def __init__(self, conf=None):
     """
     Set up the DAO list (fs, bnode, vl, vol, rx, ubik, dns) and the
     helper services used for per-fileserver and project queries.
     """
     BaseService.__init__(self, conf, DAOList=["fs", "bnode","vl", "vol", "rx", "ubik", "dns"])
     self.FS=FsService()
     self.PS=ProjectService()
     return
Example no. 4
0
class CellService(BaseService):
    """
    Provides Service about a Cell global information.
    The cellname is set in the configuration passed to constructor.
    Thus one instance works only for one cell.
    """
    def __init__(self, conf=None):
        """Set up the DAO list and the helper services for volume/project queries."""
        BaseService.__init__(self, conf, DAOList=["fs", "bnode","vl", "vol", "rx", "ubik", "dns"])
        self.FS=FsService()
        self.PS=ProjectService()
        return


    def getCellInfo(self, cellname="", _user="", cached=False) :
        """
        return full Cellobject.

        cached=True  : read the Cell from DB_CACHE and refresh only the
                       aggregated counters; returns None when nothing is cached.
        cached=False : query the live system and write the result back
                       into the cache when DB_CACHE is enabled.
        """
        if cellname == "" : cellname = self._CFG.CELL_NAME
        self.Logger.debug("Using cellname : %s " % cellname)
        if cached :
            cell=self.DBManager.getFromCache(Cell,Name = cellname)
            if cell == None :
               self.Logger.info("getCellInfo: Cannot get cached Cell. Returning none.")
               return cell
            self.Logger.debug("getCellInfo: Cell.udate=%s" % cell.udate)
            # update Sums etc. from DB_CACHE
            cell.Name=cellname
            self.Logger.debug("getCellInfo: Cell.FileServers=%s" % cell.FileServers)
            cell.numRW = cell.numRO = cell.numBK = cell.numOffline = 0
            numVolDict=self.bulk_getNumVolumes()
            for f in cell.FileServers :
                self.Logger.debug("getCellInfo: f=%s" % f)
                uuid = afs.LookupUtil[self._CFG.CELL_NAME].getFSUUID(f)
                if numVolDict.has_key(uuid) :
                    cell.numRW += numVolDict[uuid].get("RW",0)
                    cell.numRO += numVolDict[uuid].get("RO",0)
                    cell.numBK += numVolDict[uuid].get("BK",0)
            # offline volumes are not tracked in the cache
            cell.numOffline = -1
            cell.numUsers,cell.numGroups = self.getPTInfo(cached=True)
            cell.allocated,cell.allocated_stale = self.getAllocated()
            cell.size,cell.used,cell.free=self.getUsage(cached=True)
            cell.Projects=[] # Projects are in DB_CACHE only
            for p in self.PS.getProjectList() :
                cell.Projects.append(p.name)
            self.Logger.debug("Cell=%s" % cell)
            return cell

        # refresh whole new CellObj
        cell=Cell()
        cell.Name=cellname
        cell.FileServers=self.getFileServers()
        cell.DBServers=self.getDBServers()
        # ubik ports: 7002 = ptserver, 7003 = vlserver
        cell.PTDBSyncSite, cell.PTDBVersion,cell.PTDBState=self.getUbikDBInfo(cell.DBServers[0],7002)
        cell.VLDBSyncSite, cell.VLDBVersion,cell.VLDBState=self.getUbikDBInfo(cell.DBServers[0],7003)
        cell.numRW = cell.numRO = cell.numBK = cell.numOffline = 0
        for f in cell.FileServers :
            numRW,numRO,numBK,numOffline = self.FS.getNumVolumes(name_or_ip=f,cached=True)
            cell.numRW += numRW
            cell.numRO += numRO
            cell.numBK += numBK
            cell.numOffline += numOffline
        cell.numUsers,cell.numGroups = self.getPTInfo()
        cell.size,cell.used,cell.free=self.getUsage()
        # some information are only available if DB_CACHE is used.
        cell.allocated,cell.allocated_stale = -1,-1
        cell.Projects=[] # Projects are in DB_CACHE only

        if self._CFG.DB_CACHE :
            for p in self.PS.getProjectList() :
                cell.Projects.append(p.name)
            cell.allocated,cell.allocated_stale = self.getAllocated()
            # fixed: log the cell instance, not the Cell class
            self.Logger.debug("Cell=%s" % cell)
            self.DBManager.setIntoCache(Cell,cell,Name=self._CFG.CELL_NAME)
        return cell

    def refreshLiveData(self, cellname="") : 
        """
        update livedata for the cell :
        partition free and used space, DBVersions, list of Servers.
        NOTE(review): the assembled cell object is discarded; nothing is
        persisted here -- confirm whether a cache write is missing.
        Always returns True.
        """
        if cellname == "" : cellname = self._CFG.CELL_NAME
        cell=Cell()
        cell.FileServers=self.getFileServers()
        cell.DBServers=self.getDBServers()
        cell.PTDBSyncSite, cell.PTDBVersion,cell.PTDBState=self.getUbikDBInfo(cell.DBServers[0],7002)
        cell.VLDBSyncSite, cell.VLDBVersion,cell.VLDBState=self.getUbikDBInfo(cell.DBServers[0],7003)
        cell.size,cell.used,cell.free=self.getUsage()
        return True 

  
    ###############################################
    # Internal helper Section
    ###############################################    
   
    def getFileServers(self, _user="", cached=False):
        """
        Return FileServers as a list of hostnames (first DNS name) for
        each fileserver, from cache or from the live VLDB.
        """
        FileServers=[]
        if cached :
            # NOTE(review): keyword is spelled "mustBeunique" here but
            # "mustBeUnique" in getDBServers -- confirm which spelling
            # the DBManager API actually accepts
            for fs in self.DBManager.getFromCache(FileServer,mustBeunique=False) :
                FileServers.append(fs.servernames[0])
            return FileServers
        self.Logger.debug("refreshing FileServers from live system")
        for na in self._vlDAO.getFsServList(_cfg=self._CFG, _user=_user,noresolve=True) :
            DNSInfo=afs.LookupUtil[self._CFG.CELL_NAME].getDNSInfo(na['name_or_ip'])
            FileServers.append(DNSInfo['names'][0])
        self.Logger.debug("returning %s" % FileServers)
        return FileServers
    
    def getDBServers(self, _user="", cached=False):
        """
        return a DB-Server-hostname list.
        Live lookup order: DNS SRV/afsdb records first, then fall back
        to asking a fileserver's bosserver for the DB server list.
        """
        DBServers=[]
        if cached :
            for na in self.DBManager.getFromCache(DBServer,mustBeUnique=False) :
                DBServers.append(na.servernames[0])
            return DBServers
        # we need to bootstrap ourselves now from nothing but the Cellname
        # just list of simple dicts hostnames
        DBServList=[]

        # try DNS _SRV Records from afsdb
        try :
            DBServList=self._dnsDAO.getDBServList(_cfg=self._CFG)
        except:
            pass
        if len(DBServList) == 0 :
            # get one fileserver and from that one the DBServList
            # we need to make sure to get the IP
            # fixed: query the first fileserver NOT on the ignore list;
            # previously this statement dangled after the loop and always
            # used the last fileserver, even one on ignoreIPList
            for f in self._vlDAO.getFsServList(_cfg=self._CFG, _user=_user, noresolve=True ) :
                if  f["name_or_ip"] in self._CFG.ignoreIPList : continue
                DBServList = self._bnodeDAO.getDBServList(f["name_or_ip"], _cfg=self._CFG, _user=_user)
                break
        
        # canonicalize DBServList 
        for na in DBServList :
            DNSInfo=afs.LookupUtil[self._CFG.CELL_NAME].getDNSInfo(na)
            DBServers.append(DNSInfo['names'][0])
        self.Logger.debug("returning %s" % DBServers)
        return DBServers

    def getUbikDBInfo(self, name_or_ip, Port, _user=""):
        """
        return (SyncSite,DBVersion,DBState) tuple for DataBase accessible from Port
        """
        shortInfo = self._ubikDAO.getShortInfo(name_or_ip, Port, _cfg=self._CFG, _user=_user)
        # we get DBState only from SyncSite, so re-query there if needed
        if not shortInfo["isSyncSite"] : 
             shortInfo = self._ubikDAO.getShortInfo(shortInfo["SyncSite"], Port, _cfg=self._CFG,_user=_user)
        return (shortInfo["SyncSite"],shortInfo["SyncSiteDBVersion"],shortInfo["DBState"])

    def getUsage(self,cached=False) :
        """
        Get Partition info of all Fileservers.
        Not implemented yet: always returns (0, 0, 0).
        """
        size = used = free = 0 
        return size,used,free

    def getAllocated(self) :
        """
        Get sum of all given quotas of all Volumes. DBCache only.
        Not implemented yet: always returns (0, 0).
        """
        allocated = allocated_stale= 0 
        return allocated,allocated_stale

    def getPTInfo(self,cached=False) :
        """
        Get sum of all users and groups defined in PTDB.
        Not implemented yet: always returns (0, 0).
        """
        numUsers = numGroups = 0
        return numUsers,numGroups

    def bulk_getNumVolumes(self) :
        """
        returns per-fileserver volume counts from the DB, as a dict
        {uuid : {"RW": count, "RO": count, "BK": count}}
        """
        self.Logger.debug("bulk_getNumVolumes:") 
        resDict={}
        conn = self._CFG.DB_ENGINE.connect()
        transa = conn.begin()
        for t in ["RW","RO","BK"] :
            rawsql='SELECT TF.uuid, COUNT(TV.vid) FROM tbl_volume AS TV JOIN tbl_fileserver AS TF on TV.serv_uuid = TF.uuid WHERE TV.type="%s" GROUP BY TF.uuid;' % t
            for uuid,count in conn.execute(rawsql).fetchall() :
                if not resDict.has_key(uuid) : resDict[uuid]={"RW" : 0,"RO" : 0, "BK" : 0}
                resDict[uuid][t]=count
                
        transa.commit()
        conn.close()  
        self.Logger.debug("bulk_getNumVolumes: returning %s" % resDict) 
        return resDict
Example no. 5
0
 def __init__(self, conf=None):
     """
     Set up the vol/fs DAOs and the helper services (CellService,
     FsService) used for cache lookups and server-UUID resolution.
     """
     BaseService.__init__(self, conf, DAOList=["vol","fs"])
     self.CS=CellService()
     self.FsS=FsService()
Example no. 6
0
class VolService (BaseService):
    """
    Provides Service about a Volume management.
    The cellname is set in the methods so that we 
    can use this for more than one cell.
    """
    
    def __init__(self, conf=None):
        """Set up the vol/fs DAOs and the helper services."""
        BaseService.__init__(self, conf, DAOList=["vol","fs"])
        self.CS=CellService()
        self.FsS=FsService()
       
    ###############################################
    # Volume Section
    ###############################################    
    
    def getVolGroup(self, id , cached=False):
        """
        Retrieve the Volume Group (RW/RO/BK entries sharing one name)
        for the given volume id, or None if the VLDB has no entries.
        """
        # renamed local from "list" to avoid shadowing the builtin
        group_entries = self._volDAO.getVolGroupList(id,  self._CFG.CELL_NAME, self._CFG.Token)
        volGroup = None
        if len(group_entries) > 0:
            volGroup =  VolumeGroup()
            for el in group_entries:
                volGroup.name = el['volname']
                if el['type'] == 'RW':
                    volGroup.RW.append(el)
                elif el['type'] == 'RO':
                    volGroup.RO.append(el)
                else :
                    volGroup.BK.append(el)
        return volGroup
       
    def getVolume(self, name, serv, part,  cached=False):
        """
        Retrieve Volume Information by Name or ID on a given server and
        partition. Returns a Volume object, or None when nothing found.
        """
        if cached :
            serv_uuid=self.FsS.getUUID(serv)
            vol=self._getFromCache(name, serv_uuid, part)
            return vol
        vdict = self._volDAO.getVolume(name, serv, part,  self._CFG.CELL_NAME, self._CFG.Token)
        vol = None
        # fixed: guard vdict before mutating it -- previously the
        # serv_uuid assignment and pop("serv") ran unconditionally and
        # crashed on a falsy DAO result
        if vdict:
            vdict["serv_uuid"]=self.FsS.getUUID(serv)
            vdict.pop("serv")
            vol = Volume()
            vol.setByDict(vdict)
            if self._CFG.DB_CACHE :
                self._setIntoCache(vol)
        return  vol

    def getExtVolAttr(self, vid):
        """
        get Extended Volume Attribute Object
        works only with cache
        """
        ext_vol_attr=self.DbSession.query(ExtVolAttr).filter(ExtVolAttr.vid == vid).first()
        return ext_vol_attr
    
    def setExtVolAttr(self, vid, dict):
        """
        set Extended Volume Attributes by dict.
        Updates the existing cache row in place when present, otherwise
        merges the new object into the session. Returns the new object.
        """
        thisExtVolAttr=ExtVolAttr(vid=vid)
        thisExtVolAttr.setByDict(dict)
        volCache = self.DbSession.query(ExtVolAttr).filter(ExtVolAttr.vid == vid).first()
        if volCache:
            volCache.copyObj(thisExtVolAttr)
        else:
            volCache=self.DbSession.merge(thisExtVolAttr)
        self.DbSession.flush()
        self.DbSession.commit()  
        return thisExtVolAttr
 
    ################################################
    # AFS-operations
    ################################################
 
    def release(self, id) :
        """
        Release the given volume to its RO sites.
        Not implemented yet; the caller must check that id is a RW volume.
        """
        pass
 
    ################################################
    #  Cache Query 
    ################################################
    def getVolCountByQuery(self,query):
        """
        Return the number of cached Volumes matching the query object.
        Raises VolError when DB_CACHE is not configured.
        """
        if not self._CFG.DB_CACHE:
            raise VolError('Error, no db Cache defined ',None)
        query._tbl= "Volume"
        queryc = query.getQueryCount()
        # SECURITY: the query object's generated expression is eval()ed;
        # never feed these query objects untrusted input
        count  = eval(queryc)
        return count
 
    def getVolByQuery(self,query):
        """
        Return the cached Volumes matching the query object.
        Raises VolError when DB_CACHE is not configured.
        """
        if not self._CFG.DB_CACHE:
            raise VolError('Error, no db Cache defined ',None)
        query._tbl= "Volume"
        # SECURITY: eval() of a generated expression -- see getVolCountByQuery
        query  = query.getQuery()
        res    = eval(query)
        return res
 
    def refreshCache(self, serv, part):
        """
        Synchronise the cached Volumes of one server partition with the
        live system: update existing rows, delete vanished ones, add new
        ones. Returns the number of volumes reported by the fileserver.
        Raises VolError when DB_CACHE is not configured.
        """
        if not self._CFG.DB_CACHE:
            raise VolError('Error, no db Cache defined ',None)
        part = afsutil.canonicalizePartition(part)
        # renamed local from "list" to avoid shadowing the builtin
        live_vols = self._fsDAO.getVolList( serv, part, self._CFG.CELL_NAME, self._CFG.Token)
        # index the live volumes by vid for O(1) lookup below
        idVolDict = {}
        cUpdate = len(live_vols)
        for el in live_vols:
            idVolDict[el['vid']] = el
        res  = self.DbSession.query(Volume).filter(self.or_(Volume.serv_uuid == serv,Volume.servername == serv )).filter(Volume.part == part)
        
        # UPDATE/DELETE section: walk the cached rows
        flush = 0
        for vol in res:
            flush +=1
            if idVolDict.has_key(vol.vid):
                vol.setByDict(idVolDict[vol.vid])
                del idVolDict[vol.vid]
            else:
                # no longer on the server -> drop from cache
                self.DbSession.delete(vol)
            if flush > self._CFG.DB_FLUSH:
                self.DbSession.flush()
        
        # ADD section: whatever is left in idVolDict is new on the server
        flush = 0
        for key in idVolDict.keys():
            flush +=1
            vol = Volume()
            vol.setByDict(idVolDict[key])
            self.DbSession.add(vol)
            if flush > self._CFG.DB_FLUSH:
                self.DbSession.flush()
        self.DbSession.commit()
        
        return cUpdate
    
    ################################################
    #  Internal Cache Management 
    ################################################


    def _getFromCache(self,id, serv_uuid, part):
        """
        Fetch one Volume from the cache by vid or name on the given
        server uuid and partition. Raises AfsError without DB_CACHE.
        """
        if not self._CFG.DB_CACHE:
            raise AfsError("DB_CACHE not configured")
        vol = self.DbSession.query(Volume).filter(self.or_(Volume.vid == id, Volume.name == id)).filter(Volume.serv_uuid == serv_uuid).filter(Volume.part == part).first()
        return vol
        
    def _setIntoCache(self,vol):
        """
        Store a Volume into the cache: update the existing row matching
        vid+server+partition, otherwise merge the new object.
        Raises AfsError without DB_CACHE.
        """
        if not self._CFG.DB_CACHE:
            raise AfsError("DB_CACHE not configured")
        volCache = self.DbSession.query(Volume).filter(Volume.vid == vol.vid).filter(self.or_(Volume.serv_uuid == vol.serv_uuid,Volume.servername == vol.servername )).filter(Volume.part == vol.part).first()
        if volCache:
            volCache.copyObj(vol)
        else:
            volCache=self.DbSession.merge(vol)
        self.DbSession.flush()
        self.DbSession.commit()  
        return volCache
    
    def _delCache(self,vol):
        """
        Delete a Volume from the cache. No-op (returns None) when
        DB_CACHE is not configured.
        """
        if not self._CFG.DB_CACHE:
            return None
        self.DbSession.delete(vol)
        self.DbSession.commit()