def __generateNewVersion(self):
    """Persist configuration changes: bump the version and write it to disk.

    Only the master configuration server may do this; on any other
    server the call is a no-op.
    """
    if not gConfigurationData.isMaster():
        return
    gConfigurationData.generateNewVersion()
    gConfigurationData.writeRemoteConfigurationToDisk()
def __loadConfigurationData( self ):
    """Load the configuration from disk and, on the master server, make sure it
    is exportable: a name must be set, a version generated if missing, and this
    server's URL registered; any generated changes are written back to disk.

    :raises SystemExit: via DIRAC.abort when the configuration has no name
    """
    # Best-effort creation of the backup directory: it may already exist or
    # be non-creatable.  Only OS-level failures are expected here, so catch
    # OSError instead of the original bare except (which also swallowed
    # SystemExit / KeyboardInterrupt).
    try:
        os.makedirs( os.path.join( DIRAC.rootPath, "etc", "csbackup" ) )
    except OSError:
        pass
    gConfigurationData.loadConfigurationData()
    if gConfigurationData.isMaster():
        bBuiltNewConfiguration = False
        if not gConfigurationData.getName():
            DIRAC.abort( 10, "Missing name for the configuration to be exported!" )
        gConfigurationData.exportName()
        sVersion = gConfigurationData.getVersion()
        # "0" is the sentinel for "never versioned"
        if sVersion == "0":
            gLogger.info( "There's no version. Generating a new one" )
            gConfigurationData.generateNewVersion()
            bBuiltNewConfiguration = True
        # Register this server in the configuration if not yet listed
        if self.sURL not in gConfigurationData.getServers():
            gConfigurationData.setServers( self.sURL )
            bBuiltNewConfiguration = True
        gConfigurationData.setMasterServer( self.sURL )
        # Only touch the disk when something actually changed
        if bBuiltNewConfiguration:
            gConfigurationData.writeRemoteConfigurationToDisk()
def clearCFG():
    """SUPER UGLY: one must recreate the CFG objects of gConfigurationData
    not to conflict with other tests that might be using a local dirac.cfg"""
    for cfgAttr in ("localCFG", "remoteCFG", "mergedCFG"):
        setattr(gConfigurationData, cfgAttr, CFG())
    gConfigurationData.generateNewVersion()
def fts3db():
    """Yield an FTS3DB instance backed by an in-memory SQLite database.

    The module-level MySQL-specific defaults of FTS3DB are patched to their
    SQLite equivalents before the DB object is created; after the test the
    global configuration objects are reset.
    """
    # SQLite has no utc_timestamp(); substitute SQLite's datetime() for the
    # timestamp defaults used by the FTS3 tables.
    FTS3DB.utc_timestamp = func.datetime
    FTS3DB.fts3FileTable.columns["lastUpdate"].onupdate = func.datetime
    FTS3DB.fts3JobTable.columns["lastUpdate"].onupdate = func.datetime
    FTS3DB.fts3OperationTable.columns["lastUpdate"].onupdate = func.datetime

    db = FTS3DB.FTS3DB(url="sqlite+pysqlite:///:memory:")

    @event.listens_for(engine.Engine, "connect")
    def set_sqlite_pragma(dbapi_connection, connection_record):
        """Make sure that the foreign keys are checked

        See https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#foreign-key-support
        """
        cursor = dbapi_connection.cursor()
        cursor.execute("PRAGMA foreign_keys=ON")
        cursor.close()

    db.createTables()

    yield db

    # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
    # not to conflict with other tests that might be using a local dirac.cfg
    # Note that here we do not use it specifically, but the FTS3 objects
    # are doing it
    gConfigurationData.localCFG = CFG()
    gConfigurationData.remoteCFG = CFG()
    gConfigurationData.mergedCFG = CFG()
    gConfigurationData.generateNewVersion()
def __loadConfigurationData(self):
    """Load the configuration from disk and, on the master server, make sure it
    is exportable: a name must be set, a version generated if missing, and this
    server's URL registered; any generated changes are written back to disk.

    :raises SystemExit: via DIRAC.abort when the configuration has no name
    """
    # Best-effort creation of the backup directory: it may already exist or
    # be non-creatable.  Only OS-level failures are expected, so catch
    # OSError instead of the original bare except (which also swallowed
    # SystemExit / KeyboardInterrupt).
    try:
        os.makedirs(os.path.join(DIRAC.rootPath, "etc", "csbackup"))
    except OSError:
        pass
    gConfigurationData.loadConfigurationData()
    if gConfigurationData.isMaster():
        bBuiltNewConfiguration = False
        if not gConfigurationData.getName():
            DIRAC.abort(
                10,
                "Missing name for the configuration to be exported!")
        gConfigurationData.exportName()
        sVersion = gConfigurationData.getVersion()
        # "0" is the sentinel for "never versioned"
        if sVersion == "0":
            gLogger.info("There's no version. Generating a new one")
            gConfigurationData.generateNewVersion()
            bBuiltNewConfiguration = True
        # Register this server in the configuration if not yet listed
        if self.sURL not in gConfigurationData.getServers():
            gConfigurationData.setServers(self.sURL)
            bBuiltNewConfiguration = True
        gConfigurationData.setMasterServer(self.sURL)
        # Only touch the disk when something actually changed
        if bBuiltNewConfiguration:
            gConfigurationData.writeRemoteConfigurationToDisk()
def updateConfiguration(self, sBuffer, committer="", updateVersionOption=False):
    """
    Update the master configuration with the newly received changes

    :param str sBuffer: newly received configuration data
    :param str committer: the user name of the committer
    :param bool updateVersionOption: flag to update the current configuration version

    :return: S_OK/S_ERROR of the write-to-disk of the new configuration
    """
    if not gConfigurationData.isMaster():
        return S_ERROR("Configuration modification is not allowed in this server")
    # Load the data in a ConfigurationData object
    oRemoteConfData = ConfigurationData(False)
    oRemoteConfData.loadRemoteCFGFromCompressedMem(sBuffer)
    if updateVersionOption:
        # Overwrite the incoming version with the local one so the
        # comparison below always succeeds
        oRemoteConfData.setVersion(gConfigurationData.getVersion())
    # Test that remote and new versions are the same
    sRemoteVersion = oRemoteConfData.getVersion()
    sLocalVersion = gConfigurationData.getVersion()
    gLogger.info("Checking versions\nremote: %s\nlocal: %s" % (sRemoteVersion, sLocalVersion))
    if sRemoteVersion != sLocalVersion:
        if not gConfigurationData.mergingEnabled():
            return S_ERROR("Local and remote versions differ (%s vs %s). Cannot commit." % (sLocalVersion, sRemoteVersion))
        else:
            # Versions differ but merging is enabled: try to auto-merge the
            # independent updates into the incoming configuration
            gLogger.info("AutoMerging new data!")
            if updateVersionOption:
                return S_ERROR("Cannot AutoMerge! version was overwritten")
            result = self.__mergeIndependentUpdates(oRemoteConfData)
            if not result['OK']:
                gLogger.warn("Could not AutoMerge!", result['Message'])
                return S_ERROR("AutoMerge failed: %s" % result['Message'])
            requestedRemoteCFG = result['Value']
            gLogger.info("AutoMerge successful!")
            oRemoteConfData.setRemoteCFG(requestedRemoteCFG)
    # Test that configuration names are the same
    sRemoteName = oRemoteConfData.getName()
    sLocalName = gConfigurationData.getName()
    if sRemoteName != sLocalName:
        return S_ERROR("Names differ: Server is %s and remote is %s" % (sLocalName, sRemoteName))
    # Update and generate a new version
    gLogger.info("Committing new data...")
    # The swap of the remote CFG is done under the lock; version generation
    # and the disk write happen after unlock
    gConfigurationData.lock()
    gLogger.info("Setting the new CFG")
    gConfigurationData.setRemoteCFG(oRemoteConfData.getRemoteCFG())
    gConfigurationData.unlock()
    gLogger.info("Generating new version")
    gConfigurationData.generateNewVersion()
    # self.__checkSlavesStatus( forceWriteConfiguration = True )
    gLogger.info("Writing new version to disk")
    retVal = gConfigurationData.writeRemoteConfigurationToDisk("%s@%s" % (committer, gConfigurationData.getVersion()))
    gLogger.info("New version", gConfigurationData.getVersion())

    # Attempt to update the configuration on currently registered slave services
    if gConfigurationData.getAutoSlaveSync():
        result = self.forceSlavesUpdate()
        if not result['OK']:
            gLogger.warn('Failed to update slave servers')

    return retVal
def tearDown( self ):
    """Delete the temporary cfg file and reset the global configuration."""
    try:
        os.remove(self.testCfgFileName)
    except OSError:
        pass
    # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
    # not to conflict with other tests that might be using a local dirac.cfg
    for cfgAttr in ("localCFG", "remoteCFG", "mergedCFG"):
        setattr(gConfigurationData, cfgAttr, CFG())
    gConfigurationData.generateNewVersion()
def tearDown(self):
    """Remove the temporary configuration file and wipe gConfigurationData."""
    try:
        os.remove(self.testCfgFileName)
    except OSError:
        pass
    # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
    # not to conflict with other tests that might be using a local dirac.cfg
    gConfigurationData.localCFG, gConfigurationData.remoteCFG, gConfigurationData.mergedCFG = (
        CFG(),
        CFG(),
        CFG(),
    )
    gConfigurationData.generateNewVersion()
def updateConfiguration(self, sBuffer, commiterDN="", updateVersionOption=False):
    """Update the master configuration with newly received changes.

    :param str sBuffer: compressed configuration data to commit
    :param str commiterDN: DN of the committer, recorded alongside the new version
    :param bool updateVersionOption: if True, overwrite the incoming version
        with the current local one before comparing
    :return: S_OK/S_ERROR of the write-to-disk of the new configuration
    """
    if not gConfigurationData.isMaster():
        return S_ERROR(
            "Configuration modification is not allowed in this server")
    # Load the data in a ConfigurationData object
    oRemoteConfData = ConfigurationData(False)
    oRemoteConfData.loadRemoteCFGFromCompressedMem(sBuffer)
    if updateVersionOption:
        oRemoteConfData.setVersion(gConfigurationData.getVersion())
    # Test that remote and new versions are the same
    sRemoteVersion = oRemoteConfData.getVersion()
    sLocalVersion = gConfigurationData.getVersion()
    gLogger.info("Checking versions\nremote: %s\nlocal: %s" % (sRemoteVersion, sLocalVersion))
    if sRemoteVersion != sLocalVersion:
        if not gConfigurationData.mergingEnabled():
            return S_ERROR(
                "Local and remote versions differ (%s vs %s). Cannot commit." % (sLocalVersion, sRemoteVersion))
        else:
            # Versions differ but merging is enabled: try to auto-merge
            gLogger.info("AutoMerging new data!")
            if updateVersionOption:
                return S_ERROR("Cannot AutoMerge! version was overwritten")
            result = self.__mergeIndependentUpdates(oRemoteConfData)
            if not result['OK']:
                gLogger.warn("Could not AutoMerge!", result['Message'])
                return S_ERROR("AutoMerge failed: %s" % result['Message'])
            requestedRemoteCFG = result['Value']
            gLogger.info("AutoMerge successful!")
            oRemoteConfData.setRemoteCFG(requestedRemoteCFG)
    # Test that configuration names are the same
    sRemoteName = oRemoteConfData.getName()
    sLocalName = gConfigurationData.getName()
    if sRemoteName != sLocalName:
        return S_ERROR("Names differ: Server is %s and remote is %s" % (sLocalName, sRemoteName))
    # Update and generate a new version
    gLogger.info("Committing new data...")
    # The swap of the remote CFG is done under the lock; version generation
    # and the disk write happen after unlock
    gConfigurationData.lock()
    gLogger.info("Setting the new CFG")
    gConfigurationData.setRemoteCFG(oRemoteConfData.getRemoteCFG())
    gConfigurationData.unlock()
    gLogger.info("Generating new version")
    gConfigurationData.generateNewVersion()
    # self.__checkSlavesStatus( forceWriteConfiguration = True )
    gLogger.info("Writing new version to disk!")
    retVal = gConfigurationData.writeRemoteConfigurationToDisk(
        "%s@%s" % (commiterDN, gConfigurationData.getVersion()))
    gLogger.info("New version it is!")
    return retVal
def create_serverAndClient(request): """This function starts a server, and closes it after The server will use the parametrized transport type """ # Reinitialize the configuration. # We do it here rather than at the start of the module # to accommodate for pytest when going through all the DIRAC tests gConfigurationData.localCFG = CFG() gConfigurationData.remoteCFG = CFG() gConfigurationData.mergedCFG = CFG() gConfigurationData.generateNewVersion() gConfigurationData.setOptionInCFG("/DIRAC/Security/CALocation", caLocation) gConfigurationData.setOptionInCFG("/DIRAC/Security/CertFile", hostCertLocation) gConfigurationData.setOptionInCFG("/DIRAC/Security/KeyFile", hostKeyLocation) testStr = request.param serverName, clientName = testStr.split("-") serverClass = transportByName(serverName) clientClass = transportByName(clientName) sr = DummyServiceReactor(serverClass, PORT_NUMBER) server_thread = threading.Thread(target=sr.serve) sr.prepare() server_thread.start() # Create the client clientOptions = { "clientMode": True, "proxyLocation": proxyFile, } clientTransport = clientClass(("localhost", PORT_NUMBER), bServerMode=False, **clientOptions) res = clientTransport.initAsClient() assert res["OK"], res yield sr, clientTransport clientTransport.close() sr.closeListeningConnections() server_thread.join() # Clean the config gConfigurationData.localCFG = CFG() gConfigurationData.remoteCFG = CFG() gConfigurationData.mergedCFG = CFG() gConfigurationData.generateNewVersion()
def updateConfiguration(self, sBuffer, commiter="", updateVersionOption=False):
    """Update the master configuration with newly received changes.

    :param str sBuffer: compressed configuration data to commit
    :param str commiter: name of the committer, recorded alongside the new version
    :param bool updateVersionOption: if True, overwrite the incoming version
        with the current local one before comparing
    :return: S_OK/S_ERROR of the write-to-disk of the new configuration
    """
    if not gConfigurationData.isMaster():
        return S_ERROR("Configuration modification is not allowed in this server")
    # Load the data in a ConfigurationData object
    oRemoteConfData = ConfigurationData(False)
    oRemoteConfData.loadRemoteCFGFromCompressedMem(sBuffer)
    if updateVersionOption:
        oRemoteConfData.setVersion(gConfigurationData.getVersion())
    # Test that remote and new versions are the same
    sRemoteVersion = oRemoteConfData.getVersion()
    sLocalVersion = gConfigurationData.getVersion()
    gLogger.info("Checking versions\nremote: %s\nlocal: %s" % (sRemoteVersion, sLocalVersion))
    if sRemoteVersion != sLocalVersion:
        if not gConfigurationData.mergingEnabled():
            return S_ERROR(
                "Local and remote versions differ (%s vs %s). Cannot commit." % (sLocalVersion, sRemoteVersion)
            )
        else:
            # Versions differ but merging is enabled: try to auto-merge
            gLogger.info("AutoMerging new data!")
            if updateVersionOption:
                return S_ERROR("Cannot AutoMerge! version was overwritten")
            result = self.__mergeIndependentUpdates(oRemoteConfData)
            if not result["OK"]:
                gLogger.warn("Could not AutoMerge!", result["Message"])
                return S_ERROR("AutoMerge failed: %s" % result["Message"])
            requestedRemoteCFG = result["Value"]
            gLogger.info("AutoMerge successful!")
            oRemoteConfData.setRemoteCFG(requestedRemoteCFG)
    # Test that configuration names are the same
    sRemoteName = oRemoteConfData.getName()
    sLocalName = gConfigurationData.getName()
    if sRemoteName != sLocalName:
        return S_ERROR("Names differ: Server is %s and remote is %s" % (sLocalName, sRemoteName))
    # Update and generate a new version
    gLogger.info("Committing new data...")
    # The swap of the remote CFG is done under the lock; version generation
    # and the disk write happen after unlock
    gConfigurationData.lock()
    gLogger.info("Setting the new CFG")
    gConfigurationData.setRemoteCFG(oRemoteConfData.getRemoteCFG())
    gConfigurationData.unlock()
    gLogger.info("Generating new version")
    gConfigurationData.generateNewVersion()
    # self.__checkSlavesStatus( forceWriteConfiguration = True )
    gLogger.info("Writing new version to disk!")
    retVal = gConfigurationData.writeRemoteConfigurationToDisk(
        "%s@%s" % (commiter, gConfigurationData.getVersion())
    )
    gLogger.info("New version it is!")
    return retVal
def setUp(self, _mk_generateStorage, _mk_isLocalSE, _mk_addAccountingOperation):
    """Write a temporary cfg file describing storage elements that share (or
    not) the same endpoint, load it as the active configuration, and build
    the StorageElementItem objects compared by the tests.

    The ``_mk_*`` parameters are presumably the mocks injected by
    ``@mock.patch`` decorators on this method — confirm against the class
    definition; they are not used directly here.
    """
    # Creating test configuration file
    self.testCfgFileName = os.path.join(tempfile.gettempdir(), 'test_StorageElement.cfg')
    cfgContent = '''
DIRAC {
  Setup=TestSetup
}
Resources{
  StorageElements{
    DiskStorageA {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      GFAL2_SRM2 {
        Host = srm-diskandtape.cern.ch
        SpaceToken = Disk
        Protocol = srm
        Path = /base/pathDisk
      }
    }
    # Same end point as DiskStorageA, but with a different space token
    # So they should be considered the same
    TapeStorageA {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      GFAL2_SRM2 {
        Host = srm-diskandtape.cern.ch
        Protocol = srm
        SpaceToken = Tape
        Path = /base/pathDisk
      }
    }
    # Normally does not happen in practice, but this is the same as DiskStorageA with more plugins
    DiskStorageAWithMoreProtocol {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      GFAL2_SRM2 {
        Host = srm-diskandtape.cern.ch
        SpaceToken = Disk
        Protocol = srm
        Path = /base/pathDisk
      }
      GFAL2_GSIFTP {
        Host = gsiftp-diskandtape.cern.ch
        SpaceToken = Disk
        Protocol = gsiftp
        Path = /base/pathDisk
      }
    }
    # A different storage
    StorageB {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      GFAL2_GSIFTP {
        Host = otherstorage.cern.ch
        SpaceToken = Disk
        Protocol = gsiftp
        Path = /base/pathDisk
      }
    }
    # The same endpoint as StorageB but with differetn base path, so not the same
    StorageBWithOtherBasePath {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      GFAL2_GSIFTP {
        Host = otherstorage.cern.ch
        SpaceToken = Disk
        Protocol = gsiftp
        Path = /base/otherPath
      }
    }
  }
  Operations{
    Defaults {
      DataManagement{
        AccessProtocols = fakeProto
        AccessProtocols += root
        WriteProtocols = srm
      }
    }
  }
'''
    with open(self.testCfgFileName, 'w') as f:
        f.write(cfgContent)
    # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
    # not to conflict with other tests that might be using a local dirac.cfg
    gConfigurationData.localCFG = CFG()
    gConfigurationData.remoteCFG = CFG()
    gConfigurationData.mergedCFG = CFG()
    gConfigurationData.generateNewVersion()

    gConfig = ConfigurationClient(fileToLoadList=[self.testCfgFileName])  # we replace the configuration by our own one.

    # Instantiate the storage elements used by the tests; the vo is set
    # explicitly since no proxy is available in the test environment
    self.diskStorageA = StorageElementItem('DiskStorageA')
    self.diskStorageA.vo = 'lhcb'
    self.tapeStorageA = StorageElementItem('TapeStorageA')
    self.tapeStorageA.vo = 'lhcb'
    self.diskStorageAWithMoreProtocol = StorageElementItem('DiskStorageAWithMoreProtocol')
    self.diskStorageAWithMoreProtocol.vo = 'lhcb'
    self.storageB = StorageElementItem('StorageB')
    self.storageB.vo = 'lhcb'
    self.storageBWithOtherBasePath = StorageElementItem('StorageBWithOtherBasePath')
    self.storageBWithOtherBasePath.vo = 'lhcb'
def generateConfig():
    """
    This generates the test configuration once for the module, and removes it when done
    """
    # Clean first the config from potential other leaking tests
    gConfigurationData.localCFG = CFG()
    gConfigurationData.remoteCFG = CFG()
    gConfigurationData.mergedCFG = CFG()
    gConfigurationData.generateNewVersion()

    testCfgFileName = os.path.join(tempfile.gettempdir(), "test_FTS3Plugin.cfg")
    cfgContent = """
Resources {
  StorageElementBases {
    CERN-Disk {
      BackendType = EOS
      AccessProtocol.0 {
        Host =
        PluginName = File
        Protocol = file
        Path =
      }
    }
    CERN-Tape {
      BackendType = CTA
      AccessProtocol.0 {
        Host =
        PluginName = File
        Protocol = file
        Path =
      }
    }
    RAL-Disk {
      BackendType = Echo
      AccessProtocol.0 {
        Host =
        PluginName = File
        Protocol = file
        Path =
      }
    }
    RAL-Tape {
      BackendType = Castor
      AccessProtocol.0 {
        Host =
        PluginName = File
        Protocol = file
        Path =
      }
    }
    CNAF-Disk {
      BackendType = EOS
      AccessProtocol.0 {
        Host =
        PluginName = File
        Protocol = file
        Path =
      }
    }
    CNAF-Tape {
      BackendType = CTA
      AccessProtocol.0 {
        Host =
        PluginName = File
        Protocol = file
        Path =
      }
    }
    IN2P3-Disk {
      BackendType = EOS
      AccessProtocol.0 {
        Host =
        PluginName = File
        Protocol = file
        Path =
      }
    }
  }
  StorageElements {
    CERN-DST {
      BaseSE = CERN-Disk
    }
    CERN-RAW {
      BaseSE = CERN-Tape
    }
    RAL-DST {
      BaseSE = RAL-Disk
    }
    RAL-RAW {
      BaseSE = RAL-Tape
    }
    CNAF-DST {
      BaseSE = CNAF-Disk
    }
    CNAF_MC-DST {
      BaseSE = CNAF-Disk
    }
    IN2P3-DST {
      BaseSE = IN2P3-Disk
    }
  }
}
Operations{
  Defaults {
    DataManagement {
      MultiHopMatrixOfShame {
        # Used for any source which does not have a more specific rule
        Default {
          # Default -> Default basically means "anything else than all the other defined routes"
          Default = GlobalDefault
          # Hop between "anything else" and IN3P3-DST
          IN2P3-DST = DefaultToIN2P3-DST
          # Hop between "anything else" and any SE inheriting from CNAF-Disk
          CNAF-Disk = DefaultToCNAF-Disk
        }
        # Any transfer starting from CERN-RAW
        CERN-RAW {
          # CERN-RAW -> anywhere else
          Default = DefaultFromCERN-RAW
          # Do not use multihop between CERN-RAW and SE inheriting from CERN-Disk
          CERN-Disk = disabled
          # CERN-RAW -> any SE inheriting from CNAF-Disk
          CNAF-Disk = CERN-RAW-CNAF-Disk
          # CERN-RAW->CNAF-DST (takes precedence over CERN-RAW -> CNAF-Disk)
          CNAF-DST = CERN-RAW-CNAF-DST
          # CERN-RAW -> IN2P3-DST
          IN2P3-DST = disabled
        }
        # Any transfer starting from any SE inheriting from RAL-Tape
        RAL-Tape {
          # RAL-Tape -> anywhere else: do not use multihop
          Default = disabled
          # any SE inheriting from RAL-Tape -> any SE inheriting from CNAF-Disk
          CNAF-Disk = RAL-Tape-CNAF-Disk
          # any SE inheriting from RAL-Tape -> CNAF-DST (takes precedence over the previous rule)
          CNAF-DST = RAL-Tape-CNAF-DST
        }
        # Any transfer starting from IN2P3-DST
        IN2P3-DST {
          # IN2P2-DST -> CNAF-DST
          CNAF-DST = IN2P3-DST-CNAF-DST
        }
      }
    }
  }
}
"""
    with open(testCfgFileName, "w") as f:
        f.write(cfgContent)
    # Load the configuration
    ConfigurationClient(fileToLoadList=[testCfgFileName])  # we replace the configuration by our own one.

    yield

    # Teardown: remove the cfg file (best effort) and reset the global config
    try:
        os.remove(testCfgFileName)
    except OSError:
        pass
    # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
    # not to conflict with other tests that might be using a local dirac.cfg
    gConfigurationData.localCFG = CFG()
    gConfigurationData.remoteCFG = CFG()
    gConfigurationData.mergedCFG = CFG()
    gConfigurationData.generateNewVersion()
def setUp( self, _mk_generateStorage, _mk_isLocalSE, _mk_addAccountingOperation ):
    """Write a temporary cfg file describing storage elements with various
    access/write protocol setups, load it as the active configuration, and
    build the StorageElementItem objects used by the tests.

    The ``_mk_*`` parameters are presumably the mocks injected by
    ``@mock.patch`` decorators on this method — confirm against the class
    definition; they are not used directly here.
    """
    #Creating test configuration file
    self.testCfgFileName = os.path.join(tempfile.gettempdir(), 'test_StorageElement.cfg')
    cfgContent='''
DIRAC {
  Setup=TestSetup
}
Resources{
  StorageElements{
    StorageA {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocol.0 {
        Host =
        PluginName = File
        Protocol = file
        Path =
      }
    }
    StorageB {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocol.0 {
        Host =
        PluginName = SRM2
        Protocol = srm
        Path =
      }
    }
    StorageC {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocol.0 {
        Host =
        PluginName = XROOT
        Protocol = root
        Path =
      }
    }
    StorageD {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocol.0 {
        Host =
        PluginName = SRM2
        Protocol = srm
        Path =
      }
      AccessProtocol.1 {
        Host =
        PluginName = XROOT
        Protocol = root
        Path =
      }
    }
    StorageE {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      WriteProtocols = root
      WriteProtocols += srm
      AccessProtocol.0 {
        Host =
        PluginName = SRM2
        Protocol = srm
        Path =
      }
      AccessProtocol.1 {
        Host =
        PluginName = XROOT
        Protocol = root
        Path =
      }
    }
    StorageX {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      WriteProtocols = gsiftp
      AccessProtocols = root
      AccessProtocol.0 {
        Host =
        PluginName = GSIFTP
        Protocol = gsiftp
        Path =
      }
      AccessProtocol.1 {
        Host =
        PluginName = XROOT
        Protocol = root
        Path =
      }
    }
    StorageY {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocols = gsiftp
      AccessProtocols += srm
      AccessProtocol.0 {
        Host =
        PluginName = GSIFTP
        Protocol = gsiftp
        Path =
      }
      AccessProtocol.1 {
        Host =
        PluginName = SRM2
        Protocol = srm
        Path =
      }
    }
    StorageZ {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocols = root
      AccessProtocols += srm
      WriteProtocols = root
      WriteProtocols += srm
      AccessProtocol.0 {
        Host =
        PluginName = ROOT
        Protocol = root
        Path =
      }
      AccessProtocol.1 {
        Host =
        PluginName = SRM2
        Protocol = srm
        Path =
      }
    }
  }
}
Operations{
  Defaults {
    DataManagement{
      AccessProtocols = fakeProto
      AccessProtocols += root
      WriteProtocols = srm
    }
  }
}
'''
    with open(self.testCfgFileName, 'w') as f:
        f.write(cfgContent)
    # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
    # not to conflict with other tests that might be using a local dirac.cfg
    gConfigurationData.localCFG=CFG()
    gConfigurationData.remoteCFG=CFG()
    gConfigurationData.mergedCFG=CFG()
    gConfigurationData.generateNewVersion()

    gConfig = ConfigurationClient(fileToLoadList = [self.testCfgFileName])  #we replace the configuration by our own one.

    # Instantiate the storage elements used by the tests; the vo is set
    # explicitly since no proxy is available in the test environment
    self.seA = StorageElementItem( 'StorageA' )
    self.seA.vo = 'lhcb'
    self.seB = StorageElementItem( 'StorageB' )
    self.seB.vo = 'lhcb'
    self.seC = StorageElementItem( 'StorageC' )
    self.seC.vo = 'lhcb'
    self.seD = StorageElementItem( 'StorageD' )
    self.seD.vo = 'lhcb'
    self.seE = StorageElementItem( 'StorageE' )
    self.seE.vo = 'lhcb'
    self.seX = StorageElementItem( 'StorageX' )
    self.seX.vo = 'lhcb'
    self.seY = StorageElementItem( 'StorageY' )
    self.seY.vo = 'lhcb'
    self.seZ = StorageElementItem( 'StorageZ' )
    self.seZ.vo = 'lhcb'
def config(request):
    """
    fixture is the pytest way to declare an initialization function.
    Scope = module means that this function will be called only once for this file.
    If no scope is specified, config is called for each test.
    This function can have a return value; it will be the value of the 'config'
    argument for the tests.
    """
    cfgContent = """
DIRAC {
  Setup=TestSetup
  Setups {
    TestSetup {
      WorkloadManagement=MyWM
    }
  }
}
Systems {
  WorkloadManagement {
    MyWM {
      URLs {
        ServiceDips = dips://$MAINSERVERS$:1234/WorkloadManagement/ServiceDips
        ServiceHttps = https://$MAINSERVERS$:1234/WorkloadManagement/ServiceHttps
      }
    }
  }
}
Operations{
  Defaults {
    MainServers = server1, server2
  }
}
"""
    # testCfgFileName is expected to be defined at module level — TODO confirm
    with open(testCfgFileName, "w") as f:
        f.write(cfgContent)
    gConfig = ConfigurationClient(fileToLoadList=[testCfgFileName])  # we replace the configuration by our own one.

    # def tearDown():
    # Wait for teardown; everything after the yield runs at the end of the tests
    yield config

    """ This function is called at the end of the test. """
    try:
        os.remove(testCfgFileName)
    except OSError:
        pass
    # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
    # not to conflict with other tests that might be using a local dirac.cfg
    gConfigurationData.localCFG = CFG()
    gConfigurationData.remoteCFG = CFG()
    gConfigurationData.mergedCFG = CFG()
    gConfigurationData.generateNewVersion()
    print("TearDown")
def __generateNewVersion( self ):
    """Persist configuration changes: bump the version and write it to disk.

    Only the master configuration server may do this; on any other
    server the call is a no-op.
    """
    if not gConfigurationData.isMaster():
        return
    gConfigurationData.generateNewVersion()
    gConfigurationData.writeRemoteConfigurationToDisk()
def generateConfig():
    """
    Generate the configuration that will be used for all the test
    """
    testCfgFileName = os.path.join(tempfile.gettempdir(), "test_FTS3Plugin.cfg")
    cfgContent = """
DIRAC {
  VirtualOrganization = lhcb
}
Resources {
  # We define a few SEBases with various protocols
  StorageElementBases {
    CERN-Disk {
      BackendType = EOS
      GFAL2_HTTPS {
        Host = cerneos.cern.ch
        Protocol = https
        Path = /eos
        Access = remote
      }
      GFAL2_XROOT {
        Host = cerneos.cern.ch
        Protocol = root
        Path = /eos
        Access = remote
      }
    }
    CERN-Tape {
      BackendType = CTA
      SEType = T1D0
      # This StageProtocol will triger some multihop staging cases
      StageProtocols = root
      CTA {
        Host = cerncta.cern.ch
        Protocol = root
        Path = /eos/ctalhcbpps/archivetest/
        Access = remote
      }
      GFAL2_HTTPS {
        Host = cerncta.cern.ch
        Protocol = https
        Path = /eos/ctalhcbpps/archivetest/
        Access = remote
      }
    }
    RAL-Disk {
      BackendType = Echo
      GFAL2_XROOT {
        Host = ralecho.gridpp.uk
        Protocol = root
        Path = /echo
        Access = remote
      }
    }
    RAL-Tape {
      BackendType = Castor
      AccessProtocol.0 {
        Host =
        PluginName = File
        Protocol = file
        Path =
      }
    }
    CNAF-Disk {
      BackendType = Storm
      GFAL2_HTTPS {
        Host = cnafstorm.infc.it
        Protocol = https
        Path = /storm
        Access = remote
      }
    }
  }
  StorageElements {
    CERN-DST {
      BaseSE = CERN-Disk
    }
    CERN-RAW {
      BaseSE = CERN-Tape
    }
    CNAF-DST {
      BaseSE = CNAF-Disk
    }
    RAL-DST {
      BaseSE = RAL-Disk
    }
  }
}
Operations{
  Defaults {
    DataManagement {
      AccessProtocols=https,root
      WriteProtocols=https,root
      ThirdPartyProtocols = https,root
    }
  }
}
"""
    with open(testCfgFileName, "w") as f:
        f.write(cfgContent)
    # Load the configuration
    ConfigurationClient(fileToLoadList=[testCfgFileName])  # we replace the configuration by our own one.

    yield

    # Teardown: remove the cfg file (best effort) and reset the global config
    try:
        os.remove(testCfgFileName)
    except OSError:
        pass
    # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
    # not to conflict with other tests that might be using a local dirac.cfg
    gConfigurationData.localCFG = CFG()
    gConfigurationData.remoteCFG = CFG()
    gConfigurationData.mergedCFG = CFG()
    gConfigurationData.generateNewVersion()
def setUp(self, _mk_generateStorage, _mk_isLocalSE, _mk_addAccountingOperation):
    """Write a temporary cfg file describing storage elements that share (or
    not) the same endpoint, load it as the active configuration, and build
    the StorageElementItem objects compared by the tests.

    The ``_mk_*`` parameters are presumably the mocks injected by
    ``@mock.patch`` decorators on this method — confirm against the class
    definition; they are not used directly here.
    """
    # Creating test configuration file
    self.testCfgFileName = os.path.join(tempfile.gettempdir(), 'test_StorageElement.cfg')
    cfgContent = '''
DIRAC {
  Setup=TestSetup
}
Resources{
  StorageElements{
    DiskStorageA {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      GFAL2_SRM2 {
        Host = srm-diskandtape.cern.ch
        SpaceToken = Disk
        Protocol = srm
        Path = /base/pathDisk
      }
    }
    # Same end point as DiskStorageA, but with a different space token
    # So they should be considered the same
    TapeStorageA {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      GFAL2_SRM2 {
        Host = srm-diskandtape.cern.ch
        Protocol = srm
        SpaceToken = Tape
        Path = /base/pathDisk
      }
    }
    # Normally does not happen in practice, but this is the same as DiskStorageA with more plugins
    DiskStorageAWithMoreProtocol {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      GFAL2_SRM2 {
        Host = srm-diskandtape.cern.ch
        SpaceToken = Disk
        Protocol = srm
        Path = /base/pathDisk
      }
      GFAL2_GSIFTP {
        Host = gsiftp-diskandtape.cern.ch
        SpaceToken = Disk
        Protocol = gsiftp
        Path = /base/pathDisk
      }
    }
    # A different storage
    StorageB {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      GFAL2_GSIFTP {
        Host = otherstorage.cern.ch
        SpaceToken = Disk
        Protocol = gsiftp
        Path = /base/pathDisk
      }
    }
    # The same endpoint as StorageB but with differetn base path, so not the same
    StorageBWithOtherBasePath {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      GFAL2_GSIFTP {
        Host = otherstorage.cern.ch
        SpaceToken = Disk
        Protocol = gsiftp
        Path = /base/otherPath
      }
    }
  }
  Operations{
    Defaults {
      DataManagement{
        AccessProtocols = fakeProto
        AccessProtocols += root
        WriteProtocols = srm
      }
    }
  }
'''
    with open(self.testCfgFileName, 'w') as f:
        f.write(cfgContent)
    # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
    # not to conflict with other tests that might be using a local dirac.cfg
    gConfigurationData.localCFG = CFG()
    gConfigurationData.remoteCFG = CFG()
    gConfigurationData.mergedCFG = CFG()
    gConfigurationData.generateNewVersion()

    gConfig = ConfigurationClient(fileToLoadList=[ self.testCfgFileName ])  # we replace the configuration by our own one.

    # Instantiate the storage elements used by the tests; the vo is set
    # explicitly since no proxy is available in the test environment
    self.diskStorageA = StorageElementItem('DiskStorageA')
    self.diskStorageA.vo = 'lhcb'
    self.tapeStorageA = StorageElementItem('TapeStorageA')
    self.tapeStorageA.vo = 'lhcb'
    self.diskStorageAWithMoreProtocol = StorageElementItem( 'DiskStorageAWithMoreProtocol')
    self.diskStorageAWithMoreProtocol.vo = 'lhcb'
    self.storageB = StorageElementItem('StorageB')
    self.storageB.vo = 'lhcb'
    self.storageBWithOtherBasePath = StorageElementItem( 'StorageBWithOtherBasePath')
    self.storageBWithOtherBasePath.vo = 'lhcb'
def setUp(self, _mk_generateStorage, _mk_isLocalSE, _mk_addAccountingOperation):
    """Write a temporary cfg file describing storage elements with various
    access/write protocol setups, load it as the active configuration, and
    build the StorageElementItem objects used by the tests.

    The ``_mk_*`` parameters are presumably the mocks injected by
    ``@mock.patch`` decorators on this method — confirm against the class
    definition; they are not used directly here.
    """
    # Creating test configuration file
    self.testCfgFileName = os.path.join(tempfile.gettempdir(), 'test_StorageElement.cfg')
    cfgContent = '''
DIRAC {
  Setup=TestSetup
}
Resources{
  StorageElements{
    StorageA {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocol.0 {
        Host =
        PluginName = File
        Protocol = file
        Path =
      }
    }
    StorageB {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocol.0 {
        Host =
        PluginName = SRM2
        Protocol = srm
        Path =
      }
    }
    StorageC {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocol.0 {
        Host =
        PluginName = XROOT
        Protocol = root
        Path =
      }
    }
    StorageD {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocol.0 {
        Host =
        PluginName = SRM2
        Protocol = srm
        Path =
      }
      AccessProtocol.1 {
        Host =
        PluginName = XROOT
        Protocol = root
        Path =
      }
    }
    StorageE {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      WriteProtocols = root
      WriteProtocols += srm
      AccessProtocol.0 {
        Host =
        PluginName = SRM2
        Protocol = srm
        Path =
      }
      AccessProtocol.1 {
        Host =
        PluginName = XROOT
        Protocol = root
        Path =
      }
    }
    StorageX {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      WriteProtocols = gsiftp
      AccessProtocols = root
      AccessProtocol.0 {
        Host =
        PluginName = GSIFTP
        Protocol = gsiftp
        Path =
      }
      AccessProtocol.1 {
        Host =
        PluginName = XROOT
        Protocol = root
        Path =
      }
    }
    StorageY {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocols = gsiftp
      AccessProtocols += srm
      AccessProtocol.0 {
        Host =
        PluginName = GSIFTP
        Protocol = gsiftp
        Path =
      }
      AccessProtocol.1 {
        Host =
        PluginName = SRM2
        Protocol = srm
        Path =
      }
    }
    StorageZ {
      BackendType = local
      ReadAccess = Active
      WriteAccess = Active
      AccessProtocols = root
      AccessProtocols += srm
      WriteProtocols = root
      WriteProtocols += srm
      AccessProtocol.0 {
        Host =
        PluginName = ROOT
        Protocol = root
        Path =
      }
      AccessProtocol.1 {
        Host =
        PluginName = SRM2
        Protocol = srm
        Path =
      }
    }
  }
}
Operations{
  Defaults {
    DataManagement{
      AccessProtocols = fakeProto
      AccessProtocols += root
      WriteProtocols = srm
    }
  }
}
'''
    with open(self.testCfgFileName, 'w') as f:
        f.write(cfgContent)
    # SUPER UGLY: one must recreate the CFG objects of gConfigurationData
    # not to conflict with other tests that might be using a local dirac.cfg
    gConfigurationData.localCFG = CFG()
    gConfigurationData.remoteCFG = CFG()
    gConfigurationData.mergedCFG = CFG()
    gConfigurationData.generateNewVersion()

    gConfig = ConfigurationClient(fileToLoadList=[ self.testCfgFileName ])  # we replace the configuration by our own one.

    # Instantiate the storage elements used by the tests; the vo is set
    # explicitly since no proxy is available in the test environment
    self.seA = StorageElementItem('StorageA')
    self.seA.vo = 'lhcb'
    self.seB = StorageElementItem('StorageB')
    self.seB.vo = 'lhcb'
    self.seC = StorageElementItem('StorageC')
    self.seC.vo = 'lhcb'
    self.seD = StorageElementItem('StorageD')
    self.seD.vo = 'lhcb'
    self.seE = StorageElementItem('StorageE')
    self.seE.vo = 'lhcb'
    self.seX = StorageElementItem('StorageX')
    self.seX.vo = 'lhcb'
    self.seY = StorageElementItem('StorageY')
    self.seY.vo = 'lhcb'
    self.seZ = StorageElementItem('StorageZ')
    self.seZ.vo = 'lhcb'
def __generateNewVersion(self):
    """Save configuration changes by generating a new version and flushing
    it to disk; only meaningful on the master configuration server."""
    isMaster = gConfigurationData.isMaster()
    if isMaster:
        gConfigurationData.generateNewVersion()
        gConfigurationData.writeRemoteConfigurationToDisk()