def getInfo(params): ''' Retrieve information from BDII ''' from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin diracAdmin = DiracAdmin() if params['info'] == 'ce': result = diracAdmin.getBDIICE(params['ce'], host=params['host']) if params['info'] == 'ce-state': result = diracAdmin.getBDIICEState(params['ce'], useVO=params['vo'], host=params['host']) if params['info'] == 'ce-cluster': result = diracAdmin.getBDIICluster(params['ce'], host=params['host']) if params['info'] == 'ce-vo': result = diracAdmin.getBDIICEVOView(params['ce'], useVO=params['vo'], host=params['host']) if params['info'] == 'site': result = diracAdmin.getBDIISite(params['site'], host=params['host']) if params['info'] == 'site-se': result = diracAdmin.getBDIISE(params['site'], useVO=params['vo'], host=params['host']) if not result['OK']: print result['Message'] DIRAC.exit(2) return result
def run( parameters , delete, nthreads ):
    """ The main user interface """
    # parameters: [source, dest] for a download, [source, dest, SE] for an upload
    source_dir, dest_dir = parameters[0], parameters[1]
    upload = False
    storage = None
    if len( parameters ) == 3:
        # Upload mode: local source directory -> remote destination on an SE
        storage = parameters[2]
        source_dir = os.path.abspath(source_dir)
        dest_dir = dest_dir.rstrip('/')
        upload = True
        if not os.path.isdir(source_dir):
            gLogger.fatal("Source directory does not exist")
            DIRAC.exit( 1 )
    if len (parameters ) == 2:
        # Download mode: remote source -> local destination directory
        dest_dir = os.path.abspath(dest_dir)
        source_dir = source_dir.rstrip('/')
        if not os.path.isdir(dest_dir):
            gLogger.fatal("Destination directory does not exist")
            DIRAC.exit( 1 )
    res = syncDestinations( upload, source_dir, dest_dir, storage, delete, nthreads )
    if not res['OK']:
        return S_ERROR(res['Message'])
    return S_OK("Successfully mirrored " + source_dir + " into " + dest_dir)
def showHelp( self, dummy = False ):
  """ Printout help message including a Usage message if defined via setUsageMessage method

      :param dummy: optional extra usage text printed after the default
                    Usage block (only when no custom usage message is set)
      Always terminates the process with exit code 0.
  """
  if self.__usageMessage:
    gLogger.notice( self.__usageMessage )
  else:
    gLogger.notice( "Usage:" )
    gLogger.notice( " %s (<options>|<cfgFile>)*" % os.path.basename( sys.argv[0] ) )
    if dummy:
      gLogger.notice( dummy )
  gLogger.notice( "General options:" )
  iLastOpt = 0
  # General options are everything up to and including the 'h' (help) switch
  for iPos in range( len( self.commandOptionList ) ):
    optionTuple = self.commandOptionList[ iPos ]
    # optionTuple: (shortSwitch, longSwitch, description, ...)
    gLogger.notice( " %s --%s : %s" % ( ( "-" + optionTuple[0].ljust( 3 ) if optionTuple[0] else " " * 4 ),
                                        optionTuple[1].ljust( 15 ),
                                        optionTuple[2] ) )
    iLastOpt = iPos
    if optionTuple[0] == 'h':
      #Last general opt is always help
      break
  # Remaining switches (if any) are script-specific options
  if iLastOpt + 1 < len( self.commandOptionList ):
    gLogger.notice( " \nOptions:" )
    for iPos in range( iLastOpt + 1, len( self.commandOptionList ) ):
      optionTuple = self.commandOptionList[ iPos ]
      if optionTuple[0]:
        gLogger.notice( " -%s --%s : %s" % ( optionTuple[0].ljust( 3 ), optionTuple[1].ljust( 15 ), optionTuple[2] ) )
      else:
        gLogger.notice( " %s --%s : %s" % ( ' ', optionTuple[1].ljust( 15 ), optionTuple[2] ) )
  DIRAC.exit( 0 )
def addProperty( arg ):
  """ Command-line switch callback: collect *arg* into the global list of
      group properties (duplicates ignored). Shows help and exits with -1
      when called with an empty value. """
  global groupProperties
  if not arg:
    Script.showHelp()
    DIRAC.exit( -1 )
  # idiomatic membership test (was 'not arg in ...')
  if arg not in groupProperties:
    groupProperties.append( arg )
def addUserName( arg ):
  """ Command-line switch callback: collect *arg* into the global list of
      user names (duplicates ignored). Shows help and exits with -1 when
      called with an empty value. """
  global userNames
  if not arg:
    Script.showHelp()
    DIRAC.exit( -1 )
  # idiomatic membership test (was 'not arg in ...')
  if arg not in userNames:
    userNames.append( arg )
def addProperty( arg ):
  """ Command-line switch callback: collect *arg* into the global list of
      host properties (duplicates ignored). Shows help and exits with -1
      when called with an empty value. """
  global hostProperties
  if not arg:
    Script.showHelp()
    DIRAC.exit( -1 )
  # idiomatic membership test (was 'not arg in ...')
  if arg not in hostProperties:
    hostProperties.append( arg )
def addUserGroup(arg):
    """Command-line switch callback: collect *arg* into the global list of
    user groups (duplicates ignored). Shows help and exits with -1 when
    called with an empty value.
    """
    global userGroups
    if not arg:
        Script.showHelp()
        DIRAC.exit(-1)
    # idiomatic membership test (was 'not arg in ...')
    if arg not in userGroups:
        userGroups.append(arg)
def __loadConfigurationData( self ):
  """ Load the CS data from disk and, when this instance is the master
      server, make sure the configuration carries a name, a version and
      this server's URL, rewriting it to disk if anything was added. """
  try:
    # Best-effort creation of the backup directory; 'already exists' and
    # permission problems are ignored.  Narrowed from a bare 'except:' so
    # that non-OS errors (e.g. KeyboardInterrupt) are no longer swallowed.
    os.makedirs( os.path.join( DIRAC.rootPath, "etc", "csbackup" ) )
  except OSError:
    pass
  gConfigurationData.loadConfigurationData()
  if gConfigurationData.isMaster():
    bBuiltNewConfiguration = False
    if not gConfigurationData.getName():
      DIRAC.abort( 10, "Missing name for the configuration to be exported!" )
    gConfigurationData.exportName()
    sVersion = gConfigurationData.getVersion()
    if sVersion == "0":
      gLogger.info( "There's no version. Generating a new one" )
      gConfigurationData.generateNewVersion()
      bBuiltNewConfiguration = True
    if self.sURL not in gConfigurationData.getServers():
      gConfigurationData.setServers( self.sURL )
      bBuiltNewConfiguration = True
    gConfigurationData.setMasterServer( self.sURL )
    if bBuiltNewConfiguration:
      gConfigurationData.writeRemoteConfigurationToDisk()
def setUserMail(arg):
    """Switch callback: record the user's e-mail address.

    The address may be given only once; an empty value or a repeated switch
    prints the help and exits.  Values whose '@' is missing or at position 0
    are rejected as invalid.
    """
    global userMail
    if not arg or userMail:
        Script.showHelp()
        DIRAC.exit(-1)
    if arg.find("@") <= 0:
        Script.gLogger.error("Not a valid mail address", arg)
        DIRAC.exit(-1)
    userMail = arg
def getTimeLeft( self, cpuConsumed ):
  """Returns the CPU Time Left for supported batch systems.
     The CPUConsumed is the current raw total CPU.

     Returns S_OK( timeLeft ) with the remaining CPU budget in normalized
     units (remaining CPU percentage of the batch limit, scaled by
     /LocalSite/CPUScalingFactor), or S_ERROR when no limits are available
     or no margin is left.
  """
  #Quit if no scale factor available
  if not self.scaleFactor:
    return S_ERROR( '/LocalSite/CPUScalingFactor not defined for site %s' % DIRAC.siteName() )
  if not self.batchPlugin:
    return S_ERROR( self.batchError )
  resourceDict = self.batchPlugin.getResourceUsage()
  if not resourceDict['OK']:
    self.log.warn( 'Could not determine timeleft for batch system at site %s' % DIRAC.siteName() )
    return resourceDict
  resources = resourceDict['Value']
  self.log.verbose( resources )
  if not resources['CPULimit'] or not resources['WallClockLimit']:
    return S_ERROR( 'No CPU / WallClock limits obtained' )
  # Express consumption as a percentage of the respective batch limit
  cpuFactor = 100 * float( resources['CPU'] ) / float( resources['CPULimit'] )
  cpuRemaining = 100 - cpuFactor
  cpuLimit = float( resources['CPULimit'] )
  wcFactor = 100 * float( resources['WallClock'] ) / float( resources['WallClockLimit'] )
  wcRemaining = 100 - wcFactor
  # NOTE(review): wcLimit is computed but never used below
  wcLimit = float( resources['WallClockLimit'] )
  self.log.verbose( 'Used CPU is %.02f, Used WallClock is %.02f.' % ( cpuFactor, wcFactor ) )
  self.log.verbose( 'Remaining WallClock %.02f, Remaining CPU %.02f, margin %s' % ( wcRemaining, cpuRemaining, self.cpuMargin ) )
  timeLeft = None
  if wcRemaining > cpuRemaining and ( wcRemaining - cpuRemaining ) > self.cpuMargin:
    # In some cases cpuFactor might be 0
    # timeLeft = float(cpuConsumed*self.scaleFactor*cpuRemaining/cpuFactor)
    # We need time left in the same units used by the Matching
    timeLeft = float( cpuRemaining * cpuLimit / 100 * self.scaleFactor )
    self.log.verbose( 'Remaining WallClock %.02f > Remaining CPU %.02f and difference > margin %s' % ( wcRemaining, cpuRemaining, self.cpuMargin ) )
  else:
    if cpuRemaining > self.cpuMargin and wcRemaining > self.cpuMargin:
      self.log.verbose( 'Remaining WallClock %.02f and Remaining CPU %.02f both > margin %s' % ( wcRemaining, cpuRemaining, self.cpuMargin ) )
      # In some cases cpuFactor might be 0
      # timeLeft = float(cpuConsumed*self.scaleFactor*(wcRemaining-self.cpuMargin)/cpuFactor)
      timeLeft = float( cpuRemaining * cpuLimit / 100 * self.scaleFactor )
    else:
      # Both remaining fractions are inside the safety margin: give up
      self.log.verbose( 'Remaining CPU %.02f < margin %s and WallClock %.02f < margin %s so no time left' % ( cpuRemaining, self.cpuMargin, wcRemaining, self.cpuMargin ) )
  if timeLeft:
    self.log.verbose( 'Remaining CPU in normalized units is: %.02f' % timeLeft )
    return S_OK( timeLeft )
  else:
    return S_ERROR( 'No time left for slot' )
def autoRefreshAndPublish( self, sURL ):
  """ Switch this refresher into automatic update mode against the
      configuration server at *sURL* and start the background thread
      (this object exposes setDaemon/start, presumably a Thread subclass
      -- TODO confirm). Aborts when the configuration has no name. """
  gLogger.debug( "Setting configuration refresh as automatic" )
  if not gConfigurationData.getAutoPublish():
    gLogger.debug( "Slave server won't auto publish itself" )
  if not gConfigurationData.getName():
    import DIRAC
    DIRAC.abort( 10, "Missing configuration name!" )
  self.__url = sURL
  self.__automaticUpdate = True
  # Daemon thread: does not keep the process alive at shutdown
  self.setDaemon( 1 )
  self.start()
def __createTables( self ):
  """ Create tables if not already created.

      Reads the SQL schema shipped next to this module and aborts the
      process when it cannot be read.
  """
  self.log.info( "Creating tables in db" )
  filePath = "%s/monitoringSchema.sql" % os.path.dirname( __file__ )
  try:
    # 'with' guarantees the descriptor is closed even when read() raises
    # (the original leaked the fd in that case).
    with open( filePath ) as fd:
      buff = fd.read()
  except IOError:
    DIRAC.abort( 1, "Can't read monitoring schema", filePath )
  # NOTE(review): 'buff' appears unused here; the schema is presumably
  # executed by code outside this view -- confirm against the full file.
def downloadListOfFiles(dm, source_dir, dest_dir, listOfFiles, tID):
    """ Wrapper for multithreaded downloading of a list of files """
    subLogger = gLogger.getSubLogger("[Thread %s] " % tID)
    threadLine = "[Thread %s]" % tID
    for filename in listOfFiles:
        # Local target directory = destination root + relative dir part of the file
        targetDir = dest_dir + ("/" + filename).rsplit("/", 1)[0]
        result = downloadRemoteFile(dm, source_dir + "/" + filename, targetDir)
        if result['OK']:
            subLogger.notice(threadLine + " Downloading " + filename + " -> [DONE]")
        else:
            subLogger.fatal(threadLine + ' Downloading ' + filename + ' -X- [FAILED] ' + result['Message'])
            DIRAC.exit(1)
def uploadListOfFiles(dm, source_dir, dest_dir, storage, listOfFiles, tID):
    """ Wrapper for multithreaded uploading of a list of files """
    subLogger = gLogger.getSubLogger("[Thread %s] " % tID)
    threadLine = "[Thread %s]" % tID
    for filename in listOfFiles:
        result = uploadLocalFile(dm, dest_dir + "/" + filename, source_dir + "/" + filename, storage)
        if result['OK']:
            subLogger.notice(threadLine + " Uploading " + filename + " -> [DONE]")
        else:
            subLogger.fatal(threadLine + ' Uploading ' + filename + ' -X- [FAILED] ' + result['Message'])
            DIRAC.exit(1)
def showLicense( self, dummy = False ):
  """Dump the GPLv3 license text to stdout and terminate with exit code 0."""
  licensePath = os.path.join( DIRAC.rootPath, "DIRAC", "LICENSE" )
  out = sys.stdout.write
  out( " - DIRAC is GPLv3 licensed\n\n" )
  try:
    with open( licensePath ) as licenseFile:
      out( licenseFile.read() )
  except IOError:
    # License file missing from this installation: point at the canonical text
    out( "Can't find GPLv3 license at %s. Somebody stole it!\n" % licensePath )
    out( "Please check out http://www.gnu.org/licenses/gpl-3.0.html for more info\n" )
  DIRAC.exit(0)
def am_initialize(self, *initArgs):
    """ Common initialization for all the agents.
    This is executed every time an agent (re)starts.
    This is called by the AgentReactor, should not be overridden.

    Calls the subclass initialize(), prepares control/work directories,
    checks the executors and Enabled flag, then logs a banner with the
    agent configuration.  Returns S_OK on success, S_ERROR otherwise.
    """
    agentName = self.am_getModuleParam('fullName')
    # Subclass hook: must return an S_OK/S_ERROR structure
    result = self.initialize(*initArgs)
    if not isReturnStructure(result):
        return S_ERROR("initialize must return S_OK/S_ERROR")
    if not result['OK']:
        return S_ERROR("Error while initializing %s: %s" % (agentName, result['Message']))
    mkDir(self.am_getControlDirectory())
    workDirectory = self.am_getWorkDirectory()
    mkDir(workDirectory)
    # Set the work directory in an environment variable available to subprocesses if needed
    os.environ['AGENT_WORKDIRECTORY'] = workDirectory
    self.__moduleProperties['shifterProxy'] = self.am_getOption('shifterProxy')
    if self.am_monitoringEnabled():
        self.monitor.enable()
    if len(self.__moduleProperties['executors']) < 1:
        return S_ERROR("At least one executor method has to be defined")
    if not self.am_Enabled():
        return S_ERROR("Agent is disabled via the configuration")
    # Startup banner
    self.log.notice("=" * 40)
    self.log.notice("Loaded agent module %s" % self.__moduleProperties['fullName'])
    self.log.notice(" Site: %s" % DIRAC.siteName())
    self.log.notice(" Setup: %s" % gConfig.getValue("/DIRAC/Setup"))
    self.log.notice(" Base Module version: %s " % __RCSID__)
    self.log.notice(" Agent version: %s" % self.__codeProperties['version'])
    self.log.notice(" DIRAC version: %s" % DIRAC.version)
    self.log.notice(" DIRAC platform: %s" % DIRAC.getPlatform())
    pollingTime = int(self.am_getOption('PollingTime'))
    if pollingTime > 3600:
        self.log.notice(" Polling time: %s hours" % (pollingTime / 3600.))
    else:
        self.log.notice(" Polling time: %s seconds" % self.am_getOption('PollingTime'))
    self.log.notice(" Control dir: %s" % self.am_getControlDirectory())
    self.log.notice(" Work dir: %s" % self.am_getWorkDirectory())
    if self.am_getOption('MaxCycles') > 0:
        self.log.notice(" Cycles: %s" % self.am_getMaxCycles())
    else:
        self.log.notice(" Cycles: unlimited")
    if self.am_getWatchdogTime() > 0:
        self.log.notice(" Watchdog interval: %s" % self.am_getWatchdogTime())
    else:
        self.log.notice(" Watchdog interval: disabled ")
    self.log.notice("=" * 40)
    self.__initialized = True
    return S_OK()
def _getTSFiles(self):
    """ Helper function - get files from the TS.

    Queries the transformation system for the files of transformation
    self.prod, optionally restricted to self._lfns or to the runs of
    self.fromProd that are in status self.runStatus.

    :returns: tuple (processedLFNs, nonProcessedLFNs, nonProcessedStatuses)
    """
    selectDict = {'TransformationID': self.prod}
    if self._lfns:
        selectDict['LFN'] = self._lfns
    elif self.runStatus and self.fromProd:
        res = self.transClient.getTransformationRuns(
            {'TransformationID': self.fromProd, 'Status': self.runStatus})
        if not res['OK']:
            # Fixed: the failed query was on self.fromProd, not self.prod
            gLogger.error("Failed to get runs for transformation %d" % self.fromProd)
        else:
            if res['Value']:
                self.runsList.extend(
                    [run['RunNumber'] for run in res['Value'] if run['RunNumber'] not in self.runsList])
                gLogger.notice("%d runs selected" % len(res['Value']))
            elif not self.runsList:
                gLogger.notice("No runs selected, check completed")
                DIRAC.exit(0)
    if not self._lfns and self.runsList:
        selectDict['RunNumber'] = self.runsList
    res = self.transClient.getTransformation(self.prod)
    if not res['OK']:
        gLogger.error("Failed to find transformation %s" % self.prod)
        return [], [], []
    status = res['Value']['Status']
    if status not in ('Active', 'Stopped', 'Completed', 'Idle'):
        gLogger.notice("Transformation %s in status %s, will not check if files are processed" % (
            self.prod, status))
    processedLFNs = []
    nonProcessedLFNs = []
    nonProcessedStatuses = []
    if self._lfns:
        # Explicit LFN list given: trust it as the 'processed' set
        processedLFNs = self._lfns
    else:
        res = self.transClient.getTransformationFiles(selectDict)
        if not res['OK']:
            gLogger.error("Failed to get files for transformation %d" % self.prod, res['Message'])
            return [], [], []
        else:
            processedLFNs = [item['LFN'] for item in res['Value'] if item['Status'] == 'Processed']
            nonProcessedLFNs = [item['LFN'] for item in res['Value'] if item['Status'] != 'Processed']
            nonProcessedStatuses = list(
                set(item['Status'] for item in res['Value'] if item['Status'] != 'Processed'))
    return processedLFNs, nonProcessedLFNs, nonProcessedStatuses
def run( self, lfn, metas ): retVal = self.catalog.getMeta( lfn ) if not retVal[ "OK" ]: print "Error:", retVal[ "Message" ] DIRAC.exit( -1 ) metadict = retVal[ "Value" ] if not metas: for k, v in metadict.items( ): print k+"="+str( v ) else: for meta in metas: if meta in metadict.keys( ): print meta+"="+metadict[ meta ]
def getInfo():
    """Retrieve information about setup, etc.

    :returns: tuple (fields, records) suitable for tabular printing, with
        one (name, value) record per configuration/proxy property.
    """
    records = []
    records.append(('Setup', gConfig.getValue('/DIRAC/Setup', 'Unknown')))
    records.append(('ConfigurationServer', gConfig.getValue('/DIRAC/Configuration/Servers', [])))
    records.append(('Installation path', DIRAC.rootPath))
    # A bundled mysql binary marks a server installation
    if os.path.exists(os.path.join(DIRAC.rootPath, DIRAC.getPlatform(), 'bin', 'mysql')):
        records.append(('Installation type', 'server'))
    else:
        records.append(('Installation type', 'client'))
    records.append(('Platform', DIRAC.getPlatform()))
    ret = getProxyInfo(disableVOMS=False)
    if ret['OK']:
        # NOTE(review): looks like a debug leftover -- dumps the whole proxy
        # info structure to stdout; consider removing.
        print(pprint.pformat(ret))
        if 'group' in ret['Value']:
            vo = getVOForGroup(ret['Value']['group'])
        else:
            vo = getVOForGroup('')
        if not vo:
            vo = "None"
        records.append(('VirtualOrganization', vo))
        if 'identity' in ret['Value']:
            records.append(('User DN', ret['Value']['identity']))
        if 'secondsLeft' in ret['Value']:
            records.append(('Proxy validity, secs', {'Value': str(ret['Value']['secondsLeft']), 'Just': 'L'}))
    if gConfig.getValue('/DIRAC/Security/UseServerCertificate', True):
        records.append(('Use Server Certificate', 'Yes'))
    else:
        records.append(('Use Server Certificate', 'No'))
    if gConfig.getValue('/DIRAC/Security/SkipCAChecks', False):
        records.append(('Skip CA Checks', 'Yes'))
    else:
        records.append(('Skip CA Checks', 'No'))
    try:
        # Optional gfal bindings: report the version when available
        import gfalthr  # pylint: disable=import-error
        records.append(('gfal version', gfalthr.gfal_version()))
    except BaseException:
        pass
    fields = ['Option', 'Value']
    return fields, records
def jobexec(jobxml, wfParameters):
    """Instantiate the workflow described by *jobxml*, attach the standard
    tools (JobReport, AccountingReport, Request), propagate *wfParameters*
    to the workflow and to the module instances of every step, and run it.
    """
    jobfile = os.path.abspath(jobxml)
    if not os.path.exists(jobfile):
        gLogger.warn('Path to specified workflow %s does not exist' % (jobfile))
        sys.exit(1)
    workflow = fromXMLFile(jobfile)
    gLogger.debug(workflow)
    code = workflow.createCode()
    gLogger.debug(code)
    jobID = 0
    if 'JOBID' in os.environ:
        jobID = os.environ['JOBID']
        gLogger.info('DIRAC JobID %s is running at site %s' % (jobID, DIRAC.siteName()))
    # Standard tools every workflow execution needs
    for toolName, toolInstance in (('JobReport', JobReport(jobID)),
                                   ('AccountingReport', DataStoreClient()),
                                   ('Request', Request())):
        workflow.addTool(toolName, toolInstance)
    # Propagate the command line parameters to the workflow if any
    for pName, pValue in wfParameters.items():
        workflow.setValue(pName, pValue)
    # Propagate the command line parameters to the workflow module instances of each step
    for stepdefinition in workflow.step_definitions.values():
        for moduleInstance in stepdefinition.module_instances:
            for pName, pValue in wfParameters.items():
                if moduleInstance.parameters.find(pName):
                    moduleInstance.parameters.setValue(pName, pValue)
    return workflow.execute()
def __findServiceURL( self ):
  """ Resolve the URL to use for self._destinationSrv, routing through a
      site gateway when one is configured (unless KW_IGNORE_GATEWAYS is
      set in kwargs). Returns S_OK( url ) or S_ERROR. """
  if not self.__initStatus[ 'OK' ]:
    return self.__initStatus
  gatewayURL = False
  if self.KW_IGNORE_GATEWAYS not in self.kwargs or not self.kwargs[ self.KW_IGNORE_GATEWAYS ]:
    dRetVal = gConfig.getOption( "/DIRAC/Gateways/%s" % DIRAC.siteName() )
    if dRetVal[ 'OK' ]:
      # Pick a random gateway and keep only the scheme://host:port part
      rawGatewayURL = List.randomize( List.fromChar( dRetVal[ 'Value'], "," ) )[0]
      gatewayURL = "/".join( rawGatewayURL.split( "/" )[:3] )
  # If the destination already is a full URL, use it (rewritten through the
  # gateway when one applies)
  for protocol in gProtocolDict.keys():
    if self._destinationSrv.find( "%s://" % protocol ) == 0:
      gLogger.debug( "Already given a valid url", self._destinationSrv )
      if not gatewayURL:
        return S_OK( self._destinationSrv )
      gLogger.debug( "Reconstructing given URL to pass through gateway" )
      path = "/".join( self._destinationSrv.split( "/" )[3:] )
      finalURL = "%s/%s" % ( gatewayURL, path )
      gLogger.debug( "Gateway URL conversion:\n %s -> %s" % ( self._destinationSrv, finalURL ) )
      return S_OK( finalURL )
  if gatewayURL:
    gLogger.debug( "Using gateway", gatewayURL )
    return S_OK( "%s/%s" % ( gatewayURL, self._destinationSrv ) )
  # Otherwise look the service name up in the configuration
  try:
    urls = getServiceURL( self._destinationSrv, setup = self.setup )
  except Exception, e:
    return S_ERROR( "Cannot get URL for %s in setup %s: %s" % ( self._destinationSrv, self.setup, str( e ) ) )
  # NOTE(review): 'urls' is assigned but not used in the visible code -- the
  # continuation of this method appears to be truncated in this view.
def __init__(self):
    """Constructor: set the defaults for the OverlayInput module state."""
    super(OverlayInput, self).__init__()
    self.enable = True
    self.STEP_NUMBER = ''
    self.log = gLogger.getSubLogger( "OverlayInput" )
    self.applicationName = 'OverlayInput'
    self.curdir = os.getcwd()
    self.applicationLog = ''
    self.printoutflag = ''
    self.prodid = 0
    self.detector = '' ##needed for backward compatibility
    self.detectormodel = ""
    self.energytouse = ''
    self.energy = 0
    # Background-sample bookkeeping defaults
    self.nbofeventsperfile = 100
    self.lfns = []
    self.nbfilestoget = 0
    self.BkgEvtType = 'gghad'
    self.BXOverlay = 0
    self.ggtohadint = 3.2
    # Signal-sample bookkeeping defaults
    self.nbsigeventsperfile = 0
    self.nbinputsigfile = 1
    self.NbSigEvtsPerJob = 0
    # Data-management helpers and execution context
    self.rm = ReplicaManager()
    self.fc = FileCatalogClient()
    self.site = DIRAC.siteName()
    self.machine = 'clic_cdr'
def getLocationOrderedCatalogs( siteName = '' ):
  """ Return the active catalog URLs ordered so that the catalog of the
      Tier1 closest to *siteName* (resolved from its country code) comes
      first, followed by the remaining catalogs in random order.

      :param siteName: DIRAC site name; defaults to the local site
      :returns: S_OK( list of catalog URLs ) or the failing S_ERROR
  """
  # First get a list of the active catalogs and their location
  res = getActiveCatalogs()
  if not res['OK']:
    gLogger.error( "Failed to get list of active catalogs", res['Message'] )
    return res
  catalogDict = res['Value']
  # Get the tier1 associated to the current location
  if not siteName:
    import DIRAC
    siteName = DIRAC.siteName()
  countryCode = siteName.split( '.' )[-1]
  res = getCountryMappingTier1( countryCode )
  if not res['OK']:
    gLogger.error( "Failed to resolve closest Tier1", res['Message'] )
    return res
  tier1 = res['Value']
  # Create a sorted list of the active readonly catalogs
  catalogList = []
  if tier1 in catalogDict:  # 'in' instead of the removed-in-py3 has_key()
    catalogList.append( catalogDict[tier1] )
    catalogDict.pop( tier1 )
  catalogList.extend( randomize( catalogDict.values() ) )
  return S_OK( catalogList )
def setReplicaProblematic(self,lfn,se,pfn='',reason='Access failure'):
  """ Set replica status to Problematic in the File Catalog

  @param lfn: lfn of the problematic file
  @param se: storage element
  @param pfn: physical file name
  @param reason: as name suggests...
  @return: S_OK()
  """
  rm = ReplicaManager()
  source = "Job %d at %s" % (self.jobID,DIRAC.siteName())
  result = rm.setReplicaProblematic((lfn,pfn,se,reason),source)
  if not result['OK'] or result['Value']['Failed']:
    # We have failed the report, let's attempt the Integrity DB failover
    integrityDB = RPCClient('DataManagement/DataIntegrity',timeout=120)
    fileMetadata = {'Prognosis':reason,'LFN':lfn,'PFN':pfn,'StorageElement':se}
    result = integrityDB.insertProblematic(source,fileMetadata)
    if not result['OK']:
      # Add it to the request ('in' instead of removed-in-py3 has_key())
      if 'Request' in self.workflow_commons:
        request = self.workflow_commons['Request']
        subrequest = DISETSubRequest(result['rpcStub']).getDictionary()
        request.addSubRequest(subrequest,'integrity')
  return S_OK()
def __init__( self ):
  """ Define the accounting record structure for data operations:
      key (dimension) fields, accounting (value) fields and the bucket
      granularities, then register the local site as ExecutionSite. """
  BaseAccountingType.__init__( self )
  # Dimensions the records can be grouped/selected by
  self.definitionKeyFields = [ ( 'OperationType' , "VARCHAR(32)" ),
                               ( 'User', "VARCHAR(32)" ),
                               ( 'ExecutionSite', 'VARCHAR(32)' ),
                               ( 'Source', 'VARCHAR(32)' ),
                               ( 'Destination', 'VARCHAR(32)' ),
                               ( 'Protocol', 'VARCHAR(32)' ),
                               ( 'FinalStatus', 'VARCHAR(32)' ) ]
  # Measured quantities accumulated per bucket
  self.definitionAccountingFields = [ ( 'TransferSize', 'BIGINT UNSIGNED' ),
                                      ( 'TransferTime', 'FLOAT' ),
                                      ( 'RegistrationTime', 'FLOAT' ),
                                      ( 'TransferOK', 'INT UNSIGNED' ),
                                      ( 'TransferTotal', 'INT UNSIGNED' ),
                                      ( 'RegistrationOK', 'INT UNSIGNED' ),
                                      ( 'RegistrationTotal', 'INT UNSIGNED' ) ]
  # (age limit in seconds, bucket length in seconds)
  self.bucketsLength = [ ( 86400 * 3, 900 ), #<3d = 15m
                         ( 86400 * 8, 3600 ), #<1w+1d = 1h
                         ( 15552000, 86400 ), #>1w+1d <6m = 1d
                         ( 31104000, 604800 ), #>6m = 1w
                       ]
  self.checkType()
  self.setValueByKey( 'ExecutionSite', DIRAC.siteName() )
def _initMonitoring( self ):
  """ Register this service's standard monitoring activities (rates, CPU,
      memory, query counters) and component metadata, including any
      secondary names the service is also registered as.  Always returns
      S_OK(). """
  #Init extra bits of monitoring
  self._monitor.setComponentType( MonitoringClient.COMPONENT_SERVICE )
  self._monitor.setComponentName( self._name )
  self._monitor.setComponentLocation( self._cfg.getURL() )
  self._monitor.initialize()
  # Rate-style counters
  self._monitor.registerActivity( "Connections", "Connections received", "Framework", "connections", MonitoringClient.OP_RATE )
  self._monitor.registerActivity( "Queries", "Queries served", "Framework", "queries", MonitoringClient.OP_RATE )
  # Mean-style gauges (CPU/MEM sampled over 600 seconds)
  self._monitor.registerActivity( 'CPU', "CPU Usage", 'Framework', "CPU,%", MonitoringClient.OP_MEAN, 600 )
  self._monitor.registerActivity( 'MEM', "Memory Usage", 'Framework', 'Memory,MB', MonitoringClient.OP_MEAN, 600 )
  self._monitor.registerActivity( 'PendingQueries', "Pending queries", 'Framework', 'queries', MonitoringClient.OP_MEAN )
  self._monitor.registerActivity( 'ActiveQueries', "Active queries", 'Framework', 'threads', MonitoringClient.OP_MEAN )
  self._monitor.registerActivity( 'RunningThreads', "Running threads", 'Framework', 'threads', MonitoringClient.OP_MEAN )
  self._monitor.registerActivity( 'MaxFD', "Max File Descriptors", 'Framework', 'fd', MonitoringClient.OP_MEAN )
  self._monitor.setComponentExtraParam( 'DIRACVersion', DIRAC.version )
  self._monitor.setComponentExtraParam( 'platform', DIRAC.getPlatform() )
  self._monitor.setComponentExtraParam( 'startTime', Time.dateTime() )
  # Best effort: pick version/description off the handler module if present
  for prop in ( ( "__RCSID__", "version" ), ( "__doc__", "description" ) ):
    try:
      value = getattr( self._handler[ 'module' ], prop[0] )
    except Exception as e:
      gLogger.exception( e )
      gLogger.error( "Missing property", prop[0] )
      value = 'unset'
    self._monitor.setComponentExtraParam( prop[1], value )
  for secondaryName in self._cfg.registerAlsoAs():
    gLogger.info( "Registering %s also as %s" % ( self._name, secondaryName ) )
    self._validNames.append( secondaryName )
  return S_OK()
def __init__(self):
    """Define the accounting record structure for finished jobs:
    key (dimension) fields, accounting (value) fields and bucket
    granularities, then register the local site."""
    BaseAccountingType.__init__(self)
    # Dimensions the records can be grouped/selected by
    self.definitionKeyFields = [('User', 'VARCHAR(32)'),
                                ('UserGroup', 'VARCHAR(32)'),
                                ('JobGroup', "VARCHAR(64)"),
                                ('JobType', 'VARCHAR(32)'),
                                ('JobClass', 'VARCHAR(32)'),
                                ('ProcessingType', 'VARCHAR(256)'),
                                ('Site', 'VARCHAR(32)'),
                                ('FinalMajorStatus', 'VARCHAR(32)'),
                                ('FinalMinorStatus', 'VARCHAR(256)')
                                ]
    # Measured quantities accumulated per bucket
    self.definitionAccountingFields = [('CPUTime', "INT UNSIGNED"),
                                       ('NormCPUTime', "INT UNSIGNED"),
                                       ('ExecTime', "INT UNSIGNED"),
                                       ('InputDataSize', 'BIGINT UNSIGNED'),
                                       ('OutputDataSize', 'BIGINT UNSIGNED'),
                                       ('InputDataFiles', 'INT UNSIGNED'),
                                       ('OutputDataFiles', 'INT UNSIGNED'),
                                       ('DiskSpace', 'BIGINT UNSIGNED'),
                                       ('InputSandBoxSize', 'BIGINT UNSIGNED'),
                                       ('OutputSandBoxSize', 'BIGINT UNSIGNED'),
                                       ('ProcessedEvents', 'INT UNSIGNED')
                                       ]
    # (age limit in seconds, bucket length in seconds)
    self.bucketsLength = [(86400 * 8, 3600),  # <1w+1d = 1h
                          (86400 * 35, 3600 * 4),  # <35d = 4h
                          (86400 * 30 * 6, 86400),  # <6m = 1d
                          (86400 * 365, 86400 * 2),  # <1y = 2d
                          (86400 * 600, 604800),  # >1y = 1w
                          ]
    self.checkType()
    # Fill the site
    self.setValueByKey("Site", DIRAC.siteName())
def __getBatchSystemPlugin( self ):
  """Using the name of the batch system plugin, will return an instance of the plugin class.

     The batch system is detected from its characteristic environment
     variable; returns S_ERROR when none is recognized or the plugin
     module cannot be imported.
  """
  # Environment variable identifying each supported batch system
  batchSystems = {'LSF':'LSB_JOBID', 'PBS':'PBS_JOBID', 'BQS':'QSUB_REQNAME', 'SGE':'SGE_TASK_ID'} #more to be added later
  name = None
  for batchSystem, envVar in batchSystems.items():
    if envVar in os.environ:  # 'in' instead of removed-in-py3 has_key()
      name = batchSystem
      break
  if name is None:  # 'is None' instead of '== None'
    self.log.warn( 'Batch system type for site %s is not currently supported' % DIRAC.siteName() )
    return S_ERROR( 'Current batch system is not supported' )
  self.log.debug( 'Creating plugin for %s batch system' % ( name ) )
  try:
    batchSystemName = "%sTimeLeft" % ( name )
    batchPlugin = __import__( 'DIRAC.Core.Utilities.TimeLeft.%s' % batchSystemName,
                              globals(), locals(), [batchSystemName] )
  except Exception as x:
    msg = 'Could not import DIRAC.Core.Utilities.TimeLeft.%s' % ( batchSystemName )
    self.log.warn( x )
    self.log.warn( msg )
    return S_ERROR( msg )
  # NOTE(review): 'batchPlugin' is not used in the visible code -- the
  # instantiation/return appears to be truncated in this view.
def determineSeFromSite():
    """Return the storage element name for the current DIRAC site.

    Looks the site up in the static SeSiteMap first, then falls back to the
    first SE returned by getSEsForSite(); returns '' when nothing matches.
    """
    siteName = DIRAC.siteName()
    mapped = SeSiteMap.get(siteName, "")
    if mapped:
        return mapped
    result = getSEsForSite(siteName)
    if result["OK"] and result["Value"]:
        return result["Value"][0]
    return ""
def checkFunction():
  """ gets CPU normalisation from MFJ or calculate itself.

      Optionally stores the result in the local configuration (when the
      global 'update' flag is set) or in an explicit 'configFile'.
  """
  from DIRAC.WorkloadManagementSystem.Client.CPUNormalization import getPowerFromMJF
  from ILCDIRAC.Core.Utilities.CPUNormalization import getCPUNormalization
  from DIRAC import gLogger, gConfig
  result = getCPUNormalization()
  if not result['OK']:
    gLogger.error( result['Message'] )
    # Fixed: without exiting, the next line raised KeyError on 'Value'
    DIRAC.exit( 1 )
  norm = round( result['Value']['NORM'], 1 )
  gLogger.notice( 'Estimated CPU power is %.1f %s' % ( norm, result['Value']['UNIT'] ) )
  mjfPower = getPowerFromMJF()
  if mjfPower:
    gLogger.notice( 'CPU power from MJF is %.1f HS06' % mjfPower )
  else:
    gLogger.notice( 'MJF not available on this node' )
  if update and not configFile:
    # Persist into the local dirac.cfg
    gConfig.setOptionValue( '/LocalSite/CPUScalingFactor', mjfPower if mjfPower else norm )
    gConfig.setOptionValue( '/LocalSite/CPUNormalizationFactor', norm )
    gConfig.dumpLocalCFGToFile( gConfig.diracConfigFilePath )
  if configFile:
    from DIRAC.Core.Utilities.CFG import CFG
    cfg = CFG()
    try:
      # Attempt to open the given file; best effort -- fall back to an
      # empty CFG. Narrowed from a bare 'except:'.
      cfg.loadFromFile( configFile )
    except Exception:
      pass
    # Create the section if it does not exist
    if not cfg.existsKey( 'LocalSite' ):
      cfg.createNewSection( 'LocalSite' )
    cfg.setOption( '/LocalSite/CPUScalingFactor', mjfPower if mjfPower else norm )
    cfg.setOption( '/LocalSite/CPUNormalizationFactor', norm )
    cfg.writeToFile( configFile )
  DIRAC.exit()
def setTemplate( optionValue ):
  """Switch callback: remember the requested template name."""
  global template
  template = optionValue
  return DIRAC.S_OK()
# Script body: derive which SE access flags to change and validate the
# environment (CS reachable, valid proxy, known user) before acting.
if not (read or write or check or remove):
    # No switch was specified, means we need all of them
    gLogger.notice("No option given, all accesses will be allowed if they were not")
    read = True
    write = True
    check = True
    remove = True
ses = resolveSEGroup(ses)
diracAdmin = DiracAdmin()
errorList = []
setup = gConfig.getValue('/DIRAC/Setup', '')
if not setup:
    print('ERROR: Could not contact Configuration Service')
    DIRAC.exit(2)
res = getProxyInfo()
if not res['OK']:
    gLogger.error('Failed to get proxy information', res['Message'])
    DIRAC.exit(2)
userName = res['Value'].get('username')
if not userName:
    gLogger.error('Failed to get username for proxy')
    DIRAC.exit(2)
if site:
    # NOTE(review): this branch appears truncated in this view -- the code
    # following the error report is outside the visible chunk.
    res = getSites()
    if not res['OK']:
        gLogger.error(res['Message'])
def setExecutable( optionValue ):
  """Switch callback: remember the executable to run."""
  global executable
  executable = optionValue
  return DIRAC.S_OK()
def __resolveInputData(self):
    """This method controls the execution of the DIRAC input data modules according
       to the ILC VO policy defined in the configuration service.

       The policy is taken from the job definition when present, otherwise
       from /InputDataPolicy (site-specific entry, falling back to 'Default').
       Each policy module is run in turn on the replicas the previous one
       failed to resolve.

       :returns: S_OK with 'Successful' and 'Failed' entries, or S_ERROR
    """
    if 'SiteName' in self.arguments['Configuration']:
        site = self.arguments['Configuration']['SiteName']
    else:
        site = DIRAC.siteName()
    policy = []
    if 'Job' not in self.arguments:
        self.arguments['Job'] = {}
    if 'InputDataPolicy' in self.arguments['Job']:
        policy = self.arguments['Job']['InputDataPolicy']
        #In principle this can be a list of modules with the first taking precedence
        if type(policy) in types.StringTypes:
            policy = [policy]
        self.log.info('Job has a specific policy setting: %s' % (string.join(policy, ', ')))
    else:
        self.log.verbose(
            'Attempting to resolve input data policy for site %s' % site)
        inputDataPolicy = self.ops.getOptionsDict('/InputDataPolicy')
        # NOTE(review): only truthiness is checked here, not ['OK'] -- confirm
        # getOptionsDict can actually return a falsy value on failure.
        if not inputDataPolicy:
            return S_ERROR(
                'Could not resolve InputDataPolicy from /InputDataPolicy')
        options = inputDataPolicy['Value']
        if site in options:
            policy = options[site]
            policy = [x.strip() for x in string.split(policy, ',')]
            self.log.info(
                'Found specific input data policy for site %s:\n%s' %
                (site, string.join(policy, ',\n')))
        elif 'Default' in options:
            policy = options['Default']
            policy = [x.strip() for x in string.split(policy, ',')]
            self.log.info(
                'Applying default input data policy for site %s:\n%s' %
                (site, string.join(policy, ',\n')))
    dataToResolve = None #if none, all supplied input data is resolved
    allDataResolved = False
    successful = {}
    failedReplicas = []
    # Run each policy module on whatever the previous one failed to resolve
    for modulePath in policy:
        if not allDataResolved:
            result = self.__runModule(modulePath, dataToResolve)
            if not result['OK']:
                self.log.warn('Problem during %s execution' % modulePath)
                return result
            if 'Failed' in result:
                failedReplicas = result['Failed']
            if failedReplicas:
                self.log.info(
                    '%s failed for the following files:\n%s' %
                    (modulePath, string.join(failedReplicas, '\n')))
                dataToResolve = failedReplicas
            else:
                self.log.info('All replicas resolved after %s execution' %
                              (modulePath))
                allDataResolved = True
            successful.update(result['Successful'])
            self.log.verbose(successful)
    result = S_OK()
    result['Successful'] = successful
    result['Failed'] = failedReplicas
    return result
print("Users in group %s:" % group) else: print("All users registered:") for username in result['Value']: print(" %s" % username) def describeUsersInGroup(group=False): result = diracAdmin.csListUsers(group) if result['OK']: if group: print("Users in group %s:" % group) else: print("All users registered:") result = diracAdmin.csDescribeUsers(result['Value']) print(diracAdmin.pPrint.pformat(result['Value'])) for group in args: if 'all' in args: group = False if not extendedInfo: printUsersInGroup(group) else: describeUsersInGroup(group) for error in errorList: print("ERROR %s: %s" % error) DIRAC.exit(exitCode)
def setRun( optionValue ):
  """Switch callback: remember the requested run identifier."""
  global run
  run = optionValue
  return DIRAC.S_OK()
def setGateway(optionValue):
    """Switch callback: use *optionValue* as the gateway server.

    Points the client at the gateway's Configuration/Server endpoint and
    records the gateway in the installation configuration.
    """
    global gatewayServer
    gatewayServer = optionValue
    serverURL = gatewayServer + '/Configuration/Server'
    setServer(serverURL)
    DIRAC.gConfig.setOptionValue(cfgInstallPath('Gateway'), gatewayServer)
    return DIRAC.S_OK()
def setUseVersionsDir(optionValue):
    """Switch callback: enable the versioned-directory installation layout.

    The switch takes no meaningful value; its presence sets the flag and
    records it in the installation configuration.
    """
    global useVersionsDir
    useVersionsDir = True
    DIRAC.gConfig.setOptionValue(cfgInstallPath('UseVersionsDir'), useVersionsDir)
    return DIRAC.S_OK()
def setSkipCAChecks(optionValue):
    """Switch callback: disable CA certificate verification.

    The switch takes no meaningful value; its presence sets the flag and
    records it in the installation configuration.
    """
    global skipCAChecks
    skipCAChecks = True
    DIRAC.gConfig.setOptionValue(cfgInstallPath('SkipCAChecks'), skipCAChecks)
    return DIRAC.S_OK()
def main():
  """Run a CORSIKA simulation job: install/build the corsika_simhessarray
  package, execute the CorsikaApp, then rename and tar the outputs.

  All options are delivered through registered switch callbacks that fill
  module-level globals (run_number, run, config_path, template, executable,
  version, mode).  Exits the process via DIRAC.exit on any failure.
  """
  from DIRAC.Core.Base import Script
  # Each callback stores its value in a module-level global.
  Script.registerSwitch( "p:", "run_number=", "Run Number", setRunNumber )
  Script.registerSwitch( "R:", "run=", "Run", setRun )
  Script.registerSwitch( "P:", "config_path=", "Config Path", setConfigPath )
  Script.registerSwitch( "T:", "template=", "Template", setTemplate )
  Script.registerSwitch( "E:", "executable=", "Executable", setExecutable )
  Script.registerSwitch( "V:", "version=", "Version", setVersion )
  Script.registerSwitch( "M:", "mode=", "Mode", setMode )
  Script.parseCommandLine( ignoreErrors = True )
  args = Script.getPositionalArgs()

  if len( args ) < 1:
    Script.showHelp()

  from CTADIRAC.Core.Workflow.Modules.CorsikaApp import CorsikaApp
  from CTADIRAC.Core.Utilities.SoftwareInstallation import checkSoftwarePackage
  from CTADIRAC.Core.Utilities.SoftwareInstallation import installSoftwarePackage
  from CTADIRAC.Core.Utilities.SoftwareInstallation import installSoftwareEnviron
  from CTADIRAC.Core.Utilities.SoftwareInstallation import localArea
  from CTADIRAC.Core.Utilities.SoftwareInstallation import sharedArea
  from CTADIRAC.Core.Utilities.SoftwareInstallation import workingArea
  from DIRAC.Core.Utilities.Subprocess import systemCall
  from DIRAC.WorkloadManagementSystem.Client.JobReport import JobReport

  # Create the job report BEFORE validating options: the original code
  # referenced jobReport in the bad-options branch before it was defined,
  # which would have raised a NameError.
  jobID = int( os.environ['JOBID'] )
  jobReport = JobReport( jobID )

  if version is None or executable is None or run_number is None or run is None or template is None:
    jobReport.setApplicationStatus( 'Options badly specified' )
    Script.showHelp()
    DIRAC.exit( -1 )

  CorsikaSimtelPack = 'corsika_simhessarray/' + version + '/corsika_simhessarray'
  packs = [CorsikaSimtelPack]

  for package in packs:
    DIRAC.gLogger.notice( 'Checking:', package )
    # First look in the shared software area, copying it locally if found.
    if sharedArea:
      if checkSoftwarePackage( package, sharedArea() )['OK']:
        DIRAC.gLogger.notice( 'Package found in Shared Area:', package )
        installSoftwareEnviron( package, workingArea() )
        packageTuple = package.split( '/' )
        corsika_subdir = sharedArea() + '/' + packageTuple[0] + '/' + version
        cmd = 'cp -r ' + corsika_subdir + '/* .'
        os.system( cmd )
        continue
    # Otherwise look in (or install into) the local working area.
    if workingArea:
      if checkSoftwarePackage( package, workingArea() )['OK']:
        DIRAC.gLogger.notice( 'Package found in Local Area:', package )
        continue
      if installSoftwarePackage( package, workingArea() )['OK']:
        ############## compile #############################
        if version == 'clean_23012012':
          cmdTuple = ['./build_all', 'ultra', 'qgs2']
        elif version in ['prod-2_21122012', 'prod-2_08032013', 'prod-2_06052013']:
          cmdTuple = ['./build_all', 'prod2', 'qgs2']
        else:
          # Guard: an unrecognised version previously crashed with a
          # NameError on the undefined cmdTuple.
          DIRAC.gLogger.error( 'No build command known for version:', version )
          DIRAC.exit( -1 )
        ret = systemCall( 0, cmdTuple, sendOutput )
        if not ret['OK']:
          DIRAC.gLogger.error( 'Failed to execute build' )
          DIRAC.exit( -1 )
        continue
    DIRAC.gLogger.error( 'Check Failed for software package:', package )
    DIRAC.gLogger.error( 'Software package not available' )
    DIRAC.exit( -1 )

  # Run the CORSIKA application itself.
  cs = CorsikaApp()
  cs.setSoftwarePackage( CorsikaSimtelPack )
  cs.csExe = executable
  cs.csArguments = ['--run-number', run_number, '--run', run, template]
  corsikaReturnCode = cs.execute()
  if corsikaReturnCode != 0:
    DIRAC.gLogger.error( 'Failed to execute corsika Application' )
    jobReport.setApplicationStatus( 'Corsika Application: Failed' )
    DIRAC.exit( -1 )

  ###### rename corsika file #################################
  rundir = 'run' + run_number
  corsikaKEYWORDS = ['TELFIL']
  # The output file name is taken from the TELFIL keyword of the template.
  dictCorsikaKW = fileToKWDict( template, corsikaKEYWORDS )
  corsikafilename = rundir + '/' + dictCorsikaKW['TELFIL'][0]
  destcorsikafilename = 'corsika_run' + run_number + '.corsika.gz'
  cmd = 'mv ' + corsikafilename + ' ' + destcorsikafilename
  os.system( cmd )

  ### create corsika tar ####################
  corsika_tar = 'corsika_run' + run_number + '.tar.gz'
  filetar1 = rundir + '/' + 'input'
  filetar2 = rundir + '/' + 'DAT' + run_number + '.dbase'
  filetar3 = rundir + '/run' + str( int( run_number ) ) + '.log'
  cmdTuple = ['/bin/tar', 'zcf', corsika_tar, filetar1, filetar2, filetar3]
  DIRAC.gLogger.notice( 'Executing command tuple:', cmdTuple )
  ret = systemCall( 0, cmdTuple, sendOutput )
  if not ret['OK']:
    DIRAC.gLogger.error( 'Failed to execute tar' )
    DIRAC.exit( -1 )

  DIRAC.exit()
return DIRAC.S_OK() def registerCLISwitches(self): Script.registerSwitch("v:", "valid=", "Required HH:MM for the users", self.setProxyLifeTime) params = Params() params.registerCLISwitches() Script.parseCommandLine(ignoreErrors=True) args = Script.getPositionalArgs() result = gProxyManager.getDBContents() if not result['OK']: print "Can't retrieve list of users: %s" % result['Message'] DIRAC.exit(1) keys = result['Value']['ParameterNames'] records = result['Value']['Records'] dataDict = {} now = Time.dateTime() for record in records: expirationDate = record[3] dt = expirationDate - now secsLeft = dt.days * 86400 + dt.seconds if secsLeft > params.proxyLifeTime: userName, userDN, userGroup, _, persistent = record if not userName in dataDict: dataDict[userName] = [] dataDict[userName].append( (userDN, userGroup, expirationDate, persistent))
def setRunNumber( optionValue ):
  """Switch callback: extract the run number from a parametric-job value.

  The value arrives either as ``ParametricParameters=<N>`` or as a plain
  value; the part after the marker (or the whole value when the marker is
  absent) is stored in the module-level global ``run_number``.
  """
  global run_number
  # [-1] keeps this safe for plain values: split()[1] used to raise
  # IndexError when the 'ParametricParameters=' marker was missing, and
  # it yields the same result when the marker is present.
  run_number = optionValue.split('ParametricParameters=')[-1]
  return DIRAC.S_OK()
res = prod3dm._checkemptydir(outputpattern) if not res['OK']: return res for localfile in glob.glob(outputpattern): filename = os.path.basename(localfile) run_number = getRunNumber(filename, package) runpath = prod3dm._getRunPath(run_number) #lfn = os.path.join( path, 'Data', runpath, filename ) lfn = os.path.join(path, outputType, runpath, filename) res = prod3dm.putAndRegister(lfn, localfile, filemetadata, package) if not res['OK']: return res return DIRAC.S_OK() #################################################### if __name__ == '__main__': args = Script.getPositionalArgs() try: res = putAndRegisterPROD3(args) if not res['OK']: DIRAC.gLogger.error(res['Message']) DIRAC.exit(-1) else: DIRAC.gLogger.notice('Done') except Exception: DIRAC.gLogger.exception() DIRAC.exit(-1)
def __findServiceURL(self):
    """ Discovers the URL of a service, taking into account gateways,
        multiple URLs, banned URLs


        If the site on which we run is configured to use gateways (/DIRAC/Gateways/<siteName>),
        these URLs will be used. To ignore the gateway, it is possible to set KW_IGNORE_GATEWAYS
        to False in kwargs.

        If self._destinationSrv (given as constructor attribute) is a properly formed URL,
        we just return this one. If we have to use a gateway, we just replace the server name in the url.

        The list of URLs defined in the CS (<System>/URLs/<Component>) is randomized

        This method also sets some attributes:
          * self.__nbOfUrls = number of URLs
          * self.__nbOfRetry = 2 if we have more than 2 urls, otherwise 3
          * self.__bannedUrls is reinitialized if all the URLs are banned

        :return: the selected URL

    """
    if not self.__initStatus['OK']:
        return self.__initStatus

    # Load the Gateways URLs for the current site Name
    gatewayURL = False
    if self.KW_IGNORE_GATEWAYS not in self.kwargs or not self.kwargs[self.KW_IGNORE_GATEWAYS]:
        dRetVal = gConfig.getOption("/DIRAC/Gateways/%s" % DIRAC.siteName())
        if dRetVal['OK']:
            # Several gateways may be defined: pick one at random and keep
            # only its scheme://host:port part.
            rawGatewayURL = List.randomize(List.fromChar(dRetVal['Value'], ","))[0]
            gatewayURL = "/".join(rawGatewayURL.split("/")[:3])

    # If what was given as constructor attribute is a properly formed URL,
    # we just return this one.
    # If we have to use a gateway, we just replace the server name in it
    for protocol in gProtocolDict:
        if self._destinationSrv.find("%s://" % protocol) == 0:
            gLogger.debug("Already given a valid url", self._destinationSrv)
            if not gatewayURL:
                return S_OK(self._destinationSrv)
            gLogger.debug("Reconstructing given URL to pass through gateway")
            path = "/".join(self._destinationSrv.split("/")[3:])
            finalURL = "%s/%s" % (gatewayURL, path)
            gLogger.debug("Gateway URL conversion:\n %s -> %s" % (self._destinationSrv, finalURL))
            return S_OK(finalURL)

    if gatewayURL:
        gLogger.debug("Using gateway", gatewayURL)
        return S_OK("%s/%s" % (gatewayURL, self._destinationSrv))

    # We extract the list of URLs from the CS (System/URLs/Component)
    try:
        urls = getServiceURL(self._destinationSrv, setup=self.setup)
    except Exception as e:
        return S_ERROR("Cannot get URL for %s in setup %s: %s" % (self._destinationSrv, self.setup, repr(e)))
    if not urls:
        return S_ERROR("URL for service %s not found" % self._destinationSrv)

    failoverUrls = []
    # Try if there are some failover URLs to use as last resort
    try:
        failoverUrlsStr = getServiceFailoverURL(self._destinationSrv, setup=self.setup)
        if failoverUrlsStr:
            failoverUrls = failoverUrlsStr.split(',')
    except Exception as e:
        # Failover URLs are strictly optional: any lookup problem is ignored.
        pass

    # We randomize the list, and add at the end the failover URLs (System/FailoverURLs/Component)
    urlsList = List.randomize(List.fromChar(urls, ",")) + failoverUrls
    self.__nbOfUrls = len(urlsList)
    self.__nbOfRetry = 2 if self.__nbOfUrls > 2 else 3  # we retry 2 times all services, if we run more than 2 services
    if self.__nbOfUrls == len(self.__bannedUrls):
        self.__bannedUrls = []  # retry all urls
        gLogger.debug("Retrying again all URLs")

    if len(self.__bannedUrls) > 0 and len(urlsList) > 1:
        # we have host which is not accessible. We remove that host from the list.
        # We only remove if we have more than one instance
        for i in self.__bannedUrls:
            gLogger.debug("Removing banned URL", "%s" % i)
            urlsList.remove(i)

    # Take the first URL from the list
    #randUrls = List.randomize( urlsList ) + failoverUrls
    sURL = urlsList[0]

    # If we have banned URLs, and several URLs at disposals, we make sure that the selected sURL
    # is not on a host which is banned. If it is, we take the next one in the list using __selectUrl
    if len(self.__bannedUrls) > 0 and self.__nbOfUrls > 2:  # when we have multiple services then we can
        # have a situation when two services are running on the same machine with different ports...
        retVal = Network.splitURL(sURL)
        nexturl = None
        if retVal['OK']:
            nexturl = retVal['Value']
            found = False
            for i in self.__bannedUrls:
                retVal = Network.splitURL(i)
                if retVal['OK']:
                    bannedurl = retVal['Value']
                else:
                    break
                # We found a banned URL on the same host as the one we are running on
                if nexturl[1] == bannedurl[1]:
                    found = True
                    break
            if found:
                nexturl = self.__selectUrl(nexturl, urlsList[1:])
                if nexturl:  # an url found which is in different host
                    sURL = nexturl
    gLogger.debug("Discovering URL for service", "%s -> %s" % (self._destinationSrv, sURL))
    return S_OK(sURL)
def setConfigPath( optionValue ):
  """Command-line switch callback: remember the configuration path."""
  global config_path
  config_path = optionValue
  return DIRAC.S_OK()
def setCEName(optionValue):
    """Switch callback: record the Computing Element name and persist it
    in the installation configuration."""
    global ceName
    ceName = optionValue
    DIRAC.gConfig.setOptionValue(cfgInstallPath('CEName'), ceName)
    return DIRAC.S_OK()
def main():
    """Install (and if needed build) the corsika_simhessarray package for
    the requested version, then execute the given read_hess command inside
    the package environment.

    Positional arguments:
      args[0]   software version to install/use
      args[1:]  command tuple to execute (args[1] is made executable first)

    Exits the process via DIRAC.exit on any failure.
    """
    from DIRAC import gLogger
    from DIRAC.Core.Base import Script
    Script.parseCommandLine(ignoreErrors=True)

    from CTADIRAC.Core.Utilities.SoftwareInstallation import checkSoftwarePackage
    from CTADIRAC.Core.Utilities.SoftwareInstallation import installSoftwarePackage
    from CTADIRAC.Core.Utilities.SoftwareInstallation import installSoftwareEnviron
    from CTADIRAC.Core.Utilities.SoftwareInstallation import localArea
    from CTADIRAC.Core.Utilities.SoftwareInstallation import sharedArea
    from CTADIRAC.Core.Utilities.SoftwareInstallation import getSoftwareEnviron
    from CTADIRAC.Core.Utilities.SoftwareInstallation import workingArea
    from DIRAC.Core.Utilities.Subprocess import systemCall

    args = Script.getPositionalArgs()
    # Guard: the original code indexed args[0] / args[1] unconditionally
    # and crashed with an IndexError when arguments were missing.
    if len(args) < 2:
        DIRAC.gLogger.error('Usage: <version> <executable> [arguments...]')
        DIRAC.exit(-1)
    version = args[0]

    CorsikaSimtelPack = 'corsika_simhessarray/' + version + '/corsika_simhessarray'
    packs = [CorsikaSimtelPack]

    for package in packs:
        DIRAC.gLogger.notice('Checking:', package)
        # First look in the shared software area, copying it locally if found.
        if sharedArea:
            if checkSoftwarePackage(package, sharedArea())['OK']:
                DIRAC.gLogger.notice('Package found in Shared Area:', package)
                installSoftwareEnviron(package, workingArea())
                packageTuple = package.split('/')
                corsika_subdir = sharedArea(
                ) + '/' + packageTuple[0] + '/' + version
                cmd = 'cp -u -r ' + corsika_subdir + '/* .'
                os.system(cmd)
                continue
        # Otherwise look in (or install into) the local working area.
        if workingArea:
            if checkSoftwarePackage(package, workingArea())['OK']:
                DIRAC.gLogger.notice('Package found in Local Area:', package)
                continue
            if installSoftwarePackage(package, workingArea())['OK']:
                ############## compile #############################
                if version == 'clean_23012012':
                    cmdTuple = ['./build_all', 'ultra', 'qgs2']
                elif version in [
                        'prod-2_21122012', 'prod-2_08032013', 'prod-2_06052013'
                ]:
                    cmdTuple = ['./build_all', 'prod2', 'qgs2']
                else:
                    # Guard: an unrecognised version previously crashed
                    # with a NameError on the undefined cmdTuple.
                    DIRAC.gLogger.error('No build command known for version:', version)
                    DIRAC.exit(-1)
                ret = systemCall(0, cmdTuple, sendOutput)
                if not ret['OK']:
                    DIRAC.gLogger.error('Failed to compile')
                    DIRAC.exit(-1)
                continue
        DIRAC.gLogger.error('Check Failed for software package:', package)
        DIRAC.gLogger.error('Software package not available')
        DIRAC.exit(-1)

    # Load the environment published by the installed package.
    ret = getSoftwareEnviron(CorsikaSimtelPack)
    if not ret['OK']:
        error = ret['Message']
        DIRAC.gLogger.error(error, CorsikaSimtelPack)
        DIRAC.exit(-1)
    corsikaEnviron = ret['Value']

    # Make the command executable and run it inside the package environment.
    executable_file = args[1]
    cmd = 'chmod u+x ' + executable_file
    os.system(cmd)
    cmdTuple = args[1:]
    DIRAC.gLogger.notice('Executing command tuple:', cmdTuple)
    ret = systemCall(0, cmdTuple, sendOutput, env=corsikaEnviron)
    if not ret['OK']:
        DIRAC.gLogger.error('Failed to execute read_hess:', ret['Message'])
        DIRAC.exit(-1)
    status, stdout, stderr = ret['Value']
    if status:
        DIRAC.gLogger.error('read_hess execution reports Error:', status)
        DIRAC.gLogger.error(stdout)
        DIRAC.gLogger.error(stderr)
        DIRAC.exit(-1)
    DIRAC.exit()
""" This module will run some job descriptions defined with an older version of DIRAC """ # pylint: disable=protected-access, wrong-import-position, invalid-name, missing-docstring import unittest import os import sys import shutil import DIRAC DIRAC.initialize() # Initialize configuration from DIRAC import gLogger, rootPath from DIRAC.tests.Utilities.utils import find_all from DIRAC.tests.Utilities.IntegrationTest import IntegrationTest from DIRAC.Interfaces.API.Job import Job from DIRAC.Interfaces.API.Dirac import Dirac class RegressionTestCase(IntegrationTest): """Base class for the Regression test cases""" def setUp(self): super(RegressionTestCase, self).setUp() gLogger.setLevel("DEBUG") self.dirac = Dirac() try:
outputDir = value elif sw.lower() in ('f', 'file'): if os.path.exists(value): jFile = open(value) jobs += jFile.read().split() jFile.close() elif sw.lower() in ('g', 'jobgroup'): group = value jobDate = toString(date() - 30 * day) # Choose jobs in final state, no more than 30 days old result = dirac.selectJobs(jobGroup=value, date=jobDate, status='Done') if not result['OK']: if not "No jobs selected" in result['Message']: print "Error:", result['Message'] DIRAC.exit(-1) else: jobs += result['Value'] result = dirac.selectJobs(jobGroup=value, date=jobDate, status='Failed') if not result['OK']: if not "No jobs selected" in result['Message']: print "Error:", result['Message'] DIRAC.exit(-1) else: jobs += result['Value'] for arg in args: if os.path.isdir(arg): print "Output for job %s already retrieved, remove the output directory to redownload" % arg
def setVersion( optionValue ):
  """Command-line switch callback: remember the requested software version."""
  global version
  version = optionValue
  return DIRAC.S_OK()
def forceUpdate(optionValue):
    """Flag switch callback: request that dirac.cfg be updated.

    The switch value itself is ignored; the flag's presence means "yes".
    """
    global update
    update = True
    DIRAC.gLogger.notice('Will update dirac.cfg')
    return DIRAC.S_OK()
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Utilities.PrettyPrint import printTable

# Usage banner: first line of the module docstring plus a usage example.
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
                                  'Usage:',
                                  '  %s [option|cfgfile] ... Site' % Script.scriptName, ]))

Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()

# Collect (label, value) pairs describing the local DIRAC installation.
records = []

records.append(('Setup', gConfig.getValue('/DIRAC/Setup', 'Unknown')))
records.append(('ConfigurationServer', gConfig.getValue('/DIRAC/Configuration/Servers', [])))
records.append(('Installation path', DIRAC.rootPath))
# A bundled mysql binary marks a server installation; otherwise client.
if os.path.exists(os.path.join(DIRAC.rootPath, DIRAC.getPlatform(), 'bin', 'mysql')):
    records.append(('Installation type', 'server'))
else:
    records.append(('Installation type', 'client'))
records.append(('Platform', DIRAC.getPlatform()))

# Derive the VO from the group of the current proxy, if any.
ret = getProxyInfo(disableVOMS=True)
if ret['OK']:
    if 'group' in ret['Value']:
        vo = getVOForGroup(ret['Value']['group'])
    else:
        vo = getVOForGroup('')
    if not vo:
        vo = "None"
    records.append(('VirtualOrganization', vo))
def setSkipCADownload(optionValue):
    """Flag switch callback: do not download CA certificates.

    The switch value itself is ignored; the flag's presence means "yes".
    """
    global skipCADownload
    skipCADownload = True
    DIRAC.gConfig.setOptionValue(cfgInstallPath('SkipCADownload'), skipCADownload)
    return DIRAC.S_OK()
Script.localCfg.addDefaultEntry('/DIRAC/Security/SkipCAChecks', 'yes') else: # Necessary to allow initial download of CA's if not skipCADownload: DIRAC.gConfig.setOptionValue('/DIRAC/Security/SkipCAChecks', 'yes') if not skipCADownload: Script.enableCS() try: dirName = os.path.join(DIRAC.rootPath, 'etc', 'grid-security', 'certificates') if not os.path.exists(dirName): os.makedirs(dirName) except: DIRAC.gLogger.exception() DIRAC.gLogger.fatal('Fail to create directory:', dirName) DIRAC.exit(-1) try: from DIRAC.FrameworkSystem.Client.BundleDeliveryClient import BundleDeliveryClient bdc = BundleDeliveryClient() result = bdc.syncCAs() if result['OK']: result = bdc.syncCRLs() except: DIRAC.gLogger.exception('Could not import BundleDeliveryClient') pass if not skipCAChecks: Script.localCfg.deleteOption('/DIRAC/Security/SkipCAChecks') if ceName or siteName: # This is used in the pilot context, we should have a proxy and access to CS Script.enableCS()
def setServerCert(optionValue):
    """Flag switch callback: authenticate with the host certificate.

    The switch value itself is ignored; the flag's presence means "yes".
    """
    global useServerCert
    useServerCert = True
    DIRAC.gConfig.setOptionValue(cfgInstallPath('UseServerCertificate'), useServerCert)
    return DIRAC.S_OK()
def _getTSFiles(self):
    """ Helper function - get files from the TS

    Builds a TransformationSystem file selection for ``self.prod``:
    either the explicit LFN list (``self._lfns``) or, when a run status
    and a source production are given, the runs of ``self.fromProd`` in
    that status (accumulated into ``self.runsList``).

    :return: tuple (processedLFNs, nonProcessedLFNs, nonProcessedStatuses);
             all three empty on query failure
    """
    selectDict = {'TransformationID': self.prod}
    if self._lfns:
        selectDict['LFN'] = self._lfns
    elif self.runStatus and self.fromProd:
        res = self.transClient.getTransformationRuns({
            'TransformationID': self.fromProd,
            'Status': self.runStatus
        })
        if not res['OK']:
            # NOTE(review): the query above targets self.fromProd, yet the
            # message reports self.prod — confirm which was intended.
            gLogger.error("Failed to get runs for transformation %d" % self.prod)
        else:
            if res['Value']:
                # Accumulate new run numbers, skipping those already known.
                self.runsList.extend([
                    run['RunNumber'] for run in res['Value']
                    if run['RunNumber'] not in self.runsList
                ])
                gLogger.notice("%d runs selected" % len(res['Value']))
            elif not self.runsList:
                gLogger.notice("No runs selected, check completed")
                DIRAC.exit(0)
    if not self._lfns and self.runsList:
        selectDict['RunNumber'] = self.runsList

    res = self.transClient.getTransformation(self.prod)
    if not res['OK']:
        gLogger.error("Failed to find transformation %s" % self.prod)
        return [], [], []
    status = res['Value']['Status']
    # Only these states have a meaningful processed/non-processed split.
    if status not in ('Active', 'Stopped', 'Completed', 'Idle'):
        gLogger.notice(
            "Transformation %s in status %s, will not check if files are processed" % (self.prod, status))
    processedLFNs = []
    nonProcessedLFNs = []
    nonProcessedStatuses = []
    if self._lfns:
        # Explicit LFN list: treat every given file as processed.
        processedLFNs = self._lfns
    else:
        res = self.transClient.getTransformationFiles(selectDict)
        if not res['OK']:
            gLogger.error(
                "Failed to get files for transformation %d" % self.prod,
                res['Message'])
            return [], [], []
        else:
            processedLFNs = [
                item['LFN'] for item in res['Value']
                if item['Status'] == 'Processed'
            ]
            nonProcessedLFNs = [
                item['LFN'] for item in res['Value']
                if item['Status'] != 'Processed'
            ]
            nonProcessedStatuses = list(
                set(item['Status'] for item in res['Value']
                    if item['Status'] != 'Processed'))
    return processedLFNs, nonProcessedLFNs, nonProcessedStatuses
def setMode( optionValue ):
  """Command-line switch callback: remember the requested mode."""
  global mode
  mode = optionValue
  return DIRAC.S_OK()
def getTimeLeft(self, cpuConsumed):
    """Returns the CPU Time Left for supported batch systems.

    The estimate is derived from the batch system's CPU and wall-clock
    usage relative to their limits, scaled by the site CPU scaling factor.

    :param cpuConsumed: current raw total CPU (kept for interface
                        compatibility; the current estimate is based on
                        the batch limits, not on this value)
    :return: S_OK(timeLeft in normalized units) or S_ERROR
    """
    # Quit if no scale factor available
    if not self.scaleFactor:
        return S_ERROR(
            '/LocalSite/CPUScalingFactor not defined for site %s' %
            DIRAC.siteName())

    if not self.batchPlugin:
        return S_ERROR(self.batchError)

    resourceDict = self.batchPlugin.getResourceUsage()
    if not resourceDict['OK']:
        self.log.warn(
            'Could not determine timeleft for batch system at site %s' %
            DIRAC.siteName())
        return resourceDict

    resources = resourceDict['Value']
    self.log.verbose(resources)
    if not resources['CPULimit'] or not resources['WallClockLimit']:
        return S_ERROR('No CPU / WallClock limits obtained')

    # Percentages of the batch limits already consumed / remaining.
    cpuFactor = 100 * float(resources['CPU']) / float(
        resources['CPULimit'])
    cpuRemaining = 100 - cpuFactor
    cpuLimit = float(resources['CPULimit'])
    wcFactor = 100 * float(resources['WallClock']) / float(
        resources['WallClockLimit'])
    wcRemaining = 100 - wcFactor
    # (the previous version also computed an unused wcLimit here)
    self.log.verbose('Used CPU is %.02f, Used WallClock is %.02f.'
                     % (cpuFactor, wcFactor))
    self.log.verbose(
        'Remaining WallClock %.02f, Remaining CPU %.02f, margin %s' %
        (wcRemaining, cpuRemaining, self.cpuMargin))

    timeLeft = None
    if wcRemaining > cpuRemaining and (wcRemaining - cpuRemaining) > self.cpuMargin:
        # In some cases cpuFactor might be 0
        # timeLeft = float(cpuConsumed*self.scaleFactor*cpuRemaining/cpuFactor)
        # We need time left in the same units used by the Matching
        timeLeft = float(cpuRemaining * cpuLimit / 100 * self.scaleFactor)
        self.log.verbose(
            'Remaining WallClock %.02f > Remaining CPU %.02f and difference > margin %s'
            % (wcRemaining, cpuRemaining, self.cpuMargin))
    elif cpuRemaining > self.cpuMargin and wcRemaining > self.cpuMargin:
        self.log.verbose(
            'Remaining WallClock %.02f and Remaining CPU %.02f both > margin %s'
            % (wcRemaining, cpuRemaining, self.cpuMargin))
        # In some cases cpuFactor might be 0
        # timeLeft = float(cpuConsumed*self.scaleFactor*(wcRemaining-self.cpuMargin)/cpuFactor)
        timeLeft = float(cpuRemaining * cpuLimit / 100 * self.scaleFactor)
    else:
        self.log.verbose(
            'Remaining CPU %.02f < margin %s and WallClock %.02f < margin %s so no time left'
            % (cpuRemaining, self.cpuMargin, wcRemaining, self.cpuMargin))

    if timeLeft:
        self.log.verbose('Remaining CPU in normalized units is: %.02f' %
                         timeLeft)
        return S_OK(timeLeft)
    return S_ERROR('No time left for slot')
print output print '**************************' if not output['Value'][0]: ldlibs = output['Value'][1].split( '\n' ) for lib in ldlibs: if os.path.exists( lib ): if re.search( 'RELAX', lib ) is not None: filename = os.path.basename( lib ) output = shellCall( 0, 'ln -s ' + str( lib ) + ' ' + str( filename ) ) if DEBUG: if not output['OK']: print '********************************' print 'Warning, problem creating link:' print 'File: ', filename print 'Path: ', lib print output print '********************************' os.chdir( start ) sys.stdout.flush() Script.parseCommandLine() positionalArgs = Script.getPositionalArgs() if len( positionalArgs ) != 3: DIRAC.abort( 1, "Must specify which is the role you want" ) fixLDPath( positionalArgs[0], positionalArgs[1], positionalArgs[2] )
from DIRAC import gLogger, gConfig
from DIRAC.WorkloadManagementSystem.Client.DIRACbenchmark import singleDiracBenchmark
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities import MJF

# Machine/Job Features published by the site may already carry reference
# benchmark figures (db12, hs06) for this slot.
mjf = MJF.MJF()
mjf.updateConfig()
db12JobFeature = mjf.getJobFeature('db12')
hs06JobFeature = mjf.getJobFeature('hs06')

# Run one iteration of the DIRAC DB12 benchmark on this worker node.
result = singleDiracBenchmark(1)

if result is None:
    gLogger.error('Cannot make benchmark measurements')
    DIRAC.exit(1)

db12Measured = round(result['NORM'], 1)
# Optional VO-level correction factor applied to the raw DB12 figure.
corr = Operations().getValue('JobScheduling/CPUNormalizationCorrection', 1.)
norm = round(result['NORM'] / corr, 1)

gLogger.notice('Estimated CPU power is %.1f HS06' % norm)

if update:
    # Prefer the site-published HS06 value when MJF provides one.
    gConfig.setOptionValue(
        '/LocalSite/CPUScalingFactor',
        hs06JobFeature if hs06JobFeature else norm)  # deprecate?
    gConfig.setOptionValue('/LocalSite/CPUNormalizationFactor', norm)  # deprecate?
    gConfig.setOptionValue('/LocalSite/DB12measured', db12Measured)