def do_resetBucketLength(self, args):
    """Set the bucket length for an accounting type.

    Triggers a recalculation of the buckets for that type, which can
    take a while.

    Usage : resetBucketLength <typeName>
      <DIRACRoot>/DIRAC/AccountingSystem/Client/Types/<typeName>
      should exist and inherit the base type
    """
    try:
        argList = args.split()
        if argList:
            typeName = argList[0].strip()
        else:
            gLogger.error("No type name specified")
            return
        # Try to import the type
        try:
            typeModule = __import__(
                "DIRAC.AccountingSystem.Client.Types.%s" % typeName,
                globals(), locals(), typeName)
            typeClass = getattr(typeModule, typeName)
        except Exception as e:
            gLogger.error("Can't load type %s: %s" % (typeName, str(e)))
            return
        gLogger.info("Loaded type %s" % typeClass.__name__)
        typeDef = typeClass().getDefinition()
        acClient = DataStoreClient()
        # assumes typeDef[0] is the type name and typeDef[3] the bucket
        # length definition — TODO confirm against the base type class
        retVal = acClient.setBucketsLength(typeDef[0], typeDef[3])
        if retVal["OK"]:
            # Fixed copy-paste from do_registerType: this command resets
            # the bucket length, it does not register the type.
            gLogger.info("Bucket length reset successfully")
        else:
            gLogger.error("Error: %s" % retVal["Message"])
    except Exception:
        self.showTraceback()
def do_registerType(self, args):
    """Registers a new accounting type

    Usage : registerType <typeName>
      <DIRACRoot>/DIRAC/AccountingSystem/Client/Types/<typeName>
      should exist and inherit the base type
    """
    try:
        argList = args.split()
        if argList:
            typeName = argList[0].strip()
        else:
            gLogger.error("No type name specified")
            return
        # Try to import the type
        try:
            typeModule = __import__(
                "DIRAC.AccountingSystem.Client.Types.%s" % typeName,
                globals(), locals(), typeName)
            typeClass = getattr(typeModule, typeName)
        except Exception as e:
            gLogger.error("Can't load type %s: %s" % (typeName, str(e)))
            return
        gLogger.info("Loaded type %s" % typeClass.__name__)
        typeDef = typeClass().getDefinition()
        acClient = DataStoreClient()
        retVal = acClient.registerType(*typeDef)
        if retVal['OK']:
            gLogger.info("Type registered successfully")
        else:
            gLogger.error("Error: %s" % retVal['Message'])
    # Narrowed from BaseException: catching BaseException also swallowed
    # KeyboardInterrupt/SystemExit; siblings all use Exception.
    except Exception:
        self.showTraceback()
def do_deleteType(self, args):
    """Delete a registered accounting type.

    Usage : deleteType <typeName>
      WARN! It will delete all data associated to that type! VERY DANGEROUS!
      If you screw it, you'll discover a new dimension of pain and doom! :)
    """
    try:
        tokens = args.split()
        if not tokens:
            gLogger.error("No type name specified")
            return
        typeName = tokens[0].strip()
        # Require an explicit "yes"/"y" before destroying data;
        # anything else (including just Enter) aborts.
        answer = six.moves.input(
            "Are you completely sure you want to delete type %s and all it's data? yes/no [no]: " % typeName)
        if answer.lower() not in ("yes", "y"):
            print("Delete aborted")
            return
        acClient = DataStoreClient()
        retVal = acClient.deleteType(typeName)
        if not retVal["OK"]:
            gLogger.error("Error: %s" % retVal["Message"])
            return
        print("Hope you meant it, because it's done")
    except Exception:
        self.showTraceback()
def do_connect(self, args):
    """Tries to connect to the server
    Usage: connect
    """
    gLogger.info("Trying to connect to server")
    # Start pessimistic: mark as disconnected, then upgrade on a good ping.
    self.connected = False
    self.prompt = "(%s)> " % colorize("Not connected", "red")
    pingResult = DataStoreClient().ping()
    if pingResult["OK"]:
        self.prompt = "(%s)> " % colorize("Connected", "green")
        self.connected = True
def do_compactBuckets(self, args):
    """Compact buckets table
    Usage : compactBuckets
    """
    try:
        result = DataStoreClient().compactDB()
        if result["OK"]:
            gLogger.info("Done")
        else:
            gLogger.error("Error: %s" % result["Message"])
    except Exception:
        self.showTraceback()
def jobexec(jobxml, wfParameters):
    """Load the workflow described in *jobxml* and execute it.

    Command-line parameters in *wfParameters* are propagated both to the
    workflow itself and to every step module instance that declares them.
    Exits the process with status 1 if the XML file does not exist.
    """
    xmlPath = os.path.abspath(jobxml)
    if not os.path.exists(xmlPath):
        gLogger.warn('Path to specified workflow %s does not exist' % (xmlPath))
        sys.exit(1)
    workflow = fromXMLFile(xmlPath)
    gLogger.debug(workflow)
    gLogger.debug(workflow.createCode())
    jobID = 0
    if 'JOBID' in os.environ:
        jobID = os.environ['JOBID']
        gLogger.info('DIRAC JobID %s is running at site %s' % (jobID, DIRAC.siteName()))
    workflow.addTool('JobReport', JobReport(jobID))
    workflow.addTool('AccountingReport', DataStoreClient())
    workflow.addTool('Request', Request())
    # Propagate the command line parameters to the workflow if any
    for name, value in wfParameters.items():
        workflow.setValue(name, value)
    # ... and to the workflow module instances of each step
    for stepDef in workflow.step_definitions.values():
        for modInstance in stepDef.module_instances:
            for name, value in wfParameters.items():
                if modInstance.parameters.find(name):
                    modInstance.parameters.setValue(name, value)
    return workflow.execute()
def initialize(self):
    """Standard initialization"""
    # This agent will always loop every 15 minutes
    self.am_setOption("PollingTime", 900)
    # Check whether to send to Monitoring or Accounting or both
    self.jobMonitoringOption = Operations().getMonitoringBackends(
        monitoringType="WMSHistory")
    self.pilotMonitoringOption = Operations().getMonitoringBackends(
        monitoringType="PilotsHistory")
    messageQueue = self.am_getOption("MessageQueue", "dirac.wmshistory")
    self.datastores = {}  # For storing the clients to Accounting and Monitoring
    # Always define pilotReporter: previously it was only assigned when
    # PilotsHistory monitoring was enabled, so any later access would
    # raise AttributeError in the disabled case.
    self.pilotReporter = None
    if "Accounting" in self.jobMonitoringOption:
        self.datastores["Accounting"] = DataStoreClient(retryGraceTime=900)
    if "Monitoring" in self.jobMonitoringOption:
        self.datastores["Monitoring"] = MonitoringReporter(
            monitoringType="WMSHistory", failoverQueueName=messageQueue)
    if "Monitoring" in self.pilotMonitoringOption:
        self.pilotReporter = MonitoringReporter(
            monitoringType="PilotsHistory", failoverQueueName=messageQueue)
    # Translate summary key fields to the JobDB column names
    self.__jobDBFields = []
    for field in self.__summaryKeyFieldsMapping:
        if field == "User":
            field = "Owner"
        elif field == "UserGroup":
            field = "OwnerGroup"
        self.__jobDBFields.append(field)
    return S_OK()
def initialize(self):
    """Standard initialization"""
    # This agent will always loop every 15 minutes
    self.am_setOption("PollingTime", 900)
    self.backends = self.am_getOption("Backends", "Accounting").replace(" ", "").split(",")
    messageQueue = self.am_getOption("MessageQueue", "dirac.wmshistory")
    # Joined with " and " (surrounding spaces): the previous "and ".join
    # logged e.g. "Committing to Accountingand Monitoring backend".
    self.log.info("Committing to %s backend" % " and ".join(self.backends))
    self.datastores = {}  # For storing the clients to Accounting and Monitoring
    if "Accounting" in self.backends:
        self.datastores["Accounting"] = DataStoreClient(retryGraceTime=900)
    if "Monitoring" in self.backends:
        self.datastores["Monitoring"] = MonitoringReporter(
            monitoringType="WMSHistory", failoverQueueName=messageQueue)
    # Translate summary key fields to the JobDB column names
    self.__jobDBFields = []
    for field in self.__summaryKeyFieldsMapping:
        if field == "User":
            field = "Owner"
        elif field == "UserGroup":
            field = "OwnerGroup"
        self.__jobDBFields.append(field)
    return S_OK()
def jobexec(jobxml, wfParameters=None):
    """Load the workflow described in *jobxml* and execute it.

    :param jobxml: path to the workflow XML file
    :param wfParameters: optional dict of parameters to set on the workflow
    :return: result of workflow.execute()
    """
    # None sentinel instead of a mutable {} default argument.
    if wfParameters is None:
        wfParameters = {}
    jobfile = os.path.abspath(jobxml)
    if not os.path.exists(jobfile):
        gLogger.warn('Path to specified workflow %s does not exist' % (jobfile))
        sys.exit(1)
    workflow = fromXMLFile(jobfile)
    gLogger.debug(workflow)
    code = workflow.createCode()
    gLogger.debug(code)
    jobID = 0
    # dict.has_key() was removed in Python 3; use the "in" operator.
    if 'JOBID' in os.environ:
        jobID = os.environ['JOBID']
        gLogger.info('DIRAC JobID %s is running at site %s' % (jobID, DIRAC.siteName()))
    workflow.addTool('JobReport', JobReport(jobID))
    workflow.addTool('AccountingReport', DataStoreClient())
    workflow.addTool('Request', RequestContainer())
    # Propagate the command line parameters to the workflow if any
    for name, value in wfParameters.items():
        workflow.setValue(name, value)
    result = workflow.execute()
    return result
def initialize(self):
    """Standard initialization"""
    # This agent will always loop every 15 minutes
    self.am_setOption("PollingTime", 900)
    self.backends = self.am_getOption("Backends", "Accounting").replace(' ', '').split(',')
    messageQueue = self.am_getOption("MessageQueue", "dirac.wmshistory")
    # Clients used to commit records, keyed by backend name
    self.datastores = {}
    if 'Accounting' in self.backends:
        self.datastores['Accounting'] = DataStoreClient(retryGraceTime=900)
    if 'Monitoring' in self.backends:
        self.datastores['Monitoring'] = MonitoringReporter(
            monitoringType="WMSHistory", failoverQueueName=messageQueue)
    # Map summary key fields onto the actual JobDB column names
    renames = {'User': 'Owner', 'UserGroup': 'OwnerGroup'}
    self.__jobDBFields = [renames.get(field, field)
                          for field in self.__summaryKeyFieldsMapping]
    return S_OK()
def execute(self):
    """Main execution method: snapshot the JobDB summary and commit one
    WMSHistory record per (setup, key-fields) combination."""
    result = gConfig.getSections("/DIRAC/Setups")
    if not result['OK']:
        return result
    validSetups = result['Value']
    gLogger.info("Valid setups for this cycle are %s" % ", ".join(validSetups))
    # Get the WMS Snapshot!
    result = self.jobDB.getSummarySnapshot(self.__jobDBFields)
    now = Time.dateTime()
    if not result['OK']:
        # Fixed garbled message (was "Can't the the jobdb summary");
        # wording matches the sibling agent variant.
        gLogger.error("Can't get the JobDB summary", result['Message'])
    else:
        values = result['Value'][1]
        for record in values:
            recordSetup = record[0]
            if recordSetup not in validSetups:
                gLogger.error("Setup %s is not valid" % recordSetup)
                continue
            # One DataStore client per setup, created lazily
            if recordSetup not in self.dsClients:
                gLogger.info("Creating DataStore client for %s" % recordSetup)
                self.dsClients[recordSetup] = DataStoreClient(
                    setup=recordSetup, retryGraceTime=900)
            record = record[1:]
            rD = {}
            for fV in self.__summaryDefinedFields:
                rD[fV[0]] = fV[1]
            for iP in range(len(self.__summaryKeyFieldsMapping)):
                fieldName = self.__summaryKeyFieldsMapping[iP]
                rD[self.__renameFieldsMapping.get(fieldName, fieldName)] = record[iP]
            record = record[len(self.__summaryKeyFieldsMapping):]
            for iP in range(len(self.__summaryValueFieldsMapping)):
                rD[self.__summaryValueFieldsMapping[iP]] = int(record[iP])
            acWMS = WMSHistory()
            acWMS.setStartTime(now)
            acWMS.setEndTime(now)
            acWMS.setValuesFromDict(rD)
            retVal = acWMS.checkValues()
            if not retVal['OK']:
                gLogger.error("Invalid accounting record ",
                              "%s -> %s" % (retVal['Message'], rD))
            else:
                self.dsClients[recordSetup].addRegister(acWMS)
    # Commit whatever was registered, per setup
    for setup in self.dsClients:
        gLogger.info("Sending records for setup %s" % setup)
        result = self.dsClients[setup].commit()
        if not result['OK']:
            gLogger.error(
                "Couldn't commit wms history for setup %s" % setup, result['Message'])
        else:
            gLogger.info("Sent %s records for setup %s" % (result['Value'], setup))
    return S_OK()
def do_showRegisteredTypes(self, args):
    """Get a list of registered types
    Usage : showRegisteredTypes
    """
    try:
        acClient = DataStoreClient()
        retVal = acClient.getRegisteredTypes()
        # Removed stray debug print of the raw RPC result dict; the
        # parsed output below is what the user should see.
        if not retVal["OK"]:
            gLogger.error("Error: %s" % retVal["Message"])
            return
        for typeList in retVal["Value"]:
            print(typeList[0])
            print(" Key fields:\n %s" % "\n ".join(typeList[1]))
            print(" Value fields:\n %s" % "\n ".join(typeList[2]))
    except Exception:
        self.showTraceback()
this is pytest! """ # pylint: disable=invalid-name,wrong-import-position from DIRAC.Core.Base.Script import parseCommandLine parseCommandLine() from DIRAC import gLogger from DIRAC.AccountingSystem.Client.DataStoreClient import DataStoreClient from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation gLogger.setLevel('DEBUG') dsc = DataStoreClient() def createAccountingRecord(): accountingDict = {} accountingDict['OperationType'] = 'putAndRegister' accountingDict['User'] = '******' accountingDict['Protocol'] = 'DataManager' accountingDict['RegistrationTime'] = 0.0 accountingDict['RegistrationOK'] = 0 accountingDict['RegistrationTotal'] = 0 accountingDict['Destination'] = 'se' accountingDict['TransferTotal'] = 1 accountingDict['TransferOK'] = 1 accountingDict['TransferSize'] = 1 accountingDict['TransferTime'] = 0.0
def execute(self):
    """ Main execution method """
    # Purpose: snapshot the JobDB summary, build one WMSHistory record per
    # (setup, key-fields) combination, and commit them; on commit failure the
    # records added this cycle are kept once (retryOnce/retryValues) and
    # re-registered on the next cycle.
    result = gConfig.getSections("/DIRAC/Setups")
    if not result['OK']:
        return result
    validSetups = result['Value']
    self.log.info("Valid setups for this cycle are %s" % ", ".join(validSetups))
    # Get the WMS Snapshot!
    result = self.jobDB.getSummarySnapshot(self.__jobDBFields)
    now = Time.dateTime()
    if not result['OK']:
        self.log.error(
            "Can't get the JobDB summary",
            "%s: won't commit at this cycle" % result['Message'])
        # NOTE(review): when this branch is taken, acWMSListAdded below is
        # never bound, but the commit loop at the bottom still runs for any
        # clients kept from previous cycles; a failed commit there would hit
        # "self.retryValues = acWMSListAdded" and raise NameError — confirm.
    else:
        values = result['Value'][1]
        if self.retryOnce:
            self.log.verbose(
                "Adding to records to commit those not committed within the previous cycle"
            )
        # Records successfully registered this cycle, kept so they can be
        # retried once if the commit below fails.
        acWMSListAdded = []
        for record in values:
            recordSetup = record[0]
            if recordSetup not in validSetups:
                self.log.error("Setup %s is not valid" % recordSetup)
                continue
            # Lazily create one DataStore client per setup
            if recordSetup not in self.dsClients:
                self.log.info("Creating DataStore client for %s" % recordSetup)
                self.dsClients[recordSetup] = DataStoreClient(
                    retryGraceTime=900)
            # Drop the setup column; the rest is keys then values
            record = record[1:]
            rD = {}
            for fV in self.__summaryDefinedFields:
                rD[fV[0]] = fV[1]
            for iP in range(len(self.__summaryKeyFieldsMapping)):
                fieldName = self.__summaryKeyFieldsMapping[iP]
                rD[self.__renameFieldsMapping.get(fieldName, fieldName)] = record[iP]
            record = record[len(self.__summaryKeyFieldsMapping):]
            for iP in range(len(self.__summaryValueFieldsMapping)):
                rD[self.__summaryValueFieldsMapping[iP]] = int(record[iP])
            # Accounting record spanning a zero-length interval at "now"
            acWMS = WMSHistory()
            acWMS.setStartTime(now)
            acWMS.setEndTime(now)
            acWMS.setValuesFromDict(rD)
            retVal = acWMS.checkValues()
            if not retVal['OK']:
                self.log.error("Invalid accounting record ",
                               "%s -> %s" % (retVal['Message'], rD))
            else:
                self.dsClients[recordSetup].addRegister(acWMS)
                acWMSListAdded.append(acWMS)
        # Re-register the records left over from the previous failed cycle
        if self.retryOnce and self.retryValues:
            for acWMSCumulated in self.retryValues:
                retVal = acWMSCumulated.checkValues()
                if not retVal['OK']:
                    self.log.error("Invalid accounting record ",
                                   "%s" % (retVal['Message']))
                else:
                    # NOTE(review): recordSetup here is whatever the last
                    # loop iteration left it as (unbound if values was
                    # empty) — retried records always go to that one
                    # client; looks suspicious, confirm intent.
                    self.dsClients[recordSetup].addRegister(acWMSCumulated)
    for setup in self.dsClients:
        self.log.info("Sending records for setup %s" % setup)
        result = self.dsClients[setup].commit()
        if not result['OK']:
            self.log.error(
                "Couldn't commit wms history for setup %s" % setup, result['Message'])
            # Re-creating the client: for new connection, and for avoiding accumulating too large of a backlog
            self.dsClients[setup] = DataStoreClient(retryGraceTime=900)
            if not self.retryOnce:
                # First failure: keep this cycle's records for one retry
                self.log.info("Will try again at next cycle")
                self.retryOnce = True
                self.retryValues = acWMSListAdded
            else:
                # Second consecutive failure: give up on the kept records
                self.log.warn("Won't retry one more time")
                self.retryOnce = False
                self.retryValues = []
        else:
            self.log.info("Sent %s records for setup %s" % (result['Value'], setup))
            self.retryOnce = False
    return S_OK()