コード例 #1
0
ファイル: Handler.py プロジェクト: HassenRiahi/CRABServer
def handleKill(resthost, resturi, config, task, procnum, *args, **kwargs):
    """Asks to kill jobs

    :arg str resthost: the hostname where the rest interface is running
    :arg str resturi: the rest base url to contact
    :arg WMCore.Configuration config: input configuration
    :arg TaskWorker.DataObjects.Task task: the task to work on
    :arg int procnum: the process number taking care of the work
    :*args and *kwargs: extra parameters currently not defined
    :return: the result of the handler operation."""
    server = HTTPRequests(resthost,
                          config.TaskWorker.cmscert,
                          config.TaskWorker.cmskey,
                          retry=2)
    handler = TaskHandler(task, procnum, server, 'handleKill')
    # Refresh the user proxy first; a short (5 minute) myproxy delegation
    # is sufficient for a kill operation.
    handler.addWork(
        MyProxyLogon(config=config,
                     server=server,
                     resturi=resturi,
                     procnum=procnum,
                     myproxylen=60 * 5))

    def glidein(config):
        """Performs kill of jobs sent through Glidein
        :arg WMCore.Configuration config: input configuration"""
        handler.addWork(
            DagmanKiller(config=config,
                         server=server,
                         resturi=resturi,
                         procnum=procnum))

    # Backend dispatch: look up the nested function whose name matches the
    # configured backend (lower-cased) in locals() and invoke it. Only
    # 'glidein' is defined here; an unknown backend raises KeyError.
    locals()[getattr(config.TaskWorker, 'backend',
                     DEFAULT_BACKEND).lower()](config)
    return handler.actionWork(args, kwargs)
コード例 #2
0
def handleNewTask(resthost, resturi, config, task, procnum, *args, **kwargs):
    """Performs the injection of a new task

    :arg str resthost: the hostname where the rest interface is running
    :arg str resturi: the rest base url to contact
    :arg WMCore.Configuration config: input configuration
    :arg TaskWorker.DataObjects.Task task: the task to work on
    :arg int procnum: the process number taking care of the work
    :*args and *kwargs: extra parameters currently not defined
    :return: the handler."""
    server = HTTPRequests(resthost, config.TaskWorker.cmscert, config.TaskWorker.cmskey,
                          retry=20, logger=logging.getLogger(str(procnum)))
    handler = TaskHandler(task, procnum, server, config, 'handleNewTask', createTempDir=True)
    # Keyword arguments shared by every action in the chain.
    common = dict(config=config, server=server, resturi=resturi, procnum=procnum)
    # Proxy delegation (one day) and stageout sanity check come first.
    handler.addWork(MyProxyLogon(myproxylen=60 * 60 * 24, **common))
    handler.addWork(StageoutCheck(**common))
    jobType = task['tm_job_type']
    if jobType == 'Analysis':
        # User-supplied input files bypass DBS-based data discovery.
        discovery = UserDataDiscovery if task.get('tm_user_files') else DBSDataDiscovery
        handler.addWork(discovery(**common))
    elif jobType == 'PrivateMC':
        handler.addWork(MakeFakeFileSet(**common))
    handler.addWork(Splitter(**common))
    handler.addWork(DagmanCreator(**common))
    # Dry runs are uploaded for inspection instead of being submitted.
    if task['tm_dry_run'] == 'T':
        handler.addWork(DryRunUploader(**common))
    else:
        handler.addWork(DagmanSubmitter(**common))

    return handler.actionWork(args, kwargs)
コード例 #3
0
ファイル: CRABServer.py プロジェクト: spigad/GangaCRAB3
    def submit(self, job):
        """Submit a new task to CRAB3.

        :arg job: Ganga job whose ``backend``/``inputdata`` carry the CRAB3
            connection settings (proxy, server, api resource) and the
            pset/cache information to upload.
        :raise CRABServerError: when the user sandbox cannot be uploaded."""
        # Lazy %-style logger args avoid formatting when INFO is disabled.
        logger.info('userproxy: %s', job.backend.userproxy)
        logger.info('server_name: %s', job.backend.server_name)
        logger.info('apiresource: %s', job.backend.apiresource)

        server = HTTPRequests(job.backend.server_name, job.backend.userproxy)
        resource = job.backend.apiresource + 'workflow'

        try:
            cachefilename = self.uploadArchive(job.inputdata.pset,
                                               job.inputdata.cacheurl)[1]
        # FIX: 'except HTTPException, e' is Python 2-only syntax; 'as' works
        # on Python 2.6+ and Python 3.
        except HTTPException as e:
            # Dump every diagnostic attribute the HTTPException carries.
            logger.error(type(e))
            logger.error(dir(e))
            logger.error(e.req_headers)
            logger.error(e.req_data)
            logger.error(e.reason)
            logger.error(e.message)
            logger.error(e.headers)
            logger.error(e.result)
            logger.error(e.status)
            logger.error(e.url)
            logger.error(e.args)
            raise CRABServerError("Error uploading cache")
コード例 #4
0
def getProxiedWebDir(task, host, uri, cert, logFunction=print):
    """Query the given REST endpoint for the proxied webdir of a task.

    Returns the proxied webdir url, or None when the API could not find it
    (either an error occurred or the schedd is not configured).
    """
    # Imported lazily: ServerUtilities is also used on the worker nodes and
    # we want to avoid a hard pycurl dependency there. Should eventually be
    # a top-level import once cmscp code is migrated to RESTInteractions.
    from RESTInteractions import HTTPRequests
    webDir = None
    try:
        server = HTTPRequests(host, cert, cert, retry=2)
        # The second and third return values are deprecated; ignore them.
        dictresult, _, _ = server.get(uri, data={'subresource': 'webdirprx',
                                                 'workflow': task})
        resultList = dictresult.get('result')
        if resultList:
            webDir = resultList[0]
    except HTTPException as hte:
        logFunction(traceback.format_exc())
        logFunction(hte.headers)
        logFunction(hte.result)

    return webDir
コード例 #5
0
ファイル: Handler.py プロジェクト: HassenRiahi/CAFTaskWorker
def handleKill(instance, resturl, config, task, *args, **kwargs):
    """Asks to kill jobs

    :arg str instance: the hostname where the rest interface is running
    :arg str resturl: the rest base url to contact
    :arg WMCore.Configuration config: input configuration
    :arg TaskWorker.DataObjects.Task task: the task to work on
    :*args and *kwargs: extra parameters currently not defined
    :return: the result of the handler operation."""
    server = HTTPRequests(instance,
                          config.TaskWorker.cmscert,
                          config.TaskWorker.cmskey,
                          version=__version__)
    handler = TaskHandler(task)
    # A short (5 minute) myproxy delegation is sufficient for a kill.
    handler.addWork(
        MyProxyLogon(config=config,
                     server=server,
                     resturl=resturl,
                     myproxylen=60 * 5))

    def glidein(config):
        """Performs kill of jobs sent through Glidein
        :arg WMCore.Configuration config: input configuration"""
        # Glidein kill is not supported by this task worker yet.
        raise NotImplementedError
        #handler.addWork( DagmanKiller(glideinconfig=config, server=server, resturl=resturl) )

    def panda(config):
        """Performs the re-injection into PanDA
        :arg WMCore.Configuration config: input configuration"""
        handler.addWork(
            PanDAKill(pandaconfig=config, server=server, resturl=resturl))

    # Backend dispatch: find the nested function matching the configured
    # backend name (lower-cased) in locals() and call it; an unknown
    # backend raises KeyError.
    locals()[getattr(config.TaskWorker, 'backend',
                     DEFAULT_BACKEND).lower()](config)
    return handler.actionWork(args, kwargs)
コード例 #6
0
 def get_backendurls(self):
     """Fetch the HTCondor pool name and schedd list from the REST server
     and store them on self.pool / self.schedds."""
     self.logger.info("Querying server %s for HTCondor schedds and pool names." % self.resturi)
     server = HTTPRequests(self.resthost, self.config.TaskWorker.cmscert,
                           self.config.TaskWorker.cmskey, retry=2)
     payload = server.get(self.resturi, data={'subresource': 'backendurls'})[0]['result'][0]
     self.pool = str(payload['htcondorPool'])
     self.schedds = [str(schedd) for schedd in payload['htcondorSchedds']]
     self.logger.info("Resulting pool %s; schedds %s" % (self.pool, ",".join(self.schedds)))
コード例 #7
0
 def setUp(self):
     """
     Setup for unit tests
     """
     proxy = os.environ['X509_USER_PROXY']
     self.server = HTTPRequests(os.environ['SERVER_HOST'], proxy, proxy)
     self.lfnBase = '/store/temp/user/%s/my_cool_dataset-%s/file-%s-%s.root'
     # Template transfer document; 'OVERWRITE' fields get filled per test.
     self.fileDoc = dict(
         id='OVERWRITE',
         username='******',
         taskname='OVERWRITE',
         start_time=0,
         destination='T2_CH_CERN',
         destination_lfn='OVERWRITE',
         source='T2_US_Caltech',
         source_lfn='OVERWRITE',
         filesize=random.randint(1, 9999),
         publish=1,
         transfer_state='OVERWRITE',
         publication_state='OVERWRITE',
         job_id=1,
         job_retry_count=0,
         type='log',
         rest_host='cmsweb.cern.ch',
         rest_uri='/crabserver/prod/',
     )
     self.ids = []
     # just random users for tests
     self.users = ['jbalcas', 'mmascher', 'dciangot', 'riahi', 'erupeika',
                   'sbelforte']
     self.tasks = {}
     self.totalFiles = 10
コード例 #8
0
    def testExecute(self):
        """Check that LumiMaskBuilder.execute fills runs/lumis for a task."""
        # Recycle DataDiscoveryTest code to create the input of this test.
        ddObj, task, requestname, datasetfiles, locations = DataDiscoveryTest.prepareObjects()
        res = ddObj.formatOutput(task=task, requestname=requestname,
                                 datasetfiles=datasetfiles, locations=locations)

        # Test the case where the lumimask is empty; that is the most
        # interesting case.
        cert, key = Requests().getKeyCert()
        server = HTTPRequests(os.environ['REST_URL'], cert, key, version="0.debug")
        lmb = LumiMaskBuilder(None, server, "/crabserver/dev/workflowdb")

        # This is a wf name I had in the REST db; used to check by hand if
        # the db was updated. We should create a RESTMock for unit tests.
        task = {
            'tm_taskname': "130719_090932_mmascher_crab_tmp",
            'tm_split_args': {'lumis': {}, 'runs': {}},
        }
        lmb.execute(res.result, task=task)

        self.assertEqual(lmb.runs, ['1', '2', '3', '4'])
        # First run too long to check in a unit test; skip index 0.
        self.assertEqual(lmb.lumis[1:],
                         ['1,5,8,9,20,22', '11,13', '1,2,5,7,100,100'])
コード例 #9
0
def handleNewTask(resthost, resturi, config, task, *args, **kwargs):
    """Performs the injection of a new task

    :arg str resthost: the hostname where the rest interface is running
    :arg str resturi: the rest base url to contact
    :arg WMCore.Configuration config: input configuration
    :arg TaskWorker.DataObjects.Task task: the task to work on
    :*args and *kwargs: extra parameters currently not defined
    :return: the handler."""
    server = HTTPRequests(resthost, config.TaskWorker.cmscert, config.TaskWorker.cmskey)
    handler = TaskHandler(task)
    # Delegate the user proxy for one day before any other action.
    handler.addWork( MyProxyLogon(config=config, server=server, resturi=resturi, myproxylen=60*60*24) )
    if task['tm_job_type'] == 'Analysis': 
        # User-supplied input files bypass DBS-based data discovery.
        if task.get('tm_arguments', {}).get('userfiles'):
            handler.addWork( UserDataDiscovery(config=config, server=server, resturi=resturi) )
        else:
            handler.addWork( DBSDataDiscovery(config=config, server=server, resturi=resturi) )
    elif task['tm_job_type'] == 'PrivateMC': 
        handler.addWork( MakeFakeFileSet(config=config, server=server, resturi=resturi) )
    handler.addWork( Splitter(config=config, server=server, resturi=resturi) )

    def glidein(config):
        """Performs the injection of a new task into Glidein
        :arg WMCore.Configuration config: input configuration"""
        handler.addWork( DagmanCreator(config=config, server=server, resturi=resturi) )
        handler.addWork( DagmanSubmitter(config=config, server=server, resturi=resturi) )

    def panda(config):
        """Performs the injection into PanDA of a new task
        :arg WMCore.Configuration config: input configuration"""
        handler.addWork( PanDABrokerage(pandaconfig=config, server=server, resturi=resturi) )
        handler.addWork( PanDAInjection(pandaconfig=config, server=server, resturi=resturi) )

    # Backend dispatch: pick the nested function matching the configured
    # backend name (lower-cased) from locals(); unknown backends raise
    # KeyError.
    locals()[getattr(config.TaskWorker, 'backend', DEFAULT_BACKEND).lower()](config)
    return handler.actionWork(args)
コード例 #10
0
def handleResubmit(resthost, resturi, config, task, *args, **kwargs):
    """Performs the re-injection of failed jobs

    :arg str resthost: the hostname where the rest interface is running
    :arg str resturi: the rest base url to contact
    :arg WMCore.Configuration config: input configuration
    :arg TaskWorker.DataObjects.Task task: the task to work on
    :*args and *kwargs: extra parameters currently not defined
    :return: the result of the handler operation."""
    server = HTTPRequests(resthost, config.TaskWorker.cmscert, config.TaskWorker.cmskey)
    handler = TaskHandler(task)
    # Delegate the user proxy for one day before the resubmission actions.
    handler.addWork( MyProxyLogon(config=config, server=server, resturi=resturi, myproxylen=60*60*24) )
    def glidein(config):
        """Performs the re-injection into Glidein
        :arg WMCore.Configuration config: input configuration"""
        handler.addWork( DagmanResubmitter(config=config, server=server, resturi=resturi) )

    def panda(config):
        """Performs the re-injection into PanDA
        :arg WMCore.Configuration config: input configuration"""
        handler.addWork( PanDAgetSpecs(pandaconfig=config, server=server, resturi=resturi) )
        handler.addWork( PanDASpecs2Jobs(pandaconfig=config, server=server, resturi=resturi) )
        handler.addWork( PanDABrokerage(pandaconfig=config, server=server, resturi=resturi) )
        handler.addWork( PanDAInjection(pandaconfig=config, server=server, resturi=resturi) )

    # Backend dispatch: pick the nested function matching the configured
    # backend name (lower-cased) from locals(); unknown backends raise
    # KeyError.
    locals()[getattr(config.TaskWorker, 'backend', DEFAULT_BACKEND).lower()](config)
    return handler.actionWork(args)
コード例 #11
0
ファイル: Handler.py プロジェクト: todor-ivanov/CRABServer
def handleKill(resthost, resturi, config, task, procnum, *args, **kwargs):
    """Asks to kill jobs

    :arg str resthost: the hostname where the rest interface is running
    :arg str resturi: the rest base url to contact
    :arg WMCore.Configuration config: input configuration
    :arg TaskWorker.DataObjects.Task task: the task to work on
    :arg int procnum: the process number taking care of the work
    :*args and *kwargs: extra parameters currently not defined
    :return: the result of the handler operation."""
    server = HTTPRequests(resthost, config.TaskWorker.cmscert, config.TaskWorker.cmskey,
                          retry=20, logger=logging.getLogger(str(procnum)))
    handler = TaskHandler(task, procnum, server, config, 'handleKill')
    common = dict(config=config, server=server, resturi=resturi, procnum=procnum)
    # Proxy delegation first (5 minutes is enough for a kill), then the
    # actual DAGMan kill action.
    handler.addWork(MyProxyLogon(myproxylen=60 * 5, **common))
    handler.addWork(DagmanKiller(**common))

    return handler.actionWork(args, kwargs)
コード例 #12
0
def serverCall(ddmServer, cert, key, verbose, call, api, data):
    """Issue one request against the DDM registry and return its payload.

    :arg str call: name of the HTTPRequests method to use (e.g. 'post').
    :arg str api: registry api name appended to '/registry/request/'."""
    connection = HTTPRequests(url=ddmServer, localcert=cert,
                              localkey=key, verbose=verbose)
    endpoint = '/registry/request' + '/' + api
    response = getattr(connection, call)(endpoint, data=data)
    return response[0]
コード例 #13
0
 def uploadWarning(self, warning, userProxy, taskname):
     """Best-effort upload of a warning message to the REST task database;
     failures are logged but never raised."""
     try:
         userServer = HTTPRequests(self.server['host'], userProxy, userProxy, retry=2)
         configreq = {'workflow': taskname,
                      'subresource': 'addwarning',
                      'warning': b64encode(warning)}
         userServer.post(self.restURInoAPI + '/task', data=urllib.urlencode(configreq))
     except HTTPException as hte:
         # Recording a warning is not critical; log and continue.
         self.logger.error(hte.headers)
         self.logger.warning("Cannot add a warning to REST interface. Warning message: %s" % warning)
コード例 #14
0
def serverCall(ddmServer, cert, key, verbose, call, api, data):
    """Issue one request against the DDM registry, wrapping HTTP failures
    into a retriable TaskWorkerException."""
    server = HTTPRequests(url=ddmServer, localcert=cert, localkey=key, verbose=verbose)
    endpoint = '/registry/request' + '/' + api
    try:
        ddmRequest = getattr(server, call)(endpoint, data=data)
    except HTTPException as hte:
        msg = "HTTP Error while contacting the DDM server %s:\n%s" % (ddmServer, str(hte))
        msg += "\nHTTP Headers are: %s" % hte.headers
        # DDM hiccups are usually transient, so mark the failure retriable.
        raise TaskWorkerException(msg, retry=True)

    return ddmRequest[0]
コード例 #15
0
def server_info(subresource, server, proxyfilename, baseurl):
    """
    Get relevant information about the CRAB REST server

    :arg str subresource: the REST subresource to query
    :arg str server: the server url to contact
    :arg str proxyfilename: user proxy file, used as both cert and key
    :arg str baseurl: the REST base uri of the API
    :return: the first element of the 'result' list returned by the server
    """
    # FIX: the original rebound the 'server' parameter to the connection
    # object, shadowing the input url; use a distinct local name instead.
    connection = HTTPRequests(url=server,
                              localcert=proxyfilename,
                              localkey=proxyfilename,
                              version='HC')

    # status and reason are unused; keep only the payload.
    dictresult, _, _ = connection.get(baseurl,
                                      {'subresource': subresource})

    return dictresult['result'][0]
コード例 #16
0
ファイル: TaskAction.py プロジェクト: dciangot/CRABServer
 def deleteWarnings(self, userProxy, taskname):
     """Best-effort removal of all warnings for a task via the REST
     interface; failures are logged but never raised."""
     userServer = HTTPRequests(self.server['host'], userProxy, userProxy,
                               retry=2, logger=self.logger)
     configreq = {'subresource': 'deletewarnings', 'workflow': taskname}
     try:
         userServer.post(self.restURInoAPI + '/task', data=urllib.urlencode(configreq))
     except HTTPException as hte:
         self.logger.error("Error deleting warnings: %s", str(hte))
         self.logger.warning("Can not delete warnings from REST interface.")
コード例 #17
0
    def __init__(self, logger, config):
        """
        Initialize connection to the db and logging/config

        :param logger: pass the logging
        :param config: refer to the configuration file
        """
        self.config = config
        self.logger = logger
        # Oracle is reached through the REST layer; the operator proxy is
        # used as both certificate and key.
        self.oracleDB = HTTPRequests(config.oracleDB, config.opsProxy, config.opsProxy)
コード例 #18
0
    def _execute(self, resthost, resturi, config, task):
        """Poll DDM for every TAPERECALL task and release the completed ones.

        For each task currently in TAPERECALL status, query the DDM server
        for its recall request; when the request is completed, set the task
        status back to NEW and delete its warnings.
        """
        mw = MasterWorker(config, quiet=False, debug=True, test=False)

        tapeRecallStatus = 'TAPERECALL'
        self.logger.info("Retrieving %s tasks", tapeRecallStatus)
        recallingTasks = mw.getWork(limit=999999, getstatus=tapeRecallStatus)
        if len(recallingTasks) > 0:
            self.logger.info("Retrieved a total of %d %s tasks",
                             len(recallingTasks), tapeRecallStatus)
            self.logger.debug("Retrieved the following %s tasks: \n%s",
                              tapeRecallStatus, str(recallingTasks))
            for recallingTask in recallingTasks:
                # Tasks without a DDM request id cannot be checked; skip them.
                if not recallingTask['tm_DDM_reqid']:
                    self.logger.debug(
                        "tm_DDM_reqid' is not defined for task %s, skipping such task",
                        recallingTask['tm_taskname'])
                    continue
                # Ask DDM for the current state of the recall request.
                ddmRequest = statusRequest(recallingTask['tm_DDM_reqid'],
                                           config.TaskWorker.DDMServer,
                                           config.TaskWorker.cmscert,
                                           config.TaskWorker.cmskey,
                                           verbose=False)
                self.logger.info("Contacted %s using %s and %s, got:\n%s",
                                 config.TaskWorker.DDMServer,
                                 config.TaskWorker.cmscert,
                                 config.TaskWorker.cmskey, ddmRequest)
                # The query above returns a JSON with a format {"result": "OK", "message": "Request found", "data": [{"request_id": 14, "site": <site>, "item": [<list of blocks>], "group": "AnalysisOps", "n": 1, "status": "new", "first_request": "2018-02-26 23:25:41", "last_request": "2018-02-26 23:25:41", "request_count": 1}]}
                if ddmRequest["data"][0][
                        "status"] == "completed":  # possible values: new, activated, updated, completed, rejected, cancelled
                    self.logger.info(
                        "Request %d is completed, setting status of task %s to NEW",
                        recallingTask['tm_DDM_reqid'],
                        recallingTask['tm_taskname'])
                    mw.updateWork(recallingTask['tm_taskname'],
                                  recallingTask['tm_task_command'], 'NEW')
                    # Delete all task warnings (the tapeRecallStatus added a dataset warning which is no longer valid now)
                    server = HTTPRequests(config.TaskWorker.resturl,
                                          config.TaskWorker.cmscert,
                                          config.TaskWorker.cmskey,
                                          retry=20,
                                          logger=self.logger)
                    mpl = MyProxyLogon(config=config,
                                       server=server,
                                       resturi=config.TaskWorker.restURInoAPI,
                                       myproxylen=self.pollingTime)
                    mpl.execute(task=recallingTask
                                )  # this adds 'user_proxy' to recallingTask
                    mpl.deleteWarnings(recallingTask['user_proxy'],
                                       recallingTask['tm_taskname'])
コード例 #19
0
    def __call__(self):
        """Retrieve and print the list of the user's tasks since self.date.

        :return: list of [name, status] pairs, newest first (may be empty).
        :raise RESTCommunicationException: on a non-200 server response."""
        server = HTTPRequests(self.serverurl,
                              self.proxyfilename,
                              self.proxyfilename,
                              version=__version__)
        dictresult, status, reason = server.get(self.uri,
                                                data={'timestamp': self.date})

        # FIX: check the HTTP status *before* indexing into the payload; on
        # an error response the body may not contain a 'result' key, and the
        # original raised KeyError instead of RESTCommunicationException.
        if status != 200:
            msg = "Problem retrieving tasks:\ninput:%s\noutput:%s\nreason:%s" % (
                str(self.date), str(dictresult), str(reason))
            raise RESTCommunicationException(msg)

        dictresult = dictresult['result']  #take just the significant part

        # Newest tasks first.
        dictresult.sort()
        dictresult.reverse()

        if self.options.status:
            dictresult = [
                item for item in dictresult if item[1] == self.options.status
            ]

        result = [item[0:2] for item in dictresult]

        today = date.today()

        if not dictresult:
            msg = "No tasks found from %s until %s" % (self.date, today)
            if self.options.status:
                msg += " with status %s" % (self.options.status)
            self.logger.info(msg)
            return result

        msg = "\nList of tasks from %s until %s" % (self.date, today)
        if self.options.status:
            msg += " with status %s" % (self.options.status)
        self.logger.info(msg)
        msg = "Beware that STATUS here does not include information from grid jobs"
        self.logger.info(msg)
        self.logger.info('=' * 80)
        self.logger.info('NAME\t\t\t\t\t\t\t\tSTATUS')
        self.logger.info('=' * 80)
        for item in dictresult:
            name, status = item[0:2]
            self.logger.info('%s\n\t\t\t\t\t\t\t\t%s' % (name, status))
            self.logger.info('-' * 80)
        self.logger.info('\n')

        return result
コード例 #20
0
ファイル: GenerateMONIT.py プロジェクト: vlimant/CRABServer
 def getCountTasksByStatusAbs(self):
     """Return a {status: count} dict over all tasks, or [] on any failure."""
     try:
         resturi = "/crabserver/prod/task"
         configreq = {'minutes': "1000000000", 'subresource': "counttasksbystatus"}
         server = HTTPRequests(self.resthost, "/data/certs/servicecert.pem", "/data/certs/servicekey.pem", retry=10)
         result = server.get(resturi, data=configreq)
         return dict(result[0]['result'])
     except Exception as ex:
         # FIX: the original called hasattr() on the sys.exc_info() tuple,
         # which never has 'headers', so the branch was dead; inspect the
         # exception object itself instead.
         if hasattr(ex, "headers"):
             self.logger.error(str(ex.headers))
         # logger.exception already records the current traceback.
         self.logger.exception("Error in getCountTasksByStatusAbs:")
         pprint(ex)
         traceback.print_tb(ex.__traceback__)
         return []
コード例 #21
0
 def sendScheddToREST(self, task, schedd):
     """ Try to set the schedd to the oracle database in the REST interface
         Raises TaskWorkerException in case of failure
     """
     task['tm_schedd'] = schedd
     userServer = HTTPRequests(self.server['host'], task['user_proxy'], task['user_proxy'],
                               retry=20, logger=self.logger)
     configreq = {'workflow': task['tm_taskname'],
                  'subresource': 'updateschedd',
                  'scheddname': schedd}
     try:
         userServer.post(self.restURInoAPI + '/task', data=urllib.urlencode(configreq))
     except HTTPException as hte:
         msg = "Unable to contact cmsweb and update scheduler on which task will be submitted. Error msg: %s" % hte.headers
         self.logger.warning(msg)
         time.sleep(20)
         raise TaskWorkerException(msg) #we already tried 20 times, give up
コード例 #22
0
ファイル: Handler.py プロジェクト: HassenRiahi/CAFTaskWorker
def handleNewTask(instance, resturl, config, task, *args, **kwargs):
    """Performs the injection of a new task

    :arg str instance: the hostname where the rest interface is running
    :arg str resturl: the rest base url to contact
    :arg WMCore.Configuration config: input configuration
    :arg TaskWorker.DataObjects.Task task: the task to work on
    :*args and *kwargs: extra parameters currently not defined
    :return: the handler."""
    server = HTTPRequests(instance,
                          config.TaskWorker.cmscert,
                          config.TaskWorker.cmskey,
                          version=__version__)
    handler = TaskHandler(task)
    # Delegate the user proxy for one day before any other action.
    handler.addWork(
        MyProxyLogon(config=config,
                     server=server,
                     resturl=resturl,
                     myproxylen=60 * 60 * 24))
    if task['tm_job_type'] == 'Analysis':
        handler.addWork(
            DBSDataDiscovery(config=config, server=server, resturl=resturl))
        handler.addWork(
            LumiMaskBuilder(config=config, server=server, resturl=resturl))
    elif task['tm_job_type'] == 'PrivateMC':
        handler.addWork(
            MakeFakeFileSet(config=config, server=server, resturl=resturl))
    handler.addWork(Splitter(config=config, server=server, resturl=resturl))

    def glidein(config):
        """Performs the injection of a new task into Glidein
        :arg WMCore.Configuration config: input configuration"""
        # Glidein submission is not supported by this task worker yet.
        raise NotImplementedError
        #handler.addWork( DagmanCreator(glideinconfig=config, server=server, resturl=resturl) )

    def panda(config):
        """Performs the injection into PanDA of a new task
        :arg WMCore.Configuration config: input configuration"""
        handler.addWork(
            PanDABrokerage(pandaconfig=config, server=server, resturl=resturl))
        handler.addWork(
            PanDAInjection(pandaconfig=config, server=server, resturl=resturl))

    # Backend dispatch: find the nested function matching the configured
    # backend name (lower-cased) in locals() and call it; an unknown
    # backend raises KeyError.
    locals()[getattr(config.TaskWorker, 'backend',
                     DEFAULT_BACKEND).lower()](config)
    return handler.actionWork(args)
コード例 #23
0
def updatewebdir(ad):
    """Report the task's user webdir url to the REST interface.

    :arg dict ad: classad-like mapping with CRAB_RestHost, CRAB_RestURInoAPI,
        CRAB_ReqName, CRAB_UserWebDir and X509UserProxy entries.
    :return: 0 on success, 1 on any failure (best effort, never raises)."""
    data = {'subresource': 'addwebdir',
            'workflow': ad['CRAB_ReqName'],
            'webdirurl': ad['CRAB_UserWebDir']}
    host = ad['CRAB_RestHost']
    uri = ad['CRAB_RestURInoAPI'] + '/task'
    cert = ad['X509UserProxy']
    try:
        from RESTInteractions import HTTPRequests
        from httplib import HTTPException
        import urllib
        server = HTTPRequests(host, cert, cert)
        server.post(uri, data=urllib.urlencode(data))
        return 0
    except Exception:
        # FIX: 'print traceback...' was Python 2-only print-statement syntax;
        # also avoid a bare except so SystemExit/KeyboardInterrupt propagate.
        print(traceback.format_exc())
        return 1
コード例 #24
0
ファイル: CRABServer.py プロジェクト: spigad/GangaCRAB3
    def getOutput(self, job):
        """Retrieve the output (log files) of the given job from the CRAB3 REST."""
        # Old crab2-style implementation kept for reference (dead string):
        """
        if not os.path.exists(job.inputdata.ui_working_dir):
            raise CRABServerError('Workdir "%s" not found.' %
                                  job.inputdata.ui_working_dir)

        cmd = 'crab -getoutput %d -c %s' % (int(job.id) + 1,
                                            job.inputdata.ui_working_dir)
        self._send_with_retry(cmd, 'getoutput', job.backend.crab_env)
        # Make output files coming from the WMS readable.
        for root, _, files in os.walk(os.path.join(job.inputdata.ui_working_dir,
                                                   'res')): # Just 'res'.
            for f in files:
                os.chmod(os.path.join(root, f), 0644)
        """
        # FIX: message typo ('geting Output for jon' -> 'getting Output for job').
        logger.info('getting Output for job %s:%s' %
                    (job.backend.taskname, job.backend.crabid))
        inputlist = [('workflow', job.backend.taskname),
                     ('subresource', 'logs'),
                     ('jobids', job.backend.crabid)]

        server = HTTPRequests(job.backend.server_name, job.backend.userproxy)

        resource = job.backend.apiresource + 'workflow'

        try:
            dictresult, status, reason = server.get(resource, data=inputlist)

        # FIX: 'except HTTPException, e' and the bare 'print' statements were
        # Python 2-only syntax; use 'as' and print() calls.
        except HTTPException as e:
            # Dump every diagnostic attribute the HTTPException carries.
            print(type(e))
            print(dir(e))
            print(e.req_headers)
            print(e.req_data)
            print(e.reason)
            print(e.message)
            print(e.headers)
            print(e.result)
            print(e.status)
            print(e.url)
            print(e.args)
コード例 #25
0
    def _execute(self, resthost, resturi, config, task):
        self.logger.info('Cleaning filemetadata older than 30 days..')
        server = HTTPRequests(resthost,
                              config.TaskWorker.cmscert,
                              config.TaskWorker.cmskey,
                              retry=2)
        ONE_MONTH = 24 * 30
        try:
            instance = resturi.split('/')[2]
            server.delete('/crabserver/%s/filemetadata' % instance,
                          data=urllib.urlencode({'hours': ONE_MONTH}))


#TODO return fro the server a value (e.g.: ["ok"]) to see if everything is ok
#            result = server.delete('/crabserver/dev/filemetadata', data=urllib.urlencode({'hours': ONE_MONTH}))[0]['result'][0]
#            self.logger.info('FMDCleaner, got %s' % result)
        except HTTPException as hte:
            self.logger.error(hte.headers)
コード例 #26
0
def updateWebDir(ad):
    """Register the task's user web directory URL with the CRAB REST server.

    :arg ad: classad-like mapping carrying the REST endpoint, the task name,
        the webdir URL and the path to the user proxy.
    :return: 0 on success, 1 if the REST call raised an HTTPException.
    """
    host = ad['CRAB_RestHost']
    uri = ad['CRAB_RestURInoAPI'] + '/task'
    cert = ad['X509UserProxy']
    payload = {
        'subresource': 'addwebdir',
        'workflow': ad['CRAB_ReqName'],
        'webdirurl': ad['CRAB_UserWebDir'],
    }
    try:
        # The user proxy doubles as both certificate and key.
        server = HTTPRequests(host, cert, cert)
        server.post(uri, data=urllib.urlencode(payload))
        return 0
    except HTTPException as hte:
        printLog(traceback.format_exc())
        printLog(hte.headers)
        printLog(hte.result)
        return 1
コード例 #27
0
    def _execute(self, resthost, resturi, config, task):
        """Recurring action: move TAPERECALL tasks whose DDM request completed back to NEW.

        For every task currently in TAPERECALL status this (1) re-downloads
        the user sandbox from the crabcache so its cache entry is kept alive
        while the recall is pending, (2) queries the DDM server for the status
        of the recall request and (3), when the request is 'completed', resets
        the task status to NEW and deletes the task warnings.

        NOTE(review): resthost/resturi/task parameters are not used here; the
        REST endpoint is read from config.TaskWorker instead — presumably the
        recurring-action framework passes the same signature to every action.
        """
        mw = MasterWorker(config, logWarning=False, logDebug=False, sequential=True, console=False)

        tapeRecallStatus = 'TAPERECALL'
        self.logger.info("Retrieving %s tasks", tapeRecallStatus)
        recallingTasks = mw.getWork(limit=999999, getstatus=tapeRecallStatus)
        if len(recallingTasks) > 0:
            self.logger.info("Retrieved a total of %d %s tasks", len(recallingTasks), tapeRecallStatus)
            self.logger.debug("Retrieved the following %s tasks: \n%s", tapeRecallStatus, str(recallingTasks))
            for recallingTask in recallingTasks:
                # A task without a DDM request id cannot be polled; skip it.
                if not recallingTask['tm_DDM_reqid']:
                    self.logger.debug("tm_DDM_reqid' is not defined for task %s, skipping such task", recallingTask['tm_taskname'])
                    continue

                # Make sure the task sandbox in the crabcache is not deleted until the tape recall is completed
                from WMCore.Services.UserFileCache.UserFileCache import UserFileCache
                ufc = UserFileCache({'endpoint': recallingTask['tm_cache_url'], "pycurl": True})
                sandbox = recallingTask['tm_user_sandbox'].replace(".tar.gz","")
                try:
                    # Download touches the cache entry (refreshing its age);
                    # the local copy is not needed, so remove it right away.
                    ufc.download(sandbox, sandbox, recallingTask['tm_username'])
                    os.remove(sandbox)
                except Exception as ex:
                    # Best-effort: a failed refresh is logged but does not
                    # stop the DDM status check below.
                    self.logger.exception(ex)
                    self.logger.info("The CRAB3 server backend could not download the input sandbox (%s) from the frontend (%s) using the '%s' username."+\
                                     " This could be a temporary glitch, will try again in next occurrence of the recurring action."+\
                                     " Error reason:\n%s", sandbox, recallingTask['tm_cache_url'], recallingTask['tm_username'], str(ex))

                ddmRequest = statusRequest(recallingTask['tm_DDM_reqid'], config.TaskWorker.DDMServer, config.TaskWorker.cmscert, config.TaskWorker.cmskey, verbose=False)
                self.logger.info("Contacted %s using %s and %s, got:\n%s", config.TaskWorker.DDMServer, config.TaskWorker.cmscert, config.TaskWorker.cmskey, ddmRequest)
                # The query above returns a JSON with a format {"result": "OK", "message": "Request found", "data": [{"request_id": 14, "site": <site>, "item": [<list of blocks>], "group": "AnalysisOps", "n": 1, "status": "new", "first_request": "2018-02-26 23:25:41", "last_request": "2018-02-26 23:25:41", "request_count": 1}]}
                if ddmRequest["data"][0]["status"] == "completed": # possible values: new, activated, updated, completed, rejected, cancelled
                    self.logger.info("Request %d is completed, setting status of task %s to NEW", recallingTask['tm_DDM_reqid'], recallingTask['tm_taskname'])
                    mw.updateWork(recallingTask['tm_taskname'], recallingTask['tm_task_command'], 'NEW')
                    # Delete all task warnings (the tapeRecallStatus added a dataset warning which is no longer valid now)
                    server = HTTPRequests(config.TaskWorker.resturl, config.TaskWorker.cmscert, config.TaskWorker.cmskey, retry=20, logger=self.logger)
                    mpl = MyProxyLogon(config=config, server=server, resturi=config.TaskWorker.restURInoAPI, myproxylen=self.pollingTime)
                    mpl.execute(task=recallingTask) # this adds 'user_proxy' to recallingTask
                    mpl.deleteWarnings(recallingTask['user_proxy'], recallingTask['tm_taskname'])

        else:
            self.logger.info("No %s task retrieved.", tapeRecallStatus)
コード例 #28
0
ファイル: Final.py プロジェクト: HassenRiahi/CRABServer
    def execute(self, *args, **kw):
        """Report the final DAG status to the REST server when the task failed.

        :arg args[0]: DAG status code (1/2/3 mean failure; 3 = max failures hit).
        :arg args[1]: total number of failed jobs.
        :arg args[2]: REST server instance (host).
        :arg args[3]: REST resource url to PUT the status to.
        :arg kw: optional keywords; 'task' (with 'tm_taskname') identifies the
            workflow being reported. TODO confirm the caller passes it.
        :return: the (integer) DAG status unchanged.
        """
        dag_status = int(args[0])
        failed_count = int(args[1])
        restinstance = args[2]
        resturl = args[3]
        if dag_status in [1, 2, 3]:
            if dag_status == 3:
                msg = "Task aborted because the maximum number of failures was hit; %d total failed jobs." % failed_count
            else:
                msg = "Task failed overall; %d failed jobs" % failed_count
            configreq = {
                'workflow': kw['task']['tm_taskname'],
                'substatus': "FAILED",
                # Original code encoded an undefined name 'e'; the failure
                # description computed above is the intended payload.
                'subfailure': base64.b64encode(msg),
            }
            data = urllib.urlencode(configreq)
            server = HTTPRequests(restinstance, os.environ['X509_USER_PROXY'],
                                  os.environ['X509_USER_PROXY'])
            server.put(resturl, data=data)

        return dag_status
コード例 #29
0
ファイル: CRABServer.py プロジェクト: spigad/GangaCRAB3
    def status(self, job):
        """Get the status of a jobset.

        Queries the CRAB3 REST 'workflow' resource for the task attached to
        *job* and returns the raw server answer.

        :arg job: ganga job whose backend carries server_name, userproxy,
            apiresource and taskname.
        :return: (dictresult, status, reason) triple from the REST server.
        :raise HTTPException: re-raised after logging diagnostics.
        """
        logger.info('checking status')

        try:
            server = HTTPRequests(job.backend.server_name,
                                  job.backend.userproxy)
            resource = job.backend.apiresource + 'workflow'
            dictresult, status, reason = server.get(
                resource, data={'workflow': job.backend.taskname})
            logger.info("status %s, reason %s" % (status, reason))
            return dictresult, status, reason

        except HTTPException as e:
            # Log every diagnostic field the exception carries, consistent
            # with kill()'s error handling (logger instead of bare print).
            logger.error(type(e))
            logger.error(e.req_headers)
            logger.error(e.req_data)
            logger.error(e.reason)
            logger.error(e.message)
            logger.error(e.headers)
            logger.error(e.result)
            logger.error(e.status)
            logger.error(e.url)
            logger.error(e.args)
            # Bare raise preserves the original traceback.
            raise
0
ファイル: CRABServer.py プロジェクト: spigad/GangaCRAB3
    def kill(self, job):
        """Kill all the jobs on the task.

        Sends a DELETE for the whole workflow to the CRAB3 REST 'workflow'
        resource.

        :arg job: ganga job whose backend carries server_name, userproxy,
            apiresource and taskname.
        :return: True if the server accepted the kill request.
        :raise HTTPException: re-raised after logging diagnostics.
        """
        try:
            server = HTTPRequests(job.backend.server_name,
                                  job.backend.userproxy)
            resource = job.backend.apiresource + 'workflow'
            dictresult, status, reason = server.delete(
                resource,
                data=urllib.urlencode({'workflow': job.backend.taskname}))
            logger.info("Kill answer: %s" % status)
            logger.info("Kill dictresult: %s" % dictresult)
            return True
        # 'as' form is consistent with the rest of the file and valid on
        # both Python 2.6+ and Python 3.
        except HTTPException as e:
            logger.error(type(e))
            logger.error(e.req_headers)
            logger.error(e.req_data)
            logger.error(e.reason)
            logger.error(e.message)
            logger.error(e.headers)
            logger.error(e.result)
            logger.error(e.status)
            logger.error(e.url)
            logger.error(e.args)
            # Bare raise preserves the original traceback.
            raise