Example 1
    def __call__(self):
        if self.options.task is None:
            return CommandResult(2001, 'ERROR: Task option is required')

        server = HTTPRequests(self.cachedinfo['Server'] + ':' + str(self.cachedinfo['Port']))

        self.logger.debug('Looking up detailed postmortem of task %s' % self.cachedinfo['RequestName'])
        dictresult, postmortem, reason = server.get(self.uri + self.cachedinfo['RequestName'])

        if postmortem != 200:
            msg = "Problem retrieving postmortem:\ninput:%s\noutput:%s\nreason:%s" % (str(self.cachedinfo['RequestName']), str(dictresult), str(reason))
            return CommandResult(1, msg)

        for workflow in dictresult['errors']:
            self.logger.info("#%i %s" % (workflow['subOrder'], workflow['request']))
            if self.options.verbose or self.options.outputfile:
                self.printVerbose(workflow['details'], self.options.outputfile, os.path.join(self.requestarea, 'results', 'jobFailures.log'))
            else:
                self.logger.debug("   Aggregating job failures")
                groupederrs = self.aggregateFailures(workflow['details'])
                if not groupederrs:
                    self.logger.info("   No failures")
                    continue
                self.logger.info("   List of failures and jobs per each failure: (one job could have more then one failure, one per each step)")
                for hkey in groupederrs:
                    ## remove duplicates and sort the job ids
                    joberrs = list(set(groupederrs[hkey]['jobs']))
                    joberrs.sort()
                    self.logger.info('     %s jobs failed with error "%s"' %(len(joberrs), groupederrs[hkey]['error']))
                    self.logger.info('       (%s)'  %(', '.join([ str(jobid[0]) for jobid in joberrs])) )

        return CommandResult(0, None)
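The else branch above relies on the command's own aggregateFailures helper; the sketch below is only a hypothetical illustration of the same grouping and dedup/sort pattern, with an assumed input shape of (jobid, error message) pairs.

from collections import defaultdict

def group_failures(details):
    """Group (jobid, error) pairs by error message (assumed input shape)."""
    grouped = defaultdict(set)
    for jobid, error in details:
        grouped[error].add(jobid)
    for error, jobids in grouped.items():
        # one line per distinct error, with a sorted, duplicate-free job list
        print('%s jobs failed with error "%s" (%s)'
              % (len(jobids), error, ', '.join(str(j) for j in sorted(jobids))))

group_failures([(1, 'stage-out timeout'), (2, 'stage-out timeout'), (1, 'exit code 8001')])
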
Example 2
    def __call__(self):

        server = HTTPRequests(self.serverurl, self.options.proxyfile if self.options.proxyfile else self.proxyfilename)

        self.logger.debug('Looking up the type of task %s' % self.cachedinfo['RequestName'])
        dictresult, status, reason = server.get(self.uri, data = {'workflow': self.cachedinfo['RequestName'], 'subresource': 'type'})
        self.logger.debug('Task type %s' % dictresult['result'][0])
        return dictresult['result'][0]
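Every example in this list follows the same calling convention: HTTPRequests.get/post/put/delete return a (dictresult, status, reason) tuple, and status should be compared against 200 before dictresult is used (this example skips the check). Below is a minimal sketch with a fake server standing in for HTTPRequests; the URI, task name and returned value are placeholders, not real server data.

class FakeServer(object):
    """Stand-in for HTTPRequests, returning a canned (dict, status, reason) tuple."""
    def get(self, uri, data=None):
        return {'result': ['Analysis']}, 200, 'OK'

def get_task_type(server, uri, taskname):
    dictresult, status, reason = server.get(uri, data={'workflow': taskname,
                                                       'subresource': 'type'})
    if status != 200:
        raise RuntimeError("Server answered %s: %s" % (status, reason))
    return dictresult['result'][0]

print(get_task_type(FakeServer(), '/crabserver/dev/workflow', 'my_task_name'))  # -> Analysis
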
Example 3
def server_info(subresource, server, proxyfilename):
    """
    Get relevant information about the server
    """

    server = HTTPRequests(server, proxyfilename)

    dictresult, status, reason = server.get('/crabserver/dev/info', {'subresource' : subresource})

    return dictresult['result'][0]
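A possible call of server_info is shown below; it needs a reachable server and a valid grid proxy, and the host, subresource and proxy path are placeholders rather than values taken from these examples.

info = server_info(subresource='version',
                   server='cmsweb.cern.ch',
                   proxyfilename='/tmp/x509up_u12345')
print(info)
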
Example 4
    def __call__(self):
        server = HTTPRequests(self.serverurl, self.proxyfilename)

        self.logger.debug('Looking up report for task %s' %
                          self.cachedinfo['RequestName'])
        dictresult, status, reason = server.get(
            self.uri,
            data={
                'workflow': self.cachedinfo['RequestName'],
                'subresource': 'report'
            })

        self.logger.debug("Result: %s" % dictresult)

        if status != 200:
            msg = "Problem retrieving report:\ninput:%s\noutput:%s\nreason:%s" % (
                str(self.cachedinfo['RequestName']), str(dictresult),
                str(reason))
            raise RESTCommunicationException(msg)

        runlumiLists = map(lambda x: literal_eval(x['runlumi']),
                           dictresult['result'][0]['runsAndLumis'].values())
        #convert lumi lists from strings to integers
        for runlumi in runlumiLists:
            for run in runlumi:
                runlumi[run] = map(int, runlumi[run])
        analyzed, diff = BasicJobType.mergeLumis(
            runlumiLists, dictresult['result'][0]['lumiMask'])
        numFiles = len(
            reduce(
                set().union,
                map(lambda x: literal_eval(x['parents']),
                    dictresult['result'][0]['runsAndLumis'].values())))
        self.logger.info("%d files have been read" % numFiles)
        self.logger.info("%d events have been read" % sum(
            map(lambda x: x['events'],
                dictresult['result'][0]['runsAndLumis'].values())))

        if self.outdir:
            jsonFileDir = self.outdir
        else:
            jsonFileDir = os.path.join(self.requestarea, 'results')
        if analyzed:
            with open(os.path.join(jsonFileDir, 'analyzed.json'),
                      'w') as jsonFile:
                json.dump(analyzed, jsonFile)
                jsonFile.write("\n")
                self.logger.info("Analyzed lumi written to %s/analyzed.json" %
                                 jsonFileDir)
        if diff:
            with open(os.path.join(jsonFileDir, 'diff.json'), 'w') as jsonFile:
                json.dump(diff, jsonFile)
                jsonFile.write("\n")
                self.logger.info("Not Analyzed lumi written to %s/diff.json" %
                                 jsonFileDir)
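A worked example of the run/lumi conversion step above: each job's 'runlumi' field arrives as a string, literal_eval turns it into a dict of {run: [lumi, ...]} whose values are still strings, and the inner loop converts them to integers. The sample string is made up.

from ast import literal_eval

runlumi = literal_eval("{'194050': ['26', '27', '28']}")
for run in runlumi:
    runlumi[run] = [int(lumi) for lumi in runlumi[run]]
print(runlumi)   # {'194050': [26, 27, 28]}
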
Example 5
    def __call__(self, **argv):
        #Setting default destination if -o is not provided
        if not self.dest:
            self.dest = os.path.join(self.requestarea, 'results')

        #Creating the destination directory if necessary
        if not os.path.exists(self.dest):
            self.logger.debug("Creating directory %s " % self.dest)
            os.makedirs(self.dest)
        elif not os.path.isdir(self.dest):
            raise ConfigurationException('Destination directory is a file')

        self.logger.info("Setting the destination directory to %s " %
                         self.dest)

        #Retrieving output files location from the server
        self.logger.debug('Retrieving locations for task %s' %
                          self.cachedinfo['RequestName'])
        inputlist = [('workflow', self.cachedinfo['RequestName'])]
        inputlist.extend(list(argv.iteritems()))
        if getattr(self.options, 'quantity', None):
            self.logger.debug('Retrieving %s file locations' %
                              self.options.quantity)
            inputlist.append(('limit', self.options.quantity))
        if getattr(self.options, 'jobids', None):
            self.logger.debug('Retrieving jobs %s' % self.options.jobids)
            inputlist.extend(self.options.jobids)
        server = HTTPRequests(self.serverurl, self.proxyfilename)
        dictresult, status, reason = server.get(self.uri, data=inputlist)
        self.logger.debug('Server result: %s' % dictresult)
        dictresult = self.processServerResult(dictresult)

        if status != 200:
            msg = "Problem retrieving information from the server:\ninput:%s\noutput:%s\nreason:%s" % (
                str(inputlist), str(dictresult), str(reason))
            raise ConfigurationException(msg)

        totalfiles = len(dictresult['result'])
        cpresults = []
        #        for workflow in dictresult['result']: TODO re-enable this when we will have resubmissions
        workflow = dictresult['result']  #TODO for the moment there is only one workflow
        arglist = ['-d', self.dest, '-i', workflow]
        if self.options.skipProxy:
            arglist.append('-p')
        if len(workflow) > 0:
            self.logger.info("Retrieving %s files" % totalfiles)
            copyoutput = remote_copy(self.logger, arglist)
            copyoutput()

        if totalfiles == 0:
            self.logger.info("No files to retrieve")
Example 6
    def __call__(self):
        server = HTTPRequests(self.serverurl, self.proxyfilename)

        self.logger.debug('Killing task %s' % self.cachedinfo['RequestName'])
        dictresult, status, reason = server.delete(self.uri, data = urllib.urlencode({ 'workflow' : self.cachedinfo['RequestName']}))
        self.logger.debug("Result: %s" % dictresult)

        if status != 200:
            msg = "Problem killing task %s:\ninput:%s\noutput:%s\nreason:%s" % \
                    (self.cachedinfo['RequestName'], str(self.cachedinfo['RequestName']), str(dictresult), str(reason))
            raise RESTCommunicationException(msg)

        self.logger.info("Kill request succesfully sent")
Example 7
    def __call__(self):
        server = HTTPRequests(self.serverurl, self.proxyfilename)

        self.logger.debug("Looking up detailed status of task %s" % self.cachedinfo["RequestName"])
        dictresult, status, reason = server.get(self.uri, data={"workflow": self.cachedinfo["RequestName"]})
        dictresult = dictresult["result"][0]  # take just the significant part

        if status != 200:
            msg = "Problem retrieving status:\ninput:%s\noutput:%s\nreason:%s" % (
                str(self.cachedinfo["RequestName"]),
                str(dictresult),
                str(reason),
            )
            raise RESTCommunicationException(msg)

        self.logger.debug(dictresult)  # should be something like {u'result': [[123, u'ciao'], [456, u'ciao']]}

        self.logger.info("Task name:\t\t\t%s" % self.cachedinfo["RequestName"])
        self.logger.info("Task status:\t\t\t%s" % dictresult["status"])

        # Print the url of the panda monitor
        if dictresult["taskFailureMsg"]:
            self.logger.error(
                "%sError during task injection:%s\t%s" % (colors.RED, colors.NORMAL, dictresult["taskFailureMsg"])
            )
        elif dictresult["jobSetID"]:
            p = Proxy({"logger": self.logger})
            username = urllib.quote(p.getUserName())
            self.logger.info(
                "Panda url:\t\t\thttp://panda.cern.ch/server/pandamon/query?job=*&jobsetID=%s&user=%s"
                % (dictresult["jobSetID"], username)
            )

        if dictresult["jobdefErrors"]:
            self.logger.error(
                "%sSubmission partially failed:%s\t%s jobgroup not submittet out of %s:"
                % (colors.RED, colors.NORMAL, dictresult["failedJobdefs"], dictresult["totalJobdefs"])
            )
            for error in dictresult["jobdefErrors"]:
                self.logger.info("\t%s" % error)

        # Print information about jobs
        states = dictresult["jobsPerStatus"]
        total = sum(states[st] for st in states)
        frmt = ""
        for status in states:
            frmt += status + " %s\t" % self._percentageString(states[status], total)
        if frmt:
            self.logger.info("Details:\t\t\t%s" % frmt)
Example 8
    def __call__(self):
        ## retrieving output files location from the server
        server = HTTPRequests(self.serverurl, self.proxyfilename)

        self.logger.debug('Requesting resubmission for failed jobs in task %s' % self.cachedinfo['RequestName'] )
        #inputdict = { "TaskResubmit": "Analysis", "ForceResubmit" : force }
        dictresult, status, reason = server.post(self.uri, data = urllib.urlencode({ 'workflow' : self.cachedinfo['RequestName']}) + \
                                                    self.sitewhitelist + self.siteblacklist)
        self.logger.debug("Result: %s" % dictresult)

        if status != 200:
            msg = "Problem retrieving resubmitting the task to the server:\ninput:%s\noutput:%s\nreason:%s" % (str(inputdict), str(dictresult), str(reason))
            raise RESTCommunicationException(msg)

        self.logger.info("Resubmission succesfully requested")
Example 9
    def __call__(self):
        server = HTTPRequests(self.serverurl, self.proxyfilename)

        self.logger.debug('Killing task %s' % self.cachedinfo['RequestName'])
        dictresult, status, reason = server.delete(
            self.uri,
            data=urllib.urlencode({'workflow':
                                   self.cachedinfo['RequestName']}))
        self.logger.debug("Result: %s" % dictresult)

        if status != 200:
            msg = "Problem killing task %s:\ninput:%s\noutput:%s\nreason:%s" % \
                    (self.cachedinfo['RequestName'], str(self.cachedinfo['RequestName']), str(dictresult), str(reason))
            raise RESTCommunicationException(msg)

        self.logger.info("Task killed")
Example 10
    def __call__(self):
        server = HTTPRequests(self.serverurl, self.proxyfilename)

        self.logger.debug('Looking up detailed status of task %s' % self.cachedinfo['RequestName'])
        dictresult, status, reason = server.get(self.uri, data = { 'workflow' : self.cachedinfo['RequestName']})
        dictresult = dictresult['result'][0] #take just the significant part

        if status != 200:
            msg = "Problem retrieving status:\ninput:%s\noutput:%s\nreason:%s" % (str(self.cachedinfo['RequestName']), str(dictresult), str(reason))
            raise RESTCommunicationException(msg)

        self.logger.debug(dictresult) #should be something like {u'result': [[123, u'ciao'], [456, u'ciao']]}

        self.logger.info("Task name:\t\t\t%s" % self.cachedinfo['RequestName'])
        self.logger.info("Task status:\t\t\t%s" % dictresult['status'])

        def logJDefErr(jdef):
            """Printing job def failures if any"""
            if jdef['jobdefErrors']:
                self.logger.error("%sFailed to inject %s\t%s out of %s:" %(colors.RED, colors.NORMAL,\
                                                                           jdef['failedJobdefs'], jdef['totalJobdefs']))
                for error in jdef['jobdefErrors']:
                    self.logger.info("\t%s" % error)

        #Print the url of the panda monitor
        if dictresult['taskFailureMsg']:
            self.logger.error("%sError during task injection:%s\t%s" % (colors.RED,colors.NORMAL,dictresult['taskFailureMsg']))
            # We might also have more information in the job def errors 
            logJDefErr(jdef=dictresult)
        elif dictresult['jobSetID']:
            username = urllib.quote(getUserName(self.logger))
            self.logger.info("Panda url:\t\t\thttp://panda.cern.ch/server/pandamon/query?job=*&jobsetID=%s&user=%s" % (dictresult['jobSetID'], username))
            # We have cases where the job def errors are there but we have a job def id
            logJDefErr(jdef=dictresult)

        #Print information about jobs
        states = dictresult['jobsPerStatus']
        total = sum( states[st] for st in states )
        frmt = ''
        for status in states:
            frmt += status + ' %s\t' % self._percentageString(states[status], total)
        if frmt:
            self.logger.info('Details:\t\t\t%s' % frmt)
Example 11
    def __call__(self):
        server = HTTPRequests(self.serverurl, self.proxyfilename)

        self.logger.debug('Looking up report for task %s' % self.cachedinfo['RequestName'])
        dictresult, status, reason = server.get(self.uri, data = {'workflow': self.cachedinfo['RequestName'], 'subresource': 'report'})

        self.logger.debug("Result: %s" % dictresult)

        if status != 200:
            msg = "Problem retrieving report:\ninput:%s\noutput:%s\nreason:%s" % (str(self.cachedinfo['RequestName']), str(dictresult), str(reason))
            raise RESTCommunicationException(msg)
        if not dictresult['result'][0]['runsAndLumis']:
            self.logger.info('No jobs finished yet. Report is available when jobs complete')
            return

        runlumiLists = map(lambda x: literal_eval(x['runlumi']), dictresult['result'][0]['runsAndLumis'].values())
        #convert lumi lists from strings to integers
        for runlumi in runlumiLists:
            for run in runlumi:
                runlumi[run] = map(int, runlumi[run])
        analyzed, diff = BasicJobType.mergeLumis(runlumiLists, dictresult['result'][0]['lumiMask'])
        numFiles = len(reduce(set().union, map(lambda x: literal_eval(x['parents']), dictresult['result'][0]['runsAndLumis'].values())))
        self.logger.info("%d files have been read" % numFiles)
        self.logger.info("%d events have been read" % sum(map(lambda x: x['events'], dictresult['result'][0]['runsAndLumis'].values())))

        if self.outdir:
            jsonFileDir = self.outdir
        else:
            jsonFileDir = os.path.join(self.requestarea, 'results')
        if analyzed:
            with open(os.path.join(jsonFileDir, 'analyzed.json'), 'w') as jsonFile:
                json.dump(analyzed, jsonFile)
                jsonFile.write("\n")
                self.logger.info("Analyzed lumi written to %s/analyzed.json" % jsonFileDir)
        if diff:
            with open(os.path.join(jsonFileDir, 'diff.json'), 'w') as jsonFile:
                json.dump(diff, jsonFile)
                jsonFile.write("\n")
                self.logger.info("Not Analyzed lumi written to %s/diff.json" % jsonFileDir)
Example 12
    def __call__(self):
        valid = False
        configmsg = 'Default'

        if not os.path.isfile(self.options.config):
            raise MissingOptionException("Configuration file '%s' not found" % self.options.config)

        #store the configuration file in self.configuration
        self.loadConfig( self.options.config, self.args )

        requestarea, requestname, self.logfile = createWorkArea( self.logger,
                                                                 getattr(self.configuration.General, 'workArea', None),
                                                                 getattr(self.configuration.General, 'requestName', None)
                                                               )

        self.logger.debug("Started submission")

        #determine the serverurl
        if self.options.server:
            self.serverurl = self.options.server
        elif getattr( self.configuration.General, 'serverUrl', None ) is not None:
            self.serverurl = self.configuration.General.serverUrl
#TODO: the server url should not be handled here; find a better way to do this
        else:
            self.serverurl = 'http://cmsweb.cern.ch'
        if not hasattr( self.configuration.General, 'ufccacheUrl' ):
            self.configuration.General.ufccacheUrl = self.serverurl
        if not hasattr( self.configuration.General, 'configcacheUrl' ):
            #https is required because configcache does not use ServerInteractions
            self.configuration.General.configcacheUrl = 'https://' + self.serverurl + '/couchdb'
        if not hasattr( self.configuration.General, 'configcacheName' ):
            self.configuration.General.configcacheName = 'analysis_reqmgr_config_cache'

        self.createCache( self.serverurl )

        ######### Check if the user provided unexpected parameters ########
        #init the dictionary with all the known parameters
        SpellChecker.DICTIONARY = SpellChecker.train( [ val['config'] for _, val in self.requestmapper.iteritems() if val['config'] ] + \
                                                      [ x for x in self.otherConfigParams ] )
        #iterate on the parameters provided by the user
        for section in self.configuration.listSections_():
            for attr in getattr(self.configuration, section).listSections_():
                par = (section + '.' + attr)
                #if the parameter is not known exit, but try to suggest a correction first
                if not SpellChecker.is_correct( par ):
                    msg = 'The parameter %s is not known.' % par
                    msg += '' if SpellChecker.correct(par) == par else ' Did you mean %s?' % SpellChecker.correct(par)
                    raise ConfigurationException(msg)

        #usertarball and cmsswconfig use this parameter, so make sure it is set correctly
        self.configuration.General.serverUrl = self.serverurl

        #delegating the proxy (creation done in SubCommand)
        self.voRole = getattr(self.configuration.User, "voRole", "")
        self.voGroup = getattr(self.configuration.User, "voGroup", "")
        self.handleProxy()

        uniquerequestname = None

        self.logger.debug("Working on %s" % str(requestarea))

        configreq = {}
        for param in self.requestmapper:
            mustbetype = getattr(types, self.requestmapper[param]['type'])
            if self.requestmapper[param]['config']:
                attrs = self.requestmapper[param]['config'].split('.')
                temp = self.configuration
                for attr in attrs:
                    temp = getattr(temp, attr, None)
                    if temp is None:
                        break
                if temp:
                    if mustbetype == type(temp):
                        configreq[param] = temp
                    else:
                        raise ConfigurationException(1, "Invalid type " + str(type(temp)) + " for parameter " + self.requestmapper[param]['config'] \
                                   + ". It is needed a " + str(mustbetype) + ".")
                elif self.requestmapper[param]['default'] is not None:
                    configreq[param] = self.requestmapper[param]['default']
                elif self.requestmapper[param]['required']:
                    raise ConfigurationException(1, "Missing parameter " + self.requestmapper[param]['config'] + " from the configuration.")
                else:
                    ## parameter not strictly required
                    pass
            if param == "workflow":
                if mustbetype == type(requestname):
                    configreq["workflow"] = requestname
            elif param == "savelogsflag":
                configreq["savelogsflag"] = 1 if temp else 0
            elif param == "publication":
                configreq["publication"] = 1 if temp else 0
            elif param == "blacklistT1":
                blacklistT1 = self.voRole != 't1access'
                #if the user chose to remove the automatic T1 blacklisting but does not have the t1access role
                if getattr (self.configuration.Site, 'removeT1Blacklisting', False) and blacklistT1:
                    self.logger.info("WARNING: You disabled the T1 automatic blacklisting without having the t1access role")
                    blacklistT1 = False
                configreq["blacklistT1"] = 1 if blacklistT1 else 0

        jobconfig = {}
        self.configuration.JobType.proxyfilename = self.proxyfilename
        self.configuration.JobType.capath = HTTPRequests.getCACertPath()
        pluginParams = [ self.configuration, self.logger, os.path.join(requestarea, 'inputs') ]
        if getattr(self.configuration.JobType, 'pluginName', None) is not None:
            jobtypes    = getJobTypes()
            plugjobtype = jobtypes[upper(self.configuration.JobType.pluginName)](*pluginParams)
            inputfiles, jobconfig, isbchecksum = plugjobtype.run(configreq)
        else:
            fullname = self.configuration.JobType.externalPluginFile
            basename = os.path.basename(fullname).split('.')[0]
            plugin = addPlugin(fullname)[basename]
            pluginInst = plugin(*pluginParams)
            inputfiles, jobconfig, isbchecksum = pluginInst.run(configreq)

        configreq['publishname'] = "%s-%s" %(configreq['publishname'], isbchecksum)
        configreq.update(jobconfig)

        server = HTTPRequests(self.serverurl, self.proxyfilename)

        self.logger.info("Sending the request to the server")
        self.logger.debug("Submitting %s " % str( configreq ) )

        dictresult, status, reason = server.put( self.uri, data = self._encodeRequest(configreq) )
        self.logger.debug("Result: %s" % dictresult)
        if status != 200:
            msg = "Problem sending the request:\ninput:%s\noutput:%s\nreason:%s" % (str(configreq), str(dictresult), str(reason))
            raise RESTCommunicationException(msg)
        elif dictresult.has_key("result"):
            uniquerequestname = dictresult["result"][0]["RequestName"]
        else:
            msg = "Problem during submission, no request ID returned:\ninput:%s\noutput:%s\nreason:%s" \
                   % (str(configreq), str(dictresult), str(reason))
            raise RESTCommunicationException(msg)

        tmpsplit = self.serverurl.split(':')
        createCache( requestarea, tmpsplit[0], tmpsplit[1] if len(tmpsplit)>1 else '', uniquerequestname, voRole = self.voRole, voGroup = self.voGroup )

        self.logger.info("Submission completed")
        self.logger.debug("Request ID: %s " % uniquerequestname)

        self.logger.debug("Ended submission")

        return uniquerequestname
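The SpellChecker block above rejects unknown configuration parameters and proposes the closest known one. The snippet below is only an illustration of that idea using difflib, not the actual SpellChecker class used by CRABClient; the parameter list is a made-up subset.

import difflib

known = ['General.workArea', 'General.requestName', 'JobType.pluginName']  # made-up subset
par = 'General.requstName'
if par not in known:
    suggestion = difflib.get_close_matches(par, known, n=1)
    msg = 'The parameter %s is not known.' % par
    if suggestion:
        msg += ' Did you mean %s?' % suggestion[0]
    print(msg)   # The parameter General.requstName is not known. Did you mean General.requestName?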