Example #1
    def _publishIfOutput(self, obj):
        """object can be either a logical filename or a command,
        or a list of either"""
        if getattr(obj, "actualOutputs", False): # duck-typing
            actfiles = obj.actualOutputs
        elif getattr(obj, "outputs", False):  
            log.error("publishifoutput expected cmd, but got %s"%str(obj))
            #don't know how to publish.
            return
        else: 
            # Remote cmd will have empty actualOutputs 
            # (which evaluates to False)
            return # I don't need to publish if actualOutputs is empty
        log.debug("raw outs are %s" %(str(actfiles)))
        files = filter(lambda f: f[0] in self.logOuts, actfiles)
        log.debug("filtered is %s" %(str(files)))
        if files and (len(files[0]) > 3):
            localfiles = imap(itemgetter(3), files)
        else:
            localfiles = itertools.repeat(None)
        # Unmap local files (disabled).
        #map(self.outMap.discardLogical,
        #    imap(itemgetter(0), ifilter(lambda t: t[1], izip(ft,localfiles))))
        targetfiles = map(lambda ft: (ft[0], ft[1],
                                      self.outMap.mapWriteFile(ft[0])),
                          files)        
        # fork a thread for this in the future.
        self._publishHelper(izip(targetfiles, localfiles))
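
A minimal sketch of the duck-typing check used above: the object is classified by which attributes it exposes (actualOutputs vs. outputs) rather than by its type. FakeCmd and publish() below are illustrative stand-ins, not SWAMP classes.

# Illustrative sketch only; FakeCmd and publish() are made-up stand-ins.
class FakeCmd:
    def __init__(self, actualOutputs=None, outputs=None):
        if actualOutputs is not None:
            self.actualOutputs = actualOutputs
        if outputs is not None:
            self.outputs = outputs

def publish(obj):
    if getattr(obj, "actualOutputs", False):   # finished command: publish
        return "publish %s" % obj.actualOutputs
    elif getattr(obj, "outputs", False):       # declared but unproduced outputs
        return "cannot publish yet"
    else:                                      # nothing to do
        return "nothing to publish"

print(publish(FakeCmd(actualOutputs=[("out.nc", "/tmp/out.nc")])))
print(publish(FakeCmd(outputs=["out.nc"])))
print(publish(FakeCmd()))
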
Example #2
    def _fetchLogicals(self, logicals, srcs):
        fetched = []
        if len(logicals) == 0:
            return []
        log.info("need fetch for %s from %s" %(str(logicals),str(srcs)))
        d = dict(srcs)
        for lf in logicals:
            self._fetchLock.acquire()
            try:
                if self.filemap.existsForRead(lf):
                    log.debug("satisfied by other thread")
                    continue
                start = time.time()
                self.fetchFile = lf
                phy = self.filemap.mapWriteFile(lf)
                if lf not in d:
                    log.error("Missing source for %s" % lf)
                    continue
                log.debug("fetching %s from %s" % (lf, d[lf]))
                self._fetchPhysical(phy, d[lf])
                fetched.append((lf, phy))
                self.fetchFile = None
                end = time.time()
                diff = end - start
                statistics.tracker().logTransfer(d[lf], diff)
            finally:
                # release even when we bail out early via continue
                self._fetchLock.release()

        return fetched
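
The loop above is essentially a lock-protected fetch-once pattern: existsForRead() is re-checked under the lock so a file already fetched by another thread is skipped. A standalone sketch of that idea, with a plain dict standing in for the real file map and fetch machinery:

# Sketch of the fetch-once-under-lock pattern; the dict cache and the
# placeholder copy step are stand-ins, not the real filemap/_fetchPhysical().
import threading

_cache = {}
_lock = threading.Lock()

def fetch_once(logical, source):
    with _lock:                      # serialize fetches
        if logical in _cache:        # satisfied by another thread
            return _cache[logical]
        physical = "/tmp/%s" % logical
        # ... a real implementation would copy `source` to `physical` here ...
        _cache[logical] = physical
        return physical
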
Example #3
 def cleanOutputName(self, scriptFilename):
     # I can't think of a "good" or "best" way, so for now,
     # we'll just take the last part and garble it a little
     (head, tail) = os.path.split(scriptFilename)
     if tail == "":
         log.error("empty filename: %s" % (scriptFilename))
         raise StandardError("empty filename: %s" % (scriptFilename))
     if head != "":
         # take the last 4 hex digits of the head's hash value
         head = ("%x" % hash(head))[-4:]
     return head + tail
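
Note that Python's built-in hash() is not guaranteed to be stable across runs or platforms, so the four-character prefix can vary between invocations. If a stable name were needed, a digest-based variant (a sketch, not the project's code) could look like this:

# Sketch: stable variant using hashlib instead of hash(); not the original code.
import os, hashlib

def cleanOutputNameStable(scriptFilename):
    head, tail = os.path.split(scriptFilename)
    if tail == "":
        raise ValueError("empty filename: %s" % scriptFilename)
    if head != "":
        # last 4 hex digits of the md5 digest of the directory part
        head = hashlib.md5(head.encode("utf-8")).hexdigest()[-4:]
    return head + tail

print(cleanOutputNameStable("/home/user/run/script.sh"))  # e.g. 'ab12script.sh'
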
Example #4
    def graduate(self, cmd, gradHook, executor, fail, custom):
        #The dispatcher isn't really in charge of dependency
        #checking, so it doesn't really need to know when things
        #are finished.
        gradHook(cmd, fail, custom) # Service the hook function first (better later?)
        # this is the executor's hook
        #print "graduate",cmd.cmd, cmd.argList, "total=",self.count, fail
        self.count += 1
        if fail:
            origline = ' '.join([cmd.cmd] + map(lambda t: ' '.join(t), cmd.argList) + cmd.leftover)
            s = "Bad return code %s from cmdline %s %d outs=%s" % (
                "", origline, cmd.referenceLineNum, str(cmd.outputs))
            log.error(s)
            # For nicer handling, we should find the original command line
            # and pass it back as the failing line (+ line number)
            # It would be nice to trap the stderr for that command, but that
            # can be done later, since it's a different set of pipes
            # to connect.

            self.result = "Error at line %d : %s" %(cmd.referenceLineNum,
                                                    origline)
            self.resultEvent.set()
            return
            #raise StandardError(s)
        else:
            # figure out which one finished, and graduate it.
            #self.finished[cmd] = code
            log.debug("graduating %s %d" %(cmd.cmd,
                                           cmd.referenceLineNum))
            self.finished.add(cmd)
            #print "New Finished set:", len(self.finished),"\n","\n".join(map(lambda x:x.original,self.finished))
            # Are any clusters made ready?
            # Check this cluster's descendents.  For each of them,
            # see if the all their parent cmds are finished.
            # For now, don't dispatch a cluster until all its parents
            # are ready.

            # If it's a leaf cmd, then publish its results.
            # Apply reaper logic: should be same as before.

            # delete consumed files.
            if self.okayToReap:
                self._reapCommands([cmd])
            # token is (executor, etoken)
            map(lambda o: appendList(self.execLocation, o[0], (executor, o[1])),
                cmd.actualOutputs)
            

            self.gradHook(cmd)
            if self.idle():
                self._cleanupExecs()
                self.result = True 
                self.resultEvent.set()
            return
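
appendList() is not shown in this listing; presumably it appends a value to a per-key list in a dictionary, building the output-to-executor map. A hypothetical equivalent (the real SWAMP helper may differ):

# Hypothetical stand-in for the appendList() helper used in graduate().
def appendList(aDict, key, value):
    if key in aDict:
        aDict[key].append(value)
    else:
        aDict[key] = [value]

execLocation = {}
appendList(execLocation, "out0.nc", ("executorA", 17))
appendList(execLocation, "out0.nc", ("executorB", 3))
print(execLocation)   # {'out0.nc': [('executorA', 17), ('executorB', 3)]}
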
Example #5
    def registerWorker(self, certificate, offer):
        # for now, accept all certificates.
        log.debug("Received offer from %s with %d slots" %(offer[0],offer[1]))
        (workerUrl, workerSlots) = (offer[0], offer[1])
        result = self.swampInterface.addWorker(workerUrl, workerSlots)
        if not result:
            log.error("Error registering worker " + workerUrl)
            return None
        token = self._nextWorkerToken
        self._nextWorkerToken += 1
        self._workers[token] = result
        return token
Example #6
 def executeSerialAll(self, executor=None):
     if not executor:
         log.error("Missing executor for serial execution. Skipping.")
         return
     def run(cmd):
         tok = executor.launch(cmd)
         return executor.join(tok)
     for c in self.cmdList:
         ret = run(c)
         if ret != 0:
             log.debug("ret was " + str(ret))
             log.error("error running command %s" % (c))
             self.result = "ret was %s, error running %s" % (str(ret), c)
             break
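
The executor contract assumed here is simply launch(cmd) -> token and join(token) -> return code. A minimal fake executor illustrating that contract (an illustration only, not the project's executor classes):

# Minimal sketch of the launch()/join() contract executeSerialAll relies on.
class TrivialExecutor:
    def __init__(self):
        self._results = {}
        self._next = 0

    def launch(self, cmd):
        token = self._next
        self._next += 1
        # pretend every command succeeds immediately
        self._results[token] = 0
        return token

    def join(self, token):
        return self._results.pop(token)

e = TrivialExecutor()
tok = e.launch("ncwa in.nc out.nc")
print(e.join(tok))   # 0 means success
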
Example #7
 def executeParallelAll(self, executors=None):
     """executors is a container(currently, a list) of the available
     executors for this job.
     In the future, we would like to allow this container to grow/contract,
     i.e. to allow executors to be added or removed at run time.
     """
     if not executors:
         log.error("Missing executor for parallel execution. Skipping.")
         return
     #self.pd = ParallelDispatcher(self.config, executors)
     self.pd = NewParallelDispatcher(self.config, executors)
     self.fileLocations = self.pd.dispatchAll(self.cmdList,
                                              self._graduateAction)
     self.result = self.pd.result
     pass
Example #8
 def mapInput(self, scriptFilename):
     temps = fnfilter(self.scriptOuts, scriptFilename)
     # FIXME: should really match against filemap, since
     # logicals may be renamed
     if temps:
         temps.sort()  # sort script wildcard expansion.
         # we need to convert script filenames to logicals
         # to handle real dependencies
         return map(lambda s: self.logicalOutByScript[s], temps)
     else:
         inList = self.expandConcreteInput(scriptFilename)
         if inList:
             self.scriptIns.update(inList)
             return inList
         else:
             log.error("%s is not allowed as an input filename" % (scriptFilename))
             raise StandardError("Input illegal or nonexistant %s" % (scriptFilename))
     pass
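
fnfilter above is presumably fnmatch.filter, matching the script's wildcard reference against the set of script-produced filenames before converting them to logical names. A quick illustration of that expansion step (scriptOuts and the mapping below are made-up sample data):

# Illustration of the wildcard-expansion step with sample data.
from fnmatch import filter as fnfilter

scriptOuts = ["slice01.nc", "slice02.nc", "final.nc"]
logicalOutByScript = {"slice01.nc": "job1.slice01.nc",
                      "slice02.nc": "job1.slice02.nc"}

temps = fnfilter(scriptOuts, "slice*.nc")
temps.sort()                      # sort the wildcard expansion
print([logicalOutByScript[s] for s in temps])
# ['job1.slice01.nc', 'job1.slice02.nc']
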
Example #9
 def pollState(self, token):
     """ this can be merged soon"""
     if token not in self.jobs:
         time.sleep(0.2) # possible race
         if token not in self.jobs:
             log.warning("token not ready after waiting.")
             if self.config.serviceMode == "master":
                 return SwampTaskState.newState(token, "missing").packed()
             else:
                 return None
     if self.config.serviceMode != "master":
         log.error("pollState not implemented here yet")
     stateObject = self._taskStateObject(self.jobs[token])
     if not stateObject:
         log.error("SOAP interface found weird object in self.jobs: " +
                   "token(%d) has %s" % (token, str(self.jobs[token])))
         return SwampTaskState.newState(token, "system error").packed()
     else:
         stateObject.token = token
         return stateObject.packed()
Example #10
    def __init__(self, targetParser):
        self.parser = targetParser
        if os.path.sep != "/":
            log.error("Path resolution logic not tested for pathsep != /")

        # These need to be tracked in parser's env variables
        # Also, for this to really work, NcoParser needs to be able to
        # pull path info from env vars, which means envvars need to live
        # in the factory, or the factory needs a link to the env vars.
        self.pathHome = "." # point at HOME
        self.pwd = "." # change to point at PWD variable
        
        # --Construct action list--
        # Filter cmds to export
        l = itertools.ifilter(lambda x: x.startswith("cmd"), dir(self))
        # Map cmds to method calls
        c = itertools.imap(lambda x: [x[3:], getattr(self,x)], l)
        # Build the dict, mapping e.g. "cd" -> self.cmdcd
        self.action = dict(c)
        pass
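
The constructor builds its action table by introspection: every method named cmdXXX becomes the handler for command XXX. A self-contained sketch of that dispatch-table pattern, without the Python-2-only ifilter/imap (the commands cd and ls here are examples, not the real parser's set):

# Sketch of the cmd* -> handler dispatch-table pattern used above.
class MiniShell:
    def __init__(self):
        names = [n for n in dir(self) if n.startswith("cmd")]
        self.action = dict((n[3:], getattr(self, n)) for n in names)

    def cmdcd(self, arg):
        return "cd to %s" % arg

    def cmdls(self, arg):
        return "listing %s" % arg

sh = MiniShell()
print(sh.action["cd"]("/tmp"))   # dispatches to cmdcd
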
Example #11
 def __init__(self, mode='fake', binaryFinder=None, filemap=None, slots=1):
     """set mode to 'fake' to provide a faking executor,
     or 'local' to use a normal local execution engine"""
     self.runningClusters = {}
     self.finishedClusters = set()
     #self.token = 0
     self.execMode = mode
     self.alive = True
     self._runningCmds = [] # (cmd, containingcluster)
     self._roots = []
     self.binaryFinder = binaryFinder
     self.filemap = filemap
     self.actual = {} # shared: not safe across tasks
     self.slots = slots
     if mode == 'fake':
         self._initFakeMode()
     elif mode == 'local':
         self._initLocalMode()
     else:
         log.error("Serious Error: constructed mode-less executor")
     self.thread = threading.Thread(target=self._threadRun, args=())
     self.thread.start()
     pass
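
The constructor above ends by starting a background thread that runs self._threadRun. A stripped-down sketch of that start-a-worker-in-__init__ pattern; the queue-driven loop is an illustrative assumption, not SWAMP's actual _threadRun():

# Sketch of starting a worker thread from __init__.
import threading
try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2

class Worker:
    def __init__(self):
        self.alive = True
        self._jobs = queue.Queue()
        self.thread = threading.Thread(target=self._threadRun)
        self.thread.start()

    def _threadRun(self):
        while self.alive:
            job = self._jobs.get()    # blocks until work (or shutdown) arrives
            if job is None:
                break
            job()

    def stop(self):
        self.alive = False
        self._jobs.put(None)          # wake the thread so it can exit
        self.thread.join()
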
Example #12
 def unregisterEvent(self, url):
     try:
         self.urlTable.remove(url)
     except (KeyError, ValueError): # url not registered (set or list remove)
         log.error("Tried to remove non-registered callback %s" % url)
     pass