Example #1
class CompositeStepMixin:
    def addLogForRemoteCommands(self, logname):
        """This method must be called by user classes
        composite steps could create several logs, this mixin functions will write
        to the last one.
        """
        self.rc_log = self.addLog(logname)
        return self.rc_log

    def runRemoteCommand(self,
                         cmd,
                         args,
                         abandonOnFailure=True,
                         evaluateCommand=lambda cmd: cmd.didFail()):
        """generic RemoteCommand boilerplate"""
        cmd = remotecommand.RemoteCommand(cmd, args)
        if hasattr(self, "rc_log"):
            cmd.useLog(self.rc_log, False)
        d = self.runCommand(cmd)

        def commandComplete(cmd):
            if abandonOnFailure and cmd.didFail():
                raise buildstep.BuildStepFailed()
            return evaluateCommand(cmd)

        d.addCallback(lambda res: commandComplete(cmd))
        return d

    def runRmdir(self, dir, timeout=None, **kwargs):
        """ remove a directory from the worker """
        cmd_args = {'dir': dir, 'logEnviron': self.logEnviron}
        if timeout:
            cmd_args['timeout'] = timeout
        return self.runRemoteCommand('rmdir', cmd_args, **kwargs)

    def pathExists(self, path):
        """ test whether path exists"""
        def commandComplete(cmd):
            return not cmd.didFail()

        return self.runRemoteCommand('stat', {
            'file': path,
            'logEnviron': self.logEnviron,
        },
                                     abandonOnFailure=False,
                                     evaluateCommand=commandComplete)

    def runMkdir(self, _dir, **kwargs):
        """ create a directory and its parents"""
        return self.runRemoteCommand('mkdir', {
            'dir': _dir,
            'logEnviron': self.logEnviron,
        }, **kwargs)

    def runGlob(self, path):
        """ find files matching a shell-style pattern"""
        def commandComplete(cmd):
            return cmd.updates['files'][-1]

        return self.runRemoteCommand('glob', {
            'path': path,
            'logEnviron': self.logEnviron,
        },
                                     evaluateCommand=commandComplete)

    def getFileContentFromWorker(self, filename, abandonOnFailure=False):
        self.checkWorkerHasCommand("uploadFile")
        fileWriter = remotetransfer.StringFileWriter()
        # default arguments
        args = {
            'workdir': self.workdir,
            'writer': fileWriter,
            'maxsize': None,
            'blocksize': 32 * 1024,
        }

        if self.workerVersionIsOlderThan('uploadFile', '3.0'):
            args['slavesrc'] = filename
        else:
            args['workersrc'] = filename

        def commandComplete(cmd):
            if cmd.didFail():
                return None
            return fileWriter.buffer

        return self.runRemoteCommand('uploadFile',
                                     args,
                                     abandonOnFailure=abandonOnFailure,
                                     evaluateCommand=commandComplete)

    deprecatedWorkerClassMethod(locals(), getFileContentFromWorker)
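A minimal usage sketch (not part of the example above; the step class, its name, and the paths are hypothetical): a new-style BuildStep mixes in CompositeStepMixin to recreate a directory on the worker. Note that the mixin's helpers read self.logEnviron, so the step must provide that attribute.

from twisted.internet import defer

from buildbot.process import buildstep
from buildbot.process.results import SUCCESS


class CleanOutputDir(buildstep.BuildStep, buildstep.CompositeStepMixin):
    """Hypothetical step: remove and recreate an output directory."""
    name = 'clean-output-dir'
    logEnviron = True  # the mixin helpers expect this attribute to exist

    @defer.inlineCallbacks
    def run(self):
        # remote-command output goes to this log from now on
        self.addLogForRemoteCommands('stdio')
        if (yield self.pathExists('build/output')):
            yield self.runRmdir('build/output')
        yield self.runMkdir('build/output')
        defer.returnValue(SUCCESS)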
Example #2
class Build(properties.PropertiesMixin, WorkerAPICompatMixin):

    """I represent a single build by a single worker. Specialized Builders can
    use subclasses of Build to hold status information unique to those build
    processes.

    I control B{how} the build proceeds. The actual build is broken up into a
    series of steps, saved in the .buildSteps[] array as a list of
    L{buildbot.process.step.BuildStep} objects. Each step is a single remote
    command, possibly a shell command.

    During the build, I put status information into my C{BuildStatus}
    gatherer.

    After the build, I go away.

    I can be used by a factory by setting buildClass on
    L{buildbot.process.factory.BuildFactory}

    @ivar requests: the list of L{BuildRequest}s that triggered me
    @ivar build_status: the L{buildbot.status.build.BuildStatus} that
                        collects our status
    """

    VIRTUAL_BUILDERNAME_PROP = "virtual_builder_name"
    VIRTUAL_BUILDERDESCRIPTION_PROP = "virtual_builder_description"
    VIRTUAL_BUILDERTAGS_PROP = "virtual_builder_tags"
    workdir = "build"
    build_status = None
    reason = "changes"
    finished = False
    results = None
    stopped = False
    set_runtime_properties = True
    subs = None

    _sentinel = []  # used as a sentinel to indicate unspecified initial_value

    def __init__(self, requests):
        self.requests = requests
        self.locks = []
        # build a source stamp
        self.sources = requests[0].mergeSourceStampsWith(requests[1:])
        self.reason = requests[0].mergeReasons(requests[1:])

        self.currentStep = None
        self.workerEnvironment = {}
        self.buildid = None
        self.number = None

        self.terminate = False

        self._acquiringLock = None
        self._builderid = None
        # overall results, may downgrade after each step
        self.results = SUCCESS
        self.properties = properties.Properties()

    def setBuilder(self, builder):
        """
        Set the given builder as our builder.

        @type  builder: L{buildbot.process.builder.Builder}
        """
        self.builder = builder
        self.master = builder.master

    def setLocks(self, lockList):
        # convert all locks into their real forms
        self.locks = [(self.builder.botmaster.getLockFromLockAccess(access), access)
                      for access in lockList]

    def setWorkerEnvironment(self, env):
        # TODO: remove once we don't have anything depending on this method or attribute
        # e.g., old-style steps (ShellMixin pulls the environment out of the
        # builder directly)
        self.workerEnvironment = env

    def getSourceStamp(self, codebase=''):
        for source in self.sources:
            if source.codebase == codebase:
                return source
        return None

    def getAllSourceStamps(self):
        return list(self.sources)

    def allChanges(self):
        for s in self.sources:
            for c in s.changes:
                yield c

    def allFiles(self):
        # return a list of all source files that were changed
        files = []
        for c in self.allChanges():
            for f in c.files:
                files.append(f)
        return files

    def __repr__(self):
        return "<Build %s number:%r results:%s>" % (
            self.builder.name, self.number, statusToString(self.results))

    def blamelist(self):
        # FIXME: kill this. This belongs to reporter.utils
        blamelist = []
        for c in self.allChanges():
            if c.who not in blamelist:
                blamelist.append(c.who)
        for source in self.sources:
            if source.patch:  # Add patch author to blamelist
                blamelist.append(source.patch_info[0])
        blamelist.sort()
        return blamelist

    def changesText(self):
        changetext = ""
        for c in self.allChanges():
            changetext += "-" * 60 + "\n\n" + c.asText() + "\n"
        # consider sorting these by number
        return changetext

    def setStepFactories(self, step_factories):
        """Set a list of 'step factories', which are tuples of (class,
        kwargs), where 'class' is generally a subclass of step.BuildStep.
        These are used to create the Steps themselves when the Build starts
        (as opposed to when it is first created). By creating the steps
        later, their __init__ method will have access to things like
        build.allFiles() ."""
        self.stepFactories = list(step_factories)

    useProgress = True

    def getWorkerCommandVersion(self, command, oldversion=None):
        return self.workerforbuilder.getWorkerCommandVersion(command, oldversion)

    def getWorkerName(self):
        return self.workerforbuilder.worker.workername
    deprecatedWorkerClassMethod(locals(), getWorkerName)

    def setupProperties(self):
        props = interfaces.IProperties(self)

        # give the properties a reference back to this build
        props.build = self

        # start with global properties from the configuration
        props.updateFromProperties(self.master.config.properties)

        # from the SourceStamps, which have properties via Change
        for change in self.allChanges():
            props.updateFromProperties(change.properties)

        # and finally, get any properties from requests (this is the path
        # through which schedulers will send us properties)
        for rq in self.requests:
            props.updateFromProperties(rq.properties)

        self.builder.setupProperties(props)

    def setupOwnProperties(self):
        # now set some properties of our own, corresponding to the
        # build itself
        props = self.getProperties()
        props.setProperty("buildnumber", self.number, "Build")

        if self.sources and len(self.sources) == 1:
            # old interface for backwards compatibility
            source = self.sources[0]
            props.setProperty("branch", source.branch, "Build")
            props.setProperty("revision", source.revision, "Build")
            props.setProperty("repository", source.repository, "Build")
            props.setProperty("codebase", source.codebase, "Build")
            props.setProperty("project", source.project, "Build")

    def setupWorkerForBuilder(self, workerforbuilder):
        self.path_module = workerforbuilder.worker.path_module

        # navigate our way back to the L{buildbot.worker.Worker}
        # object that came from the config, and get its properties
        worker_properties = workerforbuilder.worker.properties
        self.getProperties().updateFromProperties(worker_properties)
        if workerforbuilder.worker.worker_basedir:
            builddir = self.path_module.join(
                bytes2NativeString(workerforbuilder.worker.worker_basedir),
                bytes2NativeString(self.builder.config.workerbuilddir))
            self.setProperty("builddir", builddir, "Worker")

        self.workername = workerforbuilder.worker.workername
        self._registerOldWorkerAttr("workername")
        self.build_status.setWorkername(self.workername)

    @defer.inlineCallbacks
    def getBuilderId(self):
        if self._builderid is None:
            if self.hasProperty(self.VIRTUAL_BUILDERNAME_PROP):
                self._builderid = yield self.builder.getBuilderIdForName(
                    self.getProperty(self.VIRTUAL_BUILDERNAME_PROP))
                description = self.getProperty(
                    self.VIRTUAL_BUILDERDESCRIPTION_PROP,
                    self.builder.config.description)
                tags = self.getProperty(
                    self.VIRTUAL_BUILDERTAGS_PROP,
                    self.builder.config.tags)

                self.master.data.updates.updateBuilderInfo(self._builderid,
                                                           description,
                                                           tags)

            else:
                self._builderid = yield self.builder.getBuilderId()
        defer.returnValue(self._builderid)

    @defer.inlineCallbacks
    def startBuild(self, build_status, workerforbuilder):
        """This method sets up the build, then starts it by invoking the
        first Step. It returns a Deferred which will fire when the build
        finishes. This Deferred is guaranteed to never errback."""
        self.workerforbuilder = workerforbuilder
        self.conn = None

        worker = workerforbuilder.worker

        log.msg("%s.startBuild" % self)

        self.build_status = build_status
        # TODO: this will go away when build collapsing is implemented; until
        # then we just assign the build to the first buildrequest
        brid = self.requests[0].id
        builderid = yield self.getBuilderId()
        self.buildid, self.number = \
            yield self.master.data.updates.addBuild(
                builderid=builderid,
                buildrequestid=brid,
                workerid=worker.workerid)

        self.stopBuildConsumer = yield self.master.mq.startConsuming(self.controlStopBuild,
                                                                     ("control", "builds",
                                                                      str(self.buildid),
                                                                      "stop"))
        self.setupOwnProperties()

        # then narrow WorkerLocks down to the right worker
        self.locks = [(l.getLock(workerforbuilder.worker), a)
                      for l, a in self.locks]
        metrics.MetricCountEvent.log('active_builds', 1)

        # make sure properties are available to people listening on 'new'
        # events
        yield self._flushProperties(None)
        self.build_status.buildStarted(self)
        yield self.master.data.updates.setBuildStateString(self.buildid, u'starting')
        yield self.master.data.updates.generateNewBuildEvent(self.buildid)

        try:
            self.setupBuild()  # create .steps
        except Exception:
            yield self.buildPreparationFailure(Failure(), "worker_prepare")
            self.buildFinished(['Build.setupBuild', 'failed'], EXCEPTION)
            return

        # flush properties in the beginning of the build
        yield self._flushProperties(None)

        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           u'preparing worker')
        try:
            ready_or_failure = yield workerforbuilder.prepare(self)
        except Exception:
            ready_or_failure = Failure()

        # If prepare returns True then it is ready and we start a build
        # If it returns failure then we don't start a new build.
        if ready_or_failure is not True:
            yield self.buildPreparationFailure(ready_or_failure, "worker_prepare")
            if self.stopped:
                self.buildFinished(["worker", "cancelled"], self.results)
            else:
                self.buildFinished(["worker", "not", "available"], RETRY)
            return

        # ping the worker to make sure they're still there. If they've
        # fallen off the map (due to a NAT timeout or something), this
        # will fail in a couple of minutes, depending upon the TCP
        # timeout.
        #
        # TODO: This can unnecessarily suspend the starting of a build, in
        # situations where the worker is live but is pushing lots of data to
        # us in a build.
        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           u'pinging worker')
        log.msg("starting build %s.. pinging the worker %s"
                % (self, workerforbuilder))
        try:
            ping_success_or_failure = yield workerforbuilder.ping()
        except Exception:
            ping_success_or_failure = Failure()

        if ping_success_or_failure is not True:
            yield self.buildPreparationFailure(ping_success_or_failure, "worker_ping")
            self.buildFinished(["worker", "not", "pinged"], RETRY)
            return

        self.conn = workerforbuilder.worker.conn
        self.setupWorkerForBuilder(workerforbuilder)
        self.subs = self.conn.notifyOnDisconnect(self.lostRemote)

        # tell the remote that it's starting a build, too
        try:
            yield self.conn.remoteStartBuild(self.builder.name)
        except Exception:
            yield self.buildPreparationFailure(Failure(), "start_build")
            self.buildFinished(["worker", "not", "building"], RETRY)
            return

        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           u'acquiring locks')
        yield self.acquireLocks()

        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           u'building')

        # This worker looks sane!
        worker.resetQuarantine()

        # start the sequence of steps
        self.startNextStep()

    @defer.inlineCallbacks
    def buildPreparationFailure(self, why, state_string):
        log.err(why, "while " + state_string)
        self.workerforbuilder.worker.putInQuarantine()
        step = buildstep.BuildStep(name=state_string)
        step.setBuild(self)
        yield step.addStep()
        if isinstance(why, failure.Failure):
            yield step.addLogWithFailure(why)
        yield self.master.data.updates.finishStep(step.stepid, EXCEPTION, False)

    @staticmethod
    def canStartWithWorkerForBuilder(lockList, workerforbuilder):
        for lock, access in lockList:
            worker_lock = lock.getLock(workerforbuilder.worker)
            if not worker_lock.isAvailable(None, access):
                return False
        return True

    def acquireLocks(self, res=None):
        self._acquiringLock = None
        if not self.locks:
            return defer.succeed(None)
        if self.stopped:
            return defer.succeed(None)
        log.msg("acquireLocks(build %s, locks %s)" % (self, self.locks))
        for lock, access in self.locks:
            if not lock.isAvailable(self, access):
                log.msg("Build %s waiting for lock %s" % (self, lock))
                d = lock.waitUntilMaybeAvailable(self, access)
                d.addCallback(self.acquireLocks)
                self._acquiringLock = (lock, access, d)
                return d
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return defer.succeed(None)

    def setUniqueStepName(self, step):
        # If there are any name collisions, we add a count to the loser
        # until it is unique.
        name = step.name
        if name in self.stepnames:
            count = self.stepnames[name]
            count += 1
            self.stepnames[name] = count
            name = "%s_%d" % (step.name, count)
        else:
            self.stepnames[name] = 0
        step.name = name

    def setupBuildSteps(self, step_factories):
        steps = []
        for factory in step_factories:
            step = factory.buildStep()
            step.setBuild(self)
            step.setWorker(self.workerforbuilder.worker)
            self.setUniqueStepName(step)
            steps.append(step)

            if self.useProgress:
                step.setupProgress()
        return steps

    def setupBuild(self):
        # create the actual BuildSteps.
        self.executedSteps = []
        self.stepnames = {}

        self.steps = self.setupBuildSteps(self.stepFactories)

        # we are now ready to set up our BuildStatus.
        # pass all sourcestamps to the buildstatus
        self.build_status.setSourceStamps(self.sources)
        self.build_status.setReason(self.reason)
        self.build_status.setBlamelist(self.blamelist())

        # gather owners from build requests
        owners = [r.properties['owner'] for r in self.requests
                  if "owner" in r.properties]
        if owners:
            self.setProperty('owners', owners, 'Build')
        self.text = []  # list of text string lists (text2)

    def _addBuildSteps(self, step_factories):
        factories = [interfaces.IBuildStepFactory(s) for s in step_factories]
        return self.setupBuildSteps(factories)

    def addStepsAfterCurrentStep(self, step_factories):
        # Add the new steps after the step that is running.
        # The running step has already been popped from self.steps
        self.steps[0:0] = self._addBuildSteps(step_factories)

    def addStepsAfterLastStep(self, step_factories):
        # Add the new steps to the end.
        self.steps.extend(self._addBuildSteps(step_factories))

    def getNextStep(self):
        """This method is called to obtain the next BuildStep for this build.
        When it returns None (or raises a StopIteration exception), the build
        is complete."""
        if not self.steps:
            return None
        if not self.conn:
            return None
        if self.terminate or self.stopped:
            # Run any remaining alwaysRun steps, and skip over the others
            while True:
                s = self.steps.pop(0)
                if s.alwaysRun:
                    return s
                if not self.steps:
                    return None
        else:
            return self.steps.pop(0)

    def startNextStep(self):
        try:
            s = self.getNextStep()
        except StopIteration:
            s = None
        if not s:
            return self.allStepsDone()
        self.executedSteps.append(s)
        self.currentStep = s
        d = defer.maybeDeferred(s.startStep, self.conn)
        d.addBoth(self._flushProperties)
        d.addCallback(self._stepDone, s)
        d.addErrback(self.buildException)

    @defer.inlineCallbacks
    def _flushProperties(self, results):
        # `results` is just passed on to the next callback
        yield self.master.data.updates.setBuildProperties(self.buildid, self)

        defer.returnValue(results)

    @defer.inlineCallbacks
    def _stepDone(self, results, step):
        self.currentStep = None
        if self.finished:
            return  # build was interrupted, don't keep building
        terminate = yield self.stepDone(results, step)  # interpret/merge results
        if terminate:
            self.terminate = True
        yield self.startNextStep()

    @defer.inlineCallbacks
    def stepDone(self, results, step):
        """This method is called when the BuildStep completes. It is passed a
        status object from the BuildStep and is responsible for merging the
        Step's results into those of the overall Build."""

        terminate = False
        text = None
        if isinstance(results, tuple):
            results, text = results
        assert isinstance(results, type(SUCCESS)), "got %r" % (results,)
        summary = yield step.getBuildResultSummary()
        if 'build' in summary:
            text = [summary['build']]
        log.msg(" step '%s' complete: %s (%s)" % (step.name, statusToString(results), text))
        if text:
            self.text.extend(text)
            self.master.data.updates.setBuildStateString(self.buildid,
                                                         bytes2unicode(" ".join(self.text)))
        self.results, terminate = computeResultAndTermination(step, results,
                                                              self.results)
        if not self.conn:
            # force the results to retry if the connection was lost
            self.results = RETRY
            terminate = True
        defer.returnValue(terminate)

    def lostRemote(self, conn=None):
        # the worker went away. There are several possible reasons for this,
        # and they aren't necessarily fatal. For now, kill the build, but
        # TODO: see if we can resume the build when it reconnects.
        log.msg("%s.lostRemote" % self)
        self.conn = None
        self.text = ["lost", "connection"]
        self.results = RETRY
        if self.currentStep and self.currentStep.results is None:
            # this should cause the step to finish.
            log.msg(" stopping currentStep", self.currentStep)
            self.currentStep.interrupt(Failure(error.ConnectionLost()))
        else:
            self.stopped = True
            if self._acquiringLock:
                lock, access, d = self._acquiringLock
                lock.stopWaitingUntilAvailable(self, access, d)
                d.callback(None)

    def controlStopBuild(self, key, params):
        return self.stopBuild(**params)

    def stopBuild(self, reason="<no reason given>", results=CANCELLED):
        # the idea here is to let the user cancel a build because, e.g.,
        # they realized they committed a bug and they don't want to waste
        # the time building something that they know will fail. Another
        # reason might be to abandon a stuck build. We want to mark the
        # build as failed quickly rather than waiting for the worker's
        # timeout to kill it on its own.

        log.msg(" %s: stopping build: %s %d" % (self, reason, results))
        if self.finished:
            return
        # TODO: include 'reason' in this point event
        self.stopped = True
        if self.currentStep and self.currentStep.results is None:
            self.currentStep.interrupt(reason)

        self.results = results

        if self._acquiringLock:
            lock, access, d = self._acquiringLock
            lock.stopWaitingUntilAvailable(self, access, d)
            d.callback(None)

    def allStepsDone(self):
        if self.results == FAILURE:
            text = ["failed"]
        elif self.results == WARNINGS:
            text = ["warnings"]
        elif self.results == EXCEPTION:
            text = ["exception"]
        elif self.results == RETRY:
            text = ["retry"]
        elif self.results == CANCELLED:
            text = ["cancelled"]
        else:
            text = ["build", "successful"]
        text.extend(self.text)
        return self.buildFinished(text, self.results)

    def buildException(self, why):
        log.msg("%s.buildException" % self)
        log.err(why)
        # try to finish the build, but since we've already faced an exception,
        # this may not work well.
        try:
            self.buildFinished(["build", "exception"], EXCEPTION)
        except Exception:
            log.err(Failure(), 'while finishing a build with an exception')

    @defer.inlineCallbacks
    def buildFinished(self, text, results):
        """This method must be called when the last Step has completed. It
        marks the Build as complete and returns the Builder to the 'idle'
        state.

        It takes two arguments which describe the overall build status:
        text, results. 'results' is one of the possible results (see buildbot.process.results).

        If 'results' is SUCCESS or WARNINGS, we will permit any dependent
        builds to start. If it is 'FAILURE', those builds will be
        abandoned."""
        try:
            self.stopBuildConsumer.stopConsuming()
            self.finished = True
            if self.conn:
                self.subs.unsubscribe()
                self.subs = None
                self.conn = None
            log.msg(" %s: build finished" % self)
            self.results = worst_status(self.results, results)
            self.build_status.setText(text)
            self.build_status.setResults(self.results)
            self.build_status.buildFinished()
            eventually(self.releaseLocks)
            metrics.MetricCountEvent.log('active_builds', -1)

            yield self.master.data.updates.setBuildStateString(self.buildid,
                                                               bytes2unicode(" ".join(text)))
            yield self.master.data.updates.finishBuild(self.buildid, self.results)

            # mark the build as finished
            self.workerforbuilder.buildFinished()
            self.builder.buildFinished(self, self.workerforbuilder)
        except Exception:
            log.err(None, 'from finishing a build; this is a '
                          'serious error - please file a bug at http://buildbot.net')

    def releaseLocks(self):
        if self.locks:
            log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            if lock.isOwner(self, access):
                lock.release(self, access)

    def getSummaryStatistic(self, name, summary_fn, initial_value=_sentinel):
        step_stats_list = [
            st.getStatistic(name)
            for st in self.executedSteps
            if st.hasStatistic(name)]
        if initial_value is self._sentinel:
            return reduce(summary_fn, step_stats_list)
        return reduce(summary_fn, step_stats_list, initial_value)

    @defer.inlineCallbacks
    def getUrl(self):
        builder_id = yield self.builder.getBuilderId()
        defer.returnValue(getURLForBuild(self.master, builder_id, self.number))

    def waitUntilFinished(self):
        return self.master.mq.waitUntilEvent(
            ('builds', str(self.buildid), 'finished'),
            lambda: self.finished)

    # IBuildControl

    def getStatus(self):
        return self.build_status
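One detail worth isolating from the class above: getSummaryStatistic uses the class-level _sentinel list so it can tell "no initial_value supplied" apart from legitimate falsy values such as 0 or None. A self-contained sketch of the same pattern (the names summarize and values are illustrative, not from the source):

from functools import reduce

_sentinel = []  # a unique object; identity comparison cannot collide with caller data


def summarize(values, summary_fn, initial_value=_sentinel):
    if initial_value is _sentinel:
        # no initial value supplied: reduce over the statistics alone
        return reduce(summary_fn, values)
    return reduce(summary_fn, values, initial_value)


summarize([1, 2, 3], lambda a, b: a + b)  # -> 6
summarize([], lambda a, b: a + b, 0)      # -> 0 rather than a TypeError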
Example #3
class BuildStep(results.ResultComputingConfigMixin, properties.PropertiesMixin,
                WorkerAPICompatMixin, util.ComparableMixin):

    alwaysRun = False
    doStepIf = True
    hideStepIf = False
    compare_attrs = ("_factory", )
    # properties set on a build step are, by nature, always runtime properties
    set_runtime_properties = True

    renderables = results.ResultComputingConfigMixin.resultConfig + [
        'alwaysRun',
        'description',
        'descriptionDone',
        'descriptionSuffix',
        'doStepIf',
        'hideStepIf',
        'workdir',
    ]

    # 'parms' holds a list of all the parameters we care about, to allow
    # users to instantiate a subclass of BuildStep with a mixture of
    # arguments, some of which are for us, some of which are for the subclass
    # (or a delegate of the subclass, like how ShellCommand delivers many
    # arguments to the RemoteShellCommand that it creates). Such delegating
    # subclasses will use this list to figure out which arguments are meant
    # for us and which should be given to someone else.
    parms = [
        'alwaysRun',
        'description',
        'descriptionDone',
        'descriptionSuffix',
        'doStepIf',
        'flunkOnFailure',
        'flunkOnWarnings',
        'haltOnFailure',
        'updateBuildSummaryPolicy',
        'hideStepIf',
        'locks',
        'logEncoding',
        'name',
        'progressMetrics',
        'useProgress',
        'warnOnFailure',
        'warnOnWarnings',
        'workdir',
    ]

    name = "generic"
    description = None  # set this to a list of short strings to override
    descriptionDone = None  # alternate description when the step is complete
    descriptionSuffix = None  # extra information to append to the description
    updateBuildSummaryPolicy = None
    locks = []
    progressMetrics = ()  # 'time' is implicit
    useProgress = True  # set to False if step is really unpredictable
    build = None
    step_status = None
    progress = None
    logEncoding = None
    cmd = None
    rendered = False  # true if attributes are rendered
    _workdir = None
    _waitingForLocks = False

    def _run_finished_hook(self):
        return None  # override in tests

    def __init__(self, **kwargs):
        self.worker = None
        self._registerOldWorkerAttr("worker", name="buildslave")

        for p in self.__class__.parms:
            if p in kwargs:
                setattr(self, p, kwargs.pop(p))

        if kwargs:
            config.error("%s.__init__ got unexpected keyword argument(s) %s" %
                         (self.__class__, list(kwargs)))
        self._pendingLogObservers = []

        if not isinstance(self.name, str) and not IRenderable.providedBy(
                self.name):
            config.error(
                "BuildStep name must be a string or a renderable object: "
                "%r" % (self.name, ))

        if isinstance(self.description, str):
            self.description = [self.description]
        if isinstance(self.descriptionDone, str):
            self.descriptionDone = [self.descriptionDone]
        if isinstance(self.descriptionSuffix, str):
            self.descriptionSuffix = [self.descriptionSuffix]

        if self.updateBuildSummaryPolicy is None:
            # compute default value for updateBuildSummaryPolicy
            self.updateBuildSummaryPolicy = [EXCEPTION, RETRY, CANCELLED]
            if self.flunkOnFailure or self.haltOnFailure or self.warnOnFailure:
                self.updateBuildSummaryPolicy.append(FAILURE)
            if self.warnOnWarnings or self.flunkOnWarnings:
                self.updateBuildSummaryPolicy.append(WARNINGS)
        if self.updateBuildSummaryPolicy is False:
            self.updateBuildSummaryPolicy = []
        if self.updateBuildSummaryPolicy is True:
            self.updateBuildSummaryPolicy = ALL_RESULTS
        if not isinstance(self.updateBuildSummaryPolicy, list):
            config.error("BuildStep updateBuildSummaryPolicy must be "
                         "a list of result ids or boolean but it is %r" %
                         (self.updateBuildSummaryPolicy, ))
        self._acquiringLock = None
        self.stopped = False
        self.master = None
        self.statistics = {}
        self.logs = {}
        self._running = False
        self.stepid = None
        self.results = None
        self._start_unhandled_deferreds = None

    def __new__(klass, *args, **kwargs):
        self = object.__new__(klass)
        self._factory = _BuildStepFactory(klass, *args, **kwargs)
        return self

    def __str__(self):
        args = [repr(x) for x in self._factory.args]
        args.extend(
            [str(k) + "=" + repr(v) for k, v in self._factory.kwargs.items()])
        return "{}({})".format(self.__class__.__name__, ", ".join(args))

    __repr__ = __str__

    def setBuild(self, build):
        self.build = build
        self.master = self.build.master

    def setWorker(self, worker):
        self.worker = worker

    deprecatedWorkerClassMethod(locals(),
                                setWorker,
                                compat_name="setBuildSlave")

    @deprecate.deprecated(versions.Version("buildbot", 0, 9, 0))
    def setDefaultWorkdir(self, workdir):
        if self._workdir is None:
            self._workdir = workdir

    @property
    def workdir(self):
        # default the workdir appropriately
        if self._workdir is not None or self.build is None:
            return self._workdir
        else:
            # see :ref:`Factory-Workdir-Functions` for details on how to
            # customize this
            if callable(self.build.workdir):
                try:
                    return self.build.workdir(self.build.sources)
                except AttributeError as e:
                    # If the callable raises an AttributeError, Python thinks
                    # that workdir itself does not exist; it would swallow the
                    # error and call __getattr__ from worker_transition.
                    # Re-raise the original exception with a changed type but
                    # the same traceback. raise_with_traceback raises itself,
                    # so no extra `raise` is needed.
                    raise_with_traceback(CallableAttributeError(e))
            else:
                return self.build.workdir

    @workdir.setter
    def workdir(self, workdir):
        self._workdir = workdir

    def addFactoryArguments(self, **kwargs):
        # this is here for backwards compatibility
        pass

    def _getStepFactory(self):
        return self._factory

    def setupProgress(self):
        # this function temporarily does nothing
        pass

    def setProgress(self, metric, value):
        # this function temporarily does nothing
        pass

    def getCurrentSummary(self):
        if self.description is not None:
            stepsumm = util.join_list(self.description)
            if self.descriptionSuffix:
                stepsumm += u' ' + util.join_list(self.descriptionSuffix)
        else:
            stepsumm = u'running'
        return {u'step': stepsumm}

    def getResultSummary(self):
        if self.descriptionDone is not None or self.description is not None:
            stepsumm = util.join_list(self.descriptionDone or self.description)
            if self.descriptionSuffix:
                stepsumm += u' ' + util.join_list(self.descriptionSuffix)
        else:
            stepsumm = u'finished'

        if self.results != SUCCESS:
            stepsumm += u' (%s)' % Results[self.results]

        return {u'step': stepsumm}

    @defer.inlineCallbacks
    def getBuildResultSummary(self):
        summary = yield self.getResultSummary()
        if self.results in self.updateBuildSummaryPolicy and u'build' not in summary and u'step' in summary:
            summary[u'build'] = summary[u'step']
        defer.returnValue(summary)

    @debounce.method(wait=1)
    @defer.inlineCallbacks
    def updateSummary(self):
        def methodInfo(m):
            import inspect
            lines = inspect.getsourcelines(m)
            return "\nat %s:%s:\n %s" % (inspect.getsourcefile(m), lines[1],
                                         "\n".join(lines[0]))

        if not self._running:
            summary = yield self.getResultSummary()
            if not isinstance(summary, dict):
                raise TypeError('getResultSummary must return a dictionary: ' +
                                methodInfo(self.getResultSummary))
        else:
            summary = yield self.getCurrentSummary()
            if not isinstance(summary, dict):
                raise TypeError(
                    'getCurrentSummary must return a dictionary: ' +
                    methodInfo(self.getCurrentSummary))

        stepResult = summary.get('step', u'finished')
        if not isinstance(stepResult, text_type):
            raise TypeError("step result string must be unicode (got %r)" %
                            (stepResult, ))
        if self.stepid is not None:
            stepResult = self.build.properties.cleanupTextFromSecrets(
                stepResult)
            yield self.master.data.updates.setStepStateString(
                self.stepid, stepResult)

        if not self._running:
            buildResult = summary.get('build', None)
            if buildResult and not isinstance(buildResult, text_type):
                raise TypeError("build result string must be unicode")

    # updateSummary gets patched out for old-style steps, so keep a copy we can
    # call internally for such steps
    realUpdateSummary = updateSummary

    @defer.inlineCallbacks
    def addStep(self):
        # create and start the step, noting that the name may be altered to
        # ensure uniqueness
        self.name = yield self.build.render(self.name)
        self.stepid, self.number, self.name = yield self.master.data.updates.addStep(
            buildid=self.build.buildid, name=util.bytes2unicode(self.name))
        yield self.master.data.updates.startStep(self.stepid)

    @defer.inlineCallbacks
    def startStep(self, remote):
        self.remote = remote

        yield self.addStep()
        self.locks = yield self.build.render(self.locks)

        # convert all locks into their real form
        self.locks = [
            (self.build.builder.botmaster.getLockFromLockAccess(access),
             access) for access in self.locks
        ]
        # then narrow WorkerLocks down to the worker that this build is being
        # run on
        self.locks = [(l.getLock(self.build.workerforbuilder.worker), la)
                      for l, la in self.locks]

        for l, la in self.locks:
            if l in self.build.locks:
                log.msg("Hey, lock %s is claimed by both a Step (%s) and the"
                        " parent Build (%s)" % (l, self, self.build))
                raise RuntimeError("lock claimed by both Step and Build")

        try:
            # set up locks
            yield self.acquireLocks()

            if self.stopped:
                raise BuildStepCancelled

            # render renderables in parallel
            renderables = []
            accumulateClassList(self.__class__, 'renderables', renderables)

            def setRenderable(res, attr):
                setattr(self, attr, res)

            dl = []
            for renderable in renderables:
                d = self.build.render(getattr(self, renderable))
                d.addCallback(setRenderable, renderable)
                dl.append(d)
            yield defer.gatherResults(dl)
            self.rendered = True
            # we describe ourselves only when renderables are interpolated
            self.realUpdateSummary()

            # check doStepIf (after rendering)
            if isinstance(self.doStepIf, bool):
                doStep = self.doStepIf
            else:
                doStep = yield self.doStepIf(self)

            # run -- or skip -- the step
            if doStep:
                try:
                    self._running = True
                    self.results = yield self.run()
                finally:
                    self._running = False
            else:
                self.results = SKIPPED

        # NOTE: all of these `except` blocks must set self.results immediately!
        except BuildStepCancelled:
            self.results = CANCELLED

        except BuildStepFailed:
            self.results = FAILURE

        except error.ConnectionLost:
            self.results = RETRY

        except Exception:
            self.results = EXCEPTION
            why = Failure()
            log.err(why, "BuildStep.failed; traceback follows")
            yield self.addLogWithFailure(why)

        if self.stopped and self.results != RETRY:
            # We handle this specially because we don't care about the
            # return code of an interrupted command; we know the result
            # should just be an exception due to the interrupt.
            # At the same time we must respect a RETRY status, because it
            # is used to retry a build that was interrupted for some other
            # reason, for example because the worker was lost.
            if self.results != CANCELLED:
                self.results = EXCEPTION

        # update the summary one last time, make sure that completes,
        # and then don't update it any more.
        self.realUpdateSummary()
        yield self.realUpdateSummary.stop()

        # determine whether we should hide this step
        hidden = self.hideStepIf
        if callable(hidden):
            try:
                hidden = hidden(self.results, self)
            except Exception:
                why = Failure()
                log.err(why, "hidden callback failed; traceback follows")
                yield self.addLogWithFailure(why)
                self.results = EXCEPTION
                hidden = False

        yield self.master.data.updates.finishStep(self.stepid, self.results,
                                                  hidden)
        # finish unfinished logs
        all_finished = yield self.finishUnfinishedLogs()
        if not all_finished:
            self.results = EXCEPTION
        self.releaseLocks()

        defer.returnValue(self.results)

    @defer.inlineCallbacks
    def finishUnfinishedLogs(self):
        ok = True
        not_finished_logs = [
            v for (k, v) in iteritems(self.logs) if not v.finished
        ]
        finish_logs = yield defer.DeferredList(
            [v.finish() for v in not_finished_logs], consumeErrors=True)
        for success, res in finish_logs:
            if not success:
                log.err(res, "when trying to finish a log")
                ok = False
        defer.returnValue(ok)

    def acquireLocks(self, res=None):
        self._acquiringLock = None
        if not self.locks:
            return defer.succeed(None)
        if self.stopped:
            return defer.succeed(None)
        log.msg("acquireLocks(step %s, locks %s)" % (self, self.locks))
        for lock, access in self.locks:
            if not lock.isAvailable(self, access):
                self._waitingForLocks = True
                log.msg("step %s waiting for lock %s" % (self, lock))
                d = lock.waitUntilMaybeAvailable(self, access)
                d.addCallback(self.acquireLocks)
                self._acquiringLock = (lock, access, d)
                return d
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        self._waitingForLocks = False
        return defer.succeed(None)

    @defer.inlineCallbacks
    def run(self):
        self._start_deferred = defer.Deferred()
        unhandled = self._start_unhandled_deferreds = []
        self._sync_addlog_deferreds = []
        try:
            # here's where we set things up for backward compatibility for
            # old-style steps, using monkey patches so that new-style steps
            # aren't bothered by any of this equipment

            # monkey-patch self.step_status.{setText,setText2} back into
            # existence for old steps, signalling an update to the summary
            self.step_status = BuildStepStatus()
            self.step_status.setText = lambda text: self.realUpdateSummary()
            self.step_status.setText2 = lambda text: self.realUpdateSummary()

            # monkey-patch in support for old statistics functions
            self.step_status.setStatistic = self.setStatistic
            self.step_status.getStatistic = self.getStatistic
            self.step_status.hasStatistic = self.hasStatistic

            # monkey-patch an addLog that returns a write-only, sync log
            self.addLog = self.addLog_oldStyle
            self._logFileWrappers = {}

            # and a getLog that returns a read-only, sync log, captured by
            # LogObservers installed by addLog_oldStyle
            self.getLog = self.getLog_oldStyle

            # old-style steps shouldn't be calling updateSummary
            def updateSummary():
                assert 0, 'updateSummary is only valid on new-style steps'

            self.updateSummary = updateSummary

            results = yield self.start()
            if results is not None:
                self._start_deferred.callback(results)
            results = yield self._start_deferred
        finally:
            # hook for tests
            # assert so that it only runs in non-optimized mode
            assert self._run_finished_hook() is None
            # wait until all the sync logs have been actually created before
            # finishing
            yield defer.DeferredList(self._sync_addlog_deferreds,
                                     consumeErrors=True)
            self._start_deferred = None
            unhandled = self._start_unhandled_deferreds
            self.realUpdateSummary()

            # Wait for any possibly-unhandled deferreds.  If any fail, change the
            # result to EXCEPTION and log.
            while unhandled:
                self._start_unhandled_deferreds = []
                unhandled_results = yield defer.DeferredList(
                    unhandled, consumeErrors=True)
                for success, res in unhandled_results:
                    if not success:
                        log.err(
                            res,
                            "from an asynchronous method executed in an old-style step"
                        )
                        results = EXCEPTION
                unhandled = self._start_unhandled_deferreds

        defer.returnValue(results)

    def finished(self, results):
        assert self._start_deferred, \
            "finished() can only be called from old steps implementing start()"
        self._start_deferred.callback(results)

    def failed(self, why):
        assert self._start_deferred, \
            "failed() can only be called from old steps implementing start()"
        self._start_deferred.errback(why)

    def isNewStyle(self):
        # **temporary** method until new-style steps are the only supported style
        if PY3:
            return self.run.__func__ is not BuildStep.run
        return self.run.im_func is not BuildStep.run.im_func

    def start(self):
        # New-style classes implement 'run'.
        # Old-style classes implemented 'start'. Advise them to do 'run'
        # instead.
        raise NotImplementedError("your subclass must implement run()")

    def interrupt(self, reason):
        self.stopped = True
        if self._acquiringLock:
            lock, access, d = self._acquiringLock
            lock.stopWaitingUntilAvailable(self, access, d)
            d.callback(None)

        if self._waitingForLocks:
            self.addCompleteLog('cancelled while waiting for locks',
                                str(reason))
        else:
            self.addCompleteLog('cancelled', str(reason))

        if self.cmd:
            d = self.cmd.interrupt(reason)
            d.addErrback(log.err, 'while cancelling command')

    def releaseLocks(self):
        log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            if lock.isOwner(self, access):
                lock.release(self, access)
            else:
                # This should only happen if we've been interrupted
                assert self.stopped

    # utility methods that BuildSteps may find useful

    def workerVersion(self, command, oldversion=None):
        return self.build.getWorkerCommandVersion(command, oldversion)

    deprecatedWorkerClassMethod(locals(), workerVersion)

    def workerVersionIsOlderThan(self, command, minversion):
        sv = self.build.getWorkerCommandVersion(command, None)
        if sv is None:
            return True
        if [int(s) for s in sv.split(".")
            ] < [int(m) for m in minversion.split(".")]:
            return True
        return False

    deprecatedWorkerClassMethod(locals(), workerVersionIsOlderThan)

    def checkWorkerHasCommand(self, command):
        if not self.workerVersion(command):
            message = "worker is too old, does not know about %s" % command
            raise WorkerTooOldError(message)

    deprecatedWorkerClassMethod(locals(), checkWorkerHasCommand)

    def getWorkerName(self):
        return self.build.getWorkerName()

    deprecatedWorkerClassMethod(locals(), getWorkerName)

    def addLog(self, name, type='s', logEncoding=None):
        d = self.master.data.updates.addLog(self.stepid,
                                            util.bytes2unicode(name),
                                            text_type(type))

        @d.addCallback
        def newLog(logid):
            return self._newLog(name, type, logid, logEncoding)

        return d

    addLog_newStyle = addLog

    def addLog_oldStyle(self, name, type='s', logEncoding=None):
        # create a logfile instance that acts like old-style status logfiles
        # begin to create a new-style logfile
        loog_d = self.addLog_newStyle(name, type, logEncoding)
        self._start_unhandled_deferreds.append(loog_d)
        # and wrap the deferred that will eventually fire with that logfile
        # into a write-only logfile instance
        wrapper = SyncLogFileWrapper(self, name, loog_d)
        self._logFileWrappers[name] = wrapper
        return wrapper

    def getLog(self, name):
        return self.logs[name]

    def getLog_oldStyle(self, name):
        return self._logFileWrappers[name]

    @_maybeUnhandled
    @defer.inlineCallbacks
    def addCompleteLog(self, name, text):
        logid = yield self.master.data.updates.addLog(self.stepid,
                                                      util.bytes2unicode(name),
                                                      u't')
        _log = self._newLog(name, u't', logid)
        yield _log.addContent(text)
        yield _log.finish()

    @_maybeUnhandled
    @defer.inlineCallbacks
    def addHTMLLog(self, name, html):
        logid = yield self.master.data.updates.addLog(self.stepid,
                                                      util.bytes2unicode(name),
                                                      u'h')
        _log = self._newLog(name, u'h', logid)
        html = bytes2NativeString(html)
        yield _log.addContent(html)
        yield _log.finish()

    @defer.inlineCallbacks
    def addLogWithFailure(self, why, logprefix=""):
        # helper for showing exceptions to the users
        try:
            yield self.addCompleteLog(logprefix + "err.text",
                                      why.getTraceback())
            yield self.addHTMLLog(logprefix + "err.html", formatFailure(why))
        except Exception:
            log.err(Failure(), "error while formatting exceptions")

    def addLogWithException(self, why, logprefix=""):
        return self.addLogWithFailure(Failure(why), logprefix)

    def addLogObserver(self, logname, observer):
        assert interfaces.ILogObserver.providedBy(observer)
        observer.setStep(self)
        self._pendingLogObservers.append((logname, observer))
        self._connectPendingLogObservers()

    def _newLog(self, name, type, logid, logEncoding=None):
        if not logEncoding:
            logEncoding = self.logEncoding
        if not logEncoding:
            logEncoding = self.master.config.logEncoding
        log = plog.Log.new(self.master, name, type, logid, logEncoding)
        self.logs[name] = log
        self._connectPendingLogObservers()
        return log

    def _connectPendingLogObservers(self):
        for logname, observer in self._pendingLogObservers[:]:
            if logname in self.logs:
                observer.setLog(self.logs[logname])
                self._pendingLogObservers.remove((logname, observer))

    @_maybeUnhandled
    @defer.inlineCallbacks
    def addURL(self, name, url):
        yield self.master.data.updates.addStepURL(self.stepid, text_type(name),
                                                  text_type(url))
        defer.returnValue(None)

    @defer.inlineCallbacks
    def runCommand(self, command):
        self.cmd = command
        command.worker = self.worker
        try:
            res = yield command.run(self, self.remote, self.build.builder.name)
        finally:
            self.cmd = None
        defer.returnValue(res)

    def hasStatistic(self, name):
        return name in self.statistics

    def getStatistic(self, name, default=None):
        return self.statistics.get(name, default)

    def getStatistics(self):
        return self.statistics.copy()

    def setStatistic(self, name, value):
        self.statistics[name] = value

    def _describe(self, done=False):
        # old-style steps expect this function to exist
        assert not self.isNewStyle()
        return []

    def describe(self, done=False):
        # old-style steps expect this function to exist
        assert not self.isNewStyle()
        desc = self._describe(done)
        if not desc:
            return []
        if self.descriptionSuffix:
            desc += self.descriptionSuffix
        return desc
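A minimal sketch of the new-style contract described above (the step and its log name are hypothetical): only run() is overridden, so isNewStyle() returns True and startStep() handles locks, rendering, and summary updates around it.

from twisted.internet import defer

from buildbot.process.buildstep import BuildStep
from buildbot.process.results import FAILURE, SUCCESS


class CountFiles(BuildStep):
    """Hypothetical new-style step: log the files the build touched."""
    name = 'count-files'

    @defer.inlineCallbacks
    def run(self):
        files = self.build.allFiles()  # Build.allFiles() from Example #2
        yield self.addCompleteLog('files', '\n'.join(files))
        # assumption for illustration: an empty change set counts as failure
        defer.returnValue(SUCCESS if files else FAILURE)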
Example #4
class Builder(util_service.ReconfigurableServiceMixin, service.MultiService,
              WorkerAPICompatMixin):

    # reconfigure builders before workers
    reconfig_priority = 196

    @property
    def expectations(self):
        warnings.warn("'Builder.expectations' is deprecated.")
        return None

    def __init__(self, name):
        service.MultiService.__init__(self)
        self.name = name

        # this is filled on demand by getBuilderId; don't access it directly
        self._builderid = None

        # build/wannabuild slots: Build objects move along this sequence
        self.building = []
        # old_building holds active builds that were stolen from a predecessor
        self.old_building = weakref.WeakKeyDictionary()

        # workers which have connected but which are not yet available.
        # These are always in the ATTACHING state.
        self.attaching_workers = []
        self._registerOldWorkerAttr("attaching_workers")

        # workers at our disposal. Each WorkerForBuilder instance has a
        # .state that is IDLE, PINGING, or BUILDING. "PINGING" is used when a
        # Build is about to start, to make sure that they're still alive.
        self.workers = []
        self._registerOldWorkerAttr("workers")

        self.config = None
        self.builder_status = None

    @defer.inlineCallbacks
    def reconfigServiceWithBuildbotConfig(self, new_config):
        # find this builder in the config
        found_config = False
        for builder_config in new_config.builders:
            if builder_config.name == self.name:
                found_config = True
                break
        assert found_config, "no config found for builder '%s'" % self.name

        # set up a builder status object on the first reconfig
        if not self.builder_status:
            self.builder_status = self.master.status.builderAdded(
                name=builder_config.name,
                basedir=builder_config.builddir,
                tags=builder_config.tags,
                description=builder_config.description)

        self.config = builder_config

        # allocate builderid now, so that the builder is visible in the web
        # UI; without this, the builder wouldn't appear until it performed a
        # build.
        builderid = yield self.getBuilderId()

        self.master.data.updates.updateBuilderInfo(builderid,
                                                   builder_config.description,
                                                   builder_config.tags)

        self.builder_status.setDescription(builder_config.description)
        self.builder_status.setTags(builder_config.tags)
        self.builder_status.setWorkernames(self.config.workernames)
        self.builder_status.setCacheSize(new_config.caches['Builds'])

        # if we have any workers attached which are no longer configured,
        # drop them.
        new_workernames = set(builder_config.workernames)
        self.workers = [
            w for w in self.workers if w.worker.workername in new_workernames
        ]

    def __repr__(self):
        return "<Builder '%r' at %d>" % (self.name, id(self))

    def getBuilderIdForName(self, name):
        # buildbot.config should ensure this is already unicode, but it doesn't
        # hurt to check again
        name = ascii2unicode(name)
        return self.master.data.updates.findBuilderId(name)

    def getBuilderId(self):
        # since findBuilderId is idempotent, there's no reason to add
        # additional locking around this function.
        if self._builderid:
            return defer.succeed(self._builderid)

        d = self.getBuilderIdForName(self.name)

        @d.addCallback
        def keep(builderid):
            self._builderid = builderid
            return builderid

        return d

    @defer.inlineCallbacks
    def getOldestRequestTime(self):
        """Returns the submitted_at of the oldest unclaimed build request for
        this builder, or None if there are no build requests.

        @returns: datetime instance or None, via Deferred
        """
        bldrid = yield self.getBuilderId()
        unclaimed = yield self.master.data.get(
            ('builders', bldrid, 'buildrequests'),
            [resultspec.Filter('claimed', 'eq', [False])],
            order=['submitted_at'],
            limit=1)
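        # `order` and `limit` push the sorting into the data layer, so only
        # the single oldest request is transferred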
        if unclaimed:
            defer.returnValue(unclaimed[0]['submitted_at'])

    @defer.inlineCallbacks
    def getNewestCompleteTime(self):
        """Returns the complete_at of the latest completed build request for
        this builder, or None if there are no such build requests.

        @returns: datetime instance or None, via Deferred
        """
        bldrid = yield self.getBuilderId()
        completed = yield self.master.data.get(
            ('builders', bldrid, 'buildrequests'),
            [resultspec.Filter('complete', 'eq', [True])],
            order=['-complete_at'],
            limit=1)
        if completed:
            defer.returnValue(completed[0]['complete_at'])
        else:
            defer.returnValue(None)

    def getBuild(self, number):
        for b in self.building:
            if b.build_status and b.build_status.number == number:
                return b
        for b in self.old_building:
            if b.build_status and b.build_status.number == number:
                return b
        return None

    def addLatentWorker(self, worker):
        assert interfaces.ILatentWorker.providedBy(worker)
        for w in self.workers:
            if w == worker:
                break
        else:
            wfb = workerforbuilder.LatentWorkerForBuilder(worker, self)
            self.workers.append(wfb)
            self.botmaster.maybeStartBuildsForBuilder(self.name)

    deprecatedWorkerClassMethod(locals(), addLatentWorker)

    def attached(self, worker, commands):
        """This is invoked by the Worker when the self.workername bot
        registers their builder.

        @type  worker: L{buildbot.worker.Worker}
        @param worker: the Worker that represents the worker as a whole
        @type  commands: dict: string -> string, or None
        @param commands: provides the worker's version of each RemoteCommand

        @rtype:  L{twisted.internet.defer.Deferred}
        @return: a Deferred that fires (with 'self') when the worker-side
                 builder is fully attached and ready to accept commands.
        """
        for w in self.attaching_workers + self.workers:
            if w.worker == worker:
                # already attached to them. This is fairly common, since
                # attached() gets called each time we receive the builder
                # list from the worker, and we ask for it each time we add or
                # remove a builder. So if the worker is hosting builders
                # A,B,C, and the config file changes A, we'll remove A and
                # re-add it, triggering two builder-list requests, getting
                # two redundant calls to attached() for B, and another two
                # for C.
                #
                # Therefore, when we see that we're already attached, we can
                # just ignore it.
                return defer.succeed(self)

        wfb = workerforbuilder.WorkerForBuilder()
        wfb.setBuilder(self)
        self.attaching_workers.append(wfb)
        d = wfb.attached(worker, commands)
        d.addCallback(self._attached)
        d.addErrback(self._not_attached, worker)
        return d

    def _attached(self, wfb):
        self.attaching_workers.remove(wfb)
        self.workers.append(wfb)

        return self

    def _not_attached(self, why, worker):
        # already log.err'ed by WorkerForBuilder._attachFailure
        # TODO: remove from self.workers (except that detached() should get
        #       run first, right?)
        log.err(why, 'worker failed to attach')

    def detached(self, worker):
        """This is called when the connection to the bot is lost."""
        for wfb in self.attaching_workers + self.workers:
            if wfb.worker == worker:
                break
        else:
            log.msg("WEIRD: Builder.detached(%s) (%s)"
                    " not in attaching_workers(%s)"
                    " or workers(%s)" % (worker, worker.workername,
                                         self.attaching_workers, self.workers))
            return

        if wfb in self.attaching_workers:
            self.attaching_workers.remove(wfb)
        if wfb in self.workers:
            self.workers.remove(wfb)

        # inform the WorkerForBuilder that their worker went away
        wfb.detached()

    def getAvailableWorkers(self):
        return [wfb for wfb in self.workers if wfb.isAvailable()]

    deprecatedWorkerClassMethod(locals(), getAvailableWorkers)

    @defer.inlineCallbacks
    def canStartWithWorkerForBuilder(self,
                                     workerforbuilder,
                                     buildrequests=None):
        locks = self.config.locks
        if IRenderable.providedBy(locks):
            if buildrequests is None:
                raise RuntimeError("buildrequests parameter must be specified "
                                   " when using renderable builder locks. Not "
                                   "specifying buildrequests is deprecated")

            # collect properties that would be set for a build if we
            # started it now and render locks using it
            props = Properties()
            Build.setupPropertiesKnownBeforeBuildStarts(
                props, buildrequests, self, workerforbuilder)
            locks = yield props.render(locks)

        # Make sure we don't warn and throw an exception at the same time
        if buildrequests is None:
            warnings.warn("Not passing corresponding buildrequests to "
                          "Builder.canStartWithWorkerForBuilder is deprecated")

        locks = [(self.botmaster.getLockFromLockAccess(access), access)
                 for access in locks]
        can_start = Build.canStartWithWorkerForBuilder(locks, workerforbuilder)
        defer.returnValue(can_start)

    deprecatedWorkerClassMethod(locals(),
                                canStartWithWorkerForBuilder,
                                compat_name="canStartWithSlavebuilder")

    def canStartBuild(self, workerforbuilder, breq):
        if callable(self.config.canStartBuild):
            return defer.maybeDeferred(self.config.canStartBuild, self,
                                       workerforbuilder, breq)
        return defer.succeed(True)

    @defer.inlineCallbacks
    def _startBuildFor(self, workerforbuilder, buildrequests):
        build = self.config.factory.newBuild(buildrequests)
        build.setBuilder(self)

        props = build.getProperties()

        # give the properties a reference back to this build
        props.build = build

        Build.setupPropertiesKnownBeforeBuildStarts(props, build.requests,
                                                    build.builder,
                                                    workerforbuilder)

        log.msg("starting build %s using worker %s" %
                (build, workerforbuilder))

        # set up locks
        locks = yield build.render(self.config.locks)
        yield build.setLocks(locks)

        if self.config.env:
            build.setWorkerEnvironment(self.config.env)

        # append the build to self.building
        self.building.append(build)

        # The worker is ready to go. workerforbuilder.buildStarted() sets its
        # state to BUILDING (so we won't try to use it for any other builds).
        # This gets set back to IDLE by the Build itself when it finishes.
        # Note: This can't be done in `Build.startBuild`, since it needs to be done
        # synchronously, before the BuildRequestDistributor looks at
        # another build request.
        workerforbuilder.buildStarted()

        # create the BuildStatus object that goes with the Build
        bs = self.builder_status.newBuild()

        # let status know
        self.master.status.build_started(buildrequests[0].id, self.name, bs)

        # start the build. This will first set up the steps, then tell the
        # BuildStatus that it has started, which will announce it to the world
        # (through our BuilderStatus object, which is its parent).  Finally it
        # will start the actual build process.  This is done with a fresh
        # Deferred since _startBuildFor should not wait until the build is
        # finished.  This uses `maybeDeferred` to ensure that any exceptions
        # raised by startBuild are treated as deferred errbacks (see
        # http://trac.buildbot.net/ticket/2428).
        d = defer.maybeDeferred(build.startBuild, bs, workerforbuilder)
        # this shouldn't happen. if it does, the worker will be wedged
        d.addErrback(
            log.err, 'from a running build; this is a '
            'serious error - please file a bug at http://buildbot.net')

        defer.returnValue(True)

    def setupProperties(self, props):
        props.setProperty("buildername", self.name, "Builder")
        if self.config.properties:
            for propertyname in self.config.properties:
                props.setProperty(propertyname,
                                  self.config.properties[propertyname],
                                  "Builder")

    def buildFinished(self, build, wfb):
        """This is called when the Build has finished (either success or
        failure). Any exceptions during the build are reported with
        results=FAILURE, not with an errback."""

        # by the time we get here, the Build has already released the worker,
        # which will trigger a check for any now-possible build requests
        # (maybeStartBuilds)

        results = build.build_status.getResults()

        self.building.remove(build)
        if results == RETRY:
            d = self._resubmit_buildreqs(build)
            d.addErrback(log.err, 'while resubmitting a build request')
        else:
            complete_at_epoch = self.master.reactor.seconds()
            complete_at = epoch2datetime(complete_at_epoch)
            brids = [br.id for br in build.requests]

            d = self.master.data.updates.completeBuildRequests(
                brids, results, complete_at=complete_at)
            # nothing in particular to do with this deferred, so just log it if
            # it fails..
            d.addErrback(log.err, 'while marking build requests as completed')

        if wfb.worker:
            wfb.worker.releaseLocks()

    def _resubmit_buildreqs(self, build):
        brids = [br.id for br in build.requests]
        d = self.master.data.updates.unclaimBuildRequests(brids)

        @d.addCallback
        def notify(_):
            pass  # XXX method does not exist
            # self._msg_buildrequests_unclaimed(build.requests)

        return d

    # Build Creation

    def maybeStartBuild(self, workerforbuilder, breqs):
        # This method is called by the botmaster whenever this builder should
        # start a set of buildrequests on a worker. Do not call this method
        # directly - use master.botmaster.maybeStartBuildsForBuilder, or one of
        # the other similar methods if more appropriate

        # first, if we're not running, then don't start builds; stopService
        # uses this to ensure that any ongoing maybeStartBuild invocations
        # are complete before it stops.
        if not self.running:
            return defer.succeed(False)

        # If the build fails from here on out (e.g., because a worker has failed),
        # it will be handled outside of this function. TODO: test that!

        return self._startBuildFor(workerforbuilder, breqs)

    # a few utility functions to make the maybeStartBuild a bit shorter and
    # easier to read

    def getCollapseRequestsFn(self):
        """Helper function to determine which collapseRequests function to use
        from L{_collapseRequests}, or None for no merging"""
        # first, seek through builder, global, and the default
        collapseRequests_fn = self.config.collapseRequests
        if collapseRequests_fn is None:
            collapseRequests_fn = self.master.config.collapseRequests
        if collapseRequests_fn is None:
            collapseRequests_fn = True

        # then translate False and True properly
        if collapseRequests_fn is False:
            collapseRequests_fn = None
        elif collapseRequests_fn is True:
            collapseRequests_fn = self._defaultCollapseRequestFn

        return collapseRequests_fn

    @staticmethod
    def _defaultCollapseRequestFn(master, builder, brdict1, brdict2):
        return buildrequest.BuildRequest.canBeCollapsed(
            master, brdict1, brdict2)
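
getCollapseRequestsFn() above normalizes the builder-level and global collapseRequests settings to either None (no merging) or a callable with the signature of _defaultCollapseRequestFn. A hedged sketch of a custom callable; the veto logic and the builder name are illustrative:

def never_collapse(master, builder, brdict1, brdict2):
    # veto all merging; equivalent in effect to configuring
    # collapseRequests=False
    return False

# usage sketch in master.cfg (hypothetical builder definition):
#   util.BuilderConfig(name='runtests', collapseRequests=never_collapse, ...)
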
Example #5
class Builder(util_service.ReconfigurableServiceMixin, service.MultiService,
              WorkerAPICompatMixin):

    # reconfigure builders before workers
    reconfig_priority = 196

    def __init__(self, name, _addServices=True):
        service.MultiService.__init__(self)
        self.name = name

        # this is filled on demand by getBuilderId; don't access it directly
        self._builderid = None

        # this is created the first time we get a good build
        self.expectations = None

        # build/wannabuild slots: Build objects move along this sequence
        self.building = []
        # old_building holds active builds that were stolen from a predecessor
        self.old_building = weakref.WeakKeyDictionary()

        # workers which have connected but which are not yet available.
        # These are always in the ATTACHING state.
        self.attaching_workers = []
        self._registerOldWorkerAttr("attaching_workers")

        # workers at our disposal. Each WorkerForBuilder instance has a
        # .state that is IDLE, PINGING, or BUILDING. "PINGING" is used when a
        # Build is about to start, to make sure that they're still alive.
        self.workers = []
        self._registerOldWorkerAttr("workers")

        self.config = None
        self.builder_status = None

        if _addServices:
            self.reclaim_svc = internet.TimerService(10 * 60,
                                                     self.reclaimAllBuilds)
            self.reclaim_svc.setServiceParent(self)

            # update big status every 30 minutes, working around #1980
            self.updateStatusService = internet.TimerService(
                30 * 60, self.updateBigStatus)
            self.updateStatusService.setServiceParent(self)

    @defer.inlineCallbacks
    def reconfigServiceWithBuildbotConfig(self, new_config):
        # find this builder in the config
        found_config = False
        for builder_config in new_config.builders:
            if builder_config.name == self.name:
                found_config = True
                break
        assert found_config, "no config found for builder '%s'" % self.name

        # set up a builder status object on the first reconfig
        if not self.builder_status:
            self.builder_status = self.master.status.builderAdded(
                name=builder_config.name,
                basedir=builder_config.builddir,
                tags=builder_config.tags,
                description=builder_config.description)

        self.config = builder_config

        # allocate builderid now, so that the builder is visible in the web
        # UI; without this, the builder wouldn't appear until it performed a
        # build.
        builderid = yield self.getBuilderId()

        self.master.data.updates.updateBuilderInfo(builderid,
                                                   builder_config.description,
                                                   builder_config.tags)

        self.builder_status.setDescription(builder_config.description)
        self.builder_status.setTags(builder_config.tags)
        self.builder_status.setWorkernames(self.config.workernames)
        self.builder_status.setCacheSize(new_config.caches['Builds'])

        # if we have any workers attached which are no longer configured,
        # drop them.
        new_workernames = set(builder_config.workernames)
        self.workers = [
            w for w in self.workers if w.worker.workername in new_workernames
        ]

    def __repr__(self):
        return "<Builder '%r' at %d>" % (self.name, id(self))

    def getBuilderId(self):
        # since findBuilderId is idempotent, there's no reason to add
        # additional locking around this function.
        if self._builderid:
            return defer.succeed(self._builderid)
        # buildbot.config should ensure this is already unicode, but it doesn't
        # hurt to check again
        name = ascii2unicode(self.name)
        d = self.master.data.updates.findBuilderId(name)

        @d.addCallback
        def keep(builderid):
            self._builderid = builderid
            return builderid

        return d

    @defer.inlineCallbacks
    def getOldestRequestTime(self):
        """Returns the submitted_at of the oldest unclaimed build request for
        this builder, or None if there are no build requests.

        @returns: datetime instance or None, via Deferred
        """
        bldrid = yield self.getBuilderId()
        unclaimed = yield self.master.data.get(
            ('builders', bldrid, 'buildrequests'),
            [resultspec.Filter('claimed', 'eq', [False])])
        if unclaimed:
            unclaimed = sorted([brd['submitted_at'] for brd in unclaimed])
            defer.returnValue(unclaimed[0])
        else:
            defer.returnValue(None)

    def reclaimAllBuilds(self):
        brids = set()
        for b in self.building:
            brids.update([br.id for br in b.requests])
        for b in self.old_building:
            brids.update([br.id for br in b.requests])

        if not brids:
            return defer.succeed(None)

        d = self.master.data.updates.reclaimBuildRequests(list(brids))
        d.addErrback(log.err, 'while re-claiming running BuildRequests')
        return d

    def getBuild(self, number):
        for b in self.building:
            if b.build_status and b.build_status.number == number:
                return b
        for b in self.old_building:
            if b.build_status and b.build_status.number == number:
                return b
        return None

    def addLatentWorker(self, worker):
        assert interfaces.ILatentWorker.providedBy(worker)
        for w in self.workers:
            if w == worker:
                break
        else:
            sb = workerforbuilder.LatentWorkerForBuilder(worker, self)
            self.builder_status.addPointEvent(
                ['added', 'latent', worker.workername])
            self.workers.append(sb)
            self.botmaster.maybeStartBuildsForBuilder(self.name)

    deprecatedWorkerClassMethod(locals(), addLatentWorker)

    def attached(self, worker, commands):
        """This is invoked by the Worker when the self.workername bot
        registers their builder.

        @type  worker: L{buildbot.worker.Worker}
        @param worker: the Worker that represents the worker as a whole
        @type  commands: dict: string -> string, or None
        @param commands: provides the worker's version of each RemoteCommand

        @rtype:  L{twisted.internet.defer.Deferred}
        @return: a Deferred that fires (with 'self') when the worker-side
                 builder is fully attached and ready to accept commands.
        """
        for w in self.attaching_workers + self.workers:
            if w.worker == worker:
                # already attached to them. This is fairly common, since
                # attached() gets called each time we receive the builder
                # list from the worker, and we ask for it each time we add or
                # remove a builder. So if the worker is hosting builders
                # A,B,C, and the config file changes A, we'll remove A and
                # re-add it, triggering two builder-list requests, getting
                # two redundant calls to attached() for B, and another two
                # for C.
                #
                # Therefore, when we see that we're already attached, we can
                # just ignore it.
                return defer.succeed(self)

        sb = workerforbuilder.WorkerForBuilder()
        sb.setBuilder(self)
        self.attaching_workers.append(sb)
        d = sb.attached(worker, commands)
        d.addCallback(self._attached)
        d.addErrback(self._not_attached, worker)
        return d

    def _attached(self, sb):
        self.builder_status.addPointEvent(['connect', sb.worker.workername])
        self.attaching_workers.remove(sb)
        self.workers.append(sb)

        self.updateBigStatus()

        return self

    def _not_attached(self, why, worker):
        # already log.err'ed by WorkerForBuilder._attachFailure
        # TODO: remove from self.workers (except that detached() should get
        #       run first, right?)
        log.err(why, 'worker failed to attach')
        self.builder_status.addPointEvent(
            ['failed', 'connect', worker.workername])
        # TODO: add an HTMLLogFile of the exception

    def detached(self, worker):
        """This is called when the connection to the bot is lost."""
        for wfb in self.attaching_workers + self.workers:
            if wfb.worker == worker:
                break
        else:
            log.msg("WEIRD: Builder.detached(%s) (%s)"
                    " not in attaching_workers(%s)"
                    " or workers(%s)" % (worker, worker.workername,
                                         self.attaching_workers, self.workers))
            return
        if wfb.state == BUILDING:
            # the Build's .lostRemote method (invoked by a notifyOnDisconnect
            # handler) will cause the Build to be stopped, probably right
            # after the notifyOnDisconnect that invoked us finishes running.
            pass

        if wfb in self.attaching_workers:
            self.attaching_workers.remove(wfb)
        if wfb in self.workers:
            self.workers.remove(wfb)

        self.builder_status.addPointEvent(['disconnect', worker.workername])
        # inform the WorkerForBuilder that their worker went away
        wfb.detached()
        self.updateBigStatus()

    def updateBigStatus(self):
        try:
            # Catch exceptions here, since this is called in a LoopingCall.
            if not self.builder_status:
                return
            if not self.workers:
                self.builder_status.setBigState("offline")
            elif self.building or self.old_building:
                self.builder_status.setBigState("building")
            else:
                self.builder_status.setBigState("idle")
        except Exception:
            log.err(
                None, "while trying to update status of builder '%s'" %
                (self.name, ))

    def getAvailableWorkers(self):
        return [wfb for wfb in self.workers if wfb.isAvailable()]

    deprecatedWorkerClassMethod(locals(), getAvailableWorkers)

    def canStartWithWorkerForBuilder(self, workerforbuilder):
        locks = [(self.botmaster.getLockFromLockAccess(access), access)
                 for access in self.config.locks]
        return Build.canStartWithWorkerForBuilder(locks, workerforbuilder)

    deprecatedWorkerClassMethod(locals(),
                                canStartWithWorkerForBuilder,
                                compat_name="canStartWithSlavebuilder")

    def canStartBuild(self, workerforbuilder, breq):
        if callable(self.config.canStartBuild):
            return defer.maybeDeferred(self.config.canStartBuild, self,
                                       workerforbuilder, breq)
        return defer.succeed(True)

    @defer.inlineCallbacks
    def _startBuildFor(self, workerforbuilder, buildrequests):
        # Build a stack of cleanup functions so that, at any point, we can
        # abort this operation and unwind the commitments made so far.
        cleanups = []

        def run_cleanups():
            try:
                while cleanups:
                    fn = cleanups.pop()
                    fn()
            except Exception:
                log.err(failure.Failure(),
                        "while running %r" % (run_cleanups, ))

        # the last cleanup we want to perform is to update the big
        # status based on any other cleanup
        cleanups.append(lambda: self.updateBigStatus())

        build = self.config.factory.newBuild(buildrequests)
        build.setBuilder(self)
        build.setupProperties()
        log.msg("starting build %s using worker %s" %
                (build, workerforbuilder))

        # set up locks
        build.setLocks(self.config.locks)
        cleanups.append(workerforbuilder.worker.releaseLocks)

        if self.config.env:
            build.setWorkerEnvironment(self.config.env)

        # append the build to self.building
        self.building.append(build)
        cleanups.append(lambda: self.building.remove(build))

        # update the big status accordingly
        self.updateBigStatus()

        try:
            ready = yield workerforbuilder.prepare(self.builder_status, build)
        except Exception:
            log.err(failure.Failure(), 'while preparing workerforbuilder:')
            ready = False

        # If prepare returns True then it is ready and we start a build
        # If it returns false then we don't start a new build.
        if not ready:
            log.msg("worker %s can't build %s after all; re-queueing the "
                    "request" % (build, workerforbuilder))
            run_cleanups()
            defer.returnValue(False)
            return

        # ping the worker to make sure they're still there. If they've
        # fallen off the map (due to a NAT timeout or something), this
        # will fail in a couple of minutes, depending upon the TCP
        # timeout.
        #
        # TODO: This can unnecessarily suspend the starting of a build, in
        # situations where the worker is live but is pushing lots of data to
        # us in a build.
        log.msg("starting build %s.. pinging the worker %s" %
                (build, workerforbuilder))
        try:
            ping_success = yield workerforbuilder.ping()
        except Exception:
            log.err(failure.Failure(), 'while pinging worker before build:')
            ping_success = False

        if not ping_success:
            log.msg("worker ping failed; re-queueing the request")
            run_cleanups()
            defer.returnValue(False)
            return

        # The worker is ready to go. workerforbuilder.buildStarted() sets its
        # state to BUILDING (so we won't try to use it for any other builds).
        # This gets set back to IDLE by the Build itself when it finishes.
        workerforbuilder.buildStarted()
        cleanups.append(lambda: workerforbuilder.buildFinished())

        # tell the remote that it's starting a build, too
        try:
            yield workerforbuilder.worker.conn.remoteStartBuild(
                build.builder.name)
        except Exception:
            log.err(failure.Failure(), 'while calling remote startBuild:')
            run_cleanups()
            defer.returnValue(False)
            return

        # create the BuildStatus object that goes with the Build
        bs = self.builder_status.newBuild()

        # IMPORTANT: no yielding is allowed from here to the startBuild call!

        # it's possible that we lost the worker remote between the ping above
        # and now.  If so, bail out.  The build.startBuild call below transfers
        # responsibility for monitoring this connection to the Build instance,
        # so this check ensures we hand off a working connection.
        if not workerforbuilder.worker.conn:  # TODO: replace with isConnected()
            log.msg("worker disappeared before build could start")
            run_cleanups()
            defer.returnValue(False)
            return

        # let status know
        self.master.status.build_started(buildrequests[0].id, self.name, bs)

        # start the build. This will first set up the steps, then tell the
        # BuildStatus that it has started, which will announce it to the world
        # (through our BuilderStatus object, which is its parent).  Finally it
        # will start the actual build process.  This is done with a fresh
        # Deferred since _startBuildFor should not wait until the build is
        # finished.  This uses `maybeDeferred` to ensure that any exceptions
        # raised by startBuild are treated as deferred errbacks (see
        # http://trac.buildbot.net/ticket/2428).
        d = defer.maybeDeferred(build.startBuild, bs, self.expectations,
                                workerforbuilder)
        d.addCallback(lambda _: self.buildFinished(build, workerforbuilder))
        # this shouldn't happen. if it does, the worker will be wedged
        d.addErrback(
            log.err, 'from a running build; this is a '
            'serious error - please file a bug at http://buildbot.net')

        # make sure the builder's status is represented correctly
        self.updateBigStatus()

        defer.returnValue(True)

    def setupProperties(self, props):
        props.setProperty("buildername", self.name, "Builder")
        if self.config.properties:
            for propertyname in self.config.properties:
                props.setProperty(propertyname,
                                  self.config.properties[propertyname],
                                  "Builder")

    def buildFinished(self, build, wfb):
        """This is called when the Build has finished (either success or
        failure). Any exceptions during the build are reported with
        results=FAILURE, not with an errback."""

        # by the time we get here, the Build has already released the worker,
        # which will trigger a check for any now-possible build requests
        # (maybeStartBuilds)

        results = build.build_status.getResults()

        self.building.remove(build)
        if results == RETRY:
            d = self._resubmit_buildreqs(build)
            d.addErrback(log.err, 'while resubmitting a build request')
        else:
            complete_at_epoch = reactor.seconds()
            complete_at = epoch2datetime(complete_at_epoch)
            brids = [br.id for br in build.requests]

            d = self.master.data.updates.completeBuildRequests(
                brids, results, complete_at=complete_at)
            # nothing in particular to do with this deferred, so just log it if
            # it fails..
            d.addErrback(log.err, 'while marking build requests as completed')

        if wfb.worker:
            wfb.worker.releaseLocks()

        self.updateBigStatus()

    def _resubmit_buildreqs(self, build):
        brids = [br.id for br in build.requests]
        d = self.master.data.updates.unclaimBuildRequests(brids)

        @d.addCallback
        def notify(_):
            pass  # XXX method does not exist
            # self._msg_buildrequests_unclaimed(build.requests)

        return d

    # Build Creation

    @defer.inlineCallbacks
    def maybeStartBuild(self, workerforbuilder, breqs, _reactor=reactor):
        # This method is called by the botmaster whenever this builder should
        # start a set of buildrequests on a worker. Do not call this method
        # directly - use master.botmaster.maybeStartBuildsForBuilder, or one of
        # the other similar methods if more appropriate

        # first, if we're not running, then don't start builds; stopService
        # uses this to ensure that any ongoing maybeStartBuild invocations
        # are complete before it stops.
        if not self.running:
            defer.returnValue(False)
            return

        # If the build fails from here on out (e.g., because a worker has failed),
        # it will be handled outside of this function. TODO: test that!

        build_started = yield self._startBuildFor(workerforbuilder, breqs)
        defer.returnValue(build_started)

    # a few utility functions to make the maybeStartBuild a bit shorter and
    # easier to read

    def getCollapseRequestsFn(self):
        """Helper function to determine which collapseRequests function to use
        from L{_collapseRequests}, or None for no merging"""
        # first, seek through builder, global, and the default
        collapseRequests_fn = self.config.collapseRequests
        if collapseRequests_fn is None:
            collapseRequests_fn = self.master.config.collapseRequests
        if collapseRequests_fn is None:
            collapseRequests_fn = True

        # then translate False and True properly
        if collapseRequests_fn is False:
            collapseRequests_fn = None
        elif collapseRequests_fn is True:
            collapseRequests_fn = Builder._defaultCollapseRequestFn

        return collapseRequests_fn

    def _defaultCollapseRequestFn(self, brdict1, brdict2):
        return buildrequest.BuildRequest.canBeCollapsed(
            self.master, brdict1, brdict2)
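
The _startBuildFor() variant above builds a LIFO stack of cleanup callables so that every early exit (prepare failure, failed ping, lost connection) can unwind exactly the commitments made so far. A self-contained sketch of the same pattern, detached from Buildbot:

from __future__ import print_function

def demo_cleanup_stack():
    cleanups = []  # LIFO: the last commitment made is the first undone
    try:
        print("claim worker")
        cleanups.append(lambda: print("release worker"))
        print("append build to self.building")
        cleanups.append(lambda: print("remove build from self.building"))
        raise RuntimeError("ping failed")  # simulate an early abort
    except RuntimeError:
        while cleanups:
            cleanups.pop()()  # unwinds in reverse order of commitment
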
Example #6
        class C(object):

            def updateWorker(self, res):
                return res
            deprecatedWorkerClassMethod(locals(), updateWorker)
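
Example #6 exists purely to exercise deprecatedWorkerClassMethod from buildbot.worker_transition, which installs an old "slave"-vocabulary alias for a renamed method in the class namespace being built. A rough conceptual sketch of that mechanism (not the actual implementation; the real helper also derives the compat name automatically when it is not given):

import warnings

def deprecated_class_method(class_locals, method, compat_name):
    # conceptual sketch only: publish `compat_name` as a wrapper that warns
    # and then delegates to the new-style method
    def wrapper(self, *args, **kwargs):
        warnings.warn("'%s' is deprecated, use '%s' instead"
                      % (compat_name, method.__name__),
                      DeprecationWarning, stacklevel=2)
        return method(self, *args, **kwargs)
    class_locals[compat_name] = wrapper
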
Example #7
class WorkersConnectorComponent(base.DBConnectorComponent):
    # Documentation is in developer/database.rst

    def findWorkerId(self, name):
        tbl = self.db.model.workers
        # callers should verify this and give good user error messages
        assert identifiers.isIdentifier(50, name)
        return self.findSomethingId(tbl=tbl,
                                    whereclause=(tbl.c.name == name),
                                    insert_values=dict(
                                        name=name,
                                        info={},
                                        paused=0,
                                        graceful=0,
                                    ))

    def _deleteFromConfiguredWorkers_thd(self,
                                         conn,
                                         buildermasterids,
                                         workerid=None):
        cfg_tbl = self.db.model.configured_workers
        # batch deletes to avoid using too many variables
        for batch in self.doBatch(buildermasterids, 100):
            q = cfg_tbl.delete()
            q = q.where(cfg_tbl.c.buildermasterid.in_(batch))
            if workerid:
                q = q.where(cfg_tbl.c.workerid == workerid)
            conn.execute(q).close()

    # returns a Deferred which returns None
    def deconfigureAllWorkersForMaster(self, masterid):
        def thd(conn):
            # first remove the old configured buildermasterids for this
            # master; as sqlalchemy does not support delete with join, we
            # need to do that in 2 queries
            cfg_tbl = self.db.model.configured_workers
            bm_tbl = self.db.model.builder_masters
            j = cfg_tbl
            j = j.outerjoin(bm_tbl)
            q = sa.select([cfg_tbl.c.buildermasterid],
                          from_obj=[j],
                          distinct=True)
            q = q.where(bm_tbl.c.masterid == masterid)
            res = conn.execute(q)
            buildermasterids = [row['buildermasterid'] for row in res]
            res.close()
            self._deleteFromConfiguredWorkers_thd(conn, buildermasterids)

        return self.db.pool.do(thd)

    # returns a Deferred that returns None
    def workerConfigured(self, workerid, masterid, builderids):
        def thd(conn):

            cfg_tbl = self.db.model.configured_workers
            bm_tbl = self.db.model.builder_masters

            # get the buildermasterids that are configured
            if builderids:
                q = sa.select([bm_tbl.c.id], from_obj=[bm_tbl])
                q = q.where(bm_tbl.c.masterid == masterid)
                q = q.where(bm_tbl.c.builderid.in_(builderids))
                res = conn.execute(q)
                buildermasterids = {row['id'] for row in res}
                res.close()
            else:
                buildermasterids = set()

            j = cfg_tbl
            j = j.outerjoin(bm_tbl)
            q = sa.select([cfg_tbl.c.buildermasterid],
                          from_obj=[j],
                          distinct=True)
            q = q.where(bm_tbl.c.masterid == masterid)
            q = q.where(cfg_tbl.c.workerid == workerid)
            res = conn.execute(q)
            oldbuildermasterids = {row['buildermasterid'] for row in res}
            res.close()

            todeletebuildermasterids = oldbuildermasterids - buildermasterids
            toinsertbuildermasterids = buildermasterids - oldbuildermasterids
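            # e.g. old={1,2,3}, new={2,3,4}: delete {1}, insert {4}; pairs
            # present in both sets are left untouched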
            transaction = conn.begin()
            self._deleteFromConfiguredWorkers_thd(conn,
                                                  todeletebuildermasterids,
                                                  workerid)

            # and insert the new ones
            if toinsertbuildermasterids:
                q = cfg_tbl.insert()
                conn.execute(q, [{
                    'workerid': workerid,
                    'buildermasterid': buildermasterid
                } for buildermasterid in toinsertbuildermasterids]).close()

            transaction.commit()

        return self.db.pool.do(thd)

    @defer.inlineCallbacks
    def getWorker(self,
                  workerid=None,
                  name=None,
                  masterid=None,
                  builderid=None):
        if workerid is None and name is None:
            defer.returnValue(None)
        workers = yield self.getWorkers(_workerid=workerid,
                                        _name=name,
                                        masterid=masterid,
                                        builderid=builderid)
        if workers:
            defer.returnValue(workers[0])

    # returns a Deferred that returns a value
    def getWorkers(self,
                   _workerid=None,
                   _name=None,
                   masterid=None,
                   builderid=None,
                   paused=None,
                   graceful=None):
        def thd(conn):
            workers_tbl = self.db.model.workers
            conn_tbl = self.db.model.connected_workers
            cfg_tbl = self.db.model.configured_workers
            bm_tbl = self.db.model.builder_masters

            def selectWorker(q):
                return q

            # first, get the worker itself and the configured_on info
            j = workers_tbl
            j = j.outerjoin(cfg_tbl)
            j = j.outerjoin(bm_tbl)
            q = sa.select([
                workers_tbl.c.id, workers_tbl.c.name, workers_tbl.c.info,
                workers_tbl.c.paused, workers_tbl.c.graceful,
                bm_tbl.c.builderid, bm_tbl.c.masterid
            ],
                          from_obj=[j],
                          order_by=[workers_tbl.c.id])

            if _workerid is not None:
                q = q.where(workers_tbl.c.id == _workerid)
            if _name is not None:
                q = q.where(workers_tbl.c.name == _name)
            if masterid is not None:
                q = q.where(bm_tbl.c.masterid == masterid)
            if builderid is not None:
                q = q.where(bm_tbl.c.builderid == builderid)
            if paused is not None:
                q = q.where(workers_tbl.c.paused == int(paused))
            if graceful is not None:
                q = q.where(workers_tbl.c.graceful == int(graceful))

            rv = {}
            res = None
            lastId = None
            cfgs = None
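            # the outer join yields one row per (worker, configured
            # builder/master) pair; since rows are ordered by worker id, a
            # change in id marks the start of the next worker's group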
            for row in conn.execute(q):
                if row.id != lastId:
                    lastId = row.id
                    cfgs = []
                    res = {
                        'id': lastId,
                        'name': row.name,
                        'configured_on': cfgs,
                        'connected_to': [],
                        'workerinfo': row.info,
                        'paused': bool(row.paused),
                        'graceful': bool(row.graceful)
                    }
                    rv[lastId] = res
                if row.builderid and row.masterid:
                    cfgs.append({
                        'builderid': row.builderid,
                        'masterid': row.masterid
                    })

            # now go back and get the connection info for the same set of
            # workers
            j = conn_tbl
            if _name is not None:
                # note this is not an outer join; if there are unconnected
                # workers, they were captured in rv above
                j = j.join(workers_tbl)
            q = sa.select([conn_tbl.c.workerid, conn_tbl.c.masterid],
                          from_obj=[j],
                          order_by=[conn_tbl.c.workerid])

            if _workerid is not None:
                q = q.where(conn_tbl.c.workerid == _workerid)
            if _name is not None:
                q = q.where(workers_tbl.c.name == _name)
            if masterid is not None:
                q = q.where(conn_tbl.c.masterid == masterid)

            for row in conn.execute(q):
                if row.workerid not in rv:
                    continue
                rv[row.workerid]['connected_to'].append(row.masterid)

            return list(itervalues(rv))

        return self.db.pool.do(thd)

    deprecatedWorkerClassMethod(locals(),
                                getWorkers,
                                compat_name="getBuildslaves")

    # returns a Deferred that returns None
    def workerConnected(self, workerid, masterid, workerinfo):
        def thd(conn):
            conn_tbl = self.db.model.connected_workers
            q = conn_tbl.insert()
            try:
                conn.execute(q, {'workerid': workerid, 'masterid': masterid})
            except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
                # if the row is already present, silently fail..
                pass

            bs_tbl = self.db.model.workers
            q = bs_tbl.update(whereclause=(bs_tbl.c.id == workerid))
            conn.execute(q, info=workerinfo)

        return self.db.pool.do(thd)

    # returns a Deferred that returns None
    def workerDisconnected(self, workerid, masterid):
        def thd(conn):
            tbl = self.db.model.connected_workers
            q = tbl.delete(whereclause=(tbl.c.workerid == workerid)
                           & (tbl.c.masterid == masterid))
            conn.execute(q)

        return self.db.pool.do(thd)

    # returns a Deferred that returns None
    def setWorkerState(self, workerid, paused, graceful):
        def thd(conn):
            tbl = self.db.model.workers
            q = tbl.update(whereclause=(tbl.c.id == workerid))
            conn.execute(q, paused=int(paused), graceful=int(graceful))

        return self.db.pool.do(thd)
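
Taken together, the component's lifecycle methods are normally called in this order when a worker shows up. A hedged usage sketch; register_worker() and its call site are hypothetical, but the three component calls are the ones defined above:

from twisted.internet import defer

@defer.inlineCallbacks
def register_worker(master, name, masterid, builderids, info):
    workers = master.db.workers  # the WorkersConnectorComponent above
    workerid = yield workers.findWorkerId(name)  # get-or-create the row
    yield workers.workerConfigured(workerid, masterid, builderids)
    yield workers.workerConnected(workerid, masterid, workerinfo=info)
    defer.returnValue(workerid)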