def test_flush_duringTurn(self):
    """Flushing the eventual queue from inside a turn must still complete."""
    finished = defer.Deferred()

    def flush_from_turn():
        # flush while a turn is already in progress and chain its
        # completion to the test's deferred
        flush_d = eventual.flushEventualQueue()
        flush_d.addCallback(finished.callback)

    eventual.eventually(flush_from_turn)
    return finished
def test_eventually_err(self):
    """An exception raised by an eventual callback is routed to log.err."""
    # monkey-patch log.err; this is restored by tearDown
    def fake_err():
        self.results.append("err")
    log.err = fake_err

    def failing_cb():
        raise RuntimeError("should not cause test failure")

    eventual.eventually(failing_cb)
    return self.assertResults(['err'])
def release(self, owner, access):
    """
    Release the lock held by `owner` with the given access mode, then
    wake any waiters that are now able to acquire it.
    """
    assert isinstance(access, LockAccess)
    debuglog("%s release(%s, %s)" % (self, owner, access.mode))
    entry = (owner, access)
    # idiom fix: 'entry not in' instead of 'not entry in'
    if entry not in self.owners:
        debuglog("%s already released" % self)
        return
    self.owners.remove(entry)
    # who can we wake up?
    # After an exclusive access, we may need to wake up several waiting.
    # Break out of the loop when the first waiting client should not be
    # awakened.
    num_excl, num_counting = self._getOwnersCount()
    for i, (w_owner, w_access, d) in enumerate(self.waiting):
        if w_access.mode == 'counting':
            # a counting waiter may run unless an exclusive holder exists
            # or the counting quota is exhausted
            if num_excl > 0 or num_counting == self.maxCount:
                break
            num_counting += 1
        else:
            # w_access.mode == 'exclusive': only wakeable when nothing
            # else holds the lock at all
            if num_excl > 0 or num_counting > 0:
                break
            num_excl += 1
        # If the waiter has a deferred, wake it up and clear the deferred
        # from the wait queue entry to indicate that it has been woken.
        if d:
            self.waiting[i] = (w_owner, w_access, None)
            eventually(d.callback, self)
    # notify any listeners
    self.release_subs.deliver()
def testfn():
    """Return a Deferred that fires (recording 'TM') in a later turn."""
    completion = defer.Deferred()

    def mark_done():
        events.append('TM')
        completion.callback(None)

    eventually(mark_done)
    return completion
def buildFinished(self, text, results):
    """This method must be called when the last Step has completed. It marks
    the Build as complete and returns the Builder to the 'idle' state.

    It takes two arguments which describe the overall build status: text,
    results. 'results' is one of SUCCESS, WARNINGS, or FAILURE.

    If 'results' is SUCCESS or WARNINGS, we will permit any dependant builds
    to start. If it is 'FAILURE', those builds will be abandoned."""
    self.finished = True
    if self.remote:
        # we no longer care about a lost connection once the build is done
        self.remote.dontNotifyOnDisconnect(self.lostRemote)
        self.remote = None
    self.results = results

    log.msg(" %s: build finished" % self)
    self.build_status.setText(text)
    self.build_status.setResults(results)
    self.build_status.buildFinished()
    if self.progress and results == SUCCESS:
        # XXX: also test a 'timing consistent' flag?
        log.msg(" setting expectations for next time")
        self.builder.setExpectations(self.progress)
    # release locks in a later reactor turn, then fire the build's deferred
    eventually(self.releaseLocks)
    self.deferred.callback(self)
    self.deferred = None
def content(self, request, cxt):
    """Render the root page; also services the /shutdown and
    /cancel_shutdown actions, which require 'cleanShutdown' permission.

    Fix: `defer.returnValue()` raises to terminate the generator, so the
    original `yield defer.returnValue(...)` never reached its yield; the
    misleading dead `yield` has been removed.
    """
    status = self.getStatus(request)
    res = yield self.getAuthz(request).actionAllowed("cleanShutdown",
                                                     request)

    if request.path == '/shutdown':
        if res:
            eventually(status.cleanShutdown)
            defer.returnValue(redirectTo("/", request))
            return
        else:
            defer.returnValue(
                redirectTo(path_to_authzfail(request), request))
            return
    elif request.path == '/cancel_shutdown':
        if res:
            eventually(status.cancelCleanShutdown)
            defer.returnValue(redirectTo("/", request))
            return
        else:
            defer.returnValue(
                redirectTo(path_to_authzfail(request), request))
            return

    cxt.update(
        shutting_down=status.shuttingDown,
        shutdown_url=request.childLink("shutdown"),
        cancel_shutdown_url=request.childLink("cancel_shutdown"),
    )
    template = request.site.buildbot_service.templates.get_template(
        "root.html")
    defer.returnValue(template.render(**cxt))
def _buildRequestCallback(self, notif):
    """Fan a 'build request submitted' notification out to observers."""
    name = notif["buildername"]
    if name not in self._builder_observers:
        return
    brs = buildrequest.BuildRequestStatus(name, notif["brid"], self)
    for obs in self._builder_observers[name]:
        if hasattr(obs, "requestSubmitted"):
            eventually(obs.requestSubmitted, brs)
def _work_done(res):
    # one queue entry is complete; drop it and, if more remain,
    # schedule another pass through _process on the eventual queue
    log.msg("Completed a piece of work")
    del self.queue[0]
    if self.queue:
        log.msg("Preparing next piece of work")
        eventually(self._process)
    return res
def perspective_subscribe(self, mode, interval, target):
    """The remote client wishes to subscribe to some set of events.
    'target' will be sent remote messages when these events happen.
    'mode' indicates which events are desired: it is a string with one
    of the following values:

    'builders': builderAdded, builderRemoved
    'builds': those plus builderChangedState, buildStarted, buildFinished
    'steps': all those plus buildETAUpdate, stepStarted, stepFinished
    'logs': all those plus stepETAUpdate, logStarted, logFinished
    'full': all those plus logChunk (with the log contents)

    Messages are defined by buildbot.interfaces.IStatusReceiver .
    'interval' is used to specify how frequently ETAUpdate messages
    should be sent.

    Raising or lowering the subscription level will take effect starting
    with the next build or step."""
    assert mode in ("builders", "builds", "steps", "logs", "full")
    assert target
    twlog.msg("PB subscribe(%s)" % mode)

    self.client = target
    self.subscribed = mode
    self.interval = interval
    self.subscribed_to.append(self.status)
    # wait a moment before subscribing, so the new-builder messages
    # won't appear before this remote method finishes
    eventually(self.status.subscribe, self)
    return None
def _db_builds_changed(self, category, bid):
    """Notify per-buildrequest observers about a changed build."""
    brid, buildername, buildnum = self.db.get_build_info(bid)
    if brid not in self._buildreq_observers:
        return
    bs = self.getBuilder(buildername).getBuild(buildnum)
    if not bs:
        return
    for watcher in self._buildreq_observers[brid]:
        eventually(watcher, bs)
def fireTestEvent(self, name, fire_with=None):
    """Fire all watchers registered under `name`, passing `fire_with`
    (default: this fixture itself)."""
    if fire_with is None:
        fire_with = self
    # swap the watcher list out before firing so callbacks that
    # re-register are not fired in this round
    pending, self.watchers[name] = self.watchers[name], []
    for watcher in pending:
        eventually(watcher.callback, fire_with)
def buildFinished(self, text, results):
    """This method must be called when the last Step has completed. It marks
    the Build as complete and returns the Builder to the 'idle' state.

    It takes two arguments which describe the overall build status: text,
    results. 'results' is one of the possible results (see
    buildbot.process.results).

    If 'results' is SUCCESS or WARNINGS, we will permit any dependant builds
    to start. If it is 'FAILURE', those builds will be abandoned."""
    self.stopBuildConsumer.stopConsuming()
    self.finished = True
    if self.conn:
        # drop the worker connection subscription; the build no longer
        # cares about disconnects
        self.subs.unsubscribe()
        self.subs = None
        self.conn = None
    log.msg(" %s: build finished" % self)
    # fold the final step's result into the overall build result
    self.results = worst_status(self.results, results)
    self.build_status.setText(text)
    self.build_status.setResults(self.results)
    self.build_status.buildFinished()
    # release locks in a later reactor turn, then fire the build's deferred
    eventually(self.releaseLocks)
    self.deferred.callback(self)
    self.deferred = None
def br_consumer_cb(self, key, msg):
    """mq consumer: announce a submitted build request to observers."""
    name = msg['buildername']
    if name not in self._builder_observers:
        return
    brs = buildrequest.BuildRequestStatus(name, msg['brid'], self)
    for obs in self._builder_observers[name]:
        if hasattr(obs, 'requestSubmitted'):
            eventually(obs.requestSubmitted, brs)
def _attach(ign):
    # attach the newly created services, then rebuild the
    # upstream-name -> subscribers index from scratch
    for svc in added:
        svc.setServiceParent(self)
    self.upstream_subscribers = collections.defaultdict(list)
    for svc in list(self):
        if svc.upstream_name:
            self.upstream_subscribers[svc.upstream_name].append(svc)
    eventually(self.trigger)
def resumeProducing(self):
    """Resume producing, deferred by one reactor turn (see workaround)."""
    # Twisted-1.3.0 has a bug which causes hangs when resumeProducing
    # calls transport.write (there is a recursive loop, fixed in 2.0 in
    # t.i.abstract.FileDescriptor.doWrite by setting the producerPaused
    # flag *before* calling resumeProducing). To work around this, we
    # just put off the real resumeProducing for a moment. This probably
    # has a performance hit, but I'm going to assume that the log files
    # are not retrieved frequently enough for it to be an issue.
    eventually(self._resumeProducing)
def msb_stopNow():
    # record that 'A' started, stop the service while it is still
    # in flight, and only complete 'A' in a later eventual turn
    self.maybeStartBuild_calls.append('A')
    stop_d = self.brd.stopService()
    stop_d.addCallback(
        lambda _: self.maybeStartBuild_calls.append('(stopped)'))

    done_d = defer.Deferred()

    def finish_a():
        self.maybeStartBuild_calls.append('A-finished')
        done_d.callback(None)

    eventually(finish_a)
    return done_d
def br_consumer_cb(self, key, msg):
    """Resolve a builderid to its name, then notify that builder's
    observers that a build request was submitted."""
    builderid = msg["builderid"]
    buildername = None
    # convert builderid to buildername
    for b in itervalues(self.botmaster.builders):
        if builderid == (yield b.getBuilderId()):
            buildername = b.name
            break
    if buildername not in self._builder_observers:
        return
    brs = buildrequest.BuildRequestStatus(buildername,
                                          msg["buildrequestid"], self)
    for observer in self._builder_observers[buildername]:
        if hasattr(observer, "requestSubmitted"):
            eventually(observer.requestSubmitted, brs)
def _handle_buildrequest_event(self, mode, brids):
    """Dispatch 'added'/'cancelled' buildrequest events to observers."""
    for brid in brids:
        buildername = self.db.get_buildername_for_brid(brid)
        if buildername not in self._builder_observers:
            continue
        brs = buildrequest.BuildRequestStatus(brid, self, self.db)
        for observer in self._builder_observers[buildername]:
            if mode == "added":
                if hasattr(observer, 'requestSubmitted'):
                    eventually(observer.requestSubmitted, brs)
            elif hasattr(observer, 'requestCancelled'):
                builder = self.getBuilder(buildername)
                eventually(observer.requestCancelled, builder, brs)
def _end_operation(self, t):
    """Mark operation `t` finished; flush queued notifications once no
    operations remain active."""
    # this is always invoked from the main thread, but is wrapped by
    # synchronized= and threadable.synchronous(), since it touches
    # self._pending_notifications, which is also touched by
    # runInteraction threads
    self._active_operations.discard(t)
    if self._active_operations:
        return
    for (category, args) in self._pending_notifications:
        # in the distributed system, this will be a
        # transport.write(" ".join([category] + [str(a) for a in args]))
        eventually(self.send_notification, category, args)
    self._pending_notifications = []
def _db_buildset_changed(self, bsid):
    """Examine a changed buildset and fire any success/finished waiters."""
    # bail out quickly when nobody is waiting on this buildset
    awaited_success = bsid in self._buildset_success_waiters
    awaited_finish = bsid in self._buildset_finished_waiters
    if not (awaited_success or awaited_finish):
        return
    successful, finished = self.db.examine_buildset(bsid)
    bss = buildset.BuildSetStatus(bsid, self, self.db)
    # NOTE(review): pop() without a default will KeyError if bsid is only
    # present in one of the two waiter dicts — confirm callers guarantee it
    if successful is not None:
        for waiter in self._buildset_success_waiters.pop(bsid):
            eventually(waiter.callback, bss)
    if finished:
        for waiter in self._buildset_finished_waiters.pop(bsid):
            eventually(waiter.callback, bss)
def buildFinished(self, text, results):
    """This method must be called when the last Step has completed. It marks
    the Build as complete and returns the Builder to the 'idle' state.

    It takes two arguments which describe the overall build status: text,
    results. 'results' is one of the possible results (see
    buildbot.process.results).

    If 'results' is SUCCESS or WARNINGS, we will permit any dependent builds
    to start. If it is 'FAILURE', those builds will be abandoned."""
    try:
        self.stopBuildConsumer.stopConsuming()
        self.finished = True
        if self.conn:
            # drop the worker connection subscription
            self.subs.unsubscribe()
            self.subs = None
            self.conn = None
        log.msg(" %s: build finished" % self)
        # fold the final step's result into the overall build result
        self.results = worst_status(self.results, results)
        self.build_status.setText(text)
        self.build_status.setResults(self.results)
        self.build_status.buildFinished()
        # release locks in a later turn; update the active-build metric
        eventually(self.releaseLocks)
        metrics.MetricCountEvent.log('active_builds', -1)

        # persist the final state string and result via the data API
        yield self.master.data.updates.setBuildStateString(
            self.buildid, bytes2unicode(" ".join(text)))
        yield self.master.data.updates.finishBuild(self.buildid,
                                                   self.results)

        if self.results == EXCEPTION:
            # When a build has an exception, put the worker in quarantine
            # for a few seconds to make sure we try next build with another
            # worker
            self.workerforbuilder.worker.putInQuarantine()
        elif self.results != RETRY:
            # This worker looks sane if status is neither retry or exception
            # Avoid a race in case the build step reboot the worker
            if self.workerforbuilder.worker is not None:
                self.workerforbuilder.worker.resetQuarantine()

        # mark the build as finished
        self.workerforbuilder.buildFinished()
        self.builder.buildFinished(self, self.workerforbuilder)

        self._tryScheduleBuildsAfterLockUnlock(build_finished=True)
    except Exception:
        log.err(None, 'from finishing a build; this is a '
                'serious error - please file a bug at http://buildbot.net')
def remote_complete(self, failure=None):
    """
    Called by the slave's L{buildbot.slave.bot.SlaveBuilder} to
    notify me the remote command has finished.

    @type  failure: L{twisted.python.failure.Failure} or None

    @rtype: None
    """
    # record liveness for keepalive/timeout tracking
    self.buildslave.messageReceivedFromSlave()

    # call the real remoteComplete a moment later, but first return an
    # acknowledgement so the slave can retire the completion message.
    if self.active:
        eventually(self._finished, failure)
    return None
def remote_complete(self, failure=None):
    """
    Called by the worker's
    L{buildbot_worker.base.WorkerForBuilderBase.commandComplete} to
    notify me the remote command has finished.

    @type  failure: L{twisted.python.failure.Failure} or None

    @rtype: None
    """
    # record liveness for keepalive/timeout tracking
    self.worker.messageReceivedFromWorker()

    # call the real remoteComplete a moment later, but first return an
    # acknowledgement so the worker can retire the completion message.
    if self.active:
        eventually(self._finished, failure)
    return None
def detached(self, mind):
    """Handle a slave disconnecting: tear down state and notify watchers."""
    metrics.MetricCountEvent.log("AbstractBuildSlave.attached_slaves", -1)
    self.slave = None
    self._old_builder_list = []
    self.slave_status.removeGracefulWatcher(self._gracefulChanged)
    self.slave_status.setConnected(False)
    log.msg("BuildSlave.detached(%s)" % self.slavename)
    self.botmaster.master.status.slaveDisconnected(self.slavename)
    self.stopKeepaliveTimer()
    self.releaseLocks()

    # notify watchers, but do so in the next reactor iteration so that
    # any further detached() action by subclasses happens first
    def deliver_detached():
        pending = self.detached_subs
        self.detached_subs = None
        pending.deliver()

    eventually(deliver_detached)
def start(self): self.checkWorkerHasCommand("downloadFile") # we are currently in the buildmaster's basedir, so any non-absolute # paths will be interpreted relative to that source = os.path.expanduser(self.mastersrc) workerdest = self.workerdest log.msg("FileDownload started, from master %r to worker %r" % (source, workerdest)) self.descriptionDone = "downloading to %s" % os.path.basename( workerdest) # setup structures for reading the file try: fp = open(source, 'rb') except IOError: # if file does not exist, bail out with an error self.addCompleteLog('stderr', 'File %r not available at master' % source) # TODO: once BuildStep.start() gets rewritten to use # maybeDeferred, just re-raise the exception here. eventually(BuildStep.finished, self, FAILURE) return fileReader = remotetransfer.FileReader(fp) # default arguments args = { 'maxsize': self.maxsize, 'reader': fileReader, 'blocksize': self.blocksize, 'workdir': self.workdir, 'mode': self.mode, } if self.workerVersionIsOlderThan('downloadFile', '3.0'): args['slavedest'] = workerdest else: args['workerdest'] = workerdest cmd = makeStatusRemoteCommand(self, 'downloadFile', args) d = self.runTransferCommand(cmd) d.addCallback(self.finished).addErrback(self.failed)
def _startStep_3(self, doStep):
    """Third phase of startStep: run start(), handling skip and failure.

    Fix: the original bare ``except:`` also swallowed KeyboardInterrupt
    and SystemExit; narrowed to ``except Exception:``.  ``Failure()``
    still captures the active exception.
    """
    doStep = doStep[0]
    try:
        if doStep:
            result = yield defer.maybeDeferred(self.start)
            if result == SKIPPED:
                doStep = False
    except Exception:
        log.msg("BuildStep.startStep exception in .start")
        self.failed(Failure())

    if not doStep:
        self.step_status.setText(self.describe(True) + ['skipped'])
        self.step_status.setSkipped(True)
        # this return value from self.start is a shortcut to finishing
        # the step immediately; we skip calling finished() as
        # subclasses may have overridden that an expect it to be called
        # after start() (bug #837)
        eventually(self._finishFinished, SKIPPED)
def start(self):
    """Begin the master-to-slave file download (old-style step entry)."""
    version = self.slaveVersion("downloadFile")
    if not version:
        m = "slave is too old, does not know about downloadFile"
        raise BuildSlaveTooOldError(m)

    # we are currently in the buildmaster's basedir, so any non-absolute
    # paths will be interpreted relative to that
    source = os.path.expanduser(self.mastersrc)
    slavedest = self.slavedest
    log.msg("FileDownload started, from master %r to slave %r" %
            (source, slavedest))

    self.step_status.setText(['downloading', "to",
                              os.path.basename(slavedest)])

    # setup structures for reading the file
    try:
        fp = open(source, 'rb')
    except IOError:
        # if file does not exist, bail out with an error
        self.addCompleteLog('stderr',
                            'File %r not available at master' % source)
        # TODO: once BuildStep.start() gets rewritten to use
        # maybeDeferred, just re-raise the exception here.
        eventually(BuildStep.finished, self, FAILURE)
        return
    fileReader = _FileReader(fp)

    # default arguments
    args = {
        'slavedest': slavedest,
        'maxsize': self.maxsize,
        'reader': fileReader,
        'blocksize': self.blocksize,
        'workdir': self._getWorkdir(),
        'mode': self.mode,
    }

    self.cmd = makeStatusRemoteCommand(self, 'downloadFile', args)
    d = self.runCommand(self.cmd)
    d.addCallback(self.finished).addErrback(self.failed)
def requestAvatarId(self, creds):
    """Authenticate `creds` against the configured user table.

    Raises UnauthorizedLogin on bad password or unknown user; returns
    the username on success."""
    p = Properties()
    p.master = self.master
    username = bytes2unicode(creds.username)
    try:
        # serialize against master initialization
        yield self.master.initLock.acquire()
        if username in self.users:
            password, _ = self.users[username]
            # the stored password may be a renderable (e.g. a Secret)
            password = yield p.render(password)
            matched = yield defer.maybeDeferred(
                creds.checkPassword, unicode2bytes(password))
            if not matched:
                log.msg("invalid login from user '{}'".format(username))
                raise error.UnauthorizedLogin()
            return creds.username
        log.msg("invalid login from unknown user '{}'".format(username))
        raise error.UnauthorizedLogin()
    finally:
        # break the callback stack by returning to the reactor
        # before waking up other waiters
        eventually(self.master.initLock.release)
def updateInfo(self, **kwargs):
    """Merge new info values in; notify watchers only on a real change."""
    # round-trip the value through json to 'normalize' it and
    # to ensure bad values dont get stuffed into the dictionary
    new_values = json.loads(json.dumps(kwargs))

    for special_key in ['admin', 'host']:
        if special_key in new_values:
            new_values[special_key] = ascii2unicode(new_values[special_key])

    # try to see if anything changed (so we should inform watchers)
    changed = False
    for k, v in new_values.iteritems():
        if k not in self.info or self.info[k] != v:
            changed = True
            break
    if not changed:
        # nothing changed so just bail now
        return

    self.info.update(new_values)
    for watcher in self.info_change_callbacks:
        eventually(watcher, self.getInfoAsDict())
def __init__(self, slave, name, isConnected=True, reactor=reactor):
    """Fake slave wrapper used in tests."""
    self.slavename = name
    self.slave = slave
    self.isConnectedResult = isConnected
    self.reactor = reactor
    self.call_on_detach = lambda: None

    # set up for loseConnection to cause the slave to detach, but not
    # immediately
    def drop_connection():
        # mark disconnected and run (then clear) the detach hook
        self.isConnectedResult = False
        self.call_on_detach()
        self.call_on_detach = None

    self.slave.broker.transport.loseConnection = (
        lambda: eventually(drop_connection))
def test_eventually_butNotNow(self):
    """The callback must not run synchronously; it fires in a later turn.

    Fix: replaced the double-negative ``assertFalse(self.results != [])``
    with the direct, clearer ``assertEqual(self.results, [])``; failure
    output also becomes more informative.
    """
    eventual.eventually(self.cb, 1)
    # nothing may have fired yet at this point
    self.assertEqual(self.results, [])
    return self.assertResults([(1, )])
def chain(n):
    # record this value, then schedule the next link until we reach zero
    self.results.append(n)
    if n > 0:
        eventual.eventually(chain, n - 1)
def test_eventually_order(self):
    """Callbacks fire in the order they were scheduled."""
    for value in (1, 2, 3):
        eventual.eventually(self.cb, value)
    return self.assertResults([(1, ), (2, ), (3, )])
def do_notifies(bsdict):
    # only fire the finished-waiters once the buildset is complete
    bss = buildset.BuildSetStatus(bsdict, self)
    if not bss.isFinished():
        return
    for waiter in self._buildset_finished_waiters.pop(bsid):
        eventually(waiter.callback, bss)
def tree(n):
    # record n, then fan out two children until the countdown ends
    self.results.append(n)
    if n <= 0:
        return
    for _ in range(2):
        eventual.eventually(tree, n - 1)
def send_notification(self, category, args):
    """Deliver a notification to every subscriber of `category`."""
    # TODO: remove
    # in the distributed system, this will be invoked by lineReceived()
    for subscriber in self._subscribers[category]:
        eventually(subscriber, category, *args)
def _one_done(self, ignored):
    # one work item finished; schedule the next loop iteration on the
    # eventual queue rather than recursing synchronously
    eventually(self._loop_next)
def _disconnected(rref):
    # fire the deferred in a later reactor turn, not re-entrantly
    eventually(d.callback, None)
def build_started(self, brid, buildername, buildnum):
    """Notify per-buildrequest observers that their build has started."""
    if brid not in self._buildreq_observers:
        return
    bs = self.getBuilder(buildername).getBuild(buildnum)
    for watcher in self._buildreq_observers[brid]:
        eventually(watcher, bs)
def notify_old(oldbuilds):
    # deliver each pre-existing build to the observer, one turn later
    for bs in oldbuilds:
        eventually(observer, bs)
def wait_shutdown_started(self):
    """Return a Deferred that fires (in a later turn) once the
    connection is dropped."""
    d = defer.Deferred()

    def fire_later():
        eventually(d.callback, None)

    self.notifyOnDisconnect(fire_later)
    return d
def start(self):
    # complete immediately, but via the eventual queue so callers see the
    # step finish (status 0) in a later reactor turn
    eventually(self.finished, 0)
def sighup(*args):
    # signal handler: defer the reconfig out of signal context
    eventually(self.reconfig)
def setGraceful(self, graceful):
    """Set the graceful shutdown flag, and notify all the watchers"""
    self.graceful_shutdown = graceful
    for watcher in self.graceful_callbacks:
        eventually(watcher, graceful)
def test_eventually_args(self):
    """Positional and keyword arguments are passed through to the callback."""
    eventual.eventually(self.cb, 1, 2, a='a')
    return self.assertResults([(1, 2, dict(a='a'))])
def setPaused(self, isPaused):
    """Record the new paused state and notify registered callbacks."""
    self.paused = isPaused
    for watcher in self.pause_callbacks:
        eventually(watcher, isPaused)
def test_eventually_calls(self):
    """A no-argument callback scheduled with eventually() does fire."""
    eventual.eventually(self.cb)
    return self.assertResults([()])
def startStep(self, remote):
    """Run the step: acquire locks, evaluate doStepIf, render renderables,
    then invoke run() (new-style) or start() (old-style); returns (via
    deferred) the step result once self.deferred fires."""
    self.remote = remote

    isNew = self.isNewStyle()
    # keep references to the real finished/failed; new-style steps get
    # poisoned versions so they cannot call them directly
    old_finished = self.finished
    old_failed = self.failed
    if isNew:
        def nope(*args, **kwargs):
            raise AssertionError("new-style steps must not call "
                                 "this method")
        self.finished = nope
        self.failed = nope

    # convert all locks into their real form
    self.locks = [(self.build.builder.botmaster.getLockByID(access.lockid),
                   access)
                  for access in self.locks]
    # then narrow SlaveLocks down to the slave that this build is being
    # run on
    self.locks = [(l.getLock(self.build.slavebuilder.slave), la)
                  for l, la in self.locks]
    for l, la in self.locks:
        if l in self.build.locks:
            log.msg("Hey, lock %s is claimed by both a Step (%s) and the"
                    " parent Build (%s)" % (l, self, self.build))
            raise RuntimeError("lock claimed by both Step and Build")

    self.deferred = defer.Deferred()

    # Set the step's text here so that the stepStarted notification sees
    # the correct description
    self._step_status.setText(self.describe(False))
    self._step_status.stepStarted()

    try:
        # set up locks
        yield self.acquireLocks()

        if self.stopped:
            old_finished(EXCEPTION)
            defer.returnValue((yield self.deferred))

        # set up progress
        if self.progress:
            self.progress.start()

        # check doStepIf
        if isinstance(self.doStepIf, bool):
            doStep = self.doStepIf
        else:
            doStep = yield self.doStepIf(self)

        # render renderables in parallel
        renderables = []
        accumulateClassList(self.__class__, 'renderables', renderables)

        def setRenderable(res, attr):
            setattr(self, attr, res)

        dl = []
        for renderable in renderables:
            d = self.build.render(getattr(self, renderable))
            d.addCallback(setRenderable, renderable)
            dl.append(d)
        yield defer.gatherResults(dl)

        try:
            if doStep:
                if isNew:
                    result = yield self.run()
                    assert isinstance(result, int), \
                        "run must return an integer (via Deferred)"
                    old_finished(result)
                else:
                    result = yield self.start()
                    if result == SKIPPED:
                        doStep = False
        except Exception:
            log.msg("BuildStep.startStep exception in .start")
            self.finished = old_finished
            old_failed(Failure())

        if not doStep:
            self._step_status.setText(self.describe(True) + ['skipped'])
            self._step_status.setSkipped(True)
            # this return value from self.start is a shortcut to finishing
            # the step immediately; we skip calling finished() as
            # subclasses may have overridden that an expect it to be called
            # after start() (bug #837)
            eventually(self._finishFinished, SKIPPED)
    except Exception:
        # restore the real finished() before reporting the failure
        self.finished = old_finished
        old_failed(Failure())

    # and finally, wait for self.deferred to get triggered and return its
    # value
    defer.returnValue((yield self.deferred))
def run(self):
    """Return a Deferred that fires with 0 in a later reactor turn."""
    d = defer.Deferred()
    # FIXME: this uses real reactor instead of fake one
    eventually(d.callback, 0)
    return d
def remote_setBuilderList(self, builder_info):
    """Fake remote call: record the builder names and hand back a
    name -> FakeSlaveBuilder mapping.

    Fix: the original loop variable ``dir`` shadowed the builtin
    ``dir()``; renamed (the build directory is unused here anyway).
    """
    builder_names = [name for name, _builddir in builder_info]
    slbuilders = [FakeSlaveBuilder() for _ in builder_names]
    eventually(self.callWhenBuilderListSet)
    return dict(zip(builder_names, slbuilders))
def sigusr1(*args):
    # signal handler: defer the clean shutdown out of signal context
    eventually(self.botmaster.cleanShutdown)
def build_started(self, brid, buildername, build_status):
    """Notify per-buildrequest observers that their build has started."""
    if brid not in self._buildreq_observers:
        return
    for watcher in self._buildreq_observers[brid]:
        eventually(watcher, build_status)