def trigger(self, ssid, set_props=None):
    """Trigger this scheduler with the given sourcestamp ID. Returns a
    deferred that will fire when the buildset is finished."""
    # properties for this buildset are composed of our own properties,
    # potentially overridden by anything from the triggering build
    props = Properties()
    props.updateFromProperties(self.properties)
    if set_props:
        props.updateFromProperties(set_props)

    # note that this does not use the buildset subscriptions mechanism, as
    # the duration of interest to the caller is bounded by the lifetime of
    # this process.
    if ssid:
        d = self.addBuildsetForSourceStamp(reason=self.reason, ssid=ssid,
                                           properties=props)
    else:
        d = self.addBuildsetForLatest(reason=self.reason, properties=props)

    def setup_waiter(bsid):
        self._waiters[bsid] = d = defer.Deferred()
        self._updateWaiters()
        return d
    d.addCallback(setup_waiter)
    return d
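A hypothetical caller sketch for the trigger() variant above; `scheduler`, `ssid`, and `my_props` are illustrative stand-ins, not names from the snippet. Because the returned deferred fires only when the triggered buildset finishes, a blocking Trigger-style step can simply chain on it:

from twisted.python import log

def _notify(results):
    # fires once the triggered buildset has finished
    log.msg("triggered buildset finished: %r" % (results,))

d = scheduler.trigger(ssid, set_props=my_props)  # my_props: a Properties instance
d.addCallback(_notify)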
class TestProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()

    def testDictBehavior(self):
        self.props.setProperty("do-tests", 1, "scheduler")
        self.props.setProperty("do-install", 2, "scheduler")

        self.assert_(self.props.has_key('do-tests'))
        self.failUnlessEqual(self.props['do-tests'], 1)
        self.failUnlessEqual(self.props['do-install'], 2)
        self.assertRaises(KeyError, lambda: self.props['do-nothing'])
        self.failUnlessEqual(self.props.getProperty('do-install'), 2)

    def testUpdate(self):
        self.props.setProperty("x", 24, "old")
        newprops = {'a': 1, 'b': 2}
        self.props.update(newprops, "new")

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateFromProperties(self):
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new")
        newprops.setProperty('b', 2, "new")
        self.props.updateFromProperties(newprops)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')
def TryJobBaseGetProps(self, builder, options):
    """Override of try_job_base.TryJobBase.get_props:
    http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/master/try_job_base.py?view=markup
    We modify it to add "baseurl".
    """
    keys = (
############################### Added by borenet ###############################
        'baseurl',
################################################################################
        'clobber',
        'issue',
        'patchset',
        'requester',
        'rietveld',
        'root',
        'try_job_key',
    )
    # All these settings have no meaning when False or not set, so don't set
    # them in that case.
    properties = dict((i, options[i]) for i in keys if options.get(i))
    properties['testfilter'] = options['bot'].get(builder, None)
    # pylint: disable=W0212
    props = Properties()
    props.updateFromProperties(self.properties)
    props.update(properties, self._PROPERTY_SOURCE)
    return props
@defer.inlineCallbacks  # needed: the body yields deferreds and uses returnValue
def rebuildBuild(self, bs, reason="<rebuild, no reason given>",
                 extraProperties=None):
    if not bs.isFinished():
        return

    # Make a copy of the properties so as not to modify the original build.
    properties = Properties()
    # Don't include runtime-set properties in a rebuild request
    properties.updateFromPropertiesNoRuntime(bs.getProperties())
    if extraProperties:  # was inverted ("is None"), which would pass None along
        properties.updateFromProperties(extraProperties)

    properties_dict = dict((k, (v, s)) for (k, v, s) in properties.asList())
    ssList = bs.getSourceStamps(absolute=True)

    if ssList:
        sourcestampsetid = yield ssList[0].getSourceStampSetId(self.control.master)
        dl = []
        for ss in ssList[1:]:
            # add deferred to the list
            dl.append(ss.addSourceStampToDatabase(self.control.master,
                                                  sourcestampsetid))
        yield defer.gatherResults(dl)

        bsid, brids = yield self.control.master.addBuildset(
            builderNames=[self.original.name],
            sourcestampsetid=sourcestampsetid,
            reason=reason,
            properties=properties_dict)
        defer.returnValue((bsid, brids))
    else:
        log.msg('Cannot start rebuild, rebuild has no sourcestamps for a new build')
        defer.returnValue(None)
def perspective_try(self, branch, revision, patch, builderNames, properties={}):
    log.msg("user %s requesting build on builders %s" % (self.username,
                                                         builderNames))
    for b in builderNames:
        if not b in self.parent.builderNames:
            log.msg("%s got job with builder %s" % (self, b))
            log.msg(" but that wasn't in our list: %s"
                    % (self.parent.builderNames,))
            return
    ss = SourceStamp(branch, revision, patch)
    reason = "'try' job from user %s" % self.username

    # roll the specified props in with our inherited props
    combined_props = Properties()
    combined_props.updateFromProperties(self.parent.properties)
    combined_props.update(properties, "try build")

    bs = buildset.BuildSet(builderNames, ss, reason=reason,
                           properties=combined_props)
    self.parent.submitBuildSet(bs)

    # return a remotely-usable BuildSetStatus object
    from buildbot.status.client import makeRemote
    return makeRemote(bs.status)
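The precedence rule perspective_try relies on, reduced to a standalone sketch (invented property names; only the Properties API exercised by the test cases in this collection is used): the caller-supplied dict is applied last, so it wins and "try build" becomes the recorded source.

from buildbot.process.properties import Properties

combined = Properties()
inherited = Properties()
inherited.setProperty('pool', 'default', 'parent')       # inherited value
combined.updateFromProperties(inherited)
combined.update({'pool': 'experimental'}, "try build")   # caller value, applied last
assert combined.getProperty('pool') == 'experimental'
assert combined.getPropertySource('pool') == 'try build'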
def trigger(self, ssid, set_props=None):
    """Trigger this scheduler with the given sourcestamp ID. Returns a
    deferred that will fire when the buildset is finished."""
    # properties for this buildset are composed of our own properties,
    # potentially overridden by anything from the triggering build
    props = Properties()
    props.updateFromProperties(self.properties)
    if set_props:
        props.updateFromProperties(set_props)

    # note that this does not use the buildset subscriptions mechanism, as
    # the duration of interest to the caller is bounded by the lifetime of
    # this process.
    if ssid:
        d = self.addBuildsetForSourceStamp(reason=self.reason, ssid=ssid,
                                           properties=props)
    else:
        d = self.addBuildsetForLatest(reason=self.reason, properties=props)

    def setup_waiter((bsid, brids)):  # Python 2 tuple-parameter syntax
        d = defer.Deferred()
        self._waiters[bsid] = (d, brids)
        self._updateWaiters()
        return d
    d.addCallback(setup_waiter)
    return d
def trigger(self, waited_for, sourcestamps=None, set_props=None,
            parent_buildid=None, parent_relationship=None):
    """Trigger this scheduler with the optional given list of sourcestamps
    Returns two deferreds:
        idsDeferred -- yields the ids of the buildset and buildrequest,
                       as soon as they are available.
        resultsDeferred -- yields the build result(s), when they finish."""
    # properties for this buildset are composed of our own properties,
    # potentially overridden by anything from the triggering build
    props = Properties()
    props.updateFromProperties(self.properties)
    if set_props:
        props.updateFromProperties(set_props)

    # note that this does not use the buildset subscriptions mechanism, as
    # the duration of interest to the caller is bounded by the lifetime of
    # this process.
    idsDeferred = self.addBuildsetForSourceStampsWithDefaults(
        self.reason, sourcestamps, waited_for, properties=props,
        parent_buildid=parent_buildid,
        parent_relationship=parent_relationship)

    resultsDeferred = defer.Deferred()

    @idsDeferred.addCallback
    def setup_waiter(ids):
        bsid, brids = ids
        self._waiters[bsid] = (resultsDeferred, brids)
        self._updateWaiters()
        return ids

    return idsDeferred, resultsDeferred
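A minimal sketch of how a triggering step might consume the two deferreds; the caller shape is an assumption, and `sched` stands in for a configured instance of the scheduler above:

from twisted.internet import defer

@defer.inlineCallbacks
def run_trigger(sched, sourcestamps):
    idsDeferred, resultsDeferred = sched.trigger(waited_for=True,
                                                 sourcestamps=sourcestamps)
    bsid, brids = yield idsDeferred    # fires as soon as the buildset exists
    results = yield resultsDeferred    # fires when the triggered builds finish
    defer.returnValue((bsid, brids, results))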
def trigger(self, sourcestamps=None, set_props=None):
    """Trigger this scheduler with the optional given list of sourcestamps
    Returns a deferred that will fire when the buildset is finished."""
    # properties for this buildset are composed of our own properties,
    # potentially overridden by anything from the triggering build
    props = Properties()
    props.updateFromProperties(self.properties)
    if set_props:
        props.updateFromProperties(set_props)

    # note that this does not use the buildset subscriptions mechanism, as
    # the duration of interest to the caller is bounded by the lifetime of
    # this process.
    d = self.addBuildsetForSourceStampSetDetails(self.reason, sourcestamps,
                                                 props)

    def setup_waiter(ids):  # renamed from the 2to3 artifact "xxx_todo_changeme"
        (bsid, brids) = ids
        d = defer.Deferred()
        self._waiters[bsid] = (d, brids)
        self._updateWaiters()
        return d
    d.addCallback(setup_waiter)
    return d
def perspective_try(self, branch, revision, patch, repository, project,
                    builderNames, properties={}):
    log.msg("user %s requesting build on builders %s" % (self.username,
                                                         builderNames))

    # build the intersection of the request and our configured list
    builderNames = self.parent.filterBuilderList(builderNames)
    if not builderNames:
        return

    ss = SourceStamp(branch, revision, patch, repository=repository,
                     project=project)
    reason = "'try' job from user %s" % self.username

    # roll the specified props in with our inherited props
    combined_props = Properties()
    combined_props.updateFromProperties(self.parent.properties)
    combined_props.update(properties, "try build")

    status = self.parent.parent.parent.status
    db = self.parent.parent.db
    d = db.runInteraction(self._try, ss, builderNames, reason,
                          combined_props, db)

    def _done(bsid):
        # return a remotely-usable BuildSetStatus object
        bss = BuildSetStatus(bsid, status, db)
        from buildbot.status.client import makeRemote
        r = makeRemote(bss)
        #self.parent.parent.loop_done() # so it will notify builder loop
        return r
    d.addCallback(_done)
    return d
def rebuildBuild(self, bs, reason="<rebuild, no reason given>",
                 extraProperties=None):
    if not bs.isFinished():
        return

    # Make a copy of the properties so as not to modify the original build.
    properties = Properties()
    # Don't include runtime-set properties in a rebuild request
    properties.updateFromPropertiesNoRuntime(bs.getProperties())
    if extraProperties:  # was inverted ("is None"), which would pass None along
        properties.updateFromProperties(extraProperties)

    properties_dict = dict((k, (v, s)) for (k, v, s) in properties.asList())
    ss = bs.getSourceStamp(absolute=True)
    d = ss.getSourceStampId(self.master.master)

    def add_buildset(ssid):
        return self.master.master.addBuildset(
            builderNames=[self.original.name],
            ssid=ssid, reason=reason, properties=properties_dict)
    d.addCallback(add_buildset)
    return d
@defer.inlineCallbacks  # needed: the body yields deferreds and uses returnValue
def rebuildBuild(self, bs, reason="<rebuild, no reason given>",
                 extraProperties=None, absolute=True, newOwner=''):
    if not bs.isFinished():
        return

    # Make a copy of the properties so as not to modify the original build.
    properties = Properties()
    # Don't include runtime-set properties in a rebuild request
    properties.updateFromPropertiesNoRuntime(bs.getProperties())
    if extraProperties:
        properties.updateFromProperties(extraProperties)

    properties_dict = dict((k, (v, s)) for (k, v, s) in properties.asList())

    # set buildLatestRev to False when rebuilding
    owners = None
    if 'buildLatestRev' in properties_dict.keys():
        (v, s) = properties_dict['buildLatestRev']
        properties_dict['buildLatestRev'] = (False, s)
        owners = bs.getProperty('owners')

    if newOwner and owners is not None and newOwner not in owners:
        properties.setProperty('owner', newOwner, source='Builder rebuildBuild')
        owners.append(newOwner)
        bs.setOwners(owners)

    ssList = bs.getSourceStamps(absolute=absolute)
    if ssList:
        sourcestampsetid = yield ssList[0].getSourceStampSetId(
            self.control.master)
        dl = []
        for ss in ssList[1:]:
            # add deferred to the list
            dl.append(ss.addSourceStampToDatabase(self.control.master,
                                                  sourcestampsetid))
        yield defer.gatherResults(dl)

        bsid, brids = yield self.control.master.addBuildset(
            builderNames=[self.original.name],
            sourcestampsetid=sourcestampsetid,
            reason=reason,
            properties=properties_dict)
        defer.returnValue((bsid, brids))
    else:
        log.msg('Cannot start rebuild, rebuild has no sourcestamps '
                'for a new build')
        defer.returnValue(None)
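An illustration of the buildLatestRev rewrite above, with invented values: the (value, source) pair keeps its source while the flag is forced off, so the rebuild presumably pins the original revision rather than chasing the branch head.

properties_dict = {'buildLatestRev': (True, 'Scheduler')}   # invented input
(v, s) = properties_dict['buildLatestRev']
properties_dict['buildLatestRev'] = (False, s)              # flag off, source kept
assert properties_dict['buildLatestRev'] == (False, 'Scheduler')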
def rebuildBuild(self, bs, reason="<rebuild, no reason given>",
                 extraProperties=None):
    if not bs.isFinished():
        return
    ss = bs.getSourceStamp(absolute=True)

    # Make a copy so as not to modify the original build.
    properties = Properties()
    # Don't include runtime-set properties in a rebuild request
    properties.updateFromPropertiesNoRuntime(bs.getProperties())
    if extraProperties:  # was inverted ("is None"), which would pass None along
        properties.updateFromProperties(extraProperties)

    self.submitBuildRequest(ss, reason, props=properties)
@defer.inlineCallbacks  # needed: the body yields a deferred and uses returnValue
def submitJob(self, change, job):
    props = Properties()
    if change.properties:
        props.updateFromProperties(change.properties)
    if job.build_properties:
        props.update(job.build_properties, 'Gerrit')

    bsid = yield self.addBuildsetForChanges(reason='tryjob',
                                            changeids=[change.number],
                                            builderNames=job.builder_names,
                                            properties=props)
    log.msg('Successfully submitted a Gerrit try job for %s: %s.'
            % (change.who, job))
    defer.returnValue(bsid)
def reconfigService(self, new_config):
    # Given a new BuildSlave, configure this one identically. Because
    # BuildSlave objects are remotely referenced, we can't replace them
    # without disconnecting the slave, yet there's no reason to do that.
    new = self.findNewSlaveInstance(new_config)

    assert self.slavename == new.slavename

    # do we need to re-register?
    if (not self.registration or
            self.password != new.password or
            new_config.slavePortnum != self.registered_port):
        if self.registration:
            self.registration.unregister()
        self.password = new.password
        self.registered_port = new_config.slavePortnum
        self.registration = self.master.pbmanager.register(
            self.registered_port, self.slavename,
            self.password, self.getPerspective)

    # adopt new instance's configuration parameters
    self.max_builds = new.max_builds
    self.access = new.access
    self.notify_on_missing = new.notify_on_missing
    self.keepalive_interval = new.keepalive_interval

    if self.missing_timeout != new.missing_timeout:
        running_missing_timer = self.missing_timer
        self.stopMissingTimer()
        self.missing_timeout = new.missing_timeout
        if running_missing_timer:
            self.startMissingTimer()

    properties = Properties()
    properties.updateFromProperties(new.properties)
    self.properties = properties

    self.updateLocks()

    # update the attached slave's notion of which builders are attached.
    # This assumes that the relevant builders have already been configured,
    # which is why the reconfig_priority is set low in this class.
    d = self.updateSlave()

    # and chain up
    d.addCallback(lambda _:
        config.ReconfigurableServiceMixin.reconfigService(self, new_config))

    return d
def get_props(self, builder, options):
    """Current job extra properties that are not related to the source stamp.

    Initialize with the Scheduler's base properties.
    """
    keys = ('clobber', 'issue', 'patchset', 'rietveld', 'root', 'try_job_key')
    # All these settings have no meaning when False or not set, so don't set
    # them in that case.
    properties = dict((i, options[i]) for i in keys if options.get(i))
    properties['testfilter'] = options['bot'].get(builder, None)
    props = Properties()
    props.updateFromProperties(self.properties)
    props.update(properties, self._PROPERTY_SOURCE)
    return props
def trigger(self, ss, set_props=None):
    """Trigger this scheduler. Returns a deferred that will fire when the
    buildset is finished.
    """
    # properties for this buildset are composed of our own properties,
    # potentially overridden by anything from the triggering build
    props = Properties()
    props.updateFromProperties(self.properties)
    if set_props:
        props.updateFromProperties(set_props)

    bs = buildset.BuildSet(self.builderNames, ss, properties=props)
    d = bs.waitUntilFinished()
    self.submitBuildSet(bs)
    return d
def resubmitBuild(self, bs, reason="<rebuild, no reason given>",
                  extraProperties=None):
    if not bs.isFinished():
        return
    ss = bs.getSourceStamp(absolute=True)

    if extraProperties is None:
        properties = bs.getProperties()
    else:
        # Make a copy so as not to modify the original build.
        properties = Properties()
        properties.updateFromProperties(bs.getProperties())
        properties.updateFromProperties(extraProperties)

    req = base.BuildRequest(reason, ss, self.original.name,
                            properties=properties)
    self.requestBuild(req)
@defer.inlineCallbacks  # needed: the body yields deferreds
def reconfigServiceWithBuildbotConfig(self, new_config):
    # Given a new BuildSlave, configure this one identically. Because
    # BuildSlave objects are remotely referenced, we can't replace them
    # without disconnecting the slave, yet there's no reason to do that.
    new = self.findNewSlaveInstance(new_config)

    assert self.slavename == new.slavename

    self.password = new.password

    # update our records with the buildslave manager
    if not self.registration:
        self.registration = yield self.master.buildslaves.register(self)
    yield self.registration.update(new, new_config)

    # adopt new instance's configuration parameters
    self.max_builds = new.max_builds
    self.access = new.access
    self.notify_on_missing = new.notify_on_missing

    if self.missing_timeout != new.missing_timeout:
        running_missing_timer = self.missing_timer
        self.stopMissingTimer()
        self.missing_timeout = new.missing_timeout
        if running_missing_timer:
            self.startMissingTimer()

    properties = Properties()
    properties.updateFromProperties(new.properties)
    self.properties = properties

    self.updateLocks()

    bids = [b._builderid
            for b in self.botmaster.getBuildersForSlave(self.slavename)]
    yield self.master.data.updates.buildslaveConfigured(
        self.buildslaveid, self.master.masterid, bids)

    # update the attached slave's notion of which builders are attached.
    # This assumes that the relevant builders have already been configured,
    # which is why the reconfig_priority is set low in this class.
    yield self.updateSlave()

    yield service.ReconfigurableServiceMixin.reconfigServiceWithBuildbotConfig(
        self, new_config)
@defer.inlineCallbacks  # needed: the body yields deferreds
def reconfigServiceWithBuildbotConfig(self, new_config):
    # Given a new BuildSlave, configure this one identically. Because
    # BuildSlave objects are remotely referenced, we can't replace them
    # without disconnecting the slave, yet there's no reason to do that.
    new = self.findNewSlaveInstance(new_config)

    assert self.slavename == new.slavename

    self.password = new.password

    # update our records with the buildslave manager
    if not self.registration:
        self.registration = yield self.master.buildslaves.register(self)
    yield self.registration.update(new, new_config)

    # adopt new instance's configuration parameters
    self.max_builds = new.max_builds
    self.access = new.access
    self.notify_on_missing = new.notify_on_missing

    if self.missing_timeout != new.missing_timeout:
        running_missing_timer = self.missing_timer
        self.stopMissingTimer()
        self.missing_timeout = new.missing_timeout
        if running_missing_timer:
            self.startMissingTimer()

    properties = Properties()
    properties.updateFromProperties(new.properties)
    self.properties = properties

    self.updateLocks()

    bids = [b._builderid
            for b in self.botmaster.getBuildersForSlave(self.slavename)]
    yield self.master.data.updates.buildslaveConfigured(self.buildslaveid, bids)

    # update the attached slave's notion of which builders are attached.
    # This assumes that the relevant builders have already been configured,
    # which is why the reconfig_priority is set low in this class.
    yield self.updateSlave()

    yield service.ReconfigurableServiceMixin.reconfigServiceWithBuildbotConfig(
        self, new_config)
def trigger(self, ss, set_props=None):
    """Trigger this scheduler. Returns a deferred that will fire when the
    buildset is finished.
    """
    # properties for this buildset are composed of our own properties,
    # potentially overridden by anything from the triggering build
    props = Properties()
    props.updateFromProperties(self.properties)
    if set_props:
        props.updateFromProperties(set_props)

    d = self.parent.db.runInteraction(self._trigger, ss, props)
    # this returns a Deferred that fires when the buildset is complete,
    # with the buildset results (SUCCESS or FAILURE). This Deferred is
    # not persistent: if the master is bounced, the "upstream" build (the
    # one which used steps.trigger.Trigger) will disappear anyways.
    def _done(res):
        return res[0]  # chain the Deferred
    d.addCallback(_done)
    return d
def get_props(self, builder, options):
    """Current job extra properties that are not related to the source stamp.

    Initialize with the Scheduler's base properties.
    """
    keys = (
        'clobber',
        'issue',
        'patchset',
        'requester',
        'rietveld',
        'root',
        'try_job_key',
    )
    # All these settings have no meaning when False or not set, so don't set
    # them in that case.
    properties = dict((i, options[i]) for i in keys if options.get(i))
    properties['testfilter'] = options['bot'].get(builder, None)
    props = Properties()
    props.updateFromProperties(self.properties)
    props.update(properties, self._PROPERTY_SOURCE)
    return props
def create_buildset(self, ssid, reason, t, props=None, builderNames=None):
    # We need a fresh set of properties each time since we expect to update
    # the properties below
    my_props = Properties()
    if props is None:
        my_props.updateFromProperties(self.properties)
    else:
        my_props.updateFromProperties(props)

    # Update with our prop functions
    try:
        for func in propfuncs:
            try:
                request_props = func(self, t, ssid)
                log.msg("%s: propfunc returned %s" % (self.name, request_props))
                my_props.updateFromProperties(request_props)
            except:
                log.msg("Error running %s" % func)
                log.err()
    except:
        log.msg("%s: error calculating properties" % self.name)
        log.err()

    # Call our base class's original, with our new properties.
    return base_class.create_buildset(self, ssid, reason, t, my_props,
                                      builderNames)
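A minimal sketch of the shape a propfunc is assumed to have (the propfuncs iterable itself is defined elsewhere and not shown in the snippet): it receives the scheduler, the transaction, and the sourcestamp ID, and returns a Properties instance that create_buildset folds in.

from buildbot.process.properties import Properties

def example_propfunc(scheduler, t, ssid):
    # illustrative only: record the triggering sourcestamp id as a property
    props = Properties()
    props.setProperty('triggered_ssid', ssid, 'example_propfunc')
    return props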
def get_props(self, builder, options):
    """Current job extra properties that are not related to the source stamp.

    Initialize with the Scheduler's base properties.
    """
    always_included_keys = (
        'orig_revision',
    )
    optional_keys = (
        'clobber',
        'issue',
        'patch_ref',
        'patch_repo_url',
        'patch_storage',
        'patch_url',
        'patch_project',
        'patchset',
        'requester',
        'rietveld',
        'root',
        'try_job_key',
    )
    # All these settings have no meaning when False or not set, so don't set
    # them in that case.
    properties = dict((i, options[i]) for i in optional_keys if options.get(i))
    # These settings are meaningful even if the value evaluates to False
    # or None. Note that when options don't contain given key, it will
    # be set to None.
    properties.update(dict((i, options.get(i)) for i in always_included_keys))
    # Specially evaluated properties, e.g. ones where key name is different
    # between properties and options.
    properties['testfilter'] = options['bot'].get(builder, None)
    props = Properties()
    props.updateFromProperties(self.properties)
    props.update(properties, self._PROPERTY_SOURCE)
    return props
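An invented example of the mapping get_props performs (not real tryserver data): falsy optional keys are dropped, always-included keys survive even as None, and testfilter is looked up per builder in the 'bot' map.

options = {
    'issue': 10203,                         # truthy optional key: kept
    'clobber': False,                       # falsy optional key: dropped
    'orig_revision': None,                  # always-included key: kept as None
    'bot': {'linux_rel': 'foo_unittests'},
}
# For builder 'linux_rel' the extra properties become:
#   {'issue': 10203, 'orig_revision': None, 'testfilter': 'foo_unittests'}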
def trigger(self, ss, set_props=None):
    self.ss = ss
    self.set_props = set_props

    props = Properties()
    props.updateFromProperties(self.properties)
    if set_props:
        props.updateFromProperties(set_props)

    self.script_repo_revision = props.getProperty('script_repo_revision')
    assert self.script_repo_revision, 'script_repo_revision should be set'
    self.release_config = props.getProperty('release_config')
    assert self.release_config, 'release_config should be set'

    def _run_loop(_):
        self.loop = LoopingCall(self.poll)
        reactor.callLater(0, self.loop.start, self.pollInterval)
        reactor.callLater(self.pollTimeout, self.stopLoop,
                          'Timeout after %s' % self.pollTimeout)
    d = self.getReleaseConfig()
    d.addCallback(_run_loop)
class BuildRequest:
    """I represent a request to a specific Builder to run a single build.

    I have a SourceStamp which specifies what sources I will build. This may
    specify a specific revision of the source tree (so source.branch,
    source.revision, and source.patch are used). The .patch attribute is
    either None or a tuple of (patchlevel, diff), consisting of a number to
    use in 'patch -pN', and a unified-format context diff.

    Alternatively, the SourceStamp may specify a set of Changes to be built,
    contained in source.changes. In this case, I may be mergeable with other
    BuildRequests on the same branch.

    I may be part of a BuildSet, in which case I will report status results
    to it.

    I am paired with a BuildRequestStatus object, to which I feed status
    information.

    @type source: a L{buildbot.sourcestamp.SourceStamp} instance.
    @ivar source: the source code that this BuildRequest uses

    @type reason: string
    @ivar reason: the reason this Build is being requested. Schedulers provide
    this, but for forced builds the user requesting the build will provide a
    string.

    @type properties: Properties object
    @ivar properties: properties that should be applied to this build
                      'owner' property is used by Build objects to collect
                      the list returned by getInterestedUsers

    @ivar status: the IBuildStatus object which tracks our status

    @ivar submittedAt: a timestamp (seconds since epoch) when this request was
    submitted to the Builder. This is used by the CVS step to compute a
    checkout timestamp, as well as the master to prioritize build requests
    from oldest to newest.
    """

    source = None
    builder = None
    startCount = 0  # how many times we have tried to start this build
    submittedAt = None

    implements(interfaces.IBuildRequestControl)

    def __init__(self, reason, source, builderName, properties=None):
        assert interfaces.ISourceStamp(source, None)
        self.reason = reason
        self.source = source

        self.properties = Properties()
        if properties:
            self.properties.updateFromProperties(properties)

        self.start_watchers = []
        self.finish_watchers = []
        self.status = BuildRequestStatus(source, builderName)

    def canBeMergedWith(self, other):
        return self.source.canBeMergedWith(other.source)

    def mergeWith(self, others):
        return self.source.mergeWith([o.source for o in others])

    def mergeReasons(self, others):
        """Return a reason for the merged build request."""
        reasons = []
        for req in [self] + others:
            if req.reason and req.reason not in reasons:
                reasons.append(req.reason)
        return ", ".join(reasons)

    def waitUntilFinished(self):
        """Get a Deferred that will fire (with a
        L{buildbot.interfaces.IBuildStatus} instance) when the build
        finishes."""
        d = defer.Deferred()
        self.finish_watchers.append(d)
        return d

    # these are called by the Builder

    def requestSubmitted(self, builder):
        # the request has been placed on the queue
        self.builder = builder

    def buildStarted(self, build, buildstatus):
        """This is called by the Builder when a Build has been started in the
        hopes of satisfying this BuildRequest. It may be called multiple
        times, since interrupted builds and lost buildslaves may force
        multiple Builds to be run until the fate of the BuildRequest is
        known for certain."""
        for o in self.start_watchers[:]:
            # these observers get the IBuildControl
            o(build)
        # while these get the IBuildStatus
        self.status.buildStarted(buildstatus)

    def finished(self, buildstatus):
        """This is called by the Builder when the BuildRequest has been
        retired. This happens when its Build has either succeeded (yay!) or
        failed (boo!). TODO: If it is halted due to an exception (oops!), or
        some other retryable error, C{finished} will not be called yet."""
        for w in self.finish_watchers:
            w.callback(buildstatus)
        self.finish_watchers = []

    # IBuildRequestControl

    def subscribe(self, observer):
        self.start_watchers.append(observer)

    def unsubscribe(self, observer):
        self.start_watchers.remove(observer)

    def cancel(self):
        """Cancel this request. This can only be successful if the Build has
        not yet been started.

        @return: a boolean indicating if the cancel was successful."""
        if self.builder:
            return self.builder.cancelBuildRequest(self)
        return False

    def setSubmitTime(self, t):
        self.submittedAt = t
        self.status.setSubmitTime(t)

    def getSubmitTime(self):
        return self.submittedAt
def trigger(self, ssid, set_props=None):
    """Trigger this scheduler with the given sourcestamp ID. Returns a
    deferred that will fire when the buildset is finished."""
    # properties for this buildset are composed of our own properties,
    # potentially overridden by anything from the triggering build
    props = Properties()
    props.updateFromProperties(self.properties)
    if set_props:
        props.updateFromProperties(set_props)

    newRevision = [None]

    def getRevision(ss):
        newRevision[0] = ss['revision']
        return ss['revision']

    def getRecentChanges(newRev):
        if self.lastRevision is None:
            return None
        return self.master.db.changes.getRecentChanges(self.maxChange)

    # check the last x changesets and pick up the ones that are between the
    # last revision and the current revision and belong to interested projects
    def selectChangeSet(changes):
        changeids = []
        if changes is not None:
            for change in changes:
                if change['revision'] > newRevision[0] \
                        or change['revision'] <= self.lastRevision \
                        or change['project'] not in self.projects:
                    continue
                changeids.append(change['changeid'])
        log.msg("LLDBTriggerable: last revision change from %s to %s"
                % (self.lastRevision, newRevision[0]))
        self.lastRevision = newRevision[0]
        return changeids

    def addBuildset(changeids):
        if changeids:
            log.msg("LLDBTriggerable: addBuildsetForChanges, changeids: %s"
                    % changeids)
            return self.addBuildsetForChanges(reason=self.reason,
                                              changeids=changeids,
                                              properties=props)
        elif ssid:
            # if this is the first build after master startup, use the source
            # stamp from the triggerer build. It's possible to write the last
            # revision to a file on the master, so after a master reconfig we
            # could pick up the correct last revision. That's not implemented
            # here because 1) the cases are rare in which the first build
            # after a master restart is preceded by failing builds on the
            # triggerer builder, and 2) it avoids polluting the master with
            # project-specific cache files
            log.msg("LLDBTriggerable: addBuildsetForSourceStamp")
            return self.addBuildsetForSourceStamp(reason=self.reason,
                                                  ssid=ssid, properties=props)
        else:
            return self.addBuildsetForLatest(reason=self.reason,
                                             properties=props)

    def setup_waiter((bsid, brids)):  # Python 2 tuple-parameter syntax
        self._waiters[bsid] = d = defer.Deferred()
        self._updateWaiters()
        return d

    d = self.master.db.sourcestamps.getSourceStamp(ssid)
    d.addCallback(getRevision)
    d.addCallback(getRecentChanges)
    d.addCallback(selectChangeSet)
    d.addCallback(addBuildset)
    d.addCallback(setup_waiter)
    return d
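The filter in selectChangeSet, reduced to a standalone illustration with invented data (revisions shown as integers for simplicity): keep only changes newer than lastRevision, no newer than the triggering revision, and belonging to an interesting project.

last_revision, new_revision = 100, 105
projects = ['lldb']
changes = [
    {'changeid': 1, 'revision': 99,  'project': 'lldb'},   # too old: dropped
    {'changeid': 2, 'revision': 103, 'project': 'clang'},  # wrong project: dropped
    {'changeid': 3, 'revision': 104, 'project': 'lldb'},   # selected
    {'changeid': 4, 'revision': 106, 'project': 'lldb'},   # past new revision: dropped
]
selected = [c['changeid'] for c in changes
            if last_revision < c['revision'] <= new_revision
            and c['project'] in projects]
assert selected == [3]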
class BuildRequest:
    """I represent a request to a specific Builder to run a single build.

    I am generated by db.getBuildRequestWithNumber, and am used to tell the
    Build about what it ought to be building.  I am also used by the Builder
    to let hook functions decide which requests should be handled first.

    I have a SourceStamp which specifies what sources I will build. This may
    specify a specific revision of the source tree (so source.branch,
    source.revision, and source.patch are used). The .patch attribute is
    either None or a tuple of (patchlevel, diff), consisting of a number to
    use in 'patch -pN', and a unified-format context diff.

    Alternatively, the SourceStamp may specify a set of Changes to be built,
    contained in source.changes. In this case, I may be mergeable with other
    BuildRequests on the same branch.

    @type source: a L{buildbot.sourcestamp.SourceStamp} instance.
    @ivar source: the source code that this BuildRequest uses

    @type reason: string
    @ivar reason: the reason this Build is being requested. Schedulers provide
    this, but for forced builds the user requesting the build will provide a
    string.

    @type properties: Properties object
    @ivar properties: properties that should be applied to this build
                      'owner' property is used by Build objects to collect
                      the list returned by getInterestedUsers

    @ivar status: the IBuildStatus object which tracks our status

    @ivar submittedAt: a timestamp (seconds since epoch) when this request was
    submitted to the Builder. This is used by the CVS step to compute a
    checkout timestamp, as well as the master to prioritize build requests
    from oldest to newest.
    """

    source = None
    builder = None  # XXXREMOVE
    startCount = 0  # how many times we have tried to start this build # XXXREMOVE
    submittedAt = None

    def __init__(self, reason, source, builderName, properties=None):
        assert interfaces.ISourceStamp(source, None)
        self.reason = reason
        self.source = source
        self.builderName = builderName

        self.properties = Properties()
        if properties:
            self.properties.updateFromProperties(properties)

    def canBeMergedWith(self, other):
        return self.source.canBeMergedWith(other.source)

    def mergeWith(self, others):
        return self.source.mergeWith([o.source for o in others])

    def mergeReasons(self, others):
        """Return a reason for the merged build request."""
        reasons = []
        for req in [self] + others:
            if req.reason and req.reason not in reasons:
                reasons.append(req.reason)
        return ", ".join(reasons)

    # IBuildRequestControl

    def cancel(self):  # XXXREMOVE
        """Cancel this request. This can only be successful if the Build has
        not yet been started.

        @return: a boolean indicating if the cancel was successful."""
        if self.builder:
            return self.builder.cancelBuildRequest(self)
        return False

    def getSubmitTime(self):
        return self.submittedAt
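mergeReasons above reduces to this loop; a standalone illustration showing that empty and duplicate reasons collapse:

reasons = []
for reason in ['scheduler', '', 'scheduler', 'try job']:
    if reason and reason not in reasons:
        reasons.append(reason)
assert ", ".join(reasons) == "scheduler, try job"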
class AbstractBuildSlave(pb.Avatar, service.MultiService):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['slaves'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a build slave -- a remote machine capable of running builds.
    I am instantiated by the configuration file, and can be subclassed to
    add extra functionality."""

    implements(IBuildSlave)
    keepalive_timer = None
    keepalive_interval = None

    def __init__(self, name, password, max_builds=None,
                 notify_on_missing=[], missing_timeout=3600,
                 properties={}, locks=None, keepalive_interval=3600):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this buildslave (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this slave
        @type properties: dictionary
        @param locks: A list of locks that must be acquired before this slave
                      can be used
        @type locks: dictionary
        """
        service.MultiService.__init__(self)
        self.slavename = name
        self.password = password
        self.botmaster = None # no buildmaster yet
        self.slave_status = SlaveStatus(name)
        self.slave = None # a RemoteReference to the Bot, when connected
        self.slave_commands = None
        self.slavebuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks

        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")

        self.lastMessageReceived = 0
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            assert isinstance(i, str)
        self.missing_timeout = missing_timeout
        self.missing_timer = None
        self.keepalive_interval = keepalive_interval

        self.detached_subs = None

        self._old_builder_list = None

    def identity(self):
        """
        Return a tuple describing this slave.  After reconfiguration a
        new slave with the same identity will update this one, rather
        than replacing it, thereby avoiding an interruption of current
        activity.
        """
        return (self.slavename, self.password,
                '%s.%s' % (self.__class__.__module__,
                           self.__class__.__name__))

    def update(self, new):
        """
        Given a new BuildSlave, configure this one identically.  Because
        BuildSlave objects are remotely referenced, we can't replace them
        without disconnecting the slave, yet there's no reason to do that.
        """
        # the reconfiguration logic should guarantee this:
        assert self.slavename == new.slavename
        assert self.password == new.password
        assert self.identity() == new.identity()
        self.max_builds = new.max_builds
        self.access = new.access
        self.notify_on_missing = new.notify_on_missing
        self.missing_timeout = new.missing_timeout
        self.keepalive_interval = new.keepalive_interval

        self.properties = Properties()
        self.properties.updateFromProperties(new.properties)

        if self.botmaster:
            self.updateLocks()

    def __repr__(self):
        if self.botmaster:
            builders = self.botmaster.getBuildersForSlave(self.slavename)
            return "<%s '%s', current builders: %s>" % \
                (self.__class__.__name__, self.slavename,
                 ','.join(map(lambda b: b.name, builders)))
        else:
            return "<%s '%s', (no builders yet)>" % \
                (self.__class__.__name__, self.slavename)

    def updateLocks(self):
        # convert locks into their real form
        locks = []
        for access in self.access:
            if not isinstance(access, LockAccess):
                access = access.defaultAccess()
            lock = self.botmaster.getLockByID(access.lockid)
            locks.append((lock, access))
        self.locks = [(l.getLock(self), la) for l, la in locks]

    def locksAvailable(self):
        """
        I am called to see if all the locks I depend on are available,
        in which case I return True, otherwise I return False
        """
        if not self.locks:
            return True
        for lock, access in self.locks:
            if not lock.isAvailable(access):
                return False
        return True

    def acquireLocks(self):
        """
        I am called when a build is preparing to run. I try to claim all
        the locks that are needed for a build to happen. If I can't, then
        my caller should give up the build and try to get another slave
        to look at it.
        """
        log.msg("acquireLocks(slave %s, locks %s)" % (self, self.locks))
        if not self.locksAvailable():
            log.msg("slave %s can't lock, giving up" % (self, ))
            return False
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return True

    def releaseLocks(self):
        """
        I am called to release any locks after a build has finished
        """
        log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)

    def setBotmaster(self, botmaster):
        assert not self.botmaster, "BuildSlave already has a botmaster"
        self.botmaster = botmaster
        self.updateLocks()
        self.startMissingTimer()

    def stopMissingTimer(self):
        if self.missing_timer:
            self.missing_timer.cancel()
            self.missing_timer = None

    def startMissingTimer(self):
        if self.notify_on_missing and self.missing_timeout and self.parent:
            self.stopMissingTimer() # in case it's already running
            self.missing_timer = reactor.callLater(self.missing_timeout,
                                                   self._missing_timer_fired)

    def doKeepalive(self):
        self.keepalive_timer = reactor.callLater(self.keepalive_interval,
                                                 self.doKeepalive)
        if not self.slave:
            return
        d = self.slave.callRemote("print", "Received keepalive from master")
        d.addErrback(log.msg, "Keepalive failed for '%s'" % (self.slavename, ))

    def stopKeepaliveTimer(self):
        if self.keepalive_timer:
            self.keepalive_timer.cancel()

    def startKeepaliveTimer(self):
        assert self.keepalive_interval
        log.msg("Starting buildslave keepalive timer for '%s'" % \
                (self.slavename, ))
        self.doKeepalive()

    def recordConnectTime(self):
        if self.slave_status:
            self.slave_status.recordConnectTime()

    def isConnected(self):
        return self.slave

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return

        buildmaster = self.botmaster.master
        status = buildmaster.getStatus()
        text = "The Buildbot working for '%s'\n" % status.getTitle()
        text += ("has noticed that the buildslave named %s went away\n" %
                 self.slavename)
        text += "\n"
        text += ("It last disconnected at %s (buildmaster-local time)\n" %
                 time.ctime(time.time() - self.missing_timeout)) # approx
        text += "\n"
        text += "The admin on record (as reported by BUILDSLAVE:info/admin)\n"
        text += "was '%s'.\n" % self.slave_status.getAdmin()
        text += "\n"
        text += "Sincerely,\n"
        text += " The Buildbot\n"
        text += " %s\n" % status.getTitleURL()
        subject = "Buildbot: buildslave %s was lost" % self.slavename
        return self._mail_missing_message(subject, text)

    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.slave:
            return self.sendBuilderList()
        else:
            return defer.succeed(None)

    def updateSlaveStatus(self, buildStarted=None, buildFinished=None):
        if buildStarted:
            self.slave_status.buildStarted(buildStarted)
        if buildFinished:
            self.slave_status.buildFinished(buildFinished)

    @metrics.countMethod('AbstractBuildSlave.attached()')
    def attached(self, bot):
        """This is called when the slave connects.

        @return: a Deferred that fires when the attachment is complete
        """

        # the botmaster should ensure this.
        assert not self.isConnected()

        metrics.MetricCountEvent.log("AbstractBuildSlave.attached_slaves", 1)

        # set up the subscription point for eventual detachment
        self.detached_subs = subscription.SubscriptionPoint("detached")

        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # we accumulate slave information in this 'state' dictionary, then
        # set it atomically if we make it far enough through the process
        state = {}

        # Reset graceful shutdown status
        self.slave_status.setGraceful(False)
        # We want to know when the graceful shutdown flag changes
        self.slave_status.addGracefulWatcher(self._gracefulChanged)

        d = defer.succeed(None)

        def _log_attachment_on_slave(res):
            d1 = bot.callRemote("print", "attached")
            d1.addErrback(lambda why: None)
            return d1
        d.addCallback(_log_attachment_on_slave)

        def _get_info(res):
            d1 = bot.callRemote("getSlaveInfo")
            def _got_info(info):
                log.msg("Got slaveinfo from '%s'" % self.slavename)
                # TODO: info{} might have other keys
                state["admin"] = info.get("admin")
                state["host"] = info.get("host")
                state["access_uri"] = info.get("access_uri", None)
                state["slave_environ"] = info.get("environ", {})
                state["slave_basedir"] = info.get("basedir", None)
                state["slave_system"] = info.get("system", None)
            def _info_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # maybe an old slave, doesn't implement remote_getSlaveInfo
                log.msg("BuildSlave.info_unavailable")
                log.err(why)
            d1.addCallbacks(_got_info, _info_unavailable)
            return d1
        d.addCallback(_get_info)
        self.startKeepaliveTimer()

        def _get_version(res):
            d = bot.callRemote("getVersion")
            def _got_version(version):
                state["version"] = version
            def _version_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # probably an old slave
                state["version"] = '(unknown)'
            d.addCallbacks(_got_version, _version_unavailable)
            return d
        d.addCallback(_get_version)

        def _get_commands(res):
            d1 = bot.callRemote("getCommands")
            def _got_commands(commands):
                state["slave_commands"] = commands
            def _commands_unavailable(why):
                # probably an old slave
                log.msg("BuildSlave._commands_unavailable")
                if why.check(AttributeError):
                    return
                log.err(why)
            d1.addCallbacks(_got_commands, _commands_unavailable)
            return d1
        d.addCallback(_get_commands)

        def _accept_slave(res):
            self.slave_status.setAdmin(state.get("admin"))
            self.slave_status.setHost(state.get("host"))
            self.slave_status.setAccessURI(state.get("access_uri"))
            self.slave_status.setVersion(state.get("version"))
            self.slave_status.setConnected(True)
            self.slave_commands = state.get("slave_commands")
            self.slave_environ = state.get("slave_environ")
            self.slave_basedir = state.get("slave_basedir")
            self.slave_system = state.get("slave_system")
            self.slave = bot
            if self.slave_system == "win32":
                self.path_module = namedModule("win32path")
            else:
                # most everything accepts / as separator, so posix should be a
                # reasonable fallback
                self.path_module = namedModule("posixpath")
            log.msg("bot attached")
            self.messageReceivedFromSlave()
            self.stopMissingTimer()
            self.botmaster.master.status.slaveConnected(self.slavename)

            return self.updateSlave()
        d.addCallback(_accept_slave)
        d.addCallback(lambda _:
            self.botmaster.maybeStartBuildsForSlave(self.slavename))

        # Finally, the slave gets a reference to this BuildSlave. They
        # receive this later, after we've started using them.
        d.addCallback(lambda _: self)
        return d

    def messageReceivedFromSlave(self):
        now = time.time()
        self.lastMessageReceived = now
        self.slave_status.setLastMessageReceived(now)

    def detached(self, mind):
        metrics.MetricCountEvent.log("AbstractBuildSlave.attached_slaves", -1)
        self.slave = None
        self._old_builder_list = []
        self.slave_status.removeGracefulWatcher(self._gracefulChanged)
        self.slave_status.setConnected(False)
        log.msg("BuildSlave.detached(%s)" % self.slavename)
        self.botmaster.master.status.slaveDisconnected(self.slavename)
        self.stopKeepaliveTimer()

        # notify watchers, but do so in the next reactor iteration so that
        # any further detached() action by subclasses happens first
        def notif():
            subs = self.detached_subs
            self.detached_subs = None
            subs.deliver()
        reactor.callLater(0, notif)

    def subscribeToDetach(self, callback):
        """
        Request that C{callable} be invoked with no arguments when the
        L{detached} method is invoked.

        @returns: L{Subscription}
        """
        assert self.detached_subs, "detached_subs is only set if attached"
        return self.detached_subs.subscribe(callback)

    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """
        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)
        # When this Deferred fires, we'll be ready to accept the new slave
        return self._disconnect(self.slave)

    def _disconnect(self, slave):
        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback with one argument, the
        # RemoteReference being disconnected.
        def _disconnected(rref):
            reactor.callLater(0, d.callback, None)
        slave.notifyOnDisconnect(_disconnected)
        tport = slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            #tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
        except:
            # however, these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
        log.msg("waiting for slave to finish disconnecting")

        return d

    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
        blist = [(b.name, b.slavebuilddir) for b in our_builders]
        if blist == self._old_builder_list:
            log.msg("Builder list is unchanged; not calling setBuilderList")
            return defer.succeed(None)

        d = self.slave.callRemote("setBuilderList", blist)
        def sentBuilderList(ign):
            self._old_builder_list = blist
            return ign
        d.addCallback(sentBuilderList)
        return d

    def perspective_keepalive(self):
        self.messageReceivedFromSlave()

    def perspective_shutdown(self):
        log.msg("slave %s wants to shut down" % self.slavename)
        self.slave_status.setGraceful(True)

    def addSlaveBuilder(self, sb):
        self.slavebuilders[sb.builder_name] = sb

    def removeSlaveBuilder(self, sb):
        try:
            del self.slavebuilders[sb.builder_name]
        except KeyError:
            pass

    def buildFinished(self, sb):
        """This is called when a build on this slave is finished."""
        self.botmaster.maybeStartBuildsForSlave(self.slavename)

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this buildslave
        can start a build.  This function can be used to limit overall
        concurrency on the buildslave.

        Note for subclassers: if a slave can become willing to start a build
        without any action on that slave (for example, by a resource in use
        on another slave becoming available), then you must arrange for
        L{maybeStartBuildsForSlave} to be called at that time, or builds on
        this slave will not start.
        """
        # If we're waiting to shutdown gracefully, then we shouldn't
        # accept any new jobs.
        if self.slave_status.getGraceful():
            return False

        if self.max_builds:
            active_builders = [sb for sb in self.slavebuilders.values()
                               if sb.isBusy()]
            if len(active_builders) >= self.max_builds:
                return False

        if not self.locksAvailable():
            return False

        return True

    def _mail_missing_message(self, subject, text):
        # first, see if we have a MailNotifier we can use. This gives us a
        # fromaddr and a relayhost.
        buildmaster = self.botmaster.master
        for st in buildmaster.statusTargets:
            if isinstance(st, MailNotifier):
                break
        else:
            # if not, they get a default MailNotifier, which always uses SMTP
            # to localhost and uses a dummy fromaddr of "buildbot".
            log.msg("buildslave-missing msg using default MailNotifier")
            st = MailNotifier("buildbot")
        # now construct the mail
        m = Message()
        m.set_payload(text)
        m['Date'] = formatdate(localtime=True)
        m['Subject'] = subject
        m['From'] = st.fromaddr
        recipients = self.notify_on_missing
        m['To'] = ", ".join(recipients)
        d = st.sendMessage(m, recipients)
        # return the Deferred for testing purposes
        return d

    def _gracefulChanged(self, graceful):
        """This is called when our graceful shutdown setting changes"""
        self.maybeShutdown()

    @defer.deferredGenerator
    def shutdown(self):
        """Shutdown the slave"""
        if not self.slave:
            log.msg("no remote; slave is already shut down")
            return

        # First, try the "new" way - calling our own remote's shutdown
        # method.  The method was only added in 0.8.3, so ignore NoSuchMethod
        # failures.
        def new_way():
            d = self.slave.callRemote('shutdown')
            d.addCallback(lambda _ : True) # successful shutdown request
            def check_nsm(f):
                f.trap(pb.NoSuchMethod)
                return False # fall through to the old way
            d.addErrback(check_nsm)
            def check_connlost(f):
                f.trap(pb.PBConnectionLost)
                return True # the slave is gone, so call it finished
            d.addErrback(check_connlost)
            return d

        wfd = defer.waitForDeferred(new_way())
        yield wfd
        if wfd.getResult():
            return # done!

        # Now, the old way.  Look for a builder with a remote reference to the
        # client side slave.  If we can find one, then call "shutdown" on the
        # remote builder, which will cause the slave buildbot process to exit.
        def old_way():
            d = None
            for b in self.slavebuilders.values():
                if b.remote:
                    d = b.remote.callRemote("shutdown")
                    break

            if d:
                log.msg("Shutting down (old) slave: %s" % self.slavename)
                # The remote shutdown call will not complete successfully since
                # the buildbot process exits almost immediately after getting
                # the shutdown request.
                # Here we look at the reason why the remote call failed, and if
                # it's because the connection was lost, that means the slave
                # shutdown as expected.
                def _errback(why):
                    if why.check(pb.PBConnectionLost):
                        log.msg("Lost connection to %s" % self.slavename)
                    else:
                        log.err("Unexpected error when trying to shutdown %s"
                                % self.slavename)
                d.addErrback(_errback)
                return d
            log.err("Couldn't find remote builder to shut down slave")
            return defer.succeed(None)
        wfd = defer.waitForDeferred(old_way())
        yield wfd
        wfd.getResult()

    def maybeShutdown(self):
        """Shut down this slave if it has been asked to shut down gracefully,
        and has no active builders."""
        if not self.slave_status.getGraceful():
            return
        active_builders = [sb for sb in self.slavebuilders.values()
                           if sb.isBusy()]
        if active_builders:
            return
        d = self.shutdown()
        d.addErrback(log.err, 'error while shutting down slave')
class BuildSet:
    """I represent a set of potential Builds, all of the same source tree,
    across a specified list of Builders. I can represent a build of a
    specific version of the source tree (named by source.branch and
    source.revision), or a build of a certain set of Changes
    (source.changes=list)."""

    def __init__(self, builderNames, source, reason=None, bsid=None,
                 properties=None):
        """
        @param source: a L{buildbot.sourcestamp.SourceStamp}
        """
        self.builderNames = builderNames
        self.source = source
        self.reason = reason

        self.properties = Properties()
        if properties:
            self.properties.updateFromProperties(properties)

        self.stillHopeful = True
        self.status = builder.BuildSetStatus(source, reason, builderNames,
                                             bsid)

    def waitUntilSuccess(self):
        return self.status.waitUntilSuccess()

    def waitUntilFinished(self):
        return self.status.waitUntilFinished()

    def getProperties(self):
        return self.properties

    def start(self, builders):
        """This is called by the BuildMaster to actually create and submit
        the BuildRequests."""
        self.requests = []
        reqs = []

        # create the requests
        for b in builders:
            req = base.BuildRequest(self.reason, self.source, b.name,
                                    properties=self.properties)
            reqs.append((b, req))
            self.requests.append(req)
            d = req.waitUntilFinished()
            d.addCallback(self.requestFinished, req)

        # tell our status about them
        req_statuses = [req.status for req in self.requests]
        self.status.setBuildRequestStatuses(req_statuses)

        # now submit them
        for b, req in reqs:
            b.submitBuildRequest(req)

    def requestFinished(self, buildstatus, req):
        # TODO: this is where individual build status results are aggregated
        # into a BuildSet-wide status. Consider making a rule that says one
        # WARNINGS results in the overall status being WARNINGS too. The
        # current rule is that any FAILURE means FAILURE, otherwise you get
        # SUCCESS.
        self.requests.remove(req)
        results = buildstatus.getResults()
        if results == builder.FAILURE:
            self.status.setResults(results)
            if self.stillHopeful:
                # oh, cruel reality cuts deep. no joy for you. This is the
                # first failure. This flunks the overall BuildSet, so we can
                # notify success watchers that they aren't going to be happy.
                self.stillHopeful = False
                self.status.giveUpHope()
                self.status.notifySuccessWatchers()
        if not self.requests:
            # that was the last build, so we can notify finished watchers. If
            # we haven't failed by now, we can claim success.
            if self.stillHopeful:
                self.status.setResults(builder.SUCCESS)
                self.status.notifySuccessWatchers()
            self.status.notifyFinishedWatchers()
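# Minimal usage sketch (an assumption, not code from the original source):
# how a caller might submit a BuildSet against some builder objects and wait
# on the aggregate result. 'builders' is a hypothetical list of Builder
# objects exposing .name and .submitBuildRequest(), as used by start() above.
def submit_buildset_example(builders, source_stamp, reason="example"):
    bs = BuildSet(builderNames=[b.name for b in builders],
                  source=source_stamp, reason=reason)
    bs.start(builders)
    # fires once requestFinished() has run for every request; a FAILURE in
    # any build flunks the whole set, otherwise it reports SUCCESS
    return bs.waitUntilFinished()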
class TestProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()

    def testDictBehavior(self):
        # note that dictionary-like behavior is deprecated and not exposed to
        # users!
        self.props.setProperty("do-tests", 1, "scheduler")
        self.props.setProperty("do-install", 2, "scheduler")

        self.assert_(self.props.has_key('do-tests'))
        self.failUnlessEqual(self.props['do-tests'], 1)
        self.failUnlessEqual(self.props['do-install'], 2)
        self.assertRaises(KeyError, lambda: self.props['do-nothing'])
        self.failUnlessEqual(self.props.getProperty('do-install'), 2)
        self.assertIn('do-tests', self.props)
        self.assertNotIn('missing-do-tests', self.props)

    def testAsList(self):
        self.props.setProperty("happiness", 7, "builder")
        self.props.setProperty("flames", True, "tester")
        self.assertEqual(sorted(self.props.asList()),
                         [('flames', True, 'tester'),
                          ('happiness', 7, 'builder')])

    def testAsDict(self):
        self.props.setProperty("msi_filename", "product.msi", 'packager')
        self.props.setProperty("dmg_filename", "product.dmg", 'packager')
        self.assertEqual(self.props.asDict(),
                         dict(msi_filename=('product.msi', 'packager'),
                              dmg_filename=('product.dmg', 'packager')))

    def testUpdate(self):
        self.props.setProperty("x", 24, "old")
        newprops = {'a': 1, 'b': 2}
        self.props.update(newprops, "new")

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateRuntime(self):
        self.props.setProperty("x", 24, "old")
        newprops = {'a': 1, 'b': 2}
        self.props.update(newprops, "new", runtime=True)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')
        self.assertEqual(self.props.runtime, set(['a', 'b']))

    def testUpdateFromProperties(self):
        self.props.setProperty("a", 94, "old")
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new")
        newprops.setProperty('b', 2, "new")
        self.props.updateFromProperties(newprops)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateFromPropertiesNoRuntime(self):
        self.props.setProperty("a", 94, "old")
        self.props.setProperty("b", 84, "old")
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new", runtime=True)
        newprops.setProperty('b', 2, "new", runtime=False)
        newprops.setProperty('c', 3, "new", runtime=True)
        newprops.setProperty('d', 3, "new", runtime=False)
        self.props.updateFromPropertiesNoRuntime(newprops)

        self.failUnlessEqual(self.props.getProperty('a'), 94)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'old')
        self.failUnlessEqual(self.props.getProperty('b'), 2)
        self.failUnlessEqual(self.props.getPropertySource('b'), 'new')
        self.failUnlessEqual(self.props.getProperty('c'), None)  # not updated
        self.failUnlessEqual(self.props.getProperty('d'), 3)
        self.failUnlessEqual(self.props.getPropertySource('d'), 'new')
        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')

    # IProperties methods

    def test_getProperty(self):
        self.props.properties['p1'] = (['p', 1], 'test')
        self.assertEqual(self.props.getProperty('p1'), ['p', 1])

    def test_getProperty_default_None(self):
        self.assertEqual(self.props.getProperty('p1'), None)

    def test_getProperty_default(self):
        self.assertEqual(self.props.getProperty('p1', 2), 2)

    def test_hasProperty_false(self):
        self.assertFalse(self.props.hasProperty('x'))

    def test_hasProperty_true(self):
        self.props.properties['x'] = (False, 'test')
        self.assertTrue(self.props.hasProperty('x'))

    def test_has_key_false(self):
        self.assertFalse(self.props.has_key('x'))

    def test_setProperty(self):
        self.props.setProperty('x', 'y', 'test')
        self.assertEqual(self.props.properties['x'], ('y', 'test'))
        self.assertNotIn('x', self.props.runtime)

    def test_setProperty_runtime(self):
        self.props.setProperty('x', 'y', 'test', runtime=True)
        self.assertEqual(self.props.properties['x'], ('y', 'test'))
        self.assertIn('x', self.props.runtime)

    def test_setProperty_no_source(self):
        self.assertRaises(TypeError, lambda: self.props.setProperty('x', 'y'))

    def test_getProperties(self):
        self.assertIdentical(self.props.getProperties(), self.props)

    def test_getBuild(self):
        self.assertIdentical(self.props.getBuild(), self.props.build)

    def test_render(self):
        class FakeRenderable(object):
            implements(IRenderable)

            def getRenderingFor(self, props):
                return props.getProperty('x') + 'z'
        self.props.setProperty('x', 'y', 'test')
        self.assertEqual(self.props.render(FakeRenderable()), 'yz')
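# Illustrative sketch (an assumption, not part of the test suite) of the
# pattern test_render exercises: any object declaring IRenderable can be
# passed to Properties.render(), and gets the Properties instance to
# interpolate from at render time. Assumes the same module context as the
# tests above (Properties, IRenderable, implements).
class UppercaseProperty(object):
    implements(IRenderable)

    def __init__(self, name):
        self.name = name

    def getRenderingFor(self, props):
        # look the property up at render time, not at construction time
        return str(props.getProperty(self.name)).upper()

# usage: props.render(UppercaseProperty('branch')) -> e.g. 'TRUNK'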
class AbstractBuildSlave(config.ReconfigurableServiceMixin, pb.Avatar,
                         service.MultiService):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['slaves'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a build slave -- a remote machine capable of running builds.
    I am instantiated by the configuration file, and can be subclassed to
    add extra functionality."""

    implements(IBuildSlave)
    keepalive_timer = None
    keepalive_interval = None

    def __init__(self, name, password, max_builds=None,
                 notify_on_missing=[], missing_timeout=3600,
                 properties={}, locks=None, keepalive_interval=3600):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this buildslave (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this slave
        @type properties: dictionary
        @param locks: A list of locks that must be acquired before this slave
                      can be used
        @type locks: dictionary
        """
        service.MultiService.__init__(self)
        self.slavename = name
        self.password = password

        # PB registration
        self.registration = None
        self.registered_port = None

        # these are set when the service is started, and unset when it is
        # stopped
        self.botmaster = None
        self.master = None

        self.slave_status = SlaveStatus(name)
        self.slave = None  # a RemoteReference to the Bot, when connected
        self.slave_commands = None
        self.slavebuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.lock_subscriptions = []

        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")

        self.lastMessageReceived = 0
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            if not isinstance(i, str):
                raise config.ConfigErrors(
                    ['notify_on_missing arg %r is not a string' % (i,)])
        self.missing_timeout = missing_timeout
        self.missing_timer = None
        self.keepalive_interval = keepalive_interval

        self.detached_subs = None

        self._old_builder_list = None

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.slavename)

    def updateLocks(self):
        """Convert the L{LockAccess} objects in C{self.locks} into real lock
        objects, while also maintaining the subscriptions to lock
        releases."""
        # unsubscribe from any old locks
        for s in self.lock_subscriptions:
            s.unsubscribe()

        # convert locks into their real form
        locks = []
        for access in self.access:
            if not isinstance(access, LockAccess):
                access = access.defaultAccess()
            lock = self.botmaster.getLockByID(access.lockid)
            locks.append((lock, access))
        self.locks = [(l.getLock(self), la) for l, la in locks]
        self.lock_subscriptions = [l.subscribeToReleases(self._lockReleased)
                                   for l, la in self.locks]

    def locksAvailable(self):
        """
        I am called to see if all the locks I depend on are available;
        if so I return True, otherwise I return False.
        """
        if not self.locks:
            return True
        for lock, access in self.locks:
            if not lock.isAvailable(access):
                return False
        return True

    def acquireLocks(self):
        """
        I am called when a build is preparing to run. I try to claim all
        the locks that are needed for a build to happen. If I can't, then
        my caller should give up the build and try to get another slave
        to look at it.
        """
        log.msg("acquireLocks(slave %s, locks %s)" % (self, self.locks))
        if not self.locksAvailable():
            log.msg("slave %s can't lock, giving up" % (self,))
            return False
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return True

    def releaseLocks(self):
        """
        I am called to release any locks after a build has finished.
        """
        log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)

    def _lockReleased(self):
        """One of the locks for this slave was released; try scheduling
        builds."""
        if not self.botmaster:
            return  # oh well..
        self.botmaster.maybeStartBuildsForSlave(self.slavename)

    def startService(self):
        self.updateLocks()
        self.startMissingTimer()
        return service.MultiService.startService(self)

    def reconfigService(self, new_config):
        # Given a new BuildSlave, configure this one identically. Because
        # BuildSlave objects are remotely referenced, we can't replace them
        # without disconnecting the slave, yet there's no reason to do that.
        new = self.findNewSlaveInstance(new_config)

        assert self.slavename == new.slavename

        # do we need to re-register?
        if (not self.registration or
                self.password != new.password or
                new_config.slavePortnum != self.registered_port):
            if self.registration:
                self.registration.unregister()
            self.password = new.password
            self.registered_port = new_config.slavePortnum
            self.registration = self.master.pbmanager.register(
                self.registered_port, self.slavename,
                self.password, self.getPerspective)

        # adopt new instance's configuration parameters
        self.max_builds = new.max_builds
        self.access = new.access
        self.notify_on_missing = new.notify_on_missing
        self.keepalive_interval = new.keepalive_interval

        if self.missing_timeout != new.missing_timeout:
            running_missing_timer = self.missing_timer
            self.stopMissingTimer()
            self.missing_timeout = new.missing_timeout
            if running_missing_timer:
                self.startMissingTimer()

        self.properties = Properties()
        self.properties.updateFromProperties(new.properties)

        self.updateLocks()

        # update the attached slave's notion of which builders are attached
        d = self.updateSlave()

        # and chain up
        d.addCallback(lambda _:
                      config.ReconfigurableServiceMixin.reconfigService(
                          self, new_config))

        return d

    def stopService(self):
        self.stopMissingTimer()
        return service.MultiService.stopService(self)

    def findNewSlaveInstance(self, new_config):
        # TODO: called multiple times per reconfig; use 1-element cache?
        for sl in new_config.slaves:
            if sl.slavename == self.slavename:
                return sl
        assert 0, "no new slave named '%s'" % self.slavename

    def startMissingTimer(self):
        if self.notify_on_missing and self.missing_timeout and self.parent:
            self.stopMissingTimer()  # in case it's already running
            self.missing_timer = reactor.callLater(self.missing_timeout,
                                                   self._missing_timer_fired)

    def stopMissingTimer(self):
        if self.missing_timer:
            self.missing_timer.cancel()
            self.missing_timer = None

    def getPerspective(self, mind, slavename):
        assert slavename == self.slavename
        metrics.MetricCountEvent.log("attached_slaves", 1)

        # record when this connection attempt occurred
        if self.slave_status:
            self.slave_status.recordConnectTime()

        if self.isConnected():
            # duplicate slave - send it to arbitration
            arb = botmaster.DuplicateSlaveArbitrator(self)
            return arb.getPerspective(mind, slavename)
        else:
            log.msg("slave '%s' attaching from %s" %
                    (slavename, mind.broker.transport.getPeer()))
            return self

    def doKeepalive(self):
        self.keepalive_timer = reactor.callLater(self.keepalive_interval,
                                                 self.doKeepalive)
        if not self.slave:
            return
        d = self.slave.callRemote("print", "Received keepalive from master")
        d.addErrback(log.msg, "Keepalive failed for '%s'" % (self.slavename,))

    def stopKeepaliveTimer(self):
        if self.keepalive_timer:
            self.keepalive_timer.cancel()

    def startKeepaliveTimer(self):
        assert self.keepalive_interval
        log.msg("Starting buildslave keepalive timer for '%s'" %
                (self.slavename,))
        self.doKeepalive()

    def isConnected(self):
        return self.slave

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return

        buildmaster = self.botmaster.master
        status = buildmaster.getStatus()
        text = "The Buildbot working for '%s'\n" % status.getTitle()
        text += ("has noticed that the buildslave named %s went away\n" %
                 self.slavename)
        text += "\n"
        text += ("It last disconnected at %s (buildmaster-local time)\n" %
                 time.ctime(time.time() - self.missing_timeout))  # approx
        text += "\n"
        text += "The admin on record (as reported by BUILDSLAVE:info/admin)\n"
        text += "was '%s'.\n" % self.slave_status.getAdmin()
        text += "\n"
        text += "Sincerely,\n"
        text += " The Buildbot\n"
        text += " %s\n" % status.getTitleURL()
        subject = "Buildbot: buildslave %s was lost" % self.slavename
        return self._mail_missing_message(subject, text)

    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.slave:
            return self.sendBuilderList()
        else:
            return defer.succeed(None)

    def updateSlaveStatus(self, buildStarted=None, buildFinished=None):
        if buildStarted:
            self.slave_status.buildStarted(buildStarted)
        if buildFinished:
            self.slave_status.buildFinished(buildFinished)

    @metrics.countMethod('AbstractBuildSlave.attached()')
    def attached(self, bot):
        """This is called when the slave connects.

        @return: a Deferred that fires when the attachment is complete
        """

        # the botmaster should ensure this.
        assert not self.isConnected()

        metrics.MetricCountEvent.log("AbstractBuildSlave.attached_slaves", 1)

        # set up the subscription point for eventual detachment
        self.detached_subs = subscription.SubscriptionPoint("detached")

        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # we accumulate slave information in this 'state' dictionary, then
        # set it atomically if we make it far enough through the process
        state = {}

        # Reset graceful shutdown status
        self.slave_status.setGraceful(False)
        # We want to know when the graceful shutdown flag changes
        self.slave_status.addGracefulWatcher(self._gracefulChanged)

        d = defer.succeed(None)

        def _log_attachment_on_slave(res):
            d1 = bot.callRemote("print", "attached")
            d1.addErrback(lambda why: None)
            return d1
        d.addCallback(_log_attachment_on_slave)

        def _get_info(res):
            d1 = bot.callRemote("getSlaveInfo")

            def _got_info(info):
                log.msg("Got slaveinfo from '%s'" % self.slavename)
                # TODO: info{} might have other keys
                state["admin"] = info.get("admin")
                state["host"] = info.get("host")
                state["access_uri"] = info.get("access_uri", None)
                state["slave_environ"] = info.get("environ", {})
                state["slave_basedir"] = info.get("basedir", None)
                state["slave_system"] = info.get("system", None)

            def _info_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # maybe an old slave, doesn't implement remote_getSlaveInfo
                log.msg("BuildSlave.info_unavailable")
                log.err(why)
            d1.addCallbacks(_got_info, _info_unavailable)
            return d1
        d.addCallback(_get_info)
        self.startKeepaliveTimer()

        def _get_version(res):
            d1 = bot.callRemote("getVersion")

            def _got_version(version):
                state["version"] = version

            def _version_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # probably an old slave
                state["version"] = '(unknown)'
            d1.addCallbacks(_got_version, _version_unavailable)
            return d1
        d.addCallback(_get_version)

        def _get_commands(res):
            d1 = bot.callRemote("getCommands")

            def _got_commands(commands):
                state["slave_commands"] = commands

            def _commands_unavailable(why):
                # probably an old slave
                log.msg("BuildSlave._commands_unavailable")
                if why.check(AttributeError):
                    return
                log.err(why)
            d1.addCallbacks(_got_commands, _commands_unavailable)
            return d1
        d.addCallback(_get_commands)

        def _accept_slave(res):
            self.slave_status.setAdmin(state.get("admin"))
            self.slave_status.setHost(state.get("host"))
            self.slave_status.setAccessURI(state.get("access_uri"))
            self.slave_status.setVersion(state.get("version"))
            self.slave_status.setConnected(True)
            self.slave_commands = state.get("slave_commands")
            self.slave_environ = state.get("slave_environ")
            self.slave_basedir = state.get("slave_basedir")
            self.slave_system = state.get("slave_system")
            self.slave = bot
            if self.slave_system == "win32":
                self.path_module = namedModule("win32path")
            else:
                # most everything accepts / as separator, so posix should be
                # a reasonable fallback
                self.path_module = namedModule("posixpath")
            log.msg("bot attached")
            self.messageReceivedFromSlave()
            self.stopMissingTimer()
            self.botmaster.master.status.slaveConnected(self.slavename)

            return self.updateSlave()
        d.addCallback(_accept_slave)
        d.addCallback(lambda _:
                      self.botmaster.maybeStartBuildsForSlave(self.slavename))

        # Finally, the slave gets a reference to this BuildSlave. They
        # receive this later, after we've started using them.
        d.addCallback(lambda _: self)
        return d

    def messageReceivedFromSlave(self):
        now = time.time()
        self.lastMessageReceived = now
        self.slave_status.setLastMessageReceived(now)

    def detached(self, mind):
        metrics.MetricCountEvent.log("AbstractBuildSlave.attached_slaves", -1)
        self.slave = None
        self._old_builder_list = []
        self.slave_status.removeGracefulWatcher(self._gracefulChanged)
        self.slave_status.setConnected(False)
        log.msg("BuildSlave.detached(%s)" % self.slavename)
        self.botmaster.master.status.slaveDisconnected(self.slavename)
        self.stopKeepaliveTimer()

        # notify watchers, but do so in the next reactor iteration so that
        # any further detached() action by subclasses happens first
        def notif():
            subs = self.detached_subs
            self.detached_subs = None
            subs.deliver()
        reactor.callLater(0, notif)

    def subscribeToDetach(self, callback):
        """
        Request that C{callback} be invoked with no arguments when the
        L{detached} method is invoked.

        @returns: L{Subscription}
        """
        assert self.detached_subs, "detached_subs is only set if attached"
        return self.detached_subs.subscribe(callback)

    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """
        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)
        # When this Deferred fires, we'll be ready to accept the new slave
        return self._disconnect(self.slave)

    def _disconnect(self, slave):
        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback with one argument, the
        # RemoteReference being disconnected.
        def _disconnected(rref):
            reactor.callLater(0, d.callback, None)
        slave.notifyOnDisconnect(_disconnected)
        tport = slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            #tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
        except:
            # however, these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
        log.msg("waiting for slave to finish disconnecting")
        return d

    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
        blist = [(b.name, b.config.slavebuilddir) for b in our_builders]
        if blist == self._old_builder_list:
            return defer.succeed(None)

        d = self.slave.callRemote("setBuilderList", blist)

        def sentBuilderList(ign):
            self._old_builder_list = blist
            return ign
        d.addCallback(sentBuilderList)
        return d

    def perspective_keepalive(self):
        self.messageReceivedFromSlave()

    def perspective_shutdown(self):
        log.msg("slave %s wants to shut down" % self.slavename)
        self.slave_status.setGraceful(True)

    def addSlaveBuilder(self, sb):
        self.slavebuilders[sb.builder_name] = sb

    def removeSlaveBuilder(self, sb):
        try:
            del self.slavebuilders[sb.builder_name]
        except KeyError:
            pass

    def buildFinished(self, sb):
        """This is called when a build on this slave is finished."""
        self.botmaster.maybeStartBuildsForSlave(self.slavename)

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this buildslave
        can start a build. This function can be used to limit overall
        concurrency on the buildslave.

        Note for subclassers: if a slave can become willing to start a build
        without any action on that slave (for example, by a resource in use
        on another slave becoming available), then you must arrange for
        L{maybeStartBuildsForSlave} to be called at that time, or builds on
        this slave will not start.
        """
        # If we're waiting to shutdown gracefully, then we shouldn't
        # accept any new jobs.
        if self.slave_status.getGraceful():
            return False

        if self.max_builds:
            active_builders = [sb for sb in self.slavebuilders.values()
                               if sb.isBusy()]
            if len(active_builders) >= self.max_builds:
                return False

        if not self.locksAvailable():
            return False

        return True

    def _mail_missing_message(self, subject, text):
        # first, see if we have a MailNotifier we can use. This gives us a
        # fromaddr and a relayhost.
        buildmaster = self.botmaster.master
        for st in buildmaster.statusTargets:
            if isinstance(st, MailNotifier):
                break
        else:
            # if not, they get a default MailNotifier, which always uses SMTP
            # to localhost and uses a dummy fromaddr of "buildbot".
            log.msg("buildslave-missing msg using default MailNotifier")
            st = MailNotifier("buildbot")
        # now construct the mail
        m = Message()
        m.set_payload(text)
        m['Date'] = formatdate(localtime=True)
        m['Subject'] = subject
        m['From'] = st.fromaddr
        recipients = self.notify_on_missing
        m['To'] = ", ".join(recipients)
        d = st.sendMessage(m, recipients)
        # return the Deferred for testing purposes
        return d

    def _gracefulChanged(self, graceful):
        """This is called when our graceful shutdown setting changes"""
        self.maybeShutdown()

    @defer.deferredGenerator
    def shutdown(self):
        """Shutdown the slave"""
        if not self.slave:
            log.msg("no remote; slave is already shut down")
            return

        # First, try the "new" way - calling our own remote's shutdown
        # method. The method was only added in 0.8.3, so ignore NoSuchMethod
        # failures.
        def new_way():
            d = self.slave.callRemote('shutdown')
            d.addCallback(lambda _: True)  # successful shutdown request

            def check_nsm(f):
                f.trap(pb.NoSuchMethod)
                return False  # fall through to the old way
            d.addErrback(check_nsm)

            def check_connlost(f):
                f.trap(pb.PBConnectionLost)
                return True  # the slave is gone, so call it finished
            d.addErrback(check_connlost)
            return d

        wfd = defer.waitForDeferred(new_way())
        yield wfd
        if wfd.getResult():
            return  # done!

        # Now, the old way. Look for a builder with a remote reference to the
        # client side slave. If we can find one, then call "shutdown" on the
        # remote builder, which will cause the slave buildbot process to
        # exit.
        def old_way():
            d = None
            for b in self.slavebuilders.values():
                if b.remote:
                    d = b.remote.callRemote("shutdown")
                    break

            if d:
                log.msg("Shutting down (old) slave: %s" % self.slavename)

                # The remote shutdown call will not complete successfully
                # since the buildbot process exits almost immediately after
                # getting the shutdown request.
                # Here we look at the reason why the remote call failed, and
                # if it's because the connection was lost, that means the
                # slave shut down as expected.
                def _errback(why):
                    if why.check(pb.PBConnectionLost):
                        log.msg("Lost connection to %s" % self.slavename)
                    else:
                        log.err("Unexpected error when trying to shutdown %s"
                                % self.slavename)
                d.addErrback(_errback)
                return d
            log.err("Couldn't find remote builder to shut down slave")
            return defer.succeed(None)

        wfd = defer.waitForDeferred(old_way())
        yield wfd
        wfd.getResult()

    def maybeShutdown(self):
        """Shut down this slave if it has been asked to shut down
        gracefully, and has no active builders."""
        if not self.slave_status.getGraceful():
            return
        active_builders = [sb for sb in self.slavebuilders.values()
                           if sb.isBusy()]
        if active_builders:
            return
        d = self.shutdown()
        d.addErrback(log.err, 'error while shutting down slave')
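# Hedged sketch of the subclassing hook described in canStartBuild's
# docstring. This class is hypothetical, not in the original source: a slave
# that also refuses builds while some external resource is busy. Per the
# docstring, whoever frees the resource must arrange for
# botmaster.maybeStartBuildsForSlave() to be called, or queued builds will
# never start.
class ResourceGatedBuildSlave(AbstractBuildSlave):
    def __init__(self, name, password, resource, **kwargs):
        AbstractBuildSlave.__init__(self, name, password, **kwargs)
        self.resource = resource  # hypothetical object exposing .isBusy()

    def canStartBuild(self):
        # gate on the external resource first, then fall back to the
        # graceful-shutdown / max_builds / locks checks in the base class
        if self.resource.isBusy():
            return False
        return AbstractBuildSlave.canStartBuild(self)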
# start() is written as an inlineCallbacks generator (it uses yield and
# defer.returnValue), so it needs the decorator to run correctly.
@defer.inlineCallbacks
def start(self):
    config = yield self.getStepConfig()

    ss = self.build.getSourceStamp('')
    got = self.build.getProperty('got_revision')
    if got:
        ss = ss.getAbsoluteSourceStamp(got)

    # Stop the build early if .travis.yml says we should ignore branch
    if ss.branch and not config.can_build_branch(ss.branch):
        defer.returnValue(self.end(SUCCESS))

    # Find the master object
    master = self.build.builder.botmaster.parent

    # Find the scheduler we are going to use to queue actual builds
    all_schedulers = master.allSchedulers()
    all_schedulers = dict((sch.name, sch) for sch in all_schedulers)
    sch = all_schedulers[self.scheduler]

    triggered = []

    self.running = True

    for env in config.matrix:
        props_to_set = Properties()
        props_to_set.updateFromProperties(self.build.getProperties())
        props_to_set.update(env["env"], ".travis.yml")
        props_to_set.setProperty("spawned_by",
                                 self.build.build_status.number,
                                 "Scheduler")

        ss_setid = yield ss.getSourceStampSetId(master)
        triggered.append(sch.trigger(ss_setid, set_props=props_to_set))

    results = yield defer.DeferredList(triggered, consumeErrors=1)

    was_exception = was_failure = False
    brids = {}
    # use a distinct loop variable so we don't shadow the 'results' list
    for was_cb, res in results:
        if isinstance(res, tuple):
            res, some_brids = res
            brids.update(some_brids)

        if not was_cb:
            was_exception = True
            log.err(res)
            continue

        if res == FAILURE:
            was_failure = True

    if was_exception:
        result = EXCEPTION
    elif was_failure:
        result = FAILURE
    else:
        result = SUCCESS

    if brids:
        brid_to_bn = dict((_brid, _bn) for _bn, _brid in brids.iteritems())
        res = yield defer.DeferredList(
            [master.db.builds.getBuildsForRequest(br)
             for br in brids.values()],
            consumeErrors=1)
        for was_cb, builddicts in res:
            if was_cb:
                for build in builddicts:
                    bn = brid_to_bn[build['brid']]
                    num = build['number']
                    url = master.status.getURLForBuild(bn, num)
                    self.step_status.addURL("%s #%d" % (bn, num), url)

    defer.returnValue(self.end(result))
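# Standalone sketch of the result-aggregation rule applied above (an
# assumption for illustration: plain result constants, with the optional
# brids tuples already unpacked). Any errback outranks any FAILURE, which
# outranks all-SUCCESS -- the same precedence start() applies to the
# DeferredList outcome.
def aggregate_trigger_results(deferred_list_results):
    was_exception = any(not was_cb for was_cb, _ in deferred_list_results)
    was_failure = any(was_cb and res == FAILURE
                      for was_cb, res in deferred_list_results)
    if was_exception:
        return EXCEPTION
    if was_failure:
        return FAILURE
    return SUCCESS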