Example No. 1
class TestProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()

    def testDictBehavior(self):
        self.props.setProperty("do-tests", 1, "scheduler")
        self.props.setProperty("do-install", 2, "scheduler")

        self.assert_(self.props.has_key('do-tests'))
        self.failUnlessEqual(self.props['do-tests'], 1)
        self.failUnlessEqual(self.props['do-install'], 2)
        self.assertRaises(KeyError, lambda : self.props['do-nothing'])
        self.failUnlessEqual(self.props.getProperty('do-install'), 2)

    def testUpdate(self):
        self.props.setProperty("x", 24, "old")
        newprops = { 'a' : 1, 'b' : 2 }
        self.props.update(newprops, "new")

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateFromProperties(self):
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new")
        newprops.setProperty('b', 2, "new")
        self.props.updateFromProperties(newprops)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')
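
A minimal standalone sketch of the API this test exercises, assuming buildbot is installed and that Properties is importable from buildbot.process.properties (the examples on this page omit their imports):

from buildbot.process.properties import Properties

props = Properties()
props.setProperty("do-tests", 1, "scheduler")   # (name, value, source)
props.update({"a": 1, "b": 2}, "new")           # bulk-set; one source for the whole dict

print(props.getProperty("do-tests"))       # 1
print(props.getProperty("missing", 0))     # getProperty accepts an optional default
print(props.getPropertySource("a"))        # "new" -- each property remembers its source
print(props["do-tests"])                   # dict-style access; raises KeyError if absent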
Example No. 2
 def perspective_requestBuild(self, buildername, reason, branch, revision, properties={}):
     c = interfaces.IControl(self.master)
     bc = c.getBuilder(buildername)
     ss = SourceStamp(branch, revision)
     bpr = Properties()
     bpr.update(properties, "remote requestBuild")
     return bc.submitBuildRequest(ss, reason, bpr)
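
Note the mutable default argument (properties={}): Python evaluates it once, so the same dict is shared across every call. It is harmless here only because the dict is never mutated; later examples on this page switch to a None sentinel. A small sketch of that idiom, with a hypothetical function name:

def request_build_properties(properties=None):
    if properties is None:
        properties = {}      # fresh dict per call; safe to mutate
    properties.setdefault("requested", True)
    return properties

# each call gets its own dict, so mutations cannot leak between requests
assert request_build_properties() is not request_build_properties()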
Example No. 3
    def perspective_try(self, branch, revision, patch, repository, project,
                        builderNames, properties={}):
        log.msg("user %s requesting build on builders %s" % (self.username,
                                                             builderNames))
        # build the intersection of the request and our configured list
        builderNames = self.parent.filterBuilderList(builderNames)
        if not builderNames:
            return
        ss = SourceStamp(branch, revision, patch, repository=repository,
                         project=project)
        reason = "'try' job from user %s" % self.username

        # roll the specified props in with our inherited props
        combined_props = Properties()
        combined_props.updateFromProperties(self.parent.properties)
        combined_props.update(properties, "try build")

        status = self.parent.parent.parent.status
        db = self.parent.parent.db
        d = db.runInteraction(self._try, ss, builderNames, reason,
                              combined_props, db)
        def _done(bsid):
            # return a remotely-usable BuildSetStatus object
            bss = BuildSetStatus(bsid, status, db)
            from buildbot.status.client import makeRemote
            r = makeRemote(bss)
            #self.parent.parent.loop_done() # so it will notify builder loop
            return r
        d.addCallback(_done)
        return d
Example No. 4
    def getSchedulersAndProperties(self):
        sch = self.schedulerNames[0]
        triggered_schedulers = []
        for env in self.config.matrix:
            props_to_set = Properties()
            props_to_set.setProperty("TRAVIS_PULL_REQUEST",
                                     self.getProperty("TRAVIS_PULL_REQUEST"),
                                     "inherit")
            flat_env = {}
            for k, v in env.items():
                if k == "env":
                    props_to_set.update(v, ".travis.yml")
                    flat_env.update(v)
                else:
                    props_to_set.setProperty(k, v, ".travis.yml")
                    flat_env[k] = v
            props_to_set.setProperty(
                "reason",
                u" | ".join(
                    sorted(str(k) + '=' + str(v)
                           for k, v in flat_env.items())),
                "spawner")

            triggered_schedulers.append((sch, props_to_set))
        return triggered_schedulers
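
The "reason" property above is built by flattening each matrix entry: nested "env" dicts and top-level keys collapse into one flat dict, which is rendered as a sorted key=value string. A standalone sketch of just that flattening, with an illustrative matrix entry:

env = {"env": {"CC": "gcc", "OPT": "-O2"}, "python": "3.6"}

flat_env = {}
for k, v in env.items():
    if k == "env":
        flat_env.update(v)   # nested env vars merge into the flat dict
    else:
        flat_env[k] = v      # other keys pass through unchanged

reason = " | ".join(sorted("%s=%s" % (k, v) for k, v in flat_env.items()))
print(reason)  # CC=gcc | OPT=-O2 | python=3.6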
Example No. 5
    def perspective_try(self, branch, revision, patch, builderNames, properties={}):
        log.msg("user %s requesting build on builders %s" % (self.username,
                                                             builderNames))
        for b in builderNames:
            if b not in self.parent.builderNames:
                log.msg("%s got job with builder %s" % (self, b))
                log.msg(" but that wasn't in our list: %s"
                        % (self.parent.builderNames,))
                return
        ss = SourceStamp(branch, revision, patch)
        reason = "'try' job from user %s" % self.username

        # roll the specified props in with our inherited props
        combined_props = Properties()
        combined_props.updateFromProperties(self.parent.properties)
        combined_props.update(properties, "try build")

        bs = buildset.BuildSet(builderNames, 
                               ss,
                               reason=reason, 
                               properties=combined_props)

        self.parent.submitBuildSet(bs)

        # return a remotely-usable BuildSetStatus object
        from buildbot.status.client import makeRemote
        return makeRemote(bs.status)
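
The "roll the specified props in" step relies on update order: whatever is written last wins, so per-request properties override scheduler-inherited ones. A minimal sketch of that precedence, assuming buildbot's Properties is importable:

from buildbot.process.properties import Properties

inherited = Properties()
inherited.setProperty("opt", "-O0", "Scheduler")

combined = Properties()
combined.updateFromProperties(inherited)        # copies values along with their sources
combined.update({"opt": "-O2"}, "try build")    # applied second, so it overwrites

print(combined.getProperty("opt"))         # -O2
print(combined.getPropertySource("opt"))   # try build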
Example No. 6
class Try_Userpass(TryBase):
    compare_attrs = ( 'name', 'builderNames', 'port', 'userpass', 'properties' )

    def __init__(self, name, builderNames, port, userpass,
                 properties={}):
        base.BaseScheduler.__init__(self, name, builderNames, properties)
        self.port = port
        self.userpass = userpass
        self.properties = Properties()
        self.properties.update(properties, 'Scheduler')

    def startService(self):
        TryBase.startService(self)
        master = self.parent.parent

        # register each user/passwd with the pbmanager
        def factory(mind, username):
            return Try_Userpass_Perspective(self, username)
        self.registrations = []
        for user, passwd in self.userpass:
            self.registrations.append(
                    master.pbmanager.register(self.port, user, passwd, factory))

    def stopService(self):
        d = defer.maybeDeferred(TryBase.stopService, self)
        def unreg(_):
            return defer.gatherResults(
                [ reg.unregister() for reg in self.registrations ])
        d.addCallback(unreg)
        return d  # let service shutdown wait for the unregistrations
Example No. 7
class BaseScheduler(service.MultiService, util.ComparableMixin):
    """
    A Scheduler creates BuildSets and submits them to the BuildMaster.

    @ivar name: name of the scheduler

    @ivar properties: additional properties specified in this 
        scheduler's configuration
    @type properties: Properties object
    """
    implements(interfaces.IScheduler)

    def __init__(self, name, properties={}):
        """
        @param name: name for this scheduler

        @param properties: properties to be propagated from this scheduler
        @type properties: dict
        """
        service.MultiService.__init__(self)
        self.name = name
        self.properties = Properties()
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")

    def __repr__(self):
        # TODO: why can't id() return a positive number? %d is ugly.
        return "<Scheduler '%s' at %d>" % (self.name, id(self))

    def submitBuildSet(self, bs):
        self.parent.submitBuildSet(bs)

    def addChange(self, change):
        pass
Example No. 8
    @defer.deferredGenerator  # assumed decorator, omitted from the excerpt; required for the waitForDeferred/yield style below
    def perspective_try(self, branch, revision, patch, repository, project,
                        builderNames, properties={}):
        db = self.scheduler.master.db
        log.msg("user %s requesting build on builders %s" % (self.username,
                                                             builderNames))

        # build the intersection of the request and our configured list
        builderNames = self.scheduler.filterBuilderList(builderNames)
        if not builderNames:
            return

        wfd = defer.waitForDeferred(
                db.sourcestamps.createSourceStamp(branch=branch, revision=revision,
                    repository=repository, project=project, patch_level=patch[0],
                    patch_body=patch[1], patch_subdir=''))
                    # note: no way to specify patch subdir - #1769
        yield wfd
        ssid = wfd.getResult()

        reason = "'try' job from user %s" % self.username

        requested_props = Properties()
        requested_props.update(properties, "try build")
        wfd = defer.waitForDeferred(
                self.scheduler.addBuildsetForSourceStamp(ssid=ssid,
                        reason=reason, properties=requested_props,
                        builderNames=builderNames))
        yield wfd
        bsid = wfd.getResult()

        # return a remotely-usable BuildSetStatus object
        bss = BuildSetStatus(bsid, self.scheduler.master.status, db)
        from buildbot.status.client import makeRemote
        r = makeRemote(bss)
        yield r # return value
Example No. 9
def TryJobBaseGetProps(self, builder, options):
  """ Override of try_job_base.TryJobBase.get_props:
  http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/master/try_job_base.py?view=markup

  We modify it to add "baseurl".
  """
  keys = (
############################### Added by borenet ###############################
    'baseurl',
################################################################################
    'clobber',
    'issue',
    'patchset',
    'requester',
    'rietveld',
    'root',
    'try_job_key',
  )
  # All these settings have no meaning when False or not set, so don't set
  # them in that case.
  properties = dict((i, options[i]) for i in keys if options.get(i))
  properties['testfilter'] = options['bot'].get(builder, None)
  # pylint: disable=W0212
  props = Properties()
  props.updateFromProperties(self.properties)
  props.update(properties, self._PROPERTY_SOURCE)
  return props
Example No. 10
    @defer.inlineCallbacks  # assumed decorator, omitted from the excerpt; required for the yield/returnValue style below
    def perspective_try(self, branch, revision, patch, repository, project,
                        builderNames, who="", comment="", properties=None):
        log.msg("user %s requesting build on builders %s" % (self.username,
                                                             builderNames))
        if properties is None:
            properties = {}
        # build the intersection of the request and our configured list
        builderNames = self.scheduler.filterBuilderList(builderNames)
        if not builderNames:
            return

        reason = u"'try' job"

        if who:
            reason += u" by user %s" % ascii2unicode(who)

        if comment:
            reason += u" (%s)" % ascii2unicode(comment)

        sourcestamp = dict(
            branch=branch, revision=revision, repository=repository,
            project=project, patch_level=patch[0], patch_body=patch[1],
            patch_subdir='', patch_author=who or '',
            patch_comment=comment or '', codebase='',
        )           # note: no way to specify patch subdir - #1769

        requested_props = Properties()
        requested_props.update(properties, "try build")
        (bsid, brids) = yield self.scheduler.addBuildsetForSourceStamps(
            sourcestamps=[sourcestamp], reason=reason,
            properties=requested_props, builderNames=builderNames)

        # return a remotely-usable BuildSetStatus object
        bss = RemoteBuildSetStatus(self.scheduler.master, bsid, brids)
        defer.returnValue(bss)
Example No. 11
def MockBuild(my_builder, buildsetup, mastername, slavename, basepath=None,
              build_properties=None, slavedir=None):
  """Given a builder object and configuration, mock a Buildbot setup around it.

  This sets up a mock BuildMaster, BuildSlave, Build, BuildStatus, and all other
  superstructure required for BuildSteps inside the provided builder to render
  properly. These BuildSteps are returned to the user in an array. It
  additionally returns the build object (in order to get its properties if
  desired).

  buildsetup is passed straight into the FakeSource's init method and
  contains sourcestamp information (revision, branch, etc).

  basepath is the directory of the build (what goes under build/slave/, for
  example 'Chromium_Linux_Builder'). It is nominally inferred from the builder
  name, but it can be overridden. This is useful when pointing the buildrunner
  at a different builder than the one it's running under.

  build_properties, if given, updates and overrides the build's properties
  after all builder-derived defaults have been set.
  """

  my_factory = my_builder['factory']
  steplist = ListSteps(my_factory)

  build = base.Build([FakeRequest(buildsetup)])
  safename = buildbot.util.safeTranslate(my_builder['name'])
  if not basepath: basepath = safename
  if not slavedir: slavedir = os.path.join(SCRIPT_DIR,
                                           '..', '..', 'slave')
  basedir = os.path.join(slavedir, basepath)
  build.basedir = basedir
  builderstatus = builder.BuilderStatus('test')
  builderstatus.nextBuildNumber = 2
  builderstatus.basedir = basedir
  my_builder['builddir'] = safename
  my_builder['slavebuilddir'] = safename
  mybuilder = real_builder.Builder(my_builder, builderstatus)
  build.setBuilder(mybuilder)
  build_status = build_module.BuildStatus(builderstatus, 1)

  build_status.setProperty('blamelist', [], 'Build')
  build_status.setProperty('mastername', mastername, 'Build')
  build_status.setProperty('slavename', slavename, 'Build')
  build_status.setProperty('gtest_filter', [], 'Build')

  # if build_properties are passed in, overwrite the defaults above:
  buildprops = Properties()
  if build_properties:
    buildprops.update(build_properties, 'Botmaster')
  mybuilder.setBotmaster(FakeBotmaster(mastername, buildprops))

  buildslave = FakeSlave(safename, slavename)
  build.build_status = build_status
  build.setupSlaveBuilder(buildslave)
  build.setupProperties()
  process_steps(steplist, build, buildslave, build_status, basedir)

  return steplist, build
Example No. 12
 def createTriggerProperties(self):
     # make a new properties object from a dict rendered by the old
     # properties object
     trigger_properties = Properties()
     trigger_properties.update(self.set_properties, "Trigger")
     for prop in trigger_properties.asDict():
         if prop not in self.set_properties:
             self.set_properties[prop] = Property(prop)
     return trigger_properties
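
The loop above back-fills set_properties with Property(name) placeholders for any name it did not set concretely. Property is buildbot's renderable: it resolves against the build's properties only at render time, so the triggered build sees the triggering build's current value. A small sketch, assuming the usual import path:

from buildbot.process.properties import Property

set_properties = {"who": "trybot"}               # concrete value, passed through as-is
for name in ("branch", "revision"):
    if name not in set_properties:
        set_properties[name] = Property(name)    # placeholder, rendered per build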
Example No. 13
    @defer.inlineCallbacks  # assumed decorator, omitted from the excerpt; required for the yield/returnValue style below
    def perspective_try(self, branch, revision, patch, repository, taskId, project,
                        builderNames, who="", comment="", properties=None):
        if properties is None:
            properties = {}  # this method mutates properties below; a shared default dict would leak between calls
        db = self.scheduler.master.db
        log.msg("user %s requesting build on builders %s" % (self.username,
                                                             builderNames))

        # build the intersection of the request and our configured list
        builderNames = self.scheduler.filterBuilderList(builderNames)
        if not builderNames:
            log.msg("incoming Try job from user %s did not specify a valid job id" % self.username)
            return

        # Mx specific
        if self.scheduler.auto:
            if not taskId:
                log.msg("incoming Try auto job from user %s did not specify a task id" % self.username)
                return
            issue = self.jiraClient.getIssue(taskId) 
            if not issue: 
                log.msg("incoming Try auto job from user %s did not specify a valid task id" % self.username)
                return
            properties["tpks"]=issue["tpks"]
            properties["taskId"]=taskId
            log.msg("incoming Try auto job from user %s properties %s" % (self.username, properties["tpks"]) ) 

        reason = "'try' job"

        if who:
            reason += " by user %s" % who

        if comment:
            reason += " (%s)" % comment

        sourcestampsetid = yield db.sourcestampsets.addSourceStampSet()

        yield db.sourcestamps.addSourceStamp(
            branch=branch, revision=revision, repository=repository,
            project=project, patch_level=patch[0], patch_body=patch[1],
            patch_subdir='', patch_author=who or '',
            patch_comment=comment or '',
            sourcestampsetid=sourcestampsetid)
                    # note: no way to specify patch subdir - #1769

        requested_props = Properties()
        requested_props.update(properties, "try build")
        (bsid, brids) = yield self.scheduler.addBuildsetForSourceStamp(
                setid=sourcestampsetid, reason=reason,
                properties=requested_props, builderNames=builderNames)

        # return a remotely-usable BuildSetStatus object
        bsdict = yield db.buildsets.getBuildset(bsid)

        bss = BuildSetStatus(bsdict, self.scheduler.master.status)
        from buildbot.status.client import makeRemote
        defer.returnValue(makeRemote(bss))
Example No. 14
 def create_buildset(setid):
     reason = "'try' job"
     if parsed_job['who']:
         reason += " by user %s" % parsed_job['who']
     properties = parsed_job['properties']
     requested_props = Properties()
     requested_props.update(properties, "try build")
     return self.addBuildsetForSourceStamp(
         ssid=None, setid=setid,
         reason=reason, external_idstring=parsed_job['jobid'],
         builderNames=builderNames, properties=requested_props)
Example No. 15
 def _createBuildset(self, ssid, job):
   properties = Properties()
   properties.update(job, 'Job JSON')
   builderNames = self.filterBuilderList([job.get('buildername', None)])
   if not builderNames:
     log.msg("Job did not specify any allowed builder names")
     return defer.succeed(None)
   return self.addBuildsetForSourceStamp(
       ssid,
       builderNames=builderNames,
       reason=job.get('reason', 'Job from JsonScheduler'),
       properties=properties)
Example No. 16
    @defer.inlineCallbacks  # assumed decorator, omitted from the excerpt; required for the yield/returnValue style below
    def handleJobFile(self, filename, f):
        try:
            parsed_job = self.parseJob(f)
            builderNames = parsed_job['builderNames']
        except BadJobfile:
            log.msg("%s reports a bad jobfile in %s" % (self, filename))
            log.err()
            defer.returnValue(None)
            return

        # Validate/fixup the builder names.
        builderNames = self.filterBuilderList(builderNames)
        if not builderNames:
            log.msg(
                "incoming Try job did not specify any allowed builder names")
            defer.returnValue(None)
            return

        who = ""
        if parsed_job['who']:
            who = parsed_job['who']

        comment = ""
        if parsed_job['comment']:
            comment = parsed_job['comment']

        setid = yield self.master.db.sourcestampsets.addSourceStampSet()
        yield self.master.db.sourcestamps.addSourceStamp(
            sourcestampsetid=setid,
            branch=parsed_job['branch'],
            revision=parsed_job['baserev'],
            patch_body=parsed_job['patch_body'],
            patch_level=parsed_job['patch_level'],
            patch_author=who,
            patch_comment=comment,
            patch_subdir='',  # TODO: can't set this remotely - #1769
            project=parsed_job['project'],
            repository=parsed_job['repository'])

        reason = "'try' job"
        if parsed_job['who']:
            reason += " by user %s" % parsed_job['who']
        properties = parsed_job['properties']
        requested_props = Properties()
        requested_props.update(properties, "try build")
        bsid, brids = yield self.addBuildsetForSourceStamp(
            ssid=None, setid=setid,
            reason=reason,
            external_idstring=parsed_job['jobid'],
            builderNames=builderNames,
            properties=requested_props)
        defer.returnValue((bsid, brids))
Example No. 17
    def createTriggerProperties(self):
        properties = self.build.getProperties()

        # make a new properties object from a dict rendered by the old 
        # properties object
        trigger_properties = Properties()
        trigger_properties.update(self.set_properties, "Trigger")
        for p in self.copy_properties:
            if p not in properties:
                continue
            trigger_properties.setProperty(p, properties[p],
                        "%s (in triggering build)" % properties.getPropertySource(p))
        return trigger_properties
Example No. 18
    @defer.deferredGenerator  # assumed decorator, omitted from the excerpt; required for the waitForDeferred/yield style below
    def perspective_try(self, branch, revision, patch, repository, project,
                        builderNames, who="", comment="", properties={}):
        db = self.scheduler.master.db
        log.msg("user %s requesting build on builders %s" % (self.username,
                                                             builderNames))

        # build the intersection of the request and our configured list
        builderNames = self.scheduler.filterBuilderList(builderNames)
        if not builderNames:
            return

        reason = "'try' job"
        
        if who:
            reason += " by user %s" % who
            
        if comment:
            reason += " (%s)" % comment

        wfd = defer.waitForDeferred(db.sourcestampsets.addSourceStampSet())
        yield wfd
        sourcestampsetid = wfd.getResult()

        wfd = defer.waitForDeferred(
                db.sourcestamps.addSourceStamp(branch=branch, revision=revision,
                    repository=repository, project=project, patch_level=patch[0],
                    patch_body=patch[1], patch_subdir='', patch_author=who or '',
                    patch_comment=comment or '', sourcestampsetid = sourcestampsetid))
                    # note: no way to specify patch subdir - #1769
        yield wfd
        wfd.getResult()

        requested_props = Properties()
        requested_props.update(properties, "try build")
        wfd = defer.waitForDeferred(
                self.scheduler.addBuildsetForSourceStamp(setid=sourcestampsetid,
                        reason=reason, properties=requested_props,
                        builderNames=builderNames))
        yield wfd
        (bsid, brids) = wfd.getResult()

        # return a remotely-usable BuildSetStatus object
        wfd = defer.waitForDeferred(
                db.buildsets.getBuildset(bsid))
        yield wfd
        bsdict = wfd.getResult()

        bss = BuildSetStatus(bsdict, self.scheduler.master.status)
        from buildbot.status.client import makeRemote
        r = makeRemote(bss)
        yield r # return value
Example No. 19
    def handleJobFile(self, filename, f):
        try:
            parsed_job = self.parseJob(f)
            builderNames = parsed_job["builderNames"]
        except BadJobfile:
            log.msg("%s reports a bad jobfile in %s" % (self, filename))
            log.err()
            return defer.succeed(None)

        # Validate/fixup the builder names.
        builderNames = self.filterBuilderList(builderNames)
        if not builderNames:
            log.msg("incoming Try job did not specify any allowed builder names")
            return defer.succeed(None)

        who = ""
        if parsed_job["who"]:
            who = parsed_job["who"]

        comment = ""
        if parsed_job["comment"]:
            comment = parsed_job["comment"]

        sourcestamp = dict(
            branch=parsed_job["branch"],
            codebase="",
            revision=parsed_job["baserev"],
            patch_body=parsed_job["patch_body"],
            patch_level=parsed_job["patch_level"],
            patch_author=who,
            patch_comment=comment,
            patch_subdir="",  # TODO: can't set this remotely - #1769
            project=parsed_job["project"],
            repository=parsed_job["repository"],
        )
        reason = u"'try' job"
        if parsed_job["who"]:
            reason += u" by user %s" % ascii2unicode(parsed_job["who"])
        properties = parsed_job["properties"]
        requested_props = Properties()
        requested_props.update(properties, "try build")

        return self.addBuildsetForSourceStamps(
            sourcestamps=[sourcestamp],
            reason=reason,
            external_idstring=ascii2unicode(parsed_job["jobid"]),
            builderNames=builderNames,
            properties=requested_props,
        )
Example No. 20
  @defer.inlineCallbacks  # assumed decorator, omitted from the excerpt; required for the yield/returnValue style below
  def submitJob(self, change, job):
    props = Properties()
    if change.properties:
      props.updateFromProperties(change.properties)
    if job.build_properties:
      props.update(job.build_properties, 'Gerrit')

    bsid = yield self.addBuildsetForChanges(
        reason='tryjob',
        changeids=[change.number],
        builderNames=job.builder_names,
        properties=props)
    log.msg('Successfully submitted a Gerrit try job for %s: %s.' %
            (change.who, job))
    defer.returnValue(bsid)
Example No. 21
    def handleJobFile(self, filename, f):
        try:
            parsed_job = self.parseJob(f)
            builderNames = parsed_job['builderNames']
        except BadJobfile:
            log.msg("%s reports a bad jobfile in %s" % (self, filename))
            log.err()
            return defer.succeed(None)

        # Validate/fixup the builder names.
        builderNames = self.filterBuilderList(builderNames)
        if not builderNames:
            log.msg(
                "incoming Try job did not specify any allowed builder names")
            return defer.succeed(None)

        who = ""
        if parsed_job['who']:
            who = parsed_job['who']

        comment = ""
        if parsed_job['comment']:
            comment = parsed_job['comment']

        sourcestamp = dict(
            branch=parsed_job['branch'],
            codebase='',
            revision=parsed_job['baserev'],
            patch_body=parsed_job['patch_body'],
            patch_level=parsed_job['patch_level'],
            patch_author=who,
            patch_comment=comment,
            patch_subdir='',  # TODO: can't set this remotely - #1769
            project=parsed_job['project'],
            repository=parsed_job['repository'])
        reason = u"'try' job"
        if parsed_job['who']:
            reason += u" by user %s" % ascii2unicode(parsed_job['who'])
        properties = parsed_job['properties']
        requested_props = Properties()
        requested_props.update(properties, "try build")

        return self.addBuildsetForSourceStamps(sourcestamps=[sourcestamp],
                                               reason=reason,
                                               external_idstring=ascii2unicode(
                                                   parsed_job['jobid']),
                                               builderNames=builderNames,
                                               properties=requested_props)
Example No. 22
    def handleJobFile(self, filename, f):
        try:
            parsed_job = self.parseJob(f)
            builderNames = parsed_job['builderNames']
        except BadJobfile:
            log.msg("%s reports a bad jobfile in %s" % (self, filename))
            log.err()
            return defer.succeed(None)

        # Validate/fixup the builder names.
        builderNames = self.filterBuilderList(builderNames)
        if not builderNames:
            log.msg(
                "incoming Try job did not specify any allowed builder names")
            return defer.succeed(None)

        who = ""
        if parsed_job['who']:
            who = parsed_job['who']

        comment = ""
        if parsed_job['comment']:
            comment = parsed_job['comment']

        sourcestamp = dict(branch=parsed_job['branch'],
                           codebase='',
                           revision=parsed_job['baserev'],
                           patch_body=parsed_job['patch_body'],
                           patch_level=parsed_job['patch_level'],
                           patch_author=who,
                           patch_comment=comment,
                           # TODO: can't set this remotely - #1769
                           patch_subdir='',
                           project=parsed_job['project'],
                           repository=parsed_job['repository'])
        reason = "'try' job"
        if parsed_job['who']:
            reason += " by user {}".format(bytes2unicode(parsed_job['who']))
        properties = parsed_job['properties']
        requested_props = Properties()
        requested_props.update(properties, "try build")

        return self.addBuildsetForSourceStamps(
            sourcestamps=[sourcestamp],
            reason=reason,
            external_idstring=bytes2unicode(parsed_job['jobid']),
            builderNames=builderNames,
            properties=requested_props)
Example No. 23
    def getSchedulersAndProperties(self):
        sch = self.schedulerNames[0]
        triggered_schedulers = []

        for env in self.config.matrix:
            props_to_set = Properties()
            props_to_set.setProperty("TRAVIS_PULL_REQUEST",
                                     self.getProperty("TRAVIS_PULL_REQUEST"), "inherit")
            for k, v in env.items():
                if k == "env":
                    props_to_set.update(v, ".travis.yml")
                else:
                    props_to_set.setProperty(k, v, ".travis.yml")

            triggered_schedulers.append((sch, props_to_set))
        return triggered_schedulers
Example No. 24
    def getSchedulersAndProperties(self):
        sch = self.schedulerNames[0]
        triggered_schedulers = []

        for env in self.config.matrix:
            props_to_set = Properties()
            props_to_set.setProperty("TRAVIS_PULL_REQUEST",
                                     self.getProperty("TRAVIS_PULL_REQUEST"),
                                     "inherit")
            for k, v in env.items():
                if k == "env":
                    props_to_set.update(v, ".travis.yml")
                else:
                    props_to_set.setProperty(k, v, ".travis.yml")

            triggered_schedulers.append((sch, props_to_set))
        return triggered_schedulers
Example No. 25
    def perspective_try(
        self,
        branch,
        revision,
        patch,
        repository,
        project,
        builderNames,
        properties={},
    ):
        log.msg("user %s requesting build on builders %s" %
                (self.username, builderNames))
        # build the intersection of the request and our configured list
        builderNames = self.parent.filterBuilderList(builderNames)
        if not builderNames:
            return
        ss = SourceStamp(branch,
                         revision,
                         patch,
                         repository=repository,
                         project=project)
        reason = "'try' job from user %s" % self.username

        # roll the specified props in with our inherited props
        combined_props = Properties()
        combined_props.updateFromProperties(self.parent.properties)
        combined_props.update(properties, "try build")

        status = self.parent.parent.parent.status
        db = self.parent.parent.db
        d = db.runInteraction(self._try, ss, builderNames, reason,
                              combined_props, db)

        def _done(bsid):
            # return a remotely-usable BuildSetStatus object
            bss = BuildSetStatus(bsid, status, db)
            from buildbot.status.client import makeRemote
            r = makeRemote(bss)
            #self.parent.parent.loop_done() # so it will notify builder loop
            return r

        d.addCallback(_done)
        return d
Example No. 26
    @defer.inlineCallbacks  # assumed decorator, omitted from the excerpt; required for the yield style below
    def perspective_try(self, branch, revision, patch, repository, project,
                        builderNames, who="", comment="", properties=None):
        log.msg("user {} requesting build on builders {}".format(self.username, builderNames))
        if properties is None:
            properties = {}
        # build the intersection of the request and our configured list
        builderNames = self.scheduler.filterBuilderList(builderNames)
        if not builderNames:
            return None

        branch = bytes2unicode(branch)
        revision = bytes2unicode(revision)
        patch = patch[0], bytes2unicode(patch[1])
        repository = bytes2unicode(repository)
        project = bytes2unicode(project)
        who = bytes2unicode(who)
        comment = bytes2unicode(comment)

        reason = "'try' job"

        if who:
            reason += " by user {}".format(bytes2unicode(who))

        if comment:
            reason += " ({})".format(bytes2unicode(comment))

        sourcestamp = dict(
            branch=branch, revision=revision, repository=repository,
            project=project, patch_level=patch[0], patch_body=patch[1],
            patch_subdir='', patch_author=who or '',
            patch_comment=comment or '', codebase='',
        )           # note: no way to specify patch subdir - #1769

        requested_props = Properties()
        requested_props.update(properties, "try build")
        (bsid, brids) = yield self.scheduler.addBuildsetForSourceStamps(
            sourcestamps=[sourcestamp], reason=reason,
            properties=requested_props, builderNames=builderNames)

        # return a remotely-usable BuildSetStatus object
        bss = RemoteBuildSetStatus(self.scheduler.master, bsid, brids)
        return bss
Example No. 27
    @defer.inlineCallbacks  # assumed decorator, omitted from the excerpt; required for the yield/returnValue style below
    def perspective_try(self, branch, revision, patch, repository, project,
                        builderNames, who="", comment="", properties={}):
        db = self.scheduler.master.db
        log.msg("user %s requesting build on builders %s" % (self.username,
                                                             builderNames))

        # build the intersection of the request and our configured list
        builderNames = self.scheduler.filterBuilderList(builderNames)
        if not builderNames:
            return

        reason = "'try' job"

        if who:
            reason += " by user %s" % who

        if comment:
            reason += " (%s)" % comment

        sourcestampsetid = yield db.sourcestampsets.addSourceStampSet()

        yield db.sourcestamps.addSourceStamp(
            branch=branch, revision=revision, repository=repository,
            project=project, patch_level=patch[0], patch_body=patch[1],
            patch_subdir='', patch_author=who or '',
            patch_comment=comment or '',
            sourcestampsetid=sourcestampsetid)
                    # note: no way to specify patch subdir - #1769

        requested_props = Properties()
        requested_props.update(properties, "try build")
        (bsid, brids) = yield self.scheduler.addBuildsetForSourceStamp(
                setid=sourcestampsetid, reason=reason,
                properties=requested_props, builderNames=builderNames)

        # return a remotely-usable BuildSetStatus object
        bsdict = yield db.buildsets.getBuildset(bsid)

        bss = BuildSetStatus(bsdict, self.scheduler.master.status)
        from buildbot.status.client import makeRemote
        defer.returnValue(makeRemote(bss))
Example No. 28
 def get_props(self, builder, options):
     """Current job extra properties that are not related to the source stamp.
     Initialize with the Scheduler's base properties.
     """
     keys = (
         'clobber',
         'issue',
         'patchset',
         'requester',
         'rietveld',
         'root',
         'try_job_key',
     )
     # All these settings have no meaning when False or not set, so don't set
     # them in that case.
     properties = dict((i, options[i]) for i in keys if options.get(i))
     properties['testfilter'] = options['bot'].get(builder, None)
     props = Properties()
     props.updateFromProperties(self.properties)
     props.update(properties, self._PROPERTY_SOURCE)
     return props
Example No. 29
 def get_props(self, builder, options):
   """Current job extra properties that are not related to the source stamp.
   Initialize with the Scheduler's base properties.
   """
   keys = (
     'clobber',
     'issue',
     'patchset',
     'requester',
     'rietveld',
     'root',
     'try_job_key',
   )
   # All these settings have no meaning when False or not set, so don't set
   # them in that case.
   properties = dict((i, options[i]) for i in keys if options.get(i))
   properties['testfilter'] = options['bot'].get(builder, None)
   props = Properties()
   props.updateFromProperties(self.properties)
   props.update(properties, self._PROPERTY_SOURCE)
   return props
Example No. 30
  def get_props(self, builder, options):
    """Current job extra properties that are not related to the source stamp.
    Initialize with the Scheduler's base properties.
    """
    always_included_keys = (
      'orig_revision',
    )

    optional_keys = (
      'clobber',
      'issue',
      'patch_ref',
      'patch_repo_url',
      'patch_storage',
      'patch_url',
      'patch_project',
      'patchset',
      'requester',
      'rietveld',
      'root',
      'try_job_key',
    )

    # All these settings have no meaning when False or not set, so don't set
    # them in that case.
    properties = dict((i, options[i]) for i in optional_keys if options.get(i))

    # These settings are meaningful even if the value evaluates to False
    # or None. Note that when options don't contain a given key, it will
    # be set to None.
    properties.update(dict((i, options.get(i)) for i in always_included_keys))

    # Specially evaluated properties, e.g. ones where key name is different
    # between properties and options.
    properties['testfilter'] = options['bot'].get(builder, None)

    props = Properties()
    props.updateFromProperties(self.properties)
    props.update(properties, self._PROPERTY_SOURCE)
    return props
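
The two dict expressions above implement slightly different rules: optional keys are dropped whenever their value is falsy, while always-included keys are set even when absent (defaulting to None via options.get). A standalone sketch with illustrative option values:

options = {"clobber": True, "issue": 0, "requester": "dev@example.com"}
optional_keys = ("clobber", "issue", "requester")
always_included_keys = ("orig_revision",)

# falsy optional values ("issue" == 0) are dropped entirely
properties = dict((k, options[k]) for k in optional_keys if options.get(k))
# always-included keys appear even when missing, as None
properties.update(dict((k, options.get(k)) for k in always_included_keys))
print(properties)  # {'clobber': True, 'requester': 'dev@example.com', 'orig_revision': None}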
Example No. 31
    def get_props(self, builder, options):
        """Current job extra properties that are not related to the source stamp.
        Initialize with the Scheduler's base properties.
        """
        always_included_keys = ('orig_revision', )

        optional_keys = (
            'clobber',
            'issue',
            'patch_ref',
            'patch_repo_url',
            'patch_storage',
            'patch_url',
            'patch_project',
            'patchset',
            'requester',
            'rietveld',
            'root',
            'try_job_key',
        )

        # All these settings have no meaning when False or not set, so don't set
        # them in that case.
        properties = dict(
            (i, options[i]) for i in optional_keys if options.get(i))

        # These settings are meaningful even if the value evaluates to False
        # or None. Note that when options don't contain a given key, it will
        # be set to None.
        properties.update(
            dict((i, options.get(i)) for i in always_included_keys))

        # Specially evaluated properties, e.g. ones where key name is different
        # between properties and options.
        properties['testfilter'] = options['bot'].get(builder, None)

        props = Properties()
        props.updateFromProperties(self.properties)
        props.update(properties, self._PROPERTY_SOURCE)
        return props
Example No. 32
    def getSchedulersAndProperties(self):
        sch = self.schedulerNames[0]
        triggered_schedulers = []
        for env in self.config.matrix:
            props_to_set = Properties()
            props_to_set.setProperty("TRAVIS_PULL_REQUEST",
                                     self.getProperty("TRAVIS_PULL_REQUEST"),
                                     "inherit")
            flat_env = {}
            for k, v in env.items():
                if k == "env":
                    props_to_set.update(v, ".travis.yml")
                    flat_env.update(v)
                else:
                    props_to_set.setProperty(k, v, ".travis.yml")
                    flat_env[k] = v
            props_to_set.setProperty(
                "reason", u" | ".join(
                    sorted(str(k) + '=' + str(v)
                           for k, v in flat_env.items())), "spawner")

            triggered_schedulers.append((sch, props_to_set))
        return triggered_schedulers
Example No. 33
class Change:
    """I represent a single change to the source tree. This may involve several
    files, but they are all changed by the same person, and there is a change
    comment for the group as a whole."""

    implements(interfaces.IStatusEvent)

    number = None
    branch = None
    category = None
    revision = None  # used to create a source-stamp
    links = []  # links are gone, but upgrade code expects this attribute

    @classmethod
    def fromChdict(cls, master, chdict):
        """
        Class method to create a L{Change} from a dictionary as returned
        by L{ChangesConnectorComponent.getChange}.

        @param master: build master instance
        @param chdict: change dictionary

        @returns: L{Change} via Deferred
        """
        cache = master.caches.get_cache("Changes", cls._make_ch)
        return cache.get(chdict['changeid'], chdict=chdict, master=master)

    @classmethod
    def _make_ch(cls, changeid, master, chdict):
        change = cls(None, None, None, _fromChdict=True)
        change.who = chdict['author']
        change.comments = chdict['comments']
        change.isdir = chdict['is_dir']
        change.revision = chdict['revision']
        change.branch = chdict['branch']
        change.category = chdict['category']
        change.revlink = chdict['revlink']
        change.repository = chdict['repository']
        change.codebase = chdict['codebase']
        change.project = chdict['project']
        change.number = chdict['changeid']

        when = chdict['when_timestamp']
        if when:
            when = datetime2epoch(when)
        change.when = when

        change.files = chdict['files'][:]
        change.files.sort()

        change.properties = Properties()
        for n, (v, s) in chdict['properties'].iteritems():
            change.properties.setProperty(n, v, s)

        return defer.succeed(change)

    def __init__(self,
                 who,
                 files,
                 comments,
                 isdir=0,
                 revision=None,
                 when=None,
                 branch=None,
                 category=None,
                 revlink='',
                 properties={},
                 repository='',
                 codebase='',
                 project='',
                 _fromChdict=False):
        # skip all this madness if we're being built from the database
        if _fromChdict:
            return

        self.who = who
        self.comments = comments
        self.isdir = isdir

        def none_or_unicode(x):
            if x is None: return x
            return unicode(x)

        self.revision = none_or_unicode(revision)
        now = util.now()
        if when is None:
            self.when = now
        elif when > now:
            # this happens when the committing system has an incorrect clock, for example.
            # handle it gracefully
            log.msg(
                "received a Change with when > now; assuming the change happened now"
            )
            self.when = now
        else:
            self.when = when
        self.branch = none_or_unicode(branch)
        self.category = none_or_unicode(category)
        self.revlink = revlink
        self.properties = Properties()
        self.properties.update(properties, "Change")
        self.repository = repository
        self.codebase = codebase
        self.project = project

        # keep a sorted list of the files, for easier display
        self.files = (files or [])[:]
        self.files.sort()

    def __setstate__(self, dict):
        self.__dict__ = dict
        # Older Changes won't have a 'properties' attribute in them
        if not hasattr(self, 'properties'):
            self.properties = Properties()
        if not hasattr(self, 'revlink'):
            self.revlink = ""

    def __str__(self):
        return (u"Change(revision=%r, who=%r, branch=%r, comments=%r, " +
                u"when=%r, category=%r, project=%r, repository=%r, " +
                u"codebase=%r)") % (self.revision, self.who, self.branch,
                                    self.comments, self.when, self.category,
                                    self.project, self.repository,
                                    self.codebase)

    def __cmp__(self, other):
        return self.number - other.number

    def asText(self):
        data = ""
        data += self.getFileContents()
        if self.repository:
            data += "On: %s\n" % self.repository
        if self.project:
            data += "For: %s\n" % self.project
        data += "At: %s\n" % self.getTime()
        data += "Changed By: %s\n" % self.who
        data += "Comments: %s" % self.comments
        data += "Properties: \n%s\n\n" % self.getProperties()
        return data

    def asDict(self):
        '''returns a dictionary with suitable info for html/mail rendering'''
        result = {}

        files = [dict(name=f) for f in self.files]
        files.sort(key=lambda f: f['name'])

        # Constant
        result['number'] = self.number
        result['branch'] = self.branch
        result['category'] = self.category
        result['who'] = self.getShortAuthor()
        result['comments'] = self.comments
        result['revision'] = self.revision
        result['rev'] = self.revision
        result['when'] = self.when
        result['at'] = self.getTime()
        result['files'] = files
        result['revlink'] = getattr(self, 'revlink', None)
        result['properties'] = self.properties.asList()
        result['repository'] = getattr(self, 'repository', None)
        result['codebase'] = getattr(self, 'codebase', '')
        result['project'] = getattr(self, 'project', None)
        return result

    def getShortAuthor(self):
        return self.who

    def getTime(self):
        if not self.when:
            return "?"
        return time.strftime("%a %d %b %Y %H:%M:%S", time.localtime(self.when))

    def getTimes(self):
        return (self.when, None)

    def getText(self):
        return [html.escape(self.who)]

    def getLogs(self):
        return {}

    def getFileContents(self):
        data = ""
        if len(self.files) == 1:
            if self.isdir:
                data += "Directory: %s\n" % self.files[0]
            else:
                data += "File: %s\n" % self.files[0]
        else:
            data += "Files:\n"
            for f in self.files:
                data += " %s\n" % f
        return data

    def getProperties(self):
        data = ""
        for prop in self.properties.asList():
            data += "  %s: %s" % (prop[0], prop[1])
        return data
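
As the constructor shows, a plain properties dict passed to Change is folded into a Properties object under the fixed source "Change". A usage sketch, assuming the class and its module-level dependencies (util, Properties, log) are in scope:

change = Change(who="alice", files=["src/main.c"], comments="fix crash",
                properties={"release": "beta"})
print(change.properties.getProperty("release"))        # beta
print(change.properties.getPropertySource("release"))  # Change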
Example No. 34
class AbstractBuildSlave(config.ReconfigurableServiceMixin, pb.Avatar,
                         service.MultiService):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['slaves'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a build slave -- a remote machine capable of
    running builds.  I am instantiated by the configuration file, and can be
    subclassed to add extra functionality."""

    implements(IBuildSlave)
    keepalive_timer = None
    keepalive_interval = None

    # reconfig slaves after builders
    reconfig_priority = 64

    def __init__(self,
                 name,
                 password,
                 max_builds=None,
                 notify_on_missing=[],
                 missing_timeout=3600,
                 properties={},
                 locks=None,
                 keepalive_interval=3600):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this buildslave (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this slave
        @type properties: dictionary
        @param locks: A list of locks that must be acquired before this slave
                      can be used
        @type locks: dictionary
        """
        service.MultiService.__init__(self)
        self.slavename = name
        self.password = password

        # PB registration
        self.registration = None
        self.registered_port = None

        # these are set when the service is started, and unset when it is
        # stopped
        self.botmaster = None
        self.master = None

        self.slave_status = SlaveStatus(name)
        self.slave = None  # a RemoteReference to the Bot, when connected
        self.slave_commands = None
        self.slavebuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.lock_subscriptions = []

        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")

        self.lastMessageReceived = 0
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            if not isinstance(i, str):
                config.error('notify_on_missing arg %r is not a string' %
                             (i, ))
        self.missing_timeout = missing_timeout
        self.missing_timer = None
        self.keepalive_interval = keepalive_interval

        self.detached_subs = None

        self._old_builder_list = None

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.slavename)

    def updateLocks(self):
        """Convert the L{LockAccess} objects in C{self.locks} into real lock
        objects, while also maintaining the subscriptions to lock releases."""
        # unsubscribe from any old locks
        for s in self.lock_subscriptions:
            s.unsubscribe()

        # convert locks into their real form
        locks = [(self.botmaster.getLockFromLockAccess(a), a)
                 for a in self.access]
        self.locks = [(l.getLock(self), la) for l, la in locks]
        self.lock_subscriptions = [
            l.subscribeToReleases(self._lockReleased) for l, la in self.locks
        ]

    def locksAvailable(self):
        """
        I am called to see if all the locks I depend on are available,
        in which case I return True; otherwise I return False
        """
        if not self.locks:
            return True
        for lock, access in self.locks:
            if not lock.isAvailable(self, access):
                return False
        return True

    def acquireLocks(self):
        """
        I am called when a build is preparing to run. I try to claim all
        the locks that are needed for a build to happen. If I can't, then
        my caller should give up the build and try to get another slave
        to look at it.
        """
        log.msg("acquireLocks(slave %s, locks %s)" % (self, self.locks))
        if not self.locksAvailable():
            log.msg("slave %s can't lock, giving up" % (self, ))
            return False
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return True

    def releaseLocks(self):
        """
        I am called to release any locks after a build has finished
        """
        log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)

    def _lockReleased(self):
        """One of the locks for this slave was released; try scheduling
        builds."""
        if not self.botmaster:
            return  # oh well..
        self.botmaster.maybeStartBuildsForSlave(self.slavename)

    def _saveSlaveInfoDict(self, slaveinfo):
        return self.master.db.buildslaves.updateBuildslave(
            name=self.slavename,
            slaveinfo=slaveinfo,
        )

    def _getSlaveInfo(self):
        d = self.master.db.buildslaves.getBuildslaveByName(self.slavename)

        @d.addCallback
        def applyInfo(buildslave):
            if buildslave is None:
                return

            self.updateSlaveInfo(**buildslave['slaveinfo'])

        return d

    def updateSlaveInfo(self, **kwargs):
        self.slave_status.updateInfo(**kwargs)

    def getSlaveInfo(self, key, default=None):
        return self.slave_status.getInfo(key, default)

    def setServiceParent(self, parent):
        # botmaster needs to be set before setServiceParent, which calls startService
        self.botmaster = parent
        self.master = parent.master
        service.MultiService.setServiceParent(self, parent)

    def startService(self):
        self.updateLocks()
        self.startMissingTimer()
        self.slave_status.addInfoWatcher(self._saveSlaveInfoDict)
        d = self._getSlaveInfo()
        d.addCallback(lambda _: service.MultiService.startService(self))
        return d

    @defer.inlineCallbacks
    def reconfigService(self, new_config):
        # Given a new BuildSlave, configure this one identically.  Because
        # BuildSlave objects are remotely referenced, we can't replace them
        # without disconnecting the slave, yet there's no reason to do that.
        new = self.findNewSlaveInstance(new_config)

        assert self.slavename == new.slavename

        # do we need to re-register?
        if (not self.registration or self.password != new.password
                or new_config.protocols['pb']['port'] != self.registered_port):
            if self.registration:
                yield self.registration.unregister()
                self.registration = None
            self.password = new.password
            self.registered_port = new_config.protocols['pb']['port']
            self.registration = self.master.pbmanager.register(
                self.registered_port, self.slavename, self.password,
                self.getPerspective)

        # adopt new instance's configuration parameters
        self.max_builds = new.max_builds
        self.access = new.access
        self.notify_on_missing = new.notify_on_missing
        self.keepalive_interval = new.keepalive_interval

        if self.missing_timeout != new.missing_timeout:
            running_missing_timer = self.missing_timer
            self.stopMissingTimer()
            self.missing_timeout = new.missing_timeout
            if running_missing_timer:
                self.startMissingTimer()

        properties = Properties()
        properties.updateFromProperties(new.properties)
        self.properties = properties

        self.updateLocks()

        # update the attached slave's notion of which builders are attached.
        # This assumes that the relevant builders have already been configured,
        # which is why the reconfig_priority is set low in this class.
        yield self.updateSlave()

        yield config.ReconfigurableServiceMixin.reconfigService(
            self, new_config)

    def stopService(self):
        self.slave_status.removeInfoWatcher(self._saveSlaveInfoDict)
        if self.registration:
            self.registration.unregister()
            self.registration = None
        self.stopMissingTimer()
        return service.MultiService.stopService(self)

    def findNewSlaveInstance(self, new_config):
        # TODO: called multiple times per reconfig; use 1-element cache?
        for sl in new_config.slaves:
            if sl.slavename == self.slavename:
                return sl
        assert 0, "no new slave named '%s'" % self.slavename

    def startMissingTimer(self):
        if self.notify_on_missing and self.missing_timeout and self.parent:
            self.stopMissingTimer()  # in case it's already running
            self.missing_timer = reactor.callLater(self.missing_timeout,
                                                   self._missing_timer_fired)

    def stopMissingTimer(self):
        if self.missing_timer:
            self.missing_timer.cancel()
            self.missing_timer = None

    def getPerspective(self, mind, slavename):
        assert slavename == self.slavename
        metrics.MetricCountEvent.log("attached_slaves", 1)

        # record when this connection attempt occurred
        if self.slave_status:
            self.slave_status.recordConnectTime()

        # try to use TCP keepalives
        try:
            mind.broker.transport.setTcpKeepAlive(1)
        except Exception:
            # not all transports support TCP keepalives; ignore failures
            pass

        if self.isConnected():
            # duplicate slave - send it to arbitration
            arb = botmaster.DuplicateSlaveArbitrator(self)
            return arb.getPerspective(mind, slavename)
        else:
            log.msg("slave '%s' attaching from %s" %
                    (slavename, mind.broker.transport.getPeer()))
            return self

    def doKeepalive(self):
        self.keepalive_timer = reactor.callLater(self.keepalive_interval,
                                                 self.doKeepalive)
        if not self.slave:
            return
        d = self.slave.callRemote("print", "Received keepalive from master")
        d.addErrback(log.msg, "Keepalive failed for '%s'" % (self.slavename, ))

    def stopKeepaliveTimer(self):
        if self.keepalive_timer:
            self.keepalive_timer.cancel()
            self.keepalive_timer = None

    def startKeepaliveTimer(self):
        assert self.keepalive_interval
        log.msg("Starting buildslave keepalive timer for '%s'" %
                (self.slavename, ))
        self.doKeepalive()

    def isConnected(self):
        return self.slave

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return

        buildmaster = self.botmaster.master
        status = buildmaster.getStatus()
        text = "The Buildbot working for '%s'\n" % status.getTitle()
        text += ("has noticed that the buildslave named %s went away\n" %
                 self.slavename)
        text += "\n"
        text += ("It last disconnected at %s (buildmaster-local time)\n" %
                 time.ctime(time.time() - self.missing_timeout))  # approx
        text += "\n"
        text += "The admin on record (as reported by BUILDSLAVE:info/admin)\n"
        text += "was '%s'.\n" % self.slave_status.getAdmin()
        text += "\n"
        text += "Sincerely,\n"
        text += " The Buildbot\n"
        text += " %s\n" % status.getTitleURL()
        text += "\n"
        text += "%s\n" % status.getURLForThing(self.slave_status)
        subject = "Buildbot: buildslave %s was lost" % self.slavename
        return self._mail_missing_message(subject, text)

    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.slave:
            return self.sendBuilderList()
        else:
            return defer.succeed(None)

    def updateSlaveStatus(self, buildStarted=None, buildFinished=None):
        if buildStarted:
            self.slave_status.buildStarted(buildStarted)
        if buildFinished:
            self.slave_status.buildFinished(buildFinished)

    def attached(self, bot):
        """This is called when the slave connects.

        @return: a Deferred that fires when the attachment is complete
        """

        # the botmaster should ensure this.
        assert not self.isConnected()

        metrics.MetricCountEvent.log("AbstractBuildSlave.attached_slaves", 1)

        # set up the subscription point for eventual detachment
        self.detached_subs = subscription.SubscriptionPoint("detached")

        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # we accumulate slave information in this 'state' dictionary, then
        # set it atomically if we make it far enough through the process
        state = {}

        # Reset graceful shutdown status
        self.slave_status.setGraceful(False)
        # We want to know when the graceful shutdown flag changes
        self.slave_status.addGracefulWatcher(self._gracefulChanged)
        self.slave_status.addPauseWatcher(self._pauseChanged)

        d = defer.succeed(None)

        @d.addCallback
        def _log_attachment_on_slave(res):
            d1 = bot.callRemote("print", "attached")
            d1.addErrback(lambda why: None)
            return d1

        @d.addCallback
        def _get_info(res):
            d1 = bot.callRemote("getSlaveInfo")

            def _got_info(info):
                log.msg("Got slaveinfo from '%s'" % self.slavename)
                # TODO: info{} might have other keys
                state["admin"] = info.get("admin")
                state["host"] = info.get("host")
                state["access_uri"] = info.get("access_uri", None)
                state["slave_environ"] = info.get("environ", {})
                state["slave_basedir"] = info.get("basedir", None)
                state["slave_system"] = info.get("system", None)

            def _info_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # maybe an old slave, doesn't implement remote_getSlaveInfo
                log.msg("BuildSlave.info_unavailable")
                log.err(why)

            d1.addCallbacks(_got_info, _info_unavailable)
            return d1

        d.addCallback(lambda _: self.startKeepaliveTimer())

        @d.addCallback
        def _get_version(_):
            d = bot.callRemote("getVersion")

            def _got_version(version):
                state["version"] = version

            def _version_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # probably an old slave
                state["version"] = '(unknown)'

            d.addCallbacks(_got_version, _version_unavailable)
            return d

        @d.addCallback
        def _get_commands(_):
            d1 = bot.callRemote("getCommands")

            def _got_commands(commands):
                state["slave_commands"] = commands

            def _commands_unavailable(why):
                # probably an old slave
                if why.check(AttributeError):
                    return
                log.msg("BuildSlave.getCommands is unavailable - ignoring")
                log.err(why)

            d1.addCallbacks(_got_commands, _commands_unavailable)
            return d1

        @d.addCallback
        def _accept_slave(res):
            self.slave_status.setConnected(True)

            self.slave_status.updateInfo(
                admin=state.get("admin"),
                host=state.get("host"),
                access_uri=state.get("access_uri"),
                version=state.get("version"),
            )

            self.slave_commands = state.get("slave_commands")
            self.slave_environ = state.get("slave_environ")
            self.slave_basedir = state.get("slave_basedir")
            self.slave_system = state.get("slave_system")
            self.slave = bot
            if self.slave_system == "nt":
                self.path_module = namedModule("ntpath")
            else:
                # most everything accepts / as separator, so posix should be a
                # reasonable fallback
                self.path_module = namedModule("posixpath")
            log.msg("bot attached")
            self.messageReceivedFromSlave()
            self.stopMissingTimer()
            self.master.status.slaveConnected(self.slavename)

        d.addCallback(lambda _: self.updateSlave())

        d.addCallback(
            lambda _: self.botmaster.maybeStartBuildsForSlave(self.slavename))

        # Finally, the slave gets a reference to this BuildSlave. They
        # receive this later, after we've started using them.
        d.addCallback(lambda _: self)
        return d

    def messageReceivedFromSlave(self):
        now = time.time()
        self.lastMessageReceived = now
        self.slave_status.setLastMessageReceived(now)

    def detached(self, mind):
        metrics.MetricCountEvent.log("AbstractBuildSlave.attached_slaves", -1)
        self.slave = None
        self._old_builder_list = []
        self.slave_status.removeGracefulWatcher(self._gracefulChanged)
        self.slave_status.removePauseWatcher(self._pauseChanged)
        self.slave_status.setConnected(False)
        log.msg("BuildSlave.detached(%s)" % self.slavename)
        self.master.status.slaveDisconnected(self.slavename)
        self.stopKeepaliveTimer()
        self.releaseLocks()

        # notify watchers, but do so in the next reactor iteration so that
        # any further detached() action by subclasses happens first
        def notif():
            subs = self.detached_subs
            self.detached_subs = None
            subs.deliver()

        eventually(notif)

    def subscribeToDetach(self, callback):
        """
        Request that C{callable} be invoked with no arguments when the
        L{detached} method is invoked.

        @returns: L{Subscription}
        """
        assert self.detached_subs, "detached_subs is only set if attached"
        return self.detached_subs.subscribe(callback)

    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """

        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)
        # When this Deferred fires, we'll be ready to accept the new slave
        return self._disconnect(self.slave)

    def _disconnect(self, slave):
        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback with one argument, the
        # RemoteReference being disconnected.
        def _disconnected(rref):
            eventually(d.callback, None)

        slave.notifyOnDisconnect(_disconnected)
        tport = slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            # tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
        except Exception:
            # however, these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
        log.msg("waiting for slave to finish disconnecting")

        return d

    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
        blist = [(b.name, b.config.slavebuilddir) for b in our_builders]
        if blist == self._old_builder_list:
            return defer.succeed(None)

        d = self.slave.callRemote("setBuilderList", blist)

        def sentBuilderList(ign):
            self._old_builder_list = blist
            return ign

        d.addCallback(sentBuilderList)
        return d

    def perspective_keepalive(self):
        self.messageReceivedFromSlave()

    def perspective_shutdown(self):
        log.msg("slave %s wants to shut down" % self.slavename)
        self.slave_status.setGraceful(True)

    def addSlaveBuilder(self, sb):
        self.slavebuilders[sb.builder_name] = sb

    def removeSlaveBuilder(self, sb):
        try:
            del self.slavebuilders[sb.builder_name]
        except KeyError:
            pass

    def buildFinished(self, sb):
        """This is called when a build on this slave is finished."""
        self.botmaster.maybeStartBuildsForSlave(self.slavename)

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this buildslave
        can start a build.  This function can be used to limit overall
        concurrency on the buildslave.

        Note for subclassers: if a slave can become willing to start a build
        without any action on that slave (for example, by a resource in use on
        another slave becoming available), then you must arrange for
        L{maybeStartBuildsForSlave} to be called at that time, or builds on
        this slave will not start.
        """

        if self.slave_status.isPaused():
            return False

        # If we're waiting to shutdown gracefully, then we shouldn't
        # accept any new jobs.
        if self.slave_status.getGraceful():
            return False

        if self.max_builds:
            active_builders = [
                sb for sb in self.slavebuilders.values() if sb.isBusy()
            ]
            if len(active_builders) >= self.max_builds:
                return False

        if not self.locksAvailable():
            return False

        return True

    def _mail_missing_message(self, subject, text):
        # first, see if we have a MailNotifier we can use. This gives us a
        # fromaddr and a relayhost.
        buildmaster = self.botmaster.master
        for st in buildmaster.status:
            if isinstance(st, MailNotifier):
                break
        else:
            # if not, they get a default MailNotifier, which always uses SMTP
            # to localhost and uses a dummy fromaddr of "buildbot".
            log.msg("buildslave-missing msg using default MailNotifier")
            st = MailNotifier("buildbot")
        # now construct the mail

        m = Message()
        m.set_payload(text)
        m['Date'] = formatdate(localtime=True)
        m['Subject'] = subject
        m['From'] = st.fromaddr
        recipients = self.notify_on_missing
        m['To'] = ", ".join(recipients)
        d = st.sendMessage(m, recipients)
        # return the Deferred for testing purposes
        return d

    def _gracefulChanged(self, graceful):
        """This is called when our graceful shutdown setting changes"""
        self.maybeShutdown()

    @defer.inlineCallbacks
    def shutdown(self):
        """Shutdown the slave"""
        if not self.slave:
            log.msg("no remote; slave is already shut down")
            return

        # First, try the "new" way - calling our own remote's shutdown
        # method.  The method was only added in 0.8.3, so ignore NoSuchMethod
        # failures.
        def new_way():
            d = self.slave.callRemote('shutdown')
            d.addCallback(lambda _: True)  # successful shutdown request

            def check_nsm(f):
                f.trap(pb.NoSuchMethod)
                return False  # fall through to the old way

            d.addErrback(check_nsm)

            def check_connlost(f):
                f.trap(pb.PBConnectionLost)
                return True  # the slave is gone, so call it finished

            d.addErrback(check_connlost)
            return d

        if (yield new_way()):
            return  # done!

        # Now, the old way.  Look for a builder with a remote reference to the
        # client side slave.  If we can find one, then call "shutdown" on the
        # remote builder, which will cause the slave buildbot process to exit.
        def old_way():
            d = None
            for b in self.slavebuilders.values():
                if b.remote:
                    d = b.remote.callRemote("shutdown")
                    break

            if d:
                log.msg("Shutting down (old) slave: %s" % self.slavename)

                # The remote shutdown call will not complete successfully since the
                # buildbot process exits almost immediately after getting the
                # shutdown request.
                # Here we look at the reason why the remote call failed, and if
                # it's because the connection was lost, that means the slave
                # shutdown as expected.

                def _errback(why):
                    if why.check(pb.PBConnectionLost):
                        log.msg("Lost connection to %s" % self.slavename)
                    else:
                        log.err("Unexpected error when trying to shutdown %s" %
                                self.slavename)

                d.addErrback(_errback)
                return d
            log.err("Couldn't find remote builder to shut down slave")
            return defer.succeed(None)

        yield old_way()

    def maybeShutdown(self):
        """Shut down this slave if it has been asked to shut down gracefully,
        and has no active builders."""
        if not self.slave_status.getGraceful():
            return
        active_builders = [
            sb for sb in self.slavebuilders.values() if sb.isBusy()
        ]
        if active_builders:
            return
        d = self.shutdown()
        d.addErrback(log.err, 'error while shutting down slave')

    def _pauseChanged(self, paused):
        if paused:
            self.botmaster.master.status.slavePaused(self.slavename)
        else:
            self.botmaster.master.status.slaveUnpaused(self.slavename)

    def pause(self):
        """Stop running new builds on the slave."""
        self.slave_status.setPaused(True)

    def unpause(self):
        """Restart running new builds on the slave."""
        self.slave_status.setPaused(False)
        self.botmaster.maybeStartBuildsForSlave(self.slavename)

    def isPaused(self):
        return self.slave_status.isPaused()
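
The keepalive logic in the class above reschedules itself with reactor.callLater
instead of using a repeating timer, so a slow or hung remote call can never stack
up overlapping pings. A minimal, standalone sketch of the same pattern (the
remote attribute and its ping() method are hypothetical stand-ins for the PB
remote reference):

from twisted.internet import reactor

class KeepaliveMixin(object):
    """Ping a remote endpoint every keepalive_interval seconds."""

    keepalive_interval = 3600
    keepalive_timer = None
    remote = None  # hypothetical remote reference; None while disconnected

    def doKeepalive(self):
        # re-arm first, so a failure below cannot break the cycle
        self.keepalive_timer = reactor.callLater(self.keepalive_interval,
                                                 self.doKeepalive)
        if self.remote is None:
            return  # not connected; keep ticking until we are
        d = self.remote.ping()
        d.addErrback(lambda f: None)  # a lost ping is not fatal

    def stopKeepaliveTimer(self):
        if self.keepalive_timer is not None:
            self.keepalive_timer.cancel()
            self.keepalive_timer = None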
Exemplo n.º 35
0
 def createTriggerProperties(self, properties):
     # make a new properties object from a dict rendered by the old
     # properties object
     trigger_properties = Properties()
     trigger_properties.update(properties, "Trigger")
     return trigger_properties
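
A short usage sketch for the helper above, assuming a buildbot checkout where
Properties is importable from buildbot.process.properties; the rendered_props
dict is a hypothetical result of rendering the parent build's properties:

from buildbot.process.properties import Properties

rendered_props = {'got_revision': 'abc123', 'buildnumber': 7}
trigger_properties = Properties()
trigger_properties.update(rendered_props, "Trigger")
# every key now carries "Trigger" as its source annotation
assert trigger_properties.getPropertySource('got_revision') == "Trigger"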
Exemplo n.º 36
0
class AbstractWorker(service.BuildbotService, object):

    """This is the master-side representative for a remote buildbot worker.
    There is exactly one for each worker described in the config file (the
    c['workers'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a worker -- a remote machine capable of
    running builds.  I am instantiated by the configuration file, and can be
    subclassed to add extra functionality."""

    # reconfig workers after builders
    reconfig_priority = 64

    quarantine_timer = None
    quarantine_timeout = quarantine_initial_timeout = 10
    quarantine_max_timeout = 60 * 60
    start_missing_on_startup = True

    def checkConfig(self, name, password, max_builds=None,
                    notify_on_missing=None,
                    missing_timeout=10 * 60,   # Ten minutes
                    properties=None, locks=None, keepalive_interval=3600):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this worker (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this worker
        @type properties: dictionary
        @param locks: A list of locks that must be acquired before this worker
                      can be used
        @type locks: dictionary
        """
        self.name = name = ascii2unicode(name)

        if properties is None:
            properties = {}

        self.password = password

        # protocol registration
        self.registration = None

        self._graceful = False
        self._paused = False

        # these are set when the service is started
        self.manager = None
        self.workerid = None

        self.worker_status = WorkerStatus(name)
        self.worker_commands = None
        self.workerforbuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.lock_subscriptions = []

        self.properties = Properties()
        self.properties.update(properties, "Worker")
        self.properties.setProperty("slavename", name, "Worker (deprecated)")
        self.properties.setProperty("workername", name, "Worker")

        self.lastMessageReceived = 0

        if notify_on_missing is None:
            notify_on_missing = []
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            if not isinstance(i, str):
                config.error(
                    'notify_on_missing arg %r is not a string' % (i,))

        self.missing_timeout = missing_timeout
        self.missing_timer = None

        # a protocol connection, if we're currently connected
        self.conn = None

        self._old_builder_list = None
        self._configured_builderid_list = None

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.name)

    @property
    def workername(self):
        # workername is now an alias to twisted.Service's name
        return self.name
    deprecatedWorkerClassProperty(locals(), workername)

    @property
    def botmaster(self):
        if self.master is None:
            return None
        return self.master.botmaster

    def updateLocks(self):
        """Convert the L{LockAccess} objects in C{self.locks} into real lock
        objects, while also maintaining the subscriptions to lock releases."""
        # unsubscribe from any old locks
        for s in self.lock_subscriptions:
            s.unsubscribe()

        # convert locks into their real form
        locks = [(self.botmaster.getLockFromLockAccess(a), a)
                 for a in self.access]
        self.locks = [(l.getLock(self), la) for l, la in locks]
        self.lock_subscriptions = [l.subscribeToReleases(self._lockReleased)
                                   for l, la in self.locks]

    def locksAvailable(self):
        """
        I am called to see if all the locks I depend on are available,
        in which case I return True; otherwise I return False.
        """
        if not self.locks:
            return True
        for lock, access in self.locks:
            if not lock.isAvailable(self, access):
                return False
        return True

    def acquireLocks(self):
        """
        I am called when a build is preparing to run. I try to claim all
        the locks that are needed for a build to happen. If I can't, then
        my caller should give up the build and try to get another worker
        to look at it.
        """
        log.msg("acquireLocks(worker %s, locks %s)" % (self, self.locks))
        if not self.locksAvailable():
            log.msg("worker %s can't lock, giving up" % (self, ))
            return False
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return True

    def releaseLocks(self):
        """
        I am called to release any locks after a build has finished
        """
        log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)

    def _lockReleased(self):
        """One of the locks for this worker was released; try scheduling
        builds."""
        if not self.botmaster:
            return  # oh well..
        self.botmaster.maybeStartBuildsForWorker(self.name)

    def _applyWorkerInfo(self, info):
        if not info:
            return

        self.worker_status.setAdmin(info.get("admin"))
        self.worker_status.setHost(info.get("host"))
        self.worker_status.setAccessURI(info.get("access_uri", None))
        self.worker_status.setVersion(info.get("version", "(unknown)"))

    @defer.inlineCallbacks
    def _getWorkerInfo(self):
        worker = yield self.master.data.get(
            ('workers', self.workerid))
        self._applyWorkerInfo(worker['workerinfo'])

    def setServiceParent(self, parent):
        # botmaster needs to be set before setServiceParent, which calls
        # startService

        self.manager = parent
        return service.BuildbotService.setServiceParent(self, parent)

    @defer.inlineCallbacks
    def startService(self):
        self.updateLocks()
        self.workerid = yield self.master.data.updates.findWorkerId(
            self.name)

        self.workerActionConsumer = yield self.master.mq.startConsuming(self.controlWorker,
                                                                        ("control", "worker",
                                                                        str(self.workerid),
                                                                        None))

        yield self._getWorkerInfo()
        yield service.BuildbotService.startService(self)

        # startMissingTimer wants the service to be running to really start
        if self.start_missing_on_startup:
            self.startMissingTimer()

    @defer.inlineCallbacks
    def reconfigService(self, name, password, max_builds=None,
                        notify_on_missing=None, missing_timeout=3600,
                        properties=None, locks=None, keepalive_interval=3600):
        # Given a Worker config arguments, configure this one identically.
        # Because Worker objects are remotely referenced, we can't replace them
        # without disconnecting the worker, yet there's no reason to do that.

        assert self.name == name
        self.password = password

        # adopt new instance's configuration parameters
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.notify_on_missing = notify_on_missing

        if self.missing_timeout != missing_timeout:
            running_missing_timer = self.missing_timer
            self.stopMissingTimer()
            self.missing_timeout = missing_timeout
            if running_missing_timer:
                self.startMissingTimer()

        if properties is None:
            properties = {}
        self.properties = Properties()
        self.properties.update(properties, "Worker")
        self.properties.setProperty("slavename", name, "Worker (deprecated)")
        self.properties.setProperty("workername", name, "Worker")

        # update our records with the worker manager
        if not self.registration:
            self.registration = yield self.master.workers.register(self)
        yield self.registration.update(self, self.master.config)

        self.updateLocks()

    @defer.inlineCallbacks
    def reconfigServiceWithSibling(self, sibling):
        # reconfigServiceWithSibling will only reconfigure the worker when it
        # is configured differently. However, the set of builders the worker
        # is configured for can change even when the worker config does not,
        # so the builder list is re-checked below on every reconfig.
        yield service.BuildbotService.reconfigServiceWithSibling(self, sibling)

        # update the attached worker's notion of which builders are attached.
        # This assumes that the relevant builders have already been configured,
        # which is why the reconfig_priority is set low in this class.
        bids = [
            b.getBuilderId() for b in self.botmaster.getBuildersForWorker(self.name)]
        bids = yield defer.gatherResults(bids, consumeErrors=True)
        if self._configured_builderid_list != bids:
            yield self.master.data.updates.workerConfigured(self.workerid, self.master.masterid, bids)
            yield self.updateWorker()
            self._configured_builderid_list = bids

    @defer.inlineCallbacks
    def stopService(self):
        if self.registration:
            yield self.registration.unregister()
            self.registration = None
        self.workerActionConsumer.stopConsuming()
        self.stopMissingTimer()
        self.stopQuarantineTimer()
        # mark this worker as configured for zero builders in this master
        yield self.master.data.updates.workerConfigured(self.workerid, self.master.masterid, [])
        yield service.BuildbotService.stopService(self)

    def startMissingTimer(self):
        if self.missing_timeout and self.parent and self.running:
            self.stopMissingTimer()  # in case it's already running
            self.missing_timer = self.master.reactor.callLater(self.missing_timeout,
                                                               self._missing_timer_fired)

    def stopMissingTimer(self):
        if self.missing_timer:
            if self.missing_timer.active():
                self.missing_timer.cancel()
            self.missing_timer = None

    def isConnected(self):
        return self.conn

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return
        last_connection = time.ctime(time.time() - self.missing_timeout)
        self.master.data.updates.workerMissing(
            workerid=self.workerid,
            masterid=self.master.masterid,
            last_connection=last_connection,
            notify=self.notify_on_missing
        )

    def updateWorker(self):
        """Called to add or remove builders after the worker has connected.

        @return: a Deferred that indicates when an attached worker has
        accepted the new builders and/or released the old ones."""
        if self.conn:
            return self.sendBuilderList()
        return defer.succeed(None)

    @defer.inlineCallbacks
    def attached(self, conn):
        """This is called when the worker connects."""

        metrics.MetricCountEvent.log("AbstractWorker.attached_workers", 1)

        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this worker to all the
        # Builders that care about it.

        # Reset graceful shutdown status
        self._graceful = False

        self.conn = conn
        self._old_builder_list = None  # clear builder list before proceed

        self.worker_status.setConnected(True)

        self._applyWorkerInfo(conn.info)
        self.worker_commands = conn.info.get("worker_commands", {})
        self.worker_environ = conn.info.get("environ", {})
        self.worker_basedir = conn.info.get("basedir", None)
        self.worker_system = conn.info.get("system", None)

        self.conn.notifyOnDisconnect(self.detached)

        workerinfo = {
            'admin': conn.info.get('admin'),
            'host': conn.info.get('host'),
            'access_uri': conn.info.get('access_uri'),
            'version': conn.info.get('version')
        }

        yield self.master.data.updates.workerConnected(
            workerid=self.workerid,
            masterid=self.master.masterid,
            workerinfo=workerinfo
        )

        if self.worker_system == "nt":
            self.path_module = namedModule("ntpath")
        else:
            # most everything accepts / as separator, so posix should be a
            # reasonable fallback
            self.path_module = namedModule("posixpath")
        log.msg("bot attached")
        self.messageReceivedFromWorker()
        self.stopMissingTimer()
        yield self.updateWorker()
        yield self.botmaster.maybeStartBuildsForWorker(self.name)

    def messageReceivedFromWorker(self):
        now = time.time()
        self.lastMessageReceived = now
        self.worker_status.setLastMessageReceived(now)

    @defer.inlineCallbacks
    def detached(self):
        metrics.MetricCountEvent.log("AbstractWorker.attached_workers", -1)
        self.conn = None
        self._old_builder_list = []
        self.worker_status.setConnected(False)
        log.msg("Worker.detached(%s)" % (self.name,))
        self.releaseLocks()
        yield self.master.data.updates.workerDisconnected(
            workerid=self.workerid,
            masterid=self.master.masterid,
        )

    def disconnect(self):
        """Forcibly disconnect the worker.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the worker is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a worker is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown worker. The second is
        when we wind up with two connections for the same worker, in which
        case we disconnect the older connection.
        """
        if self.conn is None:
            return defer.succeed(None)
        log.msg("disconnecting old worker %s now" % (self.name,))
        # When this Deferred fires, we'll be ready to accept the new worker
        return self._disconnect(self.conn)

    def _disconnect(self, conn):
        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new worker. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback
        def _disconnected():
            eventually(d.callback, None)
        conn.notifyOnDisconnect(_disconnected)
        conn.loseConnection()
        log.msg("waiting for worker to finish disconnecting")

        return d

    @defer.inlineCallbacks
    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForWorker(self.name)

        blist = [(b.name, b.config.workerbuilddir) for b in our_builders]

        if blist == self._old_builder_list:
            return

        slist = yield self.conn.remoteSetBuilderList(builders=blist)

        self._old_builder_list = blist

        # Nothing has changed, so don't need to re-attach to everything
        if not slist:
            return

        dl = []
        for name in slist:
            # use get() since we might have changed our mind since then
            b = self.botmaster.builders.get(name)
            if b:
                d1 = self.attachBuilder(b)
                dl.append(d1)
        yield defer.DeferredList(dl)

    def attachBuilder(self, builder):
        return builder.attached(self, self.worker_commands)

    def controlWorker(self, key, params):
        log.msg("worker {} wants to {}: {}".format(self.name, key[-1], params))
        if key[-1] == "stop":
            return self.shutdownRequested()
        if key[-1] == "pause":
            self.pause()
        if key[-1] == "unpause":
            self.unpause()
        if key[-1] == "kill":
            self.shutdown()

    def shutdownRequested(self):
        self._graceful = True
        self.maybeShutdown()

    def addWorkerForBuilder(self, wfb):
        self.workerforbuilders[wfb.builder_name] = wfb

    def removeWorkerForBuilder(self, wfb):
        try:
            del self.workerforbuilders[wfb.builder_name]
        except KeyError:
            pass

    def buildFinished(self, wfb):
        """This is called when a build on this worker is finished."""
        self.botmaster.maybeStartBuildsForWorker(self.name)

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this worker
        can start a build.  This function can be used to limit overall
        concurrency on the worker.

        Note for subclassers: if a worker can become willing to start a build
        without any action on that worker (for example, by a resource in use on
        another worker becoming available), then you must arrange for
        L{maybeStartBuildsForWorker} to be called at that time, or builds on
        this worker will not start.
        """

        if self.quarantine_timer:
            return False

        # If we're waiting to shutdown gracefully or paused, then we shouldn't
        # accept any new jobs.
        if self._graceful or self._paused:
            return False

        if self.max_builds:
            active_builders = [wfb for wfb in itervalues(self.workerforbuilders)
                               if wfb.isBusy()]
            if len(active_builders) >= self.max_builds:
                return False

        if not self.locksAvailable():
            return False

        return True

    @defer.inlineCallbacks
    def shutdown(self):
        """Shutdown the worker"""
        if not self.conn:
            log.msg("no remote; worker is already shut down")
            return

        yield self.conn.remoteShutdown()

    def maybeShutdown(self):
        """Shut down this worker if it has been asked to shut down gracefully,
        and has no active builders."""
        if not self._graceful:
            return
        active_builders = [wfb for wfb in itervalues(self.workerforbuilders)
                           if wfb.isBusy()]
        if active_builders:
            return
        d = self.shutdown()
        d.addErrback(log.err, 'error while shutting down worker')

    def pause(self):
        """Stop running new builds on the worker."""
        self._paused = True

    def unpause(self):
        """Restart running new builds on the worker."""
        self._paused = False
        self.botmaster.maybeStartBuildsForWorker(self.name)

    def isPaused(self):
        return self._paused

    def resetQuarantine(self):
        self.quarantine_timeout = self.quarantine_initial_timeout

    def putInQuarantine(self):
        if self.quarantine_timer:  # already in quarantine
            return
        self.quarantine_timer = self.master.reactor.callLater(
            self.quarantine_timeout, self.exitQuarantine)
        log.msg("{} has been put in quarantine for {}s".format(
            self.name, self.quarantine_timeout))
        # next we will wait twice as long
        self.quarantine_timeout *= 2
        if self.quarantine_timeout > self.quarantine_max_timeout:
            # unless we hit the max timeout
            self.quarantine_timeout = self.quarantine_max_timeout

    def exitQuarantine(self):
        self.quarantine_timer = None
        self.botmaster.maybeStartBuildsForWorker(self.name)

    def stopQuarantineTimer(self):
        if self.quarantine_timer is not None:
            self.quarantine_timer.cancel()
            self.quarantine_timer = None
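
The quarantine logic above is a capped exponential backoff: each new quarantine
doubles the timeout until it hits quarantine_max_timeout, and resetQuarantine()
drops it back to the initial value. A self-contained sketch of just that
arithmetic (the constants mirror the class attributes above):

QUARANTINE_INITIAL = 10   # seconds, as in quarantine_initial_timeout
QUARANTINE_MAX = 60 * 60  # one-hour cap, as in quarantine_max_timeout

def next_quarantine_timeout(current):
    """Return the timeout to use for the next quarantine period."""
    return min(current * 2, QUARANTINE_MAX)

timeout = QUARANTINE_INITIAL
for _ in range(10):
    print(timeout)  # 10, 20, 40, ... then pinned at 3600
    timeout = next_quarantine_timeout(timeout)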
Exemplo n.º 37
0
def MockBuild(my_builder,
              buildsetup,
              mastername,
              slavename,
              basepath=None,
              build_properties=None,
              slavedir=None):
    """Given a builder object and configuration, mock a Buildbot setup around it.

  This sets up a mock BuildMaster, BuildSlave, Build, BuildStatus, and all other
  superstructure required for BuildSteps inside the provided builder to render
  properly. These BuildSteps are returned to the user in an array. It
  additionally returns the build object (in order to get its properties if
  desired).

  buildsetup is passed straight into the FakeSource's init method and
  contains sourcestamp information (revision, branch, etc).

  basepath is the directory of the build (what goes under build/slave/, for
  example 'Chromium_Linux_Builder'). It is nominally inferred from the builder
  name, but it can be overridden. This is useful when pointing the buildrunner
  at a different builder than what it's running under.

  build_properties will update and override the build's properties after all
  builder-derived defaults have been set.
  """

    my_factory = my_builder['factory']
    steplist = ListSteps(my_factory)

    build = base.Build([FakeRequest(buildsetup)])
    safename = buildbot.util.safeTranslate(my_builder['name'])

    my_builder['builddir'] = safename
    my_builder.setdefault('slavebuilddir', safename)

    workdir_root = None
    if not slavedir:
        workdir_root = os.path.join(SCRIPT_DIR, '..', '..', 'slave',
                                    my_builder['slavebuilddir'])

    if not basepath:
        basepath = safename
    if not slavedir:
        slavedir = os.path.join(SCRIPT_DIR, '..', '..', 'slave')
    basedir = os.path.join(slavedir, basepath)
    build.basedir = basedir
    if not workdir_root:
        workdir_root = basedir

    builderstatus = builder.BuilderStatus('test')
    builderstatus.basedir = basedir
    # build_properties defaults to None, so guard before calling .get()
    buildnumber = (build_properties or {}).get('buildnumber', 1)
    builderstatus.nextBuildNumber = buildnumber + 1

    mybuilder = real_builder.Builder(my_builder, builderstatus)
    build.setBuilder(mybuilder)
    build_status = build_module.BuildStatus(builderstatus, buildnumber)

    build_status.setProperty('blamelist', [], 'Build')
    build_status.setProperty('mastername', mastername, 'Build')
    build_status.setProperty('slavename', slavename, 'Build')
    build_status.setProperty('gtest_filter', [], 'Build')
    build_status.setProperty('extra_args', [], 'Build')
    build_status.setProperty('build_id', buildnumber, 'Build')

    # if build_properties are passed in, overwrite the defaults above:
    buildprops = Properties()
    if build_properties:
        buildprops.update(build_properties, 'Botmaster')
    mybuilder.setBotmaster(FakeBotmaster(mastername, buildprops))

    buildslave = FakeSlave(safename, my_builder.get('slavebuilddir'),
                           slavename)
    build.build_status = build_status
    build.setupSlaveBuilder(buildslave)
    build.setupProperties()
    process_steps(steplist, build, buildslave, build_status, workdir_root)

    return steplist, build
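
MockBuild derives the build directory from the builder name with
buildbot.util.safeTranslate, which rewrites characters that are unsafe in path
names. A tiny sketch of that one step (assuming buildbot is importable; the
expected output follows the docstring's own 'Chromium_Linux_Builder' example):

import buildbot.util

name = 'Chromium Linux Builder'
safename = buildbot.util.safeTranslate(name)
print(safename)  # expected: Chromium_Linux_Builder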
Exemplo n.º 38
0
class Change:
    """I represent a single change to the source tree. This may involve
    several files, but they are all changed by the same person, and there is
    a change comment for the group as a whole.

    If the version control system supports sequential repository- (or
    branch-) wide change numbers (like SVN, P4, and Bzr), then revision=
    should be set to that number. The highest such number will be used at
    checkout time to get the correct set of files.

    If it does not (like CVS), when= should be set to the timestamp (seconds
    since epoch, as returned by time.time()) when the change was made. when=
    will be filled in for you (to the current time) if you omit it, which is
    suitable for ChangeSources which have no way of getting more accurate
    timestamps.

    The revision= and branch= values must be ASCII bytestrings, since they
    will eventually be used in a ShellCommand and passed to os.exec(), which
    requires bytestrings. These values will also be stored in a database,
    possibly as unicode, so they must be safely convertible back and forth.
    This restriction may be relaxed in the future.

    Changes should be submitted to ChangeMaster.addChange() in
    chronologically increasing order. Out-of-order changes will probably
    cause the web status displays to be corrupted."""

    implements(interfaces.IStatusEvent)

    number = None

    branch = None
    category = None
    revision = None  # used to create a source-stamp

    def __init__(self,
                 who,
                 files,
                 comments,
                 isdir=0,
                 links=None,
                 revision=None,
                 when=None,
                 branch=None,
                 category=None,
                 revlink='',
                 properties={},
                 repository='',
                 project=''):
        self.who = who
        self.comments = comments
        self.isdir = isdir
        if links is None:
            links = []
        self.links = links

        def none_or_unicode(x):
            if x is None:
                return x
            return unicode(x)

        self.revision = none_or_unicode(revision)
        now = util.now()
        if when is None:
            self.when = now
        elif when > now:
            # this happens when the committing system has an incorrect clock, for example.
            # handle it gracefully
            log.msg(
                "received a Change with when > now; assuming the change happened now"
            )
            self.when = now
        else:
            self.when = when
        self.branch = none_or_unicode(branch)
        self.category = none_or_unicode(category)
        self.revlink = revlink
        self.properties = Properties()
        self.properties.update(properties, "Change")
        self.repository = repository
        self.project = project

        # keep a sorted list of the files, for easier display
        self.files = files[:]
        self.files.sort()

    def __setstate__(self, dict):
        self.__dict__ = dict
        # Older Changes won't have a 'properties' attribute in them
        if not hasattr(self, 'properties'):
            self.properties = Properties()
        if not hasattr(self, 'revlink'):
            self.revlink = ""

    def asText(self):
        data = ""
        data += self.getFileContents()
        if self.repository:
            data += "On: %s\n" % self.repository
        if self.project:
            data += "For: %s\n" % self.project
        data += "At: %s\n" % self.getTime()
        data += "Changed By: %s\n" % self.who
        data += "Comments: %s" % self.comments
        data += "Properties: \n%s\n\n" % self.getProperties()
        return data

    def asDict(self):
        '''returns a dictionary with suitable info for html/mail rendering'''
        result = {}

        files = []
        for file in self.files:
            link = filter(lambda s: s.find(file) != -1, self.links)
            if len(link) == 1:
                url = link[0]
            else:
                url = None
            files.append(dict(url=url, name=file))

        files = sorted(files, key=lambda f: f['name'])

        # Constant
        result['number'] = self.number
        result['branch'] = self.branch
        result['category'] = self.category
        result['who'] = self.getShortAuthor()
        result['comments'] = self.comments
        result['revision'] = self.revision
        result['rev'] = self.revision
        result['when'] = self.when
        result['at'] = self.getTime()
        result['files'] = files
        result['revlink'] = getattr(self, 'revlink', None)
        result['properties'] = self.properties.asList()
        result['repository'] = getattr(self, 'repository', None)
        result['project'] = getattr(self, 'project', None)
        return result

    def getShortAuthor(self):
        return self.who

    def getTime(self):
        if not self.when:
            return "?"
        return time.strftime("%a %d %b %Y %H:%M:%S", time.localtime(self.when))

    def getTimes(self):
        return (self.when, None)

    def getText(self):
        return [html.escape(self.who)]

    def getLogs(self):
        return {}

    def getFileContents(self):
        data = ""
        if len(self.files) == 1:
            if self.isdir:
                data += "Directory: %s\n" % self.files[0]
            else:
                data += "File: %s\n" % self.files[0]
        else:
            data += "Files:\n"
            for f in self.files:
                data += " %s\n" % f
        return data

    def getProperties(self):
        data = ""
        for prop in self.properties.asList():
            data += "  %s: %s" % (prop[0], prop[1])
        return data
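
A brief usage sketch for the Change class above, assuming the module's own
imports (Properties, util, log) are in scope; all values are illustrative:

c = Change(who='alice@example.com',
           files=['src/main.c', 'src/util.c'],
           comments='Fix null-pointer crash in the parser',
           revision='1234',
           branch='trunk',
           properties={'release': 'beta'},
           repository='svn://example.com/repo',
           project='parser')
print(c.asText())   # files, time, author, comments, and properties
print(c.getTime())  # formatted timestamp derived from when= / util.now()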
Exemplo n.º 39
0
class BaseScheduler(service.MultiService, ComparableMixin, StateMixin, ScheduleOnMultipleSlavesMixin):
    """
    Base class for all schedulers; this provides the equipment to manage
    reconfigurations and to handle basic scheduler state.  It also provides
    utility methods to begin various sorts of builds.

    Subclasses should add any configuration-derived attributes to
    C{base.Scheduler.compare_attrs}.
    """

    implements(interfaces.IScheduler)

    DefaultCodebases = {'':{}}

    compare_attrs = ('name', 'builderNames', 'properties', 'codebases')

    def __init__(self, name, builderNames, properties,
                 codebases=DefaultCodebases):
        """
        Initialize a Scheduler.

        @param name: name of this scheduler (used as a key for state)
        @type name: unicode

        @param builderNames: list of builders this scheduler may start
        @type builderNames: list of unicode

        @param properties: properties to add to builds triggered by this
        scheduler
        @type properties: dictionary

        @param codebases: codebases that are necessary to process the changes
        @type codebases: dict with the following structure:
            key: '<codebase>'
            value: {'repository': '<repo>', 'branch': '<br>', 'revision': '<rev>'}

        """
        service.MultiService.__init__(self)
        self.name = name
        "name of this scheduler; used to identify replacements on reconfig"

        ok = True
        if not isinstance(builderNames, (list, tuple)):
            ok = False
        else:
            for b in builderNames:
                if not isinstance(b, basestring):
                    ok = False
        if not ok:
            config.error(
                "The builderNames argument to a scheduler must be a list "
                  "of Builder names.")

        self.builderNames = builderNames
        "list of builder names to start in each buildset"

        self.properties = Properties()
        "properties that are contributed to each buildset"
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")

        self.objectid = None

        self.master = None

        # Set the codebases that are necessary to process the changes
        # These codebases will always result in a sourcestamp with or without changes
        if codebases is not None:
            if not isinstance(codebases, dict):
                config.error("Codebases must be a dict of dicts")
            for codebase, codebase_attrs in codebases.iteritems():
                if not isinstance(codebase_attrs, dict):
                    config.error("Codebases must be a dict of dicts")
                if (codebases != BaseScheduler.DefaultCodebases and
                   'repository' not in codebase_attrs):
                    config.error("The key 'repository' is mandatory in codebases")
        else:
            config.error("Codebases cannot be None")

        self.codebases = codebases
        
        # internal variables
        self._change_subscription = None
        self._change_consumption_lock = defer.DeferredLock()

    ## service handling

    def startService(self):
        service.MultiService.startService(self)

    def findNewSchedulerInstance(self, new_config):
        return new_config.schedulers[self.name] # should exist!

    def stopService(self):
        d = defer.maybeDeferred(self._stopConsumingChanges)
        d.addCallback(lambda _ : service.MultiService.stopService(self))
        return d


    ## status queries

    # TODO: these aren't compatible with distributed schedulers

    def listBuilderNames(self):
        "Returns the list of builder names"
        return self.builderNames

    def getPendingBuildTimes(self):
        "Returns a list of the next times that builds are scheduled, if known."
        return []

    ## change handling

    def startConsumingChanges(self, fileIsImportant=None, change_filter=None,
                              onlyImportant=False):
        """
        Subclasses should call this method from startService to register to
        receive changes.  The BaseScheduler class will take care of filtering
        the changes (using change_filter) and (if fileIsImportant is not None)
        classifying them.  See L{gotChange}.  Returns a Deferred.

        @param fileIsImportant: a callable provided by the user to distinguish
        important and unimportant changes
        @type fileIsImportant: callable

        @param change_filter: a filter to determine which changes are even
        considered by this scheduler, or C{None} to consider all changes
        @type change_filter: L{buildbot.changes.filter.ChangeFilter} instance

        @param onlyImportant: If True, only important changes, as specified by
        fileIsImportant, will be added to the buildset.
        @type onlyImportant: boolean

        """
        assert fileIsImportant is None or callable(fileIsImportant)

        # register for changes with master
        assert not self._change_subscription
        def changeCallback(change):
            # ignore changes delivered while we're not running
            if not self._change_subscription:
                return

            if change_filter and not change_filter.filter_change(change):
                return

            if change.codebase not in self.codebases:
                log.msg(format='change contains codebase %(codebase)s that is '
                        'not processed by scheduler %(name)s',
                        codebase=change.codebase, name=self.name)
                return

            if fileIsImportant:
                try:
                    important = fileIsImportant(change)
                    if not important and onlyImportant:
                        return
                except Exception:
                    klog.err_json(failure.Failure(),
                            'in fileIsImportant check for %s' % change)
                    return
            else:
                important = True

            # use change_consumption_lock to ensure the service does not stop
            # while this change is being processed
            d = self._change_consumption_lock.run(self.gotChange, change, important)
            d.addErrback(klog.err_json, 'while processing change')
        self._change_subscription = self.master.subscribeToChanges(changeCallback)

        return defer.succeed(None)

    def _stopConsumingChanges(self):
        # (note: called automatically in stopService)

        # acquire the change consumption lock to ensure that any change
        # consumption is complete before we are done stopping consumption
        def stop():
            if self._change_subscription:
                self._change_subscription.unsubscribe()
                self._change_subscription = None
        return self._change_consumption_lock.run(stop)

    def gotChange(self, change, important):
        """
        Called when a change is received; returns a Deferred.  If the
        C{fileIsImportant} parameter to C{startConsumingChanges} was C{None},
        then all changes are considered important.
        The C{codebase} of the change always has an entry in the C{codebases}
        dictionary of the scheduler.

        @param change: the new change object
        @type change: L{buildbot.changes.changes.Change} instance
        @param important: true if this is an important change, according to
        C{fileIsImportant}.
        @type important: boolean
        @returns: Deferred
        """
        raise NotImplementedError
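
    # Hedged sketch of a gotChange override (hypothetical subclass; real
    # schedulers typically add tree-stable-timer or batching logic here):
    #
    #     def gotChange(self, change, important):
    #         if not important:
    #             return defer.succeed(None)
    #         return self.addBuildsetForChanges(
    #             reason='change by %s' % change.who,
    #             changeids=[change.number])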

    ## starting builds

    @defer.inlineCallbacks
    def addBuildsetForLatest(self, reason='', external_idstring=None,
                        branch=None, repository='', project='',
                        builderNames=None, properties=None):
        """
        Add a buildset for the 'latest' source in the given branch,
        repository, and project.  This will create a relative sourcestamp for
        the buildset.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's addBuildset
        method with the appropriate parameters.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param branch: branch to build (note that None often has a special meaning)
        @param repository: repository name for sourcestamp
        @param project: project name for sourcestamp
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        # Define setid for this set of changed repositories
        setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        # add a sourcestamp for each codebase
        for codebase, cb_info in self.codebases.iteritems():
            ss_repository = cb_info.get('repository', repository)
            ss_branch = cb_info.get('branch', branch)
            ss_revision = cb_info.get('revision', None)

            # Confirm that we have the specified branch
            oid = yield self.master.db.state.getObjectId(
                    ss_repository, cb_info.get('poller', "HgPoller"))
            lastRev = yield self.master.db.state.getState(oid, 'lastRev', None)
            if lastRev is not None and branch in lastRev:
                ss_branch = branch

            yield self.master.db.sourcestamps.addSourceStamp(
                        codebase=codebase,
                        repository=ss_repository,
                        branch=ss_branch,
                        revision=ss_revision,
                        project=project,
                        changeids=set(),
                        sourcestampsetid=setid)

        bsid, brids = yield self.addBuildsetForSourceStamp(
                                setid=setid, reason=reason,
                                external_idstring=external_idstring,
                                builderNames=builderNames,
                                properties=properties)

        defer.returnValue((bsid, brids))
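
    # Illustrative call from a scheduler method (hedged; the branch and
    # repository values are made up):
    #
    #     bsid, brids = yield self.addBuildsetForLatest(
    #         reason="forced: rebuild tip",
    #         branch="default",
    #         repository="https://hg.example.org/repo")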


    @defer.inlineCallbacks
    def addBuildsetForSourceStampDetails(self, reason='', external_idstring=None,
                        branch=None, repository='', project='', revision=None,
                        builderNames=None, properties=None):
        """
        Given details about the source code to build, create a source stamp and
        then add a buildset for it.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param branch: branch to build (note that None often has a special meaning)
        @param repository: repository name for sourcestamp
        @param project: project name for sourcestamp
        @param revision: revision to build - default is latest
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        # Define setid for this set of changed repositories
        setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        yield self.master.db.sourcestamps.addSourceStamp(
                branch=branch, revision=revision, repository=repository,
                project=project, sourcestampsetid=setid)

        rv = yield self.addBuildsetForSourceStamp(
                                setid=setid, reason=reason,
                                external_idstring=external_idstring,
                                builderNames=builderNames,
                                properties=properties)
        defer.returnValue(rv)


    @defer.inlineCallbacks
    def addBuildsetForSourceStampSetDetails(self, reason, sourcestamps,
                                            properties, triggeredbybrid=None, builderNames=None):

        if triggeredbybrid is not None:

            if builderNames is None:
                builderNames = self.builderNames

        if sourcestamps is None:
            sourcestamps = {}

        # Define new setid for this set of sourcestamps
        new_setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        # Merge codebases with the passed list of sourcestamps
        # This results in a new sourcestamp for each codebase
        for codebase in self.codebases:
            ss = self.codebases[codebase].copy()
            # apply info from passed sourcestamps onto the configured default
            # sourcestamp attributes for this codebase.
            ss.update(sourcestamps.get(codebase, {}))

            # add sourcestamp to the new setid
            revision = ss.get('revision', None)
            if revision is not None:
                revision = revision.strip()

            yield self.master.db.sourcestamps.addSourceStamp(
                        codebase=codebase,
                        repository=ss.get('repository', ''),
                        branch=ss.get('branch', None),
                        revision=revision,
                        project=ss.get('project', ''),
                        changeids=[c['number'] for c in ss.get('changes', [])],
                        patch_body=ss.get('patch_body', None),
                        patch_level=ss.get('patch_level', None),
                        patch_author=ss.get('patch_author', None),
                        patch_comment=ss.get('patch_comment', None),
                        sourcestampsetid=new_setid)

        rv = yield self.addBuildsetForSourceStamp(
                                setid=new_setid, reason=reason,
                                properties=properties,
                                triggeredbybrid=triggeredbybrid,
                                builderNames=builderNames)

        defer.returnValue(rv)


    @defer.inlineCallbacks
    def addBuildsetForChanges(self, reason='', external_idstring=None,
            changeids=None, builderNames=None, properties=None):
        if changeids is None:
            changeids = []
        changesByCodebase = {}

        def get_last_change_for_codebase(codebase):
            return max(changesByCodebase[codebase],
                       key=lambda change: change["changeid"])

        # Define setid for this set of changed repositories
        setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        # Changes are retrieved from database and grouped by their codebase
        for changeid in changeids:
            chdict = yield self.master.db.changes.getChange(changeid)
            # group change by codebase
            changesByCodebase.setdefault(chdict["codebase"], []).append(chdict)

        for codebase in self.codebases:
            args = {'codebase': codebase, 'sourcestampsetid': setid }
            if codebase not in changesByCodebase:
                # codebase has no changes
                # create a sourcestamp that has no changes
                args['repository'] = self.codebases[codebase]['repository']
                args['branch'] = self.codebases[codebase].get('branch', None)
                args['revision'] = self.codebases[codebase].get('revision', None)
                args['changeids'] = set()
                args['project'] = ''
            else:
                # codebase has changes
                args['changeids'] = [c["changeid"] for c in changesByCodebase[codebase]]
                lastChange = get_last_change_for_codebase(codebase)
                for key in ['repository', 'branch', 'revision', 'project']:
                    args[key] = lastChange[key]

            yield self.master.db.sourcestamps.addSourceStamp(**args)

        # add one buildset, this buildset is connected to the sourcestamps by the setid
        bsid, brids = yield self.addBuildsetForSourceStamp(setid=setid,
                            reason=reason, external_idstring=external_idstring,
                            builderNames=builderNames, properties=properties)

        defer.returnValue((bsid, brids))

    @defer.inlineCallbacks
    def addBuildsetForSourceStamp(self, ssid=None, setid=None, reason='', external_idstring=None,
            properties=None, triggeredbybrid=None, builderNames=None):
        """
        Add a buildset for the given, already-existing sourcestamp.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's
        L{BuildMaster.addBuildset} method with the appropriate parameters, and
        return the same result.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param setid: identification of a set of sourcestamps
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        assert (ssid is None and setid is not None) \
            or (ssid is not None and setid is None), \
            "pass a single sourcestamp OR a sourcestamp set, not both"

        # combine properties
        if properties:
            properties.updateFromProperties(self.properties)
        else:
            properties = self.properties

        # apply the default builderNames
        if not builderNames:
            builderNames = self.builderNames

        # translate properties object into a dict as required by the
        # addBuildset method
        properties_dict = properties.asDict()

        if setid is None:
            if ssid is not None:
                ssdict = yield self.master.db.sourcestamps.getSourceStamp(ssid)
                setid = ssdict['sourcestampsetid']
            else:
                # no sourcestamp and no sets
                yield None

        rv = yield self.master.addBuildset(sourcestampsetid=setid,
                            reason=reason, properties=properties_dict,
                            triggeredbybrid=triggeredbybrid,
                            builderNames=builderNames,
                            external_idstring=external_idstring)
        defer.returnValue(rv)
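
    # Hedged usage sketch: exactly one of ssid/setid may be given
    # (the id values here are hypothetical):
    #
    #     yield self.addBuildsetForSourceStamp(ssid=123, reason='rebuild')
    #     yield self.addBuildsetForSourceStamp(setid=45, reason='rebuild')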
Exemplo n.º 40
0
class BuildStatus(styles.Versioned):
    implements(interfaces.IBuildStatus, interfaces.IStatusEvent)

    persistenceVersion = 3
    persistenceForgets = ( 'wasUpgraded', )

    source = None
    reason = None
    changes = []
    blamelist = []
    progress = None
    started = None
    finished = None
    currentStep = None
    text = []
    results = None
    slavename = "???"

    # these lists/dicts are defined here so that unserialized instances have
    # (empty) values. They are set in __init__ to new objects to make sure
    # each instance gets its own copy.
    watchers = []
    updates = {}
    finishedWatchers = []
    testResults = {}

    def __init__(self, parent, number):
        """
        @type  parent: L{BuilderStatus}
        @type  number: int
        """
        assert interfaces.IBuilderStatus(parent)
        self.builder = parent
        self.number = number
        self.watchers = []
        self.updates = {}
        self.finishedWatchers = []
        self.steps = []
        self.testResults = {}
        self.properties = Properties()

    def __repr__(self):
        return "<%s #%s>" % (self.__class__.__name__, self.number)

    # IBuildStatus

    def getBuilder(self):
        """
        @rtype: L{BuilderStatus}
        """
        return self.builder

    def getProperty(self, propname):
        return self.properties[propname]

    def getProperties(self):
        return self.properties

    def getNumber(self):
        return self.number

    def getPreviousBuild(self):
        if self.number == 0:
            return None
        return self.builder.getBuild(self.number-1)

    def getSourceStamp(self, absolute=False):
        if not absolute or 'got_revision' not in self.properties:
            return self.source
        return self.source.getAbsoluteSourceStamp(self.properties['got_revision'])

    def getReason(self):
        return self.reason

    def getChanges(self):
        return self.changes

    def getResponsibleUsers(self):
        return self.blamelist

    def getInterestedUsers(self):
        # TODO: the Builder should add others: sheriffs, domain-owners
        return self.blamelist + self.properties.getProperty('owners', [])

    def getSteps(self):
        """Return a list of IBuildStepStatus objects. For invariant builds
        (those which always use the same set of Steps), this should be the
        complete list, however some of the steps may not have started yet
        (step.getTimes()[0] will be None). For variant builds, this may not
        be complete (asking again later may give you more of them)."""
        return self.steps

    def getTimes(self):
        return (self.started, self.finished)

    _sentinel = [] # used as a sentinel to indicate unspecified initial_value
    def getSummaryStatistic(self, name, summary_fn, initial_value=_sentinel):
        """Summarize the named statistic over all steps in which it
        exists, using combination_fn and initial_value to combine multiple
        results into a single result.  This translates to a call to Python's
        X{reduce}::
            return reduce(summary_fn, step_stats_list, initial_value)
        """
        step_stats_list = [
                st.getStatistic(name)
                for st in self.steps
                if st.hasStatistic(name) ]
        if initial_value is self._sentinel:
            return reduce(summary_fn, step_stats_list)
        else:
            return reduce(summary_fn, step_stats_list, initial_value)
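
    # Hedged example: summing a per-step statistic across the build
    # (the statistic name 'tests-passed' is hypothetical):
    #
    #     total = build_status.getSummaryStatistic(
    #         'tests-passed', lambda acc, cur: acc + cur, initial_value=0)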

    def isFinished(self):
        return (self.finished is not None)

    def waitUntilFinished(self):
        if self.finished:
            d = defer.succeed(self)
        else:
            d = defer.Deferred()
            self.finishedWatchers.append(d)
        return d

    # while the build is running, the following methods make sense.
    # Afterwards they return None

    def getETA(self):
        if self.finished is not None:
            return None
        if not self.progress:
            return None
        eta = self.progress.eta()
        if eta is None:
            return None
        return eta - util.now()

    def getCurrentStep(self):
        return self.currentStep

    # Once you know the build has finished, the following methods are legal.
    # Before the build has finished, they all return None.

    def getText(self):
        text = []
        text.extend(self.text)
        for s in self.steps:
            text.extend(s.text2)
        return text

    def getResults(self):
        return self.results

    def getSlavename(self):
        return self.slavename

    def getTestResults(self):
        return self.testResults

    def getTestResultsOrd(self):
        trs = sorted(self.testResults.keys())
        return [self.testResults[t] for t in trs]

    def getLogs(self):
        # TODO: steps should contribute significant logs instead of this
        # hack, which returns every log from every step. The logs should get
        # names like "compile" and "test" instead of "compile.output"
        logs = []
        for s in self.steps:
            for loog in s.getLogs():
                logs.append(loog)
        return logs

    # subscription interface

    def subscribe(self, receiver, updateInterval=None):
        # will receive stepStarted and stepFinished messages
        # and maybe buildETAUpdate
        self.watchers.append(receiver)
        if updateInterval is not None:
            self.sendETAUpdate(receiver, updateInterval)

    def sendETAUpdate(self, receiver, updateInterval):
        self.updates[receiver] = None
        ETA = self.getETA()
        if ETA is not None:
            receiver.buildETAUpdate(self, ETA)
        # they might have unsubscribed during buildETAUpdate
        if receiver in self.watchers:
            self.updates[receiver] = reactor.callLater(updateInterval,
                                                       self.sendETAUpdate,
                                                       receiver,
                                                       updateInterval)

    def unsubscribe(self, receiver):
        if receiver in self.watchers:
            self.watchers.remove(receiver)
        if receiver in self.updates:
            if self.updates[receiver] is not None:
                self.updates[receiver].cancel()
            del self.updates[receiver]
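
    # Hedged usage sketch (``receiver`` is any object implementing
    # stepStarted/stepFinished, plus buildETAUpdate if an interval is given):
    #
    #     build_status.subscribe(receiver, updateInterval=10)
    #     # ... receiver.buildETAUpdate(build, eta) fires every 10 seconds ...
    #     build_status.unsubscribe(receiver)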

    # methods for the base.Build to invoke

    def addStepWithName(self, name):
        """The Build is setting up, and has added a new BuildStep to its
        list. Create a BuildStepStatus object to which it can send status
        updates."""

        s = BuildStepStatus(self, len(self.steps))
        s.setName(name)
        self.steps.append(s)
        return s

    def setProperty(self, propname, value, source, runtime=True):
        self.properties.setProperty(propname, value, source, runtime)

    def addTestResult(self, result):
        self.testResults[result.getName()] = result

    def setSourceStamp(self, sourceStamp):
        self.source = sourceStamp
        self.changes = self.source.changes

    def setReason(self, reason):
        self.reason = reason
    def setBlamelist(self, blamelist):
        self.blamelist = blamelist
    def setProgress(self, progress):
        self.progress = progress

    def buildStarted(self, build):
        """The Build has been set up and is about to be started. It can now
        be safely queried, so it is time to announce the new build."""

        self.started = util.now()
        # now that we're ready to report status, let the BuilderStatus tell
        # the world about us
        self.builder.buildStarted(self)

    def setSlavename(self, slavename):
        self.slavename = slavename

    def setText(self, text):
        assert isinstance(text, (list, tuple))
        self.text = text
    def setResults(self, results):
        self.results = results

    def buildFinished(self):
        self.currentStep = None
        self.finished = util.now()

        for r in self.updates.keys():
            if self.updates[r] is not None:
                self.updates[r].cancel()
                del self.updates[r]

        watchers = self.finishedWatchers
        self.finishedWatchers = []
        for w in watchers:
            w.callback(self)

    # methods called by our BuildStepStatus children

    def stepStarted(self, step):
        self.currentStep = step
        for w in self.watchers:
            receiver = w.stepStarted(self, step)
            if receiver:
                if isinstance(receiver, tuple):
                    step.subscribe(receiver[0], receiver[1])
                else:
                    step.subscribe(receiver)
                d = step.waitUntilFinished()
                d.addCallback(lambda step: step.unsubscribe(receiver))

        step.waitUntilFinished().addCallback(self._stepFinished)

    def _stepFinished(self, step):
        results = step.getResults()
        for w in self.watchers:
            w.stepFinished(self, step, results)

    # methods called by our BuilderStatus parent

    def pruneSteps(self):
        # this build is very old: remove the build steps too
        self.steps = []

    # persistence stuff

    def generateLogfileName(self, stepname, logname):
        """Return a filename (relative to the Builder's base directory) where
        the logfile's contents can be stored uniquely.

        The base filename is made by combining our build number, the Step's
        name, and the log's name, then removing unsuitable characters. The
        filename is then made unique by appending _0, _1, etc, until it does
        not collide with any other logfile.

        These files are kept in the Builder's basedir (rather than a
        per-Build subdirectory) because that makes cleanup easier: cron and
        find will help get rid of the old logs, but the empty directories are
        more of a hassle to remove."""

        starting_filename = "%d-log-%s-%s" % (self.number, stepname, logname)
        starting_filename = re.sub(r'[^\w\.\-]', '_', starting_filename)
        # now make it unique
        unique_counter = 0
        filename = starting_filename
        while filename in [l.filename
                           for step in self.steps
                           for l in step.getLogs()
                           if l.filename]:
            filename = "%s_%d" % (starting_filename, unique_counter)
            unique_counter += 1
        return filename
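
    # Worked example (assuming build #12 and no prior collisions): a step
    # named "compile" with a log named "stdio" maps to "12-log-compile-stdio";
    # a second identical pairing would get "12-log-compile-stdio_0".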

    def __getstate__(self):
        d = styles.Versioned.__getstate__(self)
        # for now, a serialized Build is always "finished". We will never
        # save unfinished builds.
        if not self.finished:
            d['finished'] = util.now()
            # TODO: push an "interrupted" step so it is clear that the build
            # was interrupted. The builder will have a 'shutdown' event, but
            # someone looking at just this build will be confused as to why
            # the last log is truncated.
        for k in 'builder', 'watchers', 'updates', 'finishedWatchers':
            if k in d: del d[k]
        return d

    def __setstate__(self, d):
        styles.Versioned.__setstate__(self, d)
        # self.builder must be filled in by our parent when loading
        for step in self.steps:
            step.build = weakref.ref(self)
        self.watchers = []
        self.updates = {}
        self.finishedWatchers = []

    def upgradeToVersion1(self):
        if hasattr(self, "sourceStamp"):
            # the old .sourceStamp attribute wasn't actually very useful
            maxChangeNumber, patch = self.sourceStamp
            changes = getattr(self, 'changes', [])
            source = sourcestamp.SourceStamp(branch=None,
                                             revision=None,
                                             patch=patch,
                                             changes=changes)
            self.source = source
            self.changes = source.changes
            del self.sourceStamp
        self.wasUpgraded = True

    def upgradeToVersion2(self):
        self.properties = {}
        self.wasUpgraded = True

    def upgradeToVersion3(self):
        # in version 3, self.properties became a Properties object
        propdict = self.properties
        self.properties = Properties()
        self.properties.update(propdict, "Upgrade from previous version")
        self.wasUpgraded = True

    def upgradeLogfiles(self):
        # upgrade any LogFiles that need it. This must occur after we've been
        # attached to our Builder, and after we know about all LogFiles of
        # all Steps (to get the filenames right).
        assert self.builder
        for s in self.steps:
            for l in s.getLogs():
                if l.filename:
                    pass # new-style, log contents are on disk
                else:
                    logfilename = self.generateLogfileName(s.name, l.name)
                    # let the logfile update its .filename pointer,
                    # transferring its contents onto disk if necessary
                    l.upgrade(logfilename)

    def checkLogfiles(self):
        # check that all logfiles exist, and remove references to any that
        # have been deleted (e.g., by purge())
        for s in self.steps:
            s.checkLogfiles()

    def saveYourself(self):
        filename = os.path.join(self.builder.basedir, "%d" % self.number)
        if os.path.isdir(filename):
            # leftover from 0.5.0, which stored builds in directories
            shutil.rmtree(filename, ignore_errors=True)
        tmpfilename = filename + ".tmp"
        try:
            dump(self, open(tmpfilename, "wb"), -1)
            if runtime.platformType == 'win32':
                # windows cannot rename a file on top of an existing one, so
                # fall back to delete-first. There are ways this can fail and
                # lose the builder's history, so we avoid using it in the
                # general (non-windows) case
                if os.path.exists(filename):
                    os.unlink(filename)
            os.rename(tmpfilename, filename)
        except Exception:
            log.msg("unable to save build %s-#%d" % (self.builder.name,
                                                     self.number))
            log.err()

    def asDict(self):
        result = {}
        # Constant
        result['builderName'] = self.builder.name
        result['number'] = self.getNumber()
        result['sourceStamp'] = self.getSourceStamp().asDict()
        result['reason'] = self.getReason()
        result['blame'] = self.getResponsibleUsers()

        # Transient
        result['properties'] = self.getProperties().asList()
        result['times'] = self.getTimes()
        result['text'] = self.getText()
        result['results'] = self.getResults()
        result['slave'] = self.getSlavename()
        # TODO(maruel): Add.
        #result['test_results'] = self.getTestResults()
        result['logs'] = [[l.getName(),
            self.builder.status.getURLForThing(l)] for l in self.getLogs()]
        result['eta'] = self.getETA()
        result['steps'] = [bss.asDict() for bss in self.steps]
        if self.getCurrentStep():
            result['currentStep'] = self.getCurrentStep().asDict()
        else:
            result['currentStep'] = None
        return result
Exemplo n.º 41
0
class TestProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()

    def testDictBehavior(self):
        # note that dictionary-like behavior is deprecated and not exposed to
        # users!
        self.props.setProperty("do-tests", 1, "scheduler")
        self.props.setProperty("do-install", 2, "scheduler")

        self.assert_(self.props.has_key('do-tests'))
        self.failUnlessEqual(self.props['do-tests'], 1)
        self.failUnlessEqual(self.props['do-install'], 2)
        self.assertRaises(KeyError, lambda: self.props['do-nothing'])
        self.failUnlessEqual(self.props.getProperty('do-install'), 2)
        self.assertIn('do-tests', self.props)
        self.assertNotIn('missing-do-tests', self.props)

    def testAsList(self):
        self.props.setProperty("happiness", 7, "builder")
        self.props.setProperty("flames", True, "tester")

        self.assertEqual(sorted(self.props.asList()),
                         [('flames', True, 'tester'),
                          ('happiness', 7, 'builder')])

    def testAsDict(self):
        self.props.setProperty("msi_filename", "product.msi", 'packager')
        self.props.setProperty("dmg_filename", "product.dmg", 'packager')

        self.assertEqual(
            self.props.asDict(),
            dict(msi_filename=('product.msi', 'packager'),
                 dmg_filename=('product.dmg', 'packager')))

    def testUpdate(self):
        self.props.setProperty("x", 24, "old")
        newprops = {'a': 1, 'b': 2}
        self.props.update(newprops, "new")

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateRuntime(self):
        self.props.setProperty("x", 24, "old")
        newprops = {'a': 1, 'b': 2}
        self.props.update(newprops, "new", runtime=True)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')
        self.assertEqual(self.props.runtime, set(['a', 'b']))

    def testUpdateFromProperties(self):
        self.props.setProperty("a", 94, "old")
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new")
        newprops.setProperty('b', 2, "new")
        self.props.updateFromProperties(newprops)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateFromPropertiesNoRuntime(self):
        self.props.setProperty("a", 94, "old")
        self.props.setProperty("b", 84, "old")
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new", runtime=True)
        newprops.setProperty('b', 2, "new", runtime=False)
        newprops.setProperty('c', 3, "new", runtime=True)
        newprops.setProperty('d', 3, "new", runtime=False)
        self.props.updateFromPropertiesNoRuntime(newprops)

        self.failUnlessEqual(self.props.getProperty('a'), 94)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'old')
        self.failUnlessEqual(self.props.getProperty('b'), 2)
        self.failUnlessEqual(self.props.getPropertySource('b'), 'new')
        self.failUnlessEqual(self.props.getProperty('c'), None)  # not updated
        self.failUnlessEqual(self.props.getProperty('d'), 3)
        self.failUnlessEqual(self.props.getPropertySource('d'), 'new')
        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')

    # IProperties methods

    def test_getProperty(self):
        self.props.properties['p1'] = (['p', 1], 'test')
        self.assertEqual(self.props.getProperty('p1'), ['p', 1])

    def test_getProperty_default_None(self):
        self.assertEqual(self.props.getProperty('p1'), None)

    def test_getProperty_default(self):
        self.assertEqual(self.props.getProperty('p1', 2), 2)

    def test_hasProperty_false(self):
        self.assertFalse(self.props.hasProperty('x'))

    def test_hasProperty_true(self):
        self.props.properties['x'] = (False, 'test')
        self.assertTrue(self.props.hasProperty('x'))

    def test_has_key_false(self):
        self.assertFalse(self.props.has_key('x'))

    def test_setProperty(self):
        self.props.setProperty('x', 'y', 'test')
        self.assertEqual(self.props.properties['x'], ('y', 'test'))
        self.assertNotIn('x', self.props.runtime)

    def test_setProperty_runtime(self):
        self.props.setProperty('x', 'y', 'test', runtime=True)
        self.assertEqual(self.props.properties['x'], ('y', 'test'))
        self.assertIn('x', self.props.runtime)

    def test_setProperty_no_source(self):
        self.assertRaises(TypeError, lambda: self.props.setProperty('x', 'y'))

    def test_getProperties(self):
        self.assertIdentical(self.props.getProperties(), self.props)

    def test_getBuild(self):
        self.assertIdentical(self.props.getBuild(), self.props.build)

    def test_render(self):
        class FakeRenderable(object):
            implements(IRenderable)

            def getRenderingFor(self, props):
                return props.getProperty('x') + 'z'

        self.props.setProperty('x', 'y', 'test')
        self.assertEqual(self.props.render(FakeRenderable()), 'yz')
Exemplo n.º 42
0
class BaseScheduler(ClusteredBuildbotService, StateMixin):

    DEFAULT_CODEBASES = {'': {}}

    compare_attrs = ClusteredBuildbotService.compare_attrs + \
        ('builderNames', 'properties', 'codebases')

    def __init__(self,
                 name,
                 builderNames,
                 properties=None,
                 codebases=DEFAULT_CODEBASES):
        super(BaseScheduler, self).__init__(name=name)

        ok = True
        if not isinstance(builderNames, (list, tuple)):
            ok = False
        else:
            for b in builderNames:
                if not isinstance(b, string_types):
                    ok = False
        if not ok:
            config.error(
                "The builderNames argument to a scheduler must be a list "
                "of Builder names.")

        self.builderNames = builderNames

        if properties is None:
            properties = {}
        self.properties = Properties()
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")
        self.objectid = None

        # Set the codebases that are necessary to process the changes
        # These codebases will always result in a sourcestamp with or without
        # changes
        known_keys = set(['branch', 'repository', 'revision'])
        if codebases is None:
            config.error("Codebases cannot be None")
        elif isinstance(codebases, list):
            codebases = dict((codebase, {}) for codebase in codebases)
        elif not isinstance(codebases, dict):
            config.error(
                "Codebases must be a dict of dicts, or list of strings")
        else:
            for codebase, attrs in iteritems(codebases):
                if not isinstance(attrs, dict):
                    config.error("Codebases must be a dict of dicts")
                else:
                    unk = set(attrs) - known_keys
                    if unk:
                        config.error(
                            "Unknown codebase keys %s for codebase %s" %
                            (', '.join(unk), codebase))

        self.codebases = codebases
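
        # Accepted forms, for illustration (repository/branch values are
        # hypothetical):
        #
        #     codebases=['lib', 'app']
        #         # normalized to {'lib': {}, 'app': {}}
        #     codebases={'lib': {'repository': 'https://example.org/lib',
        #                        'branch': 'stable'}}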

        # internal variables
        self._change_consumer = None
        self._change_consumption_lock = defer.DeferredLock()

    def reconfigService(self, *args, **kwargs):
        raise NotImplementedError()

    # activity handling

    def activate(self):
        return defer.succeed(None)

    def deactivate(self):
        return defer.maybeDeferred(self._stopConsumingChanges)

    # service handling

    def _getServiceId(self):
        return self.master.data.updates.findSchedulerId(self.name)

    def _claimService(self):
        return self.master.data.updates.trySetSchedulerMaster(
            self.serviceid, self.master.masterid)

    def _unclaimService(self):
        return self.master.data.updates.trySetSchedulerMaster(
            self.serviceid, None)

    # status queries

    # deprecated: these aren't compatible with distributed schedulers

    def listBuilderNames(self):
        return self.builderNames

    # change handling

    @defer.inlineCallbacks
    def startConsumingChanges(self,
                              fileIsImportant=None,
                              change_filter=None,
                              onlyImportant=False):
        assert fileIsImportant is None or callable(fileIsImportant)

        # register for changes with the data API
        assert not self._change_consumer
        self._change_consumer = yield self.master.mq.startConsuming(
            lambda k, m: self._changeCallback(k, m, fileIsImportant,
                                              change_filter, onlyImportant),
            ('changes', None, 'new'))

    @defer.inlineCallbacks
    def _changeCallback(self, key, msg, fileIsImportant, change_filter,
                        onlyImportant):

        # ignore changes delivered while we're not running
        if not self._change_consumer:
            return

        # get a change object, since the API requires it
        chdict = yield self.master.db.changes.getChange(msg['changeid'])
        change = yield changes.Change.fromChdict(self.master, chdict)

        # filter it
        if change_filter and not change_filter.filter_change(change):
            return
        if change.codebase not in self.codebases:
            log.msg(format='change contains codebase %(codebase)s that is '
                    'not processed by scheduler %(name)s',
                    codebase=change.codebase,
                    name=self.name)
            return
        if fileIsImportant:
            try:
                important = fileIsImportant(change)
                if not important and onlyImportant:
                    return
            except Exception:
                log.err(failure.Failure(),
                        'in fileIsImportant check for %s' % change)
                return
        else:
            important = True

        # use change_consumption_lock to ensure the service does not stop
        # while this change is being processed
        d = self._change_consumption_lock.run(self.gotChange, change,
                                              important)
        d.addErrback(log.err, 'while processing change')

    def _stopConsumingChanges(self):
        # (note: called automatically in deactivate)

        # acquire the change consumption lock to ensure that any change
        # consumption is complete before we are done stopping consumption
        def stop():
            if self._change_consumer:
                self._change_consumer.stopConsuming()
                self._change_consumer = None

        return self._change_consumption_lock.run(stop)

    def gotChange(self, change, important):
        raise NotImplementedError

    # starting builds

    @defer.inlineCallbacks
    def addBuildsetForSourceStampsWithDefaults(self,
                                               reason,
                                               sourcestamps=None,
                                               waited_for=False,
                                               properties=None,
                                               builderNames=None,
                                               **kw):
        if sourcestamps is None:
            sourcestamps = []

        # convert sourcestamps to a dictionary keyed by codebase
        stampsByCodebase = {}
        for ss in sourcestamps:
            cb = ss['codebase']
            if cb in stampsByCodebase:
                raise RuntimeError("multiple sourcestamps with same codebase")
            stampsByCodebase[cb] = ss

        # Merge codebases with the passed list of sourcestamps
        # This results in a new sourcestamp for each codebase
        stampsWithDefaults = []
        for codebase in self.codebases:
            cb = yield self.getCodebaseDict(codebase)
            ss = {
                'codebase': codebase,
                'repository': cb.get('repository', ''),
                'branch': cb.get('branch', None),
                'revision': cb.get('revision', None),
                'project': '',
            }
            # apply info from passed sourcestamps onto the configured default
            # sourcestamp attributes for this codebase.
            ss.update(stampsByCodebase.get(codebase, {}))
            stampsWithDefaults.append(ss)

        # fill in any supplied sourcestamps that aren't for a codebase in the
        # scheduler's codebase dictionary
        for codebase in set(stampsByCodebase) - set(self.codebases):
            cb = stampsByCodebase[codebase]
            ss = {
                'codebase': codebase,
                'repository': cb.get('repository', ''),
                'branch': cb.get('branch', None),
                'revision': cb.get('revision', None),
                'project': '',
            }
            stampsWithDefaults.append(ss)

        rv = yield self.addBuildsetForSourceStamps(
            sourcestamps=stampsWithDefaults,
            reason=reason,
            waited_for=waited_for,
            properties=properties,
            builderNames=builderNames,
            **kw)
        defer.returnValue(rv)
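
    # Hedged usage sketch (codebase and branch values are made up); any
    # codebase not mentioned falls back to its configured defaults:
    #
    #     bsid, brids = yield self.addBuildsetForSourceStampsWithDefaults(
    #         reason='nightly',
    #         sourcestamps=[{'codebase': 'lib', 'branch': 'release-1.0'}])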

    def getCodebaseDict(self, codebase):
        # Hook for subclasses to change codebase parameters when a codebase does
        # not have a change associated with it.
        try:
            return defer.succeed(self.codebases[codebase])
        except KeyError:
            return defer.fail()

    @defer.inlineCallbacks
    def addBuildsetForChanges(self,
                              waited_for=False,
                              reason='',
                              external_idstring=None,
                              changeids=None,
                              builderNames=None,
                              properties=None,
                              **kw):
        if changeids is None:
            changeids = []
        changesByCodebase = {}

        def get_last_change_for_codebase(codebase):
            return max(changesByCodebase[codebase],
                       key=lambda change: change["changeid"])

        # Changes are retrieved from database and grouped by their codebase
        for changeid in changeids:
            chdict = yield self.master.db.changes.getChange(changeid)
            changesByCodebase.setdefault(chdict["codebase"], []).append(chdict)

        sourcestamps = []
        for codebase in sorted(self.codebases):
            if codebase not in changesByCodebase:
                # codebase has no changes
                # create a sourcestamp that has no changes
                cb = yield self.getCodebaseDict(codebase)

                ss = {
                    'codebase': codebase,
                    'repository': cb.get('repository', ''),
                    'branch': cb.get('branch', None),
                    'revision': cb.get('revision', None),
                    'project': '',
                }
            else:
                lastChange = get_last_change_for_codebase(codebase)
                ss = lastChange['sourcestampid']
            sourcestamps.append(ss)

        # add one buildset, using the calculated sourcestamps
        bsid, brids = yield self.addBuildsetForSourceStamps(
            waited_for,
            sourcestamps=sourcestamps,
            reason=reason,
            external_idstring=external_idstring,
            builderNames=builderNames,
            properties=properties,
            **kw)

        defer.returnValue((bsid, brids))

    @defer.inlineCallbacks
    def addBuildsetForSourceStamps(self,
                                   waited_for=False,
                                   sourcestamps=None,
                                   reason='',
                                   external_idstring=None,
                                   properties=None,
                                   builderNames=None,
                                   **kw):
        if sourcestamps is None:
            sourcestamps = []
        # combine properties
        if properties:
            properties.updateFromProperties(self.properties)
        else:
            properties = self.properties

        # apply the default builderNames
        if not builderNames:
            builderNames = self.builderNames

        # Get the builder ids
        # Note that data.updates.findBuilderId(name) exists, but it would
        # only optimize the single-builder case, while the multiple-builder
        # case would still pay for the several db requests needed here.
        builderids = []
        for bldr in (yield self.master.data.get(('builders', ))):
            if bldr['name'] in builderNames:
                builderids.append(bldr['builderid'])

        # translate properties object into a dict as required by the
        # addBuildset method
        properties_dict = properties.asDict()

        bsid, brids = yield self.master.data.updates.addBuildset(
            scheduler=self.name,
            sourcestamps=sourcestamps,
            reason=reason,
            waited_for=waited_for,
            properties=properties_dict,
            builderids=builderids,
            external_idstring=external_idstring,
            **kw)
        defer.returnValue((bsid, brids))
Exemplo n.º 43
0
class Change:
    """I represent a single change to the source tree. This may involve
    several files, but they are all changed by the same person, and there is
    a change comment for the group as a whole.

    If the version control system supports sequential repository- (or
    branch-) wide change numbers (like SVN, P4, and Bzr), then revision=
    should be set to that number. The highest such number will be used at
    checkout time to get the correct set of files.

    If it does not (like CVS), when= should be set to the timestamp (seconds
    since epoch, as returned by time.time()) when the change was made. when=
    will be filled in for you (to the current time) if you omit it, which is
    suitable for ChangeSources which have no way of getting more accurate
    timestamps.

    The revision= and branch= values must be ASCII bytestrings, since they
    will eventually be used in a ShellCommand and passed to os.exec(), which
    requires bytestrings. These values will also be stored in a database,
    possibly as unicode, so they must be safely convertible back and forth.
    This restriction may be relaxed in the future.

    Changes should be submitted to ChangeMaster.addChange() in
    chronologically increasing order. Out-of-order changes will probably
    cause the web status displays to be corrupted."""
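
    # Hedged construction example (all values hypothetical):
    #
    #     c = Change(who='alice <alice@example.org>',
    #                files=['src/main.c', 'src/main.h'],
    #                comments='fix off-by-one in parser',
    #                revision='1234', branch='trunk')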

    implements(interfaces.IStatusEvent)

    number = None

    branch = None
    category = None
    revision = None # used to create a source-stamp

    def __init__(self, who, files, comments, isdir=0, links=None,
                 revision=None, when=None, branch=None, category=None,
                 revlink='', properties={}, repository='', project=''):
        self.who = who
        self.comments = comments
        self.isdir = isdir
        if links is None:
            links = []
        self.links = links

        def none_or_unicode(x):
            if x is None: return x
            return unicode(x)

        self.revision = none_or_unicode(revision)
        now = util.now()
        if when is None:
            self.when = now
        elif when > now:
            # this happens when the committing system has an incorrect clock, for example.
            # handle it gracefully
            log.msg("received a Change with when > now; assuming the change happened now")
            self.when = now
        else:
            self.when = when
        self.branch = none_or_unicode(branch)
        self.category = none_or_unicode(category)
        self.revlink = revlink
        self.properties = Properties()
        self.properties.update(properties, "Change")
        self.repository = repository
        self.project = project

        # keep a sorted list of the files, for easier display
        self.files = files[:]
        self.files.sort()

    def __setstate__(self, state):
        self.__dict__ = state
        # Older Changes won't have a 'properties' attribute in them
        if not hasattr(self, 'properties'):
            self.properties = Properties()
        if not hasattr(self, 'revlink'):
            self.revlink = ""

    def asText(self):
        data = ""
        data += self.getFileContents()
        if self.repository:
            data += "On: %s\n" % self.repository
        if self.project:
            data += "For: %s\n" % self.project
        data += "At: %s\n" % self.getTime()
        data += "Changed By: %s\n" % self.who
        data += "Comments: %s" % self.comments
        data += "Properties: \n%s\n\n" % self.getProperties()
        return data

    def asDict(self):
        '''returns a dictionary with suitable info for html/mail rendering'''
        result = {}

        files = []
        for file in self.files:
            link = filter(lambda s: s.find(file) != -1, self.links)
            if len(link) == 1:
                url = link[0]
            else:
                url = None
            files.append(dict(url=url, name=file))

        files = sorted(files, key=lambda f: f['name'])

        # Constant
        result['number'] = self.number
        result['branch'] = self.branch
        result['category'] = self.category
        result['who'] = self.getShortAuthor()
        result['comments'] = self.comments
        result['revision'] = self.revision
        result['rev'] = self.revision
        result['when'] = self.when
        result['at'] = self.getTime()
        result['files'] = files
        result['revlink'] = getattr(self, 'revlink', None)
        result['properties'] = self.properties.asList()
        result['repository'] = getattr(self, 'repository', None)
        result['project'] = getattr(self, 'project', None)
        return result

    def getShortAuthor(self):
        return self.who

    def getTime(self):
        if not self.when:
            return "?"
        return time.strftime("%a %d %b %Y %H:%M:%S",
                             time.localtime(self.when))

    def getTimes(self):
        return (self.when, None)

    def getText(self):
        return [html.escape(self.who)]
    def getLogs(self):
        return {}

    def getFileContents(self):
        data = ""
        if len(self.files) == 1:
            if self.isdir:
                data += "Directory: %s\n" % self.files[0]
            else:
                data += "File: %s\n" % self.files[0]
        else:
            data += "Files:\n"
            for f in self.files:
                data += " %s\n" % f
        return data

    def getProperties(self):
        data = ""
        for prop in self.properties.asList():
            data += "  %s: %s" % (prop[0], prop[1])
        return data
Exemplo n.º 44
0
class BaseScheduler(service.MultiService, ComparableMixin):
    """
    Base class for all schedulers; this provides the equipment to manage
    reconfigurations and to handle basic scheduler state.  It also provides
    utility methods to begin various sorts of builds.

    Subclasses should add any configuration-derived attributes to
    C{base.Scheduler.compare_attrs}.
    """

    implements(interfaces.IScheduler)

    compare_attrs = ('name', 'builderNames', 'properties')

    def __init__(self, name, builderNames, properties):
        """
        Initialize a Scheduler.

        @param name: name of this scheduler (used as a key for state)
        @type name: unicode

        @param builderNames: list of builders this scheduler may start
        @type builderNames: list of unicode

        @param properties: properties to add to builds triggered by this
        scheduler
        @type properties: dictionary

        """
        service.MultiService.__init__(self)
        self.name = name
        "name of this scheduler; used to identify replacements on reconfig"

        ok = True
        if not isinstance(builderNames, (list, tuple)):
            ok = False
        else:
            for b in builderNames:
                if not isinstance(b, basestring):
                    ok = False
        if not ok:
            config.error(
                "The builderNames argument to a scheduler must be a list "
                "of Builder names.")

        self.builderNames = builderNames
        "list of builder names to start in each buildset"

        self.properties = Properties()
        "properties that are contributed to each buildset"
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")

        self.objectid = None

        self.master = None

        # internal variables
        self._change_subscription = None
        self._change_consumption_lock = defer.DeferredLock()
        self._objectid = None

    ## service handling

    def startService(self):
        service.MultiService.startService(self)

    def findNewSchedulerInstance(self, new_config):
        return new_config.schedulers[self.name] # should exist!

    def stopService(self):
        d = defer.maybeDeferred(self._stopConsumingChanges)
        d.addCallback(lambda _ : service.MultiService.stopService(self))
        return d

    ## state management

    @defer.deferredGenerator
    def getState(self, *args, **kwargs):
        """
        For use by subclasses; get a named state value from the scheduler's
        state, defaulting to C{default}.

        @param name: name of the value to retrieve
        @param default: (optional) value to return if C{name} is not present
        @returns: state value via a Deferred
        @raises KeyError: if C{name} is not present and no default is given
        @raises TypeError: if JSON parsing fails
        """
        # get the objectid, if not known
        if self._objectid is None:
            wfd = defer.waitForDeferred(
                self.master.db.state.getObjectId(self.name,
                                        self.__class__.__name__))
            yield wfd
            self._objectid = wfd.getResult()

        wfd = defer.waitForDeferred(
            self.master.db.state.getState(self._objectid, *args, **kwargs))
        yield wfd
        yield wfd.getResult()

    @defer.deferredGenerator
    def setState(self, key, value):
        """
        For use by subclasses; set a named state value in the scheduler's
        persistent state.  Note that value must be json-able.

        @param name: the name of the value to change
        @param value: the value to set - must be a JSONable object
        @returns: Deferred
        @raises TypeError: if JSONification fails
        """
        # get the objectid, if not known
        if self._objectid is None:
            wfd = defer.waitForDeferred(
                self.master.db.state.getObjectId(self.name,
                                        self.__class__.__name__))
            yield wfd
            self._objectid = wfd.getResult()

        wfd = defer.waitForDeferred(
            self.master.db.state.setState(self._objectid, key, value))
        yield wfd
        wfd.getResult()
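
    # Hedged usage sketch from a deferredGenerator-style subclass method
    # (the state key 'last_processed' is hypothetical):
    #
    #     wfd = defer.waitForDeferred(self.getState('last_processed', 0))
    #     yield wfd
    #     last = wfd.getResult()
    #     ...
    #     wfd = defer.waitForDeferred(self.setState('last_processed', last + 1))
    #     yield wfd
    #     wfd.getResult()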

    ## status queries

    # TODO: these aren't compatible with distributed schedulers

    def listBuilderNames(self):
        "Returns the list of builder names"
        return self.builderNames

    def getPendingBuildTimes(self):
        "Returns a list of the next times that builds are scheduled, if known."
        return []

    ## change handling

    def startConsumingChanges(self, fileIsImportant=None, change_filter=None,
                              onlyImportant=False):
        """
        Subclasses should call this method from startService to register to
        receive changes.  The BaseScheduler class will take care of filtering
        the changes (using change_filter) and (if fileIsImportant is not None)
        classifying them.  See L{gotChange}.  Returns a Deferred.

        @param fileIsImportant: a callable provided by the user to distinguish
        important and unimportant changes
        @type fileIsImportant: callable

        @param change_filter: a filter to determine which changes are even
        considered by this scheduler, or C{None} to consider all changes
        @type change_filter: L{buildbot.changes.filter.ChangeFilter} instance

        @param onlyImportant: If True, only important changes, as specified by
        fileIsImportant, will be added to the buildset.
        @type onlyImportant: boolean

        """
        assert fileIsImportant is None or callable(fileIsImportant)

        # register for changes with master
        assert not self._change_subscription
        def changeCallback(change):
            # ignore changes delivered while we're not running
            if not self._change_subscription:
                return

            if change_filter and not change_filter.filter_change(change):
                return
            if fileIsImportant:
                try:
                    important = fileIsImportant(change)
                    if not important and onlyImportant:
                        return
                except:
                    log.err(failure.Failure(),
                            'in fileIsImportant check for %s' % change)
                    return
            else:
                important = True

            # use change_consumption_lock to ensure the service does not stop
            # while this change is being processed
            d = self._change_consumption_lock.acquire()
            d.addCallback(lambda _ : self.gotChange(change, important))
            def release(x):
                self._change_consumption_lock.release()
            d.addBoth(release)
            d.addErrback(log.err, 'while processing change')
        self._change_subscription = self.master.subscribeToChanges(changeCallback)

        return defer.succeed(None)
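
    # Illustrative wiring for the API above (hypothetical method name, so as
    # not to clash with the real startService); ChangeFilter comes from
    # buildbot.changes.filter as the docstring notes, and the .py filter is an
    # assumption:
    def _exampleStartService(self):
        from buildbot.changes.filter import ChangeFilter
        service.MultiService.startService(self)
        return self.startConsumingChanges(
            fileIsImportant=lambda ch: any(f.endswith('.py') for f in ch.files),
            change_filter=ChangeFilter(branch='master'),
            onlyImportant=True)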

    def _stopConsumingChanges(self):
        # (note: called automatically in stopService)

        # acquire the change consumption lock to ensure that any in-flight
        # change handling completes before consumption is fully stopped
        d = self._change_consumption_lock.acquire()
        def stop(x):
            if self._change_subscription:
                self._change_subscription.unsubscribe()
                self._change_subscription = None
            self._change_consumption_lock.release()
        d.addBoth(stop)
        return d

    def gotChange(self, change, important):
        """
        Called when a change is received; returns a Deferred.  If the
        C{fileIsImportant} parameter to C{startConsumingChanges} was C{None},
        then all changes are considered important.

        @param change: the new change object
        @type change: L{buildbot.changes.changes.Change} instance
        @param important: true if this is an important change, according to
        C{fileIsImportant}.
        @type important: boolean
        @returns: Deferred
        """
        raise NotImplementedError
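
    # A sketch of what a concrete override might look like (simplified,
    # single-branch style; the reason string is an assumption):
    def _exampleGotChange(self, change, important):
        # queue a buildset for every important change; ignore the rest
        if not important:
            return defer.succeed(None)
        return self.addBuildsetForChanges(reason='change detected',
                                          changeids=[change.number])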

    ## starting builds

    @defer.deferredGenerator
    def addBuildsetForLatest(self, reason='', external_idstring=None,
                        branch=None, repository='', project='',
                        builderNames=None, properties=None):
        """
        Add a buildset for the 'latest' source in the given branch,
        repository, and project.  This will create a relative sourcestamp for
        the buildset.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's addBuildset
        method with the appropriate parameters.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param branch: branch to build (note that None often has a special meaning)
        @param repository: repository name for sourcestamp
        @param project: project name for sourcestamp
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        # Define setid for this set of changed repositories
        wfd = defer.waitForDeferred(self.master.db.sourcestampsets.addSourceStampSet())
        yield wfd
        setid = wfd.getResult()

        wfd = defer.waitForDeferred(self.master.db.sourcestamps.addSourceStamp(
                branch=branch, revision=None, repository=repository,
                project=project, sourcestampsetid=setid))
        yield wfd
        wfd.getResult()

        wfd = defer.waitForDeferred(self.addBuildsetForSourceStamp(
                                setid=setid, reason=reason,
                                external_idstring=external_idstring,
                                builderNames=builderNames,
                                properties=properties))
        yield wfd
        yield wfd.getResult()

    @defer.deferredGenerator
    def addBuildsetForChanges(self, reason='', external_idstring=None,
            changeids=[], builderNames=None, properties=None):
        """
        Add a buildset for the combination of the given changesets, creating
        a sourcestamp based on those changes.  The sourcestamp for the buildset
        will reference all of the indicated changes.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's addBuildset
        method with the appropriate parameters.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param changeids: nonempty list of changes to include in this buildset
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        assert changeids, "changeids must be a nonempty list"
        # attributes for this sourcestamp will be based on the most recent
        # change, so fetch the change with the highest id
        wfd = defer.waitForDeferred(self.master.db.changes.getChange(max(changeids)))
        yield wfd
        chdict = wfd.getResult()

        change = None
        if chdict:
            wfd = defer.waitForDeferred(changes.Change.fromChdict(self.master, chdict))
            yield wfd
            change = wfd.getResult()

        # Define setid for this set of changed repositories
        wfd = defer.waitForDeferred(self.master.db.sourcestampsets.addSourceStampSet())
        yield wfd
        setid = wfd.getResult()

        wfd = defer.waitForDeferred(self.master.db.sourcestamps.addSourceStamp(
                    branch=change.branch,
                    revision=change.revision,
                    repository=change.repository,
                    codebase=change.codebase,
                    project=change.project,
                    changeids=changeids,
                    sourcestampsetid=setid))
        yield wfd
        wfd.getResult()

        wfd = defer.waitForDeferred(self.addBuildsetForSourceStamp(
                                setid=setid, reason=reason,
                                external_idstring=external_idstring,
                                builderNames=builderNames,
                                properties=properties))
        yield wfd
        yield wfd.getResult()

    @defer.deferredGenerator
    def addBuildsetForSourceStamp(self, ssid=None, setid=None, reason='', external_idstring=None,
            properties=None, builderNames=None):
        """
        Add a buildset for the given, already-existing sourcestamp.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's
        L{BuildMaster.addBuildset} method with the appropriate parameters, and
        return the same result.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param setid: identification of a set of sourcestamps
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        assert (ssid is None and setid is not None) \
            or (ssid is not None and setid is None), \
            "pass a single sourcestamp OR a sourcestamp set, not both"

        # combine properties
        if properties:
            properties.updateFromProperties(self.properties)
        else:
            properties = self.properties

        # apply the default builderNames
        if not builderNames:
            builderNames = self.builderNames

        # translate properties object into a dict as required by the
        # addBuildset method
        properties_dict = properties.asDict()

        if setid is None:
            if ssid is not None:
                wfd = defer.waitForDeferred(self.master.db.sourcestamps.getSourceStamp(ssid))
                yield wfd
                ssdict = wfd.getResult()
                setid = ssdict['sourcestampsetid']
            else:
                # no sourcestamp and no set; nothing to add
                yield None
                return

        wfd = defer.waitForDeferred(self.master.addBuildset(
                                        sourcestampsetid=setid, reason=reason,
                                        properties=properties_dict,
                                        builderNames=builderNames,
                                        external_idstring=external_idstring))
        yield wfd
        yield wfd.getResult()
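
# A hedged end-to-end sketch (not part of the original source), assuming the
# class above is Buildbot's BaseScheduler: a trivial scheduler that builds the
# latest revision of every important change it sees.
class ExampleEagerScheduler(BaseScheduler):
    def gotChange(self, change, important):
        if not important:
            return defer.succeed(None)
        return self.addBuildsetForLatest(
            reason="eager build of branch %s" % change.branch,
            branch=change.branch,
            repository=change.repository,
            project=change.project)
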
Exemplo n.º 45
class AbstractBuildSlave(pb.Avatar, service.MultiService):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['slaves'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a build slave -- a remote machine capable of
    running builds.  I am instantiated by the configuration file, and can be
    subclassed to add extra functionality."""

    implements(IBuildSlave)
    keepalive_timer = None
    keepalive_interval = None

    def __init__(self,
                 name,
                 password,
                 max_builds=None,
                 notify_on_missing=[],
                 missing_timeout=3600,
                 properties={},
                 locks=None,
                 keepalive_interval=3600):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this buildslave (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this slave
        @type properties: dictionary
        @param locks: A list of locks that must be acquired before this slave
                      can be used
        @type locks: dictionary
        """
        service.MultiService.__init__(self)
        self.slavename = name
        self.password = password
        self.botmaster = None  # no buildmaster yet
        self.slave_status = SlaveStatus(name)
        self.slave = None  # a RemoteReference to the Bot, when connected
        self.slave_commands = None
        self.slavebuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks

        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")

        self.lastMessageReceived = 0
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            assert isinstance(i, str)
        self.missing_timeout = missing_timeout
        self.missing_timer = None
        self.keepalive_interval = keepalive_interval

        self._old_builder_list = None

    def identity(self):
        """
        Return a tuple describing this slave.  After reconfiguration a
        new slave with the same identity will update this one, rather
        than replacing it, thereby avoiding an interruption of current
        activity.
        """
        return (self.slavename, self.password,
                '%s.%s' % (self.__class__.__module__, self.__class__.__name__))

    def update(self, new):
        """
        Given a new BuildSlave, configure this one identically.  Because
        BuildSlave objects are remotely referenced, we can't replace them
        without disconnecting the slave, yet there's no reason to do that.
        """
        # the reconfiguration logic should guarantee this:
        assert self.slavename == new.slavename
        assert self.password == new.password
        assert self.identity() == new.identity()
        self.max_builds = new.max_builds
        self.access = new.access
        self.notify_on_missing = new.notify_on_missing
        self.missing_timeout = new.missing_timeout

        self.properties = Properties()
        self.properties.updateFromProperties(new.properties)

        if self.botmaster:
            self.updateLocks()

    def __repr__(self):
        if self.botmaster:
            builders = self.botmaster.getBuildersForSlave(self.slavename)
            return "<%s '%s', current builders: %s>" % \
               (self.__class__.__name__, self.slavename,
                ','.join(map(lambda b: b.name, builders)))
        else:
            return "<%s '%s', (no builders yet)>" % \
                (self.__class__.__name__, self.slavename)

    def updateLocks(self):
        # convert locks into their real form
        locks = []
        for access in self.access:
            if not isinstance(access, LockAccess):
                access = access.defaultAccess()
            lock = self.botmaster.getLockByID(access.lockid)
            locks.append((lock, access))
        self.locks = [(l.getLock(self), la) for l, la in locks]

    def locksAvailable(self):
        """
        I am called to see if all the locks I depend on are available,
        in which case I return True; otherwise I return False.
        """
        if not self.locks:
            return True
        for lock, access in self.locks:
            if not lock.isAvailable(access):
                return False
        return True

    def acquireLocks(self):
        """
        I am called when a build is preparing to run. I try to claim all
        the locks that are needed for a build to happen. If I can't, then
        my caller should give up the build and try to get another slave
        to look at it.
        """
        log.msg("acquireLocks(slave %s, locks %s)" % (self, self.locks))
        if not self.locksAvailable():
            log.msg("slave %s can't lock, giving up" % (self, ))
            return False
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return True

    def releaseLocks(self):
        """
        I am called to release any locks after a build has finished
        """
        log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)
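
    # Illustrative (hypothetical) driver for the lock protocol above: claim
    # everything or bail out so the caller can try another slave, and always
    # release on completion. `startBuild` is an assumed callable.
    def _exampleRunWithLocks(self, startBuild):
        if not self.acquireLocks():
            return defer.succeed(None)
        d = defer.maybeDeferred(startBuild)

        def release(res):
            self.releaseLocks()
            return res
        d.addBoth(release)
        return d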

    def setBotmaster(self, botmaster):
        assert not self.botmaster, "BuildSlave already has a botmaster"
        self.botmaster = botmaster
        self.updateLocks()
        self.startMissingTimer()

    def stopMissingTimer(self):
        if self.missing_timer:
            self.missing_timer.cancel()
            self.missing_timer = None

    def startMissingTimer(self):
        if self.notify_on_missing and self.missing_timeout and self.parent:
            self.stopMissingTimer()  # in case it's already running
            self.missing_timer = reactor.callLater(self.missing_timeout,
                                                   self._missing_timer_fired)

    def doKeepalive(self):
        self.keepalive_timer = reactor.callLater(self.keepalive_interval,
                                                 self.doKeepalive)
        if not self.slave:
            return
        d = self.slave.callRemote("print", "Received keepalive from master")
        d.addErrback(log.msg, "Keepalive failed for '%s'" % (self.slavename, ))

    def stopKeepaliveTimer(self):
        if self.keepalive_timer:
            self.keepalive_timer.cancel()

    def startKeepaliveTimer(self):
        assert self.keepalive_interval
        log.msg("Starting buildslave keepalive timer for '%s'" % \
                                        (self.slavename, ))
        self.doKeepalive()

    def recordConnectTime(self):
        if self.slave_status:
            self.slave_status.recordConnectTime()

    def isConnected(self):
        return self.slave

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return

        buildmaster = self.botmaster.parent
        status = buildmaster.getStatus()
        text = "The Buildbot working for '%s'\n" % status.getTitle()
        text += ("has noticed that the buildslave named %s went away\n" %
                 self.slavename)
        text += "\n"
        text += ("It last disconnected at %s (buildmaster-local time)\n" %
                 time.ctime(time.time() - self.missing_timeout))  # approx
        text += "\n"
        text += "The admin on record (as reported by BUILDSLAVE:info/admin)\n"
        text += "was '%s'.\n" % self.slave_status.getAdmin()
        text += "\n"
        text += "Sincerely,\n"
        text += " The Buildbot\n"
        text += " %s\n" % status.getTitleURL()
        subject = "Buildbot: buildslave %s was lost" % self.slavename
        return self._mail_missing_message(subject, text)

    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.slave:
            return self.sendBuilderList()
        else:
            return defer.succeed(None)

    def updateSlaveStatus(self, buildStarted=None, buildFinished=None):
        if buildStarted:
            self.slave_status.buildStarted(buildStarted)
        if buildFinished:
            self.slave_status.buildFinished(buildFinished)

    def attached(self, bot):
        """This is called when the slave connects.

        @return: a Deferred that fires when the attachment is complete
        """

        # the botmaster should ensure this.
        assert not self.isConnected()

        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # we accumulate slave information in this 'state' dictionary, then
        # set it atomically if we make it far enough through the process
        state = {}

        # Reset graceful shutdown status
        self.slave_status.setGraceful(False)
        # We want to know when the graceful shutdown flag changes
        self.slave_status.addGracefulWatcher(self._gracefulChanged)

        d = defer.succeed(None)

        def _log_attachment_on_slave(res):
            d1 = bot.callRemote("print", "attached")
            d1.addErrback(lambda why: None)
            return d1

        d.addCallback(_log_attachment_on_slave)

        def _get_info(res):
            d1 = bot.callRemote("getSlaveInfo")

            def _got_info(info):
                log.msg("Got slaveinfo from '%s'" % self.slavename)
                # TODO: info{} might have other keys
                state["admin"] = info.get("admin")
                state["host"] = info.get("host")
                state["access_uri"] = info.get("access_uri", None)
                state["slave_environ"] = info.get("environ", {})
                state["slave_basedir"] = info.get("basedir", None)
                state["slave_system"] = info.get("system", None)

            def _info_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # maybe an old slave, doesn't implement remote_getSlaveInfo
                log.msg("BuildSlave.info_unavailable")
                log.err(why)

            d1.addCallbacks(_got_info, _info_unavailable)
            return d1

        d.addCallback(_get_info)
        self.startKeepaliveTimer()

        def _get_version(res):
            d = bot.callRemote("getVersion")

            def _got_version(version):
                state["version"] = version

            def _version_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # probably an old slave
                state["version"] = '(unknown)'

            d.addCallbacks(_got_version, _version_unavailable)
            return d

        d.addCallback(_get_version)

        def _get_commands(res):
            d1 = bot.callRemote("getCommands")

            def _got_commands(commands):
                state["slave_commands"] = commands

            def _commands_unavailable(why):
                # probably an old slave
                log.msg("BuildSlave._commands_unavailable")
                if why.check(AttributeError):
                    return
                log.err(why)

            d1.addCallbacks(_got_commands, _commands_unavailable)
            return d1

        d.addCallback(_get_commands)

        def _accept_slave(res):
            self.slave_status.setAdmin(state.get("admin"))
            self.slave_status.setHost(state.get("host"))
            self.slave_status.setAccessURI(state.get("access_uri"))
            self.slave_status.setVersion(state.get("version"))
            self.slave_status.setConnected(True)
            self.slave_commands = state.get("slave_commands")
            self.slave_environ = state.get("slave_environ")
            self.slave_basedir = state.get("slave_basedir")
            self.slave_system = state.get("slave_system")
            self.slave = bot
            if self.slave_system == "win32":
                self.path_module = namedModule("ntpath")
            else:
                # most everything accepts / as separator, so posix should be a
                # reasonable fallback
                self.path_module = namedModule("posixpath")
            log.msg("bot attached")
            self.messageReceivedFromSlave()
            self.stopMissingTimer()
            self.botmaster.parent.status.slaveConnected(self.slavename)

            return self.updateSlave()

        d.addCallback(_accept_slave)
        d.addCallback(
            lambda _: self.botmaster.maybeStartBuildsForSlave(self.slavename))

        # Finally, the slave gets a reference to this BuildSlave. They
        # receive this later, after we've started using them.
        d.addCallback(lambda _: self)
        return d

    def messageReceivedFromSlave(self):
        now = time.time()
        self.lastMessageReceived = now
        self.slave_status.setLastMessageReceived(now)

    def detached(self, mind):
        self.slave = None
        self._old_builder_list = []
        self.slave_status.removeGracefulWatcher(self._gracefulChanged)
        self.slave_status.setConnected(False)
        log.msg("BuildSlave.detached(%s)" % self.slavename)
        self.botmaster.parent.status.slaveDisconnected(self.slavename)
        self.stopKeepaliveTimer()

    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """

        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)
        # When this Deferred fires, we'll be ready to accept the new slave
        return self._disconnect(self.slave)

    def _disconnect(self, slave):
        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback with one argument, the
        # RemoteReference being disconnected.
        def _disconnected(rref):
            reactor.callLater(0, d.callback, None)

        slave.notifyOnDisconnect(_disconnected)
        tport = slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            #tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
        except:
            # however, these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
        log.msg("waiting for slave to finish disconnecting")

        return d

    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
        blist = [(b.name, b.slavebuilddir) for b in our_builders]
        #        if blist == self._old_builder_list:
        #            log.msg("Builder list is unchanged; not calling setBuilderList")
        #            return defer.succeed(None)

        d = self.slave.callRemote("setBuilderList", blist)

        def sentBuilderList(ign):
            self._old_builder_list = blist
            return ign

        d.addCallback(sentBuilderList)
        return d

    def perspective_keepalive(self):
        self.messageReceivedFromSlave()

    def perspective_shutdown(self):
        log.msg("slave %s wants to shut down" % self.slavename)
        self.slave_status.setGraceful(True)

    def addSlaveBuilder(self, sb):
        self.slavebuilders[sb.builder_name] = sb

    def removeSlaveBuilder(self, sb):
        try:
            del self.slavebuilders[sb.builder_name]
        except KeyError:
            pass

    def buildFinished(self, sb):
        """This is called when a build on this slave is finished."""
        self.botmaster.maybeStartBuildsForSlave(self.slavename)

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this buildslave
        can start a build.  This function can be used to limit overall
        concurrency on the buildslave.
        """
        # If we're waiting to shutdown gracefully, then we shouldn't
        # accept any new jobs.
        if self.slave_status.getGraceful():
            return False

        if self.max_builds:
            active_builders = [
                sb for sb in self.slavebuilders.values() if sb.isBusy()
            ]
            if len(active_builders) >= self.max_builds:
                return False

        if not self.locksAvailable():
            return False

        return True

    def _mail_missing_message(self, subject, text):
        # first, see if we have a MailNotifier we can use. This gives us a
        # fromaddr and a relayhost.
        buildmaster = self.botmaster.parent
        for st in buildmaster.statusTargets:
            if isinstance(st, MailNotifier):
                break
        else:
            # if not, they get a default MailNotifier, which always uses SMTP
            # to localhost and uses a dummy fromaddr of "buildbot".
            log.msg("buildslave-missing msg using default MailNotifier")
            st = MailNotifier("buildbot")
        # now construct the mail

        m = Message()
        m.set_payload(text)
        m['Date'] = formatdate(localtime=True)
        m['Subject'] = subject
        m['From'] = st.fromaddr
        recipients = self.notify_on_missing
        m['To'] = ", ".join(recipients)
        d = st.sendMessage(m, recipients)
        # return the Deferred for testing purposes
        return d

    def _gracefulChanged(self, graceful):
        """This is called when our graceful shutdown setting changes"""
        self.maybeShutdown()

    @defer.deferredGenerator
    def shutdown(self):
        """Shutdown the slave"""
        if not self.slave:
            log.msg("no remote; slave is already shut down")
            return

        # First, try the "new" way - calling our own remote's shutdown
        # method.  The method was only added in 0.8.3, so ignore NoSuchMethod
        # failures.
        def new_way():
            d = self.slave.callRemote('shutdown')
            d.addCallback(lambda _: True)  # successful shutdown request

            def check_nsm(f):
                f.trap(pb.NoSuchMethod)
                return False  # fall through to the old way

            d.addErrback(check_nsm)

            def check_connlost(f):
                f.trap(pb.PBConnectionLost)
                return True  # the slave is gone, so call it finished

            d.addErrback(check_connlost)
            return d

        wfd = defer.waitForDeferred(new_way())
        yield wfd
        if wfd.getResult():
            return  # done!

        # Now, the old way.  Look for a builder with a remote reference to the
        # client side slave.  If we can find one, then call "shutdown" on the
        # remote builder, which will cause the slave buildbot process to exit.
        def old_way():
            d = None
            for b in self.slavebuilders.values():
                if b.remote:
                    d = b.remote.callRemote("shutdown")
                    break

            if d:
                log.msg("Shutting down (old) slave: %s" % self.slavename)

                # The remote shutdown call will not complete successfully since the
                # buildbot process exits almost immediately after getting the
                # shutdown request.
                # Here we look at the reason why the remote call failed, and if
                # it's because the connection was lost, that means the slave
                # shutdown as expected.
                def _errback(why):
                    if why.check(pb.PBConnectionLost):
                        log.msg("Lost connection to %s" % self.slavename)
                    else:
                        log.err("Unexpected error when trying to shutdown %s" %
                                self.slavename)

                d.addErrback(_errback)
                return d
            log.err("Couldn't find remote builder to shut down slave")
            return defer.succeed(None)

        wfd = defer.waitForDeferred(old_way())
        yield wfd
        wfd.getResult()

    def maybeShutdown(self):
        """Shut down this slave if it has been asked to shut down gracefully,
        and has no active builders."""
        if not self.slave_status.getGraceful():
            return
        active_builders = [
            sb for sb in self.slavebuilders.values() if sb.isBusy()
        ]
        if active_builders:
            return
        d = self.shutdown()
        d.addErrback(log.err, 'error while shutting down slave')
Exemplo n.º 46
class MockSlave(object):
    def __init__(self, name, properties):
        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")
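
# (illustrative) how such a mock is typically exercised in tests; the
# assertions rely only on the Properties API shown in the earlier examples:
mock = MockSlave("bot1", {"os": "linux"})
assert mock.properties.getProperty("slavename") == "bot1"
assert mock.properties.getProperty("os") == "linux"
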
Exemplo n.º 47
class AbstractWorker(service.BuildbotService):
    """This is the master-side representative for a remote buildbot worker.
    There is exactly one for each worker described in the config file (the
    c['workers'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a worker -- a remote machine capable of
    running builds.  I am instantiated by the configuration file, and can be
    subclassed to add extra functionality."""

    # reconfig workers after builders
    reconfig_priority = 64

    quarantine_timer = None
    quarantine_timeout = quarantine_initial_timeout = 10
    quarantine_max_timeout = 60 * 60
    start_missing_on_startup = True
    DEFAULT_MISSING_TIMEOUT = 3600
    DEFAULT_KEEPALIVE_INTERVAL = 3600

    # override to True if isCompatibleWithBuild may return False
    builds_may_be_incompatible = False

    def checkConfig(self,
                    name,
                    password,
                    max_builds=None,
                    notify_on_missing=None,
                    missing_timeout=None,
                    properties=None,
                    defaultProperties=None,
                    locks=None,
                    keepalive_interval=DEFAULT_KEEPALIVE_INTERVAL,
                    machine_name=None):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this worker (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this worker
        @type properties: dictionary
        @param defaultProperties: properties that will be applied to builds
                                  run on this worker only if the property
                                  has not been set by another source
        @type defaultProperties: dictionary
        @param locks: A list of locks that must be acquired before this worker
                      can be used
        @type locks: dictionary
        @param machine_name: The name of the machine to associate with the
                             worker.
        """
        self.name = name = bytes2unicode(name)
        self.machine_name = machine_name

        self.password = password

        # protocol registration
        self.registration = None

        self._graceful = False
        self._paused = False

        # these are set when the service is started
        self.manager = None
        self.workerid = None

        self.worker_status = WorkerStatus(name)
        self.worker_commands = None
        self.workerforbuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.lock_subscriptions = []

        self.properties = Properties()
        self.properties.update(properties or {}, "Worker")
        self.properties.setProperty("workername", name, "Worker")
        self.defaultProperties = Properties()
        self.defaultProperties.update(defaultProperties or {}, "Worker")

        if self.machine_name is not None:
            self.properties.setProperty('machine_name', self.machine_name,
                                        'Worker')
        self.machine = None

        self.lastMessageReceived = 0

        if notify_on_missing is None:
            notify_on_missing = []
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            if not isinstance(i, str):
                config.error('notify_on_missing arg %r is not a string' %
                             (i, ))

        self.missing_timeout = missing_timeout
        self.missing_timer = None

        # a protocol connection, if we're currently connected
        self.conn = None

        # during disconnection self.conn will be set to None before all disconnection
        # notifications are delivered. During that period,
        # _pending_disconnection_delivery_notifier will be set to a notifier that lets
        # interested users wait until all disconnection notifications are delivered.
        self._pending_disconnection_delivery_notifier = None

        self._old_builder_list = None
        self._configured_builderid_list = None
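
    # (illustrative) how these arguments typically appear in master.cfg,
    # assuming the concrete subclass is buildbot.worker.Worker; all values
    # below are assumptions:
    #
    #   from buildbot.worker import Worker
    #   c['workers'] = [
    #       Worker('worker1', 'pass',
    #              max_builds=2,
    #              properties={'os': 'linux'},
    #              defaultProperties={'ncpus': 1}),
    #   ]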

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, repr(self.name))

    @property
    def workername(self):
        # workername is now an alias to twisted.Service's name
        return self.name

    @property
    def botmaster(self):
        if self.master is None:
            return None
        return self.master.botmaster

    @defer.inlineCallbacks
    def updateLocks(self):
        """Convert the L{LockAccess} objects in C{self.locks} into real lock
        objects, while also maintaining the subscriptions to lock releases."""
        # unsubscribe from any old locks
        for s in self.lock_subscriptions:
            s.unsubscribe()

        # convert locks into their real form
        locks = yield self.botmaster.getLockFromLockAccesses(
            self.access, self.config_version)

        self.locks = [(l.getLockForWorker(self.workername), la)
                      for l, la in locks]
        self.lock_subscriptions = [
            l.subscribeToReleases(self._lockReleased) for l, la in self.locks
        ]

    def locksAvailable(self):
        """
        I am called to see if all the locks I depend on are available,
        in which case I return True; otherwise I return False.
        """
        if not self.locks:
            return True
        for lock, access in self.locks:
            if not lock.isAvailable(self, access):
                return False
        return True

    def acquireLocks(self):
        """
        I am called when a build is preparing to run. I try to claim all
        the locks that are needed for a build to happen. If I can't, then
        my caller should give up the build and try to get another worker
        to look at it.
        """
        log.msg("acquireLocks(worker {}, locks {})".format(self, self.locks))
        if not self.locksAvailable():
            log.msg("worker {} can't lock, giving up".format(self))
            return False
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return True

    def releaseLocks(self):
        """
        I am called to release any locks after a build has finished
        """
        log.msg("releaseLocks({}): {}".format(self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)

    def _lockReleased(self):
        """One of the locks for this worker was released; try scheduling
        builds."""
        if not self.botmaster:
            return  # oh well..
        self.botmaster.maybeStartBuildsForWorker(self.name)

    def _applyWorkerInfo(self, info):
        if not info:
            return

        self.worker_status.setAdmin(info.get("admin"))
        self.worker_status.setHost(info.get("host"))
        self.worker_status.setAccessURI(info.get("access_uri", None))
        self.worker_status.setVersion(info.get("version", "(unknown)"))

        # store everything as Properties
        for k, v in info.items():
            if k in ('environ', 'worker_commands'):
                continue
            self.worker_status.info.setProperty(k, v, "Worker")

    @defer.inlineCallbacks
    def _getWorkerInfo(self):
        worker = yield self.master.data.get(('workers', self.workerid))
        self._applyWorkerInfo(worker['workerinfo'])

    def setServiceParent(self, parent):
        # botmaster needs to be set before setServiceParent, which calls
        # startService

        self.manager = parent
        return super().setServiceParent(parent)

    @defer.inlineCallbacks
    def startService(self):
        # tracks config version for locks
        self.config_version = self.master.config_version

        self.updateLocks()
        self.workerid = yield self.master.data.updates.findWorkerId(self.name)

        self.workerActionConsumer = yield self.master.mq.startConsuming(
            self.controlWorker,
            ("control", "worker", str(self.workerid), None))

        yield self._getWorkerInfo()
        yield super().startService()

        # startMissingTimer wants the service to be running to really start
        if self.start_missing_on_startup:
            self.startMissingTimer()

    @defer.inlineCallbacks
    def reconfigService(self,
                        name,
                        password,
                        max_builds=None,
                        notify_on_missing=None,
                        missing_timeout=DEFAULT_MISSING_TIMEOUT,
                        properties=None,
                        defaultProperties=None,
                        locks=None,
                        keepalive_interval=DEFAULT_KEEPALIVE_INTERVAL,
                        machine_name=None):
        # Given a Worker config arguments, configure this one identically.
        # Because Worker objects are remotely referenced, we can't replace them
        # without disconnecting the worker, yet there's no reason to do that.

        assert self.name == name
        self.password = password

        # adopt new instance's configuration parameters
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        if notify_on_missing is None:
            notify_on_missing = []
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing

        if self.missing_timeout != missing_timeout:
            running_missing_timer = self.missing_timer
            self.stopMissingTimer()
            self.missing_timeout = missing_timeout
            if running_missing_timer:
                self.startMissingTimer()

        self.properties = Properties()
        self.properties.update(properties or {}, "Worker")
        self.properties.setProperty("workername", name, "Worker")
        self.defaultProperties = Properties()
        self.defaultProperties.update(defaultProperties or {}, "Worker")

        # Note that before first reconfig self.machine will always be None and
        # out of sync with self.machine_name, thus more complex logic is needed.
        if self.machine is not None and self.machine_name != machine_name:
            self.machine.unregisterWorker(self)
            self.machine = None

        self.machine_name = machine_name
        if self.machine is None and self.machine_name is not None:
            self.machine = self.master.machine_manager.getMachineByName(
                self.machine_name)
            if self.machine is not None:
                self.machine.registerWorker(self)
                self.properties.setProperty("machine_name", self.machine_name,
                                            "Worker")
            else:
                log.err("Unknown machine '{}' for worker '{}'".format(
                    self.machine_name, self.name))

        # update our records with the worker manager
        if not self.registration:
            self.registration = yield self.master.workers.register(self)
        yield self.registration.update(self, self.master.config)

        # tracks config version for locks
        self.config_version = self.master.config_version
        self.updateLocks()

    @defer.inlineCallbacks
    def reconfigServiceWithSibling(self, sibling):
        # reconfigServiceWithSibling will only reconfigure the worker when it is
        # configured differently.
        # However, the worker configuration also depends on which builders it is
        # configured for, which is handled below.
        yield super().reconfigServiceWithSibling(sibling)

        # update the attached worker's notion of which builders are attached.
        # This assumes that the relevant builders have already been configured,
        # which is why the reconfig_priority is set low in this class.
        bids = [
            b.getBuilderId()
            for b in self.botmaster.getBuildersForWorker(self.name)
        ]
        bids = yield defer.gatherResults(bids, consumeErrors=True)
        if self._configured_builderid_list != bids:
            yield self.master.data.updates.workerConfigured(
                self.workerid, self.master.masterid, bids)
            yield self.updateWorker()
            self._configured_builderid_list = bids

    @defer.inlineCallbacks
    def stopService(self):
        if self.registration:
            yield self.registration.unregister()
            self.registration = None
        self.workerActionConsumer.stopConsuming()
        self.stopMissingTimer()
        self.stopQuarantineTimer()
        # mark this worker as configured for zero builders in this master
        yield self.master.data.updates.workerConfigured(
            self.workerid, self.master.masterid, [])

        # during master shutdown we need to wait until the disconnection notification deliveries
        # are completed, otherwise some of the events may still be firing long after the master
        # is completely shut down.
        yield self.disconnect()
        yield self.waitForCompleteShutdown()

        yield super().stopService()

    def isCompatibleWithBuild(self, build_props):
        # given a build properties object, determines whether the build is
        # compatible with the currently running worker or not. This is most
        # often useful for latent workers where it's possible to request
        # different kinds of workers.
        return defer.succeed(True)
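
    # A minimal sketch of an override for a latent worker that only accepts
    # builds requesting its image; both the 'worker_image' property and
    # self.image are hypothetical:
    def _exampleIsCompatibleWithBuild(self, build_props):
        requested = build_props.getProperty('worker_image', None)
        current = getattr(self, 'image', None)
        return defer.succeed(requested is None or requested == current)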

    def startMissingTimer(self):
        if self.missing_timeout and self.parent and self.running:
            self.stopMissingTimer()  # in case it's already running
            self.missing_timer = self.master.reactor.callLater(
                self.missing_timeout, self._missing_timer_fired)

    def stopMissingTimer(self):
        if self.missing_timer:
            if self.missing_timer.active():
                self.missing_timer.cancel()
            self.missing_timer = None

    def isConnected(self):
        return self.conn

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return
        last_connection = time.ctime(time.time() - self.missing_timeout)
        self.master.data.updates.workerMissing(workerid=self.workerid,
                                               masterid=self.master.masterid,
                                               last_connection=last_connection,
                                               notify=self.notify_on_missing)

    def updateWorker(self):
        """Called to add or remove builders after the worker has connected.

        @return: a Deferred that indicates when an attached worker has
        accepted the new builders and/or released the old ones."""
        if self.conn:
            return self.sendBuilderList()
        # else:
        return defer.succeed(None)

    @defer.inlineCallbacks
    def attached(self, conn):
        """This is called when the worker connects."""

        assert self.conn is None

        metrics.MetricCountEvent.log("AbstractWorker.attached_workers", 1)

        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this worker to all the
        # Builders that care about it.

        # Reset graceful shutdown status
        self._graceful = False

        self.conn = conn
        self._old_builder_list = None  # clear builder list before proceed

        self.worker_status.setConnected(True)

        self._applyWorkerInfo(conn.info)
        self.worker_commands = conn.info.get("worker_commands", {})
        self.worker_environ = conn.info.get("environ", {})
        self.worker_basedir = conn.info.get("basedir", None)
        self.worker_system = conn.info.get("system", None)

        self.conn.notifyOnDisconnect(self.detached)

        workerinfo = {
            'admin': conn.info.get('admin'),
            'host': conn.info.get('host'),
            'access_uri': conn.info.get('access_uri'),
            'version': conn.info.get('version')
        }

        yield self.master.data.updates.workerConnected(
            workerid=self.workerid,
            masterid=self.master.masterid,
            workerinfo=workerinfo)

        if self.worker_system == "nt":
            self.path_module = namedModule("ntpath")
        else:
            # most everything accepts / as separator, so posix should be a
            # reasonable fallback
            self.path_module = namedModule("posixpath")
        log.msg("bot attached")
        self.messageReceivedFromWorker()
        self.stopMissingTimer()
        yield self.updateWorker()
        yield self.botmaster.maybeStartBuildsForWorker(self.name)
        self.updateState()

    def messageReceivedFromWorker(self):
        now = time.time()
        self.lastMessageReceived = now
        self.worker_status.setLastMessageReceived(now)

    def setupProperties(self, props):
        for name in self.properties.properties:
            props.setProperty(name, self.properties.getProperty(name),
                              "Worker")
        for name in self.defaultProperties.properties:
            if name not in props:
                props.setProperty(name,
                                  self.defaultProperties.getProperty(name),
                                  "Worker")

    @defer.inlineCallbacks
    def _handle_disconnection_delivery_notifier(self):
        self._pending_disconnection_delivery_notifier = Notifier()
        yield self.conn.waitForNotifyDisconnectedDelivered()
        self._pending_disconnection_delivery_notifier.notify(None)
        self._pending_disconnection_delivery_notifier = None

    @defer.inlineCallbacks
    def detached(self):
        # protect against race conditions in conn disconnect path and someone
        # calling detached directly. At the moment the null worker does that.
        if self.conn is None:
            return

        metrics.MetricCountEvent.log("AbstractWorker.attached_workers", -1)

        self._handle_disconnection_delivery_notifier()

        yield self.conn.waitShutdown()
        self.conn = None
        self._old_builder_list = []
        self.worker_status.setConnected(False)
        log.msg("Worker.detached({})".format(self.name))
        self.releaseLocks()
        yield self.master.data.updates.workerDisconnected(
            workerid=self.workerid,
            masterid=self.master.masterid,
        )

    def disconnect(self):
        """Forcibly disconnect the worker.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the worker is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a worker is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown worker. The second is
        when we wind up with two connections for the same worker, in which
        case we disconnect the older connection.
        """
        if self.conn is None:
            return defer.succeed(None)
        log.msg("disconnecting old worker {} now".format(self.name))
        # When this Deferred fires, we'll be ready to accept the new worker
        return self._disconnect(self.conn)

    def waitForCompleteShutdown(self):
        # This function waits until the disconnection to happen and the disconnection
        # notifications have been delivered and acted upon.
        return self._waitForCompleteShutdownImpl(self.conn)

    @defer.inlineCallbacks
    def _waitForCompleteShutdownImpl(self, conn):
        if conn:
            d = defer.Deferred()

            def _disconnected():
                eventually(d.callback, None)

            conn.notifyOnDisconnect(_disconnected)
            yield d
            yield conn.waitForNotifyDisconnectedDelivered()
        elif self._pending_disconnection_delivery_notifier is not None:
            yield self._pending_disconnection_delivery_notifier.wait()

    @defer.inlineCallbacks
    def _disconnect(self, conn):
        # This function waits until the disconnection to happen and the disconnection
        # notifications have been delivered and acted upon
        d = self._waitForCompleteShutdownImpl(conn)
        conn.loseConnection()
        log.msg("waiting for worker to finish disconnecting")
        yield d

    @defer.inlineCallbacks
    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForWorker(self.name)

        blist = [(b.name, b.config.workerbuilddir) for b in our_builders]

        if blist == self._old_builder_list:
            return

        slist = yield self.conn.remoteSetBuilderList(builders=blist)

        self._old_builder_list = blist

        # Nothing has changed, so don't need to re-attach to everything
        if not slist:
            return

        dl = []
        for name in slist:
            # use get() since we might have changed our mind since then
            b = self.botmaster.builders.get(name)
            if b:
                d1 = self.attachBuilder(b)
                dl.append(d1)
        yield defer.DeferredList(dl)

    def attachBuilder(self, builder):
        return builder.attached(self, self.worker_commands)

    def controlWorker(self, key, params):
        log.msg("worker {} wants to {}: {}".format(self.name, key[-1], params))
        if key[-1] == "stop":
            return self.shutdownRequested()
        if key[-1] == "pause":
            self.pause()
        if key[-1] == "unpause":
            self.unpause()
        if key[-1] == "kill":
            self.shutdown()

    def shutdownRequested(self):
        self._graceful = True
        self.maybeShutdown()
        self.updateState()

    def addWorkerForBuilder(self, wfb):
        self.workerforbuilders[wfb.builder_name] = wfb

    def removeWorkerForBuilder(self, wfb):
        try:
            del self.workerforbuilders[wfb.builder_name]
        except KeyError:
            pass

    def buildFinished(self, wfb):
        """This is called when a build on this worker is finished."""
        self.botmaster.maybeStartBuildsForWorker(self.name)

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this worker
        can start a build.  This function can be used to limit overall
        concurrency on the worker.

        Note for subclassers: if a worker can become willing to start a build
        without any action on that worker (for example, by a resource in use on
        another worker becoming available), then you must arrange for
        L{maybeStartBuildsForWorker} to be called at that time, or builds on
        this worker will not start.
        """

        # If we're waiting to shutdown gracefully or paused, then we shouldn't
        # accept any new jobs.
        if self._graceful or self._paused:
            return False

        if self.max_builds:
            active_builders = [
                wfb for wfb in self.workerforbuilders.values() if wfb.isBusy()
            ]
            if len(active_builders) >= self.max_builds:
                return False

        if not self.locksAvailable():
            return False

        return True

    @defer.inlineCallbacks
    def shutdown(self):
        """Shutdown the worker"""
        if not self.conn:
            log.msg("no remote; worker is already shut down")
            return

        yield self.conn.remoteShutdown()

    def maybeShutdown(self):
        """Shut down this worker if it has been asked to shut down gracefully,
        and has no active builders."""
        if not self._graceful:
            return
        active_builders = [
            wfb for wfb in self.workerforbuilders.values() if wfb.isBusy()
        ]
        if active_builders:
            return
        d = self.shutdown()
        d.addErrback(log.err, 'error while shutting down worker')

    def updateState(self):
        self.master.data.updates.setWorkerState(self.workerid, self._paused,
                                                self._graceful)

    def pause(self):
        """Stop running new builds on the worker."""
        self._paused = True
        self.updateState()

    def unpause(self):
        """Restart running new builds on the worker."""
        self._paused = False
        self.botmaster.maybeStartBuildsForWorker(self.name)
        self.updateState()

    def isPaused(self):
        return self._paused

    def resetQuarantine(self):
        self.quarantine_timeout = self.quarantine_initial_timeout

    def putInQuarantine(self):
        if self.quarantine_timer:  # already in quarantine
            return

        self.pause()
        self.quarantine_timer = self.master.reactor.callLater(
            self.quarantine_timeout, self.exitQuarantine)
        log.msg("{} has been put in quarantine for {}s".format(
            self.name, self.quarantine_timeout))
        # next we will wait twice as long
        self.quarantine_timeout *= 2
        if self.quarantine_timeout > self.quarantine_max_timeout:
            # unless we hit the max timeout
            self.quarantine_timeout = self.quarantine_max_timeout

    def exitQuarantine(self):
        self.quarantine_timer = None
        self.unpause()

    def stopQuarantineTimer(self):
        if self.quarantine_timer is not None:
            self.quarantine_timer.cancel()
            self.quarantine_timer = None
            self.unpause()
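The quarantine logic above implements exponential back-off: each new quarantine doubles the pause, up to a fixed ceiling. A minimal standalone sketch of that progression (the numbers are illustrative, not Buildbot's defaults):

def next_quarantine_timeout(current, maximum):
    # double the wait, but never exceed the ceiling
    return min(current * 2, maximum)

timeout = 10  # hypothetical initial timeout, in seconds
for _ in range(5):
    print(timeout)  # prints 10, 20, 40, 80, 120 (capped at maximum=120)
    timeout = next_quarantine_timeout(timeout, maximum=120)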
Exemplo n.º 48
0
    @defer.inlineCallbacks
    def start(self):
        config = yield self.getStepConfig()

        ss = self.build.getSourceStamp('')
        got = self.build.getProperty('got_revision')
        if got:
            ss = ss.getAbsoluteSourceStamp(got)

        # Stop the build early if .travis.yml says we should ignore branch
        if ss.branch and not config.can_build_branch(ss.branch):
            defer.returnValue(self.end(SUCCESS))

        # Find the master object
        master = self.build.builder.botmaster.parent

        # Find the scheduler we are going to use to queue actual builds
        all_schedulers = master.allSchedulers()
        all_schedulers = dict([(sch.name, sch) for sch in all_schedulers])
        sch = all_schedulers[self.scheduler]

        triggered = []

        self.running = True

        for env in config.matrix:
            props_to_set = Properties()
            props_to_set.updateFromProperties(self.build.getProperties())
            props_to_set.update(env["env"], ".travis.yml")
            props_to_set.setProperty("spawned_by",
                                     self.build.build_status.number,
                                     "Scheduler")

            ss_setid = yield ss.getSourceStampSetId(master)
            triggered.append(sch.trigger(ss_setid, set_props=props_to_set))

        results = yield defer.DeferredList(triggered, consumeErrors=1)

        was_exception = was_failure = False
        brids = {}

        for was_cb, trigger_result in results:
            if isinstance(trigger_result, tuple):
                trigger_result, some_brids = trigger_result
                brids.update(some_brids)

            if not was_cb:
                was_exception = True
                log.err(trigger_result)
                continue

            if trigger_result == FAILURE:
                was_failure = True

        if was_exception:
            result = EXCEPTION
        elif was_failure:
            result = FAILURE
        else:
            result = SUCCESS

        if brids:
            brid_to_bn = dict((_brid, _bn) for _bn, _brid in brids.iteritems())
            res = yield defer.DeferredList(
                [master.db.builds.getBuildsForRequest(br)
                 for br in brids.values()],
                consumeErrors=1)
            for was_cb, builddicts in res:
                if was_cb:
                    for build in builddicts:
                        bn = brid_to_bn[build['brid']]
                        num = build['number']

                        url = master.status.getURLForBuild(bn, num)
                        self.step_status.addURL("%s #%d" % (bn, num), url)

        defer.returnValue(self.end(result))
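A hedged aside on the property layering in the matrix loop above (the values are made up; this assumes the usual buildbot.process.properties module path): each later call overwrites earlier ones, so per-env settings from .travis.yml shadow inherited build properties.

from buildbot.process.properties import Properties

inherited = Properties()
inherited.setProperty("CC", "gcc", "Build")

props_to_set = Properties()
props_to_set.updateFromProperties(inherited)          # inherit build properties
props_to_set.update({"CC": "clang"}, ".travis.yml")   # matrix env wins

assert props_to_set.getProperty("CC") == "clang"
assert props_to_set.getPropertySource("CC") == ".travis.yml"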
Exemplo n.º 49
0
    def start(self):
        properties = self.build.getProperties()

        # make a new properties object from a dict rendered by the old
        # properties object
        props_to_set = Properties()
        props_to_set.update(self.set_properties, "Trigger")
        for p in self.copy_properties:
            if p not in properties:
                continue
            props_to_set.setProperty(
                p, properties[p],
                "%s (in triggering build)" % properties.getPropertySource(p))

        self.running = True

        # (is there an easier way to find the BuildMaster?)
        all_schedulers = self.build.builder.botmaster.parent.allSchedulers()
        all_schedulers = dict([(sch.name, sch) for sch in all_schedulers])
        unknown_schedulers = []
        triggered_schedulers = []

        # don't fire any schedulers if we discover an unknown one
        for scheduler in self.schedulerNames:
            if all_schedulers.has_key(scheduler):
                sch = all_schedulers[scheduler]
                if isinstance(sch, Triggerable):
                    triggered_schedulers.append(scheduler)
                else:
                    unknown_schedulers.append(scheduler)
            else:
                unknown_schedulers.append(scheduler)

        if unknown_schedulers:
            self.step_status.setText(['no scheduler:'] + unknown_schedulers)
            return self.end(FAILURE)

        master = self.build.builder.botmaster.parent  # seriously?!
        if self.sourceStamp:
            d = master.db.sourcestamps.addSourceStamp(**self.sourceStamp)
        elif self.alwaysUseLatest:
            d = defer.succeed(None)
        else:
            ss = self.build.getSourceStamp()
            if self.updateSourceStamp:
                got = properties.getProperty('got_revision')
                if got:
                    ss = ss.getAbsoluteSourceStamp(got)
            d = ss.getSourceStampId(master)

        def start_builds(ssid):
            dl = []
            for scheduler in triggered_schedulers:
                sch = all_schedulers[scheduler]
                dl.append(sch.trigger(ssid, set_props=props_to_set))
            self.step_status.setText(['triggered'] + triggered_schedulers)

            if self.waitForFinish:
                return defer.DeferredList(dl, consumeErrors=1)
            else:
                # add errbacks so trigger failures are logged instead of dropped
                for d in dl:
                    d.addErrback(
                        log.err,
                        '(ignored) while invoking Triggerable schedulers:')
                self.end(SUCCESS)
                return None

        d.addCallback(start_builds)

        def cb(rclist):
            result = SUCCESS
            for was_cb, results in rclist:
                # TODO: make this algo more configurable
                if not was_cb:
                    result = EXCEPTION
                    log.err(results)
                    break
                if results == FAILURE:
                    result = FAILURE
            return self.end(result)

        def eb(why):
            return self.end(FAILURE)

        if self.waitForFinish:
            d.addCallbacks(cb, eb)

        d.addErrback(log.err, '(ignored) while triggering builds:')
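A small hedged sketch of the copy_properties idiom above (the names are made up): only properties that actually exist in the triggering build are copied, and each copy records where it came from.

from buildbot.process.properties import Properties

parent = Properties()
parent.setProperty("got_revision", "abc123", "Source")

child = Properties()
for p in ["got_revision", "missing_prop"]:  # "missing_prop" is skipped
    if p not in parent:
        continue
    child.setProperty(p, parent[p],
                      "%s (in triggering build)" % parent.getPropertySource(p))

assert child.getPropertySource("got_revision") == "Source (in triggering build)"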
Exemplo n.º 50
0
class Change:

    """I represent a single change to the source tree. This may involve several
    files, but they are all changed by the same person, and there is a change
    comment for the group as a whole."""

    number = None
    branch = None
    category = None
    revision = None  # used to create a source-stamp
    links = []  # links are gone, but upgrade code expects this attribute

    @classmethod
    def fromChdict(cls, master, chdict):
        """
        Class method to create a L{Change} from a dictionary as returned
        by L{ChangesConnectorComponent.getChange}.

        @param master: build master instance
        @param chdict: change dictionary

        @returns: L{Change} via Deferred
        """
        cache = master.caches.get_cache("Changes", cls._make_ch)
        return cache.get(chdict['changeid'], chdict=chdict, master=master)

    @classmethod
    def _make_ch(cls, changeid, master, chdict):
        change = cls(None, None, None, _fromChdict=True)
        change.who = chdict['author']
        change.comments = chdict['comments']
        change.revision = chdict['revision']
        change.branch = chdict['branch']
        change.category = chdict['category']
        change.revlink = chdict['revlink']
        change.repository = chdict['repository']
        change.codebase = chdict['codebase']
        change.project = chdict['project']
        change.number = chdict['changeid']

        when = chdict['when_timestamp']
        if when:
            when = datetime2epoch(when)
        change.when = when

        change.files = sorted(chdict['files'])

        change.properties = Properties()
        for n, (v, s) in iteritems(chdict['properties']):
            change.properties.setProperty(n, v, s)

        return defer.succeed(change)

    def __init__(self, who, files, comments, revision=None, when=None,
                 branch=None, category=None, revlink='', properties=None,
                 repository='', codebase='', project='', _fromChdict=False):
        if properties is None:
            properties = {}
        # skip all this madness if we're being built from the database
        if _fromChdict:
            return

        self.who = who
        self.comments = comments

        def none_or_unicode(x):
            if x is None:
                return x
            return text_type(x)

        self.revision = none_or_unicode(revision)
        now = util.now()
        if when is None:
            self.when = now
        elif when > now:
            # this happens when the committing system has an incorrect clock, for example.
            # handle it gracefully
            log.msg(
                "received a Change with when > now; assuming the change happened now")
            self.when = now
        else:
            self.when = when
        self.branch = none_or_unicode(branch)
        self.category = none_or_unicode(category)
        self.revlink = revlink
        self.properties = Properties()
        self.properties.update(properties, "Change")
        self.repository = repository
        self.codebase = codebase
        self.project = project

        # keep a sorted list of the files, for easier display
        self.files = sorted(files or [])

    def __setstate__(self, dict):
        self.__dict__ = dict
        # Older Changes won't have a 'properties' attribute in them
        if not hasattr(self, 'properties'):
            self.properties = Properties()
        if not hasattr(self, 'revlink'):
            self.revlink = ""

    def __str__(self):
        return (u"Change(revision=%r, who=%r, branch=%r, comments=%r, " +
                u"when=%r, category=%r, project=%r, repository=%r, " +
                u"codebase=%r)") % (
            self.revision, self.who, self.branch, self.comments,
            self.when, self.category, self.project, self.repository,
            self.codebase)

    def __cmp__(self, other):
        return self.number - other.number

    def asText(self):
        data = ""
        data += "Files:\n"
        for f in self.files:
            data += " %s\n" % f
        if self.repository:
            data += "On: %s\n" % self.repository
        if self.project:
            data += "For: %s\n" % self.project
        data += "At: %s\n" % self.getTime()
        data += "Changed By: %s\n" % self.who
        data += "Comments: %s" % self.comments
        data += "Properties: \n"
        for prop in self.properties.asList():
            data += "  %s: %s" % (prop[0], prop[1])
        data += '\n\n'
        return data

    def asDict(self):
        '''returns a dictionary with suitable info for html/mail rendering'''
        files = [dict(name=f) for f in self.files]
        files.sort(key=lambda a: a['name'])

        result = {
            # Constant
            'number': self.number,
            'branch': self.branch,
            'category': self.category,
            'who': self.getShortAuthor(),
            'comments': self.comments,
            'revision': self.revision,
            'rev': self.revision,
            'when': self.when,
            'at': self.getTime(),
            'files': files,
            'revlink': getattr(self, 'revlink', None),
            'properties': self.properties.asList(),
            'repository': getattr(self, 'repository', None),
            'codebase': getattr(self, 'codebase', ''),
            'project': getattr(self, 'project', None)
        }
        return result

    def getShortAuthor(self):
        return self.who

    def getTime(self):
        if not self.when:
            return "?"
        return time.strftime("%a %d %b %Y %H:%M:%S",
                             time.localtime(self.when))

    def getTimes(self):
        return (self.when, None)

    def getText(self):
        return [html.escape(self.who)]

    def getLogs(self):
        return {}
Exemplo n.º 51
0
class BaseScheduler(service.MultiService, ComparableMixin):
    implements(interfaces.IScheduler)
    # subclasses must set .compare_attrs

    upstream_name = None  # set to be notified about upstream buildsets

    def __init__(self, name, builderNames, properties):
        service.MultiService.__init__(self)
        self.name = name
        self.properties = Properties()
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")
        errmsg = ("The builderNames= argument to Scheduler must be a list "
                  "of Builder description names (i.e. the 'name' key of the "
                  "Builder specification dictionary)")
        assert isinstance(builderNames, (list, tuple)), errmsg
        for b in builderNames:
            assert isinstance(b, str), errmsg
        self.builderNames = builderNames
        # I will acquire a .schedulerid value before I'm started

    def compareToOther(self, them):
        # like ComparableMixin.__cmp__, but only used by our manager
        # TODO: why?? why not use __cmp__?
        result = cmp(type(self), type(them))
        if result:
            return result
        result = cmp(self.__class__, them.__class__)
        if result:
            return result
        assert self.compare_attrs == them.compare_attrs
        self_list = [getattr(self, name, _None) for name in self.compare_attrs]
        them_list = [getattr(them, name, _None) for name in self.compare_attrs]
        return cmp(self_list, them_list)

    def get_initial_state(self, max_changeid):
        # override this if you pay attention to Changes, probably to:
        #return {"last_processed": max_changeid}
        return {}

    def get_state(self, t):
        return self.parent.db.scheduler_get_state(self.schedulerid, t)

    def set_state(self, t, state):
        self.parent.db.scheduler_set_state(self.schedulerid, t, state)

    def listBuilderNames(self):
        return self.builderNames

    def getPendingBuildTimes(self):
        return []

    def create_buildset(self, ssid, reason, t, props=None, builderNames=None):
        db = self.parent.db
        if props is None:
            props = self.properties
        if builderNames is None:
            builderNames = self.builderNames
        bsid = db.create_buildset(ssid, reason, props, builderNames, t)
        # notify downstream schedulers so they can watch for it to complete
        self.parent.publish_buildset(self.name, bsid, t)
        return bsid
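A hedged illustration of the property set-up in __init__ above (invented values): user-supplied properties are applied first, then setProperty pins the "scheduler" property, so the scheduler's own name always wins.

from buildbot.process.properties import Properties

props = Properties()
props.update({"owner": "alice", "scheduler": "bogus"}, "Scheduler")
props.setProperty("scheduler", "nightly", "Scheduler")  # overwrites "bogus"

assert props.getProperty("scheduler") == "nightly"
assert props.getProperty("owner") == "alice"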
Exemplo n.º 52
0
class AbstractBuildSlave(NewCredPerspective, service.MultiService):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['slaves'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a build slave -- a remote machine capable of
    running builds.  I am instantiated by the configuration file, and can be
    subclassed to add extra functionality."""

    implements(IBuildSlave)

    def __init__(self,
                 name,
                 password,
                 max_builds=None,
                 notify_on_missing=[],
                 missing_timeout=3600,
                 properties={}):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this buildslave (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this slave
        @type properties: dictionary
        """
        service.MultiService.__init__(self)
        self.slavename = name
        self.password = password
        self.botmaster = None  # no buildmaster yet
        self.slave_status = SlaveStatus(name)
        self.slave = None  # a RemoteReference to the Bot, when connected
        self.slave_commands = None
        self.slavebuilders = {}
        self.max_builds = max_builds

        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")

        self.lastMessageReceived = 0
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            assert isinstance(i, str)
        self.missing_timeout = missing_timeout
        self.missing_timer = None

    def update(self, new):
        """
        Given a new BuildSlave, configure this one identically.  Because
        BuildSlave objects are remotely referenced, we can't replace them
        without disconnecting the slave, yet there's no reason to do that.
        """
        # the reconfiguration logic should guarantee this:
        assert self.slavename == new.slavename
        assert self.password == new.password
        assert self.__class__ == new.__class__
        self.max_builds = new.max_builds

    def __repr__(self):
        if self.botmaster:
            builders = self.botmaster.getBuildersForSlave(self.slavename)
            return "<%s '%s', current builders: %s>" % \
               (self.__class__.__name__, self.slavename,
                ','.join(map(lambda b: b.name, builders)))
        else:
            return "<%s '%s', (no builders yet)>" % \
                (self.__class__.__name__, self.slavename)

    def setBotmaster(self, botmaster):
        assert not self.botmaster, "BuildSlave already has a botmaster"
        self.botmaster = botmaster
        self.startMissingTimer()

    def stopMissingTimer(self):
        if self.missing_timer:
            self.missing_timer.cancel()
            self.missing_timer = None

    def startMissingTimer(self):
        if self.notify_on_missing and self.missing_timeout and self.parent:
            self.stopMissingTimer()  # in case it's already running
            self.missing_timer = reactor.callLater(self.missing_timeout,
                                                   self._missing_timer_fired)

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return

        buildmaster = self.botmaster.parent
        status = buildmaster.getStatus()
        text = "The Buildbot working for '%s'\n" % status.getProjectName()
        text += ("has noticed that the buildslave named %s went away\n" %
                 self.slavename)
        text += "\n"
        text += ("It last disconnected at %s (buildmaster-local time)\n" %
                 time.ctime(time.time() - self.missing_timeout))  # approx
        text += "\n"
        text += "The admin on record (as reported by BUILDSLAVE:info/admin)\n"
        text += "was '%s'.\n" % self.slave_status.getAdmin()
        text += "\n"
        text += "Sincerely,\n"
        text += " The Buildbot\n"
        text += " %s\n" % status.getProjectURL()
        subject = "Buildbot: buildslave %s was lost" % self.slavename
        return self._mail_missing_message(subject, text)

    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.slave:
            return self.sendBuilderList()
        else:
            return defer.succeed(None)

    def updateSlaveStatus(self, buildStarted=None, buildFinished=None):
        if buildStarted:
            self.slave_status.buildStarted(buildStarted)
        if buildFinished:
            self.slave_status.buildFinished(buildFinished)

    def attached(self, bot):
        """This is called when the slave connects.

        @return: a Deferred that fires with a suitable pb.IPerspective to
                 give to the slave (i.e. 'self')"""

        if self.slave:
            # uh-oh, we've got a duplicate slave. The most likely
            # explanation is that the slave is behind a slow link, thinks we
            # went away, and has attempted to reconnect, so we've got two
            # "connections" from the same slave, but the previous one is
            # stale. Give the new one precedence.
            log.msg("duplicate slave %s replacing old one" % self.slavename)

            # just in case we've got two identically-configured slaves,
            # report the IP addresses of both so someone can resolve the
            # squabble
            tport = self.slave.broker.transport
            log.msg("old slave was connected from", tport.getPeer())
            log.msg("new slave is from", bot.broker.transport.getPeer())
            d = self.disconnect()
        else:
            d = defer.succeed(None)
        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # we accumulate slave information in this 'state' dictionary, then
        # set it atomically if we make it far enough through the process
        state = {}

        # Reset graceful shutdown status
        self.slave_status.setGraceful(False)
        # We want to know when the graceful shutdown flag changes
        self.slave_status.addGracefulWatcher(self._gracefulChanged)

        def _log_attachment_on_slave(res):
            d1 = bot.callRemote("print", "attached")
            d1.addErrback(lambda why: None)
            return d1

        d.addCallback(_log_attachment_on_slave)

        def _get_info(res):
            d1 = bot.callRemote("getSlaveInfo")

            def _got_info(info):
                log.msg("Got slaveinfo from '%s'" % self.slavename)
                # TODO: info{} might have other keys
                state["admin"] = info.get("admin")
                state["host"] = info.get("host")
                state["access_uri"] = info.get("access_uri", None)

            def _info_unavailable(why):
                # maybe an old slave, doesn't implement remote_getSlaveInfo
                log.msg("BuildSlave.info_unavailable")
                log.err(why)

            d1.addCallbacks(_got_info, _info_unavailable)
            return d1

        d.addCallback(_get_info)

        def _get_version(res):
            d1 = bot.callRemote("getVersion")

            def _got_version(version):
                state["version"] = version

            def _version_unavailable(why):
                # probably an old slave
                log.msg("BuildSlave.version_unavailable")
                log.err(why)

            d1.addCallbacks(_got_version, _version_unavailable)

        d.addCallback(_get_version)

        def _get_commands(res):
            d1 = bot.callRemote("getCommands")

            def _got_commands(commands):
                state["slave_commands"] = commands

            def _commands_unavailable(why):
                # probably an old slave
                log.msg("BuildSlave._commands_unavailable")
                if why.check(AttributeError):
                    return
                log.err(why)

            d1.addCallbacks(_got_commands, _commands_unavailable)
            return d1

        d.addCallback(_get_commands)

        def _accept_slave(res):
            self.slave_status.setAdmin(state.get("admin"))
            self.slave_status.setHost(state.get("host"))
            self.slave_status.setAccessURI(state.get("access_uri"))
            self.slave_status.setVersion(state.get("version"))
            self.slave_status.setConnected(True)
            self.slave_commands = state.get("slave_commands")
            self.slave = bot
            log.msg("bot attached")
            self.messageReceivedFromSlave()
            self.stopMissingTimer()
            self.botmaster.parent.status.slaveConnected(self.slavename)

            return self.updateSlave()

        d.addCallback(_accept_slave)
        d.addCallback(lambda res: self.botmaster.maybeStartAllBuilds())

        # Finally, the slave gets a reference to this BuildSlave. They
        # receive this later, after we've started using them.
        d.addCallback(lambda res: self)
        return d

    def messageReceivedFromSlave(self):
        now = time.time()
        self.lastMessageReceived = now
        self.slave_status.setLastMessageReceived(now)

    def detached(self, mind):
        self.slave = None
        self.slave_status.removeGracefulWatcher(self._gracefulChanged)
        self.slave_status.setConnected(False)
        log.msg("BuildSlave.detached(%s)" % self.slavename)
        self.botmaster.parent.status.slaveDisconnected(self.slavename)

    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """

        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)
        # When this Deferred fires, we'll be ready to accept the new slave
        return self._disconnect(self.slave)

    def _disconnect(self, slave):
        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback with one argument, the
        # RemoteReference being disconnected.
        def _disconnected(rref):
            reactor.callLater(0, d.callback, None)

        slave.notifyOnDisconnect(_disconnected)
        tport = slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            #tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
        except Exception:
            # however, these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
        log.msg("waiting for slave to finish disconnecting")

        return d

    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
        blist = [(b.name, b.slavebuilddir) for b in our_builders]
        d = self.slave.callRemote("setBuilderList", blist)
        return d

    def perspective_keepalive(self):
        pass

    def addSlaveBuilder(self, sb):
        self.slavebuilders[sb.builder_name] = sb

    def removeSlaveBuilder(self, sb):
        try:
            del self.slavebuilders[sb.builder_name]
        except KeyError:
            pass

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this buildslave
        can start a build.  This function can be used to limit overall
        concurrency on the buildslave.
        """
        # If we're waiting to shutdown gracefully, then we shouldn't
        # accept any new jobs.
        if self.slave_status.getGraceful():
            return False

        if self.max_builds:
            active_builders = [
                sb for sb in self.slavebuilders.values() if sb.isBusy()
            ]
            if len(active_builders) >= self.max_builds:
                return False
        return True

    def _mail_missing_message(self, subject, text):
        # first, see if we have a MailNotifier we can use. This gives us a
        # fromaddr and a relayhost.
        buildmaster = self.botmaster.parent
        for st in buildmaster.statusTargets:
            if isinstance(st, MailNotifier):
                break
        else:
            # if not, they get a default MailNotifier, which always uses SMTP
            # to localhost and uses a dummy fromaddr of "buildbot".
            log.msg("buildslave-missing msg using default MailNotifier")
            st = MailNotifier("buildbot")
        # now construct the mail

        m = Message()
        m.set_payload(text)
        m['Date'] = formatdate(localtime=True)
        m['Subject'] = subject
        m['From'] = st.fromaddr
        recipients = self.notify_on_missing
        m['To'] = ", ".join(recipients)
        d = st.sendMessage(m, recipients)
        # return the Deferred for testing purposes
        return d

    def _gracefulChanged(self, graceful):
        """This is called when our graceful shutdown setting changes"""
        if graceful:
            active_builders = [
                sb for sb in self.slavebuilders.values() if sb.isBusy()
            ]
            if len(active_builders) == 0:
                # Shut down!
                self.shutdown()

    def shutdown(self):
        """Shutdown the slave"""
        # Look for a builder with a remote reference to the client side
        # slave.  If we can find one, then call "shutdown" on the remote
        # builder, which will cause the slave buildbot process to exit.
        d = None
        for b in self.slavebuilders.values():
            if b.remote:
                d = b.remote.callRemote("shutdown")
                break

        if d:
            log.msg("Shutting down slave: %s" % self.slavename)

            # The remote shutdown call will not complete successfully since the
            # buildbot process exits almost immediately after getting the
            # shutdown request.
            # Here we look at the reason why the remote call failed, and if
            # it's because the connection was lost, that means the slave
            # shutdown as expected.
            def _errback(why):
                if why.check(twisted.spread.pb.PBConnectionLost):
                    log.msg("Lost connection to %s" % self.slavename)
                else:
                    log.err("Unexpected error when trying to shutdown %s" %
                            self.slavename)

            d.addErrback(_errback)
            return d
        log.err("Couldn't find remote builder to shut down slave")
        return defer.succeed(None)
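A self-contained sketch of the for/else idiom used in _mail_missing_message above (this MailNotifier is a stand-in class, not the real status target): the else branch runs only when the loop finishes without hitting break.

class MailNotifier(object):  # hypothetical stand-in for the real notifier
    def __init__(self, fromaddr):
        self.fromaddr = fromaddr

status_targets = ["web", "irc"]  # no mail notifier configured here
for st in status_targets:
    if isinstance(st, MailNotifier):
        break  # borrow this notifier's fromaddr/relayhost
else:
    st = MailNotifier("buildbot")  # fall back to a default sender

assert st.fromaddr == "buildbot"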
Exemplo n.º 53
0
    def createTriggerProperties(self, properties):
        # make a new properties object from a dict rendered by the old
        # properties object
        trigger_properties = Properties()
        trigger_properties.update(properties, "Trigger")
        return trigger_properties
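A hedged note on what callers of the helper above get back (the dict is invented): every entry of the rendered dict is re-recorded with "Trigger" as its source.

from buildbot.process.properties import Properties

rendered = {"branch": "trunk", "got_revision": "abc123"}
trigger_properties = Properties()
trigger_properties.update(rendered, "Trigger")

assert trigger_properties.getPropertySource("branch") == "Trigger"
assert trigger_properties["got_revision"] == "abc123"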
Exemplo n.º 54
0
class TestProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()

    def testDictBehavior(self):
        self.props.setProperty("do-tests", 1, "scheduler")
        self.props.setProperty("do-install", 2, "scheduler")

        self.assert_(self.props.has_key('do-tests'))
        self.failUnlessEqual(self.props['do-tests'], 1)
        self.failUnlessEqual(self.props['do-install'], 2)
        self.assertRaises(KeyError, lambda : self.props['do-nothing'])
        self.failUnlessEqual(self.props.getProperty('do-install'), 2)

    def testAsList(self):
        self.props.setProperty("happiness", 7, "builder")
        self.props.setProperty("flames", True, "tester")

        self.assertEqual(sorted(self.props.asList()),
                [ ('flames', True, 'tester'), ('happiness', 7, 'builder') ])

    def testAsDict(self):
        self.props.setProperty("msi_filename", "product.msi", 'packager')
        self.props.setProperty("dmg_filename", "product.dmg", 'packager')

        self.assertEqual(self.props.asDict(),
                dict(msi_filename=('product.msi', 'packager'), dmg_filename=('product.dmg', 'packager')))

    def testUpdate(self):
        self.props.setProperty("x", 24, "old")
        newprops = { 'a' : 1, 'b' : 2 }
        self.props.update(newprops, "new")

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateFromProperties(self):
        self.props.setProperty("a", 94, "old")
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new")
        newprops.setProperty('b', 2, "new")
        self.props.updateFromProperties(newprops)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateFromPropertiesNoRuntime(self):
        self.props.setProperty("a", 94, "old")
        self.props.setProperty("b", 84, "old")
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new", runtime=True)
        newprops.setProperty('b', 2, "new", runtime=False)
        newprops.setProperty('c', 3, "new", runtime=True)
        newprops.setProperty('d', 3, "new", runtime=False)
        self.props.updateFromPropertiesNoRuntime(newprops)

        self.failUnlessEqual(self.props.getProperty('a'), 94)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'old')
        self.failUnlessEqual(self.props.getProperty('b'), 2)
        self.failUnlessEqual(self.props.getPropertySource('b'), 'new')
        self.failUnlessEqual(self.props.getProperty('c'), None) # not updated
        self.failUnlessEqual(self.props.getProperty('d'), 3)
        self.failUnlessEqual(self.props.getPropertySource('d'), 'new')
        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
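The last test above pins down the runtime-flag semantics; here is a condensed sketch of the same behaviour (property names invented): entries marked runtime=True are transient and are not copied by updateFromPropertiesNoRuntime.

from buildbot.process.properties import Properties

src = Properties()
src.setProperty("buildnumber", 17, "Build", runtime=True)   # skipped
src.setProperty("branch", "trunk", "Build", runtime=False)  # copied

dst = Properties()
dst.updateFromPropertiesNoRuntime(src)

assert dst.getProperty("buildnumber") is None
assert dst.getProperty("branch") == "trunk"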
Exemplo n.º 55
0
class AbstractWorker(service.BuildbotService, object):

    """This is the master-side representative for a remote buildbot worker.
    There is exactly one for each worker described in the config file (the
    c['workers'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a worker -- a remote machine capable of
    running builds.  I am instantiated by the configuration file, and can be
    subclassed to add extra functionality."""

    implements(IWorker)

    # reconfig workers after builders
    reconfig_priority = 64

    def checkConfig(self, name, password, max_builds=None,
                    notify_on_missing=None,
                    missing_timeout=10 * 60,   # Ten minutes
                    properties=None, locks=None, keepalive_interval=3600):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this worker (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this worker
        @type properties: dictionary
        @param locks: A list of locks that must be acquired before this worker
                      can be used
        @type locks: dictionary
        """
        self.name = name = ascii2unicode(name)

        if properties is None:
            properties = {}

        self.password = password

        # protocol registration
        self.registration = None

        # these are set when the service is started
        self.manager = None
        self.workerid = None

        self.worker_status = WorkerStatus(name)
        self.worker_commands = None
        self.workerforbuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.lock_subscriptions = []

        self.properties = Properties()
        self.properties.update(properties, "Worker")
        self.properties.setProperty("slavename", name, "Worker (deprecated)")
        self.properties.setProperty("workername", name, "Worker")

        self.lastMessageReceived = 0

        if notify_on_missing is None:
            notify_on_missing = []
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            if not isinstance(i, str):
                config.error(
                    'notify_on_missing arg %r is not a string' % (i,))

        self.missing_timeout = missing_timeout
        self.missing_timer = None

        # a protocol connection, if we're currently connected
        self.conn = None

        self._old_builder_list = None

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.name)

    @property
    def workername(self):
        # workername is now an alias to twisted.Service's name
        return self.name
    deprecatedWorkerClassProperty(locals(), workername)

    @property
    def botmaster(self):
        if self.master is None:
            return None
        return self.master.botmaster

    def updateLocks(self):
        """Convert the L{LockAccess} objects in C{self.locks} into real lock
        objects, while also maintaining the subscriptions to lock releases."""
        # unsubscribe from any old locks
        for s in self.lock_subscriptions:
            s.unsubscribe()

        # convert locks into their real form
        locks = [(self.botmaster.getLockFromLockAccess(a), a)
                 for a in self.access]
        self.locks = [(l.getLock(self), la) for l, la in locks]
        self.lock_subscriptions = [l.subscribeToReleases(self._lockReleased)
                                   for l, la in self.locks]

    def locksAvailable(self):
        """
        I am called to see if all the locks I depend on are available,
        in which I return True, otherwise I return False
        """
        if not self.locks:
            return True
        for lock, access in self.locks:
            if not lock.isAvailable(self, access):
                return False
        return True

    def acquireLocks(self):
        """
        I am called when a build is preparing to run. I try to claim all
        the locks that are needed for a build to happen. If I can't, then
        my caller should give up the build and try to get another worker
        to look at it.
        """
        log.msg("acquireLocks(worker %s, locks %s)" % (self, self.locks))
        if not self.locksAvailable():
            log.msg("worker %s can't lock, giving up" % (self, ))
            return False
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return True

    def releaseLocks(self):
        """
        I am called to release any locks after a build has finished
        """
        log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)

    def _lockReleased(self):
        """One of the locks for this worker was released; try scheduling
        builds."""
        if not self.botmaster:
            return  # oh well..
        self.botmaster.maybeStartBuildsForWorker(self.name)

    def _applyWorkerInfo(self, info):
        if not info:
            return

        self.worker_status.setAdmin(info.get("admin"))
        self.worker_status.setHost(info.get("host"))
        self.worker_status.setAccessURI(info.get("access_uri", None))
        self.worker_status.setVersion(info.get("version", "(unknown)"))

    @defer.inlineCallbacks
    def _getWorkerInfo(self):
        worker = yield self.master.data.get(
            ('workers', self.workerid))
        self._applyWorkerInfo(worker['workerinfo'])

    def setServiceParent(self, parent):
        # the manager needs to be set before setServiceParent, which calls
        # startService

        self.manager = parent
        return service.BuildbotService.setServiceParent(self, parent)

    @defer.inlineCallbacks
    def startService(self):
        self.updateLocks()
        self.startMissingTimer()
        self.workerid = yield self.master.data.updates.findWorkerId(
            self.name)

        yield self._getWorkerInfo()
        yield service.BuildbotService.startService(self)

    @defer.inlineCallbacks
    def reconfigService(self, name, password, max_builds=None,
                        notify_on_missing=None, missing_timeout=3600,
                        properties=None, locks=None, keepalive_interval=3600):
        # Given a Worker config arguments, configure this one identically.
        # Because Worker objects are remotely referenced, we can't replace them
        # without disconnecting the worker, yet there's no reason to do that.

        assert self.name == name
        self.password = password

        # adopt new instance's configuration parameters
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.notify_on_missing = notify_on_missing

        if self.missing_timeout != missing_timeout:
            running_missing_timer = self.missing_timer
            self.stopMissingTimer()
            self.missing_timeout = missing_timeout
            if running_missing_timer:
                self.startMissingTimer()

        if properties is None:
            properties = {}
        self.properties = Properties()
        self.properties.update(properties, "Worker")
        self.properties.setProperty("slavename", name, "Worker (deprecated)")
        self.properties.setProperty("workername", name, "Worker")

        # update our records with the worker manager
        if not self.registration:
            self.registration = yield self.master.workers.register(self)
        yield self.registration.update(self, self.master.config)

        self.updateLocks()

        bids = [
            b._builderid for b in self.botmaster.getBuildersForWorker(self.name)]
        yield self.master.data.updates.workerConfigured(self.workerid, self.master.masterid, bids)

        # update the attached worker's notion of which builders are attached.
        # This assumes that the relevant builders have already been configured,
        # which is why the reconfig_priority is set low in this class.
        yield self.updateWorker()

    @defer.inlineCallbacks
    def stopService(self):
        if self.registration:
            yield self.registration.unregister()
            self.registration = None
        self.stopMissingTimer()
        # mark this worker as configured for zero builders in this master
        yield self.master.data.updates.workerConfigured(self.workerid, self.master.masterid, [])
        yield service.BuildbotService.stopService(self)

    def startMissingTimer(self):
        if self.notify_on_missing and self.missing_timeout and self.parent:
            self.stopMissingTimer()  # in case it's already running
            self.missing_timer = self.master.reactor.callLater(self.missing_timeout,
                                                               self._missing_timer_fired)

    def stopMissingTimer(self):
        if self.missing_timer:
            self.missing_timer.cancel()
            self.missing_timer = None

    def isConnected(self):
        return self.conn

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return

        buildmaster = self.botmaster.master
        status = buildmaster.getStatus()
        text = "The Buildbot working for '%s'\n" % status.getTitle()
        text += ("has noticed that the worker named %s went away\n" %
                 self.name)
        text += "\n"
        text += ("It last disconnected at %s (buildmaster-local time)\n" %
                 time.ctime(time.time() - self.missing_timeout))  # approx
        text += "\n"
        text += "The admin on record (as reported by WORKER:info/admin)\n"
        text += "was '%s'.\n" % self.worker_status.getAdmin()
        text += "\n"
        text += "Sincerely,\n"
        text += " The Buildbot\n"
        text += " %s\n" % status.getTitleURL()
        text += "\n"
        text += "%s\n" % status.getURLForThing(self.worker_status)
        subject = "Buildbot: worker %s was lost" % (self.name,)
        return self._mail_missing_message(subject, text)

    def updateWorker(self):
        """Called to add or remove builders after the worker has connected.

        @return: a Deferred that indicates when an attached worker has
        accepted the new builders and/or released the old ones."""
        if self.conn:
            return self.sendBuilderList()
        else:
            return defer.succeed(None)

    @defer.inlineCallbacks
    def attached(self, conn):
        """This is called when the worker connects."""

        metrics.MetricCountEvent.log("AbstractWorker.attached_workers", 1)

        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this worker to all the
        # Builders that care about it.

        # Reset graceful shutdown status
        self.worker_status.setGraceful(False)
        # We want to know when the graceful shutdown flag changes
        self.worker_status.addGracefulWatcher(self._gracefulChanged)
        self.conn = conn
        self._old_builder_list = None  # clear builder list before proceed
        self.worker_status.addPauseWatcher(self._pauseChanged)

        self.worker_status.setConnected(True)

        self._applyWorkerInfo(conn.info)
        self.worker_commands = conn.info.get("slave_commands", {})
        self.worker_environ = conn.info.get("environ", {})
        self.worker_basedir = conn.info.get("basedir", None)
        self.worker_system = conn.info.get("system", None)

        self.conn.notifyOnDisconnect(self.detached)

        workerinfo = {
            'admin': conn.info.get('admin'),
            'host': conn.info.get('host'),
            'access_uri': conn.info.get('access_uri'),
            'version': conn.info.get('version')
        }

        yield self.master.data.updates.workerConnected(
            workerid=self.workerid,
            masterid=self.master.masterid,
            workerinfo=workerinfo
        )

        if self.worker_system == "nt":
            self.path_module = namedModule("ntpath")
        else:
            # most everything accepts / as separator, so posix should be a
            # reasonable fallback
            self.path_module = namedModule("posixpath")
        log.msg("bot attached")
        self.messageReceivedFromWorker()
        self.stopMissingTimer()
        self.master.status.workerConnected(self.name)
        yield self.updateWorker()
        yield self.botmaster.maybeStartBuildsForWorker(self.name)

    def messageReceivedFromWorker(self):
        now = time.time()
        self.lastMessageReceived = now
        self.worker_status.setLastMessageReceived(now)

    @defer.inlineCallbacks
    def detached(self):
        metrics.MetricCountEvent.log("AbstractWorker.attached_workers", -1)
        self.conn = None
        self._old_builder_list = []
        self.worker_status.removeGracefulWatcher(self._gracefulChanged)
        self.worker_status.removePauseWatcher(self._pauseChanged)
        self.worker_status.setConnected(False)
        log.msg("Worker.detached(%s)" % (self.name,))
        self.master.status.workerDisconnected(self.name)
        self.releaseLocks()
        yield self.master.data.updates.workerDisconnected(
            workerid=self.workerid,
            masterid=self.master.masterid,
        )

    def disconnect(self):
        """Forcibly disconnect the worker.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the worker is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a worker is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown worker. The second is
        when we wind up with two connections for the same worker, in which
        case we disconnect the older connection.
        """

        if self.conn is None:
            return defer.succeed(None)
        log.msg("disconnecting old worker %s now" % (self.name,))
        # When this Deferred fires, we'll be ready to accept the new worker
        return self._disconnect(self.conn)

    def _disconnect(self, conn):
        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new worker. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback
        def _disconnected():
            eventually(d.callback, None)
        conn.notifyOnDisconnect(_disconnected)
        conn.loseConnection()
        log.msg("waiting for worker to finish disconnecting")

        return d

    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForWorker(self.name)
        blist = [(b.name, b.config.workerbuilddir) for b in our_builders]
        if blist == self._old_builder_list:
            return defer.succeed(None)

        d = self.conn.remoteSetBuilderList(builders=blist)

        @d.addCallback
        def sentBuilderList(ign):
            self._old_builder_list = blist
            return ign
        return d

    def shutdownRequested(self):
        log.msg("worker %s wants to shut down" % (self.name,))
        self.worker_status.setGraceful(True)

    def addWorkerForBuilder(self, wfb):
        self.workerforbuilders[wfb.builder_name] = wfb

    def removeWorkerForBuilder(self, wfb):
        try:
            del self.workerforbuilders[wfb.builder_name]
        except KeyError:
            pass

    def buildFinished(self, wfb):
        """This is called when a build on this worker is finished."""
        self.botmaster.maybeStartBuildsForWorker(self.name)

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this worker
        can start a build.  This function can be used to limit overall
        concurrency on the worker.

        Note for subclassers: if a worker can become willing to start a build
        without any action on that worker (for example, by a resource in use on
        another worker becoming available), then you must arrange for
        L{maybeStartBuildsForWorker} to be called at that time, or builds on
        this worker will not start.
        """

        if self.worker_status.isPaused():
            return False

        # If we're waiting to shutdown gracefully, then we shouldn't
        # accept any new jobs.
        if self.worker_status.getGraceful():
            return False

        if self.max_builds:
            active_builders = [sb for sb in itervalues(self.workerforbuilders)
                               if sb.isBusy()]
            if len(active_builders) >= self.max_builds:
                return False

        if not self.locksAvailable():
            return False

        return True

    def _mail_missing_message(self, subject, text):
        # FIXME: This should be handled properly via the event api
        # we should send a missing message on the mq, and let any reporter
        # handle that

        # first, see if we have a MailNotifier we can use. This gives us a
        # fromaddr and a relayhost.
        buildmaster = self.botmaster.master
        for st in buildmaster.services:
            if isinstance(st, MailNotifier):
                break
        else:
            # if not, they get a default MailNotifier, which always uses SMTP
            # to localhost and uses a dummy fromaddr of "buildbot".
            log.msg("worker-missing msg using default MailNotifier")
            st = MailNotifier("buildbot")
        # now construct the mail

        m = Message()
        m.set_payload(text)
        m['Date'] = formatdate(localtime=True)
        m['Subject'] = subject
        m['From'] = st.fromaddr
        recipients = self.notify_on_missing
        m['To'] = ", ".join(recipients)
        d = st.sendMessage(m, recipients)
        # return the Deferred for testing purposes
        return d

    def _gracefulChanged(self, graceful):
        """This is called when our graceful shutdown setting changes"""
        self.maybeShutdown()

    @defer.inlineCallbacks
    def shutdown(self):
        """Shutdown the worker"""
        if not self.conn:
            log.msg("no remote; worker is already shut down")
            return

        yield self.conn.remoteShutdown()

    def maybeShutdown(self):
        """Shut down this worker if it has been asked to shut down gracefully,
        and has no active builders."""
        if not self.worker_status.getGraceful():
            return
        active_builders = [sb for sb in itervalues(self.workerforbuilders)
                           if sb.isBusy()]
        if active_builders:
            return
        d = self.shutdown()
        d.addErrback(log.err, 'error while shutting down worker')

    def _pauseChanged(self, paused):
        if paused is True:
            self.botmaster.master.status.workerPaused(self.name)
        else:
            self.botmaster.master.status.workerUnpaused(self.name)

    def pause(self):
        """Stop running new builds on the worker."""
        self.worker_status.setPaused(True)

    def unpause(self):
        """Restart running new builds on the worker."""
        self.worker_status.setPaused(False)
        self.botmaster.maybeStartBuildsForWorker(self.name)

    def isPaused(self):
        return self.worker_status.isPaused()
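
# Illustrative sketch, not part of the example above: canStartBuild() asks
# subclassers who gate builds on an external resource to arrange for
# maybeStartBuildsForWorker() to run once the resource frees up.  The base
# class name `Worker` and the `shared_resource` object are assumptions made
# for illustration.
class ResourceLimitedWorker(Worker):
    def canStartBuild(self):
        if not Worker.canStartBuild(self):
            return False
        # also refuse new builds while the shared resource is busy elsewhere
        return self.shared_resource.isFree()

    def resourceFreed(self):
        # hypothetical hook, called when the resource frees up; without it,
        # builds held back above would never start
        self.botmaster.maybeStartBuildsForWorker(self.name)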
Example no. 56
class BaseScheduler(service.MultiService, ComparableMixin):
    """
    Base class for all schedulers; this provides the equipment to manage
    reconfigurations and to handle basic scheduler state.  It also provides
    utility methods to begin various sorts of builds.

    Subclasses should add any configuration-derived attributes to
    C{BaseScheduler.compare_attrs}.
    """

    compare_attrs = ('name', 'builderNames', 'properties')

    def __init__(self, name, builderNames, properties):
        """
        Initialize a Scheduler.

        @param name: name of this scheduler (used as a key for state)
        @type name: unicode

        @param builderNames: list of builders this scheduler may start
        @type builderNames: list of unicode

        @param properties: properties to add to builds triggered by this
        scheduler
        @type properties: dictionary
        """
        service.MultiService.__init__(self)
        self.name = name
        "name of this scheduler; used to identify replacements on reconfig"

        errmsg = ("The builderNames argument to a scheduler must be a list "
                  "of Builder names.")
        assert isinstance(builderNames, (list, tuple)), errmsg
        for b in builderNames:
            assert isinstance(b, str), errmsg
        self.builderNames = builderNames
        "list of builder names to start in each buildset"

        self.properties = Properties()
        "properties that are contributed to each buildset"
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")

        self.schedulerid = None
        """ID of this scheduler; set just before the scheduler starts, and set
        to None after stopService is complete."""

        self.master = None
        """BuildMaster instance; set just before the scheduler starts, and set
        to None after stopService is complete."""

        # internal variables
        self._change_subscription = None
        self._state_lock = defer.DeferredLock()
        self._change_consumption_lock = defer.DeferredLock()

    ## service handling

    def _setUpScheduler(self, schedulerid, master, manager):
        # this is called by SchedulerManager *before* startService
        self.schedulerid = schedulerid
        self.master = master

    def startService(self):
        service.MultiService.startService(self)

    def stopService(self):
        d = defer.maybeDeferred(self._stopConsumingChanges)
        d.addCallback(lambda _: service.MultiService.stopService(self))
        return d

    def _shutDownScheduler(self):
        # called by SchedulerManager *after* stopService is complete
        self.schedulerid = None
        self.master = None

    ## state management

    class Thunk:
        pass

    def getState(self, key, default=Thunk):
        """
        For use by subclasses; get a named state value from the scheduler's
        state, defaulting to C{default}; raises C{KeyError} if default is not
        given and no value exists.  Scheduler must be started.  Returns the
        value via a deferred.
        """
        d = self.master.db.schedulers.getState(self.schedulerid)

        def get_value(state_dict):
            if key in state_dict:
                return state_dict[key]
            if default is BaseScheduler.Thunk:
                raise KeyError("state key '%s' not found" % (key, ))
            return default

        d.addCallback(get_value)
        return d

    @util.deferredLocked('_state_lock')
    def setState(self, key, value):
        """
        For use by subclasses; set a named state value in the scheduler's
        persistent state.  Note that value must be json-able. Returns a
        Deferred.

        Note that this method is safe if called simultaneously in the same
        process, although it is not safe between processes.
        """
        d = self.master.db.schedulers.getState(self.schedulerid)

        def set_value_and_store(state_dict):
            state_dict[key] = value
            return self.master.db.schedulers.setState(self.schedulerid,
                                                      state_dict)

        d.addCallback(set_value_and_store)
        return d
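
    # Illustrative usage, not part of this class: a subclass can persist a
    # high-water mark across restarts.  The 'last_change' key and the helper
    # are invented for illustration; both getState and setState return
    # Deferreds.
    #
    #     d = self.getState('last_change', 0)
    #     def process(last):
    #         new_last = do_work_since(last)  # hypothetical helper
    #         return self.setState('last_change', new_last)
    #     d.addCallback(process)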

    ## status queries

    # TODO: these aren't compatible with distributed schedulers

    def listBuilderNames(self):
        "Returns the list of builder names"
        return self.builderNames

    def getPendingBuildTimes(self):
        "Returns a list of the next times that builds are scheduled, if known."
        return []

    ## change handling

    def startConsumingChanges(self, fileIsImportant=None, change_filter=None):
        """
        Subclasses should call this method from startService to register to
        receive changes.  The BaseScheduler class will take care of filtering
        the changes (using change_filter) and (if fileIsImportant is not None)
        classifying them.  See L{gotChange}.  Returns a Deferred.

        @param fileIsImportant: a callable provided by the user to distinguish
        important and unimportant changes
        @type fileIsImportant: callable

        @param change_filter: a filter to determine which changes are even
        considered by this scheduler, or C{None} to consider all changes
        @type change_filter: L{buildbot.changes.filter.ChangeFilter} instance
        """
        assert fileIsImportant is None or callable(fileIsImportant)

        # register for changes with master
        assert not self._change_subscription

        def changeCallback(change):
            # ignore changes delivered while we're not running
            if not self._change_subscription:
                return

            if change_filter and not change_filter.filter_change(change):
                return
            if fileIsImportant:
                try:
                    important = fileIsImportant(change)
                except Exception:
                    log.err(failure.Failure(),
                            'in fileIsImportant check for %s' % change)
                    return
            else:
                important = True

            # use change_consumption_lock to ensure the service does not stop
            # while this change is being processed
            d = self._change_consumption_lock.acquire()
            d.addCallback(lambda _: self.gotChange(change, important))

            def release(x):
                self._change_consumption_lock.release()
                return x

            d.addBoth(release)
            d.addErrback(log.err, 'while processing change')

        self._change_subscription = self.master.subscribeToChanges(
            changeCallback)

        return defer.succeed(None)
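
    # Illustrative wiring, not part of this class: subclasses register from
    # startService.  ChangeFilter is the class named in the docstring above;
    # the branch name and the errback message are invented for illustration.
    #
    #     from buildbot.changes.filter import ChangeFilter
    #
    #     def startService(self):
    #         BaseScheduler.startService(self)
    #         d = self.startConsumingChanges(
    #             change_filter=ChangeFilter(branch='master'))
    #         d.addErrback(log.err, 'while subscribing to changes')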

    def _stopConsumingChanges(self):
        # (note: called automatically in stopService)

        # acquire the change consumption lock to ensure that any change
        # consumption is complete before we are done stopping consumption
        d = self._change_consumption_lock.acquire()

        def stop(x):
            if self._change_subscription:
                self._change_subscription.unsubscribe()
                self._change_subscription = None
            self._change_consumption_lock.release()

        d.addBoth(stop)
        return d

    def gotChange(self, change, important):
        """
        Called when a change is received; returns a Deferred.  If the
        C{fileIsImportant} parameter to C{startConsumingChanges} was C{None},
        then all changes are considered important.

        @param change: the new change object
        @type change: L{buildbot.changes.changes.Change} instance
        @param important: true if this is an important change, according to
        C{fileIsImportant}.
        @type important: boolean
        @returns: Deferred
        """
        raise NotImplementedError
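
    # A minimal sketch of the override (hypothetical subclass code): start a
    # buildset for every important change and ignore the rest.
    #
    #     def gotChange(self, change, important):
    #         if not important:
    #             return defer.succeed(None)
    #         return self.addBuildsetForChanges(reason='important change',
    #                                           changeids=[change.number])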

    ## starting builds

    def addBuildsetForLatest(self,
                             reason='',
                             external_idstring=None,
                             branch=None,
                             repository='',
                             project='',
                             builderNames=None,
                             properties=None):
        """
        Add a buildset for the 'latest' source in the given branch,
        repository, and project.  This will create a relative sourcestamp for
        the buildset.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's addBuildset
        method with the appropriate parameters.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param branch: branch to build (note that None often has a special meaning)
        @param repository: repository name for sourcestamp
        @param project: project name for sourcestamp
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        d = self.master.db.sourcestamps.addSourceStamp(branch=branch,
                                                       revision=None,
                                                       repository=repository,
                                                       project=project)
        d.addCallback(self.addBuildsetForSourceStamp,
                      reason=reason,
                      external_idstring=external_idstring,
                      builderNames=builderNames,
                      properties=properties)
        return d

    def addBuildsetForChanges(self,
                              reason='',
                              external_idstring=None,
                              changeids=None,
                              builderNames=None,
                              properties=None):
        """
        Add a buildset for the combination of the given changesets, creating
        a sourcestamp based on those changes.  The sourcestamp for the buildset
        will reference all of the indicated changes.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's addBuildset
        method with the appropriate parameters.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param changeids: nonempty list of changes to include in this buildset
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        assert changeids, "changeids must be a nonempty list"

        # attributes for this sourcestamp will be based on the most recent
        # change, so fetch the change with the highest id
        d = self.master.db.changes.getChange(max(changeids))

        def chdict2change(chdict):
            if not chdict:
                return None
            return changes.Change.fromChdict(self.master, chdict)

        d.addCallback(chdict2change)

        def create_sourcestamp(change):
            return self.master.db.sourcestamps.addSourceStamp(
                branch=change.branch,
                revision=change.revision,
                repository=change.repository,
                project=change.project,
                changeids=changeids)

        d.addCallback(create_sourcestamp)
        d.addCallback(self.addBuildsetForSourceStamp,
                      reason=reason,
                      external_idstring=external_idstring,
                      builderNames=builderNames,
                      properties=properties)
        return d

    def addBuildsetForSourceStamp(self,
                                  ssid,
                                  reason='',
                                  external_idstring=None,
                                  properties=None,
                                  builderNames=None):
        """
        Add a buildset for the given, already-existing sourcestamp.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's
        L{BuildMaster.addBuildset} method with the appropriate parameters, and
        return the same result.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        # combine properties
        if properties:
            properties.updateFromProperties(self.properties)
        else:
            properties = self.properties

        # apply the default builderNames
        if not builderNames:
            builderNames = self.builderNames

        # translate properties object into a dict as required by the
        # addBuildset method
        properties_dict = properties.asDict()

        # add the buildset
        return self.master.addBuildset(ssid=ssid,
                                       reason=reason,
                                       properties=properties_dict,
                                       builderNames=builderNames,
                                       external_idstring=external_idstring)
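
# A minimal sketch, assuming the BaseScheduler above: a hypothetical scheduler
# that builds the tip of one branch each time its (invented) fire() method is
# called; the reason string and branch name are placeholders.
class BuildLatestScheduler(BaseScheduler):
    def fire(self):
        d = self.addBuildsetForLatest(reason="periodic rebuild",
                                      branch="master")

        @d.addCallback
        def logIt(res):
            bsid, brids = res
            log.msg("started buildset %d" % bsid)
        return d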
Example no. 57
    def oneTest(self, props, expected):
        p = Properties()
        p.update(props, "test")
        r = repo.RepoDownloadsFromProperties(list(props))
        self.assertEqual(r.getRenderingFor(p), expected)
Example no. 58
class BaseScheduler(ClusteredBuildbotService, StateMixin):

    DEFAULT_CODEBASES = {'': {}}

    compare_attrs = ClusteredBuildbotService.compare_attrs + \
        ('builderNames', 'properties', 'codebases')

    def __init__(self, name, builderNames, properties=None, codebases=None):
        super().__init__(name=name)
        if codebases is None:
            codebases = self.DEFAULT_CODEBASES.copy()

        ok = True
        if interfaces.IRenderable.providedBy(builderNames):
            pass
        elif isinstance(builderNames, (list, tuple)):
            for b in builderNames:
                if not isinstance(b, str) and \
                        not interfaces.IRenderable.providedBy(b):
                    ok = False
        else:
            ok = False
        if not ok:
            config.error(
                "The builderNames argument to a scheduler must be a list "
                "of Builder names or an IRenderable object that will render"
                "to a list of builder names.")

        self.builderNames = builderNames

        if properties is None:
            properties = {}
        self.properties = Properties()
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")
        self.objectid = None

        # Set the codebases that are necessary to process the changes
        # These codebases will always result in a sourcestamp with or without
        # changes
        known_keys = set(['branch', 'repository', 'revision'])
        # codebases was defaulted above, so it cannot be None at this point
        if isinstance(codebases, list):
            codebases = dict((codebase, {}) for codebase in codebases)
        elif not isinstance(codebases, dict):
            config.error(
                "Codebases must be a dict of dicts, or list of strings")
        else:
            for codebase, attrs in codebases.items():
                if not isinstance(attrs, dict):
                    config.error("Codebases must be a dict of dicts")
                else:
                    unk = set(attrs) - known_keys
                    if unk:
                        config.error(f"Unknown codebase keys {', '.join(unk)} "
                                     f"for codebase {codebase}")

        self.codebases = codebases
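
        # For reference (an added note, not original code): every accepted
        # form normalizes to a dict mapping codebase name -> attribute dict:
        #
        #     codebases=['lib', 'app']            # list of strings
        #     codebases={'lib': {}, 'app': {}}    # the equivalent dict
        #     codebases={'lib': {'repository': 'git://example.com/lib.git',
        #                        'branch': 'main'}}   # with default attrs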

        # internal variables
        self._change_consumer = None
        self._enable_consumer = None
        self._change_consumption_lock = defer.DeferredLock()

        self.enabled = True

    def reconfigService(self, *args, **kwargs):
        raise NotImplementedError()

    # activity handling
    @defer.inlineCallbacks
    def activate(self):
        if not self.enabled:
            return None

        # even if we aren't called via _activityPoll(), at this point we
        # need to ensure the service id is set correctly
        if self.serviceid is None:
            self.serviceid = yield self._getServiceId()
            assert self.serviceid is not None

        schedulerData = yield self._getScheduler(self.serviceid)

        if schedulerData:
            self.enabled = schedulerData['enabled']

        if not self._enable_consumer:
            yield self.startConsumingEnableEvents()
        return None

    def _enabledCallback(self, key, msg):
        if msg['enabled']:
            self.enabled = True
            d = self.activate()
        else:
            d = self.deactivate()

            def fn(x):
                self.enabled = False

            d.addCallback(fn)
        return d

    @defer.inlineCallbacks
    def deactivate(self):
        if not self.enabled:
            return None
        yield self._stopConsumingChanges()
        return None

    # service handling

    def _getServiceId(self):
        return self.master.data.updates.findSchedulerId(self.name)

    def _getScheduler(self, sid):
        return self.master.db.schedulers.getScheduler(sid)

    def _claimService(self):
        return self.master.data.updates.trySetSchedulerMaster(self.serviceid,
                                                              self.master.masterid)

    def _unclaimService(self):
        return self.master.data.updates.trySetSchedulerMaster(self.serviceid,
                                                              None)

    # status queries

    # deprecated: these aren't compatible with distributed schedulers

    def listBuilderNames(self):
        return self.builderNames

    # change handling

    @defer.inlineCallbacks
    def startConsumingChanges(self, fileIsImportant=None, change_filter=None,
                              onlyImportant=False):
        assert fileIsImportant is None or callable(fileIsImportant)

        # register for changes with the data API
        assert not self._change_consumer
        self._change_consumer = yield self.master.mq.startConsuming(
            lambda k, m: self._changeCallback(k, m, fileIsImportant,
                                              change_filter, onlyImportant),
            ('changes', None, 'new'))

    @defer.inlineCallbacks
    def startConsumingEnableEvents(self):
        assert not self._enable_consumer
        self._enable_consumer = yield self.master.mq.startConsuming(
            self._enabledCallback,
            ('schedulers', str(self.serviceid), 'updated'))

    @defer.inlineCallbacks
    def _changeCallback(self, key, msg, fileIsImportant, change_filter,
                        onlyImportant):

        # ignore changes delivered while we're not running
        if not self._change_consumer:
            return

        # get a change object, since the API requires it
        chdict = yield self.master.db.changes.getChange(msg['changeid'])
        change = yield changes.Change.fromChdict(self.master, chdict)

        # filter it
        if change_filter:
            # There has been a change in how Gerrit handles branches in Buildbot 3.5 - ref-updated
            # events will now emit proper branch instead of refs/heads/<branch>. Below we detect
            # whether this breaks change filters.
            change_filter_may_be_broken = \
                change.category == 'ref-updated' and not change.branch.startswith('refs/')

            if change_filter_may_be_broken:
                old_change = copy.deepcopy(change)
                old_change.branch = f'refs/heads/{old_change.branch}'

                old_filter_result = change_filter.filter_change(old_change)
                new_filter_result = change_filter.filter_change(change)

                if old_filter_result != new_filter_result and \
                        'refs/heads/' in repr(change_filter.checks['branch']):

                    warn_deprecated('3.5.0',
                                    'Change filters must not expect ref-updated events from '
                                    'Gerrit to include refs/heads prefix for the branch attr.')

                    if not old_filter_result:
                        return
                else:
                    if not new_filter_result:
                        return
            else:
                if not change_filter.filter_change(change):
                    return

        if change.codebase not in self.codebases:
            log.msg(format='change contains codebase %(codebase)s that is '
                    'not processed by scheduler %(name)s',
                    codebase=change.codebase, name=self.name)
            return

        if fileIsImportant:
            try:
                important = fileIsImportant(change)
                if not important and onlyImportant:
                    return
            except Exception:
                log.err(failure.Failure(), f'in fileIsImportant check for {change}')
                return
        else:
            important = True

        # use change_consumption_lock to ensure the service does not stop
        # while this change is being processed
        d = self._change_consumption_lock.run(
            self.gotChange, change, important)
        d.addErrback(log.err, 'while processing change')

    def _stopConsumingChanges(self):
        # (note: called automatically in deactivate)

        # acquire the change consumption lock to ensure that any change
        # consumption is complete before we are done stopping consumption
        def stop():
            if self._change_consumer:
                self._change_consumer.stopConsuming()
                self._change_consumer = None
        return self._change_consumption_lock.run(stop)

    def gotChange(self, change, important):
        raise NotImplementedError

    # starting builds

    @defer.inlineCallbacks
    def addBuildsetForSourceStampsWithDefaults(self, reason, sourcestamps=None,
                                               waited_for=False, properties=None, builderNames=None,
                                               **kw):
        if sourcestamps is None:
            sourcestamps = []

        # convert sourcestamps to a dictionary keyed by codebase
        stampsByCodebase = {}
        for ss in sourcestamps:
            cb = ss['codebase']
            if cb in stampsByCodebase:
                raise RuntimeError("multiple sourcestamps with same codebase")
            stampsByCodebase[cb] = ss

        # Merge codebases with the passed list of sourcestamps
        # This results in a new sourcestamp for each codebase
        stampsWithDefaults = []
        for codebase in self.codebases:
            cb = yield self.getCodebaseDict(codebase)
            ss = {
                'codebase': codebase,
                'repository': cb.get('repository', ''),
                'branch': cb.get('branch', None),
                'revision': cb.get('revision', None),
                'project': '',
            }
            # apply info from passed sourcestamps onto the configured default
            # sourcestamp attributes for this codebase.
            ss.update(stampsByCodebase.get(codebase, {}))
            stampsWithDefaults.append(ss)

        # fill in any supplied sourcestamps that aren't for a codebase in the
        # scheduler's codebase dictionary
        for codebase in set(stampsByCodebase) - set(self.codebases):
            cb = stampsByCodebase[codebase]
            ss = {
                'codebase': codebase,
                'repository': cb.get('repository', ''),
                'branch': cb.get('branch', None),
                'revision': cb.get('revision', None),
                'project': '',
            }
            stampsWithDefaults.append(ss)

        rv = yield self.addBuildsetForSourceStamps(
            sourcestamps=stampsWithDefaults, reason=reason,
            waited_for=waited_for, properties=properties,
            builderNames=builderNames, **kw)
        return rv

    def getCodebaseDict(self, codebase):
        # Hook for subclasses to change codebase parameters when a codebase does
        # not have a change associated with it.
        try:
            return defer.succeed(self.codebases[codebase])
        except KeyError:
            return defer.fail()
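
    # A sketch of an override (hypothetical subclass code): supply codebase
    # defaults dynamically rather than from static configuration; the
    # `default_branch` attribute is invented for illustration.
    #
    #     @defer.inlineCallbacks
    #     def getCodebaseDict(self, codebase):
    #         base = yield super().getCodebaseDict(codebase)
    #         return dict(base, branch=self.default_branch)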

    @defer.inlineCallbacks
    def addBuildsetForChanges(self, waited_for=False, reason='',
                              external_idstring=None, changeids=None, builderNames=None,
                              properties=None,
                              **kw):
        if changeids is None:
            changeids = []
        changesByCodebase = {}

        def get_last_change_for_codebase(codebase):
            return max(changesByCodebase[codebase], key=lambda change: change["changeid"])

        # Changes are retrieved from database and grouped by their codebase
        for changeid in changeids:
            chdict = yield self.master.db.changes.getChange(changeid)
            changesByCodebase.setdefault(chdict["codebase"], []).append(chdict)

        sourcestamps = []
        for codebase in sorted(self.codebases):
            if codebase not in changesByCodebase:
                # codebase has no changes
                # create a sourcestamp that has no changes
                cb = yield self.getCodebaseDict(codebase)

                ss = {
                    'codebase': codebase,
                    'repository': cb.get('repository', ''),
                    'branch': cb.get('branch', None),
                    'revision': cb.get('revision', None),
                    'project': '',
                }
            else:
                lastChange = get_last_change_for_codebase(codebase)
                ss = lastChange['sourcestampid']
            sourcestamps.append(ss)

        # add one buildset, using the calculated sourcestamps
        bsid, brids = yield self.addBuildsetForSourceStamps(
            waited_for, sourcestamps=sourcestamps, reason=reason,
            external_idstring=external_idstring, builderNames=builderNames,
            properties=properties, **kw)

        return (bsid, brids)

    @defer.inlineCallbacks
    def addBuildsetForSourceStamps(self, waited_for=False, sourcestamps=None,
                                   reason='', external_idstring=None, properties=None,
                                   builderNames=None, **kw):
        if sourcestamps is None:
            sourcestamps = []
        # combine properties
        if properties:
            properties.updateFromProperties(self.properties)
        else:
            properties = self.properties

        # make a fresh copy that we actually can modify safely
        properties = Properties.fromDict(properties.asDict())

        # make extra info available from properties.render()
        properties.master = self.master
        properties.sourcestamps = []
        properties.changes = []
        for ss in sourcestamps:
            if isinstance(ss, int):
                # fetch actual sourcestamp and changes from data API
                properties.sourcestamps.append(
                    (yield self.master.data.get(('sourcestamps', ss))))
                properties.changes.extend(
                    (yield self.master.data.get(('sourcestamps', ss, 'changes'))))
            else:
                # sourcestamp with no change, see addBuildsetForChanges
                properties.sourcestamps.append(ss)

        for c in properties.changes:
            properties.updateFromProperties(Properties.fromDict(c['properties']))

        # apply the default builderNames
        if not builderNames:
            builderNames = self.builderNames

        # dynamically get the builder list to schedule
        builderNames = yield properties.render(builderNames)

        # Get the builder ids
        # Note that there is a data.updates.findBuilderId(name), but that
        # would only optimize the single-builder case, while the
        # multiple-builder case would probably suffer from the several db
        # requests needed.
        builderids = []
        for bldr in (yield self.master.data.get(('builders', ))):
            if bldr['name'] in builderNames:
                builderids.append(bldr['builderid'])

        # translate properties object into a dict as required by the
        # addBuildset method
        properties_dict = yield properties.render(properties.asDict())

        bsid, brids = yield self.master.data.updates.addBuildset(
            scheduler=self.name, sourcestamps=sourcestamps, reason=reason,
            waited_for=waited_for, properties=properties_dict, builderids=builderids,
            external_idstring=external_idstring, **kw)
        return (bsid, brids)
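
# A minimal end-to-end sketch, assuming the BaseScheduler above: a
# hypothetical scheduler that starts a buildset for every important change it
# consumes.  The class name and reason string are invented for illustration.
class EveryChangeScheduler(BaseScheduler):

    @defer.inlineCallbacks
    def activate(self):
        yield super().activate()
        if self.enabled:
            yield self.startConsumingChanges()

    def gotChange(self, change, important):
        if not important:
            return defer.succeed(None)
        return self.addBuildsetForChanges(reason='important change',
                                          changeids=[change.number])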