Example #1
    def testBasicWorker2_16(self):
        s = transfer.JSONPropertiesDownload("props.json")
        s.build = Mock()
        props = Properties()
        props.setProperty('key1', 'value1', 'test')
        s.build.getProperties.return_value = props
        s.build.getWorkerCommandVersion.return_value = '2.16'
        ss = Mock()
        ss.asDict.return_value = dict(revision="12345")
        s.build.getAllSourceStamps.return_value = [ss]

        s.worker = Mock()
        s.remote = Mock()

        s.start()

        for c in s.remote.method_calls:
            name, command, args = c
            commandName = command[3]
            kwargs = command[-1]
            if commandName == 'downloadFile':
                self.assertEqual(kwargs['slavedest'], 'props.json')
                reader = kwargs['reader']
                data = reader.remote_read(100)
                data = bytes2unicode(data)
                actualJson = json.loads(data)
                expectedJson = dict(sourcestamps=[ss.asDict()], properties={'key1': 'value1'})
                self.assertEqual(actualJson, expectedJson)
                break
        else:
            raise ValueError("No downloadFile command found")
Example #2
def buildIDSchedFunc(sched, t, ssid):
    """Generates a unique buildid for this change.

    Returns a Properties instance with 'buildid' set to the buildid to use.

    scheduler `sched`'s state is modified as a result."""
    state = sched.get_state(t)

    # Get the last buildid we scheduled from the database
    lastid = state.get('last_buildid', '19700101000000')

    incrementedid = incrementBuildID(lastid)
    nowid = genBuildID()

    # Our new buildid will be the highest of the last buildid incremented or
    # the buildid based on the current date
    newid = str(max(int(nowid), int(incrementedid)))

    # Save it in the scheduler's state so we don't generate the same one again.
    state['last_buildid'] = newid
    sched.set_state(t, state)

    props = Properties()
    props.setProperty('buildid', newid, 'buildIDSchedFunc')
    return props
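
The helpers genBuildID and incrementBuildID are not shown here. A minimal sketch of what they might look like, assuming a buildid is a 14-digit UTC timestamp, which is what the '19700101000000' default above suggests:

import time

def genBuildID(now=None):
    # assumption: a buildid is a YYYYMMDDHHMMSS string derived from the
    # current time, matching the '19700101000000' epoch default above
    return time.strftime("%Y%m%d%H%M%S", time.gmtime(now))

def incrementBuildID(buildid):
    # bump the previous buildid by one, keeping the 14-digit width
    return "%014d" % (int(buildid) + 1)
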
Example #3
    def testBasic(self):
        s = transfer.JSONPropertiesDownload("props.json")
        s.build = Mock()
        props = Properties()
        props.setProperty('key1', 'value1', 'test')
        s.build.getProperties.return_value = props
        s.build.getWorkerCommandVersion.return_value = '3.0'
        ss = Mock()
        ss.asDict.return_value = dict(revision="12345")
        s.build.getAllSourceStamps.return_value = [ss]

        s.worker = Mock()
        s.remote = Mock()

        s.start()

        for c in s.remote.method_calls:
            name, command, args = c
            commandName = command[3]
            kwargs = command[-1]
            if commandName == 'downloadFile':
                self.assertEquals(kwargs['workerdest'], 'props.json')
                reader = kwargs['reader']
                data = reader.remote_read(100)
                self.assertEquals(
                    data, json.dumps(dict(sourcestamps=[ss.asDict()], properties={'key1': 'value1'})))
                break
        else:
            raise ValueError("No downloadFile command found")
Example #4
    def testBasic(self):
        s = transfer.JSONPropertiesDownload("props.json")
        s.build = Mock()
        props = Properties()
        props.setProperty("key1", "value1", "test")
        s.build.getProperties.return_value = props
        s.build.getWorkerCommandVersion.return_value = 1
        ss = Mock()
        ss.asDict.return_value = dict(revision="12345")
        s.build.getAllSourceStamps.return_value = [ss]

        s.worker = Mock()
        s.remote = Mock()

        s.start()

        for c in s.remote.method_calls:
            name, command, args = c
            commandName = command[3]
            kwargs = command[-1]
            if commandName == "downloadFile":
                self.assertEquals(kwargs["slavedest"], "props.json")
                reader = kwargs["reader"]
                data = reader.remote_read(100)
                self.assertEquals(data, json.dumps(dict(sourcestamps=[ss.asDict()], properties={"key1": "value1"})))
                break
        else:
            raise ValueError("No downloadFile command found")
Example #5
class BaseScheduler(service.MultiService, util.ComparableMixin):
    """
    A Scheduler creates BuildSets and submits them to the BuildMaster.

    @ivar name: name of the scheduler

    @ivar properties: additional properties specified in this 
        scheduler's configuration
    @type properties: Properties object
    """
    implements(interfaces.IScheduler)

    def __init__(self, name, properties={}):
        """
        @param name: name for this scheduler

        @param properties: properties to be propagated from this scheduler
        @type properties: dict
        """
        service.MultiService.__init__(self)
        self.name = name
        self.properties = Properties()
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")

    def __repr__(self):
        # TODO: why can't id() return a positive number? %d is ugly.
        return "<Scheduler '%s' at %d>" % (self.name, id(self))

    def submitBuildSet(self, bs):
        self.parent.submitBuildSet(bs)

    def addChange(self, change):
        pass
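
A small usage sketch of the constructor behaviour described above (BaseScheduler is normally subclassed, but the property handling is the same):

sched = BaseScheduler("nightly", properties={"branch": "release"})

# the configured dict and the implicit 'scheduler' property both end up in
# self.properties, with "Scheduler" recorded as their source
assert sched.properties.getProperty("branch") == "release"
assert sched.properties.getProperty("scheduler") == "nightly"
assert sched.properties.getPropertySource("branch") == "Scheduler"
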
Example #6
class TestInterpolatePositional(unittest.TestCase):
    def setUp(self):
        self.props = Properties()
        self.build = FakeBuild(self.props)

    def test_string(self):
        command = Interpolate("test %s", "one fish")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual,
                        "test one fish")
        return d

    def test_twoString(self):
        command = Interpolate("test %s, %s", "one fish", "two fish")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual,
                        "test one fish, two fish")
        return d

    def test_deferred(self):
        renderable = DeferredRenderable()
        command = Interpolate("echo '%s'", renderable)
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual,
                            "echo 'red fish'")
        renderable.callback("red fish")
        return d

    def test_renderable(self):
        self.props.setProperty("buildername", "blue fish", "test")
        command = Interpolate("echo '%s'", Property("buildername"))
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual,
                            "echo 'blue fish'")
        return d
Example #7
    def testBasic(self):
        s = transfer.JSONPropertiesDownload("props.json")
        s.build = Mock()
        props = Properties()
        props.setProperty('key1', 'value1', 'test')
        s.build.getProperties.return_value = props
        s.build.getSlaveCommandVersion.return_value = 1
        ss = Mock()
        ss.asDict.return_value = dict(revision="12345")
        s.build.getSourceStamp.return_value = ss

        s.step_status = Mock()
        s.buildslave = Mock()
        s.remote = Mock()

        s.start()

        for c in s.remote.method_calls:
            name, command, args = c
            commandName = command[3]
            kwargs = command[-1]
            if commandName == 'downloadFile':
                self.assertEquals(kwargs['slavedest'], 'props.json')
                reader = kwargs['reader']
                data = reader.remote_read(100)
                self.assertEquals(data, json.dumps(dict(sourcestamp=ss.asDict(), properties={'key1': 'value1'})))
                break
        else:
            self.assert_(False, "No downloadFile command found")
Example #8
def getAndCheckProperties(req):
    """
    Fetch custom build properties from the HTTP request of a "Force build" or
    "Resubmit build" HTML form.
    Check the names for valid strings, and return None if a problem is found.
    Return a new Properties object containing each property found in req.
    """
    master = req.site.buildbot_service.master
    pname_validate = master.config.validation['property_name']
    pval_validate = master.config.validation['property_value']
    properties = Properties()
    i = 1
    while True:
        pname = req.args.get("property%dname" % i, [""])[0]
        pvalue = req.args.get("property%dvalue" % i, [""])[0]
        if not pname:
            break
        if not pname_validate.match(pname) \
                or not pval_validate.match(pvalue):
            log.msg("bad property name='%s', value='%s'" % (pname, pvalue))
            return None
        properties.setProperty(pname, pvalue, "Force Build Form")
        i = i + 1

    return properties
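
For illustration, the request arguments this loop expects are numbered propertyNname/propertyNvalue pairs starting at 1, with each value wrapped in a one-element list as Twisted presents form fields; a hypothetical submission:

# hypothetical form data; iteration stops at the first missing 'propertyNname'
req_args = {
    "property1name": ["force_clobber"],
    "property1value": ["true"],
    "property2name": ["branch"],
    "property2value": ["release-1.0"],
}
# getAndCheckProperties() would return a Properties object holding both
# entries, each attributed to the "Force Build Form" source, or None if any
# name or value fails the configured validation regexes.
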
Example #9
 def getProperties(self, req):
   properties = Properties()
   for i in itertools.count(0):
     pname = req.args.get("property%dname" % i, [""])[0]
     pvalue = req.args.get("property%dvalue" % i, [""])[0]
     if not pname or not pvalue:
       break
     properties.setProperty(pname, pvalue, "Custom Force Build Form")
   return [properties, None]
Example #10
 def _txn_get_properties_from_db(self, t, tablename, idname, id):
     # apparently you can't use argument placeholders for table names. Don't
     # call this with a weird-looking tablename.
     q = self.quoteq("SELECT property_name,property_value FROM %s WHERE %s=?" % (tablename, idname))
     t.execute(q, (id,))
     retval = Properties()
     for key, valuepair in t.fetchall():
         value, source = json.loads(valuepair)
         retval.setProperty(str(key), value, source)
     return retval
Example #11
 def requestBuild(self, builder, locale):
     # returns a Deferred that fires with an IBuildStatus object when the
     # build is finished
     props = Properties()
     props.setProperty('locale', locale, 'scheduler')
     props.setProperty('tree', 'app', 'scheduler')
     req = BuildRequest("forced build", SourceStamp(), builder,
                        properties=props)
     self.control.getBuilder(builder).requestBuild(req)
     return req.waitUntilFinished()
Example #12
    def test_defaultProperties(self):
        props = Properties()
        props.setProperty('foo', 1, 'Scheduler')
        props.setProperty('bar', 'bleh', 'Change')

        yield self.makeBuilder(defaultProperties={'bar': 'onoes', 'cuckoo': 42})

        self.bldr.setupProperties(props)

        self.assertEquals(props.getProperty('bar'), 'bleh')
        self.assertEquals(props.getProperty('cuckoo'), 42)
Example #13
    def testUpdateFromProperties(self):
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new")
        newprops.setProperty('b', 2, "new")
        self.props.updateFromProperties(newprops)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')
Example #14
  @defer.inlineCallbacks
  def _addBuildIdProperty(self, changeids, properties=None):
    """Adds the 'master_build_id' property if specified in the change log."""
    if not properties:
      properties = Properties()

    if len(changeids) == 1:
      change = yield self.master.db.changes.getChange(changeids[0])

      master_build_id = self._getMasterBuildId(change)
      if master_build_id:
        properties.setProperty('master_build_id', master_build_id,
                               'Scheduler')
    defer.returnValue(properties)
Example #15
    def createTriggerProperties(self):
        properties = self.build.getProperties()

        # make a new properties object from a dict rendered by the old 
        # properties object
        trigger_properties = Properties()
        trigger_properties.update(self.set_properties, "Trigger")
        for p in self.copy_properties:
            if p not in properties:
                continue
            trigger_properties.setProperty(p, properties[p],
                        "%s (in triggering build)" % properties.getPropertySource(p))
        return trigger_properties
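
set_properties and copy_properties come from the step's configuration; a hedged master.cfg sketch (the scheduler name and property names are made up, and the exact constructor arguments depend on the Buildbot version):

from buildbot.steps.trigger import Trigger

trigger_step = Trigger(
    schedulerNames=["downstream"],
    # copied verbatim into the triggered build's properties
    set_properties={"package_type": "full"},
    # property names forwarded from the triggering build, if present
    copy_properties=["got_revision"],
)
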
Example #16
    @defer.inlineCallbacks
    def gatherPropertiesAndChanges(self, **kwargs):
        properties = {}
        changeids = []

        for param in self.forcedProperties:
            yield defer.maybeDeferred(param.updateFromKwargs, self.master, properties, changeids, kwargs)

        changeids = map(lambda a: type(a)==int and a or a.number, changeids)

        real_properties = Properties()
        for pname, pvalue in properties.items():
            real_properties.setProperty(pname, pvalue, "Force Build Form")

        defer.returnValue((real_properties, changeids))
Example #17
class TestProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()

    def testDictBehavior(self):
        self.props.setProperty("do-tests", 1, "scheduler")
        self.props.setProperty("do-install", 2, "scheduler")

        self.assert_(self.props.has_key('do-tests'))
        self.failUnlessEqual(self.props['do-tests'], 1)
        self.failUnlessEqual(self.props['do-install'], 2)
        self.assertRaises(KeyError, lambda : self.props['do-nothing'])
        self.failUnlessEqual(self.props.getProperty('do-install'), 2)

    def testUpdate(self):
        self.props.setProperty("x", 24, "old")
        newprops = { 'a' : 1, 'b' : 2 }
        self.props.update(newprops, "new")

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateFromProperties(self):
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new")
        newprops.setProperty('b', 2, "new")
        self.props.updateFromProperties(newprops)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')
Example #18
 def _txn_get_properties_from_db(self, t, tablename, idname, id):
     # apparently you can't use argument placeholders for table names. Don't
     # call this with a weird-looking tablename.
     q = self.quoteq("SELECT property_name,property_value FROM %s WHERE %s=?" % (tablename, idname))
     t.execute(q, (id,))
     retval = Properties()
     for key, value_json in t.fetchall():
         value = json.loads(value_json)
         if tablename == "change_properties":
             # change_properties does not store a source
             value, source = value, "Change"
         else:
             # buildset_properties stores a tuple (value, source)
             value, source = value
         retval.setProperty(str(key), value, source)
     return retval
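
The write side is not shown; a short sketch of the encoding this read path assumes, using only json:

import json

# buildset_properties rows store the (value, source) pair as one JSON document
buildset_row_value = json.dumps(("force_clobber", "Force Build Form"))
# -> '["force_clobber", "Force Build Form"]'

# change_properties rows store only the value; the source defaults to "Change"
change_row_value = json.dumps("release-1.0")
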
Example #19
    def getSchedulersAndProperties(self):
        sch = self.schedulerNames[0]
        triggered_schedulers = []

        for env in self.config.matrix:
            props_to_set = Properties()
            props_to_set.setProperty("TRAVIS_PULL_REQUEST",
                                     self.getProperty("TRAVIS_PULL_REQUEST"), "inherit")
            for k, v in env.items():
                if k == "env":
                    props_to_set.update(v, ".travis.yml")
                else:
                    props_to_set.setProperty(k, v, ".travis.yml")

            triggered_schedulers.append((sch, props_to_set))
        return triggered_schedulers
Example #20
def getAndCheckProperties(req):
    """
Fetch custom build properties from the HTTP request of a "Force build" or
"Resubmit build" HTML form.
Check the names for valid strings, and return None if a problem is found.
Return a new Properties object containing each property found in req.
"""
    properties = Properties()
    for i in (1, 2, 3):
        pname = req.args.get("property%dname" % i, [""])[0]
        pvalue = req.args.get("property%dvalue" % i, [""])[0]
        if pname and pvalue:
            if not re.match(r"^[\w\.\-\/\~:]*$", pname) or not re.match(r"^[\w\.\-\/\~:]*$", pvalue):
                log.msg("bad property name='%s', value='%s'" % (pname, pvalue))
                return None
            properties.setProperty(pname, pvalue, "Force Build Form")
    return properties
Example #21
    def perspective_forcewait(self, builder="build", reason="", branch="", revision="", pdict={}):
        log.msg("forcewait called")

        branch_validate = self.master.config.validation["branch"]
        revision_validate = self.master.config.validation["revision"]
        pname_validate = self.master.config.validation["property_name"]
        pval_validate = self.master.config.validation["property_value"]
        if not branch_validate.match(branch):
            log.msg("bad branch '%s'" % branch)
            return
        if not revision_validate.match(revision):
            log.msg("bad revision '%s'" % revision)
            return

        properties = Properties()
        if pdict:
            for prop in pdict:
                pname = prop
                pvalue = pdict[prop]
                if not pname_validate.match(pname) or not pval_validate.match(pvalue):
                    log.msg("bad property name='%s', value='%s'" % (pname, pvalue))
                    return
                log.msg("set property %s %s" % (pname, pvalue))
                properties.setProperty(pname, pvalue, "Force Build PB")

        c = interfaces.IControl(self.master)
        b = c.getBuilder(builder)

        ss = SourceStamp(branch=branch, revision=revision)

        dr = defer.Deferred()

        def started(s):
            log.msg("force started")
            dr.callback(s.getNumber())

        def requested(breq):
            log.msg("force requested")
            breq.subscribe(started)

        d2 = b.submitBuildRequest(ss, reason, props=properties.asDict())
        d2.addCallback(requested)
        d2.addErrback(log.err, "while forcing a build")

        return dr
Example #22
    @defer.inlineCallbacks
    def gatherPropertiesAndChanges(self, collector, **kwargs):
        properties = {}
        changeids = []
        sourcestamps = {}

        for param in self.forcedProperties:
            yield collector.collectValidationErrors(param.fullName,
                                                    param.updateFromKwargs,
                                                    master=self.master,
                                                    properties=properties,
                                                    changes=changeids,
                                                    sourcestamps=sourcestamps,
                                                    collector=collector,
                                                    kwargs=kwargs)
        changeids = map(lambda a: type(a) == int and a or a.number, changeids)

        real_properties = Properties()
        for pname, pvalue in iteritems(properties):
            real_properties.setProperty(pname, pvalue, "Force Build Form")

        defer.returnValue((real_properties, changeids, sourcestamps))
Example #23
    def getSchedulersAndProperties(self):
        sch = self.schedulerNames[0]
        triggered_schedulers = []
        for env in self.config.matrix:
            props_to_set = Properties()
            props_to_set.setProperty("TRAVIS_PULL_REQUEST",
                                     self.getProperty("TRAVIS_PULL_REQUEST"),
                                     "inherit")
            flat_env = {}
            for k, v in env.items():
                if k == "env":
                    props_to_set.update(v, ".travis.yml")
                    flat_env.update(v)
                else:
                    props_to_set.setProperty(k, v, ".travis.yml")
                    flat_env[k] = v
            props_to_set.setProperty(
                "reason",
                u" | ".join(
                    sorted(str(k) + '=' + str(v)
                           for k, v in flat_env.items())),
                "spawner")

            triggered_schedulers.append((sch, props_to_set))
        return triggered_schedulers
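
For reference, a hypothetical shape of the parsed .travis.yml matrix this iterates over; keys other than "env" become properties directly, while "env" entries are merged in as a dict and also folded into the generated reason string:

# hypothetical parsed matrix; each entry spawns one triggered scheduler
config_matrix = [
    {"python": "2.7", "env": {"TESTS": "unit"}},
    {"python": "3.6", "env": {"TESTS": "integration", "DB": "postgres"}},
]
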
Example #24
    def command_BUILD(self, args, who):
        args = shlex.split(args)
        repos = { 'apiextractor' : None,
                  'generatorrunner' : None,
                  'shiboken' : None,
                  'pyside' : None
                }

        if not who in config.gitCustomers:
            self.send('%s, I\'ll not make this build for you. Do you think I found my genitalia in the trash?' % who)
            return

        builder = None
        for arg in args:
            try:
                repo, target = arg.split('=')
            except:
                self.send('Usage: ' + PySideContact.command_BUILD.usage)
                return
            if repo == 'builder':
                builder = target
            else:
                if not repo in repos:
                    self.send('%s, there\'s no "%s" repository' % (who, repo))
                    return
                repos[repo] = target

        slaves = ['build-pyside-' + arch for arch in config.slavesByArch.keys()]

        if builder:
            if builder not in slaves:
                self.send("%s, the slave '%s' that you asked for doesn't exist." % (who, builder))
                return
            slaves = [builder]

        for which in slaves:
            bc = self.getControl(which)

            build_properties = Properties()
            build_properties.setProperty('owner', who, 'Build requested from IRC bot on behalf of %s.' % who)
            for propName, propValue in [(pName, pValue) for pName, pValue in repos.items() if pValue]:
                build_properties.setProperty(propName + '_hashtag', propValue, 'Build requested from IRC bot.')

            for repoName, gitUrl in config.gitCustomers[who].items():
                build_properties.setProperty(repoName.lower() + '_gitUrl', config.baseGitURL + gitUrl,
                                             'Personal %s repository of %s.' % (repoName, who))

            r = "forced: by %s: %s" % (self.describeUser(who), 'He had his reasons.')
            s = SourceStamp(branch='BRANCH', revision='REVISION')
            req = BuildRequest(r, s, which, properties=build_properties)
            try:
                bc.requestBuildSoon(req)
            except interfaces.NoSlaveError:
                self.send("%s, sorry, I can't force a build: all slaves are offline" % who)
                return
            ireq = IrcBuildRequest(self)
            req.subscribe(ireq.started)
Example #25
 def get_props(self, config, options):
   """Overriding base class method."""
   props = Properties()
   props.setProperty('extra_args', options.get('extra_args', []),
                     self._PROPERTY_SOURCE)
   props.setProperty('slaves_request', options.get('slaves_request', []),
                     self._PROPERTY_SOURCE)
   props.setProperty('chromeos_config', config, self._PROPERTY_SOURCE)
   return props
Example #26
def buildIDSchedFunc(sched, t, ssid):
    """Generates a unique buildid for this change.

    Returns a Properties instance with 'buildid' set to the buildid to use.

    scheduler `sched`'s state is modified as a result."""
    state = sched.get_state(t)

    # Get the last buildid we scheduled from the database
    lastid = state.get("last_buildid", 0)

    newid = genBuildID()

    # Our new buildid will be the highest of the last buildid+1 or the buildid
    # based on the current date
    newid = str(max(int(newid), int(lastid) + 1))

    # Save it in the scheduler's state so we don't generate the same one again.
    state["last_buildid"] = newid
    sched.set_state(t, state)

    props = Properties()
    props.setProperty("buildid", newid, "buildIDSchedFunc")
    return props
Example #27
    @defer.inlineCallbacks
    def gatherPropertiesAndChanges(self, **kwargs):
        properties = {}
        changeids = []
        sourcestamps = {}

        for param in self.forcedProperties:
            yield defer.maybeDeferred(param.updateFromKwargs,
                                      master=self.master,
                                      properties=properties,
                                      changes=changeids,
                                      sourcestamps=sourcestamps,
                                      kwargs=kwargs)

        changeids = map(lambda a: isinstance(a, int) and a or a.number, changeids)
        layers = []
        for i in range(int(kwargs['num_layers'][0])):
            if (kwargs['layer'+str(i)][0] != '') and (kwargs['lrepo'+str(i)][0] != '') and (kwargs['lbranch'+str(i)][0] != ''):
                layer_string = kwargs['layer'+str(i)][0]+':'+kwargs['lrepo'+str(i)][0]+':'+kwargs['lbranch'+str(i)][0]
                layers.append(layer_string)

        properties['layers'] = ','.join(layers)
        real_properties = Properties()
        revdict = {}
        urldict = {}
        repo_overrides = ""
        for pname, pvalue in properties.items():
            real_properties.setProperty(pname, pvalue, "Force Build Form")
            if ('url' in pname) and pvalue:
                name = pname[0:pname.find('url')]
                urldict[name] = (name,pvalue)
        keys = urldict.keys()
        for key in keys:
            repo_overrides = repo_overrides+urldict[key][0] + ':' + urldict[key][1]+','
        real_properties.setProperty('repos', repo_overrides, "Force Build Form")

        defer.returnValue((real_properties, changeids, sourcestamps))
Example #28
class TestInterpolateProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()
        self.build = FakeBuild(self.props)

    def test_invalid_params(self):
        self.assertRaises(ValueError, lambda :
                Interpolate("%s %(foo)s", 1, foo=2))

    def test_properties(self):
        self.props.setProperty("buildername", "winbld", "test")
        command = Interpolate("echo buildby-%(prop:buildername)s")
        self.failUnlessEqual(self.build.render(command),
                             "echo buildby-winbld")
        
    def test_property_not_set(self):
        command = Interpolate("echo buildby-%(prop:buildername)s")
        self.failUnlessEqual(self.build.render(command),
                             "echo buildby-")

    def test_property_colon_minus(self):
        command = Interpolate("echo buildby-%(prop:buildername:-blddef)s")
        self.failUnlessEqual(self.build.render(command),
                             "echo buildby-blddef")

    def test_property_colon_tilde_true(self):
        self.props.setProperty("buildername", "winbld", "test")
        command = Interpolate("echo buildby-%(prop:buildername:~blddef)s")
        self.failUnlessEqual(self.build.render(command),
                             "echo buildby-winbld")

    def test_property_colon_tilde_false(self):
        self.props.setProperty("buildername", "", "test")
        command = Interpolate("echo buildby-%(prop:buildername:~blddef)s")
        self.failUnlessEqual(self.build.render(command),
                             "echo buildby-blddef")

    def test_property_colon_plus(self):
        self.props.setProperty("project", "proj1", "test")
        command = Interpolate("echo %(prop:project:+projectdefined)s")
        self.failUnlessEqual(self.build.render(command),
                             "echo projectdefined")
Example #29
    def testUpdateFromPropertiesNoRuntime(self):
        self.props.setProperty("a", 94, "old")
        self.props.setProperty("b", 84, "old")
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new", runtime=True)
        newprops.setProperty('b', 2, "new", runtime=False)
        newprops.setProperty('c', 3, "new", runtime=True)
        newprops.setProperty('d', 3, "new", runtime=False)
        self.props.updateFromPropertiesNoRuntime(newprops)

        self.failUnlessEqual(self.props.getProperty('a'), 94)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'old')
        self.failUnlessEqual(self.props.getProperty('b'), 2)
        self.failUnlessEqual(self.props.getPropertySource('b'), 'new')
        self.failUnlessEqual(self.props.getProperty('c'), None) # not updated
        self.failUnlessEqual(self.props.getProperty('d'), 3)
        self.failUnlessEqual(self.props.getPropertySource('d'), 'new')
        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
Example #30
  def get_props(self, config, options):
    """Overriding base class method."""
    props = Properties()

    props.setProperty('slaves_request', options.get('slaves_request', []),
                      self._PROPERTY_SOURCE)
    props.setProperty('cbb_config', config, self._PROPERTY_SOURCE)

    extra_args = options.get('extra_args')
    if extra_args:
      # This field can be quite large and can exceed BuildBot property limits.
      # Compress it, Base64-encode it, and prefix it with 'z:' so the consumer
      # knows it has to be decoded before use.
      extra_args = 'z:' + base64.b64encode(zlib.compress(json.dumps(
        extra_args)))
      props.setProperty('cbb_extra_args', extra_args,
                        self._PROPERTY_SOURCE)
    return props
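
The consuming side is not shown; a minimal decoding sketch that reverses the 'z:' + base64 + zlib encoding above:

import base64
import json
import zlib

def decode_cbb_extra_args(prop_value):
    # values written by get_props() are 'z:' + base64(zlib(json)); anything
    # without the prefix is assumed to be a plain, uncompressed value
    if prop_value.startswith('z:'):
        return json.loads(zlib.decompress(base64.b64decode(prop_value[2:])))
    return prop_value
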
Example #31
class MockSlave(object):
  def __init__(self, name, properties):
    self.properties = Properties()
    self.properties.update(properties, "BuildSlave")
    self.properties.setProperty("slavename", name, "BuildSlave")
Example #32
class TestWithProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()
        self.build = FakeBuild(self.props)

    def testInvalidParams(self):
        self.assertRaises(ValueError,
                          lambda: WithProperties("%s %(foo)s", 1, foo=2))

    def testBasic(self):
        # test basic substitution with WithProperties
        self.props.setProperty("revision", "47", "test")
        command = WithProperties("build-%s.tar.gz", "revision")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "build-47.tar.gz")
        return d

    def testDict(self):
        # test dict-style substitution with WithProperties
        self.props.setProperty("other", "foo", "test")
        command = WithProperties("build-%(other)s.tar.gz")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "build-foo.tar.gz")
        return d

    def testDictColonMinus(self):
        # test dict-style substitution with WithProperties
        self.props.setProperty("prop1", "foo", "test")
        command = WithProperties(
            "build-%(prop1:-empty)s-%(prop2:-empty)s.tar.gz")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "build-foo-empty.tar.gz")
        return d

    def testDictColonPlus(self):
        # test dict-style substitution with WithProperties
        self.props.setProperty("prop1", "foo", "test")
        command = WithProperties(
            "build-%(prop1:+exists)s-%(prop2:+exists)s.tar.gz")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "build-exists-.tar.gz")
        return d

    def testDictColonTernary(self):
        # test dict-style substitution with WithProperties
        self.props.setProperty("prop1", "foo", "test")
        command = WithProperties(
            "build-%(prop1:?:exists:missing)s-%(prop2:?:exists:missing)s.tar.gz"
        )
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "build-exists-missing.tar.gz")
        return d

    def testEmpty(self):
        # None should render as ''
        self.props.setProperty("empty", None, "test")
        command = WithProperties("build-%(empty)s.tar.gz")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "build-.tar.gz")
        return d

    def testRecursiveList(self):
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = [
            WithProperties("%(x)s %(y)s"), "and",
            WithProperties("%(y)s %(x)s")
        ]
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, ["10 20", "and", "20 10"])
        return d

    def testRecursiveTuple(self):
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = (WithProperties("%(x)s %(y)s"), "and",
                   WithProperties("%(y)s %(x)s"))
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, ("10 20", "and", "20 10"))
        return d

    def testRecursiveDict(self):
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = {
            WithProperties("%(x)s %(y)s"): WithProperties("%(y)s %(x)s")
        }
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, {"10 20": "20 10"})
        return d

    def testLambdaSubst(self):
        command = WithProperties('%(foo)s', foo=lambda _: 'bar')
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, 'bar')
        return d

    def testLambdaHasattr(self):
        command = WithProperties(
            '%(foo)s', foo=lambda b: b.hasProperty('x') and 'x' or 'y')
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, 'y')
        return d

    def testLambdaOverride(self):
        self.props.setProperty('x', 10, 'test')
        command = WithProperties('%(x)s', x=lambda _: 20)
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, '20')
        return d

    def testLambdaCallable(self):
        self.assertRaises(ValueError,
                          lambda: WithProperties('%(foo)s', foo='bar'))

    def testLambdaUseExisting(self):
        self.props.setProperty('x', 10, 'test')
        self.props.setProperty('y', 20, 'test')
        command = WithProperties(
            '%(z)s',
            z=lambda props: props.getProperty('x') + props.getProperty('y'))
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, '30')
        return d
Example #33
class BaseScheduler(ClusteredBuildbotService, StateMixin):

    DEFAULT_CODEBASES = {'': {}}

    compare_attrs = ClusteredBuildbotService.compare_attrs + \
        ('builderNames', 'properties', 'codebases')

    def __init__(self,
                 name,
                 builderNames,
                 properties=None,
                 codebases=DEFAULT_CODEBASES):
        super(BaseScheduler, self).__init__(name=name)

        ok = True
        if not isinstance(builderNames, (list, tuple)):
            ok = False
        else:
            for b in builderNames:
                if not isinstance(b, string_types):
                    ok = False
        if not ok:
            config.error(
                "The builderNames argument to a scheduler must be a list "
                "of Builder names.")

        self.builderNames = builderNames

        if properties is None:
            properties = {}
        self.properties = Properties()
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")
        self.objectid = None

        # Set the codebases that are necessary to process the changes
        # These codebases will always result in a sourcestamp with or without
        # changes
        known_keys = set(['branch', 'repository', 'revision'])
        if codebases is None:
            config.error("Codebases cannot be None")
        elif isinstance(codebases, list):
            codebases = dict((codebase, {}) for codebase in codebases)
        elif not isinstance(codebases, dict):
            config.error(
                "Codebases must be a dict of dicts, or list of strings")
        else:
            for codebase, attrs in iteritems(codebases):
                if not isinstance(attrs, dict):
                    config.error("Codebases must be a dict of dicts")
                else:
                    unk = set(attrs) - known_keys
                    if unk:
                        config.error(
                            "Unknown codebase keys %s for codebase %s" %
                            (', '.join(unk), codebase))

        self.codebases = codebases

        # internal variables
        self._change_consumer = None
        self._change_consumption_lock = defer.DeferredLock()

    # activity handling

    def activate(self):
        return defer.succeed(None)

    def deactivate(self):
        return defer.maybeDeferred(self._stopConsumingChanges)

    # service handling

    def _getServiceId(self):
        return self.master.data.updates.findSchedulerId(self.name)

    def _claimService(self):
        return self.master.data.updates.trySetSchedulerMaster(
            self.serviceid, self.master.masterid)

    def _unclaimService(self):
        return self.master.data.updates.trySetSchedulerMaster(
            self.serviceid, None)

    # status queries

    # deprecated: these aren't compatible with distributed schedulers

    def listBuilderNames(self):
        return self.builderNames

    # change handling

    @defer.inlineCallbacks
    def startConsumingChanges(self,
                              fileIsImportant=None,
                              change_filter=None,
                              onlyImportant=False):
        assert fileIsImportant is None or callable(fileIsImportant)

        # register for changes with the data API
        assert not self._change_consumer
        self._change_consumer = yield self.master.mq.startConsuming(
            lambda k, m: self._changeCallback(k, m, fileIsImportant,
                                              change_filter, onlyImportant),
            ('changes', None, 'new'))

    @defer.inlineCallbacks
    def _changeCallback(self, key, msg, fileIsImportant, change_filter,
                        onlyImportant):

        # ignore changes delivered while we're not running
        if not self._change_consumer:
            return

        # get a change object, since the API requires it
        chdict = yield self.master.db.changes.getChange(msg['changeid'])
        change = yield changes.Change.fromChdict(self.master, chdict)

        # filter it
        if change_filter and not change_filter.filter_change(change):
            return
        if change.codebase not in self.codebases:
            log.msg(format='change contains codebase %(codebase)s that is '
                    'not processed by scheduler %(name)s',
                    codebase=change.codebase,
                    name=self.name)
            return
        if fileIsImportant:
            try:
                important = fileIsImportant(change)
                if not important and onlyImportant:
                    return
            except Exception:
                log.err(failure.Failure(),
                        'in fileIsImportant check for %s' % change)
                return
        else:
            important = True

        # use change_consumption_lock to ensure the service does not stop
        # while this change is being processed
        d = self._change_consumption_lock.run(self.gotChange, change,
                                              important)
        d.addErrback(log.err, 'while processing change')

    def _stopConsumingChanges(self):
        # (note: called automatically in deactivate)

        # acquire the change consumption lock to ensure that any change
        # consumption in progress completes before we finish stopping consumption
        def stop():
            if self._change_consumer:
                self._change_consumer.stopConsuming()
                self._change_consumer = None

        return self._change_consumption_lock.run(stop)

    def gotChange(self, change, important):
        raise NotImplementedError

    # starting builds

    @defer.inlineCallbacks
    def addBuildsetForSourceStampsWithDefaults(self,
                                               reason,
                                               sourcestamps=None,
                                               waited_for=False,
                                               properties=None,
                                               builderNames=None,
                                               **kw):
        if sourcestamps is None:
            sourcestamps = []

        # convert sourcestamps to a dictionary keyed by codebase
        stampsByCodebase = {}
        for ss in sourcestamps:
            cb = ss['codebase']
            if cb in stampsByCodebase:
                raise RuntimeError("multiple sourcestamps with same codebase")
            stampsByCodebase[cb] = ss

        # Merge codebases with the passed list of sourcestamps
        # This results in a new sourcestamp for each codebase
        stampsWithDefaults = []
        for codebase in self.codebases:
            cb = yield self.getCodebaseDict(codebase)
            ss = {
                'codebase': codebase,
                'repository': cb.get('repository', ''),
                'branch': cb.get('branch', None),
                'revision': cb.get('revision', None),
                'project': '',
            }
            # apply info from passed sourcestamps onto the configured default
            # sourcestamp attributes for this codebase.
            ss.update(stampsByCodebase.get(codebase, {}))
            stampsWithDefaults.append(ss)

        # fill in any supplied sourcestamps that aren't for a codebase in the
        # scheduler's codebase dictionary
        for codebase in set(stampsByCodebase) - set(self.codebases):
            cb = stampsByCodebase[codebase]
            ss = {
                'codebase': codebase,
                'repository': cb.get('repository', ''),
                'branch': cb.get('branch', None),
                'revision': cb.get('revision', None),
                'project': '',
            }
            stampsWithDefaults.append(ss)

        rv = yield self.addBuildsetForSourceStamps(
            sourcestamps=stampsWithDefaults,
            reason=reason,
            waited_for=waited_for,
            properties=properties,
            builderNames=builderNames,
            **kw)
        defer.returnValue(rv)

    def getCodebaseDict(self, codebase):
        # Hook for subclasses to change codebase parameters when a codebase does
        # not have a change associated with it.
        try:
            return defer.succeed(self.codebases[codebase])
        except KeyError:
            return defer.fail()

    @defer.inlineCallbacks
    def addBuildsetForChanges(self,
                              waited_for=False,
                              reason='',
                              external_idstring=None,
                              changeids=None,
                              builderNames=None,
                              properties=None,
                              **kw):
        if changeids is None:
            changeids = []
        changesByCodebase = {}

        def get_last_change_for_codebase(codebase):
            return max(changesByCodebase[codebase],
                       key=lambda change: change["changeid"])

        # Changes are retrieved from database and grouped by their codebase
        for changeid in changeids:
            chdict = yield self.master.db.changes.getChange(changeid)
            changesByCodebase.setdefault(chdict["codebase"], []).append(chdict)

        sourcestamps = []
        for codebase in self.codebases:
            if codebase not in changesByCodebase:
                # codebase has no changes
                # create a sourcestamp that has no changes
                cb = yield self.getCodebaseDict(codebase)

                ss = {
                    'codebase': codebase,
                    'repository': cb.get('repository', ''),
                    'branch': cb.get('branch', None),
                    'revision': cb.get('revision', None),
                    'project': '',
                }
            else:
                lastChange = get_last_change_for_codebase(codebase)
                ss = lastChange['sourcestampid']
            sourcestamps.append(ss)

        # add one buildset, using the calculated sourcestamps
        bsid, brids = yield self.addBuildsetForSourceStamps(
            waited_for,
            sourcestamps=sourcestamps,
            reason=reason,
            external_idstring=external_idstring,
            builderNames=builderNames,
            properties=properties,
            **kw)

        defer.returnValue((bsid, brids))

    @defer.inlineCallbacks
    def addBuildsetForSourceStamps(self,
                                   waited_for=False,
                                   sourcestamps=None,
                                   reason='',
                                   external_idstring=None,
                                   properties=None,
                                   builderNames=None,
                                   **kw):
        if sourcestamps is None:
            sourcestamps = []
        # combine properties
        if properties:
            properties.updateFromProperties(self.properties)
        else:
            properties = self.properties

        # apply the default builderNames
        if not builderNames:
            builderNames = self.builderNames

        # Get the builder ids
        # Note that there is a data.updates.findBuilderId(name)
        # but that would only optimize the single-builder case, while the
        # multiple-builder case would likely be hurt by the extra db
        # requests needed.
        builderids = list()
        for bldr in (yield self.master.data.get(('builders', ))):
            if bldr['name'] in builderNames:
                builderids.append(bldr['builderid'])

        # translate properties object into a dict as required by the
        # addBuildset method
        properties_dict = properties.asDict()

        bsid, brids = yield self.master.data.updates.addBuildset(
            scheduler=self.name,
            sourcestamps=sourcestamps,
            reason=reason,
            waited_for=waited_for,
            properties=properties_dict,
            builderids=builderids,
            external_idstring=external_idstring,
            **kw)
        defer.returnValue((bsid, brids))
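
gotChange is the only hook a concrete scheduler has to supply; a minimal, hypothetical subclass that turns every important change into a buildset might look like the following sketch (it assumes the same imports as the excerpt above; real schedulers such as SingleBranchScheduler add tree-stable timers and state persistence):

class EveryChangeScheduler(BaseScheduler):

    def activate(self):
        d = super(EveryChangeScheduler, self).activate()
        # start listening for changes as soon as the scheduler becomes active
        d.addCallback(lambda _: self.startConsumingChanges())
        return d

    def gotChange(self, change, important):
        if not important:
            return defer.succeed(None)
        return self.addBuildsetForChanges(reason='important change detected',
                                          changeids=[change.number])
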
Example #34
class TestProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()

    def testDictBehavior(self):
        self.props.setProperty("do-tests", 1, "scheduler")
        self.props.setProperty("do-install", 2, "scheduler")

        self.assert_(self.props.has_key('do-tests'))
        self.failUnlessEqual(self.props['do-tests'], 1)
        self.failUnlessEqual(self.props['do-install'], 2)
        self.assertRaises(KeyError, lambda: self.props['do-nothing'])
        self.failUnlessEqual(self.props.getProperty('do-install'), 2)

    def testAsList(self):
        self.props.setProperty("happiness", 7, "builder")
        self.props.setProperty("flames", True, "tester")

        self.assertEqual(sorted(self.props.asList()),
                         [('flames', True, 'tester'),
                          ('happiness', 7, 'builder')])

    def testAsDict(self):
        self.props.setProperty("msi_filename", "product.msi", 'packager')
        self.props.setProperty("dmg_filename", "product.dmg", 'packager')

        self.assertEqual(
            self.props.asDict(),
            dict(msi_filename=('product.msi', 'packager'),
                 dmg_filename=('product.dmg', 'packager')))

    def testUpdate(self):
        self.props.setProperty("x", 24, "old")
        newprops = {'a': 1, 'b': 2}
        self.props.update(newprops, "new")

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateFromProperties(self):
        self.props.setProperty("a", 94, "old")
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new")
        newprops.setProperty('b', 2, "new")
        self.props.updateFromProperties(newprops)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateFromPropertiesNoRuntime(self):
        self.props.setProperty("a", 94, "old")
        self.props.setProperty("b", 84, "old")
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new", runtime=True)
        newprops.setProperty('b', 2, "new", runtime=False)
        newprops.setProperty('c', 3, "new", runtime=True)
        newprops.setProperty('d', 3, "new", runtime=False)
        self.props.updateFromPropertiesNoRuntime(newprops)

        self.failUnlessEqual(self.props.getProperty('a'), 94)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'old')
        self.failUnlessEqual(self.props.getProperty('b'), 2)
        self.failUnlessEqual(self.props.getPropertySource('b'), 'new')
        self.failUnlessEqual(self.props.getProperty('c'), None)  # not updated
        self.failUnlessEqual(self.props.getProperty('d'), 3)
        self.failUnlessEqual(self.props.getPropertySource('d'), 'new')
        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
Example #35
    def command_FORCE(self, args):
        # FIXME: NEED TO THINK ABOUT!
        errReply = "try '%s'" % (self.command_FORCE.usage)
        args = self.splitArgs(args)
        if not args:
            raise UsageError(errReply)
        what = args.pop(0)
        if what != "build":
            raise UsageError(errReply)
        opts = ForceOptions()
        opts.parseOptions(args)

        builderName = opts['builder']
        builder = yield self.getBuilder(buildername=builderName)
        branch = opts['branch']
        revision = opts['revision']
        codebase = opts['codebase']
        project = opts['project']
        reason = opts['reason']
        props = opts['props']

        if builderName is None:
            raise UsageError("you must provide a Builder, " + errReply)

        # keep weird stuff out of the branch, revision, and properties args.
        branch_validate = self.master.config.validation['branch']
        revision_validate = self.master.config.validation['revision']
        pname_validate = self.master.config.validation['property_name']
        pval_validate = self.master.config.validation['property_value']
        if branch and not branch_validate.match(branch):
            log.msg("bad branch '%s'" % branch)
            self.send("sorry, bad branch '%s'" % branch)
            return
        if revision and not revision_validate.match(revision):
            log.msg("bad revision '%s'" % revision)
            self.send("sorry, bad revision '%s'" % revision)
            return

        properties = Properties()
        if props:
            # split props into name:value dict
            pdict = {}
            propertylist = props.split(",")
            for i in range(0, len(propertylist)):
                splitproperty = propertylist[i].split("=", 1)
                pdict[splitproperty[0]] = splitproperty[1]

            # set properties
            for prop in pdict:
                pname = prop
                pvalue = pdict[prop]
                if not pname_validate.match(pname) \
                        or not pval_validate.match(pvalue):
                    log.msg("bad property name='%s', value='%s'" %
                            (pname, pvalue))
                    self.send("sorry, bad property name='%s', value='%s'" %
                              (pname, pvalue))
                    return
                properties.setProperty(pname, pvalue, "Force Build chat")

        reason = u"forced: by %s: %s" % (self.describeUser(), reason)
        try:
            yield self.master.data.updates.addBuildset(
                builderids=[builder['builderid']],
                # For now, we just use
                # this as the id.
                scheduler=u"status.words",
                sourcestamps=[{
                    'codebase': codebase,
                    'branch': branch,
                    'revision': revision,
                    'project': project,
                    'repository': "null"
                }],
                reason=reason,
                properties=properties.asDict(),
                waited_for=False)
        except AssertionError as e:
            self.send("I can't: " + str(e))
Example #36
class TestProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()

    def testDictBehavior(self):
        # note that dictionary-like behavior is deprecated and not exposed to
        # users!
        self.props.setProperty("do-tests", 1, "scheduler")
        self.props.setProperty("do-install", 2, "scheduler")

        self.assert_(self.props.has_key('do-tests'))
        self.failUnlessEqual(self.props['do-tests'], 1)
        self.failUnlessEqual(self.props['do-install'], 2)
        self.assertRaises(KeyError, lambda: self.props['do-nothing'])
        self.failUnlessEqual(self.props.getProperty('do-install'), 2)
        self.assertIn('do-tests', self.props)
        self.assertNotIn('missing-do-tests', self.props)

    def testAsList(self):
        self.props.setProperty("happiness", 7, "builder")
        self.props.setProperty("flames", True, "tester")

        self.assertEqual(sorted(self.props.asList()),
                         [('flames', True, 'tester'),
                          ('happiness', 7, 'builder')])

    def testAsDict(self):
        self.props.setProperty("msi_filename", "product.msi", 'packager')
        self.props.setProperty("dmg_filename", "product.dmg", 'packager')

        self.assertEqual(
            self.props.asDict(),
            dict(msi_filename=('product.msi', 'packager'),
                 dmg_filename=('product.dmg', 'packager')))

    def testUpdate(self):
        self.props.setProperty("x", 24, "old")
        newprops = {'a': 1, 'b': 2}
        self.props.update(newprops, "new")

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateRuntime(self):
        self.props.setProperty("x", 24, "old")
        newprops = {'a': 1, 'b': 2}
        self.props.update(newprops, "new", runtime=True)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')
        self.assertEqual(self.props.runtime, set(['a', 'b']))

    def testUpdateFromProperties(self):
        self.props.setProperty("a", 94, "old")
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new")
        newprops.setProperty('b', 2, "new")
        self.props.updateFromProperties(newprops)

        self.failUnlessEqual(self.props.getProperty('x'), 24)
        self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
        self.failUnlessEqual(self.props.getProperty('a'), 1)
        self.failUnlessEqual(self.props.getPropertySource('a'), 'new')

    def testUpdateFromPropertiesNoRuntime(self):
        self.props.setProperty("a", 94, "old")
        self.props.setProperty("b", 84, "old")
        self.props.setProperty("x", 24, "old")
        newprops = Properties()
        newprops.setProperty('a', 1, "new", runtime=True)
        newprops.setProperty('b', 2, "new", runtime=False)
        newprops.setProperty('c', 3, "new", runtime=True)
        newprops.setProperty('d', 3, "new", runtime=False)
        self.props.updateFromPropertiesNoRuntime(newprops)

        self.assertEqual(self.props.getProperty('a'), 94)
        self.assertEqual(self.props.getPropertySource('a'), 'old')
        self.assertEqual(self.props.getProperty('b'), 2)
        self.assertEqual(self.props.getPropertySource('b'), 'new')
        self.assertEqual(self.props.getProperty('c'), None)  # not updated
        self.assertEqual(self.props.getProperty('d'), 3)
        self.assertEqual(self.props.getPropertySource('d'), 'new')
        self.assertEqual(self.props.getProperty('x'), 24)
        self.assertEqual(self.props.getPropertySource('x'), 'old')

    @compat.usesFlushWarnings
    def test_setProperty_notJsonable(self):
        self.props.setProperty("project", ConstantRenderable('testing'),
                               "test")
        self.props.setProperty("project", object, "test")
        self.assertEqual(
            len(self.flushWarnings([self.test_setProperty_notJsonable])), 2)

    # IProperties methods

    def test_getProperty(self):
        self.props.properties['p1'] = (['p', 1], 'test')
        self.assertEqual(self.props.getProperty('p1'), ['p', 1])

    def test_getProperty_default_None(self):
        self.assertEqual(self.props.getProperty('p1'), None)

    def test_getProperty_default(self):
        self.assertEqual(self.props.getProperty('p1', 2), 2)

    def test_hasProperty_false(self):
        self.assertFalse(self.props.hasProperty('x'))

    def test_hasProperty_true(self):
        self.props.properties['x'] = (False, 'test')
        self.assertTrue(self.props.hasProperty('x'))

    def test_has_key_false(self):
        self.assertFalse(self.props.has_key('x'))

    def test_setProperty(self):
        self.props.setProperty('x', 'y', 'test')
        self.assertEqual(self.props.properties['x'], ('y', 'test'))
        self.assertNotIn('x', self.props.runtime)

    def test_setProperty_runtime(self):
        self.props.setProperty('x', 'y', 'test', runtime=True)
        self.assertEqual(self.props.properties['x'], ('y', 'test'))
        self.assertIn('x', self.props.runtime)

    def test_setProperty_no_source(self):
        self.assertRaises(TypeError, lambda: self.props.setProperty('x', 'y'))

    def test_getProperties(self):
        self.assertIdentical(self.props.getProperties(), self.props)

    def test_getBuild(self):
        self.assertIdentical(self.props.getBuild(), self.props.build)

    def test_render(self):
        class Renderable(object):
            implements(IRenderable)

            def getRenderingFor(self, props):
                return props.getProperty('x') + 'z'

        self.props.setProperty('x', 'y', 'test')
        d = self.props.render(Renderable())
        d.addCallback(self.assertEqual, 'yz')
        return d
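
The tests above exercise the core Properties API: setProperty() records a (value, source) pair, getProperty() accepts a default, update() and updateFromProperties() merge new values, and asList()/asDict() expose the stored data. A minimal standalone sketch of that API follows; the import path is an assumption, since the excerpt does not show the file's imports.

from buildbot.process.properties import Properties  # assumed import path

props = Properties()
props.setProperty("do-tests", 1, "scheduler")    # every property records its source
props.update({"a": 1, "b": 2}, "new")            # bulk update from a plain dict

print(props.getProperty("do-tests"))             # 1
print(props.getProperty("missing", "fallback"))  # default returned when the property is unset
print(props.getPropertySource("a"))              # 'new'
print(sorted(props.asList()))                    # [('a', 1, 'new'), ('b', 2, 'new'), ('do-tests', 1, 'scheduler')]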
Exemplo n.º 37
class AbstractWorker(service.BuildbotService):
    """This is the master-side representative for a remote buildbot worker.
    There is exactly one for each worker described in the config file (the
    c['workers'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a worker -- a remote machine capable of
    running builds.  I am instantiated by the configuration file, and can be
    subclassed to add extra functionality."""

    # reconfig workers after builders
    reconfig_priority = 64

    quarantine_timer = None
    quarantine_timeout = quarantine_initial_timeout = 10
    quarantine_max_timeout = 60 * 60
    start_missing_on_startup = True
    DEFAULT_MISSING_TIMEOUT = 3600
    DEFAULT_KEEPALIVE_INTERVAL = 3600

    # override to True if isCompatibleWithBuild may return False
    builds_may_be_incompatible = False

    def checkConfig(self,
                    name,
                    password,
                    max_builds=None,
                    notify_on_missing=None,
                    missing_timeout=None,
                    properties=None,
                    defaultProperties=None,
                    locks=None,
                    keepalive_interval=DEFAULT_KEEPALIVE_INTERVAL,
                    machine_name=None):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this worker (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this worker
        @type properties: dictionary
        @param defaultProperties: properties that will be applied to builds
                                  run on this worker only if the property
                                  has not been set by another source
        @type defaultProperties: dictionary
        @param locks: A list of locks that must be acquired before this worker
                      can be used
        @type locks: dictionary
        @param machine_name: The name of the machine to associate with the
                             worker.
        """
        self.name = name = bytes2unicode(name)
        self.machine_name = machine_name

        self.password = password

        # protocol registration
        self.registration = None

        self._graceful = False
        self._paused = False

        # these are set when the service is started
        self.manager = None
        self.workerid = None

        self.info = Properties()
        self.worker_commands = None
        self.workerforbuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.lock_subscriptions = []

        self.properties = Properties()
        self.properties.update(properties or {}, "Worker")
        self.properties.setProperty("workername", name, "Worker")
        self.defaultProperties = Properties()
        self.defaultProperties.update(defaultProperties or {}, "Worker")

        if self.machine_name is not None:
            self.properties.setProperty('machine_name', self.machine_name,
                                        'Worker')
        self.machine = None

        self.lastMessageReceived = 0

        if notify_on_missing is None:
            notify_on_missing = []
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            if not isinstance(i, str):
                config.error('notify_on_missing arg %r is not a string' %
                             (i, ))

        self.missing_timeout = missing_timeout
        self.missing_timer = None

        # a protocol connection, if we're currently connected
        self.conn = None

        # during disconnection self.conn will be set to None before all disconnection notifications
        # are delivered. During that period _pending_disconnection_delivery_notifier is set to a
        # notifier that lets interested users wait until all disconnection notifications have been
        # delivered.
        self._pending_disconnection_delivery_notifier = None

        self._old_builder_list = None
        self._configured_builderid_list = None

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, repr(self.name))

    @property
    def workername(self):
        # workername is now an alias to twisted.Service's name
        return self.name

    @property
    def botmaster(self):
        if self.master is None:
            return None
        return self.master.botmaster

    @defer.inlineCallbacks
    def updateLocks(self):
        """Convert the L{LockAccess} objects in C{self.locks} into real lock
        objects, while also maintaining the subscriptions to lock releases."""
        # unsubscribe from any old locks
        for s in self.lock_subscriptions:
            s.unsubscribe()

        # convert locks into their real form
        locks = yield self.botmaster.getLockFromLockAccesses(
            self.access, self.config_version)

        self.locks = [(l.getLockForWorker(self.workername), la)
                      for l, la in locks]
        self.lock_subscriptions = [
            l.subscribeToReleases(self._lockReleased) for l, la in self.locks
        ]

    def locksAvailable(self):
        """
        I am called to see if all the locks I depend on are available,
        in which case I return True; otherwise I return False
        """
        if not self.locks:
            return True
        for lock, access in self.locks:
            if not lock.isAvailable(self, access):
                return False
        return True

    def acquireLocks(self):
        """
        I am called when a build is preparing to run. I try to claim all
        the locks that are needed for a build to happen. If I can't, then
        my caller should give up the build and try to get another worker
        to look at it.
        """
        log.msg("acquireLocks(worker {}, locks {})".format(self, self.locks))
        if not self.locksAvailable():
            log.msg("worker {} can't lock, giving up".format(self))
            return False
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return True

    def releaseLocks(self):
        """
        I am called to release any locks after a build has finished
        """
        log.msg("releaseLocks({}): {}".format(self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)

    def _lockReleased(self):
        """One of the locks for this worker was released; try scheduling
        builds."""
        if not self.botmaster:
            return  # oh well..
        self.botmaster.maybeStartBuildsForWorker(self.name)

    def _applyWorkerInfo(self, info):
        if not info:
            return

        # set defaults
        self.info.setProperty("version", "(unknown)", "Worker")

        # store everything as Properties
        for k, v in info.items():
            if k in ('environ', 'worker_commands'):
                continue
            self.info.setProperty(k, v, "Worker")

    @defer.inlineCallbacks
    def _getWorkerInfo(self):
        worker = yield self.master.data.get(('workers', self.workerid))
        self._applyWorkerInfo(worker['workerinfo'])

    def setServiceParent(self, parent):
        # botmaster needs to be set before setServiceParent, which calls
        # startService

        self.manager = parent
        return super().setServiceParent(parent)

    @defer.inlineCallbacks
    def startService(self):
        # tracks config version for locks
        self.config_version = self.master.config_version

        self.updateLocks()
        self.workerid = yield self.master.data.updates.findWorkerId(self.name)

        self.workerActionConsumer = yield self.master.mq.startConsuming(
            self.controlWorker,
            ("control", "worker", str(self.workerid), None))

        yield self._getWorkerInfo()
        yield super().startService()

        # startMissingTimer wants the service to be running to really start
        if self.start_missing_on_startup:
            self.startMissingTimer()

    @defer.inlineCallbacks
    def reconfigService(self,
                        name,
                        password,
                        max_builds=None,
                        notify_on_missing=None,
                        missing_timeout=DEFAULT_MISSING_TIMEOUT,
                        properties=None,
                        defaultProperties=None,
                        locks=None,
                        keepalive_interval=DEFAULT_KEEPALIVE_INTERVAL,
                        machine_name=None):
        # Given a Worker config arguments, configure this one identically.
        # Because Worker objects are remotely referenced, we can't replace them
        # without disconnecting the worker, yet there's no reason to do that.

        assert self.name == name
        self.password = password

        # adopt new instance's configuration parameters
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        if notify_on_missing is None:
            notify_on_missing = []
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing

        if self.missing_timeout != missing_timeout:
            running_missing_timer = self.missing_timer
            self.stopMissingTimer()
            self.missing_timeout = missing_timeout
            if running_missing_timer:
                self.startMissingTimer()

        self.properties = Properties()
        self.properties.update(properties or {}, "Worker")
        self.properties.setProperty("workername", name, "Worker")
        self.defaultProperties = Properties()
        self.defaultProperties.update(defaultProperties or {}, "Worker")

        # Note that before first reconfig self.machine will always be None and
        # out of sync with self.machine_name, thus more complex logic is needed.
        if self.machine is not None and self.machine_name != machine_name:
            self.machine.unregisterWorker(self)
            self.machine = None

        self.machine_name = machine_name
        if self.machine is None and self.machine_name is not None:
            self.machine = self.master.machine_manager.getMachineByName(
                self.machine_name)
            if self.machine is not None:
                self.machine.registerWorker(self)
                self.properties.setProperty("machine_name", self.machine_name,
                                            "Worker")
            else:
                log.err("Unknown machine '{}' for worker '{}'".format(
                    self.machine_name, self.name))

        # update our records with the worker manager
        if not self.registration:
            self.registration = yield self.master.workers.register(self)
        yield self.registration.update(self, self.master.config)

        # tracks config version for locks
        self.config_version = self.master.config_version
        self.updateLocks()

    @defer.inlineCallbacks
    def reconfigServiceWithSibling(self, sibling):
        # reconfigServiceWithSibling will only reconfigure the worker when it is configured
        # differently.
        # However, the worker's configuration also depends on which builders it is configured for,
        # so the list of attached builders is recomputed below on every reconfig.
        yield super().reconfigServiceWithSibling(sibling)

        # update the attached worker's notion of which builders are attached.
        # This assumes that the relevant builders have already been configured,
        # which is why the reconfig_priority is set low in this class.
        bids = [
            b.getBuilderId()
            for b in self.botmaster.getBuildersForWorker(self.name)
        ]
        bids = yield defer.gatherResults(bids, consumeErrors=True)
        if self._configured_builderid_list != bids:
            yield self.master.data.updates.workerConfigured(
                self.workerid, self.master.masterid, bids)
            yield self.updateWorker()
            self._configured_builderid_list = bids

    @defer.inlineCallbacks
    def stopService(self):
        if self.registration:
            yield self.registration.unregister()
            self.registration = None
        self.workerActionConsumer.stopConsuming()
        self.stopMissingTimer()
        self.stopQuarantineTimer()
        # mark this worker as configured for zero builders in this master
        yield self.master.data.updates.workerConfigured(
            self.workerid, self.master.masterid, [])

        # during master shutdown we need to wait until the disconnection notification deliveries
        # are completed, otherwise some of the events may still be firing long after the master
        # is completely shut down.
        yield self.disconnect()
        yield self.waitForCompleteShutdown()

        yield super().stopService()

    def isCompatibleWithBuild(self, build_props):
        # given a build properties object, determines whether the build is
        # compatible with the currently running worker or not. This is most
        # often useful for latent workers where it's possible to request
        # different kinds of workers.
        return defer.succeed(True)

    def startMissingTimer(self):
        if self.missing_timeout and self.parent and self.running:
            self.stopMissingTimer()  # in case it's already running
            self.missing_timer = self.master.reactor.callLater(
                self.missing_timeout, self._missing_timer_fired)

    def stopMissingTimer(self):
        if self.missing_timer:
            if self.missing_timer.active():
                self.missing_timer.cancel()
            self.missing_timer = None

    def isConnected(self):
        return self.conn

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return
        last_connection = time.ctime(time.time() - self.missing_timeout)
        self.master.data.updates.workerMissing(workerid=self.workerid,
                                               masterid=self.master.masterid,
                                               last_connection=last_connection,
                                               notify=self.notify_on_missing)

    def updateWorker(self):
        """Called to add or remove builders after the worker has connected.

        @return: a Deferred that indicates when an attached worker has
        accepted the new builders and/or released the old ones."""
        if self.conn:
            return self.sendBuilderList()
        # else:
        return defer.succeed(None)

    @defer.inlineCallbacks
    def attached(self, conn):
        """This is called when the worker connects."""

        assert self.conn is None

        metrics.MetricCountEvent.log("AbstractWorker.attached_workers", 1)

        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this worker to all the
        # Builders that care about it.

        # Reset graceful shutdown status
        self._graceful = False

        self.conn = conn
        self._old_builder_list = None  # clear builder list before proceeding

        self._applyWorkerInfo(conn.info)
        self.worker_commands = conn.info.get("worker_commands", {})
        self.worker_environ = conn.info.get("environ", {})
        self.worker_basedir = conn.info.get("basedir", None)
        self.worker_system = conn.info.get("system", None)

        self.conn.notifyOnDisconnect(self.detached)

        workerinfo = {
            'admin': conn.info.get('admin'),
            'host': conn.info.get('host'),
            'access_uri': conn.info.get('access_uri'),
            'version': conn.info.get('version')
        }

        yield self.master.data.updates.workerConnected(
            workerid=self.workerid,
            masterid=self.master.masterid,
            workerinfo=workerinfo)

        if self.worker_system == "nt":
            self.path_module = namedModule("ntpath")
        else:
            # most everything accepts / as separator, so posix should be a
            # reasonable fallback
            self.path_module = namedModule("posixpath")
        log.msg("bot attached")
        self.messageReceivedFromWorker()
        self.stopMissingTimer()
        yield self.updateWorker()
        yield self.botmaster.maybeStartBuildsForWorker(self.name)
        self.updateState()

    def messageReceivedFromWorker(self):
        now = time.time()
        self.lastMessageReceived = now

    def setupProperties(self, props):
        for name in self.properties.properties:
            props.setProperty(name, self.properties.getProperty(name),
                              "Worker")
        for name in self.defaultProperties.properties:
            if name not in props:
                props.setProperty(name,
                                  self.defaultProperties.getProperty(name),
                                  "Worker")

    @defer.inlineCallbacks
    def _handle_disconnection_delivery_notifier(self):
        self._pending_disconnection_delivery_notifier = Notifier()
        yield self.conn.waitForNotifyDisconnectedDelivered()
        self._pending_disconnection_delivery_notifier.notify(None)
        self._pending_disconnection_delivery_notifier = None

    @defer.inlineCallbacks
    def detached(self):
        # protect against race conditions in conn disconnect path and someone
        # calling detached directly. At the moment the null worker does that.
        if self.conn is None:
            return

        metrics.MetricCountEvent.log("AbstractWorker.attached_workers", -1)

        self._handle_disconnection_delivery_notifier()

        yield self.conn.waitShutdown()
        self.conn = None
        self._old_builder_list = []
        log.msg("Worker.detached({})".format(self.name))
        self.releaseLocks()
        yield self.master.data.updates.workerDisconnected(
            workerid=self.workerid,
            masterid=self.master.masterid,
        )

    def disconnect(self):
        """Forcibly disconnect the worker.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the worker is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a worker is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown worker. The second is
        when we wind up with two connections for the same worker, in which
        case we disconnect the older connection.
        """
        if self.conn is None:
            return defer.succeed(None)
        log.msg("disconnecting old worker {} now".format(self.name))
        # When this Deferred fires, we'll be ready to accept the new worker
        return self._disconnect(self.conn)

    def waitForCompleteShutdown(self):
        # This function waits until the disconnection has happened and the disconnection
        # notifications have been delivered and acted upon.
        return self._waitForCompleteShutdownImpl(self.conn)

    @defer.inlineCallbacks
    def _waitForCompleteShutdownImpl(self, conn):
        if conn:
            d = defer.Deferred()

            def _disconnected():
                eventually(d.callback, None)

            conn.notifyOnDisconnect(_disconnected)
            yield d
            yield conn.waitForNotifyDisconnectedDelivered()
        elif self._pending_disconnection_delivery_notifier is not None:
            yield self._pending_disconnection_delivery_notifier.wait()

    @defer.inlineCallbacks
    def _disconnect(self, conn):
        # This function waits until the disconnection has happened and the disconnection
        # notifications have been delivered and acted upon.
        d = self._waitForCompleteShutdownImpl(conn)
        conn.loseConnection()
        log.msg("waiting for worker to finish disconnecting")
        yield d

    @defer.inlineCallbacks
    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForWorker(self.name)

        blist = [(b.name, b.config.workerbuilddir) for b in our_builders]

        if blist == self._old_builder_list:
            return

        slist = yield self.conn.remoteSetBuilderList(builders=blist)

        self._old_builder_list = blist

        # Nothing has changed, so don't need to re-attach to everything
        if not slist:
            return

        dl = []
        for name in slist:
            # use get() since we might have changed our mind since then
            b = self.botmaster.builders.get(name)
            if b:
                d1 = self.attachBuilder(b)
                dl.append(d1)
        yield defer.DeferredList(dl)

    def attachBuilder(self, builder):
        return builder.attached(self, self.worker_commands)

    def controlWorker(self, key, params):
        log.msg("worker {} wants to {}: {}".format(self.name, key[-1], params))
        if key[-1] == "stop":
            return self.shutdownRequested()
        if key[-1] == "pause":
            self.pause()
        if key[-1] == "unpause":
            self.unpause()
        if key[-1] == "kill":
            self.shutdown()
        return None

    def shutdownRequested(self):
        self._graceful = True
        self.maybeShutdown()
        self.updateState()

    def addWorkerForBuilder(self, wfb):
        self.workerforbuilders[wfb.builder_name] = wfb

    def removeWorkerForBuilder(self, wfb):
        try:
            del self.workerforbuilders[wfb.builder_name]
        except KeyError:
            pass

    def buildFinished(self, wfb):
        """This is called when a build on this worker is finished."""
        self.botmaster.maybeStartBuildsForWorker(self.name)

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this worker
        can start a build.  This function can be used to limit overall
        concurrency on the worker.

        Note for subclassers: if a worker can become willing to start a build
        without any action on that worker (for example, by a resource in use on
        another worker becoming available), then you must arrange for
        L{maybeStartBuildsForWorker} to be called at that time, or builds on
        this worker will not start.
        """

        # If we're waiting to shutdown gracefully or paused, then we shouldn't
        # accept any new jobs.
        if self._graceful or self._paused:
            return False

        if self.max_builds:
            active_builders = [
                wfb for wfb in self.workerforbuilders.values() if wfb.isBusy()
            ]
            if len(active_builders) >= self.max_builds:
                return False

        if not self.locksAvailable():
            return False

        return True

    @defer.inlineCallbacks
    def shutdown(self):
        """Shutdown the worker"""
        if not self.conn:
            log.msg("no remote; worker is already shut down")
            return

        yield self.conn.remoteShutdown()

    def maybeShutdown(self):
        """Shut down this worker if it has been asked to shut down gracefully,
        and has no active builders."""
        if not self._graceful:
            return
        active_builders = [
            wfb for wfb in self.workerforbuilders.values() if wfb.isBusy()
        ]
        if active_builders:
            return
        d = self.shutdown()
        d.addErrback(log.err, 'error while shutting down worker')

    def updateState(self):
        self.master.data.updates.setWorkerState(self.workerid, self._paused,
                                                self._graceful)

    def pause(self):
        """Stop running new builds on the worker."""
        self._paused = True
        self.updateState()

    def unpause(self):
        """Restart running new builds on the worker."""
        self._paused = False
        self.botmaster.maybeStartBuildsForWorker(self.name)
        self.updateState()

    def isPaused(self):
        return self._paused

    def resetQuarantine(self):
        self.quarantine_timeout = self.quarantine_initial_timeout

    def putInQuarantine(self):
        if self.quarantine_timer:  # already in quarantine
            return

        self.pause()
        self.quarantine_timer = self.master.reactor.callLater(
            self.quarantine_timeout, self.exitQuarantine)
        log.msg("{} has been put in quarantine for {}s".format(
            self.name, self.quarantine_timeout))
        # next we will wait twice as long
        self.quarantine_timeout *= 2
        if self.quarantine_timeout > self.quarantine_max_timeout:
            # unless we hit the max timeout
            self.quarantine_timeout = self.quarantine_max_timeout
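        # Backoff illustration (hypothetical timings using the class defaults
        # above): successive quarantines pause the worker for 10s, 20s, 40s, ...,
        # doubling each time and capping at quarantine_max_timeout (3600s);
        # resetQuarantine() returns it to the 10s initial timeout.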

    def exitQuarantine(self):
        self.quarantine_timer = None
        self.unpause()

    def stopQuarantineTimer(self):
        if self.quarantine_timer is not None:
            self.quarantine_timer.cancel()
            self.quarantine_timer = None
            self.unpause()
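
The checkConfig docstring above distinguishes `properties` (always applied to builds on the worker) from `defaultProperties` (applied only when the property has not been set by another source), and setupProperties() implements that precedence with two loops. A minimal sketch of the same two loops follows; it is not Buildbot's actual wiring, and the import path is an assumption.

from buildbot.process.properties import Properties  # assumed import path

worker_props = Properties()
worker_props.setProperty("toolchain", "clang", "Worker")

worker_defaults = Properties()
worker_defaults.setProperty("jobs", 4, "Worker")
worker_defaults.setProperty("toolchain", "gcc", "Worker")

build_props = Properties()
build_props.setProperty("toolchain", "msvc", "Scheduler")

# mirror setupProperties(): worker properties are copied unconditionally,
# defaultProperties only fill in names the build does not already have
for name in worker_props.properties:
    build_props.setProperty(name, worker_props.getProperty(name), "Worker")
for name in worker_defaults.properties:
    if name not in build_props:
        build_props.setProperty(name, worker_defaults.getProperty(name), "Worker")

assert build_props.getProperty("toolchain") == "clang"  # worker property overrides
assert build_props.getProperty("jobs") == 4             # default filled a missing name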
Exemplo n.º 38
class AbstractBuildSlave(config.ReconfigurableServiceMixin, pb.Avatar,
                         service.MultiService):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['slaves'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a build slave -- a remote machine capable of
    running builds.  I am instantiated by the configuration file, and can be
    subclassed to add extra functionality."""

    implements(IBuildSlave)
    keepalive_timer = None
    keepalive_interval = None

    # reconfig slaves after builders
    reconfig_priority = 64

    def __init__(self,
                 name,
                 password,
                 max_builds=None,
                 notify_on_missing=[],
                 missing_timeout=3600,
                 properties={},
                 locks=None,
                 keepalive_interval=3600):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this buildslave (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this slave
        @type properties: dictionary
        @param locks: A list of locks that must be acquired before this slave
                      can be used
        @type locks: dictionary
        """
        service.MultiService.__init__(self)
        self.slavename = name
        self.password = password

        # PB registration
        self.registration = None
        self.registered_port = None

        # these are set when the service is started, and unset when it is
        # stopped
        self.botmaster = None
        self.master = None

        self.slave_status = SlaveStatus(name)
        self.slave = None  # a RemoteReference to the Bot, when connected
        self.slave_commands = None
        self.slavebuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.lock_subscriptions = []

        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")

        self.lastMessageReceived = 0
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            if not isinstance(i, str):
                config.error('notify_on_missing arg %r is not a string' %
                             (i, ))
        self.missing_timeout = missing_timeout
        self.missing_timer = None
        self.keepalive_interval = keepalive_interval

        self.detached_subs = None

        self._old_builder_list = None

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.slavename)

    def updateLocks(self):
        """Convert the L{LockAccess} objects in C{self.locks} into real lock
        objects, while also maintaining the subscriptions to lock releases."""
        # unsubscribe from any old locks
        for s in self.lock_subscriptions:
            s.unsubscribe()

        # convert locks into their real form
        locks = [(self.botmaster.getLockFromLockAccess(a), a)
                 for a in self.access]
        self.locks = [(l.getLock(self), la) for l, la in locks]
        self.lock_subscriptions = [
            l.subscribeToReleases(self._lockReleased) for l, la in self.locks
        ]

    def locksAvailable(self):
        """
        I am called to see if all the locks I depend on are available,
        in which I return True, otherwise I return False
        """
        if not self.locks:
            return True
        for lock, access in self.locks:
            if not lock.isAvailable(self, access):
                return False
        return True

    def acquireLocks(self):
        """
        I am called when a build is preparing to run. I try to claim all
        the locks that are needed for a build to happen. If I can't, then
        my caller should give up the build and try to get another slave
        to look at it.
        """
        log.msg("acquireLocks(slave %s, locks %s)" % (self, self.locks))
        if not self.locksAvailable():
            log.msg("slave %s can't lock, giving up" % (self, ))
            return False
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return True

    def releaseLocks(self):
        """
        I am called to release any locks after a build has finished
        """
        log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)

    def _lockReleased(self):
        """One of the locks for this slave was released; try scheduling
        builds."""
        if not self.botmaster:
            return  # oh well..
        self.botmaster.maybeStartBuildsForSlave(self.slavename)

    def _saveSlaveInfoDict(self, slaveinfo):
        return self.master.db.buildslaves.updateBuildslave(
            name=self.slavename,
            slaveinfo=slaveinfo,
        )

    def _getSlaveInfo(self):
        d = self.master.db.buildslaves.getBuildslaveByName(self.slavename)

        @d.addCallback
        def applyInfo(buildslave):
            if buildslave is None:
                return

            self.updateSlaveInfo(**buildslave['slaveinfo'])

        return d

    def updateSlaveInfo(self, **kwargs):
        self.slave_status.updateInfo(**kwargs)

    def getSlaveInfo(self, key, default=None):
        return self.slave_status.getInfo(key, default)

    def setServiceParent(self, parent):
        # botmaster needs to be set before setServiceParent, which calls startService
        self.botmaster = parent
        self.master = parent.master
        service.MultiService.setServiceParent(self, parent)

    def startService(self):
        self.updateLocks()
        self.startMissingTimer()
        self.slave_status.addInfoWatcher(self._saveSlaveInfoDict)
        d = self._getSlaveInfo()
        d.addCallback(lambda _: service.MultiService.startService(self))
        return d

    @defer.inlineCallbacks
    def reconfigService(self, new_config):
        # Given a new BuildSlave, configure this one identically.  Because
        # BuildSlave objects are remotely referenced, we can't replace them
        # without disconnecting the slave, yet there's no reason to do that.
        new = self.findNewSlaveInstance(new_config)

        assert self.slavename == new.slavename

        # do we need to re-register?
        if (not self.registration or self.password != new.password
                or new_config.protocols['pb']['port'] != self.registered_port):
            if self.registration:
                yield self.registration.unregister()
                self.registration = None
            self.password = new.password
            self.registered_port = new_config.protocols['pb']['port']
            self.registration = self.master.pbmanager.register(
                self.registered_port, self.slavename, self.password,
                self.getPerspective)

        # adopt new instance's configuration parameters
        self.max_builds = new.max_builds
        self.access = new.access
        self.notify_on_missing = new.notify_on_missing
        self.keepalive_interval = new.keepalive_interval

        if self.missing_timeout != new.missing_timeout:
            running_missing_timer = self.missing_timer
            self.stopMissingTimer()
            self.missing_timeout = new.missing_timeout
            if running_missing_timer:
                self.startMissingTimer()

        properties = Properties()
        properties.updateFromProperties(new.properties)
        self.properties = properties

        self.updateLocks()

        # update the attached slave's notion of which builders are attached.
        # This assumes that the relevant builders have already been configured,
        # which is why the reconfig_priority is set low in this class.
        yield self.updateSlave()

        yield config.ReconfigurableServiceMixin.reconfigService(
            self, new_config)

    def stopService(self):
        self.slave_status.removeInfoWatcher(self._saveSlaveInfoDict)
        if self.registration:
            self.registration.unregister()
            self.registration = None
        self.stopMissingTimer()
        return service.MultiService.stopService(self)

    def findNewSlaveInstance(self, new_config):
        # TODO: called multiple times per reconfig; use 1-element cache?
        for sl in new_config.slaves:
            if sl.slavename == self.slavename:
                return sl
        assert 0, "no new slave named '%s'" % self.slavename

    def startMissingTimer(self):
        if self.notify_on_missing and self.missing_timeout and self.parent:
            self.stopMissingTimer()  # in case it's already running
            self.missing_timer = reactor.callLater(self.missing_timeout,
                                                   self._missing_timer_fired)

    def stopMissingTimer(self):
        if self.missing_timer:
            self.missing_timer.cancel()
            self.missing_timer = None

    def getPerspective(self, mind, slavename):
        assert slavename == self.slavename
        metrics.MetricCountEvent.log("attached_slaves", 1)

        # record when this connection attempt occurred
        if self.slave_status:
            self.slave_status.recordConnectTime()

        # try to use TCP keepalives
        try:
            mind.broker.transport.setTcpKeepAlive(1)
        except:
            pass

        if self.isConnected():
            # duplicate slave - send it to arbitration
            arb = botmaster.DuplicateSlaveArbitrator(self)
            return arb.getPerspective(mind, slavename)
        else:
            log.msg("slave '%s' attaching from %s" %
                    (slavename, mind.broker.transport.getPeer()))
            return self

    def doKeepalive(self):
        self.keepalive_timer = reactor.callLater(self.keepalive_interval,
                                                 self.doKeepalive)
        if not self.slave:
            return
        d = self.slave.callRemote("print", "Received keepalive from master")
        d.addErrback(log.msg, "Keepalive failed for '%s'" % (self.slavename, ))

    def stopKeepaliveTimer(self):
        if self.keepalive_timer:
            self.keepalive_timer.cancel()

    def startKeepaliveTimer(self):
        assert self.keepalive_interval
        log.msg("Starting buildslave keepalive timer for '%s'" %
                (self.slavename, ))
        self.doKeepalive()

    def isConnected(self):
        return self.slave

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return

        buildmaster = self.botmaster.master
        status = buildmaster.getStatus()
        text = "The Buildbot working for '%s'\n" % status.getTitle()
        text += ("has noticed that the buildslave named %s went away\n" %
                 self.slavename)
        text += "\n"
        text += ("It last disconnected at %s (buildmaster-local time)\n" %
                 time.ctime(time.time() - self.missing_timeout))  # approx
        text += "\n"
        text += "The admin on record (as reported by BUILDSLAVE:info/admin)\n"
        text += "was '%s'.\n" % self.slave_status.getAdmin()
        text += "\n"
        text += "Sincerely,\n"
        text += " The Buildbot\n"
        text += " %s\n" % status.getTitleURL()
        text += "\n"
        text += "%s\n" % status.getURLForThing(self.slave_status)
        subject = "Buildbot: buildslave %s was lost" % self.slavename
        return self._mail_missing_message(subject, text)

    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.slave:
            return self.sendBuilderList()
        else:
            return defer.succeed(None)

    def updateSlaveStatus(self, buildStarted=None, buildFinished=None):
        if buildStarted:
            self.slave_status.buildStarted(buildStarted)
        if buildFinished:
            self.slave_status.buildFinished(buildFinished)

    def attached(self, bot):
        """This is called when the slave connects.

        @return: a Deferred that fires when the attachment is complete
        """

        # the botmaster should ensure this.
        assert not self.isConnected()

        metrics.MetricCountEvent.log("AbstractBuildSlave.attached_slaves", 1)

        # set up the subscription point for eventual detachment
        self.detached_subs = subscription.SubscriptionPoint("detached")

        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # we accumulate slave information in this 'state' dictionary, then
        # set it atomically if we make it far enough through the process
        state = {}

        # Reset graceful shutdown status
        self.slave_status.setGraceful(False)
        # We want to know when the graceful shutdown flag changes
        self.slave_status.addGracefulWatcher(self._gracefulChanged)
        self.slave_status.addPauseWatcher(self._pauseChanged)

        d = defer.succeed(None)

        @d.addCallback
        def _log_attachment_on_slave(res):
            d1 = bot.callRemote("print", "attached")
            d1.addErrback(lambda why: None)
            return d1

        @d.addCallback
        def _get_info(res):
            d1 = bot.callRemote("getSlaveInfo")

            def _got_info(info):
                log.msg("Got slaveinfo from '%s'" % self.slavename)
                # TODO: info{} might have other keys
                state["admin"] = info.get("admin")
                state["host"] = info.get("host")
                state["access_uri"] = info.get("access_uri", None)
                state["slave_environ"] = info.get("environ", {})
                state["slave_basedir"] = info.get("basedir", None)
                state["slave_system"] = info.get("system", None)

            def _info_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # maybe an old slave, doesn't implement remote_getSlaveInfo
                log.msg("BuildSlave.info_unavailable")
                log.err(why)

            d1.addCallbacks(_got_info, _info_unavailable)
            return d1

        d.addCallback(lambda _: self.startKeepaliveTimer())

        @d.addCallback
        def _get_version(_):
            d = bot.callRemote("getVersion")

            def _got_version(version):
                state["version"] = version

            def _version_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # probably an old slave
                state["version"] = '(unknown)'

            d.addCallbacks(_got_version, _version_unavailable)
            return d

        @d.addCallback
        def _get_commands(_):
            d1 = bot.callRemote("getCommands")

            def _got_commands(commands):
                state["slave_commands"] = commands

            def _commands_unavailable(why):
                # probably an old slave
                if why.check(AttributeError):
                    return
                log.msg("BuildSlave.getCommands is unavailable - ignoring")
                log.err(why)

            d1.addCallbacks(_got_commands, _commands_unavailable)
            return d1

        @d.addCallback
        def _accept_slave(res):
            self.slave_status.setConnected(True)

            self.slave_status.updateInfo(
                admin=state.get("admin"),
                host=state.get("host"),
                access_uri=state.get("access_uri"),
                version=state.get("version"),
            )

            self.slave_commands = state.get("slave_commands")
            self.slave_environ = state.get("slave_environ")
            self.slave_basedir = state.get("slave_basedir")
            self.slave_system = state.get("slave_system")
            self.slave = bot
            if self.slave_system == "nt":
                self.path_module = namedModule("ntpath")
            else:
                # most everything accepts / as separator, so posix should be a
                # reasonable fallback
                self.path_module = namedModule("posixpath")
            log.msg("bot attached")
            self.messageReceivedFromSlave()
            self.stopMissingTimer()
            self.master.status.slaveConnected(self.slavename)

        d.addCallback(lambda _: self.updateSlave())

        d.addCallback(
            lambda _: self.botmaster.maybeStartBuildsForSlave(self.slavename))

        # Finally, the slave gets a reference to this BuildSlave. They
        # receive this later, after we've started using them.
        d.addCallback(lambda _: self)
        return d

    def messageReceivedFromSlave(self):
        now = time.time()
        self.lastMessageReceived = now
        self.slave_status.setLastMessageReceived(now)

    def detached(self, mind):
        metrics.MetricCountEvent.log("AbstractBuildSlave.attached_slaves", -1)
        self.slave = None
        self._old_builder_list = []
        self.slave_status.removeGracefulWatcher(self._gracefulChanged)
        self.slave_status.removePauseWatcher(self._pauseChanged)
        self.slave_status.setConnected(False)
        log.msg("BuildSlave.detached(%s)" % self.slavename)
        self.master.status.slaveDisconnected(self.slavename)
        self.stopKeepaliveTimer()
        self.releaseLocks()

        # notify watchers, but do so in the next reactor iteration so that
        # any further detached() action by subclasses happens first
        def notif():
            subs = self.detached_subs
            self.detached_subs = None
            subs.deliver()

        eventually(notif)

    def subscribeToDetach(self, callback):
        """
        Request that C{callable} be invoked with no arguments when the
        L{detached} method is invoked.

        @returns: L{Subscription}
        """
        assert self.detached_subs, "detached_subs is only set if attached"
        return self.detached_subs.subscribe(callback)

    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """

        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)
        # When this Deferred fires, we'll be ready to accept the new slave
        return self._disconnect(self.slave)

    def _disconnect(self, slave):
        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback with one argument, the
        # RemoteReference being disconnected.
        def _disconnected(rref):
            eventually(d.callback, None)

        slave.notifyOnDisconnect(_disconnected)
        tport = slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            # tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
        except:
            # however, these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
        log.msg("waiting for slave to finish disconnecting")

        return d

    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
        blist = [(b.name, b.config.slavebuilddir) for b in our_builders]
        if blist == self._old_builder_list:
            return defer.succeed(None)

        d = self.slave.callRemote("setBuilderList", blist)

        def sentBuilderList(ign):
            self._old_builder_list = blist
            return ign

        d.addCallback(sentBuilderList)
        return d

    def perspective_keepalive(self):
        self.messageReceivedFromSlave()

    def perspective_shutdown(self):
        log.msg("slave %s wants to shut down" % self.slavename)
        self.slave_status.setGraceful(True)

    def addSlaveBuilder(self, sb):
        self.slavebuilders[sb.builder_name] = sb

    def removeSlaveBuilder(self, sb):
        try:
            del self.slavebuilders[sb.builder_name]
        except KeyError:
            pass

    def buildFinished(self, sb):
        """This is called when a build on this slave is finished."""
        self.botmaster.maybeStartBuildsForSlave(self.slavename)

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this buildslave
        can start a build.  This function can be used to limit overall
        concurrency on the buildslave.

        Note for subclassers: if a slave can become willing to start a build
        without any action on that slave (for example, by a resource in use on
        another slave becoming available), then you must arrange for
        L{maybeStartBuildsForSlave} to be called at that time, or builds on
        this slave will not start.
        """

        if self.slave_status.isPaused():
            return False

        # If we're waiting to shutdown gracefully, then we shouldn't
        # accept any new jobs.
        if self.slave_status.getGraceful():
            return False

        if self.max_builds:
            active_builders = [
                sb for sb in self.slavebuilders.values() if sb.isBusy()
            ]
            if len(active_builders) >= self.max_builds:
                return False

        if not self.locksAvailable():
            return False

        return True

    def _mail_missing_message(self, subject, text):
        # first, see if we have a MailNotifier we can use. This gives us a
        # fromaddr and a relayhost.
        buildmaster = self.botmaster.master
        for st in buildmaster.status:
            if isinstance(st, MailNotifier):
                break
        else:
            # if not, they get a default MailNotifier, which always uses SMTP
            # to localhost and uses a dummy fromaddr of "buildbot".
            log.msg("buildslave-missing msg using default MailNotifier")
            st = MailNotifier("buildbot")
        # now construct the mail

        m = Message()
        m.set_payload(text)
        m['Date'] = formatdate(localtime=True)
        m['Subject'] = subject
        m['From'] = st.fromaddr
        recipients = self.notify_on_missing
        m['To'] = ", ".join(recipients)
        d = st.sendMessage(m, recipients)
        # return the Deferred for testing purposes
        return d

    def _gracefulChanged(self, graceful):
        """This is called when our graceful shutdown setting changes"""
        self.maybeShutdown()

    @defer.inlineCallbacks
    def shutdown(self):
        """Shutdown the slave"""
        if not self.slave:
            log.msg("no remote; slave is already shut down")
            return

        # First, try the "new" way - calling our own remote's shutdown
        # method.  The method was only added in 0.8.3, so ignore NoSuchMethod
        # failures.
        def new_way():
            d = self.slave.callRemote('shutdown')
            d.addCallback(lambda _: True)  # successful shutdown request

            def check_nsm(f):
                f.trap(pb.NoSuchMethod)
                return False  # fall through to the old way

            d.addErrback(check_nsm)

            def check_connlost(f):
                f.trap(pb.PBConnectionLost)
                return True  # the slave is gone, so call it finished

            d.addErrback(check_connlost)
            return d

        if (yield new_way()):
            return  # done!

        # Now, the old way.  Look for a builder with a remote reference to the
        # client side slave.  If we can find one, then call "shutdown" on the
        # remote builder, which will cause the slave buildbot process to exit.
        def old_way():
            d = None
            for b in self.slavebuilders.values():
                if b.remote:
                    d = b.remote.callRemote("shutdown")
                    break

            if d:
                log.msg("Shutting down (old) slave: %s" % self.slavename)

                # The remote shutdown call will not complete successfully since the
                # buildbot process exits almost immediately after getting the
                # shutdown request.
                # Here we look at the reason why the remote call failed, and if
                # it's because the connection was lost, that means the slave
                # shutdown as expected.

                def _errback(why):
                    if why.check(pb.PBConnectionLost):
                        log.msg("Lost connection to %s" % self.slavename)
                    else:
                        log.err("Unexpected error when trying to shutdown %s" %
                                self.slavename)

                d.addErrback(_errback)
                return d
            log.err("Couldn't find remote builder to shut down slave")
            return defer.succeed(None)

        yield old_way()

    def maybeShutdown(self):
        """Shut down this slave if it has been asked to shut down gracefully,
        and has no active builders."""
        if not self.slave_status.getGraceful():
            return
        active_builders = [
            sb for sb in self.slavebuilders.values() if sb.isBusy()
        ]
        if active_builders:
            return
        d = self.shutdown()
        d.addErrback(log.err, 'error while shutting down slave')

    def _pauseChanged(self, paused):
        if paused is True:
            self.botmaster.master.status.slavePaused(self.slavename)
        else:
            self.botmaster.master.status.slaveUnpaused(self.slavename)

    def pause(self):
        """Stop running new builds on the slave."""
        self.slave_status.setPaused(True)

    def unpause(self):
        """Restart running new builds on the slave."""
        self.slave_status.setPaused(False)
        self.botmaster.maybeStartBuildsForSlave(self.slavename)

    def isPaused(self):
        return self.slave_status.isPaused()
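

# --- A minimal, hypothetical sketch (not part of the original example) ---
# It illustrates the maybeShutdown() contract above: shutdown() is only invoked
# once the graceful flag is set and no slavebuilder is busy.  `slave` is assumed
# to be an instance of the slave class with its collaborators stubbed out; Mock
# comes from the `mock` package.
from mock import Mock


def sketch_graceful_shutdown(slave):
    # the slave has been asked to shut down gracefully ...
    slave.slave_status.getGraceful = lambda: True
    # ... and its only builder is idle
    idle = Mock()
    idle.isBusy.return_value = False
    slave.slavebuilders = {'idle-builder': idle}
    slave.shutdown = Mock(return_value=Mock())  # stub the remote shutdown call
    slave.maybeShutdown()
    assert slave.shutdown.called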
Exemplo n.º 39
0
class BaseScheduler(service.MultiService, ComparableMixin, StateMixin):
    """
    Base class for all schedulers; this provides the equipment to manage
    reconfigurations and to handle basic scheduler state.  It also provides
    utility methods to begin various sorts of builds.

    Subclasses should add any configuration-derived attributes to
    C{base.Scheduler.compare_attrs}.
    """

    implements(interfaces.IScheduler)

    DefaultCodebases = {'': {}}

    compare_attrs = ('name', 'builderNames', 'properties', 'codebases')

    def __init__(self,
                 name,
                 builderNames,
                 properties,
                 codebases=DefaultCodebases):
        """
        Initialize a Scheduler.

        @param name: name of this scheduler (used as a key for state)
        @type name: unicode

        @param builderNames: list of builders this scheduler may start
        @type builderNames: list of unicode

        @param properties: properties to add to builds triggered by this
        scheduler
        @type properties: dictionary

        @param codebases: codebases that are necessary to process the changes
        @type codebases: dict with following struct:
            key: '<codebase>'
            value: {'repository':'<repo>', 'branch':'<br>', 'revision':'<rev>'}
        """
        service.MultiService.__init__(self)
        self.name = name
        "name of this scheduler; used to identify replacements on reconfig"

        ok = True
        if not isinstance(builderNames, (list, tuple)):
            ok = False
        else:
            for b in builderNames:
                if not isinstance(b, basestring):
                    ok = False
        if not ok:
            config.error(
                "The builderNames argument to a scheduler must be a list "
                "of Builder names.")

        self.builderNames = builderNames
        "list of builder names to start in each buildset"

        self.properties = Properties()
        "properties that are contributed to each buildset"
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")

        self.objectid = None

        self.master = None

        # Set the codebases that are necessary to process the changes
        # These codebases will always result in a sourcestamp, whether or not there are changes
        if codebases is not None:
            if not isinstance(codebases, dict):
                config.error("Codebases must be a dict of dicts")
            for codebase, codebase_attrs in codebases.iteritems():
                if not isinstance(codebase_attrs, dict):
                    config.error("Codebases must be a dict of dicts")
                if (codebases != BaseScheduler.DefaultCodebases
                        and 'repository' not in codebase_attrs):
                    config.error(
                        "The key 'repository' is mandatory in codebases")
        else:
            config.error("Codebases cannot be None")

        self.codebases = codebases

        # internal variables
        self._change_subscription = None
        self._change_consumption_lock = defer.DeferredLock()

    ## service handling

    def startService(self):
        service.MultiService.startService(self)

    def findNewSchedulerInstance(self, new_config):
        return new_config.schedulers[self.name]  # should exist!

    def stopService(self):
        d = defer.maybeDeferred(self._stopConsumingChanges)
        d.addCallback(lambda _: service.MultiService.stopService(self))
        return d

    ## status queries

    # TODO: these aren't compatible with distributed schedulers

    def listBuilderNames(self):
        "Returns the list of builder names"
        return self.builderNames

    def getPendingBuildTimes(self):
        "Returns a list of the next times that builds are scheduled, if known."
        return []

    ## change handling

    def startConsumingChanges(self,
                              fileIsImportant=None,
                              change_filter=None,
                              onlyImportant=False):
        """
        Subclasses should call this method from startService to register to
        receive changes.  The BaseScheduler class will take care of filtering
        the changes (using change_filter) and (if fileIsImportant is not None)
        classifying them.  See L{gotChange}.  Returns a Deferred.

        @param fileIsImportant: a callable provided by the user to distinguish
        important and unimportant changes
        @type fileIsImportant: callable

        @param change_filter: a filter to determine which changes are even
        considered by this scheduler, or C{None} to consider all changes
        @type change_filter: L{buildbot.changes.filter.ChangeFilter} instance

        @param onlyImportant: If True, only important changes, as specified by
        fileIsImportant, will be added to the buildset.
        @type onlyImportant: boolean

        """
        assert fileIsImportant is None or callable(fileIsImportant)

        # register for changes with master
        assert not self._change_subscription

        def changeCallback(change):
            # ignore changes delivered while we're not running
            if not self._change_subscription:
                return

            if change_filter and not change_filter.filter_change(change):
                return
            if change.codebase not in self.codebases:
                log.msg('change contains codebase %s that is not processed by'
                        ' scheduler %s' % (change.codebase, self.name),
                        logLevel=logging.DEBUG)
                return
            if fileIsImportant:
                try:
                    important = fileIsImportant(change)
                    if not important and onlyImportant:
                        return
                except:
                    log.err(failure.Failure(),
                            'in fileIsImportant check for %s' % change)
                    return
            else:
                important = True

            # use change_consumption_lock to ensure the service does not stop
            # while this change is being processed
            d = self._change_consumption_lock.acquire()
            d.addCallback(lambda _: self.gotChange(change, important))

            def release(x):
                self._change_consumption_lock.release()

            d.addBoth(release)
            d.addErrback(log.err, 'while processing change')

        self._change_subscription = self.master.subscribeToChanges(
            changeCallback)

        return defer.succeed(None)

    def _stopConsumingChanges(self):
        # (note: called automatically in stopService)

        # acquire the lock change consumption lock to ensure that any change
        # consumption is complete before we are done stopping consumption
        d = self._change_consumption_lock.acquire()

        def stop(x):
            if self._change_subscription:
                self._change_subscription.unsubscribe()
                self._change_subscription = None
            self._change_consumption_lock.release()

        d.addBoth(stop)
        return d

    def gotChange(self, change, important):
        """
        Called when a change is received; returns a Deferred.  If the
        C{fileIsImportant} parameter to C{startConsumingChanges} was C{None},
        then all changes are considered important.
        The C{codebase} of the change always has an entry in the C{codebases}
        dictionary of the scheduler.

        @param change: the new change object
        @type change: L{buildbot.changes.changes.Change} instance
        @param important: true if this is an important change, according to
        C{fileIsImportant}.
        @type important: boolean
        @returns: Deferred
        """
        raise NotImplementedError

    ## starting builds

    @defer.inlineCallbacks
    def addBuildsetForLatest(self,
                             reason='',
                             external_idstring=None,
                             branch=None,
                             repository='',
                             project='',
                             builderNames=None,
                             properties=None):
        """
        Add a buildset for the 'latest' source in the given branch,
        repository, and project.  This will create a relative sourcestamp for
        the buildset.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's addBuildset
        method with the appropriate parameters.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param branch: branch to build (note that None often has a special meaning)
        @param repository: repository name for sourcestamp
        @param project: project name for sourcestamp
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        # Define setid for this set of changed repositories
        setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        # add a sourcestamp for each codebase
        for codebase, cb_info in self.codebases.iteritems():
            ss_repository = cb_info.get('repository', repository)
            ss_branch = cb_info.get('branch', branch)
            ss_revision = cb_info.get('revision', None)
            yield self.master.db.sourcestamps.addSourceStamp(
                codebase=codebase,
                repository=ss_repository,
                branch=ss_branch,
                revision=ss_revision,
                project=project,
                changeids=set(),
                sourcestampsetid=setid)

        bsid, brids = yield self.addBuildsetForSourceStamp(
            setid=setid,
            reason=reason,
            external_idstring=external_idstring,
            builderNames=builderNames,
            properties=properties)

        defer.returnValue((bsid, brids))

    @defer.inlineCallbacks
    def addBuildsetForSourceStampDetails(self,
                                         reason='',
                                         external_idstring=None,
                                         branch=None,
                                         repository='',
                                         project='',
                                         revision=None,
                                         builderNames=None,
                                         properties=None):
        """
        Given details about the source code to build, create a source stamp and
        then add a buildset for it.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param branch: branch to build (note that None often has a special meaning)
        @param repository: repository name for sourcestamp
        @param project: project name for sourcestamp
        @param revision: revision to build - default is latest
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        # Define setid for this set of changed repositories
        setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        yield self.master.db.sourcestamps.addSourceStamp(
            branch=branch,
            revision=revision,
            repository=repository,
            project=project,
            sourcestampsetid=setid)

        rv = yield self.addBuildsetForSourceStamp(
            setid=setid,
            reason=reason,
            external_idstring=external_idstring,
            builderNames=builderNames,
            properties=properties)
        defer.returnValue(rv)

    @defer.inlineCallbacks
    def addBuildsetForSourceStampSetDetails(self,
                                            reason,
                                            sourcestamps,
                                            properties,
                                            builderNames=None):
        if sourcestamps is None:
            sourcestamps = {}

        # Define new setid for this set of sourcestamps
        new_setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        # Merge codebases with the passed list of sourcestamps
        # This results in a new sourcestamp for each codebase
        for codebase in self.codebases:
            ss = self.codebases[codebase].copy()
            # apply info from passed sourcestamps onto the configured default
            # sourcestamp attributes for this codebase.
            ss.update(sourcestamps.get(codebase, {}))

            # add sourcestamp to the new setid
            yield self.master.db.sourcestamps.addSourceStamp(
                codebase=codebase,
                repository=ss.get('repository', None),
                branch=ss.get('branch', None),
                revision=ss.get('revision', None),
                project=ss.get('project', ''),
                changeids=[c['number'] for c in ss.get('changes', [])],
                patch_body=ss.get('patch_body', None),
                patch_level=ss.get('patch_level', None),
                patch_author=ss.get('patch_author', None),
                patch_comment=ss.get('patch_comment', None),
                sourcestampsetid=new_setid)

        rv = yield self.addBuildsetForSourceStamp(setid=new_setid,
                                                  reason=reason,
                                                  properties=properties,
                                                  builderNames=builderNames)

        defer.returnValue(rv)

    @defer.inlineCallbacks
    def addBuildsetForChanges(self,
                              reason='',
                              external_idstring=None,
                              changeids=[],
                              builderNames=None,
                              properties=None):
        changesByCodebase = {}

        def get_last_change_for_codebase(codebase):
            return max(changesByCodebase[codebase],
                       key=lambda change: change["changeid"])

        # Define setid for this set of changed repositories
        setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        # Changes are retrieved from database and grouped by their codebase
        for changeid in changeids:
            chdict = yield self.master.db.changes.getChange(changeid)
            # group change by codebase
            changesByCodebase.setdefault(chdict["codebase"], []).append(chdict)

        for codebase in self.codebases:
            args = {'codebase': codebase, 'sourcestampsetid': setid}
            if codebase not in changesByCodebase:
                # codebase has no changes
                # create a sourcestamp that has no changes
                args['repository'] = self.codebases[codebase]['repository']
                args['branch'] = self.codebases[codebase].get('branch', None)
                args['revision'] = self.codebases[codebase].get(
                    'revision', None)
                args['changeids'] = set()
                args['project'] = ''
            else:
                # codebase has changes
                args['changeids'] = [
                    c["changeid"] for c in changesByCodebase[codebase]
                ]
                lastChange = get_last_change_for_codebase(codebase)
                for key in ['repository', 'branch', 'revision', 'project']:
                    args[key] = lastChange[key]

            yield self.master.db.sourcestamps.addSourceStamp(**args)

        # add one buildset, this buildset is connected to the sourcestamps by the setid
        bsid, brids = yield self.addBuildsetForSourceStamp(
            setid=setid,
            reason=reason,
            external_idstring=external_idstring,
            builderNames=builderNames,
            properties=properties)

        defer.returnValue((bsid, brids))

    @defer.inlineCallbacks
    def addBuildsetForSourceStamp(self,
                                  ssid=None,
                                  setid=None,
                                  reason='',
                                  external_idstring=None,
                                  properties=None,
                                  builderNames=None):
        """
        Add a buildset for the given, already-existing sourcestamp.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's
        L{BuildMaster.addBuildset} method with the appropriate parameters, and
        return the same result.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param setid: identification of a set of sourcestamps
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        assert (ssid is None and setid is not None) \
            or (ssid is not None and setid is None), \
            "pass either a single sourcestamp (ssid) or a sourcestamp set (setid), not both"

        # combine properties
        if properties:
            properties.updateFromProperties(self.properties)
        else:
            properties = self.properties

        # apply the default builderNames
        if not builderNames:
            builderNames = self.builderNames

        # translate properties object into a dict as required by the
        # addBuildset method
        properties_dict = properties.asDict()

        if setid is None:
            if ssid is not None:
                ssdict = yield self.master.db.sourcestamps.getSourceStamp(ssid)
                setid = ssdict['sourcestampsetid']
            else:
                # no sourcestamp and no sets
                yield None

        rv = yield self.master.addBuildset(sourcestampsetid=setid,
                                           reason=reason,
                                           properties=properties_dict,
                                           builderNames=builderNames,
                                           external_idstring=external_idstring)
        defer.returnValue(rv)
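

# --- A minimal, hypothetical subclass sketch (not part of the original example) ---
# It shows how the hooks above fit together: startService() registers for
# changes via startConsumingChanges(), and gotChange() turns each important
# change into a buildset with addBuildsetForChanges().  The class name and the
# "one buildset per change" policy are illustrative only.
from twisted.internet import defer


class EagerScheduler(BaseScheduler):

    # per the class docstring, configuration-derived attributes go in compare_attrs
    compare_attrs = BaseScheduler.compare_attrs + ('change_filter',)

    def __init__(self, name, builderNames, properties={}, change_filter=None,
                 **kwargs):
        BaseScheduler.__init__(self, name, builderNames, properties, **kwargs)
        self.change_filter = change_filter

    def startService(self):
        BaseScheduler.startService(self)
        # consider every change that passes the filter to be important
        self.startConsumingChanges(change_filter=self.change_filter)

    @defer.inlineCallbacks
    def gotChange(self, change, important):
        if not important:
            return
        yield self.addBuildsetForChanges(
            reason='change %d on branch %s' % (change.number, change.branch),
            changeids=[change.number])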
Exemplo n.º 40
0
class TestWithProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()

    def testBasic(self):
        # test basic substitution with WithProperties
        self.props.setProperty("revision", "47", "test")
        command = WithProperties("build-%s.tar.gz", "revision")
        self.failUnlessEqual(self.props.render(command), "build-47.tar.gz")

    def testDict(self):
        # test dict-style substitution with WithProperties
        self.props.setProperty("other", "foo", "test")
        command = WithProperties("build-%(other)s.tar.gz")
        self.failUnlessEqual(self.props.render(command), "build-foo.tar.gz")

    def testDictColonMinus(self):
        # test dict-style substitution with WithProperties
        self.props.setProperty("prop1", "foo", "test")
        command = WithProperties(
            "build-%(prop1:-empty)s-%(prop2:-empty)s.tar.gz")
        self.failUnlessEqual(self.props.render(command),
                             "build-foo-empty.tar.gz")

    def testDictColonPlus(self):
        # test dict-style substitution with WithProperties
        self.props.setProperty("prop1", "foo", "test")
        command = WithProperties(
            "build-%(prop1:+exists)s-%(prop2:+exists)s.tar.gz")
        self.failUnlessEqual(self.props.render(command),
                             "build-exists-.tar.gz")

    def testEmpty(self):
        # None should render as ''
        self.props.setProperty("empty", None, "test")
        command = WithProperties("build-%(empty)s.tar.gz")
        self.failUnlessEqual(self.props.render(command), "build-.tar.gz")

    def testRecursiveList(self):
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = [
            WithProperties("%(x)s %(y)s"), "and",
            WithProperties("%(y)s %(x)s")
        ]
        self.failUnlessEqual(self.props.render(command),
                             ["10 20", "and", "20 10"])

    def testRecursiveTuple(self):
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = (WithProperties("%(x)s %(y)s"), "and",
                   WithProperties("%(y)s %(x)s"))
        self.failUnlessEqual(self.props.render(command),
                             ("10 20", "and", "20 10"))

    def testRecursiveDict(self):
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = {
            WithProperties("%(x)s %(y)s"): WithProperties("%(y)s %(x)s")
        }
        self.failUnlessEqual(self.props.render(command), {"10 20": "20 10"})

    def testLambdaSubst(self):
        command = WithProperties('%(foo)s', foo=lambda _: 'bar')
        self.failUnlessEqual(self.props.render(command), 'bar')

    def testLambdaOverride(self):
        self.props.setProperty('x', 10, 'test')
        command = WithProperties('%(x)s', x=lambda _: 20)
        self.failUnlessEqual(self.props.render(command), '20')

    def testLambdaCallable(self):
        self.assertRaises(ValueError,
                          lambda: WithProperties('%(foo)s', foo='bar'))

    def testLambdaUseExisting(self):
        self.props.setProperty('x', 10, 'test')
        self.props.setProperty('y', 20, 'test')
        command = WithProperties('%(z)s', z=lambda pmap: pmap['x'] + pmap['y'])
        self.failUnlessEqual(self.props.render(command), '30')
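

# --- A hypothetical usage sketch (not part of the original example) ---
# The interpolation rules exercised by the tests above are the same ones used
# when WithProperties is placed inside a step definition.  The step and the
# property names are illustrative only; the import paths follow the 0.8.x
# package layout and are assumptions.
from buildbot.process.properties import WithProperties
from buildbot.steps.shell import ShellCommand

package_step = ShellCommand(
    name='package',
    command=['make', 'dist',
             # %(prop:-default)s falls back when the property is missing or None
             WithProperties('VERSION=%(version:-0.0.0)s'),
             # %(prop:+text)s substitutes the text only when the property is set
             WithProperties('%(official:+OFFICIAL=1)s')])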
Exemplo n.º 41
0
class TestWithProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()

    def testBasic(self):
        # test basic substitution with WithProperties
        self.props.setProperty("revision", "47", "test")
        command = WithProperties("build-%s.tar.gz", "revision")
        self.failUnlessEqual(self.props.render(command), "build-47.tar.gz")

    def testDict(self):
        # test dict-style substitution with WithProperties
        self.props.setProperty("other", "foo", "test")
        command = WithProperties("build-%(other)s.tar.gz")
        self.failUnlessEqual(self.props.render(command), "build-foo.tar.gz")

    def testDictColonMinus(self):
        # test dict-style substitution with WithProperties
        self.props.setProperty("prop1", "foo", "test")
        command = WithProperties(
            "build-%(prop1:-empty)s-%(prop2:-empty)s.tar.gz")
        self.failUnlessEqual(self.props.render(command),
                             "build-foo-empty.tar.gz")

    def testDictColonPlus(self):
        # test dict-style substitution with WithProperties
        self.props.setProperty("prop1", "foo", "test")
        command = WithProperties(
            "build-%(prop1:+exists)s-%(prop2:+exists)s.tar.gz")
        self.failUnlessEqual(self.props.render(command),
                             "build-exists-.tar.gz")

    def testEmpty(self):
        # None should render as ''
        self.props.setProperty("empty", None, "test")
        command = WithProperties("build-%(empty)s.tar.gz")
        self.failUnlessEqual(self.props.render(command), "build-.tar.gz")

    def testRecursiveList(self):
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = [
            WithProperties("%(x)s %(y)s"), "and",
            WithProperties("%(y)s %(x)s")
        ]
        self.failUnlessEqual(self.props.render(command),
                             ["10 20", "and", "20 10"])

    def testRecursiveTuple(self):
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = (WithProperties("%(x)s %(y)s"), "and",
                   WithProperties("%(y)s %(x)s"))
        self.failUnlessEqual(self.props.render(command),
                             ("10 20", "and", "20 10"))

    def testRecursiveDict(self):
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = {
            WithProperties("%(x)s %(y)s"): WithProperties("%(y)s %(x)s")
        }
        self.failUnlessEqual(self.props.render(command), {"10 20": "20 10"})
Exemplo n.º 42
0
class TestProperty(unittest.TestCase):
    def setUp(self):
        self.props = Properties()

    def testIntProperty(self):
        self.props.setProperty("do-tests", 1, "scheduler")
        value = Property("do-tests")

        self.failUnlessEqual(self.props.render(value), 1)

    def testStringProperty(self):
        self.props.setProperty("do-tests", "string", "scheduler")
        value = Property("do-tests")

        self.failUnlessEqual(self.props.render(value), "string")

    def testMissingProperty(self):
        value = Property("do-tests")

        self.failUnlessEqual(self.props.render(value), None)

    def testDefaultValue(self):
        value = Property("do-tests", default="Hello!")

        self.failUnlessEqual(self.props.render(value), "Hello!")

    def testIgnoreDefaultValue(self):
        self.props.setProperty("do-tests", "string", "scheduler")
        value = Property("do-tests", default="Hello!")

        self.failUnlessEqual(self.props.render(value), "string")

    def testIgnoreFalseValue(self):
        self.props.setProperty("do-tests-string", "", "scheduler")
        self.props.setProperty("do-tests-int", 0, "scheduler")
        self.props.setProperty("do-tests-list", [], "scheduler")
        self.props.setProperty("do-tests-None", None, "scheduler")

        value = [
            Property("do-tests-string", default="Hello!"),
            Property("do-tests-int", default="Hello!"),
            Property("do-tests-list", default="Hello!"),
            Property("do-tests-None", default="Hello!")
        ]

        self.failUnlessEqual(self.props.render(value), ["Hello!"] * 4)

    def testDefaultWhenFalse(self):
        self.props.setProperty("do-tests-string", "", "scheduler")
        self.props.setProperty("do-tests-int", 0, "scheduler")
        self.props.setProperty("do-tests-list", [], "scheduler")
        self.props.setProperty("do-tests-None", None, "scheduler")

        value = [
            Property("do-tests-string",
                     default="Hello!",
                     defaultWhenFalse=False),
            Property("do-tests-int", default="Hello!", defaultWhenFalse=False),
            Property("do-tests-list", default="Hello!",
                     defaultWhenFalse=False),
            Property("do-tests-None", default="Hello!", defaultWhenFalse=False)
        ]

        self.failUnlessEqual(self.props.render(value), ["", 0, [], None])
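

# --- A hypothetical usage sketch (not part of the original example) ---
# Property objects are normally rendered inside step arguments; the default and
# defaultWhenFalse semantics are exactly those checked above.  The step, the
# import path and the property names are illustrative assumptions.
from buildbot.process.properties import Property
from buildbot.steps.transfer import FileUpload

upload_step = FileUpload(
    # the default is used when the property is missing (or falsy, unless
    # defaultWhenFalse=False is given, as the tests above demonstrate)
    slavesrc=Property('artifact', default='dist/output.tar.gz'),
    masterdest=Property('artifact_dest', default='/srv/artifacts/output.tar.gz'))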
Exemplo n.º 43
0
class AbstractBuildSlave(pb.Avatar, service.MultiService):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['slaves'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a build slave -- a remote machine capable of
    running builds.  I am instantiated by the configuration file, and can be
    subclassed to add extra functionality."""

    implements(IBuildSlave)
    keepalive_timer = None
    keepalive_interval = None

    def __init__(self, name, password, max_builds=None,
                 notify_on_missing=[], missing_timeout=3600,
                 properties={}, locks=None, keepalive_interval=3600):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this buildslave (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this slave
        @type properties: dictionary
        @param locks: A list of locks that must be acquired before this slave
                      can be used
        @type locks: dictionary
        """
        service.MultiService.__init__(self)
        self.slavename = name
        self.password = password
        self.botmaster = None # no buildmaster yet
        self.slave_status = SlaveStatus(name)
        self.slave = None # a RemoteReference to the Bot, when connected
        self.slave_commands = None
        self.slavebuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks

        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")

        self.lastMessageReceived = 0
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            assert isinstance(i, str)
        self.missing_timeout = missing_timeout
        self.missing_timer = None
        self.keepalive_interval = keepalive_interval

        self._old_builder_list = None

    def update(self, new):
        """
        Given a new BuildSlave, configure this one identically.  Because
        BuildSlave objects are remotely referenced, we can't replace them
        without disconnecting the slave, yet there's no reason to do that.
        """
        # the reconfiguration logic should guarantee this:
        assert self.slavename == new.slavename
        assert self.password == new.password
        assert self.__class__ == new.__class__
        self.max_builds = new.max_builds
        self.access = new.access
        self.notify_on_missing = new.notify_on_missing
        self.missing_timeout = new.missing_timeout

        self.properties = Properties()
        self.properties.updateFromProperties(new.properties)

        if self.botmaster:
            self.updateLocks()

    def __repr__(self):
        if self.botmaster:
            builders = self.botmaster.getBuildersForSlave(self.slavename)
            return "<%s '%s', current builders: %s>" % \
               (self.__class__.__name__, self.slavename,
                ','.join(map(lambda b: b.name, builders)))
        else:
            return "<%s '%s', (no builders yet)>" % \
                (self.__class__.__name__, self.slavename)

    def updateLocks(self):
        # convert locks into their real form
        locks = []
        for access in self.access:
            if not isinstance(access, LockAccess):
                access = access.defaultAccess()
            lock = self.botmaster.getLockByID(access.lockid)
            locks.append((lock, access))
        self.locks = [(l.getLock(self), la) for l, la in locks]

    def locksAvailable(self):
        """
        I am called to see if all the locks I depend on are available,
        in which case I return True; otherwise I return False.
        """
        if not self.locks:
            return True
        for lock, access in self.locks:
            if not lock.isAvailable(access):
                return False
        return True

    def acquireLocks(self):
        """
        I am called when a build is preparing to run. I try to claim all
        the locks that are needed for a build to happen. If I can't, then
        my caller should give up the build and try to get another slave
        to look at it.
        """
        log.msg("acquireLocks(slave %s, locks %s)" % (self, self.locks))
        if not self.locksAvailable():
            log.msg("slave %s can't lock, giving up" % (self, ))
            return False
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return True

    def releaseLocks(self):
        """
        I am called to release any locks after a build has finished
        """
        log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)

    def setBotmaster(self, botmaster):
        assert not self.botmaster, "BuildSlave already has a botmaster"
        self.botmaster = botmaster
        self.updateLocks()
        self.startMissingTimer()

    def stopMissingTimer(self):
        if self.missing_timer:
            self.missing_timer.cancel()
            self.missing_timer = None

    def startMissingTimer(self):
        if self.notify_on_missing and self.missing_timeout and self.parent:
            self.stopMissingTimer() # in case it's already running
            self.missing_timer = reactor.callLater(self.missing_timeout,
                                                self._missing_timer_fired)

    def doKeepalive(self):
        self.keepalive_timer = reactor.callLater(self.keepalive_interval,
                                                self.doKeepalive)
        if not self.slave:
            return
        d = self.slave.callRemote("print", "Received keepalive from master")
        d.addErrback(log.msg, "Keepalive failed for '%s'" % (self.slavename, ))

    def stopKeepaliveTimer(self):
        if self.keepalive_timer:
            self.keepalive_timer.cancel()

    def startKeepaliveTimer(self):
        assert self.keepalive_interval
        log.msg("Starting buildslave keepalive timer for '%s'" % \
                                        (self.slavename, ))
        self.doKeepalive()

    def recordConnectTime(self):
        if self.slave_status:
            self.slave_status.recordConnectTime()

    def isConnected(self):
        return self.slave

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return

        buildmaster = self.botmaster.parent
        status = buildmaster.getStatus()
        text = "The Buildbot working for '%s'\n" % status.getProjectName()
        text += ("has noticed that the buildslave named %s went away\n" %
                 self.slavename)
        text += "\n"
        text += ("It last disconnected at %s (buildmaster-local time)\n" %
                 time.ctime(time.time() - self.missing_timeout)) # approx
        text += "\n"
        text += "The admin on record (as reported by BUILDSLAVE:info/admin)\n"
        text += "was '%s'.\n" % self.slave_status.getAdmin()
        text += "\n"
        text += "Sincerely,\n"
        text += " The Buildbot\n"
        text += " %s\n" % status.getProjectURL()
        subject = "Buildbot: buildslave %s was lost" % self.slavename
        return self._mail_missing_message(subject, text)


    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.slave:
            return self.sendBuilderList()
        else:
            return defer.succeed(None)

    def updateSlaveStatus(self, buildStarted=None, buildFinished=None):
        if buildStarted:
            self.slave_status.buildStarted(buildStarted)
        if buildFinished:
            self.slave_status.buildFinished(buildFinished)

    def attached(self, bot):
        """This is called when the slave connects.

        @return: a Deferred that fires when the attachment is complete
        """

        # the botmaster should ensure this.
        assert not self.isConnected()

        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # we accumulate slave information in this 'state' dictionary, then
        # set it atomically if we make it far enough through the process
        state = {}

        # Reset graceful shutdown status
        self.slave_status.setGraceful(False)
        # We want to know when the graceful shutdown flag changes
        self.slave_status.addGracefulWatcher(self._gracefulChanged)

        d = defer.succeed(None)
        def _log_attachment_on_slave(res):
            d1 = bot.callRemote("print", "attached")
            d1.addErrback(lambda why: None)
            return d1
        d.addCallback(_log_attachment_on_slave)

        def _get_info(res):
            d1 = bot.callRemote("getSlaveInfo")
            def _got_info(info):
                log.msg("Got slaveinfo from '%s'" % self.slavename)
                # TODO: info{} might have other keys
                state["admin"] = info.get("admin")
                state["host"] = info.get("host")
                state["access_uri"] = info.get("access_uri", None)
                state["slave_environ"] = info.get("environ", {})
                state["slave_basedir"] = info.get("basedir", None)
                state["slave_system"] = info.get("system", None)
            def _info_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # maybe an old slave, doesn't implement remote_getSlaveInfo
                log.msg("BuildSlave.info_unavailable")
                log.err(why)
            d1.addCallbacks(_got_info, _info_unavailable)
            return d1
        d.addCallback(_get_info)
        self.startKeepaliveTimer()

        def _get_version(res):
            d = bot.callRemote("getVersion")
            def _got_version(version):
                state["version"] = version
            def _version_unavailable(why):
                why.trap(pb.NoSuchMethod)
                # probably an old slave
                state["version"] = '(unknown)'
            d.addCallbacks(_got_version, _version_unavailable)
            return d
        d.addCallback(_get_version)

        def _get_commands(res):
            d1 = bot.callRemote("getCommands")
            def _got_commands(commands):
                state["slave_commands"] = commands
            def _commands_unavailable(why):
                # probably an old slave
                log.msg("BuildSlave._commands_unavailable")
                if why.check(AttributeError):
                    return
                log.err(why)
            d1.addCallbacks(_got_commands, _commands_unavailable)
            return d1
        d.addCallback(_get_commands)

        def _accept_slave(res):
            self.slave_status.setAdmin(state.get("admin"))
            self.slave_status.setHost(state.get("host"))
            self.slave_status.setAccessURI(state.get("access_uri"))
            self.slave_status.setVersion(state.get("version"))
            self.slave_status.setConnected(True)
            self.slave_commands = state.get("slave_commands")
            self.slave_environ = state.get("slave_environ")
            self.slave_basedir = state.get("slave_basedir")
            self.slave_system = state.get("slave_system")
            self.slave = bot
            if self.slave_system == "win32":
                self.path_module = namedModule("win32path")
            else:
                # most everything accepts / as separator, so posix should be a
                # reasonable fallback
                self.path_module = namedModule("posixpath")
            log.msg("bot attached")
            self.messageReceivedFromSlave()
            self.stopMissingTimer()
            self.botmaster.parent.status.slaveConnected(self.slavename)

            return self.updateSlave()
        d.addCallback(_accept_slave)
        d.addCallback(lambda res: self.botmaster.triggerNewBuildCheck())

        # Finally, the slave gets a reference to this BuildSlave. They
        # receive this later, after we've started using them.
        d.addCallback(lambda res: self)
        return d

    def messageReceivedFromSlave(self):
        now = time.time()
        self.lastMessageReceived = now
        self.slave_status.setLastMessageReceived(now)

    def detached(self, mind):
        self.slave = None
        self.slave_status.removeGracefulWatcher(self._gracefulChanged)
        self.slave_status.setConnected(False)
        log.msg("BuildSlave.detached(%s)" % self.slavename)
        self.botmaster.parent.status.slaveDisconnected(self.slavename)
        self.stopKeepaliveTimer()

    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """

        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)
        # When this Deferred fires, we'll be ready to accept the new slave
        return self._disconnect(self.slave)

    def _disconnect(self, slave):
        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback with one argument, the
        # RemoteReference being disconnected.
        def _disconnected(rref):
            reactor.callLater(0, d.callback, None)
        slave.notifyOnDisconnect(_disconnected)
        tport = slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            #tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
        except:
            # however, these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
        log.msg("waiting for slave to finish disconnecting")

        return d

    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
        blist = [(b.name, b.slavebuilddir) for b in our_builders]
        if blist == self._old_builder_list:
            log.msg("Builder list is unchanged; not calling setBuilderList")
            return defer.succeed(None)

        d = self.slave.callRemote("setBuilderList", blist)
        def sentBuilderList(ign):
            self._old_builder_list = blist
            return ign
        d.addCallback(sentBuilderList)
        return d

    def perspective_keepalive(self):
        self.messageReceivedFromSlave()

    def perspective_shutdown(self):
        log.msg("slave %s wants to shut down" % self.slavename)
        self.slave_status.setGraceful(True)

    def addSlaveBuilder(self, sb):
        self.slavebuilders[sb.builder_name] = sb

    def removeSlaveBuilder(self, sb):
        try:
            del self.slavebuilders[sb.builder_name]
        except KeyError:
            pass

    def buildFinished(self, sb):
        """This is called when a build on this slave is finished."""
        raise NotImplementedError

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this buildslave
        can start a build.  This function can be used to limit overall
        concurrency on the buildslave.
        """
        # If we're waiting to shutdown gracefully, then we shouldn't
        # accept any new jobs.
        if self.slave_status.getGraceful():
            return False

        if self.max_builds:
            active_builders = [sb for sb in self.slavebuilders.values()
                               if sb.isBusy()]
            if len(active_builders) >= self.max_builds:
                return False

        if not self.locksAvailable():
            return False

        return True

    def _mail_missing_message(self, subject, text):
        # first, see if we have a MailNotifier we can use. This gives us a
        # fromaddr and a relayhost.
        buildmaster = self.botmaster.parent
        for st in buildmaster.statusTargets:
            if isinstance(st, MailNotifier):
                break
        else:
            # if not, they get a default MailNotifier, which always uses SMTP
            # to localhost and uses a dummy fromaddr of "buildbot".
            log.msg("buildslave-missing msg using default MailNotifier")
            st = MailNotifier("buildbot")
        # now construct the mail

        m = Message()
        m.set_payload(text)
        m['Date'] = formatdate(localtime=True)
        m['Subject'] = subject
        m['From'] = st.fromaddr
        recipients = self.notify_on_missing
        m['To'] = ", ".join(recipients)
        d = st.sendMessage(m, recipients)
        # return the Deferred for testing purposes
        return d

    def _gracefulChanged(self, graceful):
        """This is called when our graceful shutdown setting changes"""
        self.maybeShutdown()

    @defer.deferredGenerator
    def shutdown(self):
        """Shutdown the slave"""
        if not self.slave:
            log.msg("no remote; slave is already shut down")
            return

        # First, try the "new" way - calling our own remote's shutdown
        # method.  The method was only added in 0.8.3, so ignore NoSuchMethod
        # failures.
        def new_way():
            d = self.slave.callRemote('shutdown')
            d.addCallback(lambda _ : True) # successful shutdown request
            def check_nsm(f):
                f.trap(pb.NoSuchMethod)
                return False # fall through to the old way
            d.addErrback(check_nsm)
            def check_connlost(f):
                f.trap(pb.PBConnectionLost)
                return True # the slave is gone, so call it finished
            d.addErrback(check_connlost)
            return d

        wfd = defer.waitForDeferred(new_way())
        yield wfd
        if wfd.getResult():
            return # done!

        # Now, the old way.  Look for a builder with a remote reference to the
        # client side slave.  If we can find one, then call "shutdown" on the
        # remote builder, which will cause the slave buildbot process to exit.
        def old_way():
            d = None
            for b in self.slavebuilders.values():
                if b.remote:
                    d = b.remote.callRemote("shutdown")
                    break

            if d:
                log.msg("Shutting down (old) slave: %s" % self.slavename)
                # The remote shutdown call will not complete successfully since the
                # buildbot process exits almost immediately after getting the
                # shutdown request.
                # Here we look at the reason why the remote call failed, and if
                # it's because the connection was lost, that means the slave
                # shutdown as expected.
                def _errback(why):
                    if why.check(pb.PBConnectionLost):
                        log.msg("Lost connection to %s" % self.slavename)
                    else:
                        log.err("Unexpected error when trying to shutdown %s" % self.slavename)
                d.addErrback(_errback)
                return d
            log.err("Couldn't find remote builder to shut down slave")
            return defer.succeed(None)
        wfd = defer.waitForDeferred(old_way())
        yield wfd
        wfd.getResult()

    def maybeShutdown(self):
        """Shut down this slave if it has been asked to shut down gracefully,
        and has no active builders."""
        if not self.slave_status.getGraceful():
            return
        active_builders = [sb for sb in self.slavebuilders.values()
                           if sb.isBusy()]
        if active_builders:
            return
        d = self.shutdown()
        d.addErrback(log.err, 'error while shutting down slave')
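

# --- A hypothetical master.cfg fragment (not part of the original example) ---
# It exercises the constructor arguments documented above: max_builds caps
# concurrency in canStartBuild(), notify_on_missing/missing_timeout feed
# _missing_timer_fired(), and the properties dict is applied to every build on
# the slave.  The import path, names and addresses are illustrative assumptions.
from buildbot.buildslave import BuildSlave

c = BuildmasterConfig = {}
c['slaves'] = [
    BuildSlave('linux-bot', 'sekrit',
               max_builds=2,                           # at most two concurrent builds
               notify_on_missing='admin@example.com',  # mailed via _mail_missing_message
               missing_timeout=1200,                   # wait 20 minutes before mailing
               properties={'os': 'linux'}),
]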
Exemplo n.º 44
0
    def forceWithWebRequest(self, owner, builder_name, req):
        """Called by the web UI.
        Authentication is already done, thus owner is passed as argument
        We check the parameters, and launch the build, if everything is correct
        """
        if builder_name not in self.builderNames:
            # in the case of buildAll, this method will be called several times
            # for all the builders
            # we just do nothing on a builder that is not in our builderNames
            return defer.succeed(None)
        master = self.master
        properties = {}
        changeids = []
        # probably need to clean that out later as the IProperty is already a
        # validation mechanism

        validation = master.config.validation
        if self.branch.regex is None:
            self.branch.regex = validation['branch']
        if self.revision.regex is None:
            self.revision.regex = validation['revision']

        for param in self.all_fields:
            if owner and param == self.username:
                continue  # don't enforce username if authenticated
            param.update_from_post(master, properties, changeids, req)

        changeids = [c if isinstance(c, int) else c.number for c in changeids]
        # everything is validated, we can create our source stamp, and buildrequest
        reason = properties[self.reason.name]
        branch = properties[self.branch.name]
        revision = properties[self.revision.name]
        repository = properties[self.repository.name]
        project = properties[self.project.name]
        if owner is None:
            owner = properties[self.username.name]

        std_prop_names = [
            self.branch.name, self.revision.name, self.repository.name,
            self.project.name, self.username.name
        ]
        real_properties = Properties()
        for pname, pvalue in properties.items():
            if pname not in std_prop_names:
                real_properties.setProperty(pname, pvalue, "Force Build Form")

        real_properties.setProperty("owner", owner, "Force Build Form")

        r = ("The web-page 'force build' button was pressed by '%s': %s\n" %
             (owner, reason))

        d = master.db.sourcestampsets.addSourceStampSet()

        def add_master_with_setid(sourcestampsetid):
            master.db.sourcestamps.addSourceStamp(
                sourcestampsetid=sourcestampsetid,
                branch=branch,
                revision=revision,
                project=project,
                repository=repository,
                changeids=changeids)
            return sourcestampsetid

        d.addCallback(add_master_with_setid)

        def got_setid(sourcestampsetid):
            return self.addBuildsetForSourceStamp(builderNames=[builder_name],
                                                  setid=sourcestampsetid,
                                                  reason=r,
                                                  properties=real_properties)

        d.addCallback(got_setid)
        return d
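

# --- A hypothetical configuration sketch (not part of the original example) ---
# forceWithWebRequest() is driven by the parameter objects configured on the
# force scheduler; a typical setup looks roughly like this.  The import path
# and parameter classes follow the 0.8.x ForceScheduler API and are assumptions.
from buildbot.schedulers.forcesched import ForceScheduler, StringParameter

force = ForceScheduler(
    name='force',
    builderNames=['runtests'],
    # extra fields show up in the web form and end up in real_properties
    properties=[StringParameter(name='extra_flags', default='')])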
Exemplo n.º 45
0
def buildUIDSchedFunc(sched, t, ssid):
    """Return a Properties instance with 'builduid' set to a randomly generated
    id."""
    props = Properties()
    props.setProperty('builduid', genBuildUID(), 'buildUIDSchedFunc')
    return props
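

# --- A minimal sketch (not part of the original example) ---
# genBuildUID() is not shown here; a common choice is a random UUID hex string,
# which is all buildUIDSchedFunc needs to stamp onto the Properties instance.
# This stand-in implementation is an assumption, not the original helper.
import uuid


def genBuildUID():
    return uuid.uuid4().hex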
Exemplo n.º 46
0
class BuildSlave(NewCredPerspective, service.MultiService):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['slaves'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a build slave -- a remote machine capable of
    running builds.  I am instantiated by the configuration file, and can be
    subclassed to add extra functionality."""

    implements(IBuildSlave)

    def __init__(self,
                 name,
                 password,
                 max_builds=None,
                 notify_on_missing=[],
                 missing_timeout=3600,
                 properties={}):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this buildslave (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on 
                           this slave
        @type properties: dictionary
        """
        service.MultiService.__init__(self)
        self.slavename = name
        self.password = password
        self.botmaster = None  # no buildmaster yet
        self.slave_status = SlaveStatus(name)
        self.slave = None  # a RemoteReference to the Bot, when connected
        self.slave_commands = None
        self.slavebuilders = []
        self.max_builds = max_builds

        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")

        self.lastMessageReceived = 0
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            assert isinstance(i, str)
        self.missing_timeout = missing_timeout
        self.missing_timer = None

    def update(self, new):
        """
        Given a new BuildSlave, configure this one identically.  Because
        BuildSlave objects are remotely referenced, we can't replace them
        without disconnecting the slave, yet there's no reason to do that.
        """
        # the reconfiguration logic should guarantee this:
        assert self.slavename == new.slavename
        assert self.password == new.password
        assert self.__class__ == new.__class__
        self.max_builds = new.max_builds

    def __repr__(self):
        if self.botmaster:
            builders = self.botmaster.getBuildersForSlave(self.slavename)
            return "<BuildSlave '%s', current builders: %s>" % \
               (self.slavename, ','.join(map(lambda b: b.name, builders)))
        else:
            return "<BuildSlave '%s', (no builders yet)>" % self.slavename

    def setBotmaster(self, botmaster):
        assert not self.botmaster, "BuildSlave already has a botmaster"
        self.botmaster = botmaster

    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.slave:
            return self.sendBuilderList()
        return defer.succeed(None)

    def updateSlaveStatus(self, buildStarted=None, buildFinished=None):
        if buildStarted:
            self.slave_status.buildStarted(buildStarted)
        if buildFinished:
            self.slave_status.buildFinished(buildFinished)

    def attached(self, bot):
        """This is called when the slave connects.

        @return: a Deferred that fires with a suitable pb.IPerspective to
                 give to the slave (i.e. 'self')"""

        if self.slave:
            # uh-oh, we've got a duplicate slave. The most likely
            # explanation is that the slave is behind a slow link, thinks we
            # went away, and has attempted to reconnect, so we've got two
            # "connections" from the same slave, but the previous one is
            # stale. Give the new one precedence.
            log.msg("duplicate slave %s replacing old one" % self.slavename)

            # just in case we've got two identically-configured slaves,
            # report the IP addresses of both so someone can resolve the
            # squabble
            tport = self.slave.broker.transport
            log.msg("old slave was connected from", tport.getPeer())
            log.msg("new slave is from", bot.broker.transport.getPeer())
            d = self.disconnect()
        else:
            d = defer.succeed(None)
        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # we accumulate slave information in this 'state' dictionary, then
        # set it atomically if we make it far enough through the process
        state = {}

        def _log_attachment_on_slave(res):
            d1 = bot.callRemote("print", "attached")
            d1.addErrback(lambda why: None)
            return d1

        d.addCallback(_log_attachment_on_slave)

        def _get_info(res):
            d1 = bot.callRemote("getSlaveInfo")

            def _got_info(info):
                log.msg("Got slaveinfo from '%s'" % self.slavename)
                # TODO: info{} might have other keys
                state["admin"] = info.get("admin")
                state["host"] = info.get("host")

            def _info_unavailable(why):
                # maybe an old slave, doesn't implement remote_getSlaveInfo
                log.msg("BuildSlave.info_unavailable")
                log.err(why)

            d1.addCallbacks(_got_info, _info_unavailable)
            return d1

        d.addCallback(_get_info)

        def _get_commands(res):
            d1 = bot.callRemote("getCommands")

            def _got_commands(commands):
                state["slave_commands"] = commands

            def _commands_unavailable(why):
                # probably an old slave
                log.msg("BuildSlave._commands_unavailable")
                if why.check(AttributeError):
                    return
                log.err(why)

            d1.addCallbacks(_got_commands, _commands_unavailable)
            return d1

        d.addCallback(_get_commands)

        def _accept_slave(res):
            self.slave_status.setAdmin(state.get("admin"))
            self.slave_status.setHost(state.get("host"))
            self.slave_status.setConnected(True)
            self.slave_commands = state.get("slave_commands")
            self.slave = bot
            log.msg("bot attached")
            self.messageReceivedFromSlave()
            if self.missing_timer:
                self.missing_timer.cancel()
                self.missing_timer = None

            return self.updateSlave()

        d.addCallback(_accept_slave)

        # Finally, the slave gets a reference to this BuildSlave. They
        # receive this later, after we've started using them.
        d.addCallback(lambda res: self)
        return d

    def messageReceivedFromSlave(self):
        now = time.time()
        self.lastMessageReceived = now
        self.slave_status.setLastMessageReceived(now)

    def detached(self, mind):
        self.slave = None
        self.slave_status.setConnected(False)
        self.botmaster.slaveLost(self)
        log.msg("BuildSlave.detached(%s)" % self.slavename)
        if self.notify_on_missing and self.parent and not self.missing_timer:
            self.missing_timer = reactor.callLater(self.missing_timeout,
                                                   self._missing_timer_fired)

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return

        # first, see if we have a MailNotifier we can use. This gives us a
        # fromaddr and a relayhost.
        buildmaster = self.botmaster.parent
        status = buildmaster.getStatus()
        for st in buildmaster.statusTargets:
            if isinstance(st, MailNotifier):
                break
        else:
            # if not, they get a default MailNotifier, which always uses SMTP
            # to localhost and uses a dummy fromaddr of "buildbot".
            log.msg("buildslave-missing msg using default MailNotifier")
            st = MailNotifier("buildbot")
        # now construct the mail
        text = "The Buildbot working for '%s'\n" % status.getProjectName()
        text += ("has noticed that the buildslave named %s went away\n" %
                 self.slavename)
        text += "\n"
        text += ("It last disconnected at %s (buildmaster-local time)\n" %
                 time.ctime(time.time() - self.missing_timeout)
                 )  # close enough
        text += "\n"
        text += "The admin on record (as reported by BUILDSLAVE:info/admin)\n"
        text += "was '%s'.\n" % self.slave_status.getAdmin()
        text += "\n"
        text += "Sincerely,\n"
        text += " The Buildbot\n"
        text += " %s\n" % status.getProjectURL()

        m = Message()
        m.set_payload(text)
        m['Date'] = formatdate(localtime=True)
        m['Subject'] = "Buildbot: buildslave %s was lost" % self.slavename
        m['From'] = st.fromaddr
        recipients = self.notify_on_missing
        m['To'] = ", ".join(recipients)
        d = st.sendMessage(m, recipients)
        # return the Deferred for testing purposes
        return d

    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """

        if not self.slave:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % self.slavename)

        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback with one argument, the
        # RemoteReference being disconnected.
        def _disconnected(rref):
            reactor.callLater(0, d.callback, None)

        self.slave.notifyOnDisconnect(_disconnected)
        tport = self.slave.broker.transport
        # this is the polite way to request that a socket be closed
        tport.loseConnection()
        try:
            # but really we don't want to wait for the transmit queue to
            # drain. The remote end is unlikely to ACK the data, so we'd
            # probably have to wait for a (20-minute) TCP timeout.
            #tport._closeSocket()
            # however, doing _closeSocket (whether before or after
            # loseConnection) somehow prevents the notifyOnDisconnect
            # handlers from being run. Bummer.
            tport.offset = 0
            tport.dataBuffer = ""
        except:
            # however, these hacks are pretty internal, so don't blow up if
            # they fail or are unavailable
            log.msg("failed to accelerate the shutdown process")
        log.msg("waiting for slave to finish disconnecting")

        # When this Deferred fires, we'll be ready to accept the new slave
        return d

    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForSlave(self.slavename)
        blist = [(b.name, b.builddir) for b in our_builders]
        d = self.slave.callRemote("setBuilderList", blist)

        def _sent(slist):
            dl = []
            for name, remote in slist.items():
                # use get() since we might have changed our mind since then
                b = self.botmaster.builders.get(name)
                if b:
                    d1 = b.attached(self, remote, self.slave_commands)
                    dl.append(d1)
            return defer.DeferredList(dl)

        def _set_failed(why):
            log.msg("BuildSlave.sendBuilderList (%s) failed" % self)
            log.err(why)
            # TODO: hang up on them?, without setBuilderList we can't use
            # them

        d.addCallbacks(_sent, _set_failed)
        return d

    def perspective_keepalive(self):
        pass

    def addSlaveBuilder(self, sb):
        log.msg("%s adding %s" % (self, sb))
        self.slavebuilders.append(sb)

    def removeSlaveBuilder(self, sb):
        log.msg("%s removing %s" % (self, sb))
        if sb in self.slavebuilders:
            self.slavebuilders.remove(sb)

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this buildslave
        can start a build.  This function can be used to limit overall
        concurrency on the buildslave.
        """
        if self.max_builds:
            active_builders = [sb for sb in self.slavebuilders if sb.isBusy()]
            if len(active_builders) >= self.max_builds:
                return False
        return True
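
For context, a hedged sketch of how a slave like this is typically declared in a master.cfg follows. The slave name, password, recipients and limits are illustrative, and the import path is the one used by 0.8.x-era masters; it may differ in other releases.

from buildbot.buildslave import BuildSlave

c = BuildmasterConfig = {}
c['slaves'] = [
    BuildSlave("example-slave", "example-password",
               max_builds=2,                           # at most two concurrent builds
               notify_on_missing=["admin@example.com"],
               missing_timeout=3600,                   # mail after an hour of silence
               properties={'os': 'linux'}),
]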
Exemplo n.º 47
class TestInterpolateProperties(unittest.TestCase):
    def setUp(self):
        self.props = Properties()
        self.build = FakeBuild(self.props)

    def test_properties(self):
        self.props.setProperty("buildername", "winbld", "test")
        command = Interpolate("echo buildby-%(prop:buildername)s")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo buildby-winbld")
        return d

    def test_property_not_set(self):
        command = Interpolate("echo buildby-%(prop:buildername)s")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo buildby-")
        return d

    def test_property_colon_minus(self):
        command = Interpolate("echo buildby-%(prop:buildername:-blddef)s")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo buildby-blddef")
        return d

    def test_property_colon_tilde_true(self):
        self.props.setProperty("buildername", "winbld", "test")
        command = Interpolate("echo buildby-%(prop:buildername:~blddef)s")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo buildby-winbld")
        return d

    def test_property_colon_tilde_false(self):
        self.props.setProperty("buildername", "", "test")
        command = Interpolate("echo buildby-%(prop:buildername:~blddef)s")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo buildby-blddef")
        return d

    def test_property_colon_plus(self):
        self.props.setProperty("project", "proj1", "test")
        command = Interpolate("echo %(prop:project:+projectdefined)s")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo projectdefined")
        return d

    def test_nested_property(self):
        self.props.setProperty("project", "so long!", "test")
        command = Interpolate("echo '%(prop:missing:~%(prop:project)s)s'")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo 'so long!'")
        return d

    def test_property_substitute_recursively(self):
        self.props.setProperty("project", "proj1", "test")
        command = Interpolate("echo '%(prop:no_such:-%(prop:project)s)s'")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo 'proj1'")
        return d

    def test_property_colon_ternary_present(self):
        self.props.setProperty("project", "proj1", "test")
        command = Interpolate("echo %(prop:project:?:defined:missing)s")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo defined")
        return d

    def test_property_colon_ternary_missing(self):
        command = Interpolate("echo %(prop:project:?|defined|missing)s")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo missing")
        return d

    def test_property_colon_ternary_hash_true(self):
        self.props.setProperty("project", "winbld", "test")
        command = Interpolate("echo buildby-%(prop:project:#?:T:F)s")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo buildby-T")
        return d

    def test_property_colon_ternary_hash_false(self):
        self.props.setProperty("project", "", "test")
        command = Interpolate("echo buildby-%(prop:project:#?|T|F)s")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo buildby-F")
        return d

    def test_property_colon_ternary_substitute_recursively_true(self):
        self.props.setProperty("P", "present", "test")
        self.props.setProperty("one", "proj1", "test")
        self.props.setProperty("two", "proj2", "test")
        command = Interpolate("echo '%(prop:P:?|%(prop:one)s|%(prop:two)s)s'")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo 'proj1'")
        return d

    def test_property_colon_ternary_substitute_recursively_false(self):
        self.props.setProperty("one", "proj1", "test")
        self.props.setProperty("two", "proj2", "test")
        command = Interpolate("echo '%(prop:P:?|%(prop:one)s|%(prop:two)s)s'")
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo 'proj2'")
        return d

    def test_property_colon_ternary_substitute_recursively_delimited_true(
            self):
        self.props.setProperty("P", "present", "test")
        self.props.setProperty("one", "proj1", "test")
        self.props.setProperty("two", "proj2", "test")
        command = Interpolate(
            "echo '%(prop:P:?|%(prop:one:?|true|false)s|%(prop:two:?|false|true)s)s'"
        )
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo 'true'")
        return d

    def test_property_colon_ternary_substitute_recursively_delimited_false(
            self):
        self.props.setProperty("one", "proj1", "test")
        self.props.setProperty("two", "proj2", "test")
        command = Interpolate(
            "echo '%(prop:P:?|%(prop:one:?|true|false)s|%(prop:two:?|false|true)s)s'"
        )
        d = self.build.render(command)
        d.addCallback(self.failUnlessEqual, "echo 'false'")
        return d
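
Outside the test harness, the same placeholder syntax exercised above can be handed to an ordinary shell step. A hedged usage sketch, with illustrative step and property names:

from buildbot.process.properties import Interpolate
from buildbot.steps.shell import ShellCommand

step = ShellCommand(command=[
    "echo",
    Interpolate("buildby-%(prop:buildername:-blddef)s"),  # ':-' falls back to 'blddef'
    Interpolate("%(prop:project:?|defined|missing)s"),     # '?' picks a branch on presence
])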
Exemplo n.º 48
class TestProperty(unittest.TestCase):
    def setUp(self):
        self.props = Properties()
        self.build = FakeBuild(self.props)

    def testIntProperty(self):
        self.props.setProperty("do-tests", 1, "scheduler")
        value = Property("do-tests")

        d = self.build.render(value)
        d.addCallback(self.failUnlessEqual, 1)
        return d

    def testStringProperty(self):
        self.props.setProperty("do-tests", "string", "scheduler")
        value = Property("do-tests")

        d = self.build.render(value)
        d.addCallback(self.failUnlessEqual, "string")
        return d

    def testMissingProperty(self):
        value = Property("do-tests")

        d = self.build.render(value)
        d.addCallback(self.failUnlessEqual, None)
        return d

    def testDefaultValue(self):
        value = Property("do-tests", default="Hello!")

        d = self.build.render(value)
        d.addCallback(self.failUnlessEqual, "Hello!")
        return d

    def testDefaultValueNested(self):
        self.props.setProperty("xxx", 'yyy', "scheduler")
        value = Property("do-tests", default=WithProperties("a-%(xxx)s-b"))

        d = self.build.render(value)
        d.addCallback(self.failUnlessEqual, "a-yyy-b")
        return d

    def testIgnoreDefaultValue(self):
        self.props.setProperty("do-tests", "string", "scheduler")
        value = Property("do-tests", default="Hello!")

        d = self.build.render(value)
        d.addCallback(self.failUnlessEqual, "string")
        return d

    def testIgnoreFalseValue(self):
        self.props.setProperty("do-tests-string", "", "scheduler")
        self.props.setProperty("do-tests-int", 0, "scheduler")
        self.props.setProperty("do-tests-list", [], "scheduler")
        self.props.setProperty("do-tests-None", None, "scheduler")

        value = [
            Property("do-tests-string", default="Hello!"),
            Property("do-tests-int", default="Hello!"),
            Property("do-tests-list", default="Hello!"),
            Property("do-tests-None", default="Hello!")
        ]

        d = self.build.render(value)
        d.addCallback(self.failUnlessEqual, ["Hello!"] * 4)
        return d

    def testDefaultWhenFalse(self):
        self.props.setProperty("do-tests-string", "", "scheduler")
        self.props.setProperty("do-tests-int", 0, "scheduler")
        self.props.setProperty("do-tests-list", [], "scheduler")
        self.props.setProperty("do-tests-None", None, "scheduler")

        value = [
            Property("do-tests-string",
                     default="Hello!",
                     defaultWhenFalse=False),
            Property("do-tests-int", default="Hello!", defaultWhenFalse=False),
            Property("do-tests-list", default="Hello!",
                     defaultWhenFalse=False),
            Property("do-tests-None", default="Hello!", defaultWhenFalse=False)
        ]

        d = self.build.render(value)
        d.addCallback(self.failUnlessEqual, ["", 0, [], None])
        return d

    def testDeferredDefault(self):
        default = DeferredRenderable()
        value = Property("no-such-property", default)
        d = self.build.render(value)
        d.addCallback(self.failUnlessEqual, "default-value")
        default.callback("default-value")
        return d
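
A short usage sketch of the default handling verified above; the command and property names are assumptions, not taken from the tests:

from buildbot.process.properties import Property
from buildbot.steps.shell import ShellCommand

step = ShellCommand(command=[
    "make",
    Property("jobs", default="2"),  # renders "2" when 'jobs' is unset or falsy
    Property("verbosity", default="1", defaultWhenFalse=False),  # keeps "" or 0 if explicitly set
])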
Exemplo n.º 49
    def start(self):
        properties = self.build.getProperties()

        # make a new properties object from a dict rendered by the old
        # properties object
        props_to_set = Properties()
        props_to_set.update(properties.render(self.set_properties), "Trigger")
        for p in self.copy_properties:
            if p not in properties:
                raise RuntimeError(
                    "copy_property '%s' is not set in the triggering build" %
                    p)
            props_to_set.setProperty(
                p, properties[p],
                "%s (in triggering build)" % properties.getPropertySource(p))

        self.running = True

        # (is there an easier way to find the BuildMaster?)
        all_schedulers = self.build.builder.botmaster.parent.allSchedulers()
        all_schedulers = dict([(sch.name, sch) for sch in all_schedulers])
        unknown_schedulers = []
        triggered_schedulers = []

        # TODO: don't fire any schedulers if we discover an unknown one
        for scheduler in self.schedulerNames:
            scheduler = properties.render(scheduler)
            if scheduler in all_schedulers:
                sch = all_schedulers[scheduler]
                if isinstance(sch, Triggerable):
                    triggered_schedulers.append(scheduler)
                else:
                    unknown_schedulers.append(scheduler)
            else:
                unknown_schedulers.append(scheduler)

        if unknown_schedulers:
            self.step_status.setText(['no scheduler:'] + unknown_schedulers)
            return self.end(FAILURE)

        master = self.build.builder.botmaster.parent  # seriously?!
        if self.sourceStamp:
            d = master.db.sourcestamps.addSourceStamp(
                **properties.render(self.sourceStamp))
        elif self.alwaysUseLatest:
            d = defer.succeed(None)
        else:
            ss = self.build.getSourceStamp()
            if self.updateSourceStamp:
                got = properties.getProperty('got_revision')
                if got:
                    ss = ss.getAbsoluteSourceStamp(got)
            d = ss.getSourceStampId(master)

        def start_builds(ssid):
            dl = []
            for scheduler in triggered_schedulers:
                sch = all_schedulers[scheduler]
                dl.append(sch.trigger(ssid, set_props=props_to_set))
            self.step_status.setText(['triggered'] + triggered_schedulers)

            d = defer.DeferredList(dl, consumeErrors=1)
            if self.waitForFinish:
                return d
            else:
                # do something to handle errors
                d.addErrback(
                    log.err,
                    '(ignored) while invoking Triggerable schedulers:')
                self.end(SUCCESS)
                return None

        d.addCallback(start_builds)

        def cb(rclist):
            rc = SUCCESS  # (this rc is not the same variable as that above)
            for was_cb, results in rclist:
                # TODO: make this algo more configurable
                if not was_cb:
                    rc = EXCEPTION
                    log.err(results)
                    break
                if results == FAILURE:
                    rc = FAILURE
            return self.end(rc)

        def eb(why):
            return self.end(FAILURE)

        if self.waitForFinish:
            d.addCallbacks(cb, eb)

        d.addErrback(log.err, '(ignored) while triggering builds:')
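
This step only fires Triggerable schedulers, so the configuration side it expects looks roughly like the hedged sketch below; the scheduler, builder and property names are placeholders, and 'c' is the usual master.cfg dictionary.

from buildbot.schedulers.triggerable import Triggerable
from buildbot.steps.trigger import Trigger

c['schedulers'].append(
    Triggerable(name="downstream", builderNames=["downstream-tests"]))

trigger_step = Trigger(schedulerNames=["downstream"],
                       waitForFinish=True,               # block until the triggered builds finish
                       updateSourceStamp=True,           # reuse got_revision when available
                       copy_properties=["got_revision"],
                       set_properties={"triggered_by": "upstream"})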
Exemplo n.º 50
    @defer.inlineCallbacks
    def start(self):
        config = yield self.getStepConfig()

        ss = self.build.getSourceStamp('')
        got = self.build.getProperty('got_revision')
        if got:
            ss = ss.getAbsoluteSourceStamp(got)

        # Stop the build early if .travis.yml says we should ignore branch
        if ss.branch and not config.can_build_branch(ss.branch):
            defer.returnValue(self.end(SUCCESS))

        # Find the master object
        master = self.build.builder.botmaster.parent

        # Find the scheduler we are going to use to queue actual builds
        all_schedulers = self.build.builder.botmaster.parent.allSchedulers()
        all_schedulers = dict([(sch.name, sch) for sch in all_schedulers])
        sch = all_schedulers[self.scheduler]

        triggered = []

        self.running = True

        for env in config.environments:
            props_to_set = Properties()
            props_to_set.updateFromProperties(self.build.getProperties())
            props_to_set.update(env, ".travis.yml")
            props_to_set.setProperty("spawned_by",
                                     self.build.build_status.number,
                                     "Scheduler")

            ss_setid = yield ss.getSourceStampSetId(master)
            triggered.append(sch.trigger(ss_setid, set_props=props_to_set))

        results = yield defer.DeferredList(triggered, consumeErrors=1)

        was_exception = was_failure = False
        brids = {}

        for was_cb, results in results:
            if isinstance(results, tuple):
                results, some_brids = results
                brids.update(some_brids)

            if not was_cb:
                was_exception = True
                log.err(results)
                continue

            if results == FAILURE:
                was_failure = True

        if was_exception:
            result = EXCEPTION
        elif was_failure:
            result = FAILURE
        else:
            result = SUCCESS

        if brids:
            brid_to_bn = dict((_brid, _bn) for _bn, _brid in brids.iteritems())
            res = yield defer.DeferredList(
                [master.db.builds.getBuildsForRequest(br) for br in brids.values()],
                consumeErrors=1)
            for was_cb, builddicts in res:
                if was_cb:
                    for build in builddicts:
                        bn = brid_to_bn[build['brid']]
                        num = build['number']

                        url = master.status.getURLForBuild(bn, num)
                        self.step_status.addURL("%s #%d" % (bn, num), url)

        defer.returnValue(self.end(result))
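
The config object consumed above is parsed from a .travis.yml; the only structural assumption the loop relies on is that config.environments yields one property dictionary per spawned build, roughly as in this purely illustrative shape:

environments = [
    {"TOX_ENV": "py26"},
    {"TOX_ENV": "py27"},
]
for env in environments:
    print("would spawn one build with properties %r" % (env,))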
Exemplo n.º 51
class AbstractBuildSlave(service.BuildbotService, object):
    """This is the master-side representative for a remote buildbot slave.
    There is exactly one for each slave described in the config file (the
    c['slaves'] list). When buildbots connect in (.attach), they get a
    reference to this instance. The BotMaster object is stashed as the
    .botmaster attribute. The BotMaster is also our '.parent' Service.

    I represent a build slave -- a remote machine capable of
    running builds.  I am instantiated by the configuration file, and can be
    subclassed to add extra functionality."""

    implements(IBuildSlave)

    # reconfig slaves after builders
    reconfig_priority = 64

    def checkConfig(
            self,
            name,
            password,
            max_builds=None,
            notify_on_missing=None,
            missing_timeout=10 * 60,  # Ten minutes
            properties=None,
            locks=None,
            keepalive_interval=3600):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this buildslave (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this slave
        @type properties: dictionary
        @param locks: A list of locks that must be acquired before this slave
                      can be used
        @type locks: dictionary
        """
        self.name = name = ascii2unicode(name)

        if properties is None:
            properties = {}

        self.password = password

        # protocol registration
        self.registration = None

        # these are set when the service is started
        self.manager = None
        self.buildslaveid = None

        self.slave_status = SlaveStatus(name)
        self.slave_commands = None
        self.slavebuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.lock_subscriptions = []

        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")

        self.lastMessageReceived = 0

        if notify_on_missing is None:
            notify_on_missing = []
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            if not isinstance(i, str):
                config.error('notify_on_missing arg %r is not a string' %
                             (i, ))

        self.missing_timeout = missing_timeout
        self.missing_timer = None

        # a protocol connection, if we're currently connected
        self.conn = None

        self._old_builder_list = None

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.name)

    @property
    def slavename(self):
        # slavename is now an alias to twisted.Service's name
        return self.name

    @property
    def botmaster(self):
        if self.master is None:
            return None
        return self.master.botmaster

    def updateLocks(self):
        """Convert the L{LockAccess} objects in C{self.locks} into real lock
        objects, while also maintaining the subscriptions to lock releases."""
        # unsubscribe from any old locks
        for s in self.lock_subscriptions:
            s.unsubscribe()

        # convert locks into their real form
        locks = [(self.botmaster.getLockFromLockAccess(a), a)
                 for a in self.access]
        self.locks = [(l.getLock(self), la) for l, la in locks]
        self.lock_subscriptions = [
            l.subscribeToReleases(self._lockReleased) for l, la in self.locks
        ]

    def locksAvailable(self):
        """
        I am called to see if all the locks I depend on are available,
        in which I return True, otherwise I return False
        """
        if not self.locks:
            return True
        for lock, access in self.locks:
            if not lock.isAvailable(self, access):
                return False
        return True

    def acquireLocks(self):
        """
        I am called when a build is preparing to run. I try to claim all
        the locks that are needed for a build to happen. If I can't, then
        my caller should give up the build and try to get another slave
        to look at it.
        """
        log.msg("acquireLocks(slave %s, locks %s)" % (self, self.locks))
        if not self.locksAvailable():
            log.msg("slave %s can't lock, giving up" % (self, ))
            return False
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return True

    def releaseLocks(self):
        """
        I am called to release any locks after a build has finished
        """
        log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)

    def _lockReleased(self):
        """One of the locks for this slave was released; try scheduling
        builds."""
        if not self.botmaster:
            return  # oh well..
        self.botmaster.maybeStartBuildsForSlave(self.name)

    def _applySlaveInfo(self, info):
        if not info:
            return

        self.slave_status.setAdmin(info.get("admin"))
        self.slave_status.setHost(info.get("host"))
        self.slave_status.setAccessURI(info.get("access_uri", None))
        self.slave_status.setVersion(info.get("version", "(unknown)"))

    @defer.inlineCallbacks
    def _getSlaveInfo(self):
        buildslave = yield self.master.data.get(
            ('buildslaves', self.buildslaveid))
        self._applySlaveInfo(buildslave['slaveinfo'])

    def setServiceParent(self, parent):
        # botmaster needs to be set before setServiceParent, which calls startService

        self.manager = parent
        return service.BuildbotService.setServiceParent(self, parent)

    @defer.inlineCallbacks
    def startService(self):
        self.updateLocks()
        self.startMissingTimer()
        self.buildslaveid = yield self.master.data.updates.findBuildslaveId(
            self.name)

        yield self._getSlaveInfo()
        yield service.BuildbotService.startService(self)

    @defer.inlineCallbacks
    def reconfigService(self,
                        name,
                        password,
                        max_builds=None,
                        notify_on_missing=None,
                        missing_timeout=3600,
                        properties=None,
                        locks=None,
                        keepalive_interval=3600):
        # Given a BuildSlave config arguments, configure this one identically.
        # Because BuildSlave objects are remotely referenced, we can't replace them
        # without disconnecting the slave, yet there's no reason to do that.

        assert self.name == name
        self.password = password

        # adopt new instance's configuration parameters
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.notify_on_missing = notify_on_missing

        if self.missing_timeout != missing_timeout:
            running_missing_timer = self.missing_timer
            self.stopMissingTimer()
            self.missing_timeout = missing_timeout
            if running_missing_timer:
                self.startMissingTimer()

        if properties is None:
            properties = {}
        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")

        # update our records with the buildslave manager
        if not self.registration:
            self.registration = yield self.master.buildslaves.register(self)
        yield self.registration.update(self, self.master.config)

        self.updateLocks()

        bids = [
            b._builderid for b in self.botmaster.getBuildersForSlave(self.name)
        ]
        yield self.master.data.updates.buildslaveConfigured(
            self.buildslaveid, self.master.masterid, bids)

        # update the attached slave's notion of which builders are attached.
        # This assumes that the relevant builders have already been configured,
        # which is why the reconfig_priority is set low in this class.
        yield self.updateSlave()

    @defer.inlineCallbacks
    def stopService(self):
        if self.registration:
            yield self.registration.unregister()
            self.registration = None
        self.stopMissingTimer()
        yield service.BuildbotService.stopService(self)

    def startMissingTimer(self):
        if self.notify_on_missing and self.missing_timeout and self.parent:
            self.stopMissingTimer()  # in case it's already running
            self.missing_timer = reactor.callLater(self.missing_timeout,
                                                   self._missing_timer_fired)

    def stopMissingTimer(self):
        if self.missing_timer:
            self.missing_timer.cancel()
            self.missing_timer = None

    def isConnected(self):
        return self.conn

    def _missing_timer_fired(self):
        self.missing_timer = None
        # notify people, but only if we're still in the config
        if not self.parent:
            return

        buildmaster = self.botmaster.master
        status = buildmaster.getStatus()
        text = "The Buildbot working for '%s'\n" % status.getTitle()
        text += ("has noticed that the buildslave named %s went away\n" %
                 self.name)
        text += "\n"
        text += ("It last disconnected at %s (buildmaster-local time)\n" %
                 time.ctime(time.time() - self.missing_timeout))  # approx
        text += "\n"
        text += "The admin on record (as reported by BUILDSLAVE:info/admin)\n"
        text += "was '%s'.\n" % self.slave_status.getAdmin()
        text += "\n"
        text += "Sincerely,\n"
        text += " The Buildbot\n"
        text += " %s\n" % status.getTitleURL()
        text += "\n"
        text += "%s\n" % status.getURLForThing(self.slave_status)
        subject = "Buildbot: buildslave %s was lost" % (self.name, )
        return self._mail_missing_message(subject, text)

    def updateSlave(self):
        """Called to add or remove builders after the slave has connected.

        @return: a Deferred that indicates when an attached slave has
        accepted the new builders and/or released the old ones."""
        if self.conn:
            return self.sendBuilderList()
        else:
            return defer.succeed(None)

    def updateSlaveStatus(self, buildStarted=None, buildFinished=None):
        # TODO
        pass

    @defer.inlineCallbacks
    def attached(self, conn):
        """This is called when the slave connects."""

        metrics.MetricCountEvent.log("AbstractBuildSlave.attached_slaves", 1)

        # now we go through a sequence of calls, gathering information, then
        # tell the Botmaster that it can finally give this slave to all the
        # Builders that care about it.

        # Reset graceful shutdown status
        self.slave_status.setGraceful(False)
        # We want to know when the graceful shutdown flag changes
        self.slave_status.addGracefulWatcher(self._gracefulChanged)
        self.conn = conn
        self._old_builder_list = None  # clear builder list before proceed
        self.slave_status.addPauseWatcher(self._pauseChanged)

        self.slave_status.setConnected(True)

        self._applySlaveInfo(conn.info)
        self.slave_commands = conn.info.get("slave_commands", {})
        self.slave_environ = conn.info.get("environ", {})
        self.slave_basedir = conn.info.get("basedir", None)
        self.slave_system = conn.info.get("system", None)

        self.conn.notifyOnDisconnect(self.detached)

        slaveinfo = {
            'admin': conn.info.get('admin'),
            'host': conn.info.get('host'),
            'access_uri': conn.info.get('access_uri'),
            'version': conn.info.get('version')
        }

        yield self.master.data.updates.buildslaveConnected(
            buildslaveid=self.buildslaveid,
            masterid=self.master.masterid,
            slaveinfo=slaveinfo)

        if self.slave_system == "nt":
            self.path_module = namedModule("ntpath")
        else:
            # most everything accepts / as separator, so posix should be a
            # reasonable fallback
            self.path_module = namedModule("posixpath")
        log.msg("bot attached")
        self.messageReceivedFromSlave()
        self.stopMissingTimer()
        self.master.status.slaveConnected(self.name)
        yield self.updateSlave()
        yield self.botmaster.maybeStartBuildsForSlave(self.name)

    def messageReceivedFromSlave(self):
        now = time.time()
        self.lastMessageReceived = now
        self.slave_status.setLastMessageReceived(now)

    @defer.inlineCallbacks
    def detached(self):
        metrics.MetricCountEvent.log("AbstractBuildSlave.attached_slaves", -1)
        self.conn = None
        self._old_builder_list = []
        self.slave_status.removeGracefulWatcher(self._gracefulChanged)
        self.slave_status.removePauseWatcher(self._pauseChanged)
        self.slave_status.setConnected(False)
        log.msg("BuildSlave.detached(%s)" % (self.name, ))
        self.master.status.slaveDisconnected(self.name)
        self.releaseLocks()
        yield self.master.data.updates.buildslaveDisconnected(
            buildslaveid=self.buildslaveid,
            masterid=self.master.masterid,
        )

    def disconnect(self):
        """Forcibly disconnect the slave.

        This severs the TCP connection and returns a Deferred that will fire
        (with None) when the connection is probably gone.

        If the slave is still alive, they will probably try to reconnect
        again in a moment.

        This is called in two circumstances. The first is when a slave is
        removed from the config file. In this case, when they try to
        reconnect, they will be rejected as an unknown slave. The second is
        when we wind up with two connections for the same slave, in which
        case we disconnect the older connection.
        """

        if self.conn is None:
            return defer.succeed(None)
        log.msg("disconnecting old slave %s now" % (self.name, ))
        # When this Deferred fires, we'll be ready to accept the new slave
        return self._disconnect(self.conn)

    def _disconnect(self, conn):
        # all kinds of teardown will happen as a result of
        # loseConnection(), but it happens after a reactor iteration or
        # two. Hook the actual disconnect so we can know when it is safe
        # to connect the new slave. We have to wait one additional
        # iteration (with callLater(0)) to make sure the *other*
        # notifyOnDisconnect handlers have had a chance to run.
        d = defer.Deferred()

        # notifyOnDisconnect runs the callback
        def _disconnected():
            eventually(d.callback, None)

        conn.notifyOnDisconnect(_disconnected)
        conn.loseConnection()
        log.msg("waiting for slave to finish disconnecting")

        return d

    def sendBuilderList(self):
        our_builders = self.botmaster.getBuildersForSlave(self.name)
        blist = [(b.name, b.config.slavebuilddir) for b in our_builders]
        if blist == self._old_builder_list:
            return defer.succeed(None)

        d = self.conn.remoteSetBuilderList(builders=blist)

        @d.addCallback
        def sentBuilderList(ign):
            self._old_builder_list = blist
            return ign

        return d

    def shutdownRequested(self):
        log.msg("slave %s wants to shut down" % (self.name, ))
        self.slave_status.setGraceful(True)

    def addSlaveBuilder(self, sb):
        self.slavebuilders[sb.builder_name] = sb

    def removeSlaveBuilder(self, sb):
        try:
            del self.slavebuilders[sb.builder_name]
        except KeyError:
            pass

    def buildFinished(self, sb):
        """This is called when a build on this slave is finished."""
        self.botmaster.maybeStartBuildsForSlave(self.name)

    def canStartBuild(self):
        """
        I am called when a build is requested to see if this buildslave
        can start a build.  This function can be used to limit overall
        concurrency on the buildslave.

        Note for subclassers: if a slave can become willing to start a build
        without any action on that slave (for example, by a resource in use on
        another slave becoming available), then you must arrange for
        L{maybeStartBuildsForSlave} to be called at that time, or builds on
        this slave will not start.
        """

        if self.slave_status.isPaused():
            return False

        # If we're waiting to shutdown gracefully, then we shouldn't
        # accept any new jobs.
        if self.slave_status.getGraceful():
            return False

        if self.max_builds:
            active_builders = [
                sb for sb in self.slavebuilders.values() if sb.isBusy()
            ]
            if len(active_builders) >= self.max_builds:
                return False

        if not self.locksAvailable():
            return False

        return True

    def _mail_missing_message(self, subject, text):
        # FIXME: This should be handled properly via the event api
        # we should send a missing message on the mq, and let any reporter handle that

        # first, see if we have a MailNotifier we can use. This gives us a
        # fromaddr and a relayhost.
        buildmaster = self.botmaster.master
        for st in buildmaster.services:
            if isinstance(st, MailNotifier):
                break
        else:
            # if not, they get a default MailNotifier, which always uses SMTP
            # to localhost and uses a dummy fromaddr of "buildbot".
            log.msg("buildslave-missing msg using default MailNotifier")
            st = MailNotifier("buildbot")
        # now construct the mail

        m = Message()
        m.set_payload(text)
        m['Date'] = formatdate(localtime=True)
        m['Subject'] = subject
        m['From'] = st.fromaddr
        recipients = self.notify_on_missing
        m['To'] = ", ".join(recipients)
        d = st.sendMessage(m, recipients)
        # return the Deferred for testing purposes
        return d

    def _gracefulChanged(self, graceful):
        """This is called when our graceful shutdown setting changes"""
        self.maybeShutdown()

    @defer.inlineCallbacks
    def shutdown(self):
        """Shutdown the slave"""
        if not self.conn:
            log.msg("no remote; slave is already shut down")
            return

        yield self.conn.remoteShutdown()

    def maybeShutdown(self):
        """Shut down this slave if it has been asked to shut down gracefully,
        and has no active builders."""
        if not self.slave_status.getGraceful():
            return
        active_builders = [
            sb for sb in self.slavebuilders.values() if sb.isBusy()
        ]
        if active_builders:
            return
        d = self.shutdown()
        d.addErrback(log.err, 'error while shutting down slave')

    def _pauseChanged(self, paused):
        if paused is True:
            self.botmaster.master.status.slavePaused(self.name)
        else:
            self.botmaster.master.status.slaveUnpaused(self.name)

    def pause(self):
        """Stop running new builds on the slave."""
        self.slave_status.setPaused(True)

    def unpause(self):
        """Restart running new builds on the slave."""
        self.slave_status.setPaused(False)
        self.botmaster.maybeStartBuildsForSlave(self.name)

    def isPaused(self):
        return self.slave_status.isPaused()
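
The class docstring invites subclassing to add behaviour; here is a hedged sketch of the kind of override it allows (the nightly time-window policy is purely illustrative and not part of Buildbot):

class NightWorkOnlySlave(AbstractBuildSlave):
    """Illustrative subclass: refuse new builds outside a nightly window."""

    def canStartBuild(self):
        hour = time.localtime().tm_hour
        if not (0 <= hour < 6):     # only accept builds between 00:00 and 06:00
            return False
        return AbstractBuildSlave.canStartBuild(self)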