def setupConfig(self, configFunc):
    """
    Set up and start a master configured by ``configFunc``.

    @type configFunc: string
    @param configFunc: name of a no-argument function, defined in the
        test module, that returns a BuildmasterConfig object.
    """
    self.basedir = os.path.abspath('basdir')
    self.setUpDirs(self.basedir)
    self.configfile = os.path.join(self.basedir, 'master.cfg')
    if self.proto == 'pb':
        proto = '{"pb": {"port": "tcp:0:interface=127.0.0.1"}}'
    elif self.proto == 'null':
        proto = '{"null": {}}'
    else:
        # previously an unknown proto fell through silently and crashed
        # below with an UnboundLocalError; fail fast with a clear message
        raise ValueError("unsupported protocol: %r" % (self.proto,))
    # We create a master.cfg, which loads the configuration from the
    # test module. Only the slave config is kept there, as it should not
    # be changed.
    # Use a context manager so the file is closed (and flushed) before the
    # master reads it -- the original leaked the open file object.
    with open(self.configfile, "w") as f:
        f.write(textwrap.dedent("""
            from buildbot.buildslave import BuildSlave
            from %s import %s
            c = BuildmasterConfig = %s()
            c['slaves'] = [BuildSlave("local1", "localpw")]
            c['protocols'] = %s
            """ % (self.__class__.__module__, configFunc, configFunc, proto)))
    # create the master and set its config
    m = BuildMaster(self.basedir, self.configfile)
    self.master = m

    # update the DB
    yield m.db.setup(check_version=False)
    yield m.db.model.upgrade()

    # stub out m.db.setup since it was already called above
    m.db.setup = lambda: None

    # mock reactor.stop (which trial *really* doesn't
    # like test code to call!)
    mock_reactor = mock.Mock(spec=reactor)
    mock_reactor.callWhenRunning = reactor.callWhenRunning

    # start the service
    yield m.startService(_reactor=mock_reactor)
    self.failIf(mock_reactor.stop.called,
                "startService tried to stop the reactor; check logs")

    if self.proto == 'pb':
        # We find out the slave port automatically
        slavePort = list(itervalues(
            m.pbmanager.dispatchers))[0].port.getHost().port
        # create a slave, and attach it to the master; it will be started
        # and stopped along with the master
        s = BuildSlave("127.0.0.1", slavePort, "local1", "localpw",
                       self.basedir, False, False)
    elif self.proto == 'null':
        s = LocalBuildSlave("local1", self.basedir, False)
    s.setServiceParent(m)
def setupConfig(self, configFunc):
    """
    Set up and start a master configured by ``configFunc``.

    @type configFunc: string
    @param configFunc: name of a no-argument function, defined in the
        test module, that returns a BuildmasterConfig object.
    """
    self.basedir = os.path.abspath('basdir')
    self.setUpDirs(self.basedir)
    self.configfile = os.path.join(self.basedir, 'master.cfg')
    if self.proto == 'pb':
        proto = '{"pb": {"port": "tcp:0:interface=127.0.0.1"}}'
    elif self.proto == 'null':
        proto = '{"null": {}}'
    else:
        # previously an unknown proto fell through silently and crashed
        # below with an UnboundLocalError; fail fast with a clear message
        raise ValueError("unsupported protocol: %r" % (self.proto,))
    # We create a master.cfg, which loads the configuration from the
    # test module. Only the slave config is kept there, as it should not
    # be changed.
    # Use a context manager so the file is closed (and flushed) before the
    # master reads it -- the original leaked the open file object.
    with open(self.configfile, "w") as f:
        f.write(textwrap.dedent("""
            from buildbot.buildslave import BuildSlave
            from %s import %s
            c = BuildmasterConfig = %s()
            c['slaves'] = [BuildSlave("local1", "localpw")]
            c['protocols'] = %s
            """ % (self.__class__.__module__, configFunc, configFunc, proto)))
    # create the master and set its config
    m = BuildMaster(self.basedir, self.configfile)
    self.master = m

    # update the DB
    yield m.db.setup(check_version=False)
    yield m.db.model.upgrade()

    # stub out m.db.setup since it was already called above
    m.db.setup = lambda: None

    # mock reactor.stop (which trial *really* doesn't
    # like test code to call!)
    mock_reactor = mock.Mock(spec=reactor)
    mock_reactor.callWhenRunning = reactor.callWhenRunning

    # start the service
    yield m.startService(_reactor=mock_reactor)
    self.failIf(mock_reactor.stop.called,
                "startService tried to stop the reactor; check logs")

    if self.proto == 'pb':
        # We find out the slave port automatically
        slavePort = list(itervalues(
            m.pbmanager.dispatchers))[0].port.getHost().port
        # create a slave, and attach it to the master; it will be started
        # and stopped along with the master
        s = BuildSlave("127.0.0.1", slavePort, "local1", "localpw",
                       self.basedir, False, False)
    elif self.proto == 'null':
        s = LocalBuildSlave("local1", self.basedir, False)
    s.setServiceParent(m)
def setupConfig(self, config_dict):
    """
    Set up and start a master from an in-memory configuration.

    @type config_dict: dict
    @param config_dict: the BuildmasterConfig dictionary.
    """
    # NOTE: this is an inlineCallbacks-style generator; the decorator is
    # presumably applied where the method is defined -- not visible here.
    self.basedir = os.path.abspath('basdir')
    self.setUpDirs(self.basedir)

    # mock reactor.stop (which trial *really* doesn't
    # like test code to call!)
    mock_reactor = mock.Mock(spec=reactor)
    # delegate the pieces of the reactor the master legitimately needs
    # back to the real reactor
    mock_reactor.callWhenRunning = reactor.callWhenRunning
    mock_reactor.getThreadPool = reactor.getThreadPool
    mock_reactor.callFromThread = reactor.callFromThread

    workerclass = worker.Worker
    if self.proto == 'pb':
        # port 0 lets the OS pick a free port; it is looked up below
        proto = {"pb": {"port": "tcp:0:interface=127.0.0.1"}}
    elif self.proto == 'null':
        proto = {"null": {}}
        workerclass = worker.LocalWorker
    # NOTE(review): an unrecognized self.proto leaves `proto` unbound and
    # fails with UnboundLocalError on the next line -- confirm intended
    config_dict['workers'] = [workerclass("local1", "localpw")]
    config_dict['protocols'] = proto

    # create the master and set its config
    m = BuildMaster(self.basedir, reactor=mock_reactor,
                    config_loader=DictLoader(config_dict))
    self.master = m

    # update the DB
    yield m.db.setup(check_version=False)
    yield m.db.model.upgrade()

    # stub out m.db.setup since it was already called above
    m.db.setup = lambda: None

    # start the service
    yield m.startService()
    self.failIf(mock_reactor.stop.called,
                "startService tried to stop the reactor; check logs")

    if self.proto == 'pb':
        # We find out the worker port automatically
        workerPort = list(itervalues(
            m.pbmanager.dispatchers))[0].port.getHost().port
        # create a worker, and attach it to the master, it will be started,
        # and stopped along with the master
        self.w = BuildSlave("127.0.0.1", workerPort, "local1", "localpw",
                            self.basedir, False, False)
    elif self.proto == 'null':
        # a LocalWorker was already placed in the config above; no
        # separate worker service is needed
        self.w = None
    if self.w is not None:
        self.w.setServiceParent(m)
def setUp(self):
    """
    Create a basedir with a generated master.cfg, start a real master
    on it, and attach a PB slave to the master.  The master.cfg loads
    its configuration from the ``masterConfig`` function of the test
    module; only the slave configuration is kept in the file.
    """
    self.basedir = os.path.abspath('basdir')
    self.setUpDirs(self.basedir)
    self.configfile = os.path.join(self.basedir, 'master.cfg')
    # We create a master.cfg, which loads the configuration from the
    # test module. Only the slave config is kept there, as it should not
    # be changed.
    # Use a context manager so the file is closed (and flushed) before the
    # master reads it -- the original leaked the open file object.
    with open(self.configfile, "w") as f:
        f.write(textwrap.dedent("""
            from buildbot.buildslave import BuildSlave
            from %s import masterConfig
            c = BuildmasterConfig = masterConfig()
            c['slaves'] = [BuildSlave("local1", "localpw")]
            c['protocols'] = {"pb": {"port": "tcp:0:interface=127.0.0.1"}}
            """ % self.__class__.__module__))
    # create the master and set its config
    m = BuildMaster(self.basedir, self.configfile)
    self.master = m

    # update the DB
    yield m.db.setup(check_version=False)
    yield m.db.model.upgrade()

    # stub out m.db.setup since it was already called above
    m.db.setup = lambda: None

    # mock reactor.stop (which trial *really* doesn't
    # like test code to call!)
    mock_reactor = mock.Mock(spec=reactor)
    mock_reactor.callWhenRunning = reactor.callWhenRunning

    # start the service
    yield m.startService(_reactor=mock_reactor)
    self.failIf(mock_reactor.stop.called,
                "startService tried to stop the reactor; check logs")

    # We find out the slave port automatically
    slavePort = m.pbmanager.dispatchers.values()[0].port.getHost().port

    # create a slave, and attach it to the master; it will be started
    # and stopped along with the master
    s = BuildSlave("127.0.0.1", slavePort, "local1", "localpw",
                   self.basedir, False, False)
    s.setServiceParent(m)
def setupConfig(self, config_dict, startWorker=True):
    """
    Set up and start a master from an in-memory configuration.

    @type config_dict: dict
    @param config_dict: the BuildmasterConfig dictionary.
    @param startWorker: when True, also configure and start a worker
        attached to the master.
    """
    self.basedir = os.path.abspath('basdir')
    self.setUpDirs(self.basedir)
    self.addCleanup(self.tearDownDirs)

    # mock reactor.stop (which trial *really* doesn't
    # like test code to call!)
    stop = mock.create_autospec(reactor.stop)
    self.patch(reactor, 'stop', stop)

    if startWorker:
        if self.proto == 'pb':
            # port 0 lets the OS pick a free port; it is looked up below
            proto = {"pb": {"port": "tcp:0:interface=127.0.0.1"}}
            workerclass = worker.Worker
        elif self.proto == 'null':
            proto = {"null": {}}
            workerclass = worker.LocalWorker
        # NOTE(review): an unrecognized self.proto leaves proto/workerclass
        # unbound and fails with UnboundLocalError here -- confirm intended
        config_dict['workers'] = [workerclass("local1", "localpw")]
        config_dict['protocols'] = proto

    # create the master and set its config
    m = BuildMaster(self.basedir, reactor=reactor,
                    config_loader=DictLoader(config_dict))
    self.master = m

    # update the DB
    yield m.db.setup(check_version=False)
    yield m.db.model.upgrade()

    # stub out m.db.setup since it was already called above
    m.db.setup = lambda: None

    # start the service
    yield m.startService()
    self.failIf(stop.called,
                "startService tried to stop the reactor; check logs")

    # and shutdown the db threadpool, as is normally done at reactor stop
    self.addCleanup(m.db.pool.shutdown)
    self.addCleanup(m.stopService)

    if not startWorker:
        return

    if self.proto == 'pb':
        # We find out the worker port automatically
        workerPort = list(itervalues(
            m.pbmanager.dispatchers))[0].port.getHost().port
        # create a worker, and attach it to the master, it will be started,
        # and stopped along with the master
        self.w = BuildSlave("127.0.0.1", workerPort, "local1", "localpw",
                            self.basedir, False, False)
    elif self.proto == 'null':
        self.w = None
    if self.w is not None:
        self.w.startService()
        self.addCleanup(self.w.stopService)

    @defer.inlineCallbacks
    def dump():
        # on test failure, dump the build database into the failure
        # message to aid debugging (the local name deliberately shadows
        # the function name inside the body)
        if not self._passed:
            dump = StringIO.StringIO()
            print("FAILED! dumping build db for debug", file=dump)
            builds = yield self.master.data.get(("builds", ))
            for build in builds:
                yield self.printBuild(build, dump, withLogs=True)
            raise self.failureException(dump.getvalue())
    self.addCleanup(dump)
class RunMasterBase(dirs.DirsMixin, unittest.TestCase):
    # Base class for integration tests that run a real BuildMaster with a
    # worker attached.  Subclasses select the master<->worker protocol by
    # overriding ``proto`` ("pb" or "null").
    proto = "null"

    # buildbot-slave is an optional dependency; skip the whole class when
    # it could not be imported (BuildSlave is then None)
    if BuildSlave is None:
        skip = "buildbot-slave package is not installed"

    @defer.inlineCallbacks
    def setupConfig(self, config_dict, startWorker=True):
        """
        Set up and start a master from an in-memory configuration.

        @type config_dict: dict
        @param config_dict: the BuildmasterConfig dictionary.
        @param startWorker: when True, also configure and start a worker
            attached to the master.
        """
        self.basedir = os.path.abspath('basdir')
        self.setUpDirs(self.basedir)
        self.addCleanup(self.tearDownDirs)

        # mock reactor.stop (which trial *really* doesn't
        # like test code to call!)
        stop = mock.create_autospec(reactor.stop)
        self.patch(reactor, 'stop', stop)

        if startWorker:
            if self.proto == 'pb':
                # port 0 lets the OS pick a free port; it is looked up below
                proto = {"pb": {"port": "tcp:0:interface=127.0.0.1"}}
                workerclass = worker.Worker
            elif self.proto == 'null':
                proto = {"null": {}}
                workerclass = worker.LocalWorker
            config_dict['workers'] = [workerclass("local1", "localpw")]
            config_dict['protocols'] = proto

        # create the master and set its config
        m = BuildMaster(self.basedir, reactor=reactor,
                        config_loader=DictLoader(config_dict))
        self.master = m

        # update the DB
        yield m.db.setup(check_version=False)
        yield m.db.model.upgrade()

        # stub out m.db.setup since it was already called above
        m.db.setup = lambda: None

        # start the service
        yield m.startService()
        self.failIf(stop.called,
                    "startService tried to stop the reactor; check logs")

        # and shutdown the db threadpool, as is normally done at reactor stop
        self.addCleanup(m.db.pool.shutdown)
        self.addCleanup(m.stopService)

        if not startWorker:
            return

        if self.proto == 'pb':
            # We find out the worker port automatically
            workerPort = list(itervalues(
                m.pbmanager.dispatchers))[0].port.getHost().port
            # create a worker, and attach it to the master, it will be
            # started, and stopped along with the master
            self.w = BuildSlave("127.0.0.1", workerPort, "local1", "localpw",
                                self.basedir, False, False)
        elif self.proto == 'null':
            self.w = None
        if self.w is not None:
            self.w.startService()
            self.addCleanup(self.w.stopService)

        @defer.inlineCallbacks
        def dump():
            # on test failure, dump the build database into the failure
            # message to aid debugging (the local name deliberately
            # shadows the function name inside the body)
            if not self._passed:
                dump = StringIO.StringIO()
                print("FAILED! dumping build db for debug", file=dump)
                builds = yield self.master.data.get(("builds", ))
                for build in builds:
                    yield self.printBuild(build, dump, withLogs=True)
                raise self.failureException(dump.getvalue())
        self.addCleanup(dump)

    @defer.inlineCallbacks
    def doForceBuild(self, wantSteps=False, wantProperties=False,
                     wantLogs=False, useChange=False):
        """
        Force a build and wait for it to finish; return the (optionally
        enriched) build dictionary.

        @param useChange: False to use the force scheduler; otherwise a
            dict of keyword arguments for ``addChange``.
        """
        # force a build, and wait until it is finished
        d = defer.Deferred()

        # in order to allow trigger based integration tests
        # we wait until the first started build is finished
        self.firstBuildId = None

        def newCallback(_, data):
            # remember the first build that starts; later builds (e.g.
            # triggered ones) are ignored
            if self.firstBuildId is None:
                self.firstBuildId = data['buildid']
                newConsumer.stopConsuming()

        def finishedCallback(_, data):
            # fire only for the build we are tracking
            if self.firstBuildId == data['buildid']:
                d.callback(data)

        newConsumer = yield self.master.mq.startConsuming(
            newCallback,
            ('builds', None, 'new'))

        finishedConsumer = yield self.master.mq.startConsuming(
            finishedCallback,
            ('builds', None, 'finished'))

        if useChange is False:
            # use data api to force a build
            yield self.master.data.control("force", {},
                                           ("forceschedulers", "force"))
        else:
            # use data api to force a build, via a new change
            yield self.master.data.updates.addChange(**useChange)

        # wait until we receive the build finished event
        build = yield d
        finishedConsumer.stopConsuming()
        yield self.enrichBuild(build, wantSteps, wantProperties, wantLogs)
        defer.returnValue(build)

    @defer.inlineCallbacks
    def enrichBuild(self, build, wantSteps=False, wantProperties=False,
                    wantLogs=False):
        """Augment a build dict in place with steps/logs/properties."""
        # enrich the build result, with the step results
        if wantSteps:
            build["steps"] = yield self.master.data.get(
                ("builds", build['buildid'], "steps"))
            # enrich the step result, with the logs results
            if wantLogs:
                build["steps"] = list(build["steps"])
                for step in build["steps"]:
                    step['logs'] = yield self.master.data.get(
                        ("steps", step['stepid'], "logs"))
                    step["logs"] = list(step['logs'])
                    for log in step["logs"]:
                        log['contents'] = yield self.master.data.get(
                            ("logs", log['logid'], "contents"))

        if wantProperties:
            build["properties"] = yield self.master.data.get(
                ("builds", build['buildid'], "properties"))

    @defer.inlineCallbacks
    def printBuild(self, build, out=sys.stdout, withLogs=False):
        # helper for debugging: print a build
        yield self.enrichBuild(build, wantSteps=True, wantProperties=True,
                               wantLogs=True)
        print("*** BUILD %d *** ==> %s (%s)" % (
            build['buildid'], build['state_string'],
            statusToString(build['results'])), file=out)
        for step in build['steps']:
            print(" *** STEP %s *** ==> %s (%s)" % (
                step['name'], step['state_string'],
                statusToString(step['results'])), file=out)
            for url in step['urls']:
                print(" url:%s (%s)" % (url['name'], url['url']), file=out)
            for log in step['logs']:
                print(" log:%s (%d)" % (log['name'], log['num_lines']),
                      file=out)
                # log contents are printed for failing steps, or always
                # when withLogs is set
                if step['results'] != SUCCESS or withLogs:
                    self.printLog(log, out)

    def printLog(self, log, out):
        # helper for debugging: pretty-print one log to `out`
        print(" " * 8 + "*********** LOG: %s *********" % (log['name'], ),
              file=out)
        if log['type'] == 's':
            # stream logs prefix each line with its type character
            for line in log['contents']['content'].splitlines():
                linetype = line[0]
                line = line[1:]
                if linetype == 'h':
                    # cyan
                    line = "\x1b[36m" + line + "\x1b[0m"
                if linetype == 'e':
                    # red
                    line = "\x1b[31m" + line + "\x1b[0m"
                # NOTE(review): this print is missing file=out, so colored
                # stream lines go to stdout instead of `out` -- confirm
                print(" " * 8 + line)
        else:
            print(log['contents']['content'], file=out)
        print(" " * 8 + "********************************", file=out)
# directory; do not edit it. application = service.Application('buildmaster') import sys from twisted.python.log import ILogObserver, FileLogObserver application.setComponent(ILogObserver, FileLogObserver(sys.stdout).emit) m = BuildMaster(basedir, configfile, umask) m.setServiceParent(application) # and slave on the same process! buildmaster_host = 'localhost' port = 19989 slavename = 'example-slave' passwd = 'pass' keepalive = 600 usepty = 0 umask = None maxdelay = 300 allow_shutdown = None slavedir = os.path.join(basedir, "slave") if not os.path.exists(slavedir): os.mkdir(slavedir) s = BuildSlave(buildmaster_host, port, slavename, passwd, slavedir, keepalive, usepty, umask=umask, maxdelay=maxdelay, allow_shutdown=allow_shutdown) s.setServiceParent(application)
def setupConfig(self, config_dict, startWorker=True):
    """
    Set up and start a master from an in-memory configuration.

    @type config_dict: dict
    @param config_dict: the BuildmasterConfig dictionary.
    @param startWorker: when True, also configure and start a worker
        attached to the master.
    """
    self.basedir = os.path.abspath('basdir')
    self.setUpDirs(self.basedir)
    self.addCleanup(self.tearDownDirs)

    # mock reactor.stop (which trial *really* doesn't
    # like test code to call!)
    stop = mock.create_autospec(reactor.stop)
    self.patch(reactor, 'stop', stop)

    if startWorker:
        if self.proto == 'pb':
            # port 0 lets the OS pick a free port; it is looked up below
            proto = {"pb": {"port": "tcp:0:interface=127.0.0.1"}}
            workerclass = worker.Worker
        elif self.proto == 'null':
            proto = {"null": {}}
            workerclass = worker.LocalWorker
        # NOTE(review): an unrecognized self.proto leaves proto/workerclass
        # unbound and fails with UnboundLocalError here -- confirm intended
        config_dict['workers'] = [workerclass("local1", "localpw")]
        config_dict['protocols'] = proto

    # create the master and set its config
    m = BuildMaster(self.basedir, reactor=reactor,
                    config_loader=DictLoader(config_dict))
    self.master = m

    # update the DB
    yield m.db.setup(check_version=False)
    yield m.db.model.upgrade()

    # stub out m.db.setup since it was already called above
    m.db.setup = lambda: None

    # start the service
    yield m.startService()
    self.failIf(stop.called,
                "startService tried to stop the reactor; check logs")

    # and shutdown the db threadpool, as is normally done at reactor stop
    self.addCleanup(m.db.pool.shutdown)
    self.addCleanup(m.stopService)

    if not startWorker:
        return

    if self.proto == 'pb':
        # We find out the worker port automatically
        workerPort = list(itervalues(m.pbmanager.dispatchers))[
            0].port.getHost().port
        # create a worker, and attach it to the master, it will be started,
        # and stopped along with the master
        self.w = BuildSlave(
            "127.0.0.1", workerPort, "local1", "localpw",
            self.basedir, False, False)
    elif self.proto == 'null':
        self.w = None
    if self.w is not None:
        self.w.startService()
        self.addCleanup(self.w.stopService)

    @defer.inlineCallbacks
    def dump():
        # on test failure, dump the build database into the failure
        # message to aid debugging (the local name deliberately shadows
        # the function name inside the body)
        if not self._passed:
            dump = StringIO.StringIO()
            print("FAILED! dumping build db for debug", file=dump)
            builds = yield self.master.data.get(("builds",))
            for build in builds:
                yield self.printBuild(build, dump, withLogs=True)
            raise self.failureException(dump.getvalue())
    self.addCleanup(dump)
class RunMasterBase(dirs.DirsMixin, unittest.TestCase):
    # Base class for integration tests that run a real BuildMaster with a
    # worker attached.  Subclasses select the master<->worker protocol by
    # overriding ``proto`` ("pb" or "null").
    proto = "null"

    # buildbot-slave is an optional dependency; skip the whole class when
    # it could not be imported (BuildSlave is then None)
    if BuildSlave is None:
        skip = "buildbot-slave package is not installed"

    @defer.inlineCallbacks
    def setupConfig(self, config_dict, startWorker=True):
        """
        Set up and start a master from an in-memory configuration.

        @type config_dict: dict
        @param config_dict: the BuildmasterConfig dictionary.
        @param startWorker: when True, also configure and start a worker
            attached to the master.
        """
        self.basedir = os.path.abspath('basdir')
        self.setUpDirs(self.basedir)
        self.addCleanup(self.tearDownDirs)

        # mock reactor.stop (which trial *really* doesn't
        # like test code to call!)
        stop = mock.create_autospec(reactor.stop)
        self.patch(reactor, 'stop', stop)

        if startWorker:
            if self.proto == 'pb':
                # port 0 lets the OS pick a free port; it is looked up below
                proto = {"pb": {"port": "tcp:0:interface=127.0.0.1"}}
                workerclass = worker.Worker
            elif self.proto == 'null':
                proto = {"null": {}}
                workerclass = worker.LocalWorker
            config_dict['workers'] = [workerclass("local1", "localpw")]
            config_dict['protocols'] = proto

        # create the master and set its config
        m = BuildMaster(self.basedir, reactor=reactor,
                        config_loader=DictLoader(config_dict))
        self.master = m

        # update the DB
        yield m.db.setup(check_version=False)
        yield m.db.model.upgrade()

        # stub out m.db.setup since it was already called above
        m.db.setup = lambda: None

        # start the service
        yield m.startService()
        self.failIf(stop.called,
                    "startService tried to stop the reactor; check logs")

        # and shutdown the db threadpool, as is normally done at reactor stop
        self.addCleanup(m.db.pool.shutdown)
        self.addCleanup(m.stopService)

        if not startWorker:
            return

        if self.proto == 'pb':
            # We find out the worker port automatically
            workerPort = list(itervalues(m.pbmanager.dispatchers))[
                0].port.getHost().port
            # create a worker, and attach it to the master, it will be
            # started, and stopped along with the master
            self.w = BuildSlave(
                "127.0.0.1", workerPort, "local1", "localpw",
                self.basedir, False, False)
        elif self.proto == 'null':
            self.w = None
        if self.w is not None:
            self.w.startService()
            self.addCleanup(self.w.stopService)

        @defer.inlineCallbacks
        def dump():
            # on test failure, dump the build database into the failure
            # message to aid debugging (the local name deliberately
            # shadows the function name inside the body)
            if not self._passed:
                dump = StringIO.StringIO()
                print("FAILED! dumping build db for debug", file=dump)
                builds = yield self.master.data.get(("builds",))
                for build in builds:
                    yield self.printBuild(build, dump, withLogs=True)
                raise self.failureException(dump.getvalue())
        self.addCleanup(dump)

    @defer.inlineCallbacks
    def doForceBuild(self, wantSteps=False, wantProperties=False,
                     wantLogs=False, useChange=False):
        """
        Force a build and wait for it to finish; return the (optionally
        enriched) build dictionary.

        @param useChange: False to use the force scheduler; otherwise a
            dict of keyword arguments for ``addChange``.
        """
        # force a build, and wait until it is finished
        d = defer.Deferred()

        # in order to allow trigger based integration tests
        # we wait until the first started build is finished
        self.firstBuildId = None

        def newCallback(_, data):
            # remember the first build that starts; later builds (e.g.
            # triggered ones) are ignored
            if self.firstBuildId is None:
                self.firstBuildId = data['buildid']
                newConsumer.stopConsuming()

        def finishedCallback(_, data):
            # fire only for the build we are tracking
            if self.firstBuildId == data['buildid']:
                d.callback(data)

        newConsumer = yield self.master.mq.startConsuming(
            newCallback,
            ('builds', None, 'new'))

        finishedConsumer = yield self.master.mq.startConsuming(
            finishedCallback,
            ('builds', None, 'finished'))

        if useChange is False:
            # use data api to force a build
            yield self.master.data.control("force", {},
                                           ("forceschedulers", "force"))
        else:
            # use data api to force a build, via a new change
            yield self.master.data.updates.addChange(**useChange)

        # wait until we receive the build finished event
        build = yield d
        finishedConsumer.stopConsuming()
        yield self.enrichBuild(build, wantSteps, wantProperties, wantLogs)
        defer.returnValue(build)

    @defer.inlineCallbacks
    def enrichBuild(self, build, wantSteps=False, wantProperties=False,
                    wantLogs=False):
        """Augment a build dict in place with steps/logs/properties."""
        # enrich the build result, with the step results
        if wantSteps:
            build["steps"] = yield self.master.data.get(
                ("builds", build['buildid'], "steps"))
            # enrich the step result, with the logs results
            if wantLogs:
                build["steps"] = list(build["steps"])
                for step in build["steps"]:
                    step['logs'] = yield self.master.data.get(
                        ("steps", step['stepid'], "logs"))
                    step["logs"] = list(step['logs'])
                    for log in step["logs"]:
                        log['contents'] = yield self.master.data.get(
                            ("logs", log['logid'], "contents"))

        if wantProperties:
            build["properties"] = yield self.master.data.get(
                ("builds", build['buildid'], "properties"))

    @defer.inlineCallbacks
    def printBuild(self, build, out=sys.stdout, withLogs=False):
        # helper for debugging: print a build
        yield self.enrichBuild(build, wantSteps=True, wantProperties=True,
                               wantLogs=True)
        print("*** BUILD %d *** ==> %s (%s)" % (
            build['buildid'], build['state_string'],
            statusToString(build['results'])), file=out)
        for step in build['steps']:
            print(" *** STEP %s *** ==> %s (%s)" % (
                step['name'], step['state_string'],
                statusToString(step['results'])), file=out)
            for url in step['urls']:
                print(" url:%s (%s)" % (url['name'], url['url']), file=out)
            for log in step['logs']:
                print(" log:%s (%d)" % (log['name'], log['num_lines']),
                      file=out)
                # log contents are printed for failing steps, or always
                # when withLogs is set
                if step['results'] != SUCCESS or withLogs:
                    self.printLog(log, out)

    def printLog(self, log, out):
        # helper for debugging: pretty-print one log to `out`
        print(" " * 8 + "*********** LOG: %s *********" % (log['name'],),
              file=out)
        if log['type'] == 's':
            # stream logs prefix each line with its type character
            for line in log['contents']['content'].splitlines():
                linetype = line[0]
                line = line[1:]
                if linetype == 'h':
                    # cyan
                    line = "\x1b[36m" + line + "\x1b[0m"
                if linetype == 'e':
                    # red
                    line = "\x1b[31m" + line + "\x1b[0m"
                # NOTE(review): this print is missing file=out, so colored
                # stream lines go to stdout instead of `out` -- confirm
                print(" " * 8 + line)
        else:
            print(log['contents']['content'], file=out)
        print(" " * 8 + "********************************", file=out)