def test_worker_multiple_substantiations_succeed(self):
    """
    If multiple builders trigger try to substantiate a worker at
    the same time, if the substantiation succeeds then all of
    the builds proceed.
    """
    controller = LatentController('local')
    config_dict = {
        'builders': [
            BuilderConfig(name="testy-1",
                          workernames=["local"],
                          factory=BuildFactory(),
                          ),
            BuilderConfig(name="testy-2",
                          workernames=["local"],
                          factory=BuildFactory(),
                          ),
        ],
        'workers': [controller.worker],
        'protocols': {'null': {}},
        'multiMaster': True,
    }
    master = self.successResultOf(
        getMaster(self, self.reactor, config_dict))
    builder_ids = [
        self.successResultOf(master.data.updates.findBuilderId('testy-1')),
        self.successResultOf(master.data.updates.findBuilderId('testy-2')),
    ]

    # Collect every finished-build message so the final assertion can
    # verify both builds completed.
    finished_builds = []
    self.successResultOf(master.mq.startConsuming(
        lambda key, build: finished_builds.append(build),
        ('builds', None, 'finished')))

    # Trigger a buildrequest
    bsid, brids = self.successResultOf(
        master.data.updates.addBuildset(
            waited_for=False,
            builderids=builder_ids,
            sourcestamps=[
                {'codebase': '', 'repository': '', 'branch': None,
                 'revision': None, 'project': ''},
            ],
        )
    )

    # The worker succeeds to substantiate.
    controller.start_instance(True)

    local_workdir = FilePath(self.mktemp())
    local_workdir.createDirectory()
    controller.connect_worker(local_workdir)

    # We check that there were two builds that finished, and
    # that they both finished with success
    self.assertEqual([build['results'] for build in finished_builds],
                     [SUCCESS] * 2)
def test_latent_max_builds(self):
    """
    If max_builds is set, only one build is started on a latent
    worker at a time.
    """
    controller = LatentController(self, 'local', max_builds=1)
    step_controller = StepController()
    config_dict = {
        'builders': [
            BuilderConfig(
                name="testy-1",
                workernames=["local"],
                factory=BuildFactory([step_controller]),
            ),
            BuilderConfig(
                name="testy-2",
                workernames=["local"],
                factory=BuildFactory([step_controller]),
            ),
        ],
        'workers': [controller.worker],
        'protocols': {
            'null': {}
        },
        'multiMaster': True,
    }
    self.master = master = yield getMaster(self, self.reactor, config_dict)
    builder_ids = [
        (yield master.data.updates.findBuilderId('testy-1')),
        (yield master.data.updates.findBuilderId('testy-2')),
    ]

    # Record each build that starts so we can count them below.
    started_builds = []
    yield master.mq.startConsuming(
        lambda key, build: started_builds.append(build),
        ('builds', None, 'new'))

    # Trigger a buildrequest
    bsid, brids = yield master.data.updates.addBuildset(
        waited_for=False,
        builderids=builder_ids,
        sourcestamps=[
            {
                'codebase': '',
                'repository': '',
                'branch': None,
                'revision': None,
                'project': ''
            },
        ],
    )

    # The worker succeeds to substantiate.
    controller.start_instance(True)
    controller.connect_worker()

    # Even with two pending buildrequests, max_builds=1 allows only one
    # build to start on the worker.
    self.assertEqual(len(started_builds), 1)

    yield controller.auto_stop(True)
def test_latent_max_builds(self):
    """
    If max_builds is set, only one build is started on a latent
    worker at a time.
    """
    controller = LatentController(
        'local',
        max_builds=1,
    )
    step_controller = StepController()
    config_dict = {
        'builders': [
            BuilderConfig(name="testy-1",
                          workernames=["local"],
                          factory=BuildFactory([step_controller]),
                          ),
            BuilderConfig(name="testy-2",
                          workernames=["local"],
                          factory=BuildFactory([step_controller]),
                          ),
        ],
        'workers': [controller.worker],
        'protocols': {'null': {}},
        'multiMaster': True,
    }
    self.master = master = self.successResultOf(
        getMaster(self, self.reactor, config_dict))
    builder_ids = [
        self.successResultOf(master.data.updates.findBuilderId('testy-1')),
        self.successResultOf(master.data.updates.findBuilderId('testy-2')),
    ]

    # Record each build that starts so we can count them below.
    started_builds = []
    self.successResultOf(master.mq.startConsuming(
        lambda key, build: started_builds.append(build),
        ('builds', None, 'new')))

    # Trigger a buildrequest
    bsid, brids = self.successResultOf(
        master.data.updates.addBuildset(
            waited_for=False,
            builderids=builder_ids,
            sourcestamps=[
                {'codebase': '', 'repository': '', 'branch': None,
                 'revision': None, 'project': ''},
            ],
        )
    )

    # The worker succeeds to substantiate.
    controller.start_instance(True)
    controller.connect_worker(self)

    # Even with two pending buildrequests, max_builds=1 allows only one
    # build to start on the worker.
    self.assertEqual(len(started_builds), 1)
    controller.auto_stop(True)
def test_worker_multiple_substantiations_succeed(self):
    """
    When several builders try to substantiate the same latent worker
    concurrently and the substantiation succeeds, all builds proceed.
    """
    controller = LatentController('local')
    config_dict = {
        'builders': [
            BuilderConfig(name="testy-1", workernames=["local"],
                          factory=BuildFactory()),
            BuilderConfig(name="testy-2", workernames=["local"],
                          factory=BuildFactory()),
        ],
        'workers': [controller.worker],
        'protocols': {'null': {}},
        'multiMaster': True,
    }
    master = self.getMaster(config_dict)
    builder_ids = [
        self.successResultOf(master.data.updates.findBuilderId(name))
        for name in ('testy-1', 'testy-2')
    ]

    # Collect every finished-build message.
    finished_builds = []
    self.successResultOf(master.mq.startConsuming(
        lambda key, build: finished_builds.append(build),
        ('builds', None, 'finished')))

    # Trigger a buildrequest targeting both builders.
    bsid, brids = self.createBuildrequest(master, builder_ids)

    # The worker succeeds to substantiate.
    controller.start_instance(True)
    controller.connect_worker(self)

    # Both builds must have finished, and both with SUCCESS.
    observed_results = [build['results'] for build in finished_builds]
    self.assertEqual(observed_results, [SUCCESS] * 2)

    controller.auto_stop(True)
def test_worker_close_connection_while_building(self):
    """
    If the worker closes its connection in the middle of a build, the
    next build can still start correctly.
    """
    controller = LatentController('local', build_wait_timeout=0)

    # A step we can complete on demand.
    stepcontroller = BuildStepController()
    config_dict = {
        'builders': [
            BuilderConfig(name="testy", workernames=["local"],
                          factory=BuildFactory([stepcontroller.step])),
        ],
        'workers': [controller.worker],
        'protocols': {'null': {}},
        # Disable checks about missing scheduler.
        'multiMaster': True,
    }
    master = self.getMaster(config_dict)
    builder_id = self.successResultOf(
        master.data.updates.findBuilderId('testy'))

    # Request two builds.
    for _ in range(2):
        self.createBuildrequest(master, [builder_id])
    controller.auto_stop(True)

    self.assertTrue(controller.starting)
    controller.start_instance(True)
    controller.connect_worker(self)

    builds = self.successResultOf(master.data.get(("builds", )))
    self.assertEqual(builds[0]['results'], None)

    # Drop the connection mid-build: the first build ends in RETRY.
    controller.disconnect_worker(self)
    builds = self.successResultOf(master.data.get(("builds", )))
    self.assertEqual(builds[0]['results'], RETRY)

    # Request one build; it must run to completion on the fresh instance.
    self.createBuildrequest(master, [builder_id])
    controller.start_instance(True)
    controller.connect_worker(self)

    builds = self.successResultOf(master.data.get(("builds", )))
    self.assertEqual(builds[1]['results'], None)
    stepcontroller.finish_step(SUCCESS)
    builds = self.successResultOf(master.data.get(("builds", )))
    self.assertEqual(builds[1]['results'], SUCCESS)
def test_worker_close_connection_while_building(self):
    """
    A worker connection lost mid-build must not prevent the following
    build from starting correctly.
    """
    controller = LatentController('local', build_wait_timeout=0)
    # a step that we can finish when we want
    stepcontroller = BuildStepController()
    config_dict = {
        'builders': [
            BuilderConfig(name="testy",
                          workernames=["local"],
                          factory=BuildFactory([stepcontroller.step]),
                          ),
        ],
        'workers': [controller.worker],
        'protocols': {'null': {}},
        # Disable checks about missing scheduler.
        'multiMaster': True,
    }
    master = self.getMaster(config_dict)
    builder_id = self.successResultOf(
        master.data.updates.findBuilderId('testy'))

    # Request two builds.
    for _ in range(2):
        self.createBuildrequest(master, [builder_id])
    controller.auto_stop(True)

    self.assertTrue(controller.starting)
    controller.start_instance(True)
    controller.connect_worker(self)

    builds = self.successResultOf(master.data.get(("builds",)))
    self.assertEqual(builds[0]['results'], None)

    # The first build is retried when the connection drops.
    controller.disconnect_worker(self)
    builds = self.successResultOf(master.data.get(("builds",)))
    self.assertEqual(builds[0]['results'], RETRY)

    # Request one build.
    self.createBuildrequest(master, [builder_id])
    controller.start_instance(True)
    controller.connect_worker(self)

    builds = self.successResultOf(master.data.get(("builds",)))
    self.assertEqual(builds[1]['results'], None)

    # Finishing the step lets the second build complete successfully.
    stepcontroller.finish_step(SUCCESS)
    builds = self.successResultOf(master.data.get(("builds",)))
    self.assertEqual(builds[1]['results'], SUCCESS)
def test_worker_multiple_substantiations_succeed(self):
    """
    If multiple builders trigger try to substantiate a worker at
    the same time, if the substantiation succeeds then all of
    the builds proceed.
    """
    controller = LatentController("local")
    config_dict = {
        "builders": [
            BuilderConfig(name="testy-1", workernames=["local"],
                          factory=BuildFactory()),
            BuilderConfig(name="testy-2", workernames=["local"],
                          factory=BuildFactory()),
        ],
        "workers": [controller.worker],
        "protocols": {"null": {}},
        "multiMaster": True,
    }
    master = self.getMaster(config_dict)
    builder_ids = [
        self.successResultOf(master.data.updates.findBuilderId("testy-1")),
        self.successResultOf(master.data.updates.findBuilderId("testy-2")),
    ]

    # Collect every finished-build message.
    finished_builds = []
    self.successResultOf(
        master.mq.startConsuming(
            lambda key, build: finished_builds.append(build),
            ("builds", None, "finished"))
    )

    # Trigger a buildrequest
    bsid, brids = self.successResultOf(
        master.data.updates.addBuildset(
            waited_for=False,
            builderids=builder_ids,
            sourcestamps=[{"codebase": "", "repository": "",
                           "branch": None, "revision": None,
                           "project": ""}],
        )
    )

    # The worker succeeds to substantiate.
    controller.start_instance(True)

    local_workdir = FilePath(self.mktemp())
    local_workdir.createDirectory()
    controller.connect_worker(local_workdir)

    # We check that there were two builds that finished, and
    # that they both finished with success
    self.assertEqual([build["results"] for build in finished_builds],
                     [SUCCESS] * 2)
def test_worker_multiple_substantiations_succeed(self):
    """
    If multiple builders trigger try to substantiate a worker at
    the same time, if the substantiation succeeds then all of
    the builds proceed.
    """
    controller = LatentController('local')
    config_dict = {
        'builders': [
            BuilderConfig(name="testy-1",
                          workernames=["local"],
                          factory=BuildFactory(),
                          ),
            BuilderConfig(name="testy-2",
                          workernames=["local"],
                          factory=BuildFactory(),
                          ),
        ],
        'workers': [controller.worker],
        'protocols': {'null': {}},
        'multiMaster': True,
    }
    master = self.getMaster(config_dict)
    builder_ids = [
        self.successResultOf(master.data.updates.findBuilderId('testy-1')),
        self.successResultOf(master.data.updates.findBuilderId('testy-2')),
    ]

    # Collect every finished-build message.
    finished_builds = []
    self.successResultOf(master.mq.startConsuming(
        lambda key, build: finished_builds.append(build),
        ('builds', None, 'finished')))

    # Trigger a buildrequest
    bsid, brids = self.createBuildrequest(master, builder_ids)

    # The worker succeeds to substantiate.
    controller.start_instance(True)
    controller.connect_worker(self)

    # We check that there were two builds that finished, and
    # that they both finished with success
    self.assertEqual([build['results'] for build in finished_builds],
                     [SUCCESS] * 2)

    controller.auto_stop(True)
def test_failed_ping_get_requeued(self):
    """
    sendBuilderList can fail due to missing permissions on the workdir,
    the build request becomes unclaimed
    """
    controller = LatentController('local')
    config_dict = {
        'builders': [
            BuilderConfig(
                name="testy",
                workernames=["local"],
                factory=BuildFactory(),
            ),
        ],
        'workers': [controller.worker],
        'protocols': {
            'null': {}
        },
        # Disable checks about missing scheduler.
        'multiMaster': True,
    }
    master = self.getMaster(config_dict)
    builder_id = self.successResultOf(
        master.data.updates.findBuilderId('testy'))

    # Trigger a buildrequest
    bsid, brids = self.createBuildrequest(master, [builder_id])

    # Collect unclaimed-buildrequest and new-log messages so we can
    # assert on them after the failed substantiation.
    unclaimed_build_requests = []
    self.successResultOf(
        master.mq.startConsuming(
            lambda key, request: unclaimed_build_requests.append(request),
            ('buildrequests', None, 'unclaimed')))
    logs = []
    self.successResultOf(
        master.mq.startConsuming(lambda key, log: logs.append(log),
                                 ('logs', None, 'new')))

    # The worker succeeds to substantiate, but its ping is patched to
    # fail, which makes sendBuilderList blow up.
    def remote_print(self, msg):
        if msg == "ping":
            raise TestException("can't ping")
    controller.patchBot(self, 'remote_print', remote_print)
    controller.start_instance(True)
    controller.connect_worker(self)

    # Flush the errors logged by the failure.
    self.flushLoggedErrors(TestException)

    # When the substantiation fails, the buildrequest becomes unclaimed.
    self.assertEqual(
        set(brids),
        set([req['buildrequestid'] for req in unclaimed_build_requests]),
    )
    # should get 2 logs (html and txt) with proper information in there
    self.assertEqual(len(logs), 2)
    logs_by_name = {}
    for _log in logs:
        fulllog = self.successResultOf(
            master.data.get(("logs", str(_log['logid']), "raw")))
        logs_by_name[fulllog['filename']] = fulllog['raw']

    for i in ["err_text", "err_html"]:
        self.assertIn("can't ping", logs_by_name[i])
        # make sure stacktrace is present in html
        self.assertIn(
            "buildbot.test.integration.test_latent.TestException",
            logs_by_name[i])
    controller.auto_stop(True)
def test_worker_multiple_substantiations_succeed(self):
    """
    If multiple builders trigger try to substantiate a worker at
    the same time, if the substantiation succeeds then all of
    the builds proceed.
    """
    controller = LatentController('local')
    config_dict = {
        'builders': [
            BuilderConfig(
                name="testy-1",
                workernames=["local"],
                factory=BuildFactory(),
            ),
            BuilderConfig(
                name="testy-2",
                workernames=["local"],
                factory=BuildFactory(),
            ),
        ],
        'workers': [controller.worker],
        'protocols': {
            'null': {}
        },
        'multiMaster': True,
    }
    master = self.getMaster(config_dict)
    builder_ids = [
        self.successResultOf(master.data.updates.findBuilderId('testy-1')),
        self.successResultOf(master.data.updates.findBuilderId('testy-2')),
    ]

    # Collect every finished-build message.
    finished_builds = []
    self.successResultOf(
        master.mq.startConsuming(
            lambda key, build: finished_builds.append(build),
            ('builds', None, 'finished')))

    # Trigger a buildrequest
    bsid, brids = self.successResultOf(
        master.data.updates.addBuildset(
            waited_for=False,
            builderids=builder_ids,
            sourcestamps=[
                {
                    'codebase': '',
                    'repository': '',
                    'branch': None,
                    'revision': None,
                    'project': ''
                },
            ],
        ))

    # The worker succeeds to substantiate.
    controller.start_instance(True)

    local_workdir = FilePath(self.mktemp())
    local_workdir.createDirectory()
    controller.connect_worker(local_workdir)

    # We check that there were two builds that finished, and
    # that they both finished with success
    self.assertEqual([build['results'] for build in finished_builds],
                     [SUCCESS] * 2)
def test_rejects_build_on_instance_with_different_type_timeout_nonzero(
        self):
    """
    If latent worker supports getting its instance type from properties
    that are rendered from build then the buildrequestdistributor must
    not schedule any builds on workers that are running different
    instance type than what these builds will require.
    """
    controller = LatentController(self, 'local',
                                  kind=Interpolate('%(prop:worker_kind)s'),
                                  build_wait_timeout=5)

    # a step that we can finish when we want
    stepcontroller = BuildStepController()
    config_dict = {
        'builders': [
            BuilderConfig(
                name="testy",
                workernames=["local"],
                factory=BuildFactory([stepcontroller.step]),
            ),
        ],
        'workers': [controller.worker],
        'protocols': {
            'null': {}
        },
        'multiMaster': True,
    }
    master = self.getMaster(config_dict)
    builder_id = self.successResultOf(
        master.data.updates.findBuilderId('testy'))

    # create build request
    self.createBuildrequest(master, [builder_id],
                            properties=Properties(worker_kind='a'))

    # start the build and verify the kind of the worker. Note that the
    # buildmaster needs to restart the worker in order to change the
    # worker kind, so we allow it both to auto start and stop
    self.assertTrue(controller.starting)

    controller.auto_connect_worker = True
    controller.auto_disconnect_worker = True
    controller.auto_start(True)
    controller.auto_stop(True)
    controller.connect_worker()
    self.assertEqual('a', (yield controller.get_started_kind()))

    # before the other build finished, create another build request
    self.createBuildrequest(master, [builder_id],
                            properties=Properties(worker_kind='b'))
    stepcontroller.finish_step(SUCCESS)

    # give the botmaster a chance to insubstantiate the worker and maybe
    # substantiate it for the pending build
    self.reactor.advance(0.1)

    # verify build has not started, even though the worker is waiting
    # for one
    self.assertIsNone((yield master.db.builds.getBuild(2)))
    self.assertTrue(controller.started)

    # wait until the latent worker times out, is insubstantiated, is
    # substantiated because of pending buildrequest and starts the build
    self.reactor.advance(6)
    self.assertIsNotNone((yield master.db.builds.getBuild(2)))

    # verify that the second build restarted with the expected instance
    # kind
    self.assertEqual('b', (yield controller.get_started_kind()))
    stepcontroller.finish_step(SUCCESS)

    dbdict = yield master.db.builds.getBuild(1)
    self.assertEqual(SUCCESS, dbdict['results'])
    dbdict = yield master.db.builds.getBuild(2)
    self.assertEqual(SUCCESS, dbdict['results'])
def test_failed_ping_get_requeued(self):
    """
    sendBuilderList can fail due to missing permissions on the workdir,
    the build request becomes unclaimed
    """
    controller = LatentController('local')
    config_dict = {
        'builders': [
            BuilderConfig(name="testy",
                          workernames=["local"],
                          factory=BuildFactory(),
                          ),
        ],
        'workers': [controller.worker],
        'protocols': {'null': {}},
        # Disable checks about missing scheduler.
        'multiMaster': True,
    }
    master = self.getMaster(config_dict)
    builder_id = self.successResultOf(
        master.data.updates.findBuilderId('testy'))

    # Trigger a buildrequest
    bsid, brids = self.createBuildrequest(master, [builder_id])

    # Collect unclaimed-buildrequest and new-log messages so we can
    # assert on them after the failed substantiation.
    unclaimed_build_requests = []
    self.successResultOf(master.mq.startConsuming(
        lambda key, request: unclaimed_build_requests.append(request),
        ('buildrequests', None, 'unclaimed')))
    logs = []
    self.successResultOf(master.mq.startConsuming(
        lambda key, log: logs.append(log),
        ('logs', None, 'new')))

    # The worker succeeds to substantiate, but its ping is patched to
    # fail, which makes sendBuilderList blow up.
    def remote_print(self, msg):
        if msg == "ping":
            raise TestException("can't ping")
    controller.patchBot(self, 'remote_print', remote_print)
    controller.start_instance(True)
    controller.connect_worker(self)

    # Flush the errors logged by the failure.
    self.flushLoggedErrors(TestException)

    # When the substantiation fails, the buildrequest becomes unclaimed.
    self.assertEqual(
        set(brids),
        set([req['buildrequestid'] for req in unclaimed_build_requests]),
    )
    # should get 2 logs (html and txt) with proper information in there
    self.assertEqual(len(logs), 2)
    logs_by_name = {}
    for _log in logs:
        fulllog = self.successResultOf(
            master.data.get(("logs", str(_log['logid']), "raw")))
        logs_by_name[fulllog['filename']] = fulllog['raw']

    for i in ["err_text", "err_html"]:
        self.assertIn("can't ping", logs_by_name[i])
        # make sure stacktrace is present in html
        self.assertIn(os.path.join(
            "integration", "test_latent.py"), logs_by_name[i])
    controller.auto_stop(True)
def test_rejects_build_on_instance_with_different_type_timeout_nonzero(self):
    """
    If latent worker supports getting its instance type from properties
    that are rendered from build then the buildrequestdistributor must
    not schedule any builds on workers that are running different
    instance type than what these builds will require.
    """
    controller = LatentController(self, 'local',
                                  kind=Interpolate('%(prop:worker_kind)s'),
                                  build_wait_timeout=5)

    # a step that we can finish when we want
    stepcontroller = BuildStepController()
    config_dict = {
        'builders': [
            BuilderConfig(name="testy",
                          workernames=["local"],
                          factory=BuildFactory([stepcontroller.step]),
                          ),
        ],
        'workers': [controller.worker],
        'protocols': {'null': {}},
        'multiMaster': True,
    }
    master = self.getMaster(config_dict)
    builder_id = self.successResultOf(
        master.data.updates.findBuilderId('testy'))

    # create build request
    self.createBuildrequest(master, [builder_id],
                            properties=Properties(worker_kind='a'))

    # start the build and verify the kind of the worker. Note that the
    # buildmaster needs to restart the worker in order to change the
    # worker kind, so we allow it both to auto start and stop
    self.assertTrue(controller.starting)

    controller.auto_connect_worker = True
    controller.auto_disconnect_worker = True
    controller.auto_start(True)
    controller.auto_stop(True)
    controller.connect_worker()
    self.assertEqual('a', (yield controller.get_started_kind()))

    # before the other build finished, create another build request
    self.createBuildrequest(master, [builder_id],
                            properties=Properties(worker_kind='b'))
    stepcontroller.finish_step(SUCCESS)

    # give the botmaster a chance to insubstantiate the worker and maybe
    # substantiate it for the pending build
    self.reactor.advance(0.1)

    # verify build has not started, even though the worker is waiting
    # for one
    self.assertIsNone((yield master.db.builds.getBuild(2)))
    self.assertTrue(controller.started)

    # wait until the latent worker times out, is insubstantiated, is
    # substantiated because of pending buildrequest and starts the build
    self.reactor.advance(6)
    self.assertIsNotNone((yield master.db.builds.getBuild(2)))

    # verify that the second build restarted with the expected instance
    # kind
    self.assertEqual('b', (yield controller.get_started_kind()))
    stepcontroller.finish_step(SUCCESS)

    dbdict = yield master.db.builds.getBuild(1)
    self.assertEqual(SUCCESS, dbdict['results'])
    dbdict = yield master.db.builds.getBuild(2)
    self.assertEqual(SUCCESS, dbdict['results'])