Example #1
    def setUpTestReactor(self, use_asyncio=False):

        self.patch(threadpool, 'ThreadPool', NonThreadPool)
        self.reactor = TestReactor()
        _setReactor(self.reactor)

        def deferToThread(f, *args, **kwargs):
            return threads.deferToThreadPool(self.reactor,
                                             self.reactor.getThreadPool(), f,
                                             *args, **kwargs)

        self.patch(threads, 'deferToThread', deferToThread)

        # During the shutdown sequence we must first stop the reactor and only
        # then unset the reactor used for eventually(), because any callbacks
        # that are run during reactor.stop() may use eventually() themselves.
        self.addCleanup(_setReactor, None)
        self.addCleanup(self.reactor.stop)

        if use_asyncio:
            self.asyncio_loop = TwistedLoop(self.reactor)
            asyncio.set_event_loop(self.asyncio_loop)
            self.asyncio_loop.start()

            def stop():
                self.asyncio_loop.stop()
                self.asyncio_loop.close()
                asyncio.set_event_loop(None)

            self.addCleanup(stop)
Example #2
 def setUp(self):
     self.patch(threadpool, 'ThreadPool', NonThreadPool)
     self.reactor = TestReactor()
     _setReactor(self.reactor)
     self.patch(workerhyper, 'Hyper', hyper.Client)
     self.build = Properties(
         image="busybox:latest", builder="docker_worker")
     self.worker = None
Example #3
    def setUp(self):
        self.setUpDirs('workdir')
        self.addCleanup(self.tearDownDirs)

        self.reactor = TestReactor()
        self.addCleanup(self.reactor.stop)
        self.spawned_process = mock.Mock()
        self.reactor.spawnProcess = mock.Mock(
            return_value=self.spawned_process)
Example #4
class TestReactorMixin:
    """
    Mix this in to get a TestReactor as self.reactor, which is correctly cleaned
    up at the end of the test.
    """
    def setup_test_reactor(self, use_asyncio=False):

        self.patch(threadpool, 'ThreadPool', NonThreadPool)
        self.reactor = TestReactor()
        _setReactor(self.reactor)

        def deferToThread(f, *args, **kwargs):
            return threads.deferToThreadPool(self.reactor,
                                             self.reactor.getThreadPool(), f,
                                             *args, **kwargs)

        self.patch(threads, 'deferToThread', deferToThread)

        # During the shutdown sequence we must first stop the reactor and only
        # then unset the reactor used for eventually(), because any callbacks
        # that are run during reactor.stop() may use eventually() themselves.
        self.addCleanup(_setReactor, None)
        self.addCleanup(self.reactor.stop)

        if use_asyncio:
            self.asyncio_loop = AsyncIOLoopWithTwisted(self.reactor)
            asyncio.set_event_loop(self.asyncio_loop)
            self.asyncio_loop.start()

            def stop():
                self.asyncio_loop.stop()
                self.asyncio_loop.close()
                asyncio.set_event_loop(None)

            self.addCleanup(stop)
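
A minimal usage sketch for the mixin above (hypothetical: the test-case name and the timer scenario are illustrative assumptions; it presumes the mixin is combined with Twisted trial's TestCase, which supplies the patch() and addCleanup() methods the mixin relies on):

# Hypothetical illustration of using TestReactorMixin; the names below are
# assumptions, not taken from the examples on this page.
from twisted.trial import unittest

class MyClockTests(TestReactorMixin, unittest.TestCase):

    def setUp(self):
        # Installs the fake reactor as self.reactor and registers the cleanups
        # that stop it and unset the reactor used by eventually().
        self.setup_test_reactor()

    def test_delayed_call_fires(self):
        fired = []
        self.reactor.callLater(5, fired.append, True)
        # The fake reactor advances virtual time deterministically, so the
        # test never waits on a real clock.
        self.reactor.advance(5)
        self.assertEqual(fired, [True])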
Example #5
 def setUp(self):
     self.patch(threadpool, "ThreadPool", NonThreadPool)
     self.reactor = TestReactor()
     _setReactor(self.reactor)
     self.patch(workerhyper, "Hyper", hyper.Client)
     self.build = Properties(image="busybox:latest", builder="docker_worker")
     self.worker = None
Example #6
    def setUp(self):
        self.setUpDirs('workdir')
        self.addCleanup(self.tearDownDirs)

        self.reactor = TestReactor()
        self.addCleanup(self.reactor.stop)
        self.spawned_process = mock.Mock()
        self.reactor.spawnProcess = mock.Mock(return_value=self.spawned_process)
Example #7
    def setUpTestReactor(self):
        self.patch(threadpool, 'ThreadPool', NonThreadPool)
        self.reactor = TestReactor()
        _setReactor(self.reactor)

        def deferToThread(f, *args, **kwargs):
            return threads.deferToThreadPool(self.reactor,
                                             self.reactor.getThreadPool(), f,
                                             *args, **kwargs)

        self.patch(threads, 'deferToThread', deferToThread)

        # During the shutdown sequence we must first stop the reactor and only
        # then unset the reactor used for eventually(), because any callbacks
        # that are run during reactor.stop() may use eventually() themselves.
        self.addCleanup(_setReactor, None)
        self.addCleanup(self.reactor.stop)
Example #8
 def setUpTestReactor(self):
     self.patch(threadpool, 'ThreadPool', NonThreadPool)
     self.reactor = TestReactor()
     _setReactor(self.reactor)
     # During the shutdown sequence we must first stop the reactor and only
     # then unset the reactor used for eventually(), because any callbacks
     # that are run during reactor.stop() may use eventually() themselves.
     self.addCleanup(_setReactor, None)
     self.addCleanup(self.reactor.stop)
Example #9
    def setUp(self):
        def deferToThread(f, *args, **kwargs):
            return threads.deferToThreadPool(self.reactor, self.reactor.getThreadPool(),
                                             f, *args, **kwargs)
        self.patch(threadpool, 'ThreadPool', NonThreadPool)
        self.patch(threads, 'deferToThread', deferToThread)
        self.reactor = TestReactor()

        _setReactor(self.reactor)
        self.build = Properties(
            image='busybox:latest', builder='docker_worker', distro='wheezy')
        self.patch(dockerworker, 'client', docker)
Example #10
    def setUp(self):
        self.patch(threadpool, 'ThreadPool', NonThreadPool)
        self.reactor = TestReactor()
        self.addCleanup(self.reactor.stop)
        _setReactor(self.reactor)
        self.addCleanup(_setReactor, None)

        # to ease debugging we display the error logs in the test log
        origAddCompleteLog = BuildStep.addCompleteLog

        def addCompleteLog(self, name, _log):
            if name.endswith("err.text"):
                log.msg("got error log!", name, _log)
            return origAddCompleteLog(self, name, _log)

        self.patch(BuildStep, "addCompleteLog", addCompleteLog)

        if 'BBTRACE' in os.environ:
            enable_trace(self, [
                "twisted", "worker_transition.py", "util/tu", "util/path",
                "log.py", "/mq/", "/db/", "buildbot/data/", "fake/reactor.py"
            ])
Example #11
    def setUp(self):
        self.patch(threadpool, 'ThreadPool', NonThreadPool)
        self.reactor = TestReactor()
        self.addCleanup(self.reactor.stop)
        _setReactor(self.reactor)
        self.addCleanup(_setReactor, None)

        # to ease debugging we display the error logs in the test log
        origAddCompleteLog = BuildStep.addCompleteLog

        def addCompleteLog(self, name, _log):
            if name.endswith("err.text"):
                log.msg("got error log!", name, _log)
            return origAddCompleteLog(self, name, _log)

        self.patch(BuildStep, "addCompleteLog", addCompleteLog)
Example #12
    def setUp(self):
        self.patch(threadpool, 'ThreadPool', NonThreadPool)
        self.reactor = TestReactor()
        self.addCleanup(self.reactor.stop)
        _setReactor(self.reactor)
        self.addCleanup(_setReactor, None)

        # to ease debugging we display the error logs in the test log
        origAddCompleteLog = BuildStep.addCompleteLog

        def addCompleteLog(self, name, _log):
            if name.endswith("err.text"):
                log.msg("got error log!", name, _log)
            return origAddCompleteLog(self, name, _log)
        self.patch(BuildStep, "addCompleteLog", addCompleteLog)

        if 'BBTRACE' in os.environ:
            enable_trace(self, ["twisted", "worker_transition.py", "util/tu", "util/path",
                                "log.py", "/mq/", "/db/", "buildbot/data/", "fake/reactor.py"])
Example #13
class TestHyperLatentWorker(unittest.SynchronousTestCase):
    def setUp(self):
        self.patch(threadpool, 'ThreadPool', NonThreadPool)
        self.reactor = TestReactor()
        _setReactor(self.reactor)
        self.patch(workerhyper, 'Hyper', hyper.Client)
        self.build = Properties(image="busybox:latest",
                                builder="docker_worker")
        self.worker = None

    def tearDown(self):
        if self.worker is not None:
            self.worker.master.stopService()
            self.reactor.pump([.1])
        self.assertIsNone(hyper.Client.instance)
        _setReactor(None)

    def test_constructor_normal(self):
        worker = HyperLatentWorker('bot', 'pass', 'tcp://hyper.sh/', 'foo',
                                   'bar', 'debian:wheezy')
        # class instantiation configures nothing
        self.assertEqual(worker.client, None)

    def test_constructor_nohyper(self):
        self.patch(workerhyper, 'Hyper', None)
        with self.assertRaises(config.ConfigErrors):
            HyperLatentWorker('bot', 'pass', 'tcp://hyper.sh/', 'foo', 'bar',
                              'debian:wheezy')

    def test_constructor_badsize(self):
        with self.assertRaises(config.ConfigErrors):
            HyperLatentWorker('bot',
                              'pass',
                              'tcp://hyper.sh/',
                              'foo',
                              'bar',
                              'debian:wheezy',
                              hyper_size="big")

    def makeWorker(self, **kwargs):
        kwargs.setdefault('image', 'debian:wheezy')
        worker = HyperLatentWorker('bot', 'pass', 'tcp://hyper.sh/', 'foo',
                                   'bar', **kwargs)
        self.worker = worker
        master = fakemaster.make_master(testcase=self, wantData=True)
        worker.setServiceParent(master)
        worker.reactor = self.reactor
        self.successResultOf(master.startService())
        return worker

    def test_start_service(self):
        worker = self.worker = self.makeWorker()
        # client is lazily created on worker substantiation
        self.assertNotEqual(worker.client, None)

    def test_start_worker(self):
        worker = self.makeWorker()

        d = worker.substantiate(None, fakebuild.FakeBuildForRendering())
        # we simulate a connection
        worker.attached(FakeBot())
        self.successResultOf(d)

        self.assertIsNotNone(worker.client)
        self.assertEqual(
            worker.instance, {
                'Id':
                '8a61192da2b3bb2d922875585e29b74ec0dc4e0117fcbf84c962204e97564cd7',
                'Warnings': None,
                'image': 'rendered:debian:wheezy'
            })
        # teardown makes sure all containers are cleaned up

    def test_start_worker_but_no_connection_and_shutdown(self):
        worker = self.makeWorker()
        worker.substantiate(None, fakebuild.FakeBuildForRendering())
        self.assertIsNotNone(worker.client)
        self.assertEqual(
            worker.instance, {
                'Id':
                '8a61192da2b3bb2d922875585e29b74ec0dc4e0117fcbf84c962204e97564cd7',
                'Warnings': None,
                'image': 'rendered:debian:wheezy'
            })
        # teardown makes sure all containers are cleaned up

    def test_start_worker_but_error(self):
        worker = self.makeWorker(image="buggy")
        d = worker.substantiate(None, fakebuild.FakeBuildForRendering())
        self.reactor.advance(.1)
        self.failureResultOf(d)
        self.assertIsNotNone(worker.client)
        self.assertEqual(worker.instance, None)
        # teardown makes sure all containers are cleaned up

    def test_start_worker_but_already_created_with_same_name(self):
        worker = self.makeWorker(image="cool")
        worker.client.create_container(image="foo",
                                       name=worker.getContainerName())
        d = worker.substantiate(None, fakebuild.FakeBuildForRendering())
        self.reactor.advance(.1)
        worker.attached(FakeBot())
        self.successResultOf(d)
        self.assertIsNotNone(worker.client)

    def test_non_substantiated_worker_compatible_with_any_build(self):
        worker = self.makeWorker(image=Interpolate('%(prop:image)s'),
                                 hyper_size=Interpolate('%(prop:size)s'))

        build = Properties(image='image1', size='size1')
        self.assertTrue(worker.isCompatibleWithBuild(build))
        build = Properties(image='image2', size='size2')
        self.assertTrue(worker.isCompatibleWithBuild(build))

    @defer.inlineCallbacks
    def test_substantiated_worker_not_compatible_until_shutdown(self):
        worker = self.makeWorker(image=Interpolate('%(prop:image)s'),
                                 hyper_size=Interpolate('%(prop:size)s'))

        build1 = Properties(image='image1', size='size1')
        build2 = Properties(image='image2', size='size2')
        yield worker.substantiate(None, build1)
        yield worker.insubstantiate()
        self.assertTrue(worker.isCompatibleWithBuild(build1))
        self.assertTrue(worker.isCompatibleWithBuild(build2))
        yield worker.substantiate(None, build2)
        self.assertFalse(worker.isCompatibleWithBuild(build1))
        self.assertTrue(worker.isCompatibleWithBuild(build2))
Example #14
class TestLogWatcher(unittest.SynchronousTestCase, dirs.DirsMixin):
    def setUp(self):
        self.setUpDirs('workdir')
        self.addCleanup(self.tearDownDirs)

        self.reactor = TestReactor()
        self.addCleanup(self.reactor.stop)
        self.spawned_process = mock.Mock()
        self.reactor.spawnProcess = mock.Mock(
            return_value=self.spawned_process)

    def test_start(self):
        lw = LogWatcher('workdir/test.log', _reactor=self.reactor)
        lw._start = mock.Mock()

        lw.start()
        self.reactor.spawnProcess.assert_called()
        self.assertTrue(os.path.exists('workdir/test.log'))
        self.assertTrue(lw.running)

    @defer.inlineCallbacks
    def test_success_before_timeout(self):
        lw = LogWatcher('workdir/test.log', timeout=5, _reactor=self.reactor)
        d = lw.start()
        self.reactor.advance(4.9)
        lw.lineReceived(b'BuildMaster is running')
        res = yield d
        self.assertEqual(res, 'buildmaster')

    @defer.inlineCallbacks
    def test_failure_after_timeout(self):
        lw = LogWatcher('workdir/test.log', timeout=5, _reactor=self.reactor)
        d = lw.start()
        self.reactor.advance(5.1)
        lw.lineReceived(b'BuildMaster is running')
        with self.assertRaises(BuildmasterTimeoutError):
            yield d

    @defer.inlineCallbacks
    def test_progress_restarts_timeout(self):
        lw = LogWatcher('workdir/test.log', timeout=5, _reactor=self.reactor)
        d = lw.start()
        self.reactor.advance(4.9)
        lw.lineReceived(b'added builder')
        self.reactor.advance(4.9)
        lw.lineReceived(b'BuildMaster is running')
        res = yield d
        self.assertEqual(res, 'buildmaster')

    @defer.inlineCallbacks
    def test_matches_lines(self):
        lines_and_expected = [
            (b'reconfig aborted without making any changes', ReconfigError()),
            (b'WARNING: reconfig partially applied; master may malfunction',
             ReconfigError()),
            (b'Server Shut Down', ReconfigError()),
            (b'BuildMaster startup failed', BuildmasterStartupError()),
            (b'message from master: attached', 'worker'),
            (b'configuration update complete', 'buildmaster'),
            (b'BuildMaster is running', 'buildmaster'),
        ]

        for line, expected in lines_and_expected:
            lw = LogWatcher('workdir/test.log',
                            timeout=5,
                            _reactor=self.reactor)
            d = lw.start()
            lw.lineReceived(line)

            if isinstance(expected, Exception):
                with self.assertRaises(type(expected)):
                    yield d
            else:
                res = yield d
                self.assertEqual(res, expected)
Example #15
class TestLogWatcher(unittest.SynchronousTestCase, dirs.DirsMixin):

    def setUp(self):
        self.setUpDirs('workdir')
        self.addCleanup(self.tearDownDirs)

        self.reactor = TestReactor()
        self.addCleanup(self.reactor.stop)
        self.spawned_process = mock.Mock()
        self.reactor.spawnProcess = mock.Mock(return_value=self.spawned_process)

    def test_start(self):
        lw = LogWatcher('workdir/test.log', _reactor=self.reactor)
        lw._start = mock.Mock()

        lw.start()
        self.reactor.spawnProcess.assert_called()
        self.assertTrue(os.path.exists('workdir/test.log'))
        self.assertTrue(lw.running)

    @defer.inlineCallbacks
    def test_success_before_timeout(self):
        lw = LogWatcher('workdir/test.log', timeout=5, _reactor=self.reactor)
        d = lw.start()
        self.reactor.advance(4.9)
        lw.lineReceived(b'BuildMaster is running')
        res = yield d
        self.assertEqual(res, 'buildmaster')

    @defer.inlineCallbacks
    def test_failure_after_timeout(self):
        lw = LogWatcher('workdir/test.log', timeout=5, _reactor=self.reactor)
        d = lw.start()
        self.reactor.advance(5.1)
        lw.lineReceived(b'BuildMaster is running')
        with self.assertRaises(BuildmasterTimeoutError):
            yield d

    @defer.inlineCallbacks
    def test_progress_restarts_timeout(self):
        lw = LogWatcher('workdir/test.log', timeout=5, _reactor=self.reactor)
        d = lw.start()
        self.reactor.advance(4.9)
        lw.lineReceived(b'added builder')
        self.reactor.advance(4.9)
        lw.lineReceived(b'BuildMaster is running')
        res = yield d
        self.assertEqual(res, 'buildmaster')

    @defer.inlineCallbacks
    def test_matches_lines(self):
        lines_and_expected = [
            (b'reconfig aborted without making any changes', ReconfigError()),
            (b'WARNING: reconfig partially applied; master may malfunction',
             ReconfigError()),
            (b'Server Shut Down', ReconfigError()),
            (b'BuildMaster startup failed', BuildmasterStartupError()),
            (b'message from master: attached', 'worker'),
            (b'configuration update complete', 'buildmaster'),
            (b'BuildMaster is running', 'buildmaster'),
        ]

        for line, expected in lines_and_expected:
            lw = LogWatcher('workdir/test.log', timeout=5,
                            _reactor=self.reactor)
            d = lw.start()
            lw.lineReceived(line)

            if isinstance(expected, Exception):
                with self.assertRaises(type(expected)):
                    yield d
            else:
                res = yield d
                self.assertEqual(res, expected)
Example #16
 def setUp(self):
     self.patch(threadpool, 'ThreadPool', NonThreadPool)
     self.reactor = TestReactor()
     self.addCleanup(self.reactor.stop)
Example #17
 def setUp(self):
     self.patch(threadpool, 'ThreadPool', NonThreadPool)
     self.reactor = TestReactor()
Example #18
 def setUp(self):
     self.reactor = TestReactor()
     _setReactor(self.reactor)
     self.build = Properties(
         image="busybox:latest", builder="docker_worker")
     self.worker = None
Example #19
class TestMarathonLatentWorker(unittest.SynchronousTestCase):
    def setUp(self):
        self.reactor = TestReactor()
        _setReactor(self.reactor)
        self.build = Properties(
            image="busybox:latest", builder="docker_worker")
        self.worker = None

    def tearDown(self):
        if self.worker is not None:
            class FakeResult(object):
                code = 200
            self._http.delete = lambda _: defer.succeed(FakeResult())
            self.worker.master.stopService()
        _setReactor(None)

    def test_constructor_normal(self):
        worker = MarathonLatentWorker('bot', 'tcp://marathon.local', 'foo',
                                      'bar', 'debian:wheezy')
        # class instantiation configures nothing
        self.assertEqual(worker._http, None)

    def makeWorker(self, **kwargs):
        kwargs.setdefault('image', 'debian:wheezy')
        worker = MarathonLatentWorker('bot', 'tcp://marathon.local', **kwargs)
        self.worker = worker
        master = fakemaster.make_master(testcase=self, wantData=True)
        self._http = self.successResultOf(
            fakehttpclientservice.HTTPClientService.getFakeService(
                master, self, 'tcp://marathon.local', auth=kwargs.get(
                    'auth')))
        worker.setServiceParent(master)
        worker.reactor = self.reactor
        self.successResultOf(master.startService())
        worker.masterhash = "masterhash"
        return worker

    def test_start_service(self):
        worker = self.worker = self.makeWorker()
        # http is lazily created on worker substantiation
        self.assertNotEqual(worker._http, None)

    def test_start_worker(self):
        # http://mesosphere.github.io/marathon/docs/rest-api.html#post-v2-apps
        worker = self.makeWorker()
        worker.password = "******"
        worker.masterFQDN = "master"
        self._http.expect(
            method='delete',
            ep='/v2/apps/buildbot-worker/buildbot-bot-masterhash')
        self._http.expect(
            method='post',
            ep='/v2/apps',
            json={
                'instances': 1,
                'container': {
                    'docker': {
                        'image': 'rendered:debian:wheezy',
                        'network': 'BRIDGE'
                    },
                    'type': 'DOCKER'
                },
                'id': u'buildbot-worker/buildbot-bot-masterhash',
                'env': {
                    'BUILDMASTER': "master",
                    'BUILDMASTER_PORT': '1234',
                    'WORKERNAME': u'bot',
                    'WORKERPASS': "******"
                }
            },
            code=201,
            content_json={'Id': 'id'})
        d = worker.substantiate(None, FakeBuild())
        # we simulate a connection
        worker.attached(FakeBot())
        self.successResultOf(d)

        self.assertEqual(worker.instance, {'Id': 'id'})
        # teardown makes sure all containers are cleaned up

    def test_start_worker_but_no_connection_and_shutdown(self):
        worker = self.makeWorker()
        worker.password = "******"
        worker.masterFQDN = "master"
        self._http.expect(
            method='delete',
            ep='/v2/apps/buildbot-worker/buildbot-bot-masterhash')
        self._http.expect(
            method='post',
            ep='/v2/apps',
            json={
                'instances': 1,
                'container': {
                    'docker': {
                        'image': 'rendered:debian:wheezy',
                        'network': 'BRIDGE'
                    },
                    'type': 'DOCKER'
                },
                'id': u'buildbot-worker/buildbot-bot-masterhash',
                'env': {
                    'BUILDMASTER': "master",
                    'BUILDMASTER_PORT': '1234',
                    'WORKERNAME': u'bot',
                    'WORKERPASS': "******"
                }
            },
            code=201,
            content_json={'Id': 'id'})

        worker.substantiate(None, FakeBuild())
        self.assertEqual(worker.instance, {'Id': 'id'})
        # teardown makes sure all containers are cleaned up

    def test_start_worker_but_error(self):
        worker = self.makeWorker()
        self._http.expect(
            method='delete',
            ep='/v2/apps/buildbot-worker/buildbot-bot-masterhash')
        self._http.expect(
            method='post',
            ep='/v2/apps',
            json={
                'instances': 1,
                'container': {
                    'docker': {
                        'image': 'rendered:debian:wheezy',
                        'network': 'BRIDGE'
                    },
                    'type': 'DOCKER'
                },
                'id': u'buildbot-worker/buildbot-bot-masterhash',
                'env': {
                    'BUILDMASTER': "master",
                    'BUILDMASTER_PORT': '1234',
                    'WORKERNAME': u'bot',
                    'WORKERPASS': "******"
                }
            },
            code=404,
            content_json={'message': 'image not found'})
        self._http.expect(
            method='delete',
            ep='/v2/apps/buildbot-worker/buildbot-bot-masterhash')
        d = worker.substantiate(None, FakeBuild())
        self.reactor.advance(.1)
        self.failureResultOf(d)
        self.assertEqual(worker.instance, None)
        # teardown makes sure all containers are cleaned up

    def test_start_worker_with_params(self):
        # http://mesosphere.github.io/marathon/docs/rest-api.html#post-v2-apps
        worker = self.makeWorker(marathon_extra_config={
            'container': {
                'docker': {
                    'network': None
                }
            },
            'env': {
                'PARAMETER': 'foo'
            }
        })
        worker.password = "******"
        worker.masterFQDN = "master"
        self._http.expect(
            method='delete',
            ep='/v2/apps/buildbot-worker/buildbot-bot-masterhash')
        self._http.expect(
            method='post',
            ep='/v2/apps',
            json={
                'instances': 1,
                'container': {
                    'docker': {
                        'image': 'rendered:debian:wheezy',
                        'network': None
                    },
                    'type': 'DOCKER'
                },
                'id': u'buildbot-worker/buildbot-bot-masterhash',
                'env': {
                    'BUILDMASTER': "master",
                    'BUILDMASTER_PORT': '1234',
                    'WORKERNAME': u'bot',
                    'WORKERPASS': "******",
                    'PARAMETER': 'foo'
                }
            },
            code=201,
            content_json={'Id': 'id'})
        d = worker.substantiate(None, FakeBuild())
        # we simulate a connection
        worker.attached(FakeBot())
        self.successResultOf(d)

        self.assertEqual(worker.instance, {'Id': 'id'})
Example #20
class TestMarathonLatentWorker(unittest.SynchronousTestCase):
    def setUp(self):
        self.reactor = TestReactor()
        _setReactor(self.reactor)
        self.build = Properties(image="busybox:latest", builder="docker_worker")
        self.worker = None

    def tearDown(self):
        if self.worker is not None:

            class FakeResult(object):
                code = 200

            self._http.delete = lambda _: defer.succeed(FakeResult())
            self.worker.master.stopService()
        _setReactor(None)

    def test_constructor_normal(self):
        worker = MarathonLatentWorker("bot", "tcp://marathon.local", "foo", "bar", "debian:wheezy")
        # class instantiation configures nothing
        self.assertEqual(worker._http, None)

    def makeWorker(self, **kwargs):
        kwargs.setdefault("image", "debian:wheezy")
        worker = MarathonLatentWorker("bot", "tcp://marathon.local", **kwargs)
        self.worker = worker
        master = fakemaster.make_master(testcase=self, wantData=True)
        self._http = self.successResultOf(
            fakehttpclientservice.HTTPClientService.getFakeService(
                master, self, "tcp://marathon.local", auth=kwargs.get("auth")
            )
        )
        worker.setServiceParent(master)
        worker.reactor = self.reactor
        self.successResultOf(master.startService())
        worker.masterhash = "masterhash"
        return worker

    def test_start_service(self):
        worker = self.worker = self.makeWorker()
        # http is lazily created on worker substantiation
        self.assertNotEqual(worker._http, None)

    def test_start_worker(self):
        # http://mesosphere.github.io/marathon/docs/rest-api.html#post-v2-apps
        worker = self.makeWorker()
        worker.password = "******"
        worker.masterFQDN = "master"
        self._http.expect(method="delete", ep="/v2/apps/buildbot-worker/buildbotmasterhash-bot")
        self._http.expect(
            method="post",
            ep="/v2/apps",
            json={
                "instances": 1,
                "container": {"docker": {"image": "rendered:debian:wheezy", "network": "BRIDGE"}, "type": "DOCKER"},
                "id": u"buildbot-worker/buildbotmasterhash-bot",
                "env": {
                    "BUILDMASTER": "master",
                    "BUILDMASTER_PORT": "1234",
                    "WORKERNAME": u"bot",
                    "WORKERPASS": "******",
                },
            },
            code=201,
            content_json={"Id": "id"},
        )
        d = worker.substantiate(None, FakeBuild())
        # we simulate a connection
        worker.attached(FakeBot())
        self.successResultOf(d)

        self.assertEqual(worker.instance, {"Id": "id"})
        # teardown makes sure all containers are cleaned up

    def test_start_worker_but_no_connection_and_shutdown(self):
        worker = self.makeWorker()
        worker.password = "******"
        worker.masterFQDN = "master"
        self._http.expect(method="delete", ep="/v2/apps/buildbot-worker/buildbotmasterhash-bot")
        self._http.expect(
            method="post",
            ep="/v2/apps",
            json={
                "instances": 1,
                "container": {"docker": {"image": "rendered:debian:wheezy", "network": "BRIDGE"}, "type": "DOCKER"},
                "id": u"buildbot-worker/buildbotmasterhash-bot",
                "env": {
                    "BUILDMASTER": "master",
                    "BUILDMASTER_PORT": "1234",
                    "WORKERNAME": u"bot",
                    "WORKERPASS": "******",
                },
            },
            code=201,
            content_json={"Id": "id"},
        )

        worker.substantiate(None, FakeBuild())
        self.assertEqual(worker.instance, {"Id": "id"})
        # teardown makes sure all containers are cleaned up

    def test_start_worker_but_error(self):
        worker = self.makeWorker()
        self._http.expect(method="delete", ep="/v2/apps/buildbot-worker/buildbotmasterhash-bot")
        self._http.expect(
            method="post",
            ep="/v2/apps",
            json={
                "instances": 1,
                "container": {"docker": {"image": "rendered:debian:wheezy", "network": "BRIDGE"}, "type": "DOCKER"},
                "id": u"buildbot-worker/buildbotmasterhash-bot",
                "env": {
                    "BUILDMASTER": "master",
                    "BUILDMASTER_PORT": "1234",
                    "WORKERNAME": u"bot",
                    "WORKERPASS": "******",
                },
            },
            code=404,
            content_json={"message": "image not found"},
        )
        self._http.expect(method="delete", ep="/v2/apps/buildbot-worker/buildbotmasterhash-bot")
        d = worker.substantiate(None, FakeBuild())
        self.reactor.advance(0.1)
        self.failureResultOf(d)
        self.assertEqual(worker.instance, None)
        # teardown makes sure all containers are cleaned up

    def test_start_worker_with_params(self):
        # http://mesosphere.github.io/marathon/docs/rest-api.html#post-v2-apps
        worker = self.makeWorker(
            marathon_extra_config={"container": {"docker": {"network": None}}, "env": {"PARAMETER": "foo"}}
        )
        worker.password = "******"
        worker.masterFQDN = "master"
        self._http.expect(method="delete", ep="/v2/apps/buildbot-worker/buildbotmasterhash-bot")
        self._http.expect(
            method="post",
            ep="/v2/apps",
            json={
                "instances": 1,
                "container": {"docker": {"image": "rendered:debian:wheezy", "network": None}, "type": "DOCKER"},
                "id": u"buildbot-worker/buildbotmasterhash-bot",
                "env": {
                    "BUILDMASTER": "master",
                    "BUILDMASTER_PORT": "1234",
                    "WORKERNAME": u"bot",
                    "WORKERPASS": "******",
                    "PARAMETER": "foo",
                },
            },
            code=201,
            content_json={"Id": "id"},
        )
        d = worker.substantiate(None, FakeBuild())
        # we simulate a connection
        worker.attached(FakeBot())
        self.successResultOf(d)

        self.assertEqual(worker.instance, {"Id": "id"})
Example #21
class Tests(SynchronousTestCase):
    def setUp(self):
        self.patch(threadpool, 'ThreadPool', NonThreadPool)
        self.reactor = TestReactor()
        self.addCleanup(self.reactor.stop)
        _setReactor(self.reactor)
        self.addCleanup(_setReactor, None)

        # to ease debugging we display the error logs in the test log
        origAddCompleteLog = BuildStep.addCompleteLog

        def addCompleteLog(self, name, _log):
            if name.endswith("err.text"):
                log.msg("got error log!", name, _log)
            return origAddCompleteLog(self, name, _log)

        self.patch(BuildStep, "addCompleteLog", addCompleteLog)

        if 'BBTRACE' in os.environ:
            enable_trace(self, [
                "twisted", "worker_transition.py", "util/tu", "util/path",
                "log.py", "/mq/", "/db/", "buildbot/data/", "fake/reactor.py"
            ])

    def tearDown(self):
        # Flush the errors logged when stopping the master cancels the builds.
        self.flushLoggedErrors(LatentWorkerSubstantiatiationCancelled)
        self.assertFalse(self.master.running, "master is still running!")

    def getMaster(self, config_dict):
        self.master = master = self.successResultOf(
            getMaster(self, self.reactor, config_dict))
        return master

    def createBuildrequest(self, master, builder_ids, properties=None):
        properties = properties.asDict() if properties is not None else None
        return self.successResultOf(
            master.data.updates.addBuildset(
                waited_for=False,
                builderids=builder_ids,
                sourcestamps=[
                    {
                        'codebase': '',
                        'repository': '',
                        'branch': None,
                        'revision': None,
                        'project': ''
                    },
                ],
                properties=properties,
            ))

    def test_latent_workers_start_in_parallel(self):
        """
        If there are two latent workers configured, and two build
        requests for them, both workers will start substantiating
        concurrently.
        """
        controllers = [
            LatentController(self, 'local1'),
            LatentController(self, 'local2'),
        ]
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local1", "local2"],
                              factory=BuildFactory()),
            ],
            'workers': [controller.worker for controller in controllers],
            'protocols': {
                'null': {}
            },
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Request two builds.
        for i in range(2):
            self.createBuildrequest(master, [builder_id])

        # Check that both workers were requested to start.
        self.assertEqual(controllers[0].starting, True)
        self.assertEqual(controllers[1].starting, True)
        for controller in controllers:
            controller.start_instance(True)
            controller.auto_stop(True)

    def test_refused_substantiations_get_requeued(self):
        """
        If a latent worker refuses to substantiate, the build request becomes unclaimed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            # Disable checks about missing scheduler.
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(
            master.mq.startConsuming(
                lambda key, request: unclaimed_build_requests.append(request),
                ('buildrequests', None, 'unclaimed')))

        # Indicate that the worker can't start an instance.
        controller.start_instance(False)

        # When the substantiation fails, the buildrequest becomes unclaimed.
        self.assertEqual(
            set(brids),
            {req['buildrequestid']
             for req in unclaimed_build_requests})
        controller.auto_stop(True)
        self.flushLoggedErrors(LatentWorkerFailedToSubstantiate)

    def test_failed_substantiations_get_requeued(self):
        """
        If a latent worker fails to substantiate, the build request becomes unclaimed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            # Disable checks about missing scheduler.
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(
            master.mq.startConsuming(
                lambda key, request: unclaimed_build_requests.append(request),
                ('buildrequests', None, 'unclaimed')))

        # The worker fails to substantiate.
        controller.start_instance(
            Failure(TestException("substantiation failed")))
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # When the substantiation fails, the buildrequest becomes unclaimed.
        self.assertEqual(
            set(brids),
            {req['buildrequestid']
             for req in unclaimed_build_requests})
        controller.auto_stop(True)

    @defer.inlineCallbacks
    def test_failed_substantiations_get_exception(self):
        """
        If a latent worker fails to substantiate, the result is an exception.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            # Disable checks about missing scheduler.
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        self.createBuildrequest(master, [builder_id])

        # The worker fails to substantiate.
        controller.start_instance(
            Failure(LatentWorkerCannotSubstantiate("substantiation failed")))
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(LatentWorkerCannotSubstantiate)

        dbdict = yield master.db.builds.getBuildByNumber(builder_id, 1)

        # When the substantiation fails, the result is an exception.
        self.assertEqual(EXCEPTION, dbdict['results'])
        controller.auto_stop(True)

    def test_worker_accepts_builds_after_failure(self):
        """
        If a latent worker fails to substantiate, the worker is still able to accept jobs.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            # Disable checks about missing scheduler.
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        controller.auto_stop(True)
        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(
            master.mq.startConsuming(
                lambda key, request: unclaimed_build_requests.append(request),
                ('buildrequests', None, 'unclaimed')))
        # The worker fails to substantiate.
        controller.start_instance(
            Failure(TestException("substantiation failed")))
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # The retry logic should only trigger after an exponential backoff
        self.assertEqual(controller.starting, False)

        # advance the time to the point where we should retry
        master.reactor.advance(controller.worker.quarantine_initial_timeout)

        # If the worker started again after the failure, then the retry logic will have
        # already kicked in to start a new build on this (the only) worker. We check that
        # a new instance was requested, which indicates that the worker
        # accepted the build.
        self.assertEqual(controller.starting, True)

        # The worker fails to substantiate (again).
        controller.start_instance(
            Failure(TestException("substantiation failed")))
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # advance the time to the point where we should not retry
        master.reactor.advance(controller.worker.quarantine_initial_timeout)
        self.assertEqual(controller.starting, False)
        # advance the time to the point where we should retry
        master.reactor.advance(controller.worker.quarantine_initial_timeout)
        self.assertEqual(controller.starting, True)

    def test_worker_multiple_substantiations_succeed(self):
        """
        If multiple builders try to substantiate a worker at the same time
        and the substantiation succeeds, then all of the builds proceed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy-1",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
                BuilderConfig(
                    name="testy-2",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder_ids = [
            self.successResultOf(master.data.updates.findBuilderId('testy-1')),
            self.successResultOf(master.data.updates.findBuilderId('testy-2')),
        ]

        finished_builds = []
        self.successResultOf(
            master.mq.startConsuming(
                lambda key, build: finished_builds.append(build),
                ('builds', None, 'finished')))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, builder_ids)

        # The worker succeeds in substantiating.
        controller.start_instance(True)

        controller.connect_worker()

        # We check that there were two builds that finished, and
        # that they both finished with success
        self.assertEqual([build['results'] for build in finished_builds],
                         [SUCCESS] * 2)
        controller.auto_stop(True)

    def test_stalled_substantiation_then_timeout_get_requeued(self):
        """
        If a latent worker substantiates but does not connect and is then
        unsubstantiated, the build request becomes unclaimed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            # Disable checks about missing scheduler.
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(
            master.mq.startConsuming(
                lambda key, request: unclaimed_build_requests.append(request),
                ('buildrequests', None, 'unclaimed')))

        # We never start the worker; instead we let it time out.
        master.reactor.advance(controller.worker.missing_timeout)
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(defer.TimeoutError)

        # When the substantiation fails, the buildrequest becomes unclaimed.
        self.assertEqual(
            set(brids),
            {req['buildrequestid']
             for req in unclaimed_build_requests})
        controller.auto_stop(True)

    def test_failed_sendBuilderList_get_requeued(self):
        """
        If sendBuilderList fails (e.g. due to missing permissions on the workdir),
        the build request becomes unclaimed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            # Disable checks about missing scheduler.
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(
            master.mq.startConsuming(
                lambda key, request: unclaimed_build_requests.append(request),
                ('buildrequests', None, 'unclaimed')))
        logs = []
        self.successResultOf(
            master.mq.startConsuming(lambda key, log: logs.append(log),
                                     ('logs', None, 'new')))

        # The worker succeeds in substantiating.
        def remote_setBuilderList(self, dirs):
            raise TestException("can't create dir")

        controller.patchBot(self, 'remote_setBuilderList',
                            remote_setBuilderList)
        controller.start_instance(True)
        controller.connect_worker()

        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # When the substantiation fails, the buildrequest becomes unclaimed.
        self.assertEqual(
            set(brids),
            {req['buildrequestid']
             for req in unclaimed_build_requests})
        # should get 2 logs (html and txt) with proper information in there
        self.assertEqual(len(logs), 2)
        logs_by_name = {}
        for _log in logs:
            fulllog = self.successResultOf(
                master.data.get(("logs", str(_log['logid']), "raw")))
            logs_by_name[fulllog['filename']] = fulllog['raw']

        for i in ["err_text", "err_html"]:
            self.assertIn("can't create dir", logs_by_name[i])
            # make sure stacktrace is present in html
            self.assertIn(
                "buildbot.test.integration.test_latent.TestException",
                logs_by_name[i])
        controller.auto_stop(True)

    def test_failed_ping_get_requeued(self):
        """
        If the ping to the worker fails after it connects, the build request
        becomes unclaimed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            # Disable checks about missing scheduler.
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(
            master.mq.startConsuming(
                lambda key, request: unclaimed_build_requests.append(request),
                ('buildrequests', None, 'unclaimed')))
        logs = []
        self.successResultOf(
            master.mq.startConsuming(lambda key, log: logs.append(log),
                                     ('logs', None, 'new')))

        # The worker succeeds in substantiating.
        def remote_print(self, msg):
            if msg == "ping":
                raise TestException("can't ping")

        controller.patchBot(self, 'remote_print', remote_print)
        controller.start_instance(True)
        controller.connect_worker()

        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # When the substantiation fails, the buildrequest becomes unclaimed.
        self.assertEqual(
            set(brids),
            {req['buildrequestid']
             for req in unclaimed_build_requests})
        # should get 2 logs (html and txt) with proper information in there
        self.assertEqual(len(logs), 2)
        logs_by_name = {}
        for _log in logs:
            fulllog = self.successResultOf(
                master.data.get(("logs", str(_log['logid']), "raw")))
            logs_by_name[fulllog['filename']] = fulllog['raw']

        for i in ["err_text", "err_html"]:
            self.assertIn("can't ping", logs_by_name[i])
            # make sure stacktrace is present in html
            self.assertIn(
                "buildbot.test.integration.test_latent.TestException",
                logs_by_name[i])
        controller.auto_stop(True)

    def test_worker_close_connection_while_building(self):
        """
        If the worker closes the connection in the middle of a build, the next build can start correctly.
        """
        controller = LatentController(self, 'local', build_wait_timeout=0)
        # a step that we can finish when we want
        stepcontroller = BuildStepController()
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory([stepcontroller.step]),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            # Disable checks about missing scheduler.
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Request two builds.
        for i in range(2):
            self.createBuildrequest(master, [builder_id])
        controller.auto_stop(True)

        self.assertTrue(controller.starting)
        controller.start_instance(True)
        controller.connect_worker()

        builds = self.successResultOf(master.data.get(("builds", )))
        self.assertEqual(builds[0]['results'], None)
        controller.disconnect_worker()
        builds = self.successResultOf(master.data.get(("builds", )))
        self.assertEqual(builds[0]['results'], RETRY)

        # Request one build.
        self.createBuildrequest(master, [builder_id])
        controller.start_instance(True)
        controller.connect_worker()
        builds = self.successResultOf(master.data.get(("builds", )))
        self.assertEqual(builds[1]['results'], None)
        stepcontroller.finish_step(SUCCESS)
        builds = self.successResultOf(master.data.get(("builds", )))
        self.assertEqual(builds[1]['results'], SUCCESS)

    @defer.inlineCallbacks
    def test_build_stop_with_cancelled_during_substantiation(self):
        """
        If a build is stopped while the latent worker is substantiating, the build becomes cancelled.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            # Disable checks about missing scheduler.
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder = master.botmaster.builders['testy']
        builder_id = self.successResultOf(builder.getBuilderId())

        # Trigger a buildrequest
        self.createBuildrequest(master, [builder_id])

        # Stop the build
        build = builder.getBuild(0)
        build.stopBuild('no reason', results=CANCELLED)

        # Indicate that the worker can't start an instance.
        controller.start_instance(False)

        dbdict = yield master.db.builds.getBuildByNumber(builder_id, 1)
        self.assertEqual(CANCELLED, dbdict['results'])
        controller.auto_stop(True)
        self.flushLoggedErrors(LatentWorkerFailedToSubstantiate)

    @defer.inlineCallbacks
    def test_build_stop_with_retry_during_substantiation(self):
        """
        If the master is shutting down while the latent worker is substantiating, the build becomes retry.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            # Disable checks about missing scheduler.
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder = master.botmaster.builders['testy']
        builder_id = self.successResultOf(builder.getBuilderId())

        # Trigger a buildrequest
        _, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(
            master.mq.startConsuming(
                lambda key, request: unclaimed_build_requests.append(request),
                ('buildrequests', None, 'unclaimed')))

        # Stop the build
        build = builder.getBuild(0)
        build.stopBuild('no reason', results=RETRY)

        # Indicate that the worker can't start an instance.
        controller.start_instance(False)

        dbdict = yield master.db.builds.getBuildByNumber(builder_id, 1)
        self.assertEqual(RETRY, dbdict['results'])
        self.assertEqual(
            set(brids),
            {req['buildrequestid']
             for req in unclaimed_build_requests})
        controller.auto_stop(True)
        self.flushLoggedErrors(LatentWorkerFailedToSubstantiate)

    @defer.inlineCallbacks
    def test_rejects_build_on_instance_with_different_type_timeout_zero(self):
        """
        If a latent worker gets its instance type from properties that are
        rendered from the build, then the buildrequestdistributor must not
        schedule any builds on a worker that is running a different instance
        type than the one these builds require.
        """
        controller = LatentController(self,
                                      'local',
                                      kind=Interpolate('%(prop:worker_kind)s'),
                                      build_wait_timeout=0)

        # a step that we can finish when we want
        stepcontroller = BuildStepController()
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory([stepcontroller.step]),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            'multiMaster':
            True,
        }

        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # create build request
        self.createBuildrequest(master, [builder_id],
                                properties=Properties(worker_kind='a'))

        # start the build and verify the kind of the worker. Note that the
        # buildmaster needs to restart the worker in order to change the worker
        # kind, so we allow it both to auto start and stop
        self.assertEqual(True, controller.starting)

        controller.auto_connect_worker = True
        controller.auto_disconnect_worker = True
        controller.auto_start(True)
        controller.auto_stop(True)
        controller.connect_worker()
        self.assertEqual('a', (yield controller.get_started_kind()))

        # before the other build finished, create another build request
        self.createBuildrequest(master, [builder_id],
                                properties=Properties(worker_kind='b'))
        stepcontroller.finish_step(SUCCESS)

        # give the botmaster a chance to insubstantiate the worker and
        # maybe substantiate it again for the pending build request
        self.reactor.advance(0.1)

        # verify that the second build restarted with the expected instance
        # kind
        self.assertEqual('b', (yield controller.get_started_kind()))
        stepcontroller.finish_step(SUCCESS)

        dbdict = yield master.db.builds.getBuild(1)
        self.assertEqual(SUCCESS, dbdict['results'])
        dbdict = yield master.db.builds.getBuild(2)
        self.assertEqual(SUCCESS, dbdict['results'])

    @defer.inlineCallbacks
    def test_rejects_build_on_instance_with_different_type_timeout_nonzero(
            self):
        """
        If a latent worker gets its instance type from properties that are
        rendered from the build, then the buildrequestdistributor must not
        schedule any builds on a worker that is running a different instance
        type than the one these builds require.
        """
        controller = LatentController(self,
                                      'local',
                                      kind=Interpolate('%(prop:worker_kind)s'),
                                      build_wait_timeout=5)

        # a step that we can finish when we want
        stepcontroller = BuildStepController()
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory([stepcontroller.step]),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            'multiMaster':
            True,
        }

        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # create build request
        self.createBuildrequest(master, [builder_id],
                                properties=Properties(worker_kind='a'))

        # start the build and verify the kind of the worker. Note that the
        # buildmaster needs to restart the worker in order to change the worker
        # kind, so we allow it both to auto start and stop
        self.assertEqual(True, controller.starting)

        controller.auto_connect_worker = True
        controller.auto_disconnect_worker = True
        controller.auto_start(True)
        controller.auto_stop(True)
        controller.connect_worker()
        self.assertEqual('a', (yield controller.get_started_kind()))

        # before the other build finished, create another build request
        self.createBuildrequest(master, [builder_id],
                                properties=Properties(worker_kind='b'))
        stepcontroller.finish_step(SUCCESS)

        # give the botmaster a chance to insubstantiate the worker and
        # maybe substantiate it again for the pending build request
        self.reactor.advance(0.1)

        # verify build has not started, even though the worker is waiting
        # for one
        self.assertIsNone((yield master.db.builds.getBuild(2)))
        self.assertTrue(controller.started)

        # wait until the latent worker times out, is insubstantiated,
        # is substantiated because of pending buildrequest and starts the build
        self.reactor.advance(6)
        self.assertIsNotNone((yield master.db.builds.getBuild(2)))

        # verify that the second build restarted with the expected instance
        # kind
        self.assertEqual('b', (yield controller.get_started_kind()))
        stepcontroller.finish_step(SUCCESS)

        dbdict = yield master.db.builds.getBuild(1)
        self.assertEqual(SUCCESS, dbdict['results'])
        dbdict = yield master.db.builds.getBuild(2)
        self.assertEqual(SUCCESS, dbdict['results'])
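
The LatentController used throughout the tests above stands in for a real latent worker: start_instance(True/False) resolves the deferred the master waits on during substantiation, and connect_worker()/disconnect_worker() simulate the worker process attaching to the master. For orientation, here is a minimal sketch of the production-side counterpart, assuming the standard AbstractLatentWorker interface from buildbot.worker.latent; the cloud-handle bookkeeping is purely illustrative, not the author's code.

from twisted.internet import defer

from buildbot.worker.latent import AbstractLatentWorker


class SketchLatentWorker(AbstractLatentWorker):
    """Illustrative only: the master calls start_instance() when a build
    needs this worker and stop_instance() when it is no longer needed."""

    def __init__(self, name, password, image, **kwargs):
        super().__init__(name, password, **kwargs)
        self.image = image
        self.instance = None

    @defer.inlineCallbacks
    def start_instance(self, build):
        # Properties such as 'worker_kind' in the tests above are rendered
        # against the build that triggered the substantiation.
        image = yield build.render(self.image)
        self.instance = {'image': image}  # stand-in for a real cloud handle
        return True  # True means the instance was started successfully

    def stop_instance(self, fast=False):
        self.instance = None
        return defer.succeed(None)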
Example #22
0
class TestHyperLatentWorker(unittest.SynchronousTestCase):

    def setUp(self):
        self.patch(threadpool, 'ThreadPool', NonThreadPool)
        self.reactor = TestReactor()
        _setReactor(self.reactor)
        self.patch(workerhyper, 'Hyper', hyper.Client)
        self.build = Properties(
            image="busybox:latest", builder="docker_worker")
        self.worker = None

    def tearDown(self):
        if self.worker is not None:
            self.worker.stopService()
            self.reactor.pump([.1])
        self.assertIsNone(hyper.Client.instance)
        _setReactor(None)

    def test_constructor_normal(self):
        worker = HyperLatentWorker('bot', 'pass', 'tcp://hyper.sh/', 'foo', 'bar', 'debian:wheezy')
        # class instantiation configures nothing
        self.assertEqual(worker.client, None)
        self.assertEqual(worker.client_args, None)

    def test_constructor_nohyper(self):
        self.patch(workerhyper, 'Hyper', None)
        self.assertRaises(config.ConfigErrors, HyperLatentWorker,
                          'bot', 'pass', 'tcp://hyper.sh/', 'foo', 'bar', 'debian:wheezy')

    def test_constructor_badsize(self):
        self.assertRaises(config.ConfigErrors, HyperLatentWorker,
                          'bot', 'pass', 'tcp://hyper.sh/', 'foo', 'bar', 'debian:wheezy', hyper_size="big")

    def makeWorker(self, **kwargs):
        kwargs.setdefault('image', 'debian:wheezy')
        worker = HyperLatentWorker('bot', 'pass', 'tcp://hyper.sh/', 'foo', 'bar', **kwargs)
        self.worker = worker
        master = fakemaster.make_master(testcase=self, wantData=True)
        worker.setServiceParent(master)
        worker.reactor = self.reactor
        self.successResultOf(worker.startService())
        return worker

    def test_start_service(self):
        worker = self.worker = self.makeWorker()
        self.assertEqual(worker.client_args, {'clouds': {'tcp://hyper.sh/': {
            'secretkey': 'bar', 'accesskey': 'foo'}}})
        # client is lazily created on worker substantiation
        self.assertEqual(worker.client, None)

    def test_start_worker(self):
        worker = self.makeWorker()

        d = worker.substantiate(None, FakeBuild())
        # we simulate a connection
        worker.attached(FakeBot())
        self.successResultOf(d)

        self.assertIsNotNone(worker.client)
        self.assertEqual(worker.instance, {
            'Id': '8a61192da2b3bb2d922875585e29b74ec0dc4e0117fcbf84c962204e97564cd7',
            'Warnings': None,
            'image': 'rendered:debian:wheezy'})
        # teardown makes sure all containers are cleaned up

    def test_start_worker_but_no_connection_and_shutdown(self):
        worker = self.makeWorker()
        worker.substantiate(None, FakeBuild())
        self.assertIsNotNone(worker.client)
        self.assertEqual(worker.instance, {
            'Id': '8a61192da2b3bb2d922875585e29b74ec0dc4e0117fcbf84c962204e97564cd7',
            'Warnings': None,
            'image': 'rendered:debian:wheezy'})
        # teardown makes sure all containers are cleaned up

    def test_start_worker_but_error(self):
        worker = self.makeWorker(image="buggy")
        d = worker.substantiate(None, FakeBuild())
        self.reactor.pump([.1])
        self.failureResultOf(d)
        self.assertIsNotNone(worker.client)
        self.assertEqual(worker.instance, None)
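
Judging from the constructor calls exercised above, a HyperLatentWorker is configured with a worker name, a password, the Hyper.sh endpoint, a pair of credentials, and a docker image. Below is a hedged master.cfg-style sketch; the import path and the meaning of the credential arguments are inferred from these tests rather than documentation, and the hyper_size value is likewise an assumption (the tests only show that an invalid size such as "big" raises config.ConfigErrors).

from buildbot.worker.hyper import HyperLatentWorker

c['workers'] = [
    HyperLatentWorker(
        'bot',               # worker name
        'pass',              # worker password
        'tcp://hyper.sh/',   # Hyper.sh endpoint
        'foo',               # access key (assumed meaning)
        'bar',               # secret key (assumed meaning)
        'debian:wheezy',     # docker image the worker runs in
        hyper_size='s3',     # container size; bad values raise ConfigErrors
    ),
]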
Example #23
0
 def setUp(self):
     self.reactor = TestReactor()
     _setReactor(self.reactor)
     self.build = Properties(image="busybox:latest", builder="docker_worker")
     self.worker = None
Example #24
0
class TestMarathonLatentWorker(unittest.SynchronousTestCase):
    def setUp(self):
        self.reactor = TestReactor()
        _setReactor(self.reactor)
        self.build = Properties(
            image="busybox:latest", builder="docker_worker")
        self.worker = None

    def tearDown(self):
        if self.worker is not None:
            class FakeResult(object):
                code = 200
            self._http.delete = lambda _: defer.succeed(FakeResult())
            self.worker.master.stopService()
        _setReactor(None)

    def test_constructor_normal(self):
        worker = MarathonLatentWorker('bot', 'tcp://marathon.local', 'foo',
                                      'bar', 'debian:wheezy')
        # class instantiation configures nothing
        self.assertEqual(worker._http, None)

    def makeWorker(self, **kwargs):
        kwargs.setdefault('image', 'debian:wheezy')
        worker = MarathonLatentWorker('bot', 'tcp://marathon.local', **kwargs)
        self.worker = worker
        master = fakemaster.make_master(testcase=self, wantData=True)
        self._http = self.successResultOf(
            fakehttpclientservice.HTTPClientService.getFakeService(
                master, self, 'tcp://marathon.local', auth=kwargs.get(
                    'auth')))
        worker.setServiceParent(master)
        worker.reactor = self.reactor
        self.successResultOf(master.startService())
        worker.masterhash = "masterhash"
        return worker

    def test_start_service(self):
        worker = self.worker = self.makeWorker()
        # the http client service is set up as soon as the worker service starts
        self.assertNotEqual(worker._http, None)

    def test_start_worker(self):
        # http://mesosphere.github.io/marathon/docs/rest-api.html#post-v2-apps
        worker = self.makeWorker()
        worker.password = "******"
        worker.masterFQDN = "master"
        self._http.expect(
            method='delete',
            ep='/v2/apps/buildbot-worker/buildbotmasterhash-bot')
        self._http.expect(
            method='post',
            ep='/v2/apps',
            json={
                'instances': 1,
                'container': {
                    'docker': {
                        'image': 'rendered:debian:wheezy',
                        'network': 'BRIDGE'
                    },
                    'type': 'DOCKER'
                },
                'id': u'buildbot-worker/buildbotmasterhash-bot',
                'env': {
                    'BUILDMASTER': "master",
                    'BUILDMASTER_PORT': '1234',
                    'WORKERNAME': u'bot',
                    'WORKERPASS': "******"
                }
            },
            code=201,
            content_json={'Id': 'id'})
        d = worker.substantiate(None, FakeBuild())
        # we simulate a connection
        worker.attached(FakeBot())
        self.successResultOf(d)

        self.assertEqual(worker.instance, {'Id': 'id'})
        # teardown makes sure all containers are cleaned up

    def test_start_worker_but_no_connection_and_shutdown(self):
        worker = self.makeWorker()
        worker.password = "******"
        worker.masterFQDN = "master"
        self._http.expect(
            method='delete',
            ep='/v2/apps/buildbot-worker/buildbotmasterhash-bot')
        self._http.expect(
            method='post',
            ep='/v2/apps',
            json={
                'instances': 1,
                'container': {
                    'docker': {
                        'image': 'rendered:debian:wheezy',
                        'network': 'BRIDGE'
                    },
                    'type': 'DOCKER'
                },
                'id': u'buildbot-worker/buildbotmasterhash-bot',
                'env': {
                    'BUILDMASTER': "master",
                    'BUILDMASTER_PORT': '1234',
                    'WORKERNAME': u'bot',
                    'WORKERPASS': "******"
                }
            },
            code=201,
            content_json={'Id': 'id'})

        worker.substantiate(None, FakeBuild())
        self.assertEqual(worker.instance, {'Id': 'id'})
        # teardown makes sure all containers are cleaned up

    def test_start_worker_but_error(self):
        worker = self.makeWorker()
        self._http.expect(
            method='delete',
            ep='/v2/apps/buildbot-worker/buildbotmasterhash-bot')
        self._http.expect(
            method='post',
            ep='/v2/apps',
            json={
                'instances': 1,
                'container': {
                    'docker': {
                        'image': 'rendered:debian:wheezy',
                        'network': 'BRIDGE'
                    },
                    'type': 'DOCKER'
                },
                'id': u'buildbot-worker/buildbotmasterhash-bot',
                'env': {
                    'BUILDMASTER': "master",
                    'BUILDMASTER_PORT': '1234',
                    'WORKERNAME': u'bot',
                    'WORKERPASS': "******"
                }
            },
            code=404,
            content_json={'message': 'image not found'})
        self._http.expect(
            method='delete',
            ep='/v2/apps/buildbot-worker/buildbotmasterhash-bot')
        d = worker.substantiate(None, FakeBuild())
        self.reactor.advance(.1)
        self.failureResultOf(d)
        self.assertEqual(worker.instance, None)
        # teardown makes sure all containers are cleaned up

    def test_start_worker_with_params(self):
        # http://mesosphere.github.io/marathon/docs/rest-api.html#post-v2-apps
        worker = self.makeWorker(marathon_extra_config={
            'container': {
                'docker': {
                    'network': None
                }
            },
            'env': {
                'PARAMETER': 'foo'
            }
        })
        worker.password = "******"
        worker.masterFQDN = "master"
        self._http.expect(
            method='delete',
            ep='/v2/apps/buildbot-worker/buildbotmasterhash-bot')
        self._http.expect(
            method='post',
            ep='/v2/apps',
            json={
                'instances': 1,
                'container': {
                    'docker': {
                        'image': 'rendered:debian:wheezy',
                        'network': None
                    },
                    'type': 'DOCKER'
                },
                'id': u'buildbot-worker/buildbotmasterhash-bot',
                'env': {
                    'BUILDMASTER': "master",
                    'BUILDMASTER_PORT': '1234',
                    'WORKERNAME': u'bot',
                    'WORKERPASS': "******",
                    'PARAMETER': 'foo'
                }
            },
            code=201,
            content_json={'Id': 'id'})
        d = worker.substantiate(None, FakeBuild())
        # we simulate a connection
        worker.attached(FakeBot())
        self.successResultOf(d)

        self.assertEqual(worker.instance, {'Id': 'id'})
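
The Marathon tests construct the worker as MarathonLatentWorker('bot', 'tcp://marathon.local', image=..., marathon_extra_config=...), and the expected POST /v2/apps payloads show how the image and the extra config end up in the Marathon app definition. A hedged configuration sketch along those lines follows; the import path is assumed, and the extra-config keys simply mirror test_start_worker_with_params above.

from buildbot.worker.marathon import MarathonLatentWorker

c['workers'] = [
    MarathonLatentWorker(
        'bot',                     # worker name
        'tcp://marathon.local',    # Marathon REST endpoint
        image='debian:wheezy',     # docker image, rendered per build
        marathon_extra_config={    # merged into the POST /v2/apps payload
            'container': {'docker': {'network': 'BRIDGE'}},
            'env': {'PARAMETER': 'foo'},
        },
    ),
]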
Example #25
0
class Tests(SynchronousTestCase):

    def setUp(self):
        self.patch(threadpool, 'ThreadPool', NonThreadPool)
        self.reactor = TestReactor()
        self.addCleanup(self.reactor.stop)
        _setReactor(self.reactor)
        self.addCleanup(_setReactor, None)

        # to ease debugging we display the error logs in the test log
        origAddCompleteLog = BuildStep.addCompleteLog

        def addCompleteLog(self, name, _log):
            if name.endswith("err.text"):
                log.msg("got error log!", name, _log)
            return origAddCompleteLog(self, name, _log)
        self.patch(BuildStep, "addCompleteLog", addCompleteLog)

        if 'BBTRACE' in os.environ:
            enable_trace(self, ["twisted", "worker_transition.py", "util/tu", "util/path",
                                "log.py", "/mq/", "/db/", "buildbot/data/", "fake/reactor.py"])

    def tearDown(self):
        # Flush the errors logged when stopping the master cancels the builds.
        self.flushLoggedErrors(LatentWorkerSubstantiatiationCancelled)
        self.assertFalse(self.master.running, "master is still running!")

    def getMaster(self, config_dict):
        self.master = master = self.successResultOf(
            getMaster(self, self.reactor, config_dict))
        return master

    def createBuildrequest(self, master, builder_ids, properties=None):
        properties = properties.asDict() if properties is not None else None
        return self.successResultOf(
            master.data.updates.addBuildset(
                waited_for=False,
                builderids=builder_ids,
                sourcestamps=[
                    {'codebase': '',
                     'repository': '',
                     'branch': None,
                     'revision': None,
                     'project': ''},
                ],
                properties=properties,
            )
        )
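    # Example usage (as in the tests below): attach build properties by
    # passing a Properties instance, e.g.
    #   self.createBuildrequest(master, [builder_id],
    #                           properties=Properties(worker_kind='a'))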

    def test_latent_workers_start_in_parallel(self):
        """
        If there are two latent workers configured, and two build
        requests for them, both workers will start substantiating
        concurrently.
        """
        controllers = [
            LatentController(self, 'local1'),
            LatentController(self, 'local2'),
        ]
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local1", "local2"],
                              factory=BuildFactory()),
            ],
            'workers': [controller.worker for controller in controllers],
            'protocols': {'null': {}},
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Request two builds.
        for i in range(2):
            self.createBuildrequest(master, [builder_id])

        # Check that both workers were requested to start.
        self.assertEqual(controllers[0].starting, True)
        self.assertEqual(controllers[1].starting, True)
        for controller in controllers:
            controller.start_instance(True)
            controller.auto_stop(True)

    def test_refused_substantiations_get_requeued(self):
        """
        If a latent worker refuses to substantiate, the build request becomes unclaimed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory(),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            # Disable checks about missing scheduler.
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(master.mq.startConsuming(
            lambda key, request: unclaimed_build_requests.append(request),
            ('buildrequests', None, 'unclaimed')))

        # Indicate that the worker can't start an instance.
        controller.start_instance(False)

        # When the substantiation fails, the buildrequest becomes unclaimed.
        self.assertEqual(
            set(brids),
            {req['buildrequestid'] for req in unclaimed_build_requests}
        )
        controller.auto_stop(True)
        self.flushLoggedErrors(LatentWorkerFailedToSubstantiate)

    def test_failed_substantiations_get_requeued(self):
        """
        If a latent worker fails to substantiate, the build request becomes unclaimed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory(),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            # Disable checks about missing scheduler.
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(master.mq.startConsuming(
            lambda key, request: unclaimed_build_requests.append(request),
            ('buildrequests', None, 'unclaimed')))

        # The worker fails to substantiate.
        controller.start_instance(
            Failure(TestException("substantiation failed")))
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # When the substantiation fails, the buildrequest becomes unclaimed.
        self.assertEqual(
            set(brids),
            {req['buildrequestid'] for req in unclaimed_build_requests}
        )
        controller.auto_stop(True)

    @defer.inlineCallbacks
    def test_failed_substantiations_get_exception(self):
        """
        If a latent worker fails to substantiate, the result is an exception.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory(),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            # Disable checks about missing scheduler.
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        self.createBuildrequest(master, [builder_id])

        # The worker fails to substantiate.
        controller.start_instance(
            Failure(LatentWorkerCannotSubstantiate("substantiation failed")))
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(LatentWorkerCannotSubstantiate)

        dbdict = yield master.db.builds.getBuildByNumber(builder_id, 1)

        # When the substantiation fails, the result is an exception.
        self.assertEqual(EXCEPTION, dbdict['results'])
        controller.auto_stop(True)

    def test_worker_accepts_builds_after_failure(self):
        """
        If a latent worker fails to substantiate, the worker is still able to accept jobs.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory(),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            # Disable checks about missing scheduler.
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        controller.auto_stop(True)
        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(master.mq.startConsuming(
            lambda key, request: unclaimed_build_requests.append(request),
            ('buildrequests', None, 'unclaimed')))
        # The worker fails to substantiate.
        controller.start_instance(
            Failure(TestException("substantiation failed")))
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # The retry logic should only trigger after an exponential backoff
        self.assertEqual(controller.starting, False)

        # advance the time to the point where we should retry
        master.reactor.advance(controller.worker.quarantine_initial_timeout)

        # If the worker started again after the failure, then the retry logic will have
        # already kicked in to start a new build on this (the only) worker. We check that
        # a new instance was requested, which indicates that the worker
        # accepted the build.
        self.assertEqual(controller.starting, True)

        # The worker fails to substantiate(again).
        controller.start_instance(
            Failure(TestException("substantiation failed")))
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # advance the time to the point where we should not retry
        master.reactor.advance(controller.worker.quarantine_initial_timeout)
        self.assertEqual(controller.starting, False)
        # advance the time to the point where we should retry
        master.reactor.advance(controller.worker.quarantine_initial_timeout)
        self.assertEqual(controller.starting, True)

    def test_worker_multiple_substantiations_succeed(self):
        """
        If multiple builders try to substantiate a worker at the same time
        and the substantiation succeeds, then all of the builds proceed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(name="testy-1",
                              workernames=["local"],
                              factory=BuildFactory(),
                              ),
                BuilderConfig(name="testy-2",
                              workernames=["local"],
                              factory=BuildFactory(),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder_ids = [
            self.successResultOf(master.data.updates.findBuilderId('testy-1')),
            self.successResultOf(master.data.updates.findBuilderId('testy-2')),
        ]

        finished_builds = []
        self.successResultOf(master.mq.startConsuming(
            lambda key, build: finished_builds.append(build),
            ('builds', None, 'finished')))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, builder_ids)

        # The worker succeeds to substantiate.
        controller.start_instance(True)

        controller.connect_worker()

        # We check that there were two builds that finished, and
        # that they both finished with success
        self.assertEqual([build['results']
                          for build in finished_builds], [SUCCESS] * 2)
        controller.auto_stop(True)

    def test_stalled_substantiation_then_timeout_get_requeued(self):
        """
        If a latent worker substantiates but never connects, and is then
        unsubstantiated, the build request becomes unclaimed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory(),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            # Disable checks about missing scheduler.
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(master.mq.startConsuming(
            lambda key, request: unclaimed_build_requests.append(request),
            ('buildrequests', None, 'unclaimed')))

        # We never start the worker, rather timeout it.
        master.reactor.advance(controller.worker.missing_timeout)
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(defer.TimeoutError)

        # When the substantiation fails, the buildrequest becomes unclaimed.
        self.assertEqual(
            set(brids),
            {req['buildrequestid'] for req in unclaimed_build_requests}
        )
        controller.auto_stop(True)

    def test_failed_sendBuilderList_get_requeued(self):
        """
        If sendBuilderList fails, e.g. due to missing permissions on the
        workdir, the build request becomes unclaimed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory(),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            # Disable checks about missing scheduler.
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(master.mq.startConsuming(
            lambda key, request: unclaimed_build_requests.append(request),
            ('buildrequests', None, 'unclaimed')))
        logs = []
        self.successResultOf(master.mq.startConsuming(
            lambda key, log: logs.append(log),
            ('logs', None, 'new')))

        # The worker succeeds in substantiating
        def remote_setBuilderList(self, dirs):
            raise TestException("can't create dir")
        controller.patchBot(self, 'remote_setBuilderList',
                            remote_setBuilderList)
        controller.start_instance(True)
        controller.connect_worker()

        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # When the substantiation fails, the buildrequest becomes unclaimed.
        self.assertEqual(
            set(brids),
            {req['buildrequestid'] for req in unclaimed_build_requests}
        )
        # should get 2 logs (html and txt) with proper information in there
        self.assertEqual(len(logs), 2)
        logs_by_name = {}
        for _log in logs:
            fulllog = self.successResultOf(
                master.data.get(("logs", str(_log['logid']), "raw")))
            logs_by_name[fulllog['filename']] = fulllog['raw']

        for i in ["err_text", "err_html"]:
            self.assertIn("can't create dir", logs_by_name[i])
            # make sure stacktrace is present in html
            self.assertIn("buildbot.test.integration.test_latent.TestException",
                logs_by_name[i])
        controller.auto_stop(True)

    def test_failed_ping_get_requeued(self):
        """
        If the ping to the freshly connected worker fails, the build request
        becomes unclaimed.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory(),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            # Disable checks about missing scheduler.
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(master.mq.startConsuming(
            lambda key, request: unclaimed_build_requests.append(request),
            ('buildrequests', None, 'unclaimed')))
        logs = []
        self.successResultOf(master.mq.startConsuming(
            lambda key, log: logs.append(log),
            ('logs', None, 'new')))

        # The worker succeeds in substantiating
        def remote_print(self, msg):
            if msg == "ping":
                raise TestException("can't ping")
        controller.patchBot(self, 'remote_print', remote_print)
        controller.start_instance(True)
        controller.connect_worker()

        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # When the substantiation fails, the buildrequest becomes unclaimed.
        self.assertEqual(
            set(brids),
            {req['buildrequestid'] for req in unclaimed_build_requests}
        )
        # should get 2 logs (html and txt) with proper information in there
        self.assertEqual(len(logs), 2)
        logs_by_name = {}
        for _log in logs:
            fulllog = self.successResultOf(
                master.data.get(("logs", str(_log['logid']), "raw")))
            logs_by_name[fulllog['filename']] = fulllog['raw']

        for i in ["err_text", "err_html"]:
            self.assertIn("can't ping", logs_by_name[i])
            # make sure stacktrace is present in html
            self.assertIn("buildbot.test.integration.test_latent.TestException",
                logs_by_name[i])
        controller.auto_stop(True)

    def test_worker_close_connection_while_building(self):
        """
        If the worker closes the connection in the middle of a build, the next build can start correctly
        """
        controller = LatentController(self, 'local', build_wait_timeout=0)
        # a step that we can finish when we want
        stepcontroller = BuildStepController()
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory([stepcontroller.step]),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            # Disable checks about missing scheduler.
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # Request two builds.
        for i in range(2):
            self.createBuildrequest(master, [builder_id])
        controller.auto_stop(True)

        self.assertTrue(controller.starting)
        controller.start_instance(True)
        controller.connect_worker()

        builds = self.successResultOf(
            master.data.get(("builds",)))
        self.assertEqual(builds[0]['results'], None)
        controller.disconnect_worker()
        builds = self.successResultOf(
            master.data.get(("builds",)))
        self.assertEqual(builds[0]['results'], RETRY)

        # Request one build.
        self.createBuildrequest(master, [builder_id])
        controller.start_instance(True)
        controller.connect_worker()
        builds = self.successResultOf(
            master.data.get(("builds",)))
        self.assertEqual(builds[1]['results'], None)
        stepcontroller.finish_step(SUCCESS)
        builds = self.successResultOf(
            master.data.get(("builds",)))
        self.assertEqual(builds[1]['results'], SUCCESS)

    @defer.inlineCallbacks
    def test_build_stop_with_cancelled_during_substantiation(self):
        """
        If a build is stopped while the latent worker is substantiating, the build becomes cancelled.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory(),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            # Disable checks about missing scheduler.
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder = master.botmaster.builders['testy']
        builder_id = self.successResultOf(builder.getBuilderId())

        # Trigger a buildrequest
        self.createBuildrequest(master, [builder_id])

        # Stop the build
        build = builder.getBuild(0)
        build.stopBuild('no reason', results=CANCELLED)

        # Indicate that the worker can't start an instance.
        controller.start_instance(False)

        dbdict = yield master.db.builds.getBuildByNumber(builder_id, 1)
        self.assertEqual(CANCELLED, dbdict['results'])
        controller.auto_stop(True)
        self.flushLoggedErrors(LatentWorkerFailedToSubstantiate)

    @defer.inlineCallbacks
    def test_build_stop_with_retry_during_substantiation(self):
        """
        If the master shuts down while the latent worker is substantiating, the build result becomes RETRY.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory(),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            # Disable checks about missing scheduler.
            'multiMaster': True,
        }
        master = self.getMaster(config_dict)
        builder = master.botmaster.builders['testy']
        builder_id = self.successResultOf(builder.getBuilderId())

        # Trigger a buildrequest
        _, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(master.mq.startConsuming(
            lambda key, request: unclaimed_build_requests.append(request),
            ('buildrequests', None, 'unclaimed')))

        # Stop the build
        build = builder.getBuild(0)
        build.stopBuild('no reason', results=RETRY)

        # Indicate that the worker can't start an instance.
        controller.start_instance(False)

        dbdict = yield master.db.builds.getBuildByNumber(builder_id, 1)
        self.assertEqual(RETRY, dbdict['results'])
        self.assertEqual(
            set(brids),
            {req['buildrequestid'] for req in unclaimed_build_requests}
        )
        controller.auto_stop(True)
        self.flushLoggedErrors(LatentWorkerFailedToSubstantiate)

    @defer.inlineCallbacks
    def test_rejects_build_on_instance_with_different_type_timeout_zero(self):
        """
        If a latent worker gets its instance type from properties that are
        rendered from the build, then the buildrequestdistributor must not
        schedule any builds on a worker that is running a different instance
        type than the one these builds require.
        """
        controller = LatentController(self, 'local',
                                      kind=Interpolate('%(prop:worker_kind)s'),
                                      build_wait_timeout=0)

        # a step that we can finish when we want
        stepcontroller = BuildStepController()
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory([stepcontroller.step]),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            'multiMaster': True,
        }

        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # create build request
        self.createBuildrequest(master, [builder_id],
                                properties=Properties(worker_kind='a'))

        # start the build and verify the kind of the worker. Note that the
        # buildmaster needs to restart the worker in order to change the worker
        # kind, so we allow it both to auto start and stop
        self.assertEqual(True, controller.starting)

        controller.auto_connect_worker = True
        controller.auto_disconnect_worker = True
        controller.auto_start(True)
        controller.auto_stop(True)
        controller.connect_worker()
        self.assertEqual('a', (yield controller.get_started_kind()))

        # before the other build finished, create another build request
        self.createBuildrequest(master, [builder_id],
                                properties=Properties(worker_kind='b'))
        stepcontroller.finish_step(SUCCESS)

        # give the botmaster a chance to insubstantiate the worker and
        # maybe substantiate it again for the pending build request
        self.reactor.advance(0.1)

        # verify that the second build restarted with the expected instance
        # kind
        self.assertEqual('b', (yield controller.get_started_kind()))
        stepcontroller.finish_step(SUCCESS)

        dbdict = yield master.db.builds.getBuild(1)
        self.assertEqual(SUCCESS, dbdict['results'])
        dbdict = yield master.db.builds.getBuild(2)
        self.assertEqual(SUCCESS, dbdict['results'])

    @defer.inlineCallbacks
    def test_rejects_build_on_instance_with_different_type_timeout_nonzero(self):
        """
        If a latent worker gets its instance type from properties that are
        rendered from the build, then the buildrequestdistributor must not
        schedule any builds on a worker that is running a different instance
        type than the one these builds require.
        """
        controller = LatentController(self, 'local',
                                      kind=Interpolate('%(prop:worker_kind)s'),
                                      build_wait_timeout=5)

        # a step that we can finish when we want
        stepcontroller = BuildStepController()
        config_dict = {
            'builders': [
                BuilderConfig(name="testy",
                              workernames=["local"],
                              factory=BuildFactory([stepcontroller.step]),
                              ),
            ],
            'workers': [controller.worker],
            'protocols': {'null': {}},
            'multiMaster': True,
        }

        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        # create build request
        self.createBuildrequest(master, [builder_id],
                                properties=Properties(worker_kind='a'))

        # start the build and verify the kind of the worker. Note that the
        # buildmaster needs to restart the worker in order to change the worker
        # kind, so we allow it both to auto start and stop
        self.assertEqual(True, controller.starting)

        controller.auto_connect_worker = True
        controller.auto_disconnect_worker = True
        controller.auto_start(True)
        controller.auto_stop(True)
        controller.connect_worker()
        self.assertEqual('a', (yield controller.get_started_kind()))

        # before the other build finished, create another build request
        self.createBuildrequest(master, [builder_id],
                                properties=Properties(worker_kind='b'))
        stepcontroller.finish_step(SUCCESS)

        # give the botmaster a chance to insubstantiate the worker and
        # maybe substantiate it again for the pending build request
        self.reactor.advance(0.1)

        # verify build has not started, even though the worker is waiting
        # for one
        self.assertIsNone((yield master.db.builds.getBuild(2)))
        self.assertTrue(controller.started)

        # wait until the latent worker times out, is insubstantiated,
        # is substantiated because of pending buildrequest and starts the build
        self.reactor.advance(6)
        self.assertIsNotNone((yield master.db.builds.getBuild(2)))

        # verify that the second build restarted with the expected instance
        # kind
        self.assertEqual('b', (yield controller.get_started_kind()))
        stepcontroller.finish_step(SUCCESS)

        dbdict = yield master.db.builds.getBuild(1)
        self.assertEqual(SUCCESS, dbdict['results'])
        dbdict = yield master.db.builds.getBuild(2)
        self.assertEqual(SUCCESS, dbdict['results'])
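
Several of the failure tests above share the same observation pattern: subscribe to the master's message queue for unclaimed build requests and compare the collected ids with those returned by createBuildrequest. A condensed, stand-alone sketch of that pattern follows; it assumes the same SynchronousTestCase context as the tests above, i.e. self.successResultOf, master, controller, and builder_id are already set up.

# Collect every buildrequest that becomes unclaimed, e.g. after the latent
# worker refuses to start an instance.
unclaimed_build_requests = []
self.successResultOf(master.mq.startConsuming(
    lambda key, request: unclaimed_build_requests.append(request),
    ('buildrequests', None, 'unclaimed')))

bsid, brids = self.createBuildrequest(master, [builder_id])
controller.start_instance(False)  # refuse to provision an instance

# The refused buildrequests are requeued, i.e. reported as unclaimed.
self.assertEqual(
    set(brids),
    {req['buildrequestid'] for req in unclaimed_build_requests})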