Example #1
    def test_worker_sync_builds_uploading(self, storage):
        """Make sure scheduler takes into account runs that are UPLOADING.

        1. Create a "synchronous" Project
        2. Add an UPLOADING build and a QUEUED build

        Make sure the QUEUED build is not assigned
        """
        if db.engine.dialect.name == "sqlite":
            self.skipTest("Test requires MySQL")
        rundef = {"run_url": "foo", "runner_url": "foo", "env": {}}
        storage().get_run_definition.return_value = json.dumps(rundef)
        w = Worker("w1", "ubuntu", 12, 2, "aarch64", "key", 2, ["aarch96"])
        w.enlisted = True
        w.online = True
        db.session.add(w)

        self.create_projects("job-1")
        (p1, ) = Project.query.all()
        p1.synchronous_builds = True
        db.session.commit()

        # add active build
        b = Build.create(p1)
        r = Run(b, "p1b1r1")
        r.status = BuildStatus.UPLOADING
        r.host_tag = "aarch96"
        db.session.add(r)

        b = Build.create(p1)
        r = Run(b, "p1b2r1")
        r.host_tag = "aarch96"
        db.session.add(r)
        db.session.commit()

        headers = [
            ("Content-type", "application/json"),
            ("Authorization", "Token key"),
        ]
        qs = "available_runners=1&foo=2"

        # There should be no work available
        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertNotIn("run-defs", data["data"]["worker"],
                         data["data"]["worker"])
Example #2
    def test_surge_complex(self):
        # we'll have two amd64 workers and one armhf
        worker = Worker('w2', 'd', 1, 1, 'amd64', 'k', 1, 'amd64')
        worker.enlisted = True
        worker.online = True
        db.session.add(worker)
        worker = Worker('w3', 'd', 1, 1, 'armhf', 'k', 1, 'armhf')
        worker.enlisted = True
        worker.online = True
        db.session.add(worker)
        db.session.commit()

        self.create_projects('proj1')
        b = Build.create(Project.query.all()[0])
        for x in range(SURGE_SUPPORT_RATIO + 1):
            r = Run(b, 'amd%d' % x)
            r.host_tag = 'amd64'
            db.session.add(r)
            r = Run(b, 'armhf%d' % x)
            r.host_tag = 'armhf'
            db.session.add(r)

        db.session.commit()
        _check_queue()
        self.assertFalse(os.path.exists(jobserv.worker.SURGE_FILE + '-amd64'))
        self.assertTrue(os.path.exists(jobserv.worker.SURGE_FILE + '-armhf'))

        # get us under surge for armhf
        db.session.delete(Run.query.filter(Run.host_tag == 'armhf').first())
        # and over surge for amd64
        for x in range(SURGE_SUPPORT_RATIO + 1):
            r = Run(b, 'run%d' % x)
            r.host_tag = 'amd64'
            db.session.add(r)

        db.session.commit()
        worker_module.DETECT_FLAPPING = False
        _check_queue()
        self.assertTrue(os.path.exists(jobserv.worker.SURGE_FILE + '-amd64'))
        self.assertFalse(os.path.exists(jobserv.worker.SURGE_FILE + '-armhf'))

        # make sure we know about deleted workers
        worker.deleted = True
        db.session.commit()
        _check_queue()
        self.assertTrue(os.path.exists(jobserv.worker.SURGE_FILE + '-armhf'))
Example #3
    def test_surge_complex(self):
        # we'll have two amd64 workers and one armhf
        worker = Worker("w2", "d", 1, 1, "amd64", "k", 1, "amd64")
        worker.enlisted = True
        worker.online = True
        db.session.add(worker)
        worker = Worker("w3", "d", 1, 1, "armhf", "k", 1, "armhf")
        worker.enlisted = True
        worker.online = True
        db.session.add(worker)
        db.session.commit()

        self.create_projects("proj1")
        b = Build.create(Project.query.all()[0])
        for x in range(SURGE_SUPPORT_RATIO + 1):
            r = Run(b, "amd%d" % x)
            r.host_tag = "amd64"
            db.session.add(r)
            r = Run(b, "armhf%d" % x)
            r.host_tag = "armhf"
            db.session.add(r)

        db.session.commit()
        _check_queue()
        self.assertFalse(os.path.exists(jobserv.worker.SURGE_FILE + "-amd64"))
        self.assertTrue(os.path.exists(jobserv.worker.SURGE_FILE + "-armhf"))

        # get us under surge for armhf
        db.session.delete(Run.query.filter(Run.host_tag == "armhf").first())
        # and over surge for amd64
        for x in range(SURGE_SUPPORT_RATIO + 1):
            r = Run(b, "run%d" % x)
            r.host_tag = "amd64"
            db.session.add(r)

        db.session.commit()
        worker_module.DETECT_FLAPPING = False
        _check_queue()
        self.assertTrue(os.path.exists(jobserv.worker.SURGE_FILE + "-amd64"))
        self.assertFalse(os.path.exists(jobserv.worker.SURGE_FILE + "-armhf"))

        # make sure we know about deleted workers
        worker.deleted = True
        db.session.commit()
        _check_queue()
        self.assertTrue(os.path.exists(jobserv.worker.SURGE_FILE + "-armhf"))
Example #4
    def test_worker_sync_builds_regression(self, storage):
        """Make sure scheduler takes into account other active projects for
        sync builds.
        """
        if db.engine.dialect.name == "sqlite":
            self.skipTest("Test requires MySQL")
        rundef = {"run_url": "foo", "runner_url": "foo", "env": {}}
        storage().get_run_definition.return_value = json.dumps(rundef)
        w = Worker("w1", "ubuntu", 12, 2, "aarch64", "key", 2, ["aarch96"])
        w.enlisted = True
        w.online = True
        db.session.add(w)

        self.create_projects("job-1")
        self.create_projects("job-2")
        p1, p2 = Project.query.all()
        p1.synchronous_builds = True
        db.session.commit()

        # add active build
        b = Build.create(p2)
        r = Run(b, "p2b1r1")
        r.status = BuildStatus.RUNNING
        r.host_tag = "aarch96"
        db.session.add(r)

        b = Build.create(p1)
        r = Run(b, "p1b1r1")
        r.host_tag = "aarch96"
        db.session.add(r)
        db.session.commit()

        headers = [
            ("Content-type", "application/json"),
            ("Authorization", "Token key"),
        ]
        qs = "available_runners=1&foo=2"

        # This should make the p1b1r1 run running
        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data["data"]["worker"]["run-defs"]))
Example #5
    def test_worker_queue_priority(self, storage):
        """Validate queue priorities for Runs are honored.

           1. Create a normal project with 2 QUEUED builds.
           2. Set the priority of the newer build higher than the older build.
           3. Verify queue priority is done properly.
        """
        if db.engine.dialect.name == 'sqlite':
            self.skipTest('Test requires MySQL')
        rundef = {'run_url': 'foo', 'runner_url': 'foo', 'env': {}}
        storage().get_run_definition.return_value = json.dumps(rundef)
        w = Worker('w1', 'ubuntu', 12, 2, 'aarch64', 'key', 2, ['aarch96'])
        w.enlisted = True
        w.online = True
        db.session.add(w)

        self.create_projects('job-1')
        p = Project.query.all()[0]
        p.synchronous_builds = True
        db.session.commit()

        b = Build.create(p)
        r = Run(b, 'r1')
        r.host_tag = 'aarch96'
        db.session.add(r)
        r = Run(b, 'r2')
        r.host_tag = 'aarch96'
        r.queue_priority = 2  # this is *newer* build but *higher* priority
        db.session.add(r)

        db.session.commit()
        headers = [
            ('Content-type', 'application/json'),
            ('Authorization', 'Token key'),
        ]
        qs = 'available_runners=1&foo=2'
        resp = self.client.get('/workers/w1/',
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data['data']['worker']['run-defs']))
        self.assertEqual([BuildStatus.QUEUED, BuildStatus.RUNNING],
                         [x.status for x in Run.query])
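
The final assertion shows that r2, although created second, is handed out first because of its higher queue_priority, while the older r1 stays QUEUED. A hedged sketch of the ordering this implies when QUEUED runs are picked (an inference from the assertion, not jobserv's actual query):

queued_runs = (
    Run.query.filter(Run.status == BuildStatus.QUEUED)
    # Higher queue_priority wins; ties fall back to oldest-first. Using
    # Run.id as the tie-breaker is an assumption.
    .order_by(Run.queue_priority.desc(), Run.id.asc())
)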
Example #6
    def test_worker_queue_priority(self, storage):
        """Validate queue priorities for Runs are honored.

        1. Create a normal project with 2 QUEUED builds.
        2. Set the priority of the newer build higher than the older build.
        3. Verify queue priority is done properly.
        """
        if db.engine.dialect.name == "sqlite":
            self.skipTest("Test requires MySQL")
        rundef = {"run_url": "foo", "runner_url": "foo", "env": {}}
        storage().get_run_definition.return_value = json.dumps(rundef)
        w = Worker("w1", "ubuntu", 12, 2, "aarch64", "key", 2, ["aarch96"])
        w.enlisted = True
        w.online = True
        db.session.add(w)

        self.create_projects("job-1")
        p = Project.query.all()[0]
        p.synchronous_builds = True
        db.session.commit()

        b = Build.create(p)
        r = Run(b, "r1")
        r.host_tag = "aarch96"
        db.session.add(r)
        r = Run(b, "r2")
        r.host_tag = "aarch96"
        r.queue_priority = 2  # this is *newer* build but *higher* priority
        db.session.add(r)

        db.session.commit()
        headers = [
            ("Content-type", "application/json"),
            ("Authorization", "Token key"),
        ]
        qs = "available_runners=1&foo=2"
        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data["data"]["worker"]["run-defs"]))
        self.assertEqual([BuildStatus.QUEUED, BuildStatus.RUNNING],
                         [x.status for x in Run.query])
Example #7
    def test_surge_simple(self):
        self.create_projects('proj1')
        b = Build.create(Project.query.all()[0])
        for x in range(SURGE_SUPPORT_RATIO + 1):
            r = Run(b, 'run%d' % x)
            r.host_tag = 'amd64'
            db.session.add(r)
        db.session.commit()
        _check_queue()
        self.assertTrue(os.path.exists(jobserv.worker.SURGE_FILE + '-amd64'))

        db.session.delete(Run.query.all()[0])
        db.session.commit()
        worker_module.DETECT_FLAPPING = False
        _check_queue()
        self.assertFalse(os.path.exists(jobserv.worker.SURGE_FILE + '-amd64'))
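
Taken together with test_surge_complex above, these assertions pin down a per-host-tag surge rule: the SURGE_FILE marker for a tag appears once the QUEUED runs carrying that tag exceed SURGE_SUPPORT_RATIO for each enlisted, online (and not deleted) worker with that tag, and disappears when the backlog drops back under that ratio. A hypothetical helper expressing the rule, inferred from the tests rather than copied from jobserv.worker:

def surge_expected(host_tag, matching_worker_count):
    # More than SURGE_SUPPORT_RATIO queued runs per matching worker means a
    # surge; zero matching workers means any backlog is a surge. This is an
    # inference from the test assertions, not jobserv's implementation.
    queued = Run.query.filter(
        Run.status == BuildStatus.QUEUED, Run.host_tag == host_tag
    ).count()
    return queued > SURGE_SUPPORT_RATIO * matching_worker_count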
Example #8
    def test_worker_sync_builds(self, storage):
        """Ensure Projects with "synchronous_builds" are assigned properly.

           1. Create a "synchronous" Project
           2. Add a RUNNING build and a QUEUED build
           3. Create a regular Project with a QUEUED build

           Make sure the QUEUED build from the second Project is assigned
           rather than the *older* but blocked build from the first Project.
        """
        if db.engine.dialect.name == 'sqlite':
            self.skipTest('Test requires MySQL')
        rundef = {'run_url': 'foo', 'runner_url': 'foo', 'env': {}}
        storage().get_run_definition.return_value = json.dumps(rundef)
        w = Worker('w1', 'ubuntu', 12, 2, 'aarch64', 'key', 2, ['aarch96'])
        w.enlisted = True
        w.online = True
        db.session.add(w)

        # create a "synchronous" builds project
        self.create_projects('job-1')
        p = Project.query.all()[0]
        p.synchronous_builds = True
        db.session.commit()

        # add an active build
        b = Build.create(p)
        r = Run(b, 'p1b1r1')
        r.host_tag = 'aarch96'
        r.status = BuildStatus.RUNNING
        db.session.add(r)

        # Queue up another run on this build. The project is sync, but the
        # runs in a single build can go in parallel
        r = Run(b, 'p1b1r2')
        r.host_tag = 'aarch96'
        db.session.add(r)

        # now queue a build up
        b = Build.create(p)
        r = Run(b, 'p1b2r1')
        r.host_tag = 'aarch97'  # different host-tag, but should be blocked
        db.session.add(r)

        # create a normal project
        self.create_projects('job-2')
        p = Project.query.all()[1]
        db.session.commit()

        # queue up a build. This is "newer" than the queued build for
        # the synchronous project, but should get selected below
        b = Build.create(p)
        r = Run(b, 'p2b1r1')
        r.host_tag = 'aarch97'
        db.session.add(r)

        db.session.commit()
        headers = [
            ('Content-type', 'application/json'),
            ('Authorization', 'Token key'),
        ]
        qs = 'available_runners=1&foo=2'

        # This should make the p1b1r2 run running
        resp = self.client.get('/workers/w1/',
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data['data']['worker']['run-defs']))
        self.assertEqual(
            [
                BuildStatus.RUNNING, BuildStatus.RUNNING, BuildStatus.QUEUED,
                BuildStatus.QUEUED
            ],  # NOQA
            [x.status for x in Run.query])

        # now job-1 should get blocked and job-2's run will get popped
        # let's change the host-tag to ensure this applies to *all* runs
        w.host_tags = ['aarch97']
        db.session.commit()
        resp = self.client.get('/workers/w1/',
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data['data']['worker']['run-defs']))
        self.assertEqual(
            [
                BuildStatus.RUNNING, BuildStatus.RUNNING, BuildStatus.QUEUED,
                BuildStatus.RUNNING
            ],  # NOQA
            [x.status for x in Run.query])
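
Across the synchronous-build tests (this one plus the UPLOADING and regression variants above and the cross-tag variant below), the pattern is consistent: a newer Build of a Project with synchronous_builds stays blocked while any Run of an older Build in the same Project is still QUEUED, RUNNING, or UPLOADING, regardless of host tag; a FAILED Run releases the block, and activity in other Projects does not count. A hypothetical summary of that rule, stated over plain values so that no jobserv model attributes have to be assumed:

# The set of blocking statuses is inferred from the assertions in these
# tests, not taken from the scheduler's source.
BLOCKING_STATUSES = {BuildStatus.QUEUED, BuildStatus.RUNNING, BuildStatus.UPLOADING}

def newer_build_blocked(synchronous_project, older_build_run_statuses):
    # `older_build_run_statuses` stands for the statuses of every Run in the
    # Project's earlier Builds.
    if not synchronous_project:
        return False
    return any(s in BLOCKING_STATUSES for s in older_build_run_statuses)

In this test, for example, the first request leaves p1b1 with two RUNNING runs, so p1b2r1 remains blocked and the second request falls through to job-2's p2b1r1.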
Example #9
    def test_worker_get_run(self, storage):
        if db.engine.dialect.name == 'sqlite':
            self.skipTest('Test requires MySQL')
        rundef = {'run_url': 'foo', 'runner_url': 'foo', 'env': {}}
        storage().get_run_definition.return_value = json.dumps(rundef)
        w = Worker('w1', 'ubuntu', 12, 2, 'aarch64', 'key', 2, ['aarch96'])
        w.enlisted = True
        w.online = True
        db.session.add(w)

        self.create_projects('job-1')
        p = Project.query.all()[0]
        b = Build.create(p)
        r = Run(b, 'run0')
        r.host_tag = 'aarch96'
        db.session.add(r)

        db.session.commit()
        headers = [
            ('Content-type', 'application/json'),
            ('Authorization', 'Token key'),
        ]
        qs = 'available_runners=1&foo=2'
        resp = self.client.get('/workers/w1/',
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data['data']['worker']['run-defs']))

        # now put a tag on the worker that doesn't match
        r.status = BuildStatus.QUEUED
        w.host_tags = 'amd64, foo'
        db.session.commit()
        resp = self.client.get('/workers/w1/',
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code)
        data = json.loads(resp.data.decode())
        self.assertNotIn('run-defs', data['data']['worker'])

        # now tag the run with the worker's host name
        r.host_tag = 'w1'
        w.host_tags = ''
        db.session.commit()
        resp = self.client.get('/workers/w1/',
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data['data']['worker']['run-defs']))

        # now do a pattern match
        w.host_tags = 'aarch96'
        r.host_tag = 'aa?c*'
        r.status = BuildStatus.QUEUED
        db.session.commit()
        resp = self.client.get('/workers/w1/',
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data['data']['worker']['run-defs']))

        # now mark it only for surges
        w.surges_only = True
        r.status = BuildStatus.QUEUED
        r.host_tag = 'aarch96'
        db.session.commit()
        resp = self.client.get('/workers/w1/',
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code)
        data = json.loads(resp.data.decode())
        self.assertNotIn('run-defs', data['data']['worker'])
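
The "pattern match" step queues a Run whose host_tag is the glob "aa?c*", and a worker tagged "aarch96" still receives it, so host tags are evidently compared with shell-style wildcards ("?" matches one character, "*" any sequence). Whether jobserv uses the standard-library fnmatch for this is an assumption, but fnmatch reproduces the behavior the test relies on:

import fnmatch

# "aa?c*" matches "aarch96": "?" consumes the "r" and "*" the trailing "h96".
# This only illustrates the wildcard semantics the test depends on.
assert fnmatch.fnmatch("aarch96", "aa?c*")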
Example #10
    def test_worker_sync_builds_across_tags(self, storage):
        """Ensure Projects with "synchronous_builds" are assigned properly
        for builds/runs with mixed worker-tags.

        1. Create a "synchronous" Project
        2. Add QUEUED build for amd64 worker
        3. Add QUEUED build for aarch64 worker

        Make sure the QUEUED build stays blocked until the amd64 Run
        completes.
        """
        if db.engine.dialect.name == "sqlite":
            self.skipTest("Test requires MySQL")
        rundef = {"run_url": "foo", "runner_url": "foo", "env": {}}
        storage().get_run_definition.return_value = json.dumps(rundef)
        w = Worker("w1", "ubuntu", 12, 2, "aarch64", "key", 2, ["aarch64"])
        w.enlisted = True
        w.online = True
        db.session.add(w)

        # create a "synchronous" builds project
        self.create_projects("job-1")
        p = Project.query.all()[0]
        p.synchronous_builds = True
        db.session.commit()

        # add a QUEUED build for amd64
        b = Build.create(p)
        r1 = Run(b, "p1b1r1")
        r1.host_tag = "amd64"
        db.session.add(r1)

        # now queue an aarch64 build
        b = Build.create(p)
        r = Run(b, "p1b2r1")
        r.host_tag = "aarch64"  # different host-tag, but should be blocked
        db.session.add(r)

        db.session.commit()
        headers = [
            ("Content-type", "application/json"),
            ("Authorization", "Token key"),
        ]
        qs = "available_runners=1&foo=2"

        # There shouldn't be any work for aarch64 (only amd64)
        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertNotIn("run-defs", data["data"]["worker"])

        r1.status = BuildStatus.FAILED
        db.session.commit()

        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data["data"]["worker"]["run-defs"]))
Example #11
    def test_worker_sync_builds(self, storage):
        """Ensure Projects with "synchronous_builds" are assigned properly.

        1. Create a "synchronous" Project
        2. Add a RUNNING build and a QUEUED build
        3. Create a regular Project with a QUEUED build

        Make sure the QUEUED build from the second Project is assigned
        rather than the *older* but blocked build from the first Project.
        """
        if db.engine.dialect.name == "sqlite":
            self.skipTest("Test requires MySQL")
        rundef = {"run_url": "foo", "runner_url": "foo", "env": {}}
        storage().get_run_definition.return_value = json.dumps(rundef)
        w = Worker("w1", "ubuntu", 12, 2, "aarch64", "key", 2, ["aarch96"])
        w.enlisted = True
        w.online = True
        db.session.add(w)

        # create a "synchronous" builds project
        self.create_projects("job-1")
        p = Project.query.all()[0]
        p.synchronous_builds = True
        db.session.commit()

        # add an active build
        b = Build.create(p)
        r = Run(b, "p1b1r1")
        r.host_tag = "aarch96"
        r.status = BuildStatus.RUNNING
        db.session.add(r)

        # Queue up another run on this build. The project is sync, but the
        # runs in a single build can go in parallel
        r = Run(b, "p1b1r2")
        r.host_tag = "aarch96"
        db.session.add(r)

        # now queue a build up
        b = Build.create(p)
        r = Run(b, "p1b2r1")
        r.host_tag = "aarch97"  # different host-tag, but should be blocked
        db.session.add(r)

        # create a normal project
        self.create_projects("job-2")
        p = Project.query.all()[1]
        db.session.commit()

        # queue up a build. This is "newer" than the queued build for
        # the synchronous project, but should get selected below
        b = Build.create(p)
        r = Run(b, "p2b1r1")
        r.host_tag = "aarch97"
        db.session.add(r)

        db.session.commit()
        headers = [
            ("Content-type", "application/json"),
            ("Authorization", "Token key"),
        ]
        qs = "available_runners=1&foo=2"

        # This should make the p1b1r2 run running
        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data["data"]["worker"]["run-defs"]))
        self.assertEqual(
            [
                BuildStatus.RUNNING,
                BuildStatus.RUNNING,
                BuildStatus.QUEUED,
                BuildStatus.QUEUED,
            ],  # NOQA
            [x.status for x in Run.query],
        )

        # now job-1 should get blocked and job-2's run will get popped
        # let's change the host-tag to ensure this applies to *all* runs
        w.host_tags = ["aarch97"]
        db.session.commit()
        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data["data"]["worker"]["run-defs"]))
        self.assertEqual(
            [
                BuildStatus.RUNNING,
                BuildStatus.RUNNING,
                BuildStatus.QUEUED,
                BuildStatus.RUNNING,
            ],  # NOQA
            [x.status for x in Run.query],
        )
Example #12
    def test_worker_get_run(self, storage):
        if db.engine.dialect.name == "sqlite":
            self.skipTest("Test requires MySQL")
        rundef = {"run_url": "foo", "runner_url": "foo", "env": {}}
        storage().get_run_definition.return_value = json.dumps(rundef)
        w = Worker("w1", "ubuntu", 12, 2, "aarch64", "key", 2, ["aarch96"])
        w.enlisted = True
        w.online = True
        db.session.add(w)

        self.create_projects("job-1")
        p = Project.query.all()[0]
        b = Build.create(p)
        r = Run(b, "run0")
        r.host_tag = "aarch96"
        db.session.add(r)

        db.session.commit()
        headers = [
            ("Content-type", "application/json"),
            ("Authorization", "Token key"),
        ]
        qs = "available_runners=1&foo=2"
        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code, resp.data)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data["data"]["worker"]["run-defs"]))

        # now put a tag on the worker that doesn't match
        r.status = BuildStatus.QUEUED
        w.host_tags = "amd64, foo"
        db.session.commit()
        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code)
        data = json.loads(resp.data.decode())
        self.assertNotIn("run-defs", data["data"]["worker"])

        # now tag the run with the worker's host name
        r.host_tag = "w1"
        w.host_tags = ""
        db.session.commit()
        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data["data"]["worker"]["run-defs"]))

        # now do a pattern match
        w.host_tags = "aarch96"
        r.host_tag = "aa?c*"
        r.status = BuildStatus.QUEUED
        db.session.commit()
        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code)
        data = json.loads(resp.data.decode())
        self.assertEqual(1, len(data["data"]["worker"]["run-defs"]))

        # now mark it only for surges
        w.surges_only = True
        r.status = BuildStatus.QUEUED
        r.host_tag = "aarch96"
        db.session.commit()
        resp = self.client.get("/workers/w1/",
                               headers=headers,
                               query_string=qs)
        self.assertEqual(200, resp.status_code)
        data = json.loads(resp.data.decode())
        self.assertNotIn("run-defs", data["data"]["worker"])