Example #1
0
    def test_allow_partial(self):
        """Verify the `partial` query parameter on the job-query endpoint.

        Without partial results (default, or partial='false'), querying a mix
        of valid and unknown uuids must return 404 with the unknown uuids
        embedded in the error message. With partial='true', the endpoint
        returns 200 together with only the jobs/instances it knows about.
        """
        def absent_uuids(response):
            # The 404 error message embeds the uuids that could not be
            # found; extract every whitespace-separated token that parses
            # as a valid uuid.
            return [part for part in response.json()['error'].split() if util.is_valid_uuid(part)]

        job_uuid_1, resp = util.submit_job(self.cook_url)
        self.assertEqual(201, resp.status_code)
        job_uuid_2, resp = util.submit_job(self.cook_url)
        self.assertEqual(201, resp.status_code)

        # Only valid job uuids
        resp = util.query_jobs(self.cook_url, job=[job_uuid_1, job_uuid_2])
        self.assertEqual(200, resp.status_code)

        # Mixed valid, invalid job uuids
        bogus_uuid = str(uuid.uuid4())
        resp = util.query_jobs(self.cook_url, job=[job_uuid_1, job_uuid_2, bogus_uuid])
        self.assertEqual(404, resp.status_code)
        self.assertEqual([bogus_uuid], absent_uuids(resp))
        resp = util.query_jobs(self.cook_url, job=[job_uuid_1, job_uuid_2, bogus_uuid], partial='false')
        self.assertEqual(404, resp.status_code, resp.json())
        self.assertEqual([bogus_uuid], absent_uuids(resp))

        # Partial results with mixed valid, invalid job uuids
        resp = util.query_jobs(self.cook_url, job=[job_uuid_1, job_uuid_2, bogus_uuid], partial='true')
        self.assertEqual(200, resp.status_code, resp.json())
        self.assertEqual(2, len(resp.json()))
        # BUG FIX: the old assertion used list.sort(), which returns None,
        # so it compared None == None and could never fail. sorted() returns
        # the sorted list, making this a real content check.
        self.assertEqual(sorted([job_uuid_1, job_uuid_2]),
                         sorted(job['uuid'] for job in resp.json()))

        # Only valid instance uuids
        job = util.wait_for_job(self.cook_url, job_uuid_1, 'completed')
        instance_uuid_1 = job['instances'][0]['task_id']
        job = util.wait_for_job(self.cook_url, job_uuid_2, 'completed')
        instance_uuid_2 = job['instances'][0]['task_id']
        resp = util.query_jobs(self.cook_url, instance=[instance_uuid_1, instance_uuid_2])
        self.assertEqual(200, resp.status_code)

        # Mixed valid, invalid instance uuids
        resp = util.query_jobs(self.cook_url, instance=[instance_uuid_1, instance_uuid_2, bogus_uuid])
        self.assertEqual(404, resp.status_code)
        self.assertEqual([bogus_uuid], absent_uuids(resp))
        resp = util.query_jobs(self.cook_url, instance=[instance_uuid_1, instance_uuid_2, bogus_uuid], partial='false')
        self.assertEqual(404, resp.status_code)
        self.assertEqual([bogus_uuid], absent_uuids(resp))

        # Partial results with mixed valid, invalid instance uuids
        resp = util.query_jobs(self.cook_url, instance=[instance_uuid_1, instance_uuid_2, bogus_uuid], partial='true')
        self.assertEqual(200, resp.status_code)
        self.assertEqual(2, len(resp.json()))
        # Same list.sort() -> sorted() fix as above.
        self.assertEqual(sorted([job_uuid_1, job_uuid_2]),
                         sorted(job['uuid'] for job in resp.json()))
Example #2
0
 def is_rate_limit_triggered(_):
     """Return True once rate limiting is visibly engaged: at least a full
     bucket of jobs is running while at least one job is still waiting.

     The ignored argument makes this usable as a wait_until predicate.
     """
     snapshot = util.query_jobs(self.cook_url,
                                True,
                                uuid=job_uuids).json()
     num_running = sum(1 for job in snapshot if job['status'] == 'running')
     num_waiting = sum(1 for job in snapshot if job['status'] == 'waiting')
     # We submitted just under two buckets. We should only see a bucket + some extra running. No more.
     return num_running >= bucket_size and num_waiting > 0
Example #3
0
 def is_rate_limit_triggered(_):
     """Predicate for wait_until: True once the launch rate limit is
     observably holding jobs back (a full bucket running, some waiting).

     The ignored argument is the wait_until callback value.
     """
     snapshot = util.query_jobs(self.cook_url,
                                True,
                                uuid=job_uuids).json()
     waiting_jobs = list(filter(lambda j: j['status'] == 'waiting', snapshot))
     running_jobs = list(filter(lambda j: j['status'] == 'running', snapshot))
     self.logger.debug(
         f'There are {len(waiting_jobs)} waiting jobs')
     return len(waiting_jobs) > 0 and len(running_jobs) >= bucket_size
Example #4
0
    def test_federated_query(self):
        """With jobs spread across two clusters, each cluster queried with
        partial='true' should return exactly the job it owns."""
        # Submit one job to each cluster.
        job_uuid_1, resp = util.submit_job(self.cook_url_1)
        self.assertEqual(resp.status_code, 201)
        job_uuid_2, resp = util.submit_job(self.cook_url_2)
        self.assertEqual(resp.status_code, 201)

        both_uuids = [job_uuid_1, job_uuid_2]
        # Ask each cluster for both jobs; with partial results allowed it
        # must answer 200 and return only the job it knows about.
        for cluster_url, expected_uuid in ((self.cook_url_1, job_uuid_1),
                                           (self.cook_url_2, job_uuid_2)):
            resp = util.query_jobs(cluster_url,
                                   job=both_uuids,
                                   partial='true')
            self.assertEqual(200, resp.status_code, resp.json())
            self.assertEqual(1, len(resp.json()))
            self.assertEqual([expected_uuid],
                             [job['uuid'] for job in resp.json()])
Example #5
0
 def get_job(self, job_uuid):
     """Loads a job by UUID using GET /rawscheduler"""
     # Query for the single uuid and unwrap the one-element result list.
     matches = util.query_jobs(self.cook_url, job=[job_uuid]).json()
     return matches[0]
Example #6
0
    def test_rate_limit_launching_jobs(self):
        """End-to-end check of the scheduler's job-launch rate limiter.

        Submits just under two buckets' worth of long-sleeping jobs, waits
        until the limiter is observably engaged (a full bucket running while
        some jobs still wait), then asserts that exactly bucket_size jobs
        are running. Skips unless the launch rate limit is configured,
        enforced, and its parameters fall inside the ranges this test has
        been validated against. All submitted jobs are killed on exit.
        """
        settings = util.settings(self.cook_url)
        # A missing job-launch section means no rate limit is configured.
        if settings['rate-limit']['job-launch'] is None:
            pytest.skip(
                "Can't test job launch rate limit without launch rate limit set."
            )

        # Allow an environmental variable override.
        name = os.getenv('COOK_LAUNCH_RATE_LIMIT_NAME')
        if name is not None:
            user = self.user_factory.user_class(name)
        else:
            user = self.user_factory.new_user()

        if not settings['rate-limit']['job-launch']['enforce?']:
            pytest.skip("Enforcing must be on for test to run")
        # bucket_size: max burst of launches; token_rate: refill per minute.
        bucket_size = settings['rate-limit']['job-launch']['bucket-size']
        token_rate = settings['rate-limit']['job-launch'][
            'tokens-replenished-per-minute']
        # In some environments, e.g., minimesos, we can only launch so many concurrent jobs.
        if token_rate < 5 or token_rate > 20:
            pytest.skip(
                "Job launch rate limit test is only validated to reliably work correctly with certain token rates."
            )
        if bucket_size < 10 or bucket_size > 20:
            pytest.skip(
                "Job launch rate limit test is only validated to reliably work correctly with certain token bucket sizes."
            )
        with user:
            # Collects every submitted uuid so the finally block can clean up
            # even if an assertion fails mid-test.
            job_uuids = []
            try:
                # Small, long-running jobs so they stay 'running' for the
                # duration of the observation window.
                jobspec = {"command": "sleep 240", 'cpus': 0.03, 'mem': 32}

                self.logger.info(
                    f'Submitting initial batch of {bucket_size-1} jobs')
                initial_uuids, initial_response = util.submit_jobs(
                    self.cook_url, jobspec, bucket_size - 1)
                job_uuids.extend(initial_uuids)
                self.assertEqual(201,
                                 initial_response.status_code,
                                 msg=initial_response.content)

                def submit_jobs():
                    # Second batch: together with the first, just under two
                    # buckets — enough to exhaust the token bucket.
                    self.logger.info(
                        f'Submitting subsequent batch of {bucket_size-1} jobs')
                    subsequent_uuids, subsequent_response = util.submit_jobs(
                        self.cook_url, jobspec, bucket_size - 1)
                    job_uuids.extend(subsequent_uuids)
                    self.assertEqual(201,
                                     subsequent_response.status_code,
                                     msg=subsequent_response.content)

                def is_rate_limit_triggered(_):
                    # Predicate polled by wait_until; argument is ignored.
                    jobs1 = util.query_jobs(self.cook_url,
                                            True,
                                            uuid=job_uuids).json()
                    waiting_jobs = [
                        j for j in jobs1 if j['status'] == 'waiting'
                    ]
                    running_jobs = [
                        j for j in jobs1 if j['status'] == 'running'
                    ]
                    self.logger.debug(
                        f'There are {len(waiting_jobs)} waiting jobs')
                    # We submitted just under two buckets. We should only see a bucket + some extra running. No more.
                    return len(running_jobs) >= bucket_size and len(
                        running_jobs) < (bucket_size + token_rate /
                                         2) and len(waiting_jobs) > 0

                # Keep (re)submitting until the limiter is visibly engaged.
                util.wait_until(submit_jobs, is_rate_limit_triggered)
                jobs2 = util.query_jobs(self.cook_url, True,
                                        uuid=job_uuids).json()
                running_jobs = [j for j in jobs2 if j['status'] == 'running']
                # Exactly one bucket's worth should have been launched.
                self.assertEqual(len(running_jobs), bucket_size)
            finally:
                # Best-effort cleanup of every job this test submitted.
                util.kill_jobs(self.cook_url, job_uuids)