Example #1
0
    def test_get_queue(self):
        """The slave's /queue endpoint should redirect to the master, whose
        queue listing for the default pool should contain our submitted jobs."""
        job_uuids, submit_resp = util.submit_jobs(self.master_url,
                                                  {'command': 'sleep 30'},
                                                  clones=100)
        self.assertEqual(201, submit_resp.status_code, submit_resp.content)
        try:
            # Query the slave without following redirects: expect a 307 to the master.
            redirect_resp = util.session.get('%s/queue' % self.slave_url,
                                             allow_redirects=False)
            self.assertEqual(307, redirect_resp.status_code)
            pool_name = util.default_pool(self.master_url) or 'no-pool'
            self.logger.info(f'Checking the queue endpoint for pool {pool_name}')

            # Need to wait for a rank cycle
            @retry(stop_max_delay=30000, wait_fixed=1000)
            def assert_jobs_visible_in_queue():
                queue_resp = util.session.get(redirect_resp.headers['Location'])
                self.assertEqual(200, queue_resp.status_code, queue_resp.content)
                queued_jobs = queue_resp.json()[pool_name]
                self.assertTrue(
                    any(job['job/uuid'] in job_uuids for job in queued_jobs))

            assert_jobs_visible_in_queue()
        finally:
            util.kill_jobs(self.master_url, job_uuids)
Example #2
0
 def test_rate_limit_while_creating_job(self):
     """Verify the job-submission rate limiter cuts a user off once the
     token bucket is exhausted, and lets them back in after replenishment."""
     settings = util.settings(self.cook_url)
     submission_limit = settings['rate-limit']['job-submission']
     if submission_limit is None:
         pytest.skip(
             "Can't test job submission rate limit without submission rate limit set."
         )
     if not submission_limit['enforce?']:
         pytest.skip("Enforcing must be on for test to run")
     user = self.user_factory.new_user()
     bucket_size = submission_limit['bucket-size']
     replenishment_rate = submission_limit['tokens-replenished-per-minute']
     # Floor the overdraw batch at 100 so the bucket goes meaningfully negative.
     extra_size = max(replenishment_rate, 100)
     if bucket_size > 3000 or extra_size > 1000:
         pytest.skip(
             "Job submission rate limit test would require making too many or too few jobs to run the test."
         )
     with user:
         jobs_to_kill = []
         try:
             # Drain most, but not all, of the token bucket.
             batch1, resp1 = util.submit_jobs(self.cook_url, {},
                                              bucket_size - 60)
             jobs_to_kill.extend(batch1)
             self.assertEqual(resp1.status_code, 201)
             # Overdraw the bucket so the token balance goes well negative.
             batch2, resp2 = util.submit_jobs(self.cook_url, {},
                                              extra_size + 60)
             jobs_to_kill.extend(batch2)
             self.assertEqual(resp2.status_code, 201)
             # This submission should now be rejected outright.
             _, resp3 = util.submit_jobs(self.cook_url, {}, 10)
             self.assertEqual(resp3.status_code, 400)
             # The timestamp in the message varies, so compare only the prefix.
             expected_prefix = f'User {user.name} is inserting too quickly. Not allowed to insert for'
             self.assertEqual(resp3.json()['error'][:len(expected_prefix)],
                              expected_prefix)
             # Wait long enough to earn back 70 seconds' worth of tokens.
             time.sleep(70.0 * extra_size / replenishment_rate)
             batch4, resp4 = util.submit_jobs(self.cook_url, {}, 10)
             jobs_to_kill.extend(batch4)
             self.assertEqual(resp4.status_code, 201)
         finally:
             util.kill_jobs(self.cook_url, jobs_to_kill)
Example #3
0
 def submit_jobs():
     # Push another batch of bucket_size - 1 jobs and record their uuids.
     # NOTE(review): relies on enclosing-scope names (self, bucket_size,
     # jobspec, job_uuids) — this is a closure shown out of context.
     self.logger.info(
         f'Submitting subsequent batch of {bucket_size-1} jobs')
     new_uuids, response = util.submit_jobs(self.cook_url, jobspec,
                                            bucket_size - 1)
     job_uuids.extend(new_uuids)
     self.assertEqual(201, response.status_code, msg=response.content)
Example #4
0
    def test_rate_limit_launching_jobs(self):
        """Exercise the job-launch rate limiter.

        Submits just under two token buckets of long-running jobs, then
        verifies that the number of *running* jobs plateaus at the bucket
        size while the remainder stays waiting.  Skips unless launch rate
        limiting is configured, enforced, and tuned within the ranges this
        test has been validated for.
        """
        settings = util.settings(self.cook_url)
        if settings['rate-limit']['job-launch'] is None:
            pytest.skip(
                "Can't test job launch rate limit without launch rate limit set."
            )

        # Allow an environmental variable override.
        name = os.getenv('COOK_LAUNCH_RATE_LIMIT_NAME')
        if name is not None:
            user = self.user_factory.user_class(name)
        else:
            user = self.user_factory.new_user()

        if not settings['rate-limit']['job-launch']['enforce?']:
            pytest.skip("Enforcing must be on for test to run")
        bucket_size = settings['rate-limit']['job-launch']['bucket-size']
        token_rate = settings['rate-limit']['job-launch'][
            'tokens-replenished-per-minute']
        # In some environments, e.g., minimesos, we can only launch so many concurrent jobs.
        if token_rate < 5 or token_rate > 20:
            pytest.skip(
                "Job launch rate limit test is only validated to reliably work correctly with certain token rates."
            )
        if bucket_size < 10 or bucket_size > 20:
            pytest.skip(
                "Job launch rate limit test is only validated to reliably work correctly with certain token bucket sizes."
            )
        with user:
            job_uuids = []
            try:
                # Long sleep keeps jobs running for the duration of the test;
                # tiny cpu/mem so the cluster can hold them all concurrently.
                jobspec = {"command": "sleep 240", 'cpus': 0.03, 'mem': 32}

                self.logger.info(
                    f'Submitting initial batch of {bucket_size-1} jobs')
                initial_uuids, initial_response = util.submit_jobs(
                    self.cook_url, jobspec, bucket_size - 1)
                job_uuids.extend(initial_uuids)
                self.assertEqual(201,
                                 initial_response.status_code,
                                 msg=initial_response.content)

                # Submits a second batch of bucket_size - 1 jobs, recording
                # their uuids so the finally block can clean them up.
                def submit_jobs():
                    self.logger.info(
                        f'Submitting subsequent batch of {bucket_size-1} jobs')
                    subsequent_uuids, subsequent_response = util.submit_jobs(
                        self.cook_url, jobspec, bucket_size - 1)
                    job_uuids.extend(subsequent_uuids)
                    self.assertEqual(201,
                                     subsequent_response.status_code,
                                     msg=subsequent_response.content)

                # Predicate for util.wait_until: true once the limiter has
                # visibly throttled launches (some jobs still waiting while
                # the running count sits in the expected band).
                def is_rate_limit_triggered(_):
                    jobs1 = util.query_jobs(self.cook_url,
                                            True,
                                            uuid=job_uuids).json()
                    waiting_jobs = [
                        j for j in jobs1 if j['status'] == 'waiting'
                    ]
                    running_jobs = [
                        j for j in jobs1 if j['status'] == 'running'
                    ]
                    self.logger.debug(
                        f'There are {len(waiting_jobs)} waiting jobs')
                    # We submitted just under two buckets. We should only see a bucket + some extra running. No more.
                    return len(running_jobs) >= bucket_size and len(
                        running_jobs) < (bucket_size + token_rate /
                                         2) and len(waiting_jobs) > 0

                util.wait_until(submit_jobs, is_rate_limit_triggered)
                # Once the limiter has triggered, exactly one bucket's worth
                # of jobs should be running.
                jobs2 = util.query_jobs(self.cook_url, True,
                                        uuid=job_uuids).json()
                running_jobs = [j for j in jobs2 if j['status'] == 'running']
                self.assertEqual(len(running_jobs), bucket_size)
            finally:
                util.kill_jobs(self.cook_url, job_uuids)