def testPriorityLevels(self):
    """Test that priority inversion doesn't happen.

    Increases a job's priority and makes several requests for hosts,
    checking that priority inversion doesn't happen.

    @raises AssertionError: If the unimportant job gets h1 while it is
        still unimportant, or doesn't get h1 after it becomes the most
        important job.
    """
    deps = set(['a', 'b'])
    acls = set(['a', 'b'])
    self.db_helper.create_host('h1', deps=deps, acls=acls)

    # Create jobs that will bucket differently and confirm that jobs in an
    # earlier bucket get a host.
    first_job = self.create_job(user='******', deps=deps, acls=acls)
    important_job = self.create_job(user='******', deps=deps, acls=acls,
                                    priority=2)
    # Drop one dep so the unimportant job buckets differently.
    deps.pop()
    unimportant_job = self.create_job(user='******', deps=deps, acls=acls,
                                      priority=1)
    queue_entries = self._dispatcher._refresh_pending_queue_entries()
    self.god.stub_with(
            rdb_requests.BaseHostRequestManager, 'response',
            AssignmentValidator.priority_checking_response_handler)
    self.check_hosts(rdb_lib.acquire_hosts(queue_entries))

    # Elevate the priority of the unimportant job, so we now have
    # 2 jobs at the same priority.
    self.db_helper.increment_priority(job_id=unimportant_job.id)
    queue_entries = self._dispatcher._refresh_pending_queue_entries()
    self._release_unused_hosts()
    self.check_hosts(rdb_lib.acquire_hosts(queue_entries))

    # Prioritize the first job, and confirm that it gets the host over the
    # jobs that got it the last time.
    # Bug fix: this previously incremented unimportant_job again, which
    # contradicts the comment above and never exercises first_job's
    # priority at all.
    self.db_helper.increment_priority(job_id=first_job.id)
    queue_entries = self._dispatcher._refresh_pending_queue_entries()
    self._release_unused_hosts()
    self.check_hosts(rdb_lib.acquire_hosts(queue_entries))
def testBasicPriority(self):
    """Test that priority inversion doesn't happen.

    Schedule 2 jobs with the same deps, acls and user, but different
    priorities, and confirm that the higher priority request gets the
    host. This confirmation happens through the AssignmentValidator.

    @raises AssertionError: If the un important request gets host h1
        instead of the important request.
    """
    deps = set(['a', 'b'])
    acls = set(['a', 'b'])
    self.db_helper.create_host('h1', deps=deps, acls=acls)
    # Two otherwise identical jobs; only the priority=2 one should win h1.
    high_priority_job = self.create_job(user='******', deps=deps,
                                        acls=acls, priority=2)
    low_priority_job = self.create_job(user='******', deps=deps,
                                       acls=acls, priority=0)
    entries = self._dispatcher._refresh_pending_queue_entries()
    self.god.stub_with(
            rdb_requests.BaseHostRequestManager, 'response',
            AssignmentValidator.priority_checking_response_handler)
    self.check_hosts(rdb_lib.acquire_hosts(entries))
def min_dut_test_helper(self, num_hosts, suite_settings):
    """A helper function to test min_dut logic.

    @param num_hosts: Total number of hosts to create.
    @param suite_settings: A list of dictionaries specifying how suites
        should be created and verified. E.g.
            {'priority': 10,
             'num_jobs': 3,
             'min_duts': 2,
             'expected_aquired': 1}
        With this setting, a suite with 3 child jobs and priority 10 and
        min_duts 2 is created; the suite is expected to acquire 1 dut.
    """
    acls = set(['fake_acl'])
    for i in range(0, num_hosts):
        # The created host objects are not needed afterwards, so they are
        # deliberately not collected (the old code built an unused list).
        self.db_helper.create_host(
                'h%d' % i, deps=set(['board:lumpy']), acls=acls)
    suites = {}
    suite_min_duts = {}
    for setting in suite_settings:
        s = self.create_suite(num=setting['num_jobs'],
                              priority=setting['priority'],
                              board='board:lumpy', acls=acls)
        # Empty list will be used to store acquired hosts.
        suites[s['parent_job'].id] = (setting, [])
        suite_min_duts[s['parent_job'].id] = setting['min_duts']
    queue_entries = self._dispatcher._refresh_pending_queue_entries()
    matching_hosts = rdb_lib.acquire_hosts(queue_entries, suite_min_duts)
    for host, queue_entry in zip(matching_hosts, queue_entries):
        if host:
            suites[queue_entry.job.parent_job_id][1].append(host)
    # 'acquired_hosts' renamed from 'hosts' to avoid shadowing.
    for setting, acquired_hosts in suites.itervalues():
        self.assertEqual(len(acquired_hosts), setting['expected_aquired'])
def testCachingEmptyList(self):
    """Test that the 'no available hosts' condition isn't a cache miss."""
    default_params = test_utils.get_default_job_params()
    for i in range(0, 3):
        default_params['parent_job_id'] = i
        self.create_job(**default_params)
    default_host_params = test_utils.get_default_host_params()
    self.db_helper.create_host('h1', **default_host_params)

    def local_get_response(self):
        """Local rdb.get_response handler.

        Checks that only the first request missed the cache, and that an
        empty host list was cached for the deps/acls of the request.
        """
        if not (self.cache.misses == 1 and self.cache.hits == 2):
            raise AssertionError(
                    'The first request should have taken h1 '
                    'while the other 2 should have hit the cache.')
        request = test_utils.AbstractBaseRDBTester.get_request(
                test_utils.DEFAULT_DEPS, test_utils.DEFAULT_ACLS)
        key = self.cache.get_key(deps=request.deps, acls=request.acls)
        if self.cache._cache_backend.get(key) != []:
            # Typo fix: 'corrrectly' -> 'correctly'.
            raise AssertionError('A request with no hosts does not get '
                                 'cached correctly.')
        return test_utils.wire_format_response_map(self.response_map)

    queue_entries = self._dispatcher._refresh_pending_queue_entries()
    self.god.stub_with(rdb.AvailableHostRequestHandler,
                       'get_response', local_get_response)
    self.check_hosts(rdb_lib.acquire_hosts(queue_entries))
def testAcquireLeasedHostRace(self):
    """Test behaviour when hosts are leased just before acquisition.

    If a fraction of the hosts somehow get leased between finding and
    acquisition, the rdb should just return the remaining hosts for the
    request to use.

    @raises AssertionError: If both the requests get a host successfully,
        since one host gets leased before the final attempt to lease both.
    """
    # The job and host objects are only needed in the database; the local
    # bindings of the old code (j1, j2, hosts) were never used.
    self.create_job(deps=set(['a']))
    self.create_job(deps=set(['a']))
    self.db_helper.create_host('h1', deps=set(['a']))
    self.db_helper.create_host('h2', deps=set(['a']))

    @rdb_hosts.return_rdb_host
    def local_find_hosts(host_query_manager, deps, acls):
        """Return a predetermined list of hosts, one of which is leased."""
        # Typo fix in the parameter name: 'manger' -> 'manager'.
        h1 = models.Host.objects.get(hostname='h1')
        h1.leased = 1
        h1.save()
        h2 = models.Host.objects.get(hostname='h2')
        return [h1, h2]

    self.god.stub_with(rdb.AvailableHostQueryManager, 'find_hosts',
                       local_find_hosts)
    queue_entries = self._dispatcher._refresh_pending_queue_entries()
    hosts = list(rdb_lib.acquire_hosts(queue_entries))
    # Exactly one of the two requests must come back empty-handed.
    self.assertTrue(len(hosts) == 2 and None in hosts)
    self.check_hosts(iter(hosts))
def testCachingBasic(self):
    """Test that different requests will hit the database."""
    # r1 should cache h2 and use h1; r2 should cache [] and use h2.
    # At the end the cache should contain one stale line, with
    # h2 in it, and one empty line since r2 acquired h2.
    default_params = test_utils.get_default_job_params()
    self.create_job(**default_params)
    default_params['deps'] = default_params['deps'][0]
    self.create_job(**default_params)
    for i in range(0, 2):
        self.db_helper.create_host(
                'h%s' % i, **test_utils.get_default_host_params())
    queue_entries = self._dispatcher._refresh_pending_queue_entries()

    def local_get_response(self):
        """Local rdb.get_response handler.

        Confirms both requests missed the cache, that only one cache line
        was left for the default deps, and that the cached host is stale.
        """
        if not (self.cache.hits == 0 and self.cache.misses == 2):
            raise AssertionError(
                    'Neither request should have hit the '
                    'cache, but both should have inserted into it.')
        lines = get_line_with_labels(
                test_utils.DEFAULT_DEPS,
                self.cache._cache_backend._cache.values())
        if len(lines) > 1:
            # Comment/typo fix: 'agressive' -> 'aggressive'.
            raise AssertionError(
                    'Caching was too aggressive, '
                    'the second request should not have cached anything '
                    'because it used the one free host.')
        cached_host = lines[0].pop()
        default_params = test_utils.get_default_job_params()
        job1_host = get_hosts_for_request(
                self.response_map, **default_params)[0]
        default_params['deps'] = default_params['deps'][0]
        job2_host = get_hosts_for_request(
                self.response_map, **default_params)[0]
        if (job2_host.hostname == job1_host.hostname or
            cached_host.hostname not in
            [job2_host.hostname, job1_host.hostname]):
            raise AssertionError(
                    'Wrong host cached %s. The first job '
                    'should have cached the host used by the second.' %
                    cached_host.hostname)
        # Shouldn't be able to lease this host since r2 used it.
        try:
            cached_host.lease()
        except rdb_utils.RDBException:
            pass
        else:
            # The string literal here was broken by a stray newline in the
            # original; reassembled into adjacent literals.
            raise AssertionError('Was able to lease a stale host. The '
                                 'second request should have leased it.')
        return test_utils.wire_format_response_map(self.response_map)

    self.god.stub_with(rdb.AvailableHostRequestHandler,
                       'get_response', local_get_response)
    self.check_hosts(rdb_lib.acquire_hosts(queue_entries))
def testFrontendJobScheduling(self):
    """Test basic frontend job scheduling.

    @raises AssertionError: If the received and requested host don't match,
        or the mis-matching host is returned instead.
    """
    deps = set(['x', 'y'])
    acls = set(['a', 'b'])

    # Create 2 frontend jobs and only one matching host.
    matching_job = self.create_job(acls=acls, deps=deps)
    matching_host = self.db_helper.create_host('h1', acls=acls, deps=deps)
    mis_matching_job = self.create_job(acls=acls, deps=deps)
    # NOTE(review): deps.pop() both mutates 'deps' and passes a single
    # label (not a set) to create_host — presumably intentional so h2
    # only carries one of the two labels; confirm against db_helper.
    mis_matching_host = self.db_helper.create_host(
            'h2', acls=acls, deps=deps.pop())
    self.db_helper.add_host_to_job(matching_host, matching_job.id)
    self.db_helper.add_host_to_job(mis_matching_host, mis_matching_job.id)

    # Check that only the matching host is returned, and that we get 'None'
    # for the second request.
    queue_entries = self._dispatcher._refresh_pending_queue_entries()
    hosts = list(rdb_lib.acquire_hosts(queue_entries))
    self.assertTrue(len(hosts) == 2 and None in hosts)
    returned_host = [host for host in hosts if host].pop()
    self.assertTrue(matching_host.id == returned_host.id)
def testFrontendJobPriority(self):
    """Test that frontend job scheduling doesn't ignore priorities.

    @raises ValueError: If the priorities of frontend jobs are ignored.
    """
    board = 'x'
    high_priority = self.create_job(priority=2, deps=set([board]))
    low_priority = self.create_job(priority=1, deps=set([board]))
    host = self.db_helper.create_host('h1', deps=set([board]))
    self.db_helper.add_host_to_job(host, low_priority.id)
    self.db_helper.add_host_to_job(host, high_priority.id)
    queue_entries = self._dispatcher._refresh_pending_queue_entries()

    def local_response_handler(request_manager):
        """Confirms that a higher priority frontend job gets a host.

        @raises ValueError: If priority inversion happens and the job
            with priority 1 gets the host instead.
        """
        result = request_manager.api_call(request_manager.request_queue)
        if not result:
            # Typo fix: 'Excepted' -> 'Expected'.
            raise ValueError('Expected the high priority request to '
                             'get a host, but the result is empty.')
        for request, hosts in result.iteritems():
            if request.priority == 1:
                raise ValueError('Priority of frontend job ignored.')
            if len(hosts) > 1:
                raise ValueError('Multiple hosts returned against one '
                                 'frontend job scheduling request.')
            yield hosts[0]

    self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
                       local_response_handler)
    self.check_hosts(rdb_lib.acquire_hosts(queue_entries))
def testAcquireLeasedHostBasic(self):
    """Test that acquisition of a leased host doesn't happen.

    @raises AssertionError: If the one host that satisfies the request
        is acquired.
    """
    job = self.create_job(deps=set(['a']))
    # Mark the only candidate host as leased before scheduling runs.
    leased_host = self.db_helper.create_host('h1', deps=set(['a']))
    leased_host.leased = 1
    leased_host.save()
    entries = self._dispatcher._refresh_pending_queue_entries()
    acquired = list(rdb_lib.acquire_hosts(entries))
    # The single request must come back unsatisfied.
    self.assertTrue(len(acquired) == 1 and acquired[0] is None)
def acquire_hosts(self, host_jobs):
    """Override acquire_hosts.

    Determines which suites the |host_jobs| belong to, looks up the
    min_duts requirement recorded for each of those suites, and pipes
    the per-suite minimums through to the rdb.
    """
    parent_ids = set(entry.job.parent_job_id for entry in host_jobs
                     if entry.job.parent_job_id)
    min_duts = self._suite_recorder.get_min_duts(parent_ids)
    return rdb_lib.acquire_hosts(host_jobs, min_duts)
def testConfigurations(self):
    """Test that configurations don't matter.

    @raises AssertionError: If the request doesn't find a host; this will
        happen if configurations are not stripped out.
    """
    self.god.stub_with(provision.Cleanup, '_actions',
                       {'action': 'fakeTest'})
    host_deps = set(['a'])
    job_labels = set(['action', 'a'])
    expected_host = self.db_helper.create_host('h1', deps=host_deps)
    self.create_job(user='******', deps=job_labels)
    entries = self._dispatcher._refresh_pending_queue_entries()
    # 'action' is a configuration label, so the host lacking it must
    # still satisfy the request.
    acquired = rdb_lib.acquire_hosts(entries).next()
    self.assert_(acquired.id == expected_host.id)
def testBadDeps(self):
    """Test that we find no hosts when only acls match.

    @raises AssertionError: If the request finds a host, since the only
        host in the ready pool will not have matching deps.
    """
    acls = set(['a', 'b'])
    # The sole ready host carries dep 'a', but the job wants 'b'.
    self.db_helper.create_host('h1', deps=set(['a']), acls=acls)
    self.create_job(user='******', deps=set(['b']), acls=acls)
    entries = self._dispatcher._refresh_pending_queue_entries()
    acquired = rdb_lib.acquire_hosts(entries).next()
    self.assert_(not acquired)
def acquire_hosts(self, host_jobs):
    """Acquire hosts for given jobs.

    This method sends jobs that need hosts to rdb.
    Child class can override this method to pipe more args
    to rdb.

    @param host_jobs: A list of queue entries that either require hosts,
        or require host assignment validation through the rdb.

    @return: A generator that yields an rdb_hosts.RDBClientHostWrapper
        for each host acquired on behalf of a queue_entry, or None if
        a host wasn't found.
    """
    return rdb_lib.acquire_hosts(host_jobs)
def testCachingPriority(self): """Test requests with the same deps but different priorities.""" # All 3 jobs should find hosts, and there should be one host left # behind in the cache. The first job will take one host and cache 3, # the second will take one and cache 2, while the last will take one. # The remaining host in the cache should not be stale. default_job_params = test_utils.get_default_job_params() for i in range(0, 3): default_job_params['priority'] = i job = self.create_job(**default_job_params) default_host_params = test_utils.get_default_host_params() for i in range(0, 4): self.db_helper.create_host('h%s' % i, **default_host_params) queue_entries = self._dispatcher._refresh_pending_queue_entries() def local_get_response(self): """ Local rdb.get_response handler.""" if not (self.cache.hits == 2 and self.cache.misses == 1): raise AssertionError('The first request should have populated ' 'the cache for the others.') default_job_params = test_utils.get_default_job_params() lines = get_line_with_labels( default_job_params['deps'], self.cache._cache_backend._cache.values()) if len(lines) > 1: raise AssertionError('Should only be one cache line left.') # Make sure that all the jobs got different hosts, and that # the host cached isn't being used by a job. cached_host = lines[0].pop() cached_host.lease() job_hosts = [] default_job_params = test_utils.get_default_job_params() for i in range(0, 3): default_job_params['priority'] = i hosts = get_hosts_for_request(self.response_map, **default_job_params) assert (len(hosts) == 1) host = hosts[0] assert (host.id not in job_hosts and cached_host.id != host.id) return test_utils.wire_format_response_map(self.response_map) self.god.stub_with(rdb.AvailableHostRequestHandler, 'get_response', local_get_response) self.check_hosts(rdb_lib.acquire_hosts(queue_entries))
def testBasicDepsAcls(self):
    """Test a basic deps/acls request.

    Make sure that a basic request with deps and acls finds a host from
    the ready pool that has matching labels and is in a matching aclgroup.

    @raises AssertionError: If the request doesn't find a host, since we
        insert a matching host in the ready pool.
    """
    labels = set(['a', 'b'])
    groups = set(['a', 'b'])
    self.db_helper.create_host('h1', deps=labels, acls=groups)
    new_job = self.create_job(user='******', deps=labels, acls=groups)
    entries = self._dispatcher._refresh_pending_queue_entries()
    assigned_host = rdb_lib.acquire_hosts(entries).next()
    self.check_host_assignment(new_job.id, assigned_host.id)
    # Acquisition must also mark the host as leased.
    self.assertTrue(assigned_host.leased == 1)
def testStaleCacheLine(self): """Test that a stale cache line doesn't satisfy a request.""" # Create 3 jobs, all of which can use the same hosts. The first # will cache the only remaining host after taking one, the second # will also take a host, but not cache anything, while the third # will try to use the host cached by the first job but fail because # it is already leased. default_params = test_utils.get_default_job_params() default_params['priority'] = 2 self.create_job(**default_params) default_params['priority'] = 1 default_params['deps'] = default_params['deps'][0] self.create_job(**default_params) default_params['priority'] = 0 default_params['deps'] = test_utils.DEFAULT_DEPS self.create_job(**default_params) host_1 = self.db_helper.create_host( 'h1', **test_utils.get_default_host_params()) host_2 = self.db_helper.create_host( 'h2', **test_utils.get_default_host_params()) queue_entries = self._dispatcher._refresh_pending_queue_entries() def local_get_response(self): """ Local rdb.get_response handler.""" default_job_params = test_utils.get_default_job_params() # Confirm that even though the third job hit the cache, it wasn't # able to use the cached host because it was already leased, and # that it doesn't add it back to the cache. assert (self.cache.misses == 2 and self.cache.hits == 1) lines = get_line_with_labels( default_job_params['deps'], self.cache._cache_backend._cache.values()) assert (len(lines) == 0) assert (int(self.cache.mean_staleness()) == 100) return test_utils.wire_format_response_map(self.response_map) self.god.stub_with(rdb.AvailableHostRequestHandler, 'get_response', local_get_response) acquired_hosts = list(rdb_lib.acquire_hosts(queue_entries)) self.assertTrue(acquired_hosts[0].id == host_1.id and acquired_hosts[1].id == host_2.id and acquired_hosts[2] is None)
def testBadAcls(self):
    """Test that we find no hosts when only deps match.

    @raises AssertionError: If the request finds a host, since the only
        host in the ready pool will not have matching acls.
    """
    shared_dep = set(['a'])
    self.db_helper.create_host('h1', deps=shared_dep, acls=set(['a']))
    # Create the job as a new user who is only in the 'b' and 'Everyone'
    # aclgroups. Though there are several hosts in the Everyone group, the
    # 1 host that has the 'a' dep isn't.
    self.create_job(user='******', deps=shared_dep, acls=set(['b']))
    entries = self._dispatcher._refresh_pending_queue_entries()
    acquired = rdb_lib.acquire_hosts(entries).next()
    self.assert_(not acquired)
def testPreferredDeps(self):
    """Test that preferred deps are respected.

    If multiple hosts satisfy a job's deps, the one with a preferred
    label will be assigned to the job.

    @raises AssertionError: If a host without a preferred label is
        assigned to the job instead of one with a preferred label.
    """
    lumpy_deps = set(['board:lumpy'])
    stumpy_deps = set(['board:stumpy'])
    stumpy_deps_with_crosversion = set(
            ['board:stumpy', 'cros-version:lumpy-release/R41-6323.0.0'])
    acls = set(['a', 'b'])

    # Hosts lumpy1 and lumpy2 are created as a control group,
    # which ensures that if no preferred label is used, the host
    # with a smaller id will be chosen first. We need to make sure
    # stumpy2 was chosen because it has a cros-version label, but not
    # because of other randomness.
    self.db_helper.create_host('lumpy1', deps=lumpy_deps, acls=acls)
    self.db_helper.create_host('lumpy2', deps=lumpy_deps, acls=acls)
    self.db_helper.create_host('stumpy1', deps=stumpy_deps, acls=acls)
    self.db_helper.create_host('stumpy2',
                               deps=stumpy_deps_with_crosversion,
                               acls=acls)
    job_1 = self.create_job(user='******', deps=lumpy_deps, acls=acls)
    job_2 = self.create_job(user='******',
                            deps=stumpy_deps_with_crosversion,
                            acls=acls)
    queue_entries = self._dispatcher._refresh_pending_queue_entries()
    matching_hosts = list(rdb_lib.acquire_hosts(queue_entries))
    assignment = {}
    # Removed a stray, unused function-level 'import logging' that was
    # left here by an earlier debugging session.
    for job, host in zip(queue_entries, matching_hosts):
        self.check_host_assignment(job.id, host.id)
        assignment[job.id] = host.hostname
    self.assertEqual(assignment[job_1.id], 'lumpy1')
    self.assertEqual(assignment[job_2.id], 'stumpy2')
def testDummyCache(self):
    """Test that the dummy cache doesn't save hosts."""
    # Create 2 jobs and 3 hosts. Both the jobs should not hit the cache,
    # nor should they cache anything, but both jobs should acquire hosts.
    default_params = test_utils.get_default_job_params()
    default_host_params = test_utils.get_default_host_params()
    for i in range(0, 2):
        default_params['parent_job_id'] = i
        self.create_job(**default_params)
        self.db_helper.create_host('h%s' % i, **default_host_params)
    self.db_helper.create_host('h2', **default_host_params)
    queue_entries = self._dispatcher._refresh_pending_queue_entries()
    self.god.stub_with(rdb_cache_manager.RDBHostCacheManager, 'use_cache',
                       False)

    def local_get_response(self):
        """Local rdb.get_response handler.

        Confirms both requests missed the cache, each acquired a distinct
        host, and the dummy backend kept no '_cache' attribute at all.
        """
        if not (self.cache.hits == 0 and self.cache.misses == 2):
            raise AssertionError(
                    'Neither request should have hit the '
                    'cache, but both should have inserted into it.')
        # Make sure both requests actually found a host.
        default_params = test_utils.get_default_job_params()
        job1_host = get_hosts_for_request(
                self.response_map, **default_params)[0]
        default_params['parent_job_id'] = 1
        job2_host = get_hosts_for_request(
                self.response_map, **default_params)[0]
        if (not job1_host or not job2_host or
            job2_host.hostname == job1_host.hostname):
            # Typo fix: 'Excected' -> 'Expected'.
            raise AssertionError('Expected acquisitions did not occur.')
        # Idiom fix: 'hasattr(...) == False' replaced with 'not hasattr'.
        assert not hasattr(self.cache._cache_backend, '_cache')
        return test_utils.wire_format_response_map(self.response_map)

    self.god.stub_with(rdb.AvailableHostRequestHandler,
                       'get_response', local_get_response)
    self.check_hosts(rdb_lib.acquire_hosts(queue_entries))
def testSuiteOrderedHostAcquisition(self):
    """Test that older suite jobs acquire hosts first.

    Make sure older suite jobs get hosts first, but not at the expense of
    higher priority jobs.

    @raises ValueError: If unexpected acquisitions occur, eg:
        suite_job_2 acquires the last 2 hosts instead of suite_job_1.
        isolated_important_job doesn't get any hosts.
        Any job acquires more hosts than necessary.
    """
    board = 'x'

    # Create 2 suites such that the later suite has an ordering of deps
    # that places it ahead of the earlier suite, if parent_job_id is
    # ignored.
    suite_without_dep = self.create_suite(num=2, priority=0, board=board)
    suite_with_dep = self.create_suite(num=1, priority=0, board=board)
    self.db_helper.add_deps_to_job(suite_with_dep[0], dep_names=list('y'))

    # Create an important job that should be ahead of the first suite,
    # because priority trumps parent_job_id and time of creation.
    isolated_important_job = self.create_job(priority=3, deps=set([board]))

    # Create 3 hosts, all with the deps to satisfy the last suite.
    for i in range(0, 3):
        self.db_helper.create_host('h%s' % i, deps=set([board, 'y']))
    queue_entries = self._dispatcher._refresh_pending_queue_entries()

    def local_response_handler(request_manager):
        """Reorder requests and check host acquisition.

        @raises ValueError: If unexpected/no acquisitions occur.
        """
        if any([request for request in request_manager.request_queue
                if request.parent_job_id is None]):
            raise ValueError('Parent_job_id can never be None.')

        # This will result in the ordering:
        # [suite_2_1, suite_1_*, suite_1_*, isolated_important_job]
        # The priority scheduling order should be:
        # [isolated_important_job, suite_1_*, suite_1_*, suite_2_1]
        # Since:
        # a. the isolated_important_job is the most important.
        # b. suite_1 was created before suite_2, regardless of deps.
        disorderly_queue = sorted(request_manager.request_queue,
                                  key=lambda r: -r.parent_job_id)
        request_manager.request_queue = disorderly_queue
        result = request_manager.api_call(request_manager.request_queue)
        if not result:
            raise ValueError('Expected results but got none.')

        # Verify that the isolated_important_job got a host, and that the
        # first suite got both remaining free hosts.
        for request, hosts in result.iteritems():
            if request.parent_job_id == 0:
                if len(hosts) > 1:
                    raise ValueError('First job acquired more hosts than '
                                     'necessary. Response map: %s' %
                                     result)
                continue
            if request.parent_job_id == 1:
                if len(hosts) < 2:
                    raise ValueError('First suite job requests were not '
                                     'satisfied. Response_map: %s' %
                                     result)
                continue
            # The second suite job got hosts instead of one of
            # the others. Either way this is a failure.
            raise ValueError('Unexpected host acquisition '
                             'Response map: %s' % result)
        yield None

    self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
                       local_response_handler)
    list(rdb_lib.acquire_hosts(queue_entries))