def setup(self, build: Build, executor_start_index: int) -> bool:
    """
    Execute a setup command on the slave for the specified build. The setup process executes asynchronously
    on the slave and the slave will alert the master when setup is complete and it is ready to start working
    on subjobs.

    :param build: The build to set up this slave to work on
    :param executor_start_index: The index the slave should number its executors from for this build
    :return: Whether or not the call to start setup on the slave was successful
    """
    slave_project_type_params = build.build_request.build_parameters().copy()
    slave_project_type_params.update(build.project_type.slave_param_overrides())

    setup_url = self._slave_api.url('build', build.build_id(), 'setup')
    post_data = {
        'project_type_params': slave_project_type_params,
        'build_executor_start_index': executor_start_index,
    }

    self.current_build_id = build.build_id()
    try:
        self._network.post_with_digest(setup_url, post_data, Secret.get())
    except (requests.ConnectionError, requests.Timeout) as ex:
        self._logger.warning('Setup call to {} failed with {}: {}.', self, ex.__class__.__name__, str(ex))
        self.mark_dead()
        return False
    return True
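# Illustrative sketch only (not from the source): the approximate shape of the JSON body that setup() above
# posts to the slave's build setup endpoint. The key names mirror post_data; the concrete parameter values and
# the 'build_project_directory' override are made-up examples.
example_setup_post_data = {
    'project_type_params': {
        'type': 'git',                                           # original build request parameters...
        'url': 'git@name/repo.git',
        'job_name': 'Example',
        'build_project_directory': '/tmp/clusterrunner_build',   # ...plus the project type's slave_param_overrides()
    },
    'build_executor_start_index': 0,
}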
def test_validate_update_params_for_cancelling_build(self):
    build = Build(BuildRequest({}))

    success, response = build.validate_update_params({'status': 'canceled'})

    self.assertTrue(success, "Correct status update should report success")
    self.assertEqual({}, response, "Error response should be empty")
def handle_request_for_new_build(self, build_params):
    """
    Creates a new Build object and adds it to the request queue to be processed.

    :param build_params:
    :type build_params: dict[str, str]
    :rtype: tuple[bool, dict[str, str]]
    """
    build_request = BuildRequest(build_params)
    success = False
    if build_request.is_valid():
        build = Build(build_request)
        self._all_builds_by_id[build.build_id()] = build
        self._request_queue.put(build)
        analytics.record_event(analytics.BUILD_REQUEST_QUEUED, build_id=build.build_id())
        response = {'build_id': build.build_id()}
        success = True
    elif not build_request.is_valid_type():
        response = {'error': 'Invalid build request type.'}
    else:
        required_params = build_request.required_parameters()
        response = {'error': 'Missing required parameter. Required parameters: {}'.format(required_params)}

    return success, response
def test_allocate_slave_increments_by_num_executors_when_max_is_inf(self):
    build = Build(BuildRequest({}))
    slave = Mock()
    slave.num_executors = 10

    build.allocate_slave(slave)

    self.assertEqual(build._num_executors_allocated, 10, "Should be incremented by num executors")
def handle_request_for_new_build(self, build_params):
    """
    Creates a new Build object and adds it to the request queue to be processed.

    :param build_params:
    :type build_params: dict[str, str]
    :rtype: tuple[bool, dict[str, str]]
    """
    build_request = BuildRequest(build_params)
    success = False
    if build_request.is_valid():
        build = Build(build_request)
        self._all_builds_by_id[build.build_id()] = build
        build.generate_project_type()  # WIP(joey): This should be internal to the Build object.
        self._build_request_handler.handle_build_request(build)
        response = {'build_id': build.build_id()}
        success = True
    elif not build_request.is_valid_type():
        response = {'error': 'Invalid build request type.'}
    else:
        required_params = build_request.required_parameters()
        response = {'error': 'Missing required parameter. Required parameters: {}'.format(required_params)}

    return success, response  # todo: refactor to use exception instead of boolean
def handle_request_for_new_build(self, build_params):
    """
    Creates a new Build object and adds it to the request queue to be processed.

    :param build_params:
    :type build_params: dict[str, str]
    :rtype: tuple[bool, dict[str, str]]
    """
    build_request = BuildRequest(build_params)
    success = False
    if build_request.is_valid():
        build = Build(build_request)
        BuildStore.add(build)
        build.generate_project_type()  # WIP(joey): This should be internal to the Build object.
        self._build_request_handler.handle_build_request(build)
        response = {'build_id': build.build_id()}
        success = True
    elif not build_request.is_valid_type():
        response = {'error': 'Invalid build request type.'}
    else:
        required_params = build_request.required_parameters()
        response = {'error': 'Missing required parameter. Required parameters: {}'.format(required_params)}

    return success, response  # todo: refactor to use exception instead of boolean
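# Hedged usage sketch (assumed, not from the source): one way an HTTP endpoint could translate the
# (success, response) pair returned by handle_request_for_new_build() into a status code and JSON body.
# The 202/400 status codes and the _to_http_reply helper are illustrative assumptions.
import json

def _to_http_reply(success: bool, response: dict) -> tuple:
    status_code = 202 if success else 400  # 202: build accepted for async processing; 400: invalid request
    return status_code, json.dumps(response)

# e.g. _to_http_reply(True, {'build_id': 1}) -> (202, '{"build_id": 1}')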
def test_allocate_slave_increments_by_per_slave_when_max_not_inf_and_less_than_num(self):
    build = Build(BuildRequest({}))
    build._max_executors_per_slave = 5
    slave = Mock()
    slave.num_executors = 10

    build.allocate_slave(slave)

    self.assertEqual(build._num_executors_allocated, 5, "Should be incremented by max executors per slave")
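# Tiny sketch (an assumption, not project code) of the allocation rule the two allocate_slave tests above
# exercise: a slave contributes at most max_executors_per_slave executors, so an infinite cap uses all of the
# slave's executors while a cap of 5 uses only 5 of its 10.
def executors_to_allocate(num_executors: int, max_executors_per_slave: float) -> int:
    return int(min(num_executors, max_executors_per_slave))

assert executors_to_allocate(10, float('inf')) == 10  # matches the "max is inf" test
assert executors_to_allocate(10, 5) == 5              # matches the "per slave" test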
def test_update_state_to_canceled_sets_state_correctly(self):
    build = Build(BuildRequest({}))
    build._unstarted_subjobs = Queue()

    success = build.update_state({'status': 'canceled'})

    self.assertEqual(build._status(), BuildStatus.CANCELED, "Status not set to canceled")
    self.assertTrue(success, "Update did not report success")
def test_validate_update_params_rejects_bad_keys(self):
    build = Build(BuildRequest({}))

    success, response = build.validate_update_params({'badkey': 'foo'})

    self.assertFalse(success, "Bad status update reported success")
    self.assertEqual({'error': "Key (badkey) is not in list of allowed keys (status)"}, response,
                     "Error response not expected")
def test_validate_update_params_rejects_bad_params(self):
    build = Build(BuildRequest({}))

    success, response = build.validate_update_params({'status': 'foo'})

    self.assertFalse(success, "Bad status update reported success")
    self.assertEqual({'error': "Value (foo) is not in list of allowed values (['canceled']) for status"}, response,
                     "Error response not expected")
def test_teardown_called_on_slave_when_no_subjobs_remain(self):
    build = Build(BuildRequest({}))
    slave = Slave('', 1)
    slave.teardown = MagicMock()
    slave.free_executor = MagicMock(return_value=0)
    build._unstarted_subjobs = Queue()

    build.execute_next_subjob_on_slave(slave)

    slave.teardown.assert_called_with()
def test_generate_project_type_raises_error_if_failed_to_generate_project(self):
    build = Build(BuildRequest({}))
    self.patch('app.master.build.util.create_project_type').return_value = None

    with self.assertRaises(BuildProjectError):
        build.generate_project_type()
def test_build_status_returns_requested_after_build_creation(self):
    build = Build(BuildRequest({}))
    status = build._status()

    self.assertEqual(status, BuildStatus.QUEUED,
                     'Build status should be QUEUED immediately after build has been created.')
def test_build_status_returns_queued_after_build_preparation(self):
    subjobs = self._create_subjobs()
    mock_project_type = self._create_mock_project_type()
    build = Build(BuildRequest({}))

    build.prepare(subjobs, mock_project_type, self._create_job_config(self._FAKE_MAX_EXECUTORS))
    status = build._status()

    self.assertEqual(status, BuildStatus.QUEUED)
def test_build_cannot_be_prepared_more_than_once(self):
    build = Build(BuildRequest({}))
    subjobs = self._create_subjobs(count=3)
    mock_project_type = self._create_mock_project_type()

    build.prepare(subjobs, mock_project_type, self._create_job_config(max_executors=self._FAKE_MAX_EXECUTORS))

    with self.assertRaises(Exception):
        build.prepare(subjobs, mock_project_type, self._create_job_config(max_executors=self._FAKE_MAX_EXECUTORS))
def test_build_status_returns_queued_after_build_preparation(self):
    subjobs = self._create_subjobs()
    mock_project_type = self._create_mock_project_type()
    build = Build(BuildRequest({}))
    build._project_type = mock_project_type

    build.prepare(subjobs, self._create_job_config())
    status = build._status()

    self.assertEqual(status, BuildStatus.QUEUED,
                     'Build status should be QUEUED after build has been prepared.')
def test_cancel_depletes_queue_and_sets_canceled(self):
    build = Build(BuildRequest({}))
    build._unstarted_subjobs = Queue()
    build._unstarted_subjobs.put(1)
    slave_mock = Mock()
    build._slaves_allocated = [slave_mock]

    build.cancel()

    self.assertTrue(build._is_canceled, "Build should've been canceled")
    self.assertTrue(build._unstarted_subjobs.empty(), "Build's unstarted subjobs should've been depleted")
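# Minimal sketch (assumed, not the project's implementation) of the queue-draining step that cancel() performs
# in the test above: pop unstarted subjobs until the queue is empty, without blocking.
from queue import Empty, Queue

def drain_unstarted_subjobs(unstarted_subjobs: Queue):
    while True:
        try:
            unstarted_subjobs.get_nowait()
        except Empty:
            return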
def test_execute_next_subjob_with_empty_queue_cant_teardown_same_slave_twice(self):
    build = Build(BuildRequest({}))
    build._unstarted_subjobs = Queue()
    slave = Mock()
    slave.free_executor = Mock(return_value=0)
    build._slaves_allocated.append(slave)

    build.execute_next_subjob_or_teardown_slave(slave)
    build.execute_next_subjob_or_teardown_slave(slave)

    self.assertEqual(slave.teardown.call_count, 1, "Teardown should only be called once")
def test_build_status_returns_queued_after_build_preparation(self):
    subjobs = self._create_subjobs()
    mock_project_type = self._create_mock_project_type()
    build = Build(BuildRequest({}))

    build.prepare(subjobs, mock_project_type, self._create_job_config())
    status = build._status()

    self.assertEqual(status, BuildStatus.QUEUED,
                     'Build status should be QUEUED after build has been prepared.')
def test_allocate_slave_doesnt_use_more_than_max_executors(self):
    subjobs = self._create_subjobs()
    mock_project_type = self._create_mock_project_type()
    fake_setup_command = 'mock command'
    mock_slave = self._create_mock_slave()
    build = Build(BuildRequest({'setup': fake_setup_command}))

    build.prepare(subjobs, mock_project_type, self._create_job_config(1))
    build.allocate_slave(mock_slave)

    self.assertEqual(build._num_allocated_executors, build._max_executors)
def test_teardown_called_on_slave_when_slave_in_shutdown_mode(self):
    build = Build(BuildRequest({}))
    slave = Slave('', 1)
    slave.teardown = MagicMock()
    slave._is_in_shutdown_mode = True
    slave.free_executor = MagicMock(return_value=0)
    build._unstarted_subjobs = Queue()
    build._unstarted_subjobs.put(Mock(spec=Subjob))
    build._slaves_allocated = [slave]

    build.execute_next_subjob_or_teardown_slave(slave)

    slave.teardown.assert_called_with()
def test_execute_next_subjob_with_empty_queue_cant_teardown_same_slave_twice(self):
    build = Build(BuildRequest({}))
    build._unstarted_subjobs = Queue()
    slave = Mock()
    slave.free_executor = Mock(return_value=0)
    build._slaves_allocated.append(slave)

    build.execute_next_subjob_on_slave(slave)
    build.execute_next_subjob_on_slave(slave)

    self.assertEqual(slave.teardown.call_count, 1, "Teardown should only be called once")
def test_build_doesnt_use_more_than_max_executors(self):
    subjobs = self._create_subjobs()
    mock_project_type = self._create_mock_project_type()
    fake_setup_command = 'mock command'
    mock_slaves = [self._create_mock_slave(num_executors=5) for _ in range(3)]
    expected_num_executors = 12  # We expect the build to use 12 out of 15 available executors.
    build = Build(BuildRequest({'setup': fake_setup_command}))
    build._project_type = mock_project_type
    build.execute_next_subjob_or_teardown_slave = MagicMock()

    build.prepare(subjobs, self._create_job_config(max_executors=expected_num_executors))
    [build.allocate_slave(mock_slave) for mock_slave in mock_slaves]
    [build.begin_subjob_executions_on_slave(mock_slave) for mock_slave in mock_slaves]

    self.assertEqual(build.execute_next_subjob_or_teardown_slave.call_count, expected_num_executors,
                     'Build should start executing as many subjobs as its max_executors setting.')
def test_build_doesnt_use_more_than_max_executors_per_slave(self):
    subjobs = self._create_subjobs()
    mock_project_type = self._create_mock_project_type()
    fake_setup_command = 'mock command'
    mock_slaves = [self._create_mock_slave(num_executors=5) for _ in range(3)]
    max_executors_per_slave = 2
    expected_total_num_executors_used = 6  # We expect the build to use 2 executors on each of the 3 slaves.

    build = Build(BuildRequest({'setup': fake_setup_command}))
    build._project_type = mock_project_type
    build.execute_next_subjob_or_teardown_slave = MagicMock()

    build.prepare(subjobs, self._create_job_config(max_executors_per_slave=max_executors_per_slave))
    [build.allocate_slave(mock_slave) for mock_slave in mock_slaves]

    expected_current_num_executors_used = 0
    for i in range(len(mock_slaves)):
        build.begin_subjob_executions_on_slave(mock_slaves[i])
        expected_current_num_executors_used += max_executors_per_slave
        self.assertEqual(
            build.execute_next_subjob_or_teardown_slave.call_count, expected_current_num_executors_used,
            'After allocating {} slaves, build with max_executors_per_slave set to {} should only be using {} '
            'executors.'.format(i + 1, max_executors_per_slave, expected_current_num_executors_used))

    self.assertEqual(build.execute_next_subjob_or_teardown_slave.call_count, expected_total_num_executors_used,
                     'Build should start executing as many subjobs per slave as its max_executors_per_slave setting.')
def test_build_doesnt_use_more_than_max_executors_per_slave(self):
    subjobs = self._create_subjobs()
    mock_project_type = self._create_mock_project_type()
    fake_setup_command = 'mock command'
    mock_slaves = [self._create_mock_slave(num_executors=5) for _ in range(3)]
    max_executors_per_slave = 2
    expected_total_num_executors_used = 6  # We expect the build to use 2 executors on each of the 3 slaves.

    build = Build(BuildRequest({'setup': fake_setup_command}))
    build.execute_next_subjob_on_slave = MagicMock()

    build.prepare(subjobs, mock_project_type,
                  self._create_job_config(max_executors_per_slave=max_executors_per_slave))
    [build.allocate_slave(mock_slave) for mock_slave in mock_slaves]

    expected_current_num_executors_used = 0
    for i in range(len(mock_slaves)):
        build.begin_subjob_executions_on_slave(mock_slaves[i])
        expected_current_num_executors_used += max_executors_per_slave
        self.assertEqual(
            build.execute_next_subjob_on_slave.call_count, expected_current_num_executors_used,
            'After allocating {} slaves, build with max_executors_per_slave set to {} should only be using {} '
            'executors.'.format(i + 1, max_executors_per_slave, expected_current_num_executors_used))

    self.assertEqual(build.execute_next_subjob_on_slave.call_count, expected_total_num_executors_used,
                     'Build should start executing as many subjobs per slave as its max_executors_per_slave setting.')
def test_add_idle_slave_does_not_mark_build_finished_when_slaves_not_done(self):
    master = ClusterMaster()
    slave1 = Slave('', 1)
    slave2 = Slave('', 1)
    slave3 = Slave('', 1)
    slave1.current_build_id = 1
    slave2.current_build_id = None
    slave3.current_build_id = 1
    build1 = Build(BuildRequest({}))
    master._all_slaves_by_url = {'1': slave1, '2': slave2, '3': slave3}
    master._all_builds_by_id = {1: build1}
    build1._build_id = 1
    build1.finish = MagicMock()

    master.add_idle_slave(slave1)

    self.assertFalse(build1.finish.called)
def test_updating_slave_to_idle_state_does_not_mark_build_finished_when_slaves_not_done(self):
    master = ClusterMaster()
    slave1 = Slave('', 1)
    slave2 = Slave('', 1)
    slave3 = Slave('', 1)
    slave1.current_build_id = 1
    slave2.current_build_id = None
    slave3.current_build_id = 1
    build1 = Build(BuildRequest({}))
    master._all_slaves_by_url = {'1': slave1, '2': slave2, '3': slave3}
    master._all_builds_by_id = {1: build1}
    build1._build_id = 1
    build1.finish = MagicMock()

    master.handle_slave_state_update(slave1, SlaveState.IDLE)

    self.assertFalse(build1.finish.called)
def test_build_status_returns_building_after_setup_is_complete_and_subjobs_are_executing(self):
    subjobs = self._create_subjobs(count=3)
    mock_project_type = self._create_mock_project_type()
    mock_slave = self._create_mock_slave(num_executors=2)
    build = Build(BuildRequest({}))
    build._project_type = mock_project_type

    build.prepare(subjobs, self._create_job_config())
    build.allocate_slave(mock_slave)
    build.begin_subjob_executions_on_slave(mock_slave)  # two out of three subjobs are now in progress

    self.assertEqual(build._status(), BuildStatus.BUILDING,
                     'Build status should be BUILDING after subjobs have started executing on slaves.')
def add(cls, build: Build):
    """
    Add new build to collection.

    :param build: The build to add to the store.
    """
    build_id = cls._store_build(build)
    build._build_id = build_id
    cls._cached_builds_by_id[build_id] = build
def test_deserialized_build_api_representation_is_same_as_original_build_no_failures(self):
    build = Build(BuildRequest({
        'type': 'git',
        'url': 'git@name/repo.git',
        'job_name': 'Example'
    }))
    build.generate_project_type()
    BuildStore.add(build)

    reconstructed_build = Build.load_from_db(build.build_id())

    original_build_results = build.api_representation()
    reconstructed_build_results = reconstructed_build.api_representation()
    diff = self._compare_dictionaries_with_same_keys(original_build_results, reconstructed_build_results)

    # The build_project_directory is an auto generated tmp directory -- these will never be the same
    diff.pop('request_params|build_project_directory', None)

    # This is very similar to self.assertDictEqual, but here we won't consider different key orderings
    # as "not equal" which matters because `api_representation` does not have deterministic ordering
    self.assertEqual(diff, {}, 'Deserialized build is not the same as the original build.')
def test_need_more_slaves_returns_false_if_max_processes_is_reached(self):
    subjobs = self._create_subjobs(count=5)
    mock_project_type = self._create_mock_project_type()
    mock_slave = self._create_mock_slave(num_executors=1)
    build = Build(BuildRequest({}))
    build._project_type = mock_project_type

    build.prepare(subjobs, self._create_job_config(max_executors=1))
    build.allocate_slave(mock_slave)

    self.assertFalse(build.needs_more_slaves(), "if max processes is reached, we shouldn't need more slaves")
def test_handle_result_reported_from_slave_does_nothing_when_build_is_canceled(self):
    build_id = 1
    slave_url = "url"
    build = Build(BuildRequest({}))
    build._is_canceled = True
    self.patch_object(build, '_handle_subjob_payload')
    self.patch_object(build, '_mark_subjob_complete')

    master = ClusterMaster()
    master._all_builds_by_id[build_id] = build
    master._all_slaves_by_url[slave_url] = Mock()
    mock_scheduler = self.mock_scheduler_pool.get(build)

    master.handle_result_reported_from_slave(slave_url, build_id, 1)

    self.assertEqual(build._handle_subjob_payload.call_count, 0, "Build is canceled, should not handle payload")
    self.assertEqual(build._mark_subjob_complete.call_count, 0, "Build is canceled, should not complete subjobs")
    self.assertEqual(mock_scheduler.execute_next_subjob_or_free_executor.call_count, 0,
                     "Build is canceled, should not do next subjob")
def test_handle_result_reported_from_slave_does_nothing_when_build_is_canceled(self):
    build_id = 1
    slave_url = "url"
    build = Build(BuildRequest({}))
    build._is_canceled = True
    self.patch_object(build, '_handle_subjob_payload')
    self.patch_object(build, '_mark_subjob_complete')
    self.patch_object(build, 'execute_next_subjob_or_teardown_slave')

    master = ClusterMaster()
    master._all_builds_by_id[build_id] = build
    master._all_slaves_by_url[slave_url] = Mock()

    master.handle_result_reported_from_slave(slave_url, build_id, 1)

    self.assertEqual(build._handle_subjob_payload.call_count, 0, "Build is canceled, should not handle payload")
    self.assertEqual(build._mark_subjob_complete.call_count, 0, "Build is canceled, should not complete subjobs")
    self.assertEqual(build.execute_next_subjob_or_teardown_slave.call_count, 0,
                     "Build is canceled, should not do next subjob")
def test_need_more_slaves_returns_true_if_max_processes_is_not_reached(self):
    subjobs = self._create_subjobs(count=8)
    mock_project_type = self._create_mock_project_type()
    mock_slave = self._create_mock_slave(num_executors=5)
    build = Build(BuildRequest({}))
    build._project_type = mock_project_type

    build.prepare(subjobs, self._create_job_config(max_executors=8))
    build.allocate_slave(mock_slave)

    self.assertTrue(build.needs_more_slaves(), "if max_processes is not reached, we should need more slaves")
def test_exception_is_raised_if_problem_occurs_writing_subjob(self):
    Configuration['results_directory'] = '/tmp/results'
    build = Build(BuildRequest({}))
    build._project_type = self._create_mock_project_type()
    subjob = self._create_subjobs(count=1, build_id=build.build_id())[0]
    build.prepare([subjob], self._create_job_config())

    self.mock_fs.write_file.side_effect = FileExistsError

    with self.assertRaises(Exception):
        payload = {'filename': 'turtles.txt', 'body': 'Heroes in a half shell.'}
        build.complete_subjob(subjob.subjob_id(), payload=payload)
def test_build_cannot_be_prepared_more_than_once(self):
    build = Build(BuildRequest({}))
    subjobs = self._create_subjobs(count=3)
    mock_project_type = self._create_mock_project_type()
    build._project_type = mock_project_type

    build.prepare(subjobs, self._create_job_config())

    with self.assertRaises(Exception):
        build.prepare(subjobs, self._create_job_config())
def test_allocate_slave_calls_slave_setup(self):
    subjobs = self._create_subjobs()
    mock_project_type = self._create_mock_project_type()
    mock_slave = self._create_mock_slave()
    build = Build(Mock(spec_set=BuildRequest))
    build._project_type = mock_project_type

    build.prepare(subjobs, self._create_job_config())
    build.allocate_slave(mock_slave)

    mock_slave.setup.assert_called_once_with(build)
def test_exception_is_raised_if_problem_occurs_writing_subjob(self):
    Configuration['results_directory'] = abspath(join('some', 'temp', 'directory'))
    build = Build(BuildRequest({}))
    build._project_type = self._create_mock_project_type()
    subjob = self._create_subjobs(count=1, build_id=build.build_id())[0]
    build.prepare([subjob], self._create_job_config())

    self.mock_fs.write_file.side_effect = FileExistsError

    with self.assertRaises(Exception):
        payload = {'filename': 'turtles.txt', 'body': 'Heroes in a half shell.'}
        build.complete_subjob(subjob.subjob_id(), payload=payload)
def test_build_status_returns_finished_after_all_subjobs_complete_and_slaves_finished(self):
    subjobs = self._create_subjobs(count=3)
    mock_project_type = self._create_mock_project_type()
    mock_slave = self._create_mock_slave(num_executors=3)
    postbuild_tasks_complete_event = Event()
    build = Build(BuildRequest({}))
    build._project_type = mock_project_type
    build._create_build_artifact = MagicMock()
    self._on_async_postbuild_tasks_completed(build, postbuild_tasks_complete_event.set)

    build.prepare(subjobs, self._create_job_config())
    build.allocate_slave(mock_slave)  # all three subjobs are now "in progress"
    for subjob in subjobs:
        build.complete_subjob(subjob.subjob_id())

    # Wait for the async thread to complete executing postbuild tasks.
    self.assertTrue(postbuild_tasks_complete_event.wait(timeout=2),
                    'Postbuild tasks should complete within a few seconds.')

    # Verify build artifacts was called after subjobs completed
    build._create_build_artifact.assert_called_once_with()
    self.assertTrue(build._subjobs_are_finished)
    self.assertEqual(build._status(), BuildStatus.FINISHED)
def test_cancel_exits_early_if_build_not_running(self):
    build = Build(BuildRequest({}))
    build._unstarted_subjobs = Queue()
    slave_mock = Mock()
    build._slaves_allocated = [slave_mock]
    build._status = Mock(return_value=BuildStatus.FINISHED)

    build.cancel()

    self.assertFalse(build._is_canceled, "Build should not be canceled")
    self.assertEqual(slave_mock.teardown.call_count, 0, "Teardown should not have been called")
def test_complete_subjob_writes_and_extracts_payload_to_correct_directory(self):
    Configuration['results_directory'] = '/tmp/results'
    build = Build(BuildRequest({}))
    build._project_type = self._create_mock_project_type()
    subjob = self._create_subjobs(count=1, build_id=build.build_id())[0]
    build.prepare([subjob], self._create_job_config())

    payload = {'filename': 'turtles.txt', 'body': 'Heroes in a half shell.'}
    build.complete_subjob(subjob.subjob_id(), payload=payload)

    self.mock_fs.write_file.assert_called_once_with('Heroes in a half shell.', '/tmp/results/1/turtles.txt')
    self.mock_fs.extract_tar.assert_called_once_with('/tmp/results/1/turtles.txt', delete=True)
def test_build_status_returns_building_after_setup_has_started(self):
    subjobs = self._create_subjobs()
    mock_project_type = self._create_mock_project_type()
    mock_slave = self._create_mock_slave()
    build = Build(BuildRequest({}))
    build._project_type = mock_project_type

    build.prepare(subjobs, self._create_job_config())
    build.allocate_slave(mock_slave)

    self.assertEqual(build._status(), BuildStatus.BUILDING,
                     'Build status should be BUILDING after setup has started on slaves.')
def test_need_more_slaves_returns_false_if_max_processes_is_reached(self):
    subjobs = self._create_subjobs(count=5)
    mock_project_type = self._create_mock_project_type()
    mock_slave = self._create_mock_slave(num_executors=1)
    build = Build(BuildRequest({}))

    build.prepare(subjobs, mock_project_type, self._create_job_config(max_executors=1))
    build.allocate_slave(mock_slave)

    self.assertFalse(build.needs_more_slaves(), "if max processes is reached, we shouldn't need more slaves")
def test_need_more_slaves_returns_true_if_max_processes_is_not_reached(self):
    subjobs = self._create_subjobs(count=8)
    mock_project_type = self._create_mock_project_type()
    mock_slave = self._create_mock_slave(num_executors=5)
    build = Build(BuildRequest({}))

    build.prepare(subjobs, mock_project_type, self._create_job_config(max_executors=8))
    build.allocate_slave(mock_slave)

    self.assertTrue(build.needs_more_slaves(), "if max_processes is not reached, we should need more slaves")
def test_handle_result_reported_from_slave_when_build_is_canceled(self):
    build_id = 1
    slave_url = "url"
    build = Build(BuildRequest({}))
    self.patch('app.master.build.util')
    build.generate_project_type()
    build.cancel()
    self.patch_object(build, '_handle_subjob_payload')
    self.patch_object(build, '_mark_subjob_complete')

    master = ClusterMaster()
    slave_registry = SlaveRegistry.singleton()
    BuildStore._all_builds_by_id[build_id] = build
    slave_registry._all_slaves_by_url[slave_url] = Mock()
    mock_scheduler = self.mock_scheduler_pool.get(build)

    master.handle_result_reported_from_slave(slave_url, build_id, 1)

    self.assertEqual(build._handle_subjob_payload.call_count, 1, "Canceled builds should handle payload")
    self.assertEqual(build._mark_subjob_complete.call_count, 1, "Canceled builds should mark their subjobs complete")
    self.assertTrue(mock_scheduler.execute_next_subjob_or_free_executor.called)
def test_subjobs_with_pagination_request(
        self,
        offset: Optional[int],
        limit: Optional[int],
        expected_first_subjob_id: int,
        expected_last_subjob_id: int,
):
    build = Build(BuildRequest({}))
    # Create 20 mock subjobs with ids 1 to 20
    for subjob_id in range(1, self._NUM_SUBJOBS + 1):
        subjob_mock = Mock(spec=Subjob)
        subjob_mock.subjob_id = subjob_id
        build._all_subjobs_by_id[subjob_id] = subjob_mock

    requested_subjobs = build.get_subjobs(offset, limit)

    id_of_first_subjob = requested_subjobs[0].subjob_id if len(requested_subjobs) else None
    id_of_last_subjob = requested_subjobs[-1].subjob_id if len(requested_subjobs) else None
    num_subjobs = len(requested_subjobs)

    self.assertEqual(id_of_first_subjob, expected_first_subjob_id, 'Received the wrong first subjob from request')
    self.assertEqual(id_of_last_subjob, expected_last_subjob_id, 'Received the wrong last subjob from request')
    if offset is not None and limit is not None:
        self.assertLessEqual(num_subjobs, self._PAGINATION_MAX_LIMIT, 'Received too many subjobs from request')
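# Minimal sketch (assumed, not the project's get_subjobs() implementation) of the offset/limit slicing behavior
# the pagination test above exercises: subjobs are ordered by id and a window of at most `limit` items starting
# at `offset` is returned, capped at MAX_LIMIT (a stand-in for the _PAGINATION_MAX_LIMIT used in the test).
MAX_LIMIT = 10

def get_subjobs_sketch(all_subjobs_by_id: dict, offset=None, limit=None) -> list:
    offset = 0 if offset is None else offset
    limit = MAX_LIMIT if limit is None else min(limit, MAX_LIMIT)
    ordered_subjobs = [all_subjobs_by_id[subjob_id] for subjob_id in sorted(all_subjobs_by_id)]
    return ordered_subjobs[offset:offset + limit]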
def get(cls, build_id: int) -> Optional[Build]:
    """
    Returns a build by id.

    :param build_id: The id for the build whose status we are getting
    """
    build = cls._cached_builds_by_id.get(build_id)
    if build is None:
        cls._logger.debug('Requested build (id: {}) was not found in cache, checking database.'.format(build_id))
        build = Build.load_from_db(build_id)
        if build is not None:
            cls._cached_builds_by_id[build_id] = build

    return build
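# Simplified, self-contained sketch (not the project's code) of the write-through caching pattern that
# BuildStore.add() and get() above follow: writes persist to a backing store and then populate an in-memory
# dict; reads try the dict first and fall back to the store. BackingStore here stands in for the database layer.
class BackingStore:
    def __init__(self):
        self._rows, self._next_id = {}, 1

    def insert(self, value) -> int:
        row_id, self._next_id = self._next_id, self._next_id + 1
        self._rows[row_id] = value
        return row_id

    def load(self, row_id):
        return self._rows.get(row_id)


class CachedStore:
    def __init__(self, backing: BackingStore):
        self._backing = backing
        self._cache = {}

    def add(self, value) -> int:
        row_id = self._backing.insert(value)  # persist first (like _store_build above)
        self._cache[row_id] = value           # then populate the cache
        return row_id

    def get(self, row_id):
        value = self._cache.get(row_id)
        if value is None:                     # cache miss: fall back to the backing store
            value = self._backing.load(row_id)
            if value is not None:
                self._cache[row_id] = value
        return value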
def test_build_status_returns_building_after_some_subjobs_are_executing(self):
    subjobs = self._create_subjobs(count=3)
    mock_project_type = self._create_mock_project_type()
    mock_slave = self._create_mock_slave(num_executors=2)
    build = Build(BuildRequest({}))

    build.prepare(subjobs, mock_project_type, self._create_job_config(self._FAKE_MAX_EXECUTORS))
    build.allocate_slave(mock_slave)  # two out of three subjobs are now "in progress"
    status = build._status()

    self.assertEqual(status, BuildStatus.BUILDING)