def handle_request_for_new_build(self, build_params):
    """
    Creates a new Build object and adds it to the request queue to be processed.

    :param build_params:
    :type build_params: dict[str, str]
    :rtype tuple [bool, dict [str, str]]
    """
    # todo: refactor to use exception instead of boolean
    build_request = BuildRequest(build_params)

    # Reject invalid requests up front (guard clauses) so the happy path reads straight down.
    if not build_request.is_valid():
        if not build_request.is_valid_type():
            return False, {'error': 'Invalid build request type.'}
        required_params = build_request.required_parameters()
        return False, {'error': 'Missing required parameter. Required parameters: {}'.format(required_params)}

    build = Build(build_request)
    self._all_builds_by_id[build.build_id()] = build
    build.generate_project_type()  # WIP(joey): This should be internal to the Build object.
    self._build_request_handler.handle_build_request(build)
    return True, {'build_id': build.build_id()}
def handle_request_for_new_build(self, build_params):
    """
    Creates a new Build object and adds it to the request queue to be processed.

    :param build_params:
    :type build_params: dict[str, str]
    :rtype tuple [bool, dict [str, str]]
    """
    # todo: refactor to use exception instead of boolean
    request = BuildRequest(build_params)

    if request.is_valid():
        # Valid request: register the build, generate its project type, and hand it off.
        new_build = Build(request)
        self._all_builds_by_id[new_build.build_id()] = new_build
        new_build.generate_project_type()
        self._build_request_handler.handle_build_request(new_build)
        return True, {'build_id': new_build.build_id()}

    if request.is_valid_type():
        # Type was recognized, so the failure must be a missing required parameter.
        missing = request.required_parameters()
        return False, {
            'error': 'Missing required parameter. Required parameters: {}'.format(missing)
        }

    return False, {'error': 'Invalid build request type.'}
def handle_request_for_new_build(self, build_params):
    """
    Creates a new Build object and adds it to the request queue to be processed.

    :param build_params:
    :type build_params: dict[str, str]
    :rtype tuple [bool, dict [str, str]]
    """
    build_request = BuildRequest(build_params)

    # Guard clauses: report the most specific validation failure first.
    if not build_request.is_valid():
        if not build_request.is_valid_type():
            return False, {'error': 'Invalid build request type.'}
        return False, {
            'error': 'Missing required parameter. Required parameters: {}'.format(
                build_request.required_parameters())
        }

    build = Build(build_request)
    self._all_builds_by_id[build.build_id()] = build
    self._request_queue.put(build)
    analytics.record_event(analytics.BUILD_REQUEST_QUEUED, build_id=build.build_id())
    return True, {'build_id': build.build_id()}
def test_build_status_returns_finished_after_all_subjobs_complete_and_slaves_finished(self):
    """Once every subjob completes and the async postbuild tasks run, status is FINISHED."""
    subjobs = self._create_subjobs(count=3)
    mock_project_type = self._create_mock_project_type()
    mock_slave = self._create_mock_slave(num_executors=3)
    postbuild_tasks_complete_event = Event()
    build = Build(BuildRequest({}))
    build._project_type = mock_project_type
    build._create_build_artifact = MagicMock()
    self._on_async_postbuild_tasks_completed(build, postbuild_tasks_complete_event.set)

    build.prepare(subjobs, self._create_job_config())
    build.allocate_slave(mock_slave)  # all three subjobs are now "in progress"
    for subjob in subjobs:
        build.complete_subjob(subjob.subjob_id())

    # Wait for the async thread to complete executing postbuild tasks.
    # Fixed: the implicitly concatenated literals were missing a space, which produced
    # the garbled failure message "...within a fewseconds."
    self.assertTrue(postbuild_tasks_complete_event.wait(timeout=2),
                    'Postbuild tasks should complete within a few seconds.')

    # Verify build artifacts was called after subjobs completed
    build._create_build_artifact.assert_called_once_with()
    self.assertTrue(build._subjobs_are_finished)
    self.assertEqual(build._status(), BuildStatus.FINISHED)
def test_build_status_returns_finished_after_all_subjobs_complete_and_slaves_finished(self):
    """After subjob completion, postbuild tasks, and finish(), status is FINISHED."""
    subjobs = self._create_subjobs(count=3)
    project_type = self._create_mock_project_type()
    three_executor_slave = self._create_mock_slave(num_executors=3)
    build = Build(BuildRequest({}))
    build.prepare(subjobs, project_type, self._create_job_config())
    build.allocate_slave(three_executor_slave)  # all three subjobs are now "in progress"

    # Mock out call to create build artifacts after subjobs complete
    build._create_build_artifact = MagicMock()
    for subjob in subjobs:
        build.mark_subjob_complete(subjob.subjob_id())

    # Note: this was never a unit test! We have to wait for a thread to complete post build
    # actions here. TODO: Fix this
    poll.wait_for(lambda: build._postbuild_tasks_are_finished, 5)

    # Verify build artifacts was called after subjobs completed
    build._create_build_artifact.assert_called_once_with()

    build.finish()
    self.assertTrue(build._subjobs_are_finished)
    self.assertTrue(build._postbuild_tasks_are_finished)
    self.assertTrue(build._teardowns_finished)
    self.assertEqual(build._status(), BuildStatus.FINISHED)
def test_git_project_params_are_modified_for_slave(self):
    """Slave setup should post project params with the master's overrides applied."""
    slave = self._create_slave()
    slave._network.post_with_digest = Mock()

    build_request = BuildRequest({
        'type': 'git',
        'url': 'http://original-user-specified-url',
    })
    git_project_type = Mock(slave_param_overrides=Mock(return_value={
        'url': 'ssh://new-url-for-clusterrunner-master',
        'extra': 'something_extra',
    }))
    build = MagicMock(
        spec=Build,
        build_request=build_request,
        build_id=Mock(return_value=888),
        project_type=git_project_type,
    )

    slave.setup(build, executor_start_index=777)

    expected_setup_url = 'http://{}/v1/build/888/setup'.format(self._FAKE_SLAVE_URL)
    expected_post_body = {
        'build_executor_start_index': 777,
        # The original 'url' is replaced and 'extra' is merged in by slave_param_overrides.
        'project_type_params': {
            'type': 'git',
            'url': 'ssh://new-url-for-clusterrunner-master',
            'extra': 'something_extra'},
    }
    slave._network.post_with_digest.assert_called_with(
        expected_setup_url, expected_post_body, Secret.get())
def test_build_doesnt_use_more_than_max_executors(self):
    """The build must start no more subjob executions than its max_executors setting."""
    subjobs = self._create_subjobs()
    mock_project_type = self._create_mock_project_type()
    fake_setup_command = 'mock command'
    mock_slaves = [self._create_mock_slave(num_executors=5) for _ in range(3)]
    expected_num_executors = 12  # We expect the build to use 12 out of 15 available executors.

    build = Build(BuildRequest({'setup': fake_setup_command}))
    build.execute_next_subjob_on_slave = MagicMock()
    build.prepare(subjobs, mock_project_type,
                  self._create_job_config(max_executors=expected_num_executors))

    # Use plain for-loops: the original list comprehensions were executed purely for
    # their side effects and built throwaway lists of None.
    for mock_slave in mock_slaves:
        build.allocate_slave(mock_slave)
    for mock_slave in mock_slaves:
        build.begin_subjob_executions_on_slave(mock_slave)

    self.assertEqual(
        build.execute_next_subjob_on_slave.call_count, expected_num_executors,
        'Build should start executing as many subjobs as its max_executors setting.')
def test_allocate_slave_increments_by_num_executors_when_max_is_inf(self):
    """With no per-slave cap, allocation counts the slave's full executor complement."""
    build = Build(BuildRequest({}))
    ten_executor_slave = Mock()
    ten_executor_slave.num_executors = 10

    build.allocate_slave(ten_executor_slave)

    self.assertEqual(build._num_executors_allocated, 10,
                     "Should be incremented by num executors")
def test_build_doesnt_use_more_than_max_executors_per_slave(self):
    """The build must start no more executions per slave than max_executors_per_slave."""
    subjobs = self._create_subjobs()
    mock_project_type = self._create_mock_project_type()
    fake_setup_command = 'mock command'
    mock_slaves = [self._create_mock_slave(num_executors=5) for _ in range(3)]
    max_executors_per_slave = 2
    expected_total_num_executors_used = 6  # We expect the build to use 2 executors on each of the 3 slaves.

    build = Build(BuildRequest({'setup': fake_setup_command}))
    build.execute_next_subjob_on_slave = MagicMock()
    build.prepare(subjobs, mock_project_type,
                  self._create_job_config(max_executors_per_slave=max_executors_per_slave))

    # Plain for-loop instead of a side-effect list comprehension.
    for mock_slave in mock_slaves:
        build.allocate_slave(mock_slave)

    expected_current_num_executors_used = 0
    # enumerate(start=1) replaces the index-based range(len(...)) loop.
    for num_slaves_allocated, mock_slave in enumerate(mock_slaves, start=1):
        build.begin_subjob_executions_on_slave(mock_slave)
        expected_current_num_executors_used += max_executors_per_slave
        self.assertEqual(
            build.execute_next_subjob_on_slave.call_count,
            expected_current_num_executors_used,
            'After allocating {} slaves, build with max_executors_per_slave set to {} should only be using {} '
            'executors.'.format(num_slaves_allocated, max_executors_per_slave,
                                expected_current_num_executors_used))

    self.assertEqual(
        build.execute_next_subjob_on_slave.call_count, expected_total_num_executors_used,
        'Build should start executing as many subjobs per slave as its max_executors_per_slave setting.')
def test_subjobs_with_pagination_request(
        self,
        offset: Optional[int],
        limit: Optional[int],
        expected_first_subjob_id: int,
        expected_last_subjob_id: int,
):
    """get_subjobs(offset, limit) should return the expected page of subjobs."""
    build = Build(BuildRequest({}))
    # Create 20 mock subjobs with ids 1 to 20
    for new_subjob_id in range(1, self._NUM_SUBJOBS + 1):
        mock_subjob = Mock(spec=Subjob)
        mock_subjob.subjob_id = new_subjob_id
        build._all_subjobs_by_id[new_subjob_id] = mock_subjob

    page = build.get_subjobs(offset, limit)
    first_subjob_id = page[0].subjob_id if len(page) else None
    last_subjob_id = page[-1].subjob_id if len(page) else None

    self.assertEqual(first_subjob_id, expected_first_subjob_id,
                     'Received the wrong first subjob from request')
    self.assertEqual(last_subjob_id, expected_last_subjob_id,
                     'Received the wrong last subjob from request')
    if offset is not None and limit is not None:
        self.assertLessEqual(len(page), self._PAGINATION_MAX_LIMIT,
                             'Received too many subjobs from request')
def test_handle_result_reported_from_slave_when_build_is_canceled(self):
    """Results reported for a canceled build are still processed and free the executor."""
    canceled_build_id = 1
    reporting_slave_url = "url"
    build = Build(BuildRequest({}))
    self.patch('app.master.build.util')
    build.generate_project_type()
    build.cancel()
    self.patch_object(build, '_handle_subjob_payload')
    self.patch_object(build, '_mark_subjob_complete')

    master = ClusterMaster()
    slave_registry = SlaveRegistry.singleton()
    BuildStore._all_builds_by_id[canceled_build_id] = build
    slave_registry._all_slaves_by_url[reporting_slave_url] = Mock()
    scheduler_mock = self.mock_scheduler_pool.get(build)

    master.handle_result_reported_from_slave(reporting_slave_url, canceled_build_id, 1)

    self.assertEqual(build._handle_subjob_payload.call_count, 1,
                     "Canceled builds should handle payload")
    self.assertEqual(build._mark_subjob_complete.call_count, 1,
                     "Canceled builds should mark their subjobs complete")
    self.assertTrue(scheduler_mock.execute_next_subjob_or_free_executor.called)
def test_deserialized_build_api_representation_is_same_as_original_build_no_failures(self):
    """A build loaded back from the DB should produce the same API representation."""
    original_build = Build(BuildRequest({
        'type': 'git',
        'url': 'git@name/repo.git',
        'job_name': 'Example'
    }))
    original_build.generate_project_type()
    BuildStore.add(original_build)

    loaded_build = Build.load_from_db(original_build.build_id())

    differences = self._compare_dictionaries_with_same_keys(
        original_build.api_representation(), loaded_build.api_representation())
    # The build_project_directory is an auto generated tmp directory -- these will never be the same
    differences.pop('request_params|build_project_directory', None)

    # This is very similar to self.assertDictEqual, but here we won't consider different key orderings
    # as "not equal" which matters because `api_representation` does not have deterministic ordering
    self.assertEqual(differences, {},
                     'Deserialized build is not the same as the original build.')
def test_build_status_returns_requested_after_build_creation(self):
    """A freshly created build immediately reports QUEUED.

    NOTE(review): the test name says "requested" but the asserted status is QUEUED —
    presumably a historical name; confirm before renaming.
    """
    new_build = Build(BuildRequest({}))
    self.assertEqual(
        new_build._status(), BuildStatus.QUEUED,
        'Build status should be QUEUED immediately after build has been created.')
def test_generate_project_type_raises_error_if_failed_to_generate_project(self):
    """generate_project_type() raises BuildProjectError when no project type is created."""
    build = Build(BuildRequest({}))
    mock_create_project_type = self.patch('app.master.build.util.create_project_type')
    mock_create_project_type.return_value = None  # simulate project-type creation failure

    with self.assertRaises(BuildProjectError):
        build.generate_project_type()
def test_validate_update_params_for_cancelling_build(self):
    """A 'canceled' status update is valid and yields an empty error response."""
    build = Build(BuildRequest({}))
    is_valid, error_response = build.validate_update_params({'status': 'canceled'})
    self.assertTrue(is_valid, "Correct status update should report success")
    self.assertEqual({}, error_response, "Error response should be empty")
def test_validate_update_params_rejects_bad_keys(self):
    """An update containing an unknown key is rejected with a descriptive error."""
    build = Build(BuildRequest({}))
    is_valid, error_response = build.validate_update_params({'badkey': 'foo'})
    self.assertFalse(is_valid, "Bad status update reported success")
    self.assertEqual(
        {'error': "Key (badkey) is not in list of allowed keys (status)"},
        error_response, "Error response not expected")
def test_update_state_to_canceled_sets_state_correctly(self):
    """update_state({'status': 'canceled'}) cancels the build and reports success."""
    build = Build(BuildRequest({}))
    build._unstarted_subjobs = Queue()

    update_succeeded = build.update_state({'status': 'canceled'})

    self.assertEqual(build._status(), BuildStatus.CANCELED, "Status not set to canceled")
    self.assertTrue(update_succeeded, "Update did not report success")
def test_allocate_slave_increments_by_per_slave_when_max_not_inf_and_less_than_num(self):
    """When max_executors_per_slave is below the slave's executor count, allocation
    counts only the per-slave cap."""
    build = Build(BuildRequest({}))
    build._max_executors_per_slave = 5
    slave = Mock()
    slave.num_executors = 10

    build.allocate_slave(slave)

    # Fixed assertion message: it was copy-pasted from the uncapped test and wrongly
    # claimed the increment was "num executors"; here the per-slave cap (5) applies.
    self.assertEqual(build._num_executors_allocated, 5,
                     "Should be incremented by max executors per slave")
def test_build_cannot_be_prepared_more_than_once(self):
    """Calling prepare() a second time on the same build must raise."""
    build = Build(BuildRequest({}))
    subjobs = self._create_subjobs(count=3)
    project_type = self._create_mock_project_type()
    build.prepare(subjobs, project_type, self._create_job_config())

    # A second prepare() on an already-prepared build is an error.
    with self.assertRaises(Exception):
        build.prepare(subjobs, project_type, self._create_job_config())
def test_validate_update_params_rejects_bad_params(self):
    """An unknown status value is rejected with a descriptive error."""
    build = Build(BuildRequest({}))
    is_valid, error_response = build.validate_update_params({'status': 'foo'})
    self.assertFalse(is_valid, "Bad status update reported success")
    expected_error = {
        'error': "Value (foo) is not in list of allowed values (['canceled']) for status"
    }
    self.assertEqual(expected_error, error_response, "Error response not expected")
def test_teardown_called_on_slave_when_no_subjobs_remain(self):
    """With an empty subjob queue, asking for the next subjob tears the slave down."""
    build = Build(BuildRequest({}))
    build._unstarted_subjobs = Queue()  # no subjobs left to run

    idle_slave = Slave('', 1)
    idle_slave.teardown = MagicMock()
    idle_slave.free_executor = MagicMock(return_value=0)
    build._slaves_allocated = [idle_slave]

    build.execute_next_subjob_on_slave(idle_slave)

    idle_slave.teardown.assert_called_with()
def test_build_status_returns_queued_after_build_preparation(self):
    """Preparing a build leaves it in QUEUED status until a slave is allocated."""
    subjobs = self._create_subjobs()
    project_type = self._create_mock_project_type()
    build = Build(BuildRequest({}))
    build.prepare(subjobs, project_type, self._create_job_config())

    self.assertEqual(
        build._status(), BuildStatus.QUEUED,
        'Build status should be QUEUED after build has been prepared.')
def test_need_more_slaves_returns_false_if_max_processes_is_reached(self):
    """needs_more_slaves() is False once allocated executors hit max_executors."""
    subjobs = self._create_subjobs(count=5)
    project_type = self._create_mock_project_type()
    single_executor_slave = self._create_mock_slave(num_executors=1)
    build = Build(BuildRequest({}))
    build.prepare(subjobs, project_type, self._create_job_config(max_executors=1))

    # One allocated executor meets the max_executors=1 cap exactly.
    build.allocate_slave(single_executor_slave)

    self.assertFalse(build.needs_more_slaves(),
                     "if max processes is reached, we shouldn't need more slaves")
def test_cancel_depletes_queue_and_sets_canceled(self):
    """cancel() marks the build canceled and drains the unstarted subjob queue."""
    build = Build(BuildRequest({}))
    build._unstarted_subjobs = Queue()
    build._unstarted_subjobs.put(1)  # one pending subjob that must be discarded
    build._slaves_allocated = [Mock()]

    build.cancel()

    self.assertTrue(build._is_canceled, "Build should've been canceled")
    self.assertTrue(build._unstarted_subjobs.empty(),
                    "Build's unstarted subjobs should've been depleted")
def test_cancel_exits_early_if_build_not_running(self):
    """cancel() is a no-op when the build has already finished."""
    build = Build(BuildRequest({}))
    build._unstarted_subjobs = Queue()
    mock_slave = Mock()
    build._slaves_allocated = [mock_slave]
    build._status = Mock(return_value=BuildStatus.FINISHED)  # pretend build already finished

    build.cancel()

    self.assertFalse(build._is_canceled, "Build should not be canceled")
    self.assertEqual(mock_slave.teardown.call_count, 0,
                     "Teardown should not have been called")
def test_teardown_called_on_slave_when_slave_in_shutdown_mode(self):
    """A slave in shutdown mode is torn down even though subjobs remain queued."""
    build = Build(BuildRequest({}))
    shutdown_slave = Slave('', 1)
    shutdown_slave.teardown = MagicMock()
    shutdown_slave._is_in_shutdown_mode = True
    shutdown_slave.free_executor = MagicMock(return_value=0)

    build._unstarted_subjobs = Queue()
    build._unstarted_subjobs.put(Mock(spec=Subjob))  # work remains, but slave must still tear down
    build._slaves_allocated = [shutdown_slave]

    build.execute_next_subjob_or_teardown_slave(shutdown_slave)

    shutdown_slave.teardown.assert_called_with()
def test_need_more_slaves_returns_true_if_max_processes_is_not_reached(self):
    """needs_more_slaves() is True while allocated executors are below max_executors."""
    subjobs = self._create_subjobs(count=8)
    project_type = self._create_mock_project_type()
    five_executor_slave = self._create_mock_slave(num_executors=5)
    build = Build(BuildRequest({}))
    build._project_type = project_type
    build.prepare(subjobs, self._create_job_config(max_executors=8))

    # Only 5 of the allowed 8 executors are allocated, so more slaves are needed.
    build.allocate_slave(five_executor_slave)

    self.assertTrue(build.needs_more_slaves(),
                    "if max_processes is not reached, we should need more slaves")
def test_execute_next_subjob_with_empty_queue_cant_teardown_same_slave_twice(self):
    """Repeated calls with an empty queue must not tear down the same slave twice."""
    build = Build(BuildRequest({}))
    build._unstarted_subjobs = Queue()
    idle_slave = Mock()
    idle_slave.free_executor = Mock(return_value=0)
    build._slaves_allocated.append(idle_slave)

    # Two consecutive calls against an empty queue: teardown should happen once only.
    build.execute_next_subjob_on_slave(idle_slave)
    build.execute_next_subjob_on_slave(idle_slave)

    self.assertEqual(idle_slave.teardown.call_count, 1,
                     "Teardown should only be called once")
def test_build_status_returns_building_after_setup_has_started(self):
    """Allocating a slave moves the build into BUILDING status."""
    subjobs = self._create_subjobs()
    project_type = self._create_mock_project_type()
    slave = self._create_mock_slave()
    build = Build(BuildRequest({}))
    build.prepare(subjobs, project_type, self._create_job_config())

    build.allocate_slave(slave)

    self.assertEqual(
        build._status(), BuildStatus.BUILDING,
        'Build status should be BUILDING after setup has started on slaves.')
def test_exception_is_raised_if_problem_occurs_writing_subjob(self):
    """complete_subjob() propagates filesystem errors raised while writing results."""
    Configuration['results_directory'] = '/tmp/results'
    build = Build(BuildRequest({}))
    build._project_type = self._create_mock_project_type()
    subjob = self._create_subjobs(count=1, build_id=build.build_id())[0]
    build.prepare([subjob], self._create_job_config())

    self.mock_fs.write_file.side_effect = FileExistsError  # simulate a write failure
    payload = {'filename': 'turtles.txt', 'body': 'Heroes in a half shell.'}

    with self.assertRaises(Exception):
        build.complete_subjob(subjob.subjob_id(), payload=payload)
def test_build_status_returns_building_after_setup_is_complete_and_subjobs_are_executing(self):
    """A build with in-flight subjobs (but not all of them) reports BUILDING."""
    subjobs = self._create_subjobs(count=3)
    project_type = self._create_mock_project_type()
    two_executor_slave = self._create_mock_slave(num_executors=2)
    build = Build(BuildRequest({}))
    build.prepare(subjobs, project_type, self._create_job_config())
    build.allocate_slave(two_executor_slave)

    build.begin_subjob_executions_on_slave(two_executor_slave)  # two out of three subjobs are now in progress

    self.assertEqual(
        build._status(), BuildStatus.BUILDING,
        'Build status should be BUILDING after subjobs have started executing on slaves.')