def handle_request_for_new_build(self, build_params):
    """
    Create a new Build from the given parameters and add it to the request queue to be processed.

    :param build_params: The parameters describing the requested build
    :type build_params: dict[str, str]
    :return: Whether the request succeeded, and the response payload to send back
    :rtype: tuple [bool, dict [str, str]]
    """
    # todo: refactor to use exception instead of boolean
    build_request = BuildRequest(build_params)

    if build_request.is_valid():
        build = Build(build_request)
        BuildStore.add(build)
        build.generate_project_type()  # WIP(joey): This should be internal to the Build object.
        self._build_request_handler.handle_build_request(build)
        return True, {'build_id': build.build_id()}

    if not build_request.is_valid_type():
        return False, {'error': 'Invalid build request type.'}

    error_message = 'Missing required parameter. Required parameters: {}'.format(
        build_request.required_parameters())
    return False, {'error': error_message}
def test_deserialized_build_api_representation_is_same_as_original_build_no_failures(self):
    """Round-trip a build through the BuildStore and verify its API representation survives."""
    request = BuildRequest({
        'type': 'git',
        'url': 'git@name/repo.git',
        'job_name': 'Example',
    })
    original_build = Build(request)
    original_build.generate_project_type()
    BuildStore.add(original_build)

    loaded_build = Build.load_from_db(original_build.build_id())

    diff = self._compare_dictionaries_with_same_keys(
        original_build.api_representation(), loaded_build.api_representation())
    # The build_project_directory is an auto generated tmp directory -- these will never be the same
    diff.pop('request_params|build_project_directory', None)

    # This is very similar to self.assertDictEqual, but here we won't consider different key orderings
    # as "not equal" which matters because `api_representation` does not have deterministic ordering
    self.assertEqual(diff, {}, 'Deserialized build is not the same as the original build.')
def get_builds(self, offset: int = None, limit: int = None) -> List['Build']:
    """
    Return a list of all builds, optionally paginated.

    :param offset: The starting index of the requested build
    :param limit: The number of builds requested
    """
    total_builds = BuildStore.count_all_builds()
    first, last = get_paginated_indices(offset, limit, total_builds)
    return BuildStore.get_range(first, last)
def test_get_build_from_store(self, build_id, expected_build_id):
    """A lookup in the BuildStore should return the expected build (or nothing)."""
    build = BuildStore.get(build_id)
    if build is not None:
        self.assertEqual(build.build_id(), expected_build_id,
                         'Got the wrong build from BuildStore.')
    else:
        self.assertEqual(None, expected_build_id,
                         'Couldn\'t find build in BuildStore.')
def get_build(self, build_id):
    """
    Look up a build by id.

    :param build_id: The id for the build whose status we are getting
    :type build_id: int
    :rtype: Build
    :raises ItemNotFoundError: if no build exists with the given id
    """
    build = BuildStore.get(build_id)
    if build is not None:
        return build
    raise ItemNotFoundError('Invalid build id: {}.'.format(build_id))
def handle_request_to_update_build(self, build_id, update_params):
    """
    Update the state of a build with the values passed in. Used for cancelling running builds.

    :type build_id: int
    :param update_params: The fields that should be updated and their new values
    :type update_params: dict [str, str]
    :return: The success/failure and the response we want to send to the requestor
    :rtype: tuple [bool, dict [str, str]]
    :raises ItemNotFoundError: if the given id does not correspond to a stored build
    """
    build = BuildStore.get(int(build_id))
    if build is None:
        raise ItemNotFoundError('Invalid build id.')

    params_are_valid, error_response = build.validate_update_params(update_params)
    if params_are_valid:
        return build.update_state(update_params), {}
    return params_are_valid, error_response
def get_path_for_build_results_archive(self, build_id: int, is_tar_request: bool = False) -> str:
    """
    Given a build id, get the absolute file path for the archive file containing the build results.

    :param build_id: The build id for which to retrieve the artifacts archive file
    :param is_tar_request: If true, download the tar.gz archive instead of a zip.
    :return: The path to the archived results file
    :raises ItemNotFoundError: if the given id does not correspond to a stored build
    :raises ItemNotReadyError: if the requested archive has not been generated yet
    """
    build = BuildStore.get(build_id)
    if build is None:
        raise ItemNotFoundError('Invalid build id.')

    if is_tar_request:
        archive_file = build.artifacts_tar_file
    else:
        archive_file = build.artifacts_zip_file

    if archive_file is None:
        raise ItemNotReadyError('Build artifact file is not yet ready. Try again later.')
    return archive_file
def handle_result_reported_from_slave(self, slave_url, build_id, subjob_id, payload=None):
    """
    Process the result and dispatch the next subjob.

    :type slave_url: str
    :type build_id: int
    :type subjob_id: int
    :type payload: dict
    :rtype: str
    :raises ItemNotFoundError: if the given build id does not correspond to a stored build
    """
    self._logger.info(
        'Results received from {} for subjob. (Build {}, Subjob {})',
        slave_url, build_id, subjob_id)
    build = BuildStore.get(int(build_id))
    if build is None:
        # Guard against a missing build up front (consistent with the other BuildStore lookups);
        # previously a None here caused an AttributeError inside the try block and then a
        # scheduler lookup on None in the finally block.
        raise ItemNotFoundError('Invalid build id.')
    slave = self._all_slaves_by_url[slave_url]
    try:
        build.complete_subjob(subjob_id, payload)
    finally:
        # Always free the executor / dispatch the next subjob, even if completing this one failed.
        scheduler = self._scheduler_pool.get(build)
        self._thread_pool_executor.submit(
            scheduler.execute_next_subjob_or_free_executor, slave=slave)
def test_add_build_to_store_sets_build_id(self, expected_build_id):
    """Adding a build to the BuildStore should assign it the expected build id."""
    new_build = Build(BuildRequest({}))
    BuildStore.add(new_build)
    self.assertEqual(new_build.build_id(), expected_build_id,
                     'The wrong build_id was set.')