def test_merged_chroots_on_tests_without_build(
    clean_before_and_after, runs_without_build
):
    result = list(RunModel.get_merged_chroots(0, 10))
    assert len(result) == 2
    for item in result:
        assert len(item.test_run_id[0]) == 1
def test_process_runs_without_build(clean_before_and_after, runs_without_build):
    merged_runs = RunModel.get_merged_chroots(0, 10)
    result = process_runs(merged_runs)
    for item in result:
        assert not item["srpm"]
        assert item["time_submitted"]
        assert len(item["test_run"]) == 1
        assert item["trigger"]
def get(self):
    """List all runs."""
    first, last = indices()
    result = process_runs(RunModel.get_merged_chroots(first, last))
    resp = response_maker(
        result,
        status=HTTPStatus.PARTIAL_CONTENT.value,
    )
    resp.headers["Content-Range"] = f"runs {first + 1}-{last}/*"
    return resp
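# Illustrative consumer sketch for the paginated endpoint above: the view
# returns 206 Partial Content plus a "runs <first+1>-<last>/*" Content-Range
# header. The base URL and the page/per_page query parameters are assumptions
# made for illustration; the real deployment and the indices() helper may differ.
import requests


def fetch_runs_page(base_url: str, page: int = 1, per_page: int = 10) -> list:
    resp = requests.get(
        f"{base_url}/api/runs", params={"page": page, "per_page": per_page}
    )
    # HTTPStatus.PARTIAL_CONTENT is 206; the header says which slice we got.
    print(resp.status_code, resp.headers.get("Content-Range"))
    return resp.json()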
def test_merged_runs(clean_before_and_after, few_runs):
    for i, run_id in enumerate(few_runs, 1):
        merged_run = RunModel.get_merged_run(run_id)
        srpm_build_id = merged_run.srpm_build_id

        # for different_pr (i=1) the Copr builds appear twice, since the second
        # run of TFT from the same Copr build produces a new row with the same
        # SRPM and Copr IDs, but different Testing Farm IDs
        # ^ handled in the API by iterating over a set of IDs instead of a list
        assert len(merged_run.copr_build_id) == 2 * i

        for copr_build in map(
            lambda ids: CoprBuildModel.get_by_id(ids[0]), merged_run.copr_build_id
        ):
            assert copr_build.get_srpm_build().id == srpm_build_id

        assert len(merged_run.test_run_id) == 2 * i
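# Minimal sketch of the deduplication mentioned in the comment above: when the
# same Copr build shows up in several merged rows (one per Testing Farm run),
# iterating over a set of build IDs processes each build only once. The data
# below is made up for illustration, not taken from the fixtures.
def unique_build_ids(copr_build_id_rows):
    return {ids[0] for ids in copr_build_id_rows}


assert unique_build_ids(
    [(1, "fedora-35-x86_64"), (1, "fedora-35-x86_64"), (2, "fedora-36-x86_64")]
) == {1, 2}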
def get(self, id):
    """Return details for given run."""
    run = RunModel.get_run(id_=id)
    if not run:
        return response_maker(
            {"error": "No run has been found in DB"},
            status=HTTPStatus.NOT_FOUND.value,
        )
    result = {
        "run_id": run.id,
        "trigger": get_project_info_from_build(run.srpm_build),
        "srpm_build_id": run.srpm_build_id,
        "copr_build_id": run.copr_build_id,
        "koji_build_id": run.koji_build_id,
        "test_run_id": run.test_run_id,
    }
    return response_maker(result)
def build_model(self) -> Optional[KojiBuildModel]:
    if not super().build_model:
        self._build_model = KojiBuildModel.create(
            build_id=str(self.build_id),
            commit_sha=self._commit_sha,
            web_url=self.web_url,
            target="noarch",  # TODO: where to get this info from?
            status=self.state.value,
            run_model=RunModel.create(
                type=JobTriggerModelType.branch_push,
                trigger_id=GitBranchModel.get_or_create(
                    branch_name=self.branch_name,
                    repo_name=self.repo_name,
                    namespace=self.namespace,
                    project_url=self.project_url,
                ).id,
            ),
        )
    return self._build_model
def get(self, id):
    """Return details for merged run."""
    if result := process_runs([RunModel.get_merged_run(id)]):
        return response_maker(result[0])
    # without a fallback the view would implicitly return None
    return response_maker(
        {"error": "No run has been found in DB"},
        status=HTTPStatus.NOT_FOUND.value,
    )
def run_testing_farm(
    self, chroot: str, build: Optional["CoprBuildModel"]
) -> TaskResults:
    if chroot not in self.tests_targets:
        # Leaving here just to be sure that we will discover this situation if it occurs.
        # Currently not possible to trigger this situation.
        msg = f"Target '{chroot}' not defined for tests but triggered."
        logger.error(msg)
        send_to_sentry(PackitConfigException(msg))
        return TaskResults(
            success=False,
            details={"msg": msg},
        )

    if not self.skip_build and chroot not in self.build_targets:
        self.report_missing_build_chroot(chroot)
        return TaskResults(
            success=False,
            details={
                "msg": f"Target '{chroot}' not defined for build. "
                "Cannot run tests without build."
            },
        )

    if (
        self.job_config.metadata.use_internal_tf
        and f"{self.project.service.hostname}/{self.project.full_repo_name}"
        not in self.service_config.enabled_projects_for_internal_tf
    ):
        self.report_status_to_test_for_chroot(
            state=BaseCommitStatus.neutral,
            description="Internal TF not allowed for this project. Let us know.",
            chroot=chroot,
            url="https://packit.dev/#contact",
        )
        return TaskResults(
            success=True,
            details={"msg": "Project not allowed to use internal TF."},
        )

    self.report_status_to_test_for_chroot(
        state=BaseCommitStatus.running,
        description=f"{'Build succeeded. ' if not self.skip_build else ''}"
        f"Submitting the tests ...",
        chroot=chroot,
    )

    logger.info("Sending testing farm request...")

    if self.is_fmf_configured():
        artifact = (
            self._artifact(chroot, int(build.build_id), build.built_packages)
            if not self.skip_build
            else None
        )
        payload = self._payload(chroot, artifact, build)
    elif not self.is_fmf_configured() and not self.skip_build:
        payload = self._payload_install_test(int(build.build_id), chroot)
    else:
        return TaskResults(
            success=True, details={"msg": "No actions for TestingFarmHandler."}
        )

    endpoint = "requests"
    logger.debug(f"POSTing {payload} to {self.tft_api_url}{endpoint}")
    req = self.send_testing_farm_request(
        endpoint=endpoint,
        method="POST",
        data=payload,
    )
    logger.debug(f"Request sent: {req}")

    if not req:
        msg = "Failed to post request to testing farm API."
        logger.debug(msg)
        self.report_status_to_test_for_chroot(
            state=BaseCommitStatus.error,
            description=msg,
            chroot=chroot,
        )
        return TaskResults(success=False, details={"msg": msg})

    # success set check on pending
    if req.status_code != 200:
        # something went wrong
        if req.json() and "errors" in req.json():
            msg = req.json()["errors"]
            # specific case, unsupported arch
            if nested_get(req.json(), "errors", "environments", "0", "arch"):
                msg = req.json()["errors"]["environments"]["0"]["arch"]
        else:
            msg = f"Failed to submit tests: {req.reason}"
        logger.error(msg)
        self.report_status_to_test_for_chroot(
            state=BaseCommitStatus.failure,
            description=msg,
            chroot=chroot,
        )
        return TaskResults(success=False, details={"msg": msg})

    # Response: {"id": "9fa3cbd1-83f2-4326-a118-aad59f5", ...}
    pipeline_id = req.json()["id"]
    logger.debug(
        f"Submitted ({req.status_code}) to testing farm as request {pipeline_id}"
    )

    run_model = (
        RunModel.create(
            type=self.db_trigger.job_trigger_model_type,
            trigger_id=self.db_trigger.id,
        )
        if self.skip_build
        else build.runs[-1]
    )

    created_model = TFTTestRunModel.create(
        pipeline_id=pipeline_id,
        commit_sha=self.metadata.commit_sha,
        status=TestingFarmResult.new,
        target=chroot,
        web_url=None,
        run_model=run_model,
        # In _payload() we ask TF to test commit_sha of fork (PR's source).
        # Store original url. If this proves to work, make it a separate column.
        data={"base_project_url": self.project.get_web_url()},
    )

    self.report_status_to_test_for_chroot(
        state=BaseCommitStatus.running,
        description="Tests have been submitted ...",
        url=get_testing_farm_info_url(created_model.id),
        chroot=chroot,
    )

    return TaskResults(success=True, details={})