def get(self):
    """List all Koji builds.

    Returns a 206 (Partial Content) response whose ``Content-Range``
    header describes the slice of builds included.
    """
    first, last = indices()
    builds = []
    for build in KojiBuildModel.get_range(first, last):
        entry = {
            "build_id": build.build_id,
            "status": build.status,
            "build_submitted_time": optional_time(build.build_submitted_time),
            "chroot": build.target,
            "web_url": build.web_url,
            # from old data, sometimes build_logs_url is same and sometimes
            # different to web_url
            "build_logs_url": build.build_logs_url,
            "pr_id": build.get_pr_id(),
            "branch_name": build.get_branch_name(),
            "release": build.get_release_tag(),
        }
        project = build.get_project()
        if project:
            entry["project_url"] = project.project_url
            entry["repo_namespace"] = project.namespace
            entry["repo_name"] = project.repo_name
        builds.append(entry)

    resp = response_maker(builds, status=HTTPStatus.PARTIAL_CONTENT.value)
    resp.headers["Content-Range"] = f"koji-builds {first + 1}-{last}/*"
    return resp
def get(self):
    """List all Testing Farm results.

    Returns a 206 (Partial Content) response whose ``Content-Range``
    header describes the slice of results included.
    """
    result = []
    first, last = indices()
    # results have nothing other than ref in common, so it doesn't make
    # sense to merge them like copr builds
    for tf_result in TFTTestRunModel.get_range(first, last):
        result_dict = {
            "pipeline_id": tf_result.pipeline_id,
            "ref": tf_result.commit_sha,
            "status": tf_result.status,
            "target": tf_result.target,
            "web_url": tf_result.web_url,
            "pr_id": tf_result.get_pr_id(),
        }
        project = tf_result.get_project()
        # Guard against results without an associated project; without this
        # a None project raises AttributeError (the Koji/SRPM listings in
        # this file already guard the same way).
        if project:
            result_dict["repo_namespace"] = project.namespace
            result_dict["repo_name"] = project.repo_name
            result_dict["project_url"] = project.project_url
        result.append(result_dict)

    resp = response_maker(result, status=HTTPStatus.PARTIAL_CONTENT.value)
    resp.headers["Content-Range"] = f"test-results {first + 1}-{last}/*"
    return resp
def get(self):
    """List all GitProjects."""
    first, last = indices()
    projects = GitProjectModel.get_projects(first, last)
    if not projects:
        return response_maker([])

    # One summary dict per project with counts of handled events.
    result = [
        {
            "namespace": project.namespace,
            "repo_name": project.repo_name,
            "project_url": project.project_url,
            "prs_handled": len(project.pull_requests),
            "branches_handled": len(project.branches),
            "releases_handled": len(project.releases),
            "issues_handled": len(project.issues),
        }
        for project in projects
    ]

    resp = response_maker(result, status=HTTPStatus.PARTIAL_CONTENT.value)
    resp.headers["Content-Range"] = f"git-projects {first + 1}-{last}/*"
    return resp
def get(self):
    """List all Copr builds, merged across chroots.

    Returns concise per-build info (use case: the packit-dashboard
    copr-builds table) as a 206 response with a ``Content-Range`` header.
    """
    result = []
    first, last = indices()
    for build in CoprBuildModel.get_merged_chroots(first, last):
        build_info = CoprBuildModel.get_by_build_id(build.build_id, None)
        project_info = build_info.get_project()
        build_dict = {
            "project": build_info.project_name,
            "build_id": build.build_id,
            "status_per_chroot": {},
            "build_submitted_time": optional_time(build_info.build_submitted_time),
            "web_url": build_info.web_url,
            "ref": build_info.commit_sha,
            "pr_id": build_info.get_pr_id(),
            "branch_name": build_info.get_branch_name(),
            # Guard: a build may have no associated project (the other
            # listings in this file check this) — emit None instead of
            # raising AttributeError.
            "repo_namespace": project_info.namespace if project_info else None,
            "repo_name": project_info.repo_name if project_info else None,
        }
        for count, chroot in enumerate(build.target):
            # [0] because sqlalchemy returns a single element sub-list
            build_dict["status_per_chroot"][chroot[0]] = build.status[count][0]
        result.append(build_dict)

    # .value for consistency with the other list endpoints in this file.
    resp = response_maker(result, status=HTTPStatus.PARTIAL_CONTENT.value)
    resp.headers["Content-Range"] = f"copr-builds {first + 1}-{last}/*"
    return resp
def get(self):
    """List all GitProjects."""
    first, last = indices()
    projects = GitProjectModel.get_projects(first, last)
    if not projects:
        return ([], HTTPStatus.OK)

    # One summary dict per project with counts of handled events.
    result = [
        {
            "namespace": project.namespace,
            "repo_name": project.repo_name,
            "project_url": project.project_url,
            "prs_handled": len(project.pull_requests),
            "branches_handled": len(project.branches),
            "releases_handled": len(project.releases),
            "issues_handled": len(project.issues),
        }
        for project in projects
    ]

    resp = make_response(dumps(result), HTTPStatus.PARTIAL_CONTENT)
    resp.headers["Content-Range"] = f"git-projects {first + 1}-{last}/*"
    resp.headers["Content-Type"] = "application/json"
    resp.headers["Access-Control-Allow-Origin"] = "*"
    return resp
def get(self, forge):
    """List of projects of given forge (e.g. github.com, gitlab.com)."""
    first, last = indices()
    result = [
        {
            "namespace": project.namespace,
            "repo_name": project.repo_name,
            "project_url": project.project_url,
            "prs_handled": len(project.pull_requests),
            "branches_handled": len(project.branches),
            "releases_handled": len(project.releases),
            "issues_handled": len(project.issues),
        }
        for project in GitProjectModel.get_forge(first, last, forge)
    ]
    if not result:
        return response_maker([])

    resp = response_maker(result, status=HTTPStatus.PARTIAL_CONTENT.value)
    resp.headers["Content-Range"] = f"git-projects {first + 1}-{last}/*"
    return resp
def get(self):
    """List all SRPM builds."""
    first, last = indices()
    builds = []
    for build in SRPMBuildModel.get(first, last):
        entry = {
            "srpm_build_id": build.id,
            "success": build.success,
            "log_url": get_srpm_build_info_url(build.id),
            "build_submitted_time": optional_timestamp(build.build_submitted_time),
        }
        # It's possible that the job trigger isn't stored in the db,
        # in which case there is no project to report.
        project = build.get_project()
        if project:
            entry["repo_namespace"] = project.namespace
            entry["repo_name"] = project.repo_name
            entry["project_url"] = project.project_url
            entry["pr_id"] = build.get_pr_id()
            entry["branch_name"] = build.get_branch_name()
        builds.append(entry)

    resp = response_maker(builds, status=HTTPStatus.PARTIAL_CONTENT.value)
    resp.headers["Content-Range"] = f"srpm-builds {first + 1}-{last}/*"
    return resp
def get(self):
    """List all Testing Farm results.

    Returns a 206 (Partial Content) JSON response with a
    ``Content-Range`` header describing the slice of results included.
    """
    result = []
    first, last = indices()
    # results have nothing other than ref in common, so it doesn't make
    # sense to merge them like copr builds
    for tf_result in TFTTestRunModel.get_range(first, last):
        result_dict = {
            "pipeline_id": tf_result.pipeline_id,
            "ref": tf_result.commit_sha,
            "status": tf_result.status,
            "target": tf_result.target,
            "web_url": tf_result.web_url,
            "pr_id": tf_result.get_pr_id(),
        }
        project = tf_result.get_project()
        # Guard against results without an associated project; without this
        # a None project raises AttributeError.
        if project:
            result_dict["repo_namespace"] = project.namespace
            result_dict["repo_name"] = project.repo_name
        result.append(result_dict)

    resp = make_response(dumps(result), HTTPStatus.PARTIAL_CONTENT)
    resp.headers["Content-Range"] = f"test-results {first + 1}-{last}/*"
    resp.headers["Content-Type"] = "application/json"
    resp.headers["Access-Control-Allow-Origin"] = "*"
    return resp
def get(self):
    """List all runs."""
    first, last = indices()
    runs = process_runs(RunModel.get_merged_chroots(first, last))
    resp = response_maker(runs, status=HTTPStatus.PARTIAL_CONTENT.value)
    resp.headers["Content-Range"] = f"runs {first + 1}-{last}/*"
    return resp
def get(self):
    """List all Copr builds.

    Returns concise per-build info (use case: the packit-dashboard
    copr-builds table) as a 206 JSON response with a ``Content-Range``
    header. Builds created by the same trigger differ only in target,
    so they are merged into one entry per build_id.
    """
    result = []
    # Set instead of a list: membership checks are O(1) instead of an
    # O(n) scan per build (the list version was accidentally quadratic).
    seen_build_ids = set()
    first, last = indices()
    for build in islice(CoprBuildModel.get_all(), first, last):
        build_id = int(build.build_id)
        if build_id in seen_build_ids:
            continue
        build_dict = {
            "project": build.project_name,
            "owner": build.owner,
            "build_id": build.build_id,
            "status": build.status,  # Legacy, remove later.
            "status_per_chroot": {},
            "chroots": [],
            "build_submitted_time": optional_time(build.build_submitted_time),
            "web_url": build.web_url,
        }
        project = build.get_project()
        if project:
            build_dict["repo_namespace"] = project.namespace
            build_dict["repo_name"] = project.repo_name

        # same_buildid_builds are copr builds created due to the same trigger
        # multiple identical builds are created which differ only in target
        # so we merge them into one
        same_buildid_builds = CoprBuildModel.get_all_by_build_id(str(build.build_id))
        for sbid_build in same_buildid_builds:
            build_dict["chroots"].append(sbid_build.target)
            # Get status per chroot as well
            build_dict["status_per_chroot"][sbid_build.target] = sbid_build.status

        seen_build_ids.add(build_id)
        result.append(build_dict)

    resp = make_response(dumps(result), HTTPStatus.PARTIAL_CONTENT)
    resp.headers["Content-Range"] = f"copr-builds {first + 1}-{last}/{len(result)}"
    resp.headers["Content-Type"] = "application/json"
    return resp
def get(self):
    """List all Celery tasks / jobs."""
    first, last = indices()
    tasks = []
    for task in islice(TaskResultModel.get_all(), first, last):
        entry = task.to_dict()
        # timestamp to datetime string
        entry["event"] = Event.ts2str(entry["event"])
        tasks.append(entry)

    resp = make_response(dumps(tasks), HTTPStatus.PARTIAL_CONTENT)
    resp.headers["Content-Range"] = f"tasks {first+1}-{last}/{len(tasks)}"
    resp.headers["Content-Type"] = "application/json"
    return resp
def get(self):
    """List all Koji builds."""
    first, last = indices()
    result = [
        build.api_structure
        for build in islice(KojiBuildModel.get_all(), first, last)
    ]
    resp = make_response(dumps(result), HTTPStatus.PARTIAL_CONTENT)
    resp.headers["Content-Range"] = f"koji-builds {first + 1}-{last}/{len(result)}"
    resp.headers["Content-Type"] = "application/json"
    return resp
def get(self, forge, namespace, repo_name):
    """List PRs of the given project with their Copr builds and test runs.

    Returns a 206 (Partial Content) response whose ``Content-Range``
    header describes the slice of PRs included.
    """
    first, last = indices()
    pr_list = GitProjectModel.get_project_prs(first, last, forge, namespace, repo_name)
    if not pr_list:
        return response_maker([])

    result = []
    for pr in pr_list:
        pr_info = {
            "pr_id": pr.pr_id,
            "builds": [
                {
                    "build_id": build.build_id,
                    "chroot": build.target,
                    "status": build.status,
                    "web_url": build.web_url,
                }
                for build in pr.get_copr_builds()
            ],
            "tests": [
                {
                    "pipeline_id": test_run.pipeline_id,
                    "chroot": test_run.target,
                    "status": str(test_run.status),
                    "web_url": test_run.web_url,
                }
                for test_run in pr.get_test_runs()
            ],
        }
        result.append(pr_info)

    # .value for consistency with the other list endpoints in this file.
    resp = response_maker(result, status=HTTPStatus.PARTIAL_CONTENT.value)
    resp.headers["Content-Range"] = f"git-project-prs {first + 1}-{last}/*"
    return resp
def get(self):
    """List all Copr builds. From 'copr-builds' hash, filled by service."""
    # I know it's expensive to first convert whole dict to a list and then
    # slice the list, but how else would you slice a dict, huh?
    builds = []
    for key, cb in CoprBuild.db().get_all().items():
        if key == LAST_PK:
            # made-up keys, remove LAST_PK
            continue
        cb.pop("identifier", None)  # see PR#179
        builds.append(cb)

    first, last = indices()
    resp = make_response(dumps(builds[first:last]), HTTPStatus.PARTIAL_CONTENT)
    resp.headers["Content-Range"] = f"copr-builds {first + 1}-{last}/{len(builds)}"
    resp.headers["Content-Type"] = "application/json"
    return resp
def get(self):
    """List all Celery tasks / jobs."""
    first, last = indices()
    # The db.keys() always returns all matched keys, but there's no better
    # way with redis. Use islice (instead of [first:last]) to at least
    # create an iterator instead of a new list.
    keys = db.keys("celery-task-meta-*")

    tasks = []
    for key in islice(keys, first, last):
        raw = db.get(key)
        if not raw:
            continue
        data = loads(raw)
        event = nested_get(data, "result", "event")
        if event:
            # timestamp to datetime string
            data["result"]["event"] = Event.ts2str(event)
        tasks.append(data)

    resp = make_response(dumps(tasks), HTTPStatus.PARTIAL_CONTENT)
    resp.headers["Content-Range"] = f"tasks {first+1}-{last}/{len(keys)}"
    resp.headers["Content-Type"] = "application/json"
    return resp