def parse_header_taskcluster(request):
    """Authenticate a request using a Taskcluster Hawk ``Authorization`` header.

    Falls back to the (non-standard) ``Authentication`` header when
    ``Authorization`` is absent. Returns a ``TaskclusterUser`` built from the
    Taskcluster auth service response on success, ``NO_AUTH`` otherwise.
    """
    auth_header = request.headers.get("Authorization")
    if not auth_header:
        auth_header = request.headers.get("Authentication")
    if not auth_header:
        return NO_AUTH
    if not auth_header.startswith("Hawk"):
        return NO_AUTH

    # Get endpoint configuration: split an explicit port off the host.
    # rsplit(":", 1) keeps IPv6 literals like "[::1]:8080" from raising a
    # ValueError on unpacking (plain split(":") yields too many parts).
    if ":" in request.host:
        host, port = request.host.rsplit(":", 1)
    else:
        host = request.host
        # Behind a proxy the original port arrives in X-Forwarded-Port.
        port = request.environ.get("HTTP_X_FORWARDED_PORT")
        if port is None:
            # Default port for the scheme (was a `cond and a or b` chain).
            port = 443 if request.scheme == "https" else 80

    method = request.method.lower()

    # Build taskcluster payload for the Hawk authentication check.
    payload = {
        "resource": request.path,
        "method": method,
        "host": host,
        "port": int(port),
        "authorization": auth_header,
    }

    # Auth with taskcluster; any failure (service error or explicit
    # rejection) is logged and treated as unauthenticated.
    auth = get_service("auth")
    try:
        resp = auth.authenticateHawk(payload)
        if resp.get("status") != "auth-success":
            raise Exception("Taskcluster rejected the authentication")
    except Exception as e:
        logger.info("Taskcluster auth error for payload %s: %s", payload, e)
        return NO_AUTH

    return TaskclusterUser(resp)
def app_heartbeat():
    """Probe the Auth0 and Taskcluster services; return a list of check errors.

    An empty list means both backends answered.
    """
    config = flask.current_app.config
    problems = []

    # Auth0: its /test endpoint is expected to include a "clock" field.
    try:
        domain = config.get("AUTH_DOMAIN")
        response = requests.get(f"https://{domain}/test")
        assert "clock" in response.json()
    except Exception:
        logger.info("Auth0 heartbeat error")
        problems.append(
            checks.Error("Cannot connect to the mozilla auth0 service.", id="auth.auth0"))

    # Taskcluster: the auth service's ping must report itself alive.
    taskcluster_auth = get_service("auth")
    try:
        pong = taskcluster_auth.ping()
        assert pong["alive"] is True
    except Exception:
        logger.info("Taskcluster heartbeat error")
        problems.append(
            checks.Error("Cannot connect to the Taskcluster service.", id="auth.taskcluster"))

    return problems
def abandon_release(name):
    """Abort the release identified by *name*.

    Requires the product-specific ``abandon_release`` permission. For every
    submitted, non-skipped phase, triggers the phase's ``cancel-all``
    Taskcluster action hook (when present), then marks the release as
    ``aborted``, commits, and notifies via Matrix.

    Aborts the request with 404 (unknown release), 401 (missing permission)
    or 400 (hook trigger failure). Returns the release as JSON.
    """
    session = current_app.db.session
    release = session.query(Release).filter(
        Release.name == name).first_or_404()
    # we must require scope which depends on product
    required_permission = f"{SCOPE_PREFIX}/abandon_release/{release.product}"
    if not current_user.has_permissions(required_permission):
        user_permissions = ", ".join(current_user.get_permissions())
        abort(
            401,
            f"required permission: {required_permission}, user permissions: {user_permissions}"
        )
    # Cancel all submitted task groups first
    for phase in filter(lambda x: x.submitted and not x.skipped, release.phases):
        try:
            actions = get_actions(phase.task_id)
            parameters = get_parameters(phase.task_id)
            cancel_action = find_action("cancel-all", actions)
            if not cancel_action:
                # Not every task graph exposes cancel-all; skip quietly.
                logger.info(
                    "%s %s does not have `cancel-all` action, skipping...",
                    release.name, phase.name)
                continue
        except ArtifactNotFound:
            # The decision/action task never produced artifacts — nothing to cancel.
            logger.info("Ignoring not completed action task %s", phase.task_id)
            continue
        hook = generate_action_hook(task_group_id=phase.task_id,
                                    action_name="cancel-all",
                                    actions=actions,
                                    parameters=parameters,
                                    input_={})
        hooks = get_service("hooks")
        # The hook payload is rendered with our own clientId so the action
        # is attributed to this service.
        client_id = hooks.options["credentials"]["clientId"].decode("utf-8")
        hook["context"]["clientId"] = client_id
        # These parameters are large and unneeded for cancellation; drop them
        # before rendering to keep the payload small.
        hook_payload_rendered = render_action_hook(
            payload=hook["hook_payload"],
            context=hook["context"],
            delete_params=[
                "existing_tasks", "release_history", "release_partner_config"
            ])
        logger.info("Cancel phase %s by hook %s with payload: %s",
                    phase.name, hook["hook_id"], hook_payload_rendered)
        try:
            result = hooks.triggerHook(hook["hook_group_id"], hook["hook_id"],
                                       hook_payload_rendered)
            logger.debug("Done: %s", result)
        except TaskclusterRestFailure as e:
            abort(400, str(e))
    release.status = "aborted"
    session.commit()
    logger.info("Canceled release %s", release.name)
    notify_via_matrix(release.product,
                      f"Release {release.name} was just canceled.")
    return release.json
def find_decision_task_id(repo_url, project, revision, product):
    """Return the taskId of the decision task for *revision* via the TC index.

    Bug fix: when the trust domain did not end in "mobile",
    ``decision_task_route`` was never assigned and the lookup raised
    ``NameError``; the standard route is now built in that case as well.
    """
    trust_domain = get_trust_domain(repo_url, project, product)
    if trust_domain.endswith("mobile"):
        # Mobile projects are indexed under the GitHub repository name.
        _, project = extract_github_repo_owner_and_name(repo_url)
    decision_task_route = f"{trust_domain}.v2.{project}.revision.{revision}.taskgraph.decision"
    index = get_service("index")
    return index.findTask(decision_task_route)["taskId"]
def find_decision_task_id(repo_url, project, revision, product):
    """Resolve the decision task id for *revision* through the Taskcluster index."""
    trust_domain = get_trust_domain(repo_url, project, product)
    if repo_url.startswith("https://github.com"):
        # XXX "project" is a gecko-centric term which is translated into a branch in the git world.
        middle = f"branch.{project}"
    else:
        middle = project
    decision_task_route = f"{trust_domain}.v2.{middle}.revision.{revision}.taskgraph.decision"
    return get_service("index").findTask(decision_task_route)["taskId"]
def fetch_artifact(task_id, artifact):
    """Download and YAML-parse the latest run's *artifact* of *task_id*.

    Raises ArtifactNotFound when the artifact does not exist (HTTP 404);
    other HTTP errors propagate unchanged.
    """
    try:
        queue = get_service("queue")
        artifact_url = queue.buildUrl("getLatestArtifact", task_id, artifact)
        response = requests.get(artifact_url)
        response.raise_for_status()
        return yaml.safe_load(response.text)
    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 404:
            raise ArtifactNotFound
        raise
def find_decision_task_id(repo_url, project, revision, product):
    """Resolve the decision task id for *revision* through the Taskcluster index."""
    trust_domain = get_trust_domain(repo_url, project, product)
    if trust_domain.endswith("mobile"):
        # XXX "project" is a gecko-centric term which is translated into a branch in the git world.
        _, repo_name = extract_github_repo_owner_and_name(repo_url)
        middle = f"{repo_name}.branch.{project}"
    else:
        middle = project
    decision_task_route = f"{trust_domain}.v2.{middle}.revision.{revision}.taskgraph.decision"
    return get_service("index").findTask(decision_task_route)["taskId"]
def notify_via_matrix(product, message):
    """Post *message* to each Matrix room configured for *product*.

    Owner and room lists come from the app config, falling back to the
    "default" entry; if either is missing the notification is skipped.
    Failures per room are logged, never raised.
    """
    cfg = current_app.config
    owners_by_product = cfg.get("MATRIX_NOTIFICATIONS_OWNERS_PER_PRODUCT", {})
    rooms_by_product = cfg.get("MATRIX_NOTIFICATIONS_ROOMS_PER_PRODUCT", {})
    owner_list = owners_by_product.get(product, owners_by_product.get("default"))
    room_list = rooms_by_product.get(product, rooms_by_product.get("default"))
    if not (owner_list and room_list):
        logger.info("Matrix notifications are not configured")
        return
    owners = ": ".join(owner_list)
    notify = get_service("notify")
    for room_id in room_list:
        try:
            notify.matrix({"roomId": room_id, "body": f"{owners}: {message}"})
        except TaskclusterRestFailure:
            logger.exception("Failed to send Matrix notification")
def do_schedule_phase(session, phase, additional_shipit_emails=None):
    """Trigger the Taskcluster hook for *phase* and mark it submitted.

    Bug fix: the default for ``additional_shipit_emails`` was a mutable
    list shared across calls; it is now a ``None`` sentinel, normalized to
    a fresh list so downstream code still receives a list.

    Aborts with 409 when already submitted, 400 on pending signoffs or a
    hook trigger failure. Earlier unsubmitted phases are marked submitted
    (they will count as skipped since they have no taskId). Returns *phase*.
    """
    if additional_shipit_emails is None:
        additional_shipit_emails = []
    if phase.submitted:
        abort(409, "Already submitted!")
    for signoff in phase.signoffs:
        if not signoff.signed:
            abort(400, "Pending signoffs")

    hook = phase.task_json
    hooks = get_service("hooks")
    # Render the payload with our own clientId for attribution.
    client_id = hooks.options["credentials"]["clientId"].decode("utf-8")
    extra_context = {"clientId": client_id}
    try:
        result = hooks.triggerHook(
            hook["hook_group_id"], hook["hook_id"],
            rendered_hook_payload(
                phase,
                extra_context=extra_context,
                additional_shipit_emails=additional_shipit_emails))
        phase.task_id = result["status"]["taskId"]
    except TaskclusterRestFailure as e:
        abort(400, str(e))

    phase.submitted = True
    completed = datetime.datetime.utcnow()
    phase.completed_by = current_user.get_id()
    phase.completed = completed
    # If the previous phases are not submitted, mark them as submitted and they
    # will be calculated as skipped because they don't have taskId associated
    for ph in phase.release.phases:
        if ph.name == phase.name:
            break
        if not ph.submitted:
            ph.submitted = True
            ph.completed_by = current_user.get_id()
            ph.completed = completed

    session.commit()
    return phase
def generate_artifact_url(task_id, artifact_path):
    """Build the URL of *task_id*'s latest *artifact_path* via the queue service.

    Re-raises any failure as a generic Exception tagged with the task id,
    now chained with ``from exc`` so the original traceback is preserved.
    """
    queue = get_service("queue")
    try:
        return queue.buildUrl("getLatestArtifact", task_id, artifact_path)
    except Exception as exc:
        raise Exception(f"task {task_id} exception {exc}") from exc
def fetch_latest_artifacts(task_id):
    """List the latest run's artifacts of *task_id* via the queue service.

    Re-raises any failure as a generic Exception tagged with the task id,
    now chained with ``from exc`` so the original traceback is preserved.
    """
    queue = get_service("queue")
    try:
        return queue.listLatestArtifacts(task_id)["artifacts"]
    except Exception as exc:
        raise Exception(f"task {task_id} exception {exc}") from exc
def fetch_group_tasks(task_id):
    """List the tasks in the task group identified by *task_id*.

    Re-raises any failure as a generic Exception tagged with the task id,
    now chained with ``from exc`` so the original traceback is preserved.
    """
    queue = get_service("queue")
    try:
        return queue.listTaskGroup(task_id)["tasks"]
    except Exception as exc:
        raise Exception(f"task {task_id} exception {exc}") from exc