def schedule_phase(name, phase):
    """Schedule a phase of an XPI release.

    Looks up the phase by release name and phase name (404 if missing),
    enforces a permission that depends on the XPI type, schedules the
    phase, then logs and notifies via Matrix.
    """
    session = current_app.db.session
    row = (
        session.query(XPIPhase)
        .filter(XPIRelease.id == XPIPhase.release_id)
        .filter(XPIRelease.name == name)
        .filter(XPIPhase.name == phase)
        .first_or_404()
    )
    # we must require scope which depends on XPI type
    xpi_type = _xpi_type(row.release.revision, row.release.xpi_name)
    required_permission = f"{SCOPE_PREFIX}/schedule_phase/xpi/{xpi_type}/{row.name}"
    if not current_user.has_permissions(required_permission):
        user_permissions = ", ".join(current_user.get_permissions())
        abort(
            401,
            f"required permission: {required_permission}, user permissions: {user_permissions}"
        )
    scheduled = do_schedule_phase(session, row)
    url = taskcluster_urls.ui(get_root_url(), f"/tasks/groups/{scheduled.task_id}")
    logger.info("Phase %s of %s started by %s. - %s", scheduled.name,
                scheduled.release.name, scheduled.completed_by, url)
    notify_via_matrix(
        "xpi",
        f"Phase {scheduled.name} of {scheduled.release.name} started by {scheduled.completed_by}. - {url}"
    )
    return scheduled.json
def build_hook(target, args):
    """
    Read a hook definition file and either create or update the hook.

    Loads the JSON payload from ``args.hook_file`` (resolved through the
    target), pings the Taskcluster hooks service to validate credentials,
    then updates the hook if it already exists or creates it otherwise.
    Finally logs the hook's UI URL for debugging.
    """
    hook_file_path = target.check_path(args.hook_file)
    hook_group_id = args.hook_group_id
    hook_id = args.hook_id
    with open(hook_file_path) as hook_file:
        payload = json.load(hook_file)

    # Load config from file/secret
    config = Configuration(args)
    hooks = taskcluster.Hooks(config.get_taskcluster_options())
    # Fail fast if the service is unreachable or credentials are bad.
    hooks.ping()

    hook_name = "{}/{}".format(hook_group_id, hook_id)
    logger.info("Checking if hook %s exists", hook_name)
    try:
        hooks.hook(hook_group_id, hook_id)
        hook_exists = True
        logger.info("Hook %s exists", hook_name)
    except taskcluster.exceptions.TaskclusterRestFailure:
        # The hooks service raises a REST failure for unknown hooks.
        # NOTE(review): this also swallows non-404 failures (auth, 5xx)
        # and treats them as "hook missing" — confirm that is intended.
        hook_exists = False
        # Fixed grammar: "does not exists" -> "does not exist".
        logger.info("Hook %s does not exist", hook_name)

    if hook_exists:
        hooks.updateHook(hook_group_id, hook_id, payload)
        logger.info("Hook %s was successfully updated", hook_name)
    else:
        hooks.createHook(hook_group_id, hook_id, payload)
        logger.info("Hook %s was successfully created", hook_name)

    hook_url = taskcluster_urls.ui(
        config.get_root_url(), "hooks/{}/{}".format(hook_group_id, hook_id))
    logger.info("Hook URL for debugging: %r", hook_url)
def schedule_phase(name, phase):
    """Schedule a phase of an XPI release, forwarding signoff emails.

    Fetches every phase of the release so the signoff emails from earlier
    phases can be collected, locates the requested phase (404 if absent),
    enforces an XPI-type-specific permission, schedules the phase, then
    logs and notifies via Matrix.
    """
    session = current_app.db.session
    release_phases = (
        session.query(XPIPhase)
        .filter(XPIRelease.id == XPIPhase.release_id)
        .filter(XPIRelease.name == name)
        .all()
    )
    target = next((p for p in release_phases if p.name == phase), None)
    # Get email for all signoffs from previous phases and phase scheduler
    additional_shipit_emails = get_signoff_emails(release_phases)
    if target is None:
        abort(404, f"phase {phase} not found")
    # we must require scope which depends on XPI type
    xpi_type = _xpi_type(target.release.revision, target.release.xpi_name)
    required_permission = f"{SCOPE_PREFIX}/schedule_phase/xpi/{xpi_type}/{target.name}"
    if not current_user.has_permissions(required_permission):
        user_permissions = ", ".join(current_user.get_permissions())
        abort(
            401,
            f"required permission: {required_permission}, user permissions: {user_permissions}"
        )
    scheduled_phase = do_schedule_phase(session, target, additional_shipit_emails)
    url = taskcluster_urls.ui(get_root_url(), f"/tasks/groups/{scheduled_phase.task_id}")
    logger.info("Phase %s of %s started by %s. - %s", scheduled_phase.name,
                scheduled_phase.release.name, scheduled_phase.completed_by, url)
    notify_via_matrix(
        "xpi",
        f"Phase {scheduled_phase.name} of {scheduled_phase.release.name} started by {scheduled_phase.completed_by}. - {url}"
    )
    return scheduled_phase.json
def schedule_phase(name, phase):
    """Schedule a release phase for the given release name.

    Finds the phase (404 if missing), enforces a product/phase-specific
    permission, schedules it, then logs and notifies via Matrix.
    """
    session = current_app.db.session
    matched = (
        session.query(Phase)
        .filter(Release.id == Phase.release_id)
        .filter(Release.name == name)
        .filter(Phase.name == phase)
        .first_or_404()
    )
    # we must require scope which depends on product
    required_permission = f"{SCOPE_PREFIX}/schedule_phase/{matched.release.product}/{matched.name}"
    if not current_user.has_permissions(required_permission):
        user_permissions = ", ".join(current_user.get_permissions())
        abort(
            401,
            f"required permission: {required_permission}, user permissions: {user_permissions}"
        )
    matched = do_schedule_phase(session, matched)
    url = taskcluster_urls.ui(get_root_url(), f"/tasks/groups/{matched.task_id}")
    logger.info("Phase %s of %s started by %s. - %s", matched.name,
                matched.release.name, matched.completed_by, url)
    notify_via_matrix(
        matched.release.product,
        f"Phase {matched.name} was just scheduled for {matched.release.name} - {url}"
    )
    return matched.json
def schedule_phase(name, phase):
    """Trigger the Taskcluster hook for a release phase and record the result.

    Looks up the phase (404 if missing), checks the product-specific
    permission, rejects already-submitted phases (409) and phases with
    pending signoffs (400), triggers the hook, marks earlier unsubmitted
    phases as skipped, and ships the release once every phase is submitted.
    Returns the phase as JSON.
    """
    session = current_app.db.session
    try:
        phase = session.query(Phase).filter(
            Release.id == Phase.release_id).filter(
            Release.name == name).filter(Phase.name == phase).one()
    except NoResultFound:
        abort(404)
    # we must require scope which depends on product
    required_permission = f"{SCOPE_PREFIX}/schedule_phase/{phase.release.product}/{phase.name}"
    if not current_user.has_permissions(required_permission):
        user_permissions = ", ".join(current_user.get_permissions())
        abort(
            401,
            f"required permission: {required_permission}, user permissions: {user_permissions}"
        )
    # A phase can only be scheduled once.
    if phase.submitted:
        abort(409, "Already submitted!")
    # Every signoff attached to this phase must be signed first.
    for signoff in phase.signoffs:
        if not signoff.signed:
            abort(400, "Pending signoffs")
    hook = phase.task_json
    if "hook_payload" not in hook:
        raise ValueError("Action tasks are not supported")
    hooks = get_service("hooks")
    # Expose the caller's clientId to the hook payload template.
    client_id = hooks.options["credentials"]["clientId"].decode("utf-8")
    extra_context = {"clientId": client_id}
    result = hooks.triggerHook(
        hook["hook_group_id"], hook["hook_id"],
        phase.rendered_hook_payload(extra_context=extra_context))
    phase.task_id = result["status"]["taskId"]
    phase.submitted = True
    completed = datetime.datetime.utcnow()
    phase.completed_by = current_user.get_id()
    phase.completed = completed
    # If the previous phases are not submitted, mark them as submitted and they
    # will be calculated as skipped because they don't have taskId associated
    for ph in phase.release.phases:
        if ph.name == phase.name:
            break
        if not ph.submitted:
            ph.submitted = True
            ph.completed_by = current_user.get_id()
            ph.completed = completed
    # Once every phase has been submitted the whole release is shipped.
    if all([ph.submitted for ph in phase.release.phases]):
        phase.release.status = "shipped"
        phase.release.completed = completed
    session.commit()
    root_url = hooks.options["rootUrl"]
    url = taskcluster_urls.ui(root_url, f"/tasks/groups/{phase.task_id}")
    notify_via_irc(
        phase.release.product,
        f"Phase {phase.name} was just scheduled for release {phase.release.product} {phase.release.version} build{phase.release.build_number} - {url}",
    )
    return phase.json
def create_interactive_action(parameters, graph_config, input, task_group_id, task_id):
    """Re-create a task as an interactive copy and email a connect link.

    The original task definition is fetched from the taskgraph so only
    known tasks can be cloned; the clone is stripped of routes, retries,
    caches and artifacts, given short expirations, scope-filtered, and
    created under a fresh task group before the requester is notified.
    """
    # fetch the original task definition from the taskgraph, to avoid
    # creating interactive copies of unexpected tasks. Note that this only applies
    # to docker-worker tasks, so we can assume the docker-worker payload format.
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)

    task = taskcluster.get_task_definition(task_id)
    label = task["metadata"]["name"]

    def edit(task):
        # Only rewrite the requested task; pass every other one through.
        if task.label != label:
            return task
        task_def = task.task
        # drop task routes (don't index this!)
        task_def["routes"] = []
        # only try this once
        task_def["retries"] = 0
        # short expirations, at least 3 hour maxRunTime
        task_def["created"] = {"relative-datestamp": "0 hours"}
        task_def["deadline"] = {"relative-datestamp": "12 hours"}
        task_def["expires"] = {"relative-datestamp": "1 day"}
        # filter scopes with the SCOPE_WHITELIST
        task_def["scopes"] = [
            scope for scope in task_def.get("scopes", [])
            if any(pattern.match(scope) for pattern in SCOPE_WHITELIST)
        ]
        payload = task_def["payload"]
        # make sure the task runs for long enough..
        payload["maxRunTime"] = max(3600 * 3, payload.get("maxRunTime", 0))
        # no caches or artifacts
        payload["cache"] = {}
        payload["artifacts"] = {}
        # enable interactive mode
        payload.setdefault("features", {})["interactive"] = True
        payload.setdefault("env", {})["TASKCLUSTER_INTERACTIVE"] = "true"
        return task

    # Create the task and any of its dependencies. This uses a new taskGroupId to avoid
    # polluting the existing taskGroup with interactive tasks.
    action_task_id = os.environ.get("TASK_ID")
    label_to_taskid = create_tasks(
        graph_config,
        [label],
        full_task_graph,
        label_to_taskid,
        parameters,
        decision_task_id=action_task_id,
        modifier=edit,
    )
    taskId = label_to_taskid[label]
    logger.info("Created interactive task {}; sending notification".format(taskId))

    if input and "notify" in input:
        email = input["notify"]
        # no point sending to a noreply address!
        if email == "*****@*****.**":
            return
        info = {
            "url": taskcluster_urls.ui(get_root_url(False),
                                       "tasks/{}/connect".format(taskId)),
            "label": label,
            "revision": parameters["head_rev"],
            "repo": parameters["head_repository"],
        }
        send_email(
            email,
            subject=EMAIL_SUBJECT.format(**info),
            content=EMAIL_CONTENT.format(**info),
            link={"text": "Connect", "href": info["url"]},
            use_proxy=True,
        )
def create_interactive_action(parameters, graph_config, input, task_group_id, task_id):
    """Clone the given task as an interactive copy and email a connect link.

    Only the task named by ``task_id`` is modified; the clone loses its
    routes, retries, caches and artifacts, gets short expirations and a
    whitelist-filtered scope set, and runs with docker-worker interactive
    mode enabled.
    """
    # fetch the original task definition from the taskgraph, to avoid
    # creating interactive copies of unexpected tasks. Note that this only applies
    # to docker-worker tasks, so we can assume the docker-worker payload format.
    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
        parameters, graph_config)
    task = taskcluster.get_task_definition(task_id)
    label = task['metadata']['name']

    def edit(task):
        # Leave every task other than the requested one untouched.
        if task.label != label:
            return task
        definition = task.task
        # Strip routes so the interactive copy is never indexed.
        definition['routes'] = []
        # A loaner task should not retry.
        definition['retries'] = 0
        # Short lifetime: created now, 12 hour deadline, expires in a day.
        definition['created'] = {'relative-datestamp': '0 hours'}
        definition['deadline'] = {'relative-datestamp': '12 hours'}
        definition['expires'] = {'relative-datestamp': '1 day'}
        # Keep only the scopes permitted by SCOPE_WHITELIST.
        definition['scopes'] = [
            scope for scope in definition.get('scopes', [])
            if any(pattern.match(scope) for pattern in SCOPE_WHITELIST)
        ]
        payload = definition['payload']
        # Guarantee at least three hours of runtime.
        payload['maxRunTime'] = max(3600 * 3, payload.get('maxRunTime', 0))
        # Interactive copies get no caches and publish no artifacts.
        payload['cache'] = {}
        payload['artifacts'] = {}
        # Turn on docker-worker interactive mode.
        payload.setdefault('features', {})['interactive'] = True
        payload.setdefault('env', {})['TASKCLUSTER_INTERACTIVE'] = 'true'
        return task

    # Create the task and any of its dependencies. This uses a new taskGroupId to avoid
    # polluting the existing taskGroup with interactive tasks.
    label_to_taskid = create_tasks(graph_config, [label], full_task_graph,
                                   label_to_taskid, parameters, modifier=edit)
    taskId = label_to_taskid[label]
    logger.info(
        'Created interactive task {}; sending notification'.format(taskId))
    if input and 'notify' in input:
        email = input['notify']
        # no point sending to a noreply address!
        if email == '*****@*****.**':
            return
        connect_url = taskcluster_urls.ui(get_root_url(False),
                                          'tasks/{}/connect'.format(taskId))
        info = {
            'url': connect_url,
            'label': label,
            'revision': parameters['head_rev'],
            'repo': parameters['head_repository'],
        }
        send_email(email,
                   subject=EMAIL_SUBJECT.format(**info),
                   content=EMAIL_CONTENT.format(**info),
                   link={'text': 'Connect', 'href': info['url']},
                   use_proxy=True)