Example #1
def main():
    parser = ArgumentParser()
    parser.add_argument('--disable', action='store_true', dest='quarantine',
                        help='disable the workers for 1000 years',
                        default=True)
    parser.add_argument('--enable', action='store_false', dest='quarantine',
                        help='enable the workers')
    parser.add_argument('-p', '--provisioner', required=True)
    parser.add_argument('-w', '--worker-type', required=True)
    parser.add_argument('-g', '--worker-group', required=True)
    parser.add_argument('workers', nargs='+', help='worker ids')

    args = parser.parse_args()

    if args.quarantine:
        quarantineUntil = fromNow('1000 years')
    else:
        quarantineUntil = fromNow('-1 hours')

    q = Queue()

    for worker_id in args.workers:
        res = q.quarantineWorker(args.provisioner, args.worker_type,
                                 args.worker_group, worker_id,
                                 payload={'quarantineUntil': quarantineUntil})
        if 'quarantineUntil' in res:
            print('{0[workerId]} quarantined until {0[quarantineUntil]}'.format(res))
        else:
            print('{0[workerId]} not quarantined'.format(res))
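The snippet above omits its imports; a minimal sketch of the setup it assumes is shown below. The rootUrl and credentials are placeholders, and newer versions of the Python taskcluster client require rootUrl to be set explicitly.

import taskcluster

# Placeholder options; a real script would read these from the environment
# (for example via taskcluster.optionsFromEnvironment()).
options = {
    'rootUrl': 'https://tc.example.com',
    'credentials': {'clientId': '...', 'accessToken': '...'},
}
q = taskcluster.Queue(options)
# fromNow() computes an offset from the current time, e.g. for quarantineUntil.
print(taskcluster.fromNow('1000 years'))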
Example #2
def get_items_from_common_tc_task(common_task_id, tc_config):
    tc_task_items = {}
    queue = Queue(tc_config)
    task = queue.task(common_task_id)
    tc_task_items["version"] = task["extra"]["build_props"]["version"]
    tc_task_items["build_number"] = task["extra"]["build_props"]["build_number"]
    tc_task_items["mozilla_revision"] = task["extra"]["build_props"]["revision"]
    tc_task_items["partials"] = task["extra"]["build_props"]["partials"]
    tc_task_items["mozharness_changeset"] = task["extra"]["build_props"]["mozharness_changeset"]
    return tc_task_items
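A hypothetical call, assuming the common task's extra.build_props section is populated; the task ID and rootUrl below are placeholders.

tc_config = {'rootUrl': 'https://tc.example.com'}  # placeholder client options
items = get_items_from_common_tc_task('abc123DEFghijklmnopqrs', tc_config)
print(items['version'], items['build_number'], items['mozilla_revision'])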
Example #3
def handle_worker_type(cfg):
    min_replicas = cfg["autoscale"]["args"]["min_replicas"]
    log = logger.bind(
        worker_type=cfg["worker_type"],
        provisioner=cfg["provisioner"],
        deployment_namespace=cfg["deployment_namespace"],
        deployment_name=cfg["deployment_name"],
        min_replicas=min_replicas,
    )
    api = get_api(cfg.get("kube_connfig"), cfg.get("kube_connfig_context"))
    log.info("Handling worker type. Getting the number of running replicas...")
    running = get_running(
        api=api,
        deployment_namespace=cfg["deployment_namespace"],
        deployment_name=cfg["deployment_name"],
    )
    log = log.bind(running=running)
    log.info("Calculating capacity")
    capacity = cfg["autoscale"]["args"]["max_replicas"] - running
    log = log.bind(capacity=capacity)

    log.info("Checking pending")
    queue = Queue({"rootUrl": cfg["root_url"]})
    pending = queue.pendingTasks(cfg["provisioner"], cfg["worker_type"])["pendingTasks"]
    log = log.bind(pending=pending)
    log.info("Calculated desired replica count")
    desired = get_new_worker_count(pending, running, cfg["autoscale"]["args"])
    log = log.bind(desired=desired)
    if desired == 0:
        log.info("Zero replicas needed")
        if running < min_replicas:
            log.info("Using min_replicas")
            adjust_scale(api, min_replicas, cfg["deployment_namespace"], cfg["deployment_name"])
        return
    if desired < 0:
        log.info(f"Need to remove {abs(desired)} of {running}")
        target_replicas = running + desired
        log = log.bind(target_replicas=target_replicas)
        if target_replicas < 0:
            log.info("Target is negative, setting to zero")
            target_replicas = 0
            log = log.bind(target_replicas=target_replicas)
        if target_replicas < min_replicas:
            log.info("Using min_replicas instead of target")
            target_replicas = min_replicas
        adjust_scale(api, target_replicas, cfg["deployment_namespace"], cfg["deployment_name"])
    else:
        adjustment = min([capacity, desired])
        log = log.bind(adjustment=adjustment)
        log.info(f"Need to increase capacity from {running} running by {adjustment}")
        if capacity <= 0:
            log.info("Maximum capacity reached")
            return
        adjust_scale(api, running + adjustment, cfg["deployment_namespace"], cfg["deployment_name"])
    log.info("Done handling worker type")
Example #4
def get_items_from_common_tc_task(common_task_id, tc_config):
    tc_task_items = {}
    queue = Queue(tc_config)
    task = queue.task(common_task_id)
    tc_task_items["version"] = task["extra"]["build_props"]["version"]
    tc_task_items["build_number"] = task["extra"]["build_props"][
        "build_number"]
    tc_task_items["mozilla_revision"] = task["extra"]["build_props"][
        "revision"]
    tc_task_items["partials"] = task["extra"]["build_props"]["partials"]
    tc_task_items["mozharness_changeset"] = task["extra"]["build_props"][
        "mozharness_changeset"]
    return tc_task_items
Example #5
 def __init__(self, json=None, task_id=None, queue=None):
     """Init."""
     # taskId is not provided in the definition
     if task_id:
         self.task_id = task_id
     if json:
         self.def_json = json.get('task', json)
         return
     if not task_id:
         raise ValueError('No task definition or taskId provided')
     self.queue = queue
     if not self.queue:
         self.queue = Queue(tc_options())
     self._fetch_definition()
Example #6
 def __init__(self, json=None, task_id=None, queue=None):
     """Init."""
     if task_id:
         self.task_id = task_id
     if json:
         # We might be passed {'status': ... } or just the contents
         self.status_json = json.get('status', json)
         self.task_id = self.status_json['taskId']
         return
     if not task_id:
         raise ValueError('No task definition or taskId provided')
     self.queue = queue
     if not self.queue:
         self.queue = Queue(tc_options())
     self._fetch_status()
Example #7
 def __init__(self, options):
     cert = options["credentials"].get("certificate")
     if cert:
         options["credentials"]["certificate"] = json.dumps(cert)
     self.queue = Queue(options)
     self.scheduler = Scheduler(options)
     log.debug("Dict of options: %s", options)
Example #8
def main(name=None):
    if name in (None, __name__):
        if len(sys.argv) != 2:
            print("Usage: {} DECISION_TASK_ID".format(sys.argv[0]), file=sys.stderr)
            sys.exit(1)
        log.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            fmt="%(asctime)s %(levelname)8s - %(message)s", datefmt="%Y-%m-%dT%H:%M:%S"
        )
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        log.addHandler(handler)
        makedirs("build")
        orig_dir = os.getcwd()
        context = Values()
        gnupghome = os.path.join(os.getcwd(), 'gpg')
        os.chmod(gnupghome, 0o700)
        context.gpg = gnupg.GPG(gpgbinary='gpg2', gnupghome=gnupghome)
        context.gpg.encoding = 'utf-8'
        credentials = {
            'credentials': {
                'clientId': os.environ["TASKCLUSTER_CLIENT_ID"],
                'accessToken': os.environ["TASKCLUSTER_ACCESS_TOKEN"],
            }
        }
        loop = asyncio.get_event_loop()
        try:
            os.chdir("build")
            with aiohttp.ClientSession() as context.session:
                context.queue = Queue(credentials, session=context.session)
                context.decision_task_id = sys.argv[1]
                loop.run_until_complete(async_main(context))
        finally:
            os.chdir(orig_dir)
            loop.close()
Example #9
def get_temp_queue(context):
    """Create an async taskcluster client Queue from the latest temp
    credentials.
    """
    temp_queue = Queue(
        {'credentials': context.temp_credentials},
        session=context.session,
    )
    return temp_queue
Example #10
async def get_action_task_details(session, taskid):
    async with async_timeout.timeout(100):
        queue = Queue(session=session)
        task = await queue.task(taskid)
        return dict(taskid=taskid,
                    name=task['extra']['action']['name'],
                    buildnum=task['extra']['action']['context']['input']['build_number'],
                    flavor=task['extra']['action']['context']['input']['release_promotion_flavor'],
                    ci=task['taskGroupId'])
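A hypothetical driver for the coroutine above, assuming it lives in a module where Queue is the async taskcluster client; the task ID is a placeholder.

import asyncio

import aiohttp


async def demo():
    async with aiohttp.ClientSession() as session:
        details = await get_action_task_details(session, 'abc123DEFghijklmnopqrs')
        print(details['name'], details['flavor'], details['buildnum'])

asyncio.run(demo())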
Example #11
    def create_queue(self, credentials):
        """Create a taskcluster queue.

        Args:
            credentials (dict): taskcluster credentials.
        """
        if credentials:
            return Queue({
                'credentials': credentials,
            }, session=self.session)
Example #12
class TaskDefinition:
    """Data and queries about a task definition."""

    def __init__(self, json=None, task_id=None, queue=None):
        """Init."""
        # taskId is not provided in the definition
        if task_id:
            self.task_id = task_id
        if json:
            self.def_json = json.get('task', json)
            return
        if not task_id:
            raise ValueError('No task definition or taskId provided')
        self.queue = queue
        if not self.queue:
            self.queue = Queue(tc_options())
        self._fetch_definition()

    def _fetch_definition(self):
        self.def_json = self.queue.task(self.task_id)

    def __repr__(self):
        """repr."""
        return "<TaskDefinition {}>".format(self.task_id)

    def __str__(self):
        """Str representation."""
        return "<TaskDefinition {}>".format(self.task_id)

    @property
    def json(self):
        """Return json as originally presented."""
        return {'task': self.def_json}

    @property
    def label(self):
        """Extract label."""
        return self.def_json.get('tags', {}).get(
            'label', self.def_json.get('metadata').get('name', '')
        )

    @property
    def kind(self):
        """Return the task's kind."""
        return self.def_json['tags'].get('kind', '')

    @property
    def scopes(self):
        """Return a list of the scopes used, if any."""
        return self.def_json.get('scopes', [])

    @property
    def name(self):
        """Return the name of the task."""
        return self.def_json['metadata']['name']
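Hypothetical usage of the class above; the first form fetches the definition from the Queue API (the task ID is a placeholder), the second wraps an already-fetched definition without touching the network.

task = TaskDefinition(task_id='abc123DEFghijklmnopqrs')
print(task.label, task.kind, task.scopes)

task = TaskDefinition(json={'task': {'metadata': {'name': 'build-linux64/opt'}, 'tags': {}}})
print(task.name)  # build-linux64/opt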
Example #13
    def create_queue(self, credentials):
        """Create a taskcluster queue.

        Args:
            credentials (dict): taskcluster credentials.

        """
        if credentials:
            session = self.session or aiohttp.ClientSession(
                loop=asyncio.get_event_loop())
            return Queue({
                'credentials': credentials,
            }, session=session)
Example #14
    def fetch_tasks(self, limit=None):
        """
        Return tasks with the associated group ID.

        Handles continuationToken without the user being aware of it.

        Enforces the limit parameter as a limit of the total number of tasks
        to be returned.
        """
        if self.cache_file:
            if self._read_file_cache():
                return

        query = {}
        if limit:
            # Default taskcluster-client api asks for 1000 tasks.
            query['limit'] = min(limit, 1000)

        def under_limit(length):
            """Indicate if we've returned enough tasks."""
            if not limit or length < limit:
                return True
            return False

        queue = Queue(options=tc_options())
        outcome = queue.listTaskGroup(self.groupid, query=query)
        tasks = outcome.get('tasks', [])
        while under_limit(len(tasks)) and outcome.get('continuationToken'):
            query.update(
                {'continuationToken': outcome.get('continuationToken')})
            outcome = queue.listTaskGroup(self.groupid, query=query)
            tasks.extend(outcome.get('tasks', []))

        if limit:
            tasks = tasks[:limit]
        self.tasklist = [Task(json=data) for data in tasks]

        if self.cache_file:
            self._write_file_cache()
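The same continuationToken pattern, reduced to a minimal standalone sketch; the group ID and rootUrl are placeholders.

from taskcluster import Queue

queue = Queue(options={'rootUrl': 'https://tc.example.com'})
query = {'limit': 1000}
outcome = queue.listTaskGroup('some-task-group-id', query=query)
tasks = outcome.get('tasks', [])
while outcome.get('continuationToken'):
    # Pass the token back to fetch the next page of tasks in the group.
    query['continuationToken'] = outcome['continuationToken']
    outcome = queue.listTaskGroup('some-task-group-id', query=query)
    tasks.extend(outcome.get('tasks', []))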
Example #15
async def test_verify_production_cot(branch_context):
    index = Index()
    queue = Queue()

    async def get_task_id_from_index(index_path):
        res = await index.findTask(index_path)
        return res['taskId']

    async def get_completed_task_info_from_labels(decision_task_id,
                                                  label_to_task_type):
        label_to_taskid = await queue.getLatestArtifact(
            decision_task_id, "public/label-to-taskid.json")
        task_info = {}
        for re_label, task_type in label_to_task_type.items():
            r = re.compile(re_label)
            for label, task_id in label_to_taskid.items():
                if r.match(label):
                    status = await queue.status(task_id)
                    # only run verify_cot against tasks with completed deps.
                    if status['status']['state'] in ('completed', 'running',
                                                     'pending', 'failed'):
                        task_info[task_id] = task_type
                        break
            else:
                log.warning(
                    "Not running verify_cot against {} {} because there are no elegible completed tasks"
                    .format(decision_task_id, task_type))
        return task_info

    async def verify_cot(name, task_id, task_type):
        log.info("Verifying {} {} {}...".format(name, task_id, task_type))
        async with get_context({'verify_cot_signature': False}) as context:
            context.task = await queue.task(task_id)
            cot = ChainOfTrust(context, task_type, task_id=task_id)
            await verify_chain_of_trust(cot)

    task_id = await get_task_id_from_index(branch_context['index'])
    assert task_id, "{}: Can't get task_id from index {}!".format(
        branch_context['name'], branch_context['index'])
    if branch_context.get('task_label_to_task_type'):
        task_info = await get_completed_task_info_from_labels(
            task_id, branch_context['task_label_to_task_type'])
        for task_id, task_type in task_info.items():
            name = "{} {}".format(branch_context['name'], task_type)
            await verify_cot(name, task_id, task_type)
    else:
        await verify_cot(branch_context['name'], task_id,
                         branch_context['task_type'])
Example #16
 def __init__(self, json=None, task_id=None, queue=None):
     """init."""
     if json:
         self.def_json = json.get('task')
         self.status_json = json.get('status')
         self.task_id = self.status_json['taskId']
         return
     if task_id:
         self.task_id = task_id
     else:
         raise ValueError('No task definition or taskId provided')
     self.queue = queue
     if not self.queue:
         self.queue = Queue(tc_options())
     if self.task_id:
         self._fetch_definition()
         self._fetch_status()
Example #17
async def async_main():
    conn = aiohttp.TCPConnector(limit=max_concurrent_aiohttp_streams)
    with aiohttp.ClientSession(connector=conn) as session:

        with tempfile.TemporaryDirectory() as tmp_dir:
            context = {
                'session': session,
                'tmp_dir': tmp_dir,
            }
            queue = Queue(session=session)
            graph = await queue.listTaskGroup(task_graph_id)
            filtered = get_filtered(graph)
            log.debug("filtered: {}".format(filtered))
            if len(filtered) != num_expected_signing_tasks:
                die("Expected {} signing tasks; only found {}".format(
                    num_expected_signing_tasks, filtered))

            abs_path_per_platform = await download(context)
            checksums(context, abs_path_per_platform)
            await upload()
Example #18
def main(options):
    log.info('Loading config from %s' % options.config)

    with open(options.config, 'r') as config_file:
        config = yaml.load(config_file)

    if config['release-runner'].get('verbose', False):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    api_root = config['api']['api_root']
    username = config['api']['username']
    password = config['api']['password']

    rr_config = config['release-runner']

    buildbot_configs = rr_config['buildbot_configs']
    buildbot_configs_branch = rr_config['buildbot_configs_branch']
    sleeptime = rr_config['sleeptime']
    notify_from = rr_config.get('notify_from')
    notify_to = rr_config.get('notify_to_announce')
    docker_worker_key = rr_config.get('docker_worker_key')
    signing_pvt_key = config['signing'].get('pvt_key')
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = rr_config.get('smtp_server', 'localhost')
    tc_config = {
        "credentials": {
            "clientId": config['taskcluster'].get('client_id'),
            "accessToken": config['taskcluster'].get('access_token'),
        }
    }
    # Extend tc_config for retries, see Bug 1293744
    # https://github.com/taskcluster/taskcluster-client.py/blob/0.0.24/taskcluster/client.py#L30
    # This is a stopgap until Bug 1259627 is fixed.
    retrying_tc_config = tc_config.copy()
    retrying_tc_config.update({"maxRetries": 12})
    balrog_username = config['balrog'].get("username")
    balrog_password = config["balrog"].get("password")
    extra_balrog_submitter_params = config["balrog"].get(
        "extra_balrog_submitter_params", "")
    beetmover_aws_access_key_id = config["beetmover"].get("aws_access_key_id")
    beetmover_aws_secret_access_key = config["beetmover"].get(
        "aws_secret_access_key")
    gpg_key_path = config["signing"].get("gpg_key_path")

    # TODO: replace release sanity with direct checks of en-US and l10n
    # revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(retrying_tc_config)
    index = Index(tc_config)
    queue = Queue(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests([r['pattern'] for r in config['releases']])
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(
                    rr, config['releases'])
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial,
          args=(buildbot_configs, CONFIGS_WORKDIR),
          kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config:
        format_dict = dict(buildbot_configs=CONFIGS_WORKDIR)
        for target in config['symlinks']:
            symlink = config['symlinks'].get(target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)
    rc = 0
    for release in new_releases:
        branchConfig = get_branch_config(release)
        # Candidate releases are split into two graphs, and release-runner only handles
        # the first graph of tasks, so parts like postrelease, push_to_releases/mirrors,
        # and mirror-dependent channels are handled in the second generated graph outside
        # of release-runner. This is not elegant, but it should do the job for now.
        release_channels = release['release_channels']
        candidate_release = is_candidate_release(release_channels)
        if candidate_release:
            postrelease_enabled = False
            postrelease_bouncer_aliases_enabled = False
            final_verify_channels = [
                c for c in release_channels
                if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            publish_to_balrog_channels = [
                c for c in release_channels
                if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            push_to_releases_enabled = False
            postrelease_mark_as_shipped_enabled = False
        else:
            postrelease_enabled = branchConfig[
                'postrelease_version_bump_enabled'][release['product']]
            postrelease_bouncer_aliases_enabled = branchConfig[
                'postrelease_bouncer_aliases_enabled']
            postrelease_mark_as_shipped_enabled = branchConfig[
                'postrelease_mark_as_shipped_enabled']
            final_verify_channels = release_channels
            publish_to_balrog_channels = release_channels
            push_to_releases_enabled = True

        # XXX: Works with neither Fennec nor Thunderbird
        platforms = branchConfig['release_platforms']

        try:
            graph_id = slugId()
            done = are_en_us_builds_completed(
                index=index,
                release_name=release['name'],
                submitted_at=release['submittedAt'],
                revision=release['mozillaRevision'],
                platforms=platforms,
                queue=queue,
                tc_task_indexes=branchConfig['tc_indexes'][release['product']])
            if not done:
                log.info(
                    'Builds are not completed yet, skipping release "%s" for now',
                    release['name'])
                rr.update_status(release, 'Waiting for builds to be completed')
                continue

            log.info('Every build is completed for release: %s',
                     release['name'])
            rr.update_status(release, 'Generating task graph')

            kwargs = {
                "public_key":
                docker_worker_key,
                "version":
                release["version"],
                # ESR should not use "esr" suffix here:
                "next_version":
                bump_version(release["version"].replace("esr", "")),
                "appVersion":
                getAppVersion(release["version"]),
                "buildNumber":
                release["buildNumber"],
                "release_eta":
                release.get("release_eta"),
                "source_enabled":
                True,
                "checksums_enabled":
                True,
                "binary_transparency_enabled":
                branchConfig.get("binary_transparency_enabled", False),
                "repo_path":
                release["branch"],
                "revision":
                release["mozillaRevision"],
                "product":
                release["product"],
                "funsize_product":
                get_funsize_product(release["product"]),
                # if mozharness_revision is not passed, use 'revision'
                "mozharness_changeset":
                release.get('mh_changeset') or release['mozillaRevision'],
                "partial_updates":
                release.get('partial_updates', list()),
                "branch":
                release['branchShortName'],
                "updates_enabled":
                bool(release["partials"]),
                "l10n_config":
                get_l10n_config(
                    index=index,
                    product=release["product"],
                    branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['platforms'],
                    l10n_platforms=branchConfig['l10n_release_platforms'],
                    l10n_changesets=release['l10n_changesets'],
                    tc_task_indexes=branchConfig['tc_indexes'][
                        release['product']],
                ),
                "en_US_config":
                get_en_US_config(
                    index=index,
                    product=release["product"],
                    branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['release_platforms'],
                    tc_task_indexes=branchConfig['tc_indexes'][
                        release['product']],
                ),
                "verifyConfigs": {},
                "balrog_api_root":
                branchConfig["balrog_api_root"],
                "funsize_balrog_api_root":
                branchConfig["funsize_balrog_api_root"],
                "balrog_username":
                balrog_username,
                "balrog_password":
                balrog_password,
                "beetmover_aws_access_key_id":
                beetmover_aws_access_key_id,
                "beetmover_aws_secret_access_key":
                beetmover_aws_secret_access_key,
                # TODO: staging specific, make them configurable
                "signing_class":
                branchConfig['signing_class'][release["product"]],
                "accepted_mar_channel_id":
                branchConfig.get('accepted_mar_channel_id',
                                 {}).get(release["product"]),
                "signing_cert":
                branchConfig['signing_cert'][release["product"]],
                "moz_disable_mar_cert_verification":
                branchConfig.get('moz_disable_mar_cert_verification'),
                "root_home_dir":
                branchConfig['root_home_dir'][release["product"]],
                "bouncer_enabled":
                branchConfig["bouncer_enabled"],
                "updates_builder_enabled":
                branchConfig["updates_builder_enabled"],
                "update_verify_enabled":
                branchConfig["update_verify_enabled"],
                "release_channels":
                release_channels,
                "final_verify_channels":
                final_verify_channels,
                "final_verify_platforms":
                branchConfig['release_platforms'],
                "uptake_monitoring_platforms":
                branchConfig['uptake_monitoring_platforms'][
                    release["product"]],
                "signing_pvt_key":
                signing_pvt_key,
                "build_tools_repo_path":
                branchConfig['build_tools_repo_path'],
                "push_to_candidates_enabled":
                branchConfig['push_to_candidates_enabled'],
                # TODO: temporary config enabled during 53 Fennec beta cycle
                "candidates_fennec_enabled":
                branchConfig.get('candidates_fennec_enabled'),
                "stage_product":
                branchConfig['stage_product'][release['product']],
                "postrelease_bouncer_aliases_enabled":
                postrelease_bouncer_aliases_enabled,
                "uptake_monitoring_enabled":
                branchConfig['uptake_monitoring_enabled'],
                "tuxedo_server_url":
                branchConfig['tuxedoServerUrl'],
                "postrelease_version_bump_enabled":
                postrelease_enabled,
                "postrelease_mark_as_shipped_enabled":
                postrelease_mark_as_shipped_enabled,
                "push_to_releases_enabled":
                push_to_releases_enabled,
                "push_to_releases_automatic":
                branchConfig['push_to_releases_automatic'],
                "beetmover_candidates_bucket":
                branchConfig["beetmover_buckets"][release["product"]],
                "partner_repacks_platforms":
                branchConfig.get("partner_repacks_platforms",
                                 {}).get(release["product"], []),
                "eme_free_repacks_platforms":
                branchConfig.get("eme_free_repacks_platforms",
                                 {}).get(release["product"], []),
                "sha1_repacks_platforms":
                branchConfig.get("sha1_repacks_platforms", []),
                "l10n_changesets":
                release['l10n_changesets'],
                "extra_balrog_submitter_params":
                extra_balrog_submitter_params + " --product " +
                release["product"].capitalize(),
                "publish_to_balrog_channels":
                publish_to_balrog_channels,
                "snap_enabled":
                branchConfig.get("snap_enabled",
                                 {}).get(release["product"], False),
                "update_verify_channel":
                branchConfig.get("update_verify_channel",
                                 {}).get(release["product"]),
                "update_verify_requires_cdn_push":
                branchConfig.get("update_verify_requires_cdn_push", False),
            }

            # TODO: en-US validation for multiple tasks
            # validate_graph_kwargs(queue, gpg_key_path, **kwargs)
            graph = make_task_graph_strict_kwargs(**kwargs)
            rr.update_status(release, "Submitting task graph")
            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print(scheduler.createTaskGraph(graph_id, graph))

            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server,
                                  from_=notify_from,
                                  to=notify_to,
                                  release=release,
                                  task_group_id=graph_id,
                                  l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release,
                'Failed to start release promotion (graph ID: %s). Error(s): %s'
                % (graph_id, exception))
            log.exception(
                'Failed to start release "%s" promotion for graph %s. Error(s): %s',
                release['name'], graph_id, exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)
Example #19
 def create_queue(self, credentials):
     if credentials:
         return Queue({
             'credentials': credentials,
         }, session=self.session)
Example #20
def handle_worker_type(cfg):
    min_replicas = cfg["autoscale"]["args"]["min_replicas"]
    log_env = dict(
        worker_type=cfg["worker_type"],
        provisioner=cfg["provisioner"],
        deployment_namespace=cfg["deployment_namespace"],
        deployment_name=cfg["deployment_name"],
        min_replicas=min_replicas,
    )
    api = get_api(cfg.get("kube_connfig"), cfg.get("kube_connfig_context"))
    logger.info(
        "Handling worker type. Getting the number of running replicas...",
        extra=log_env)
    running = get_running(
        api=api,
        deployment_namespace=cfg["deployment_namespace"],
        deployment_name=cfg["deployment_name"],
    )
    log_env["running"] = running
    logger.info("Calculating capacity", extra=log_env)
    capacity = cfg["autoscale"]["args"]["max_replicas"] - running
    log_env["capacity"] = capacity

    logger.info("Checking pending", extra=log_env)
    queue = Queue({"rootUrl": cfg["root_url"]})
    pending = get_pending(queue, cfg["provisioner"], cfg["worker_type"])
    log_env["pending"] = pending
    logger.info("Calculated desired replica count", extra=log_env)
    desired = get_new_worker_count(pending, running, cfg["autoscale"]["args"])
    log_env["desired"] = desired
    if desired == 0:
        logger.info("Zero replicas needed", extra=log_env)
        if running < min_replicas:
            logger.info("Using min_replicas", extra=log_env)
            adjust_scale(api, min_replicas, cfg["deployment_namespace"],
                         cfg["deployment_name"])
        return
    if desired < 0:
        logger.info("Need to remove %s of %s",
                    abs(desired),
                    running,
                    extra=log_env)
        target_replicas = running + desired
        log_env["target_replicas"] = target_replicas
        if target_replicas < 0:
            logger.info("Target is negative, setting to zero", extra=log_env)
            target_replicas = 0
            log_env["target_replicas"] = target_replicas
        if target_replicas < min_replicas:
            logger.info("Using min_replicas instead of target", extra=log_env)
            target_replicas = min_replicas
            log_env["target_replicas"] = target_replicas
        adjust_scale(api, target_replicas, cfg["deployment_namespace"],
                     cfg["deployment_name"])
    else:
        adjustment = min([capacity, desired])
        log_env["adjustment"] = adjustment
        logger.info("Need to increase capacity from %s running by %s",
                    running,
                    adjustment,
                    extra=log_env)
        if capacity <= 0:
            logger.info("Maximum capacity reached", extra=log_env)
            return
        adjust_scale(api, running + adjustment, cfg["deployment_namespace"],
                     cfg["deployment_name"])
    logger.info("Done handling worker type", extra=log_env)
Example #21
class TaskStatus:
    """Data and queries about a task status."""

    def __init__(self, json=None, task_id=None, queue=None):
        """Init."""
        if task_id:
            self.task_id = task_id
        if json:
            # We might be passed {'status': ... } or just the contents
            self.status_json = json.get('status', json)
            self.task_id = self.status_json['taskId']
            return
        if not task_id:
            raise ValueError('No task definition or taskId provided')
        self.queue = queue
        if not self.queue:
            self.queue = Queue(tc_options())
        self._fetch_status()

    def _fetch_status(self):
        json = self.queue.status(self.task_id)
        self.status_json = json.get('status', json)

    def __repr__(self):
        """repr."""
        return "<TaskStatus {}>".format(self.task_id)

    def __str__(self):
        """Str representation."""
        return "<TaskStatus {}:{}>".format(self.task_id, self.state)

    @property
    def json(self):
        """Return json as originally presented."""
        return {'status': self.status_json}

    @property
    def state(self):
        """Return current task state."""
        return self.status_json.get('state', '')

    @property
    def has_failures(self):
        """Return True if this task has any run failures."""
        return len([r for r in self.status_json.get('runs', list()) if r.get('state') in ['failed', 'exception']]) > 0

    @property
    def completed(self):
        """Return True if this task has completed.

        Returns: Bool
            if the highest runId has state 'completed'

        """
        return self.state == 'completed'

    def _extract_date(self, run_field, run_id=-1):
        """Return datetime of the given field in the task runs."""
        if not self.status_json.get('runs'):
            return
        field_data = self.status_json['runs'][run_id].get(run_field)
        if not field_data:
            return
        return dateutil.parser.parse(field_data)

    @property
    def scheduled(self):
        """Return datetime of the task scheduled time."""
        if self.state == 'unscheduled':
            return
        return self._extract_date('scheduled')

    @property
    def started(self):
        """Return datetime of the most recent run's start."""
        return self._extract_date('started')

    @property
    def resolved(self):
        """Return datetime of the most recent run's finish time."""
        return self._extract_date('resolved')

    def run_durations(self):
        """Return a list of timedelta objects, of run durations."""
        durations = list()
        if not self.json['status'].get('runs'):
            return durations
        for run in self.status_json.get('runs', list()):
            started = run.get('started')
            resolved = run.get('resolved')
            if started and resolved:
                durations.append(dateutil.parser.parse(resolved) - dateutil.parser.parse(started))
        return durations
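Hypothetical usage of the class above; constructing it with only a task ID fetches the status from the Queue API (the ID below is a placeholder).

status = TaskStatus(task_id='abc123DEFghijklmnopqrs')
print(status.state, status.completed, status.has_failures)
for duration in status.run_durations():
    print(duration.total_seconds())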
Example #22
def main(options):
    log.info('Loading config from %s' % options.config)
    config = load_config(options.config)

    if config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    # Shorthand
    api_root = config.get('api', 'api_root')
    username = config.get('api', 'username')
    password = config.get('api', 'password')
    buildbot_configs = config.get('release-runner', 'buildbot_configs')
    buildbot_configs_branch = config.get('release-runner',
                                         'buildbot_configs_branch')
    sleeptime = config.getint('release-runner', 'sleeptime')
    notify_from = get_config(config, 'release-runner', 'notify_from', None)
    notify_to = get_config(config, 'release-runner', 'notify_to', None)
    docker_worker_key = get_config(config, 'release-runner',
                                   'docker_worker_key', None)
    signing_pvt_key = get_config(config, 'signing', 'pvt_key', None)
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = get_config(config, 'release-runner', 'smtp_server',
                             'localhost')
    tc_config = {
        "credentials": {
            "clientId": get_config(config, "taskcluster", "client_id", None),
            "accessToken": get_config(config, "taskcluster", "access_token",
                                      None),
        }
    }
    configs_workdir = 'buildbot-configs'
    balrog_username = get_config(config, "balrog", "username", None)
    balrog_password = get_config(config, "balrog", "password", None)
    extra_balrog_submitter_params = get_config(
        config, "balrog", "extra_balrog_submitter_params", None)
    beetmover_aws_access_key_id = get_config(config, "beetmover",
                                             "aws_access_key_id", None)
    beetmover_aws_secret_access_key = get_config(config, "beetmover",
                                                 "aws_secret_access_key", None)
    gpg_key_path = get_config(config, "signing", "gpg_key_path", None)

    # TODO: replace release sanity with direct checks of en-US and l10n revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(tc_config)
    index = Index(tc_config)
    queue = Queue(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests()
            if rr.new_releases:
                for release in rr.new_releases:
                    log.info('Got a new release request: %s' % release)
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial,
          args=(buildbot_configs, configs_workdir),
          kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config.sections():
        format_dict = dict(buildbot_configs=configs_workdir)
        for target in config.options('symlinks'):
            symlink = config.get('symlinks', target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)

    # TODO: this won't work for Thunderbird...do we care?
    branch = release["branch"].split("/")[-1]
    branchConfig = readBranchConfig(path.join(configs_workdir, "mozilla"),
                                    branch=branch)

    release_channels = update_channels(
        release["version"], branchConfig["release_channel_mappings"])
    # Candidate releases are split into two graphs, and release-runner only handles
    # the first graph of tasks, so parts like postrelease, push_to_releases/mirrors,
    # and mirror-dependent channels are handled in the second generated graph outside
    # of release-runner. This is not elegant, but it should do the job for now.
    candidate_release = is_candidate_release(release_channels)
    if candidate_release:
        postrelease_enabled = False
        final_verify_channels = [
            c for c in release_channels
            if c not in branchConfig.get('mirror_requiring_channels', [])
        ]
        # TODO - use publish_to_balrog_channels once releasetasks publishes to balrog
        publish_to_balrog_channels = [
            c for c in release_channels
            if c not in branchConfig.get('mirror_requiring_channels', [])
        ]
        push_to_releases_enabled = False
    else:
        postrelease_enabled = branchConfig['postrelease_version_bump_enabled']
        final_verify_channels = release_channels
        publish_to_balrog_channels = release_channels
        push_to_releases_enabled = True

    rc = 0
    for release in rr.new_releases:
        graph_id = slugId()
        try:
            rr.update_status(release, 'Generating task graph')
            l10n_changesets = parsePlainL10nChangesets(
                rr.get_release_l10n(release["name"]))

            kwargs = {
                "public_key":
                docker_worker_key,
                "version":
                release["version"],
                "next_version":
                bump_version(release["version"]),
                "appVersion":
                getAppVersion(release["version"]),
                "buildNumber":
                release["buildNumber"],
                "source_enabled":
                True,
                "checksums_enabled":
                True,
                "repo_path":
                release["branch"],
                "revision":
                release["mozillaRevision"],
                "product":
                release["product"],
                # if mozharness_revision is not passed, use 'revision'
                "mozharness_changeset":
                release.get('mh_changeset') or release['mozillaRevision'],
                "partial_updates":
                getPartials(release),
                "branch":
                branch,
                "updates_enabled":
                bool(release["partials"]),
                "l10n_config":
                get_l10n_config(release, branchConfig, branch, l10n_changesets,
                                index),
                "en_US_config":
                get_en_US_config(release, branchConfig, branch, index),
                "verifyConfigs": {},
                "balrog_api_root":
                branchConfig["balrog_api_root"],
                "funsize_balrog_api_root":
                branchConfig["funsize_balrog_api_root"],
                "balrog_username":
                balrog_username,
                "balrog_password":
                balrog_password,
                "beetmover_aws_access_key_id":
                beetmover_aws_access_key_id,
                "beetmover_aws_secret_access_key":
                beetmover_aws_secret_access_key,
                # TODO: staging specific, make them configurable
                "signing_class":
                "release-signing",
                "bouncer_enabled":
                branchConfig["bouncer_enabled"],
                "release_channels":
                release_channels,
                "final_verify_channels":
                final_verify_channels,
                "signing_pvt_key":
                signing_pvt_key,
                "build_tools_repo_path":
                branchConfig['build_tools_repo_path'],
                "push_to_candidates_enabled":
                branchConfig['push_to_candidates_enabled'],
                "postrelease_bouncer_aliases_enabled":
                branchConfig['postrelease_bouncer_aliases_enabled'],
                "tuxedo_server_url":
                branchConfig['tuxedoServerUrl'],
                "postrelease_version_bump_enabled":
                postrelease_enabled,
                "push_to_releases_enabled":
                push_to_releases_enabled,
                "push_to_releases_automatic":
                branchConfig['push_to_releases_automatic'],
                "beetmover_candidates_bucket":
                branchConfig["beetmover_buckets"][release["product"]],
            }
            if extra_balrog_submitter_params:
                kwargs[
                    "extra_balrog_submitter_params"] = extra_balrog_submitter_params

            validate_graph_kwargs(queue, gpg_key_path, **kwargs)
            graph = make_task_graph(**kwargs)
            rr.update_status(release, "Submitting task graph")
            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print scheduler.createTaskGraph(graph_id, graph)

            rr.mark_as_completed(release)
            email_release_drivers(smtp_server=smtp_server,
                                  from_=notify_from,
                                  to=notify_to,
                                  release=release,
                                  graph_id=graph_id)
        except:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release,
                'Failed to start release promotion (graph ID: %s)' % graph_id)
            log.exception("Failed to start release promotion for graph %s %s",
                          graph_id, release)

    if rc != 0:
        sys.exit(rc)
Example #23
def main(release_runner_config, release_config, tc_config, options):

    api_root = release_runner_config['api']['api_root']
    username = release_runner_config['api']['username']
    password = release_runner_config['api']['password']

    queue = Queue(tc_config)
    index = Index(tc_config)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    log.info('Generating task graph')
    kwargs = {
        # release-runner.ini
        "signing_pvt_key":
        release_config['signing_pvt_key'],
        "public_key":
        release_config['docker_worker_key'],
        "balrog_username":
        release_config['balrog_username'],
        "balrog_password":
        release_config['balrog_password'],
        "beetmover_aws_access_key_id":
        release_config['beetmover_aws_access_key_id'],
        "beetmover_aws_secret_access_key":
        release_config['beetmover_aws_secret_access_key'],
        # ship-it items
        "version":
        release_config["version"],
        "revision":
        release_config["mozilla_revision"],
        "mozharness_changeset":
        release_config.get("mozharness_changeset")
        or release_config["mozilla_revision"],
        "buildNumber":
        release_config["build_number"],
        "l10n_changesets":
        release_config["l10n_changesets"],

        # was branchConfig items
        "balrog_vpn_proxy":
        release_config["balrog_vpn_proxy"],
        "funsize_balrog_api_root":
        release_config["funsize_balrog_api_root"],
        "balrog_api_root":
        release_config["balrog_api_root"],
        "build_tools_repo_path":
        release_config['build_tools_repo_path'],
        "tuxedo_server_url":
        release_config['tuxedo_server_url'],
        "uptake_monitoring_enabled":
        release_config['uptake_monitoring_enabled'],
        "beetmover_candidates_bucket":
        release_config["beetmover_candidates_bucket"],
        "signing_class":
        release_config["signing_class"],
        "accepted_mar_channel_id":
        release_config.get("accepted_mar_channel_id"),
        "signing_cert":
        release_config["signing_cert"],
        "mar_signing_format":
        get_mar_signing_format(release_config["version"]),
        "moz_disable_mar_cert_verification":
        release_config.get("moz_disable_mar_cert_verification"),
        "root_home_dir":
        release_config["root_home_dir"],
        "bouncer_enabled":
        release_config["bouncer_enabled"],
        "updates_builder_enabled":
        release_config["updates_builder_enabled"],
        "update_verify_enabled":
        release_config["update_verify_enabled"],
        "push_to_candidates_enabled":
        release_config['push_to_candidates_enabled'],
        # TODO: temporary config enabled during 53 Fennec beta cycle
        "candidates_fennec_enabled":
        release_config.get('candidates_fennec_enabled'),
        "stage_product":
        release_config['stage_product'],
        "postrelease_bouncer_aliases_enabled":
        release_config['postrelease_bouncer_aliases_enabled'],
        "postrelease_version_bump_enabled":
        release_config['postrelease_version_bump_enabled'],
        "push_to_releases_automatic":
        release_config['push_to_releases_automatic'],
        "partner_repacks_platforms":
        release_config["partner_repacks_platforms"],
        "eme_free_repacks_platforms":
        release_config["eme_free_repacks_platforms"],
        "sha1_repacks_platforms":
        release_config["sha1_repacks_platforms"],
        "repo_path":
        release_config["repo_path"],
        "branch":
        release_config["branch"],
        "product":
        release_config["product"],
        "funsize_product":
        release_config["funsize_product"],
        "release_channels":
        release_config['channels'],
        "final_verify_channels":
        release_config['final_verify_channels'],
        "final_verify_platforms":
        release_config['final_verify_platforms'],
        "uptake_monitoring_platforms":
        release_config['uptake_monitoring_platforms'],
        "source_enabled":
        release_config["source_enabled"],
        "checksums_enabled":
        release_config["checksums_enabled"],
        "binary_transparency_enabled":
        release_config.get("binary_transparency_enabled", False),
        "updates_enabled":
        release_config["updates_enabled"],
        "push_to_releases_enabled":
        release_config["push_to_releases_enabled"],
        "verifyConfigs": {},
        # ESR should not use "esr" suffix here:
        "next_version":
        bump_version(release_config["version"].replace("esr", "")),
        "appVersion":
        getAppVersion(release_config["version"]),
        "partial_updates":
        get_partials(rr, release_config["partials"],
                     release_config['product']),
        # in release-runner.py world we have a concept of branchConfig and release (shipit) vars
        # todo fix get_en_US_config and en_US_config helper methods to not require both
        "l10n_config":
        get_l10n_config(
            index=index,
            product=release_config["product"],
            branch=release_config["branch"],
            revision=release_config["mozilla_revision"],
            platforms=release_config['platforms'],
            l10n_platforms=release_config['l10n_release_platforms'] or {},
            l10n_changesets=release_config["l10n_changesets"],
            tc_task_indexes=None,
        ),
        "en_US_config":
        get_en_US_config(
            index=index,
            product=release_config["product"],
            branch=release_config["branch"],
            revision=release_config["mozilla_revision"],
            platforms=release_config['platforms'],
            tc_task_indexes=None,
        ),
        "extra_balrog_submitter_params":
        release_config['extra_balrog_submitter_params'],
        "publish_to_balrog_channels":
        release_config["publish_to_balrog_channels"],
        "postrelease_mark_as_shipped_enabled":
        release_config["postrelease_mark_as_shipped_enabled"],
        # TODO: use [] when snaps_enabled is landed
        "snap_enabled":
        release_config.get("snap_enabled", False),
        "update_verify_channel":
        release_config["update_verify_channel"],
        "update_verify_requires_cdn_push":
        release_config["update_verify_requires_cdn_push"],
        "release_eta":
        release_config.get("release_eta"),
        "lzma_to_bz2":
        release_config.get("lzma_to_bz2", False),
    }

    task_group_id, toplevel_task_id, tasks = make_task_graph_strict_kwargs(
        **kwargs)
    log.info('Tasks generated, but not yet submitted to Taskcluster.')
    import pprint
    for task_id, task_def in tasks.items():
        log.debug("%s ->\n%s", task_id,
                  pprint.pformat(task_def, indent=4, width=160))

    if not options.dry_run:
        submit_parallelized(queue, tasks)
        resolve_task(queue, toplevel_task_id)
        log_line = 'Task graph submitted: https://tools.taskcluster.net/groups/{}'.format(
            task_group_id)
        log.info(log_line)
        # TODO: We shouldn't need this extra print, but at the moment, calling the script in verbose
        # mode doesn't output anything.
        print log_line

    return task_group_id
Example #24
import re

from taskcluster import Queue

from fennec_aurora_task_creator.exceptions import UnmatchedRouteError

_queue = Queue()

ROUTE_MATCHER = re.compile(
    r'index.gecko.v2.([^.]+).nightly.revision.([^.]+).mobile.([^.]+)')


def fetch_task_definition(task_id):
    return _queue.task(task_id)


def fetch_artifacts_list(task_id):
    return _queue.listLatestArtifacts(task_id)['artifacts']


def create_task(payload, task_id):
    return _queue.createTask(payload=payload, taskId=task_id)


def pluck_repository(task_definition):
    return _match_field_in_routes(task_definition, 'repository', 1)


def pluck_revision(task_definition):
    return _match_field_in_routes(task_definition, 'revision', 2)
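For reference, a hypothetical route in the form ROUTE_MATCHER expects; _match_field_in_routes (not shown here) presumably iterates a task definition's routes and applies this regex.

route = 'index.gecko.v2.mozilla-aurora.nightly.revision.abcdef123456.mobile.android-api-15'
match = ROUTE_MATCHER.match(route)
print(match.group(1))  # repository name, e.g. 'mozilla-aurora'
print(match.group(2))  # revision
print(match.group(3))  # build/platform segment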