Example #1
def main(options):
    log.info('Loading config from %s' % options.config)

    with open(options.config, 'r') as config_file:
        config = yaml.safe_load(config_file)

    if config['release-runner'].get('verbose', False):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    api_root = config['api']['api_root']
    username = config['api']['username']
    password = config['api']['password']

    rr_config = config['release-runner']

    buildbot_configs = rr_config['buildbot_configs']
    buildbot_configs_branch = rr_config['buildbot_configs_branch']
    sleeptime = rr_config['sleeptime']
    notify_from = rr_config.get('notify_from')
    notify_to = rr_config.get('notify_to_announce')
    docker_worker_key = rr_config.get('docker_worker_key')
    signing_pvt_key = config['signing'].get('pvt_key')
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = rr_config.get('smtp_server', 'localhost')
    tc_config = {
        "credentials": {
            "clientId": config['taskcluster'].get('client_id'),
            "accessToken": config['taskcluster'].get('access_token'),
        }
    }
    # Extend tc_config for retries, see Bug 1293744
    # https://github.com/taskcluster/taskcluster-client.py/blob/0.0.24/taskcluster/client.py#L30
    # This is a stopgap until Bug 1259627 is fixed.
    retrying_tc_config = tc_config.copy()
    retrying_tc_config.update({"maxRetries": 12})
    balrog_username = config['balrog'].get("username")
    balrog_password = config["balrog"].get("password")
    extra_balrog_submitter_params = config["balrog"].get(
        "extra_balrog_submitter_params", "")
    beetmover_aws_access_key_id = config["beetmover"].get("aws_access_key_id")
    beetmover_aws_secret_access_key = config["beetmover"].get(
        "aws_secret_access_key")
    gpg_key_path = config["signing"].get("gpg_key_path")

    # TODO: replace release sanity with direct checks of en-US and l10n
    # revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(retrying_tc_config)
    index = Index(tc_config)
    queue = Queue(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests([r['pattern'] for r in config['releases']])
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(
                    rr, config['releases'])
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial,
          args=(buildbot_configs, CONFIGS_WORKDIR),
          kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config:
        format_dict = dict(buildbot_configs=CONFIGS_WORKDIR)
        for target in config['symlinks']:
            symlink = config['symlinks'].get(target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)
    rc = 0
    for release in new_releases:
        branchConfig = get_branch_config(release)
        # Candidate releases are split into two graphs, and release-runner only
        # handles the first graph of tasks. Parts like postrelease,
        # push_to_releases/mirrors, and mirror-dependent channels are handled
        # in the second generated graph, outside of release-runner.
        # This is not elegant, but it should do the job for now.
        release_channels = release['release_channels']
        candidate_release = is_candidate_release(release_channels)
        if candidate_release:
            postrelease_enabled = False
            postrelease_bouncer_aliases_enabled = False
            final_verify_channels = [
                c for c in release_channels
                if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            publish_to_balrog_channels = [
                c for c in release_channels
                if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            push_to_releases_enabled = False
            postrelease_mark_as_shipped_enabled = False
        else:
            postrelease_enabled = branchConfig[
                'postrelease_version_bump_enabled'][release['product']]
            postrelease_bouncer_aliases_enabled = branchConfig[
                'postrelease_bouncer_aliases_enabled']
            postrelease_mark_as_shipped_enabled = branchConfig[
                'postrelease_mark_as_shipped_enabled']
            final_verify_channels = release_channels
            publish_to_balrog_channels = release_channels
            push_to_releases_enabled = True

        # XXX: Works with neither Fennec nor Thunderbird
        platforms = branchConfig['release_platforms']

        try:
            graph_id = slugId()
            done = are_en_us_builds_completed(
                index=index,
                release_name=release['name'],
                submitted_at=release['submittedAt'],
                revision=release['mozillaRevision'],
                platforms=platforms,
                queue=queue,
                tc_task_indexes=branchConfig['tc_indexes'][release['product']])
            if not done:
                log.info(
                    'Builds are not completed yet, skipping release "%s" for now',
                    release['name'])
                rr.update_status(release, 'Waiting for builds to be completed')
                continue

            log.info('Every build is completed for release: %s',
                     release['name'])
            rr.update_status(release, 'Generating task graph')

            kwargs = {
                "public_key":
                docker_worker_key,
                "version":
                release["version"],
                # ESR should not use "esr" suffix here:
                "next_version":
                bump_version(release["version"].replace("esr", "")),
                "appVersion":
                getAppVersion(release["version"]),
                "buildNumber":
                release["buildNumber"],
                "release_eta":
                release.get("release_eta"),
                "source_enabled":
                True,
                "checksums_enabled":
                True,
                "binary_transparency_enabled":
                branchConfig.get("binary_transparency_enabled", False),
                "repo_path":
                release["branch"],
                "revision":
                release["mozillaRevision"],
                "product":
                release["product"],
                "funsize_product":
                get_funsize_product(release["product"]),
                # if mozharness_revision is not passed, use 'revision'
                "mozharness_changeset":
                release.get('mh_changeset') or release['mozillaRevision'],
                "partial_updates":
                release.get('partial_updates', list()),
                "branch":
                release['branchShortName'],
                "updates_enabled":
                bool(release["partials"]),
                "l10n_config":
                get_l10n_config(
                    index=index,
                    product=release["product"],
                    branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['platforms'],
                    l10n_platforms=branchConfig['l10n_release_platforms'],
                    l10n_changesets=release['l10n_changesets'],
                    tc_task_indexes=branchConfig['tc_indexes'][
                        release['product']],
                ),
                "en_US_config":
                get_en_US_config(
                    index=index,
                    product=release["product"],
                    branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['release_platforms'],
                    tc_task_indexes=branchConfig['tc_indexes'][
                        release['product']],
                ),
                "verifyConfigs": {},
                "balrog_api_root":
                branchConfig["balrog_api_root"],
                "funsize_balrog_api_root":
                branchConfig["funsize_balrog_api_root"],
                "balrog_username":
                balrog_username,
                "balrog_password":
                balrog_password,
                "beetmover_aws_access_key_id":
                beetmover_aws_access_key_id,
                "beetmover_aws_secret_access_key":
                beetmover_aws_secret_access_key,
                # TODO: staging-specific, make them configurable
                "signing_class":
                branchConfig['signing_class'][release["product"]],
                "accepted_mar_channel_id":
                branchConfig.get('accepted_mar_channel_id',
                                 {}).get(release["product"]),
                "signing_cert":
                branchConfig['signing_cert'][release["product"]],
                "moz_disable_mar_cert_verification":
                branchConfig.get('moz_disable_mar_cert_verification'),
                "root_home_dir":
                branchConfig['root_home_dir'][release["product"]],
                "bouncer_enabled":
                branchConfig["bouncer_enabled"],
                "updates_builder_enabled":
                branchConfig["updates_builder_enabled"],
                "update_verify_enabled":
                branchConfig["update_verify_enabled"],
                "release_channels":
                release_channels,
                "final_verify_channels":
                final_verify_channels,
                "final_verify_platforms":
                branchConfig['release_platforms'],
                "uptake_monitoring_platforms":
                branchConfig['uptake_monitoring_platforms'][
                    release["product"]],
                "signing_pvt_key":
                signing_pvt_key,
                "build_tools_repo_path":
                branchConfig['build_tools_repo_path'],
                "push_to_candidates_enabled":
                branchConfig['push_to_candidates_enabled'],
                # TODO: temporary config enabled during 53 Fennec beta cycle
                "candidates_fennec_enabled":
                branchConfig.get('candidates_fennec_enabled'),
                "stage_product":
                branchConfig['stage_product'][release['product']],
                "postrelease_bouncer_aliases_enabled":
                postrelease_bouncer_aliases_enabled,
                "uptake_monitoring_enabled":
                branchConfig['uptake_monitoring_enabled'],
                "tuxedo_server_url":
                branchConfig['tuxedoServerUrl'],
                "postrelease_version_bump_enabled":
                postrelease_enabled,
                "postrelease_mark_as_shipped_enabled":
                postrelease_mark_as_shipped_enabled,
                "push_to_releases_enabled":
                push_to_releases_enabled,
                "push_to_releases_automatic":
                branchConfig['push_to_releases_automatic'],
                "beetmover_candidates_bucket":
                branchConfig["beetmover_buckets"][release["product"]],
                "partner_repacks_platforms":
                branchConfig.get("partner_repacks_platforms",
                                 {}).get(release["product"], []),
                "eme_free_repacks_platforms":
                branchConfig.get("eme_free_repacks_platforms",
                                 {}).get(release["product"], []),
                "sha1_repacks_platforms":
                branchConfig.get("sha1_repacks_platforms", []),
                "l10n_changesets":
                release['l10n_changesets'],
                "extra_balrog_submitter_params":
                extra_balrog_submitter_params + " --product " +
                release["product"].capitalize(),
                "publish_to_balrog_channels":
                publish_to_balrog_channels,
                "snap_enabled":
                branchConfig.get("snap_enabled",
                                 {}).get(release["product"], False),
                "update_verify_channel":
                branchConfig.get("update_verify_channel",
                                 {}).get(release["product"]),
                "update_verify_requires_cdn_push":
                branchConfig.get("update_verify_requires_cdn_push", False),
            }

            # TODO: en-US validation for multiple tasks
            # validate_graph_kwargs(queue, gpg_key_path, **kwargs)
            graph = make_task_graph_strict_kwargs(**kwargs)
            rr.update_status(release, "Submitting task graph")
            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print(scheduler.createTaskGraph(graph_id, graph))

            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server,
                                  from_=notify_from,
                                  to=notify_to,
                                  release=release,
                                  task_group_id=graph_id,
                                  l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release,
                'Failed to start release promotion (graph ID: %s). Error(s): %s'
                % (graph_id, exception))
            log.exception(
                'Failed to start release "%s" promotion for graph %s. Error(s): %s',
                release['name'], graph_id, exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)
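For reference, here is a rough sketch of the config shape this first variant expects, written as the Python dict that yaml.safe_load would return. The section and key names are taken from the lookups in the code above; every value is an illustrative placeholder rather than a real setting.

# Hypothetical config layout for Example #1 (placeholder values only).
example_config = {
    "api": {
        "api_root": "https://ship-it.example/api",
        "username": "shipit-user",
        "password": "secret",
    },
    "release-runner": {
        "verbose": True,
        "buildbot_configs": "https://hg.example/build/buildbot-configs",
        "buildbot_configs_branch": "production",
        "sleeptime": 30,
        "notify_from": "release-runner@example.com",
        "notify_to_announce": "drivers@example.com, qa@example.com",
        "docker_worker_key": "/path/to/docker-worker-pub.pem",
        "smtp_server": "localhost",
    },
    "taskcluster": {"client_id": "release-runner", "access_token": "<token>"},
    "balrog": {
        "username": "balrog-user",
        "password": "secret",
        "extra_balrog_submitter_params": "",
    },
    "beetmover": {
        "aws_access_key_id": "<key id>",
        "aws_secret_access_key": "<secret key>",
    },
    "signing": {"pvt_key": "/path/to/signing.pvt", "gpg_key_path": "/path/to/gpg"},
    "releases": [{"pattern": "firefox-.*"}],
    # Optional: maps symlink target -> symlink path; the path may reference
    # {buildbot_configs}, which is substituted with CONFIGS_WORKDIR.
    "symlinks": {"release_config.py": "{buildbot_configs}/mozilla/release_config.py"},
}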
Example #2
def main(options):
    log.info('Loading config from %s' % options.config)

    with open(options.config, 'r') as config_file:
        config = yaml.safe_load(config_file)

    if config['release-runner'].get('verbose', False):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    api_root = config['api']['api_root']
    username = config['api']['username']
    password = config['api']['password']

    github_token = config['git']['github_token']
    partner_config = deepcopy(config['partners'])
    rr_config = config['release-runner']
    sleeptime = rr_config['sleeptime']
    smtp_server = rr_config.get('smtp_server', 'localhost')
    notify_from = rr_config.get('notify_from')
    notify_to = rr_config.get('notify_to_announce')

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    tc_config = {
        "credentials": {
            "clientId": config["taskcluster"].get("client_id"),
            "accessToken": config["taskcluster"].get("access_token"),
        },
        "maxRetries": 12,
    }
    queue = taskcluster.Queue(tc_config)

    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests([r['pattern'] for r in config['releases']])
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(
                    rr, config['releases'])
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    rc = 0
    for release in new_releases:
        try:
            version = release["version"]
            # XXX we may want to move next_version logic to the release_promotion action.
            next_version = bump_version(version.replace("esr", ""))
            project = release["branchShortName"]
            revision = release["mozillaRevision"]
            # XXX we probably want to find a decision task ID for the action, and a separate
            # one for the revision-to-promote, to allow for https://trello.com/c/u6MHrz8y.
            decision_task_id = find_decision_task_id(
                trust_domain=get_trust_domain(config['releases'], release),
                project=project, revision=revision,
            )
            action_task_input = {
                "build_number": release["buildNumber"],
                "next_version": next_version,
                # Specify the version rather than relying on the in-tree
                # version, so that if a version bump lands between the build
                # and the action task, we still use the correct version.
                "version": version,
                "release_promotion_flavor": "promote_{}".format(release["product"]),
                "previous_graph_ids": [decision_task_id],
                "release_eta": release.get("release_eta", ""),
            }
            if "partial_updates" in release:
                action_task_input["partial_updates"] = {}
                for partial_version, info in release["partial_updates"].items():
                    action_task_input["partial_updates"][partial_version] = {
                        "buildNumber": info["buildNumber"],
                        "locales": info["locales"]
                    }
            if release["product"] == "firefox":
                if is_rc(release):
                    # XXX The current plan is to run promote_firefox_rc, then
                    # ship_firefox_rc, then push_firefox, then ship_firefox.
                    # We need to support this workflow. However, rr3 doesn't
                    # support anything more than the first action task yet,
                    # so this isn't a missing feature for RCs specifically.
                    action_task_input["release_promotion_flavor"] = "{}_rc".format(
                        action_task_input["release_promotion_flavor"]
                    )
            if not is_partner_enabled(release, partner_config['partner_min_version']):
                action_task_input['release_enable_emefree'] = False
                action_task_input['release_enable_partners'] = False
            action_task_id, action_task = generate_action_task(
                decision_task_id=decision_task_id,
                action_task_input=action_task_input,
            )
            submit_action_task(queue=queue, action_task_id=action_task_id,
                               action_task=action_task)
            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server, from_=notify_from,
                                  to=[get_notify_to(config['releases'], release, notify_to)],
                                  release=release,
                                  task_group_id=action_task_id, l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release,
                'Failed to start release promotion. Error(s): %s' % (exception)
            )
            log.exception('Failed to start release "%s". Error(s): %s',
                          release['name'], exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)
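To make the hand-off concrete, here is a hypothetical action_task_input that this variant (and the nearly identical Example #3) would submit for an RC Firefox release with one partial update. All values are placeholders, and the actual schema is owned by the in-tree release_promotion action rather than by this script.

# Hypothetical payload for an RC build of Firefox with one partial update.
action_task_input = {
    "build_number": 1,
    "next_version": "62.0.1",  # assumed bump_version() result for "62.0"
    "version": "62.0",
    # "_rc" suffix appended because is_rc(release) returned True here.
    "release_promotion_flavor": "promote_firefox_rc",
    "previous_graph_ids": ["<decision task id>"],
    "release_eta": "",
    "partial_updates": {
        "61.0.2": {"buildNumber": 1, "locales": ["de", "en-US", "fr"]},
    },
    # Only set when is_partner_enabled() returned False for this release:
    "release_enable_emefree": False,
    "release_enable_partners": False,
}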
Example #3
def main(options):
    log.info('Loading config from %s' % options.config)

    with open(options.config, 'r') as config_file:
        config = yaml.safe_load(config_file)

    if config['release-runner'].get('verbose', False):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    api_root = config['api']['api_root']
    username = config['api']['username']
    password = config['api']['password']

    github_token = config['git']['github_token']
    partner_config = deepcopy(config['partners'])
    rr_config = config['release-runner']
    sleeptime = rr_config['sleeptime']
    smtp_server = rr_config.get('smtp_server', 'localhost')
    notify_from = rr_config.get('notify_from')
    notify_to = rr_config.get('notify_to_announce')
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    tc_config = {
        "credentials": {
            "clientId": config["taskcluster"].get("client_id"),
            "accessToken": config["taskcluster"].get("access_token"),
        },
        "maxRetries": 12,
    }
    queue = taskcluster.Queue(tc_config)

    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests([r['pattern'] for r in config['releases']])
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(
                    rr, config['releases'])
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    rc = 0
    for release in new_releases:
        try:
            version = release["version"]
            # XXX we may want to move next_version logic to the release_promotion action.
            next_version = bump_version(version.replace("esr", ""))
            project = release["branchShortName"]
            revision = release["mozillaRevision"]
            # XXX we probably want to find a decision task ID for the action, and a separate
            # one for the revision-to-promote, to allow for https://trello.com/c/u6MHrz8y.
            decision_task_id = find_decision_task_id(
                trust_domain=get_trust_domain(config['releases'], release),
                project=project,
                revision=revision,
            )
            action_task_input = {
                "build_number": release["buildNumber"],
                "next_version": next_version,
                # Specify the version rather than relying on the in-tree
                # version, so that if a version bump lands between the build
                # and the action task, we still use the correct version.
                "version": version,
                "release_promotion_flavor":
                "promote_{}".format(release["product"]),
                "previous_graph_ids": [decision_task_id],
                "release_eta": release.get("release_eta", ""),
            }
            if "partial_updates" in release:
                action_task_input["partial_updates"] = {}
                for partial_version, info in release["partial_updates"].items():
                    action_task_input["partial_updates"][partial_version] = {
                        "buildNumber": info["buildNumber"],
                        "locales": info["locales"]
                    }
            if release["product"] == "firefox":
                if is_rc(release):
                    # XXX The current plan is to run promote_firefox_rc, then
                    # ship_firefox_rc, then push_firefox, then ship_firefox.
                    # We need to support this workflow. However, rr3 doesn't
                    # support anything more than the first action task yet,
                    # so this isn't a missing feature for RCs specifically.
                    action_task_input[
                        "release_promotion_flavor"] = "{}_rc".format(
                            action_task_input["release_promotion_flavor"])
            if not is_partner_enabled(release,
                                      partner_config['partner_min_version']):
                action_task_input['release_enable_emefree'] = False
                action_task_input['release_enable_partners'] = False
            action_task_id, action_task = generate_action_task(
                decision_task_id=decision_task_id,
                action_task_input=action_task_input,
            )
            submit_action_task(queue=queue,
                               action_task_id=action_task_id,
                               action_task=action_task)
            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server,
                                  from_=notify_from,
                                  to=notify_to,
                                  release=release,
                                  task_group_id=action_task_id,
                                  l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release, 'Failed to start release promotion. Error(s): %s' %
                (exception))
            log.exception('Failed to start release "%s". Error(s): %s',
                          release['name'], exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)
Example #4
def main(options):
    log.info('Loading config from %s' % options.config)
    config = load_config(options.config)

    if config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    # Shorthand
    api_root = config.get('api', 'api_root')
    username = config.get('api', 'username')
    password = config.get('api', 'password')
    buildbot_configs = config.get('release-runner', 'buildbot_configs')
    buildbot_configs_branch = config.get('release-runner',
                                         'buildbot_configs_branch')
    sleeptime = config.getint('release-runner', 'sleeptime')
    notify_from = get_config(config, 'release-runner', 'notify_from', None)
    notify_to = get_config(config, 'release-runner', 'notify_to_announce', None)
    docker_worker_key = get_config(config, 'release-runner',
                                   'docker_worker_key', None)
    signing_pvt_key = get_config(config, 'signing', 'pvt_key', None)
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = get_config(config, 'release-runner', 'smtp_server',
                             'localhost')
    tc_config = {
        "credentials": {
            "clientId": get_config(config, "taskcluster", "client_id", None),
            "accessToken": get_config(config, "taskcluster", "access_token", None),
        }
    }
    # Extend tc_config for retries, see Bug 1293744
    # https://github.com/taskcluster/taskcluster-client.py/blob/0.0.24/taskcluster/client.py#L30
    # This is a stopgap until Bug 1259627 is fixed.
    retrying_tc_config = tc_config.copy()
    retrying_tc_config.update({"maxRetries": 12})
    balrog_username = get_config(config, "balrog", "username", None)
    balrog_password = get_config(config, "balrog", "password", None)
    extra_balrog_submitter_params = get_config(config, "balrog", "extra_balrog_submitter_params", None)
    beetmover_aws_access_key_id = get_config(config, "beetmover", "aws_access_key_id", None)
    beetmover_aws_secret_access_key = get_config(config, "beetmover", "aws_secret_access_key", None)
    gpg_key_path = get_config(config, "signing", "gpg_key_path", None)

    # TODO: replace release sanity with direct checks of en-US and l10n revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(retrying_tc_config)
    index = Index(tc_config)
    queue = Queue(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests()
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(rr)
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial, args=(buildbot_configs, CONFIGS_WORKDIR), kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config.sections():
        format_dict = dict(buildbot_configs=CONFIGS_WORKDIR)
        for target in config.options('symlinks'):
            symlink = config.get('symlinks', target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)
    rc = 0
    for release in new_releases:
        branchConfig = get_branch_config(release)
        # Candidate releases are split into two graphs and release-runner only handles the first
        # graph of tasks. Parts like postrelease, push_to_releases/mirrors, and mirror-dependent
        # channels are handled in the second generated graph, outside of release-runner.
        # This is not elegant, but it should do the job for now.
        release_channels = release['release_channels']
        candidate_release = is_candidate_release(release_channels)
        if candidate_release:
            postrelease_enabled = False
            postrelease_bouncer_aliases_enabled = False
            final_verify_channels = [
                c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            publish_to_balrog_channels = [
                c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            push_to_releases_enabled = False
            postrelease_mark_as_shipped_enabled = False
        else:
            postrelease_enabled = branchConfig['postrelease_version_bump_enabled']
            postrelease_bouncer_aliases_enabled = branchConfig['postrelease_bouncer_aliases_enabled']
            postrelease_mark_as_shipped_enabled = branchConfig['postrelease_mark_as_shipped_enabled']
            final_verify_channels = release_channels
            publish_to_balrog_channels = release_channels
            push_to_releases_enabled = True

        ship_it_product_name = release['product']
        tc_product_name = branchConfig['stage_product'][ship_it_product_name]
        # XXX: Works with neither Fennec nor Thunderbird
        platforms = branchConfig['release_platforms']

        try:
            if not are_en_us_builds_completed(index, release_name=release['name'], submitted_at=release['submittedAt'],
                                              branch=release['branchShortName'], revision=release['mozillaRevision'],
                                              tc_product_name=tc_product_name, platforms=platforms):
                log.info('Builds are not completed yet, skipping release "%s" for now', release['name'])
                rr.update_status(release, 'Waiting for builds to be completed')
                continue

            log.info('Every build is completed for release: %s', release['name'])
            graph_id = slugId()

            rr.update_status(release, 'Generating task graph')

            kwargs = {
                "public_key": docker_worker_key,
                "version": release["version"],
                # ESR should not use "esr" suffix here:
                "next_version": bump_version(release["version"].replace("esr", "")),
                "appVersion": getAppVersion(release["version"]),
                "buildNumber": release["buildNumber"],
                "source_enabled": True,
                "checksums_enabled": True,
                "repo_path": release["branch"],
                "revision": release["mozillaRevision"],
                "product": release["product"],
                # if mozharness_revision is not passed, use 'revision'
                "mozharness_changeset": release.get('mh_changeset') or release['mozillaRevision'],
                "partial_updates": release['partial_updates'],
                "branch": release['branchShortName'],
                "updates_enabled": bool(release["partials"]),
                "l10n_config": get_l10n_config(
                    index=index, product=release["product"], branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['platforms'],
                    l10n_platforms=branchConfig['l10n_release_platforms'],
                    l10n_changesets=release['l10n_changesets']
                ),
                "en_US_config": get_en_US_config(
                    index=index, product=release["product"], branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['release_platforms']
                ),
                "verifyConfigs": {},
                "balrog_api_root": branchConfig["balrog_api_root"],
                "funsize_balrog_api_root": branchConfig["funsize_balrog_api_root"],
                "balrog_username": balrog_username,
                "balrog_password": balrog_password,
                "beetmover_aws_access_key_id": beetmover_aws_access_key_id,
                "beetmover_aws_secret_access_key": beetmover_aws_secret_access_key,
                # TODO: staging-specific, make them configurable
                "signing_class": "release-signing",
                "bouncer_enabled": branchConfig["bouncer_enabled"],
                "updates_builder_enabled": branchConfig["updates_builder_enabled"],
                "update_verify_enabled": branchConfig["update_verify_enabled"],
                "release_channels": release_channels,
                "final_verify_channels": final_verify_channels,
                "final_verify_platforms": branchConfig['release_platforms'],
                "uptake_monitoring_platforms": branchConfig['release_platforms'],
                "signing_pvt_key": signing_pvt_key,
                "build_tools_repo_path": branchConfig['build_tools_repo_path'],
                "push_to_candidates_enabled": branchConfig['push_to_candidates_enabled'],
                "postrelease_bouncer_aliases_enabled": postrelease_bouncer_aliases_enabled,
                "uptake_monitoring_enabled": branchConfig['uptake_monitoring_enabled'],
                "tuxedo_server_url": branchConfig['tuxedoServerUrl'],
                "postrelease_version_bump_enabled": postrelease_enabled,
                "postrelease_mark_as_shipped_enabled": postrelease_mark_as_shipped_enabled,
                "push_to_releases_enabled": push_to_releases_enabled,
                "push_to_releases_automatic": branchConfig['push_to_releases_automatic'],
                "beetmover_candidates_bucket": branchConfig["beetmover_buckets"][release["product"]],
                "partner_repacks_platforms": branchConfig.get("partner_repacks_platforms", []),
                "l10n_changesets": release['l10n_changesets'],
                "extra_balrog_submitter_params": extra_balrog_submitter_params,
                "publish_to_balrog_channels": publish_to_balrog_channels,
                "snap_enabled": branchConfig.get("snap_enabled", False),
            }

            validate_graph_kwargs(queue, gpg_key_path, **kwargs)
            graph = make_task_graph_strict_kwargs(**kwargs)
            rr.update_status(release, "Submitting task graph")
            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print(scheduler.createTaskGraph(graph_id, graph))

            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server, from_=notify_from,
                                  to=notify_to, release=release,
                                  task_group_id=graph_id, l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release,
                'Failed to start release promotion (graph ID: %s). Error(s): %s' % (graph_id, exception)
            )
            log.exception('Failed to start release "%s" promotion for graph %s. Error(s): %s',
                          release['name'], graph_id, exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)
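Unlike the YAML-based variants, this version reads an INI-style file through load_config()/get_config(). The sketch below shows the sections and options it expects, with the access pattern mirrored using Python 2's stdlib ConfigParser; option names come from the calls above, values are placeholders, and load_config() itself is not shown in the example, so treat this only as an approximation.

# Hypothetical INI layout for Example #4 (placeholder values only).
import io
import ConfigParser  # Python 2 module name, matching the era of this code

EXAMPLE_INI = u"""
[api]
api_root = https://ship-it.example/api
username = shipit-user
password = secret

[release-runner]
verbose = yes
buildbot_configs = https://hg.example/build/buildbot-configs
buildbot_configs_branch = production
sleeptime = 30
notify_from = release-runner@example.com
notify_to_announce = drivers@example.com
docker_worker_key = /path/to/docker-worker-pub.pem
smtp_server = localhost

[taskcluster]
client_id = release-runner
access_token = <token>

[balrog]
username = balrog-user
password = secret
extra_balrog_submitter_params =

[beetmover]
aws_access_key_id = <key id>
aws_secret_access_key = <secret key>

[signing]
pvt_key = /path/to/signing.pvt
gpg_key_path = /path/to/gpg

[symlinks]
release_config.py = {buildbot_configs}/mozilla/release_config.py
"""

parser = ConfigParser.SafeConfigParser()
parser.readfp(io.StringIO(EXAMPLE_INI))
parser.get('api', 'api_root')                   # -> 'https://ship-it.example/api'
parser.getint('release-runner', 'sleeptime')    # -> 30
parser.getboolean('release-runner', 'verbose')  # -> True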
Example #5
def main(options):
    log.info('Loading config from %s' % options.config)

    with open(options.config, 'r') as config_file:
        config = yaml.safe_load(config_file)

    if config['release-runner'].get('verbose', False):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    api_root = config['api']['api_root']
    username = config['api']['username']
    password = config['api']['password']

    rr_config = config['release-runner']
    sleeptime = rr_config['sleeptime']
    smtp_server = rr_config.get('smtp_server', 'localhost')
    notify_from = rr_config.get('notify_from')
    notify_to = rr_config.get('notify_to_announce')
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    tc_config = {
        "credentials": {
            "clientId": config["taskcluster"].get("client_id"),
            "accessToken": config["taskcluster"].get("access_token"),
        },
        "maxRetries": 12,
    }
    queue = taskcluster.Queue(tc_config)

    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests([r['pattern'] for r in config['releases']])
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(
                    rr, config['releases'])
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    rc = 0
    for release in new_releases:
        try:
            next_version = bump_version(release["version"].replace("esr", ""))
            project = release["branchShortName"]
            revision = release["mozillaRevision"]
            decision_task_id = find_decision_task_id(project, revision)
            action_task_input = {
                "build_number": release["buildNumber"],
                "next_version": next_version,
                "release_promotion_flavor":
                "promote_{}".format(release["product"]),
                "previous_graph_ids": [decision_task_id],
            }
            if "partial_updates" in release:
                action_task_input["partial_updates"] = {}
                for version, info in release["partial_updates"].items():
                    action_task_input["partial_updates"][version] = {
                        "buildNumber": info["buildNumber"],
                        "locales": info["locales"]
                    }
            if release["product"] == "firefox":
                if "b" in release["version"]:
                    action_task_input["desktop_release_type"] = "beta"
                elif "esr" in release["version"]:
                    action_task_input["desktop_release_type"] = "esr"
                else:
                    # RC release types will enable beta-channel testing &
                    # shipping. We need this for all "final" releases
                    # and also any releases that include a beta as a partial.
                    # The assumption that "shipping to beta channel" always
                    # implies other RC behaviour is bound to break at some
                    # point, but this works for now.
                    if isFinalRelease(release["version"]):
                        action_task_input["desktop_release_type"] = "rc"
                    else:
                        for version in release["partial_updates"]:
                            if "b" in version:
                                action_task_input[
                                    "desktop_release_type"] = "rc"
                                break
                        else:
                            action_task_input[
                                "desktop_release_type"] = "release"
            elif release["product"] == "devedition":
                action_task_input["desktop_release_type"] = "devedition"
            action_task_id, action_task = generate_action_task(
                project=release["branchShortName"],
                revision=release["mozillaRevision"],
                action_task_input=action_task_input,
            )
            submit_action_task(queue=queue,
                               action_task_id=action_task_id,
                               action_task=action_task)
            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server,
                                  from_=notify_from,
                                  to=notify_to,
                                  release=release,
                                  task_group_id=action_task_id,
                                  l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release, 'Failed to start release promotion. Error(s): %s' %
                (exception))
            log.exception('Failed to start release "%s". Error(s): %s',
                          release['name'], exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)
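For a concrete sense of the desktop_release_type branching above, the comments below walk a few hypothetical inputs through it, assuming isFinalRelease() is true only for plain X.0-style version strings (an assumption about that helper, which is not shown here).

# Hypothetical (product, version, partial_updates) -> desktop_release_type:
#   ("firefox",    "63.0b7",    ...)                        -> "beta"        ("b" in version)
#   ("firefox",    "60.2.0esr", ...)                        -> "esr"         ("esr" in version)
#   ("firefox",    "62.0",      {"61.0.2": ...})            -> "rc"          (final release)
#   ("firefox",    "62.0.2",    {"62.0b14": ...})           -> "rc"          (beta partial present)
#   ("firefox",    "62.0.2",    {"62.0": ..., "61.0": ...}) -> "release"     (no beta partial)
#   ("devedition", "63.0b7",    ...)                        -> "devedition"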
Example #6
def main(options):
    log.info('Loading config from %s' % options.config)

    with open(options.config, 'r') as config_file:
        config = yaml.safe_load(config_file)

    if config['release-runner'].get('verbose', False):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    api_root = config['api']['api_root']
    username = config['api']['username']
    password = config['api']['password']

    rr_config = config['release-runner']
    sleeptime = rr_config['sleeptime']
    smtp_server = rr_config.get('smtp_server', 'localhost')
    notify_from = rr_config.get('notify_from')
    notify_to = rr_config.get('notify_to_announce')
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)

    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests([r['pattern'] for r in config['releases']])
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(
                    rr, config['releases'])
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    rc = 0
    for release in new_releases:
        try:
            relconfigs_tmpl = '{}_{}_fennec_full_graph.yml'.format(
                rr_config['relconfigs_prefix'], release['branchShortName'])
            configs = '/'.join([rr_config['relconfigs_root'], relconfigs_tmpl])

            # Load the config consumed by the releasetasks_graph_gen.py script
            with open(rr_config['release_runner_config']) as rr_config_file:
                release_runner_config = yaml.safe_load(rr_config_file)
            tc_config = {
                "credentials": {
                    "clientId":
                    release_runner_config["taskcluster"].get("client_id"),
                    "accessToken":
                    release_runner_config["taskcluster"].get("access_token"),
                },
                "maxRetries": 12,
            }
            branch_product_config = load_branch_and_product_config(configs)

            # hack: we simulate the options argument for the
            # releasetasks_graph_gen.py script in order to reuse some of the
            # functions defined there
            fake_options = OptionParser()
            fake_options.version = release['version']
            fake_options.build_number = release['buildNumber']
            fake_options.mozilla_revision = release['mozillaRevision']
            fake_options.common_task_id = None
            fake_options.partials = ''
            fake_options.dry_run = False

            releasetasks_kwargs = {}
            releasetasks_kwargs.update(branch_product_config)
            releasetasks_kwargs.update(
                get_release_items_from_runner_config(release_runner_config))
            releasetasks_kwargs.update(
                get_unique_release_items(fake_options, tc_config))

            rr.update_status(release, 'Generating task graph')

            task_group_id = gen_main(release_runner_config,
                                     releasetasks_kwargs,
                                     tc_config,
                                     options=fake_options)

            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server,
                                  from_=notify_from,
                                  to=notify_to,
                                  release=release,
                                  task_group_id=task_group_id,
                                  l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release, 'Failed to start release promotion. Error(s): %s' %
                (exception))
            log.exception('Failed to start release "%s". Error(s): %s',
                          release['name'], exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)
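All of the variants above rely only on options.config (plus a module-level log and the imported helpers). A minimal, hypothetical entry point that could drive any of these main() functions might look like the sketch below; the -c/--config flag name and the logger setup are assumptions, not taken from the original script.

# Hypothetical driver; the real script's argument parsing is not shown above.
import logging
from optparse import OptionParser

log = logging.getLogger(__name__)

if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option('-c', '--config', dest='config',
                      help='Path to the release-runner config file')
    options, args = parser.parse_args()
    if not options.config:
        parser.error('--config is required')
    main(options)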