Esempio n. 1
0
    def test_delete_watcher_if_all_builds_are_completed(self):
        # Once every build is done the watcher is dropped, so a second call
        # must re-query all 8 task indexes instead of reusing cached results.
        self.index.findTask.side_effect = SideEffects.everything_has_an_id

        release_name = 'Firefox-32.0b1-build7'
        for expected_call_count in (8, 16):
            are_en_us_builds_completed(self.index, release_name,
                                       self.submitted_at, self.revision,
                                       self.platforms, self.queue,
                                       self.tc_task_indexes)
            self.assertEqual(self.index.findTask.call_count,
                             expected_call_count)
Esempio n. 2
0
    def test_stores_results_of_the_previous_call(self):
        # With one platform missing, the first pass queries all 7 indexes;
        # the second pass only re-checks the single incomplete one.
        self.index.findTask.side_effect = SideEffects.linux_has_no_task

        release_name = 'Firefox-32.0b1-build5'
        for expected_call_count in (7, 8):
            are_en_us_builds_completed(self.index, release_name,
                                       self.submitted_at, self.revision,
                                       self.platforms, self.queue,
                                       self.tc_task_indexes)
            self.assertEqual(self.index.findTask.call_count,
                             expected_call_count)
Esempio n. 3
0
    def test_creates_new_watcher_if_new_release_name(self):
        # A previously unseen release name must get its own watcher: the
        # second release triggers a full 7-index scan of its own (7 + 7 = 14).
        self.index.findTask.side_effect = SideEffects.linux_has_no_task

        cases = (('Firefox-32.0b1-build6', 7),
                 ('Firefox-32.0b1-build99', 14))
        for release_name, expected_call_count in cases:
            are_en_us_builds_completed(self.index, release_name,
                                       self.submitted_at, self.revision,
                                       self.platforms, self.queue,
                                       self.tc_task_indexes)
            self.assertEqual(self.index.findTask.call_count,
                             expected_call_count)
Esempio n. 4
0
    def test_delete_watcher_if_all_builds_are_completed(self):
        # A completed release's watcher is deleted, so calling again queries
        # every platform index from scratch.
        self.index.findTask.side_effect = SideEffects.everything_has_an_id

        release_name = 'Firefox-32.0b1-build7'
        platform_count = len(self.platforms)
        for expected_call_count in (platform_count, platform_count * 2):
            are_en_us_builds_completed(
                self.index, release_name, self.submitted_at, self.branch,
                self.revision, self.tc_product_name, self.platforms
            )
            self.assertEqual(self.index.findTask.call_count,
                             expected_call_count)
Esempio n. 5
0
    def test_stores_results_of_the_previous_call(self):
        # Results are memoized per watcher: after the first full scan, only
        # the still-missing (linux) platform is queried again.
        self.index.findTask.side_effect = SideEffects.linux_has_no_task

        release_name = 'Firefox-32.0b1-build5'
        platform_count = len(self.platforms)
        for expected_call_count in (platform_count, platform_count + 1):
            are_en_us_builds_completed(
                self.index, release_name, self.submitted_at, self.branch,
                self.revision, self.tc_product_name, self.platforms
            )
            self.assertEqual(self.index.findTask.call_count,
                             expected_call_count)
Esempio n. 6
0
    def test_creates_new_watcher_if_new_release_name(self):
        # Each distinct release name gets a fresh watcher, so the second
        # release performs a full scan of its own.
        self.index.findTask.side_effect = SideEffects.linux_has_no_task

        platform_count = len(self.platforms)
        cases = (('Firefox-32.0b1-build6', platform_count),
                 ('Firefox-32.0b1-build99', platform_count * 2))
        for release_name, expected_call_count in cases:
            are_en_us_builds_completed(
                self.index, release_name, self.submitted_at, self.branch,
                self.revision, self.tc_product_name, self.platforms
            )
            self.assertEqual(self.index.findTask.call_count,
                             expected_call_count)
Esempio n. 7
0
    def test_returns_false_if_one_task_is_missing(self):
        # If even a single platform task is absent, the release is not ready.
        self.index.findTask.side_effect = SideEffects.linux_has_no_task

        release_name = 'Firefox-32.0b1-build2'
        completed = are_en_us_builds_completed(
            self.index, release_name, self.submitted_at, self.branch,
            self.revision, self.tc_product_name, self.platforms
        )
        self.assertFalse(completed)
Esempio n. 8
0
    def test_returns_true_when_everything_is_ready(self):
        # All platform tasks resolve to an id, so the release is ready.
        self.index.findTask.side_effect = SideEffects.everything_has_an_id

        release_name = 'Firefox-32.0b1-build1'
        completed = are_en_us_builds_completed(
            self.index, release_name, self.submitted_at, self.branch,
            self.revision, self.tc_product_name, self.platforms
        )
        self.assertTrue(completed)
Esempio n. 9
0
    def test_returns_false_if_one_task_is_missing(self):
        # A single missing platform task (linux) makes the check fail.
        self.index.findTask.side_effect = SideEffects.linux_has_no_task

        release_name = 'Firefox-32.0b1-build2'
        completed = are_en_us_builds_completed(
            self.index, release_name, self.submitted_at, self.revision,
            self.platforms, self.queue, self.tc_task_indexes
        )
        self.assertFalse(completed)
Esempio n. 10
0
    def test_returns_true_when_everything_is_ready(self):
        # Every index lookup succeeds, so the check reports completion.
        self.index.findTask.side_effect = SideEffects.everything_has_an_id

        release_name = 'Firefox-32.0b1-build1'
        completed = are_en_us_builds_completed(
            self.index, release_name, self.submitted_at, self.revision,
            self.platforms, self.queue, self.tc_task_indexes
        )
        self.assertTrue(completed)
Esempio n. 11
0
def main(options):
    """Poll Ship It for new release requests and submit their promotion
    task graphs.

    Waits (polling loop) until at least one release request passes the
    prebuild sanity checks, then for each release: verifies the en-US
    builds are complete, builds the task-graph kwargs and submits the
    graph to the TaskCluster scheduler, notifying release drivers by
    email on success.

    :param options: parsed CLI options; only ``options.config`` (path to
        the YAML config file) is read here.
    Exits with status 5 on a polling error, and 2 if any release failed
    to be submitted.
    """
    log.info('Loading config from %s' % options.config)

    with open(options.config, 'r') as config_file:
        # safe_load: plain-data config; yaml.load() without an explicit
        # Loader can execute arbitrary constructors and is deprecated.
        config = yaml.safe_load(config_file)

    if config['release-runner'].get('verbose', False):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    # Shorthands for the config sections used below.
    api_root = config['api']['api_root']
    username = config['api']['username']
    password = config['api']['password']

    rr_config = config['release-runner']

    buildbot_configs = rr_config['buildbot_configs']
    buildbot_configs_branch = rr_config['buildbot_configs_branch']
    sleeptime = rr_config['sleeptime']
    notify_from = rr_config.get('notify_from')
    notify_to = rr_config.get('notify_to_announce')
    docker_worker_key = rr_config.get('docker_worker_key')
    signing_pvt_key = config['signing'].get('pvt_key')
    # Accept a comma-separated string as well as a list of recipients.
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = rr_config.get('smtp_server', 'localhost')
    tc_config = {
        "credentials": {
            "clientId": config['taskcluster'].get('client_id'),
            "accessToken": config['taskcluster'].get('access_token'),
        }
    }
    # Extend tc_config for retries, see Bug 1293744
    # https://github.com/taskcluster/taskcluster-client.py/blob/0.0.24/taskcluster/client.py#L30
    # This is a stopgap until Bug 1259627 is fixed.
    retrying_tc_config = tc_config.copy()
    retrying_tc_config.update({"maxRetries": 12})
    balrog_username = config['balrog'].get("username")
    balrog_password = config["balrog"].get("password")
    extra_balrog_submitter_params = config["balrog"].get(
        "extra_balrog_submitter_params", "")
    beetmover_aws_access_key_id = config["beetmover"].get("aws_access_key_id")
    beetmover_aws_secret_access_key = config["beetmover"].get(
        "aws_secret_access_key")
    gpg_key_path = config["signing"].get("gpg_key_path")

    # TODO: replace release sanity with direct checks of en-US and l10n
    # revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(retrying_tc_config)
    index = Index(tc_config)
    queue = Queue(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests([r['pattern'] for r in config['releases']])
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(
                    rr, config['releases'])
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps the intended behavior.
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial,
          args=(buildbot_configs, CONFIGS_WORKDIR),
          kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config:
        format_dict = dict(buildbot_configs=CONFIGS_WORKDIR)
        for target in config['symlinks']:
            symlink = config['symlinks'].get(target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)
    rc = 0
    for release in new_releases:
        branchConfig = get_branch_config(release)
        # candidate releases are split in two graphs and release-runner only handles the first
        # graph of tasks. so parts like postrelease, push_to_releases/mirrors, and mirror dependant
        # channels are handled in the second generated graph outside of release-runner.
        # This is not elegant but it should do the job for now
        release_channels = release['release_channels']
        candidate_release = is_candidate_release(release_channels)
        if candidate_release:
            postrelease_enabled = False
            postrelease_bouncer_aliases_enabled = False
            # Only verify/publish on channels that do not require the
            # mirrors (second graph) to be in place.
            final_verify_channels = [
                c for c in release_channels
                if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            publish_to_balrog_channels = [
                c for c in release_channels
                if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            push_to_releases_enabled = False
            postrelease_mark_as_shipped_enabled = False
        else:
            postrelease_enabled = branchConfig[
                'postrelease_version_bump_enabled'][release['product']]
            postrelease_bouncer_aliases_enabled = branchConfig[
                'postrelease_bouncer_aliases_enabled']
            postrelease_mark_as_shipped_enabled = branchConfig[
                'postrelease_mark_as_shipped_enabled']
            final_verify_channels = release_channels
            publish_to_balrog_channels = release_channels
            push_to_releases_enabled = True

        # XXX: Doesn't work with neither Fennec nor Thunderbird
        platforms = branchConfig['release_platforms']

        try:
            graph_id = slugId()
            done = are_en_us_builds_completed(
                index=index,
                release_name=release['name'],
                submitted_at=release['submittedAt'],
                revision=release['mozillaRevision'],
                platforms=platforms,
                queue=queue,
                tc_task_indexes=branchConfig['tc_indexes'][release['product']])
            if not done:
                log.info(
                    'Builds are not completed yet, skipping release "%s" for now',
                    release['name'])
                rr.update_status(release, 'Waiting for builds to be completed')
                continue

            log.info('Every build is completed for release: %s',
                     release['name'])
            rr.update_status(release, 'Generating task graph')

            # Everything the task-graph generator needs, gathered from the
            # release request, the branch config and the runner config.
            kwargs = {
                "public_key": docker_worker_key,
                "version": release["version"],
                # ESR should not use "esr" suffix here:
                "next_version": bump_version(
                    release["version"].replace("esr", "")),
                "appVersion": getAppVersion(release["version"]),
                "buildNumber": release["buildNumber"],
                "release_eta": release.get("release_eta"),
                "source_enabled": True,
                "checksums_enabled": True,
                "binary_transparency_enabled": branchConfig.get(
                    "binary_transparency_enabled", False),
                "repo_path": release["branch"],
                "revision": release["mozillaRevision"],
                "product": release["product"],
                "funsize_product": get_funsize_product(release["product"]),
                # if mozharness_revision is not passed, use 'revision'
                "mozharness_changeset": release.get(
                    'mh_changeset') or release['mozillaRevision'],
                "partial_updates": release.get('partial_updates', list()),
                "branch": release['branchShortName'],
                "updates_enabled": bool(release["partials"]),
                "l10n_config": get_l10n_config(
                    index=index,
                    product=release["product"],
                    branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['platforms'],
                    l10n_platforms=branchConfig['l10n_release_platforms'],
                    l10n_changesets=release['l10n_changesets'],
                    tc_task_indexes=branchConfig['tc_indexes'][
                        release['product']],
                ),
                "en_US_config": get_en_US_config(
                    index=index,
                    product=release["product"],
                    branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['release_platforms'],
                    tc_task_indexes=branchConfig['tc_indexes'][
                        release['product']],
                ),
                "verifyConfigs": {},
                "balrog_api_root": branchConfig["balrog_api_root"],
                "funsize_balrog_api_root": branchConfig[
                    "funsize_balrog_api_root"],
                "balrog_username": balrog_username,
                "balrog_password": balrog_password,
                "beetmover_aws_access_key_id": beetmover_aws_access_key_id,
                "beetmover_aws_secret_access_key":
                    beetmover_aws_secret_access_key,
                # TODO: staging specific, make them configurable
                "signing_class": branchConfig['signing_class'][
                    release["product"]],
                "accepted_mar_channel_id": branchConfig.get(
                    'accepted_mar_channel_id', {}).get(release["product"]),
                "signing_cert": branchConfig['signing_cert'][
                    release["product"]],
                "moz_disable_mar_cert_verification": branchConfig.get(
                    'moz_disable_mar_cert_verification'),
                "root_home_dir": branchConfig['root_home_dir'][
                    release["product"]],
                "bouncer_enabled": branchConfig["bouncer_enabled"],
                "updates_builder_enabled": branchConfig[
                    "updates_builder_enabled"],
                "update_verify_enabled": branchConfig[
                    "update_verify_enabled"],
                "release_channels": release_channels,
                "final_verify_channels": final_verify_channels,
                "final_verify_platforms": branchConfig['release_platforms'],
                "uptake_monitoring_platforms": branchConfig[
                    'uptake_monitoring_platforms'][release["product"]],
                "signing_pvt_key": signing_pvt_key,
                "build_tools_repo_path": branchConfig[
                    'build_tools_repo_path'],
                "push_to_candidates_enabled": branchConfig[
                    'push_to_candidates_enabled'],
                # TODO: temporary config enabled during 53 Fennec beta cycle
                "candidates_fennec_enabled": branchConfig.get(
                    'candidates_fennec_enabled'),
                "stage_product": branchConfig['stage_product'][
                    release['product']],
                "postrelease_bouncer_aliases_enabled":
                    postrelease_bouncer_aliases_enabled,
                "uptake_monitoring_enabled": branchConfig[
                    'uptake_monitoring_enabled'],
                "tuxedo_server_url": branchConfig['tuxedoServerUrl'],
                "postrelease_version_bump_enabled": postrelease_enabled,
                "postrelease_mark_as_shipped_enabled":
                    postrelease_mark_as_shipped_enabled,
                "push_to_releases_enabled": push_to_releases_enabled,
                "push_to_releases_automatic": branchConfig[
                    'push_to_releases_automatic'],
                "beetmover_candidates_bucket": branchConfig[
                    "beetmover_buckets"][release["product"]],
                "partner_repacks_platforms": branchConfig.get(
                    "partner_repacks_platforms", {}).get(
                        release["product"], []),
                "eme_free_repacks_platforms": branchConfig.get(
                    "eme_free_repacks_platforms", {}).get(
                        release["product"], []),
                "sha1_repacks_platforms": branchConfig.get(
                    "sha1_repacks_platforms", []),
                "l10n_changesets": release['l10n_changesets'],
                "extra_balrog_submitter_params":
                    extra_balrog_submitter_params + " --product " +
                    release["product"].capitalize(),
                "publish_to_balrog_channels": publish_to_balrog_channels,
                "snap_enabled": branchConfig.get(
                    "snap_enabled", {}).get(release["product"], False),
                "update_verify_channel": branchConfig.get(
                    "update_verify_channel", {}).get(release["product"]),
                "update_verify_requires_cdn_push": branchConfig.get(
                    "update_verify_requires_cdn_push", False),
            }

            # TODO: en-US validation for multiple tasks
            # validate_graph_kwargs(queue, gpg_key_path, **kwargs)
            graph = make_task_graph_strict_kwargs(**kwargs)
            rr.update_status(release, "Submitting task graph")
            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print(scheduler.createTaskGraph(graph_id, graph))

            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server,
                                  from_=notify_from,
                                  to=notify_to,
                                  release=release,
                                  task_group_id=graph_id,
                                  l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release,
                'Failed to start release promotion (graph ID: %s). Error(s): %s'
                % (graph_id, exception))
            log.exception(
                'Failed to start release "%s" promotion for graph %s. Error(s): %s',
                release['name'], graph_id, exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)
Esempio n. 12
0
def main(options):
    log.info('Loading config from %s' % options.config)
    config = load_config(options.config)

    if config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    # Shorthand
    api_root = config.get('api', 'api_root')
    username = config.get('api', 'username')
    password = config.get('api', 'password')
    buildbot_configs = config.get('release-runner', 'buildbot_configs')
    buildbot_configs_branch = config.get('release-runner',
                                         'buildbot_configs_branch')
    sleeptime = config.getint('release-runner', 'sleeptime')
    notify_from = get_config(config, 'release-runner', 'notify_from', None)
    notify_to = get_config(config, 'release-runner', 'notify_to_announce', None)
    docker_worker_key = get_config(config, 'release-runner',
                                   'docker_worker_key', None)
    signing_pvt_key = get_config(config, 'signing', 'pvt_key', None)
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = get_config(config, 'release-runner', 'smtp_server',
                             'localhost')
    tc_config = {
        "credentials": {
            "clientId": get_config(config, "taskcluster", "client_id", None),
            "accessToken": get_config(config, "taskcluster", "access_token", None),
        }
    }
    # Extend tc_config for retries, see Bug 1293744
    # https://github.com/taskcluster/taskcluster-client.py/blob/0.0.24/taskcluster/client.py#L30
    # This is a stopgap until Bug 1259627 is fixed.
    retrying_tc_config = tc_config.copy()
    retrying_tc_config.update({"maxRetries": 12})
    balrog_username = get_config(config, "balrog", "username", None)
    balrog_password = get_config(config, "balrog", "password", None)
    extra_balrog_submitter_params = get_config(config, "balrog", "extra_balrog_submitter_params", None)
    beetmover_aws_access_key_id = get_config(config, "beetmover", "aws_access_key_id", None)
    beetmover_aws_secret_access_key = get_config(config, "beetmover", "aws_secret_access_key", None)
    gpg_key_path = get_config(config, "signing", "gpg_key_path", None)

    # TODO: replace release sanity with direct checks of en-US and l10n revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(retrying_tc_config)
    index = Index(tc_config)
    queue = Queue(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests()
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(rr)
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial, args=(buildbot_configs, CONFIGS_WORKDIR), kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config.sections():
        format_dict = dict(buildbot_configs=CONFIGS_WORKDIR)
        for target in config.options('symlinks'):
            symlink = config.get('symlinks', target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)
    rc = 0
    for release in new_releases:
        branchConfig = get_branch_config(release)
        # candidate releases are split in two graphs and release-runner only handles the first
        # graph of tasks. so parts like postrelease, push_to_releases/mirrors, and mirror dependant
        # channels are handled in the second generated graph outside of release-runner.
        # This is not elegant but it should do the job for now
        release_channels = release['release_channels']
        candidate_release = is_candidate_release(release_channels)
        if candidate_release:
            postrelease_enabled = False
            postrelease_bouncer_aliases_enabled = False
            final_verify_channels = [
                c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            publish_to_balrog_channels = [
                c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            push_to_releases_enabled = False
            postrelease_mark_as_shipped_enabled = False
        else:
            postrelease_enabled = branchConfig['postrelease_version_bump_enabled']
            postrelease_bouncer_aliases_enabled = branchConfig['postrelease_bouncer_aliases_enabled']
            postrelease_mark_as_shipped_enabled = branchConfig['postrelease_mark_as_shipped_enabled']
            final_verify_channels = release_channels
            publish_to_balrog_channels = release_channels
            push_to_releases_enabled = True

        ship_it_product_name = release['product']
        tc_product_name = branchConfig['stage_product'][ship_it_product_name]
        # XXX: Doesn't work with neither Fennec nor Thunderbird
        platforms = branchConfig['release_platforms']

        try:
            if not are_en_us_builds_completed(index, release_name=release['name'], submitted_at=release['submittedAt'],
                                              branch=release['branchShortName'], revision=release['mozillaRevision'],
                                              tc_product_name=tc_product_name, platforms=platforms):
                log.info('Builds are not completed yet, skipping release "%s" for now', release['name'])
                rr.update_status(release, 'Waiting for builds to be completed')
                continue

            log.info('Every build is completed for release: %s', release['name'])
            graph_id = slugId()

            rr.update_status(release, 'Generating task graph')

            kwargs = {
                "public_key": docker_worker_key,
                "version": release["version"],
                # ESR should not use "esr" suffix here:
                "next_version": bump_version(release["version"].replace("esr", "")),
                "appVersion": getAppVersion(release["version"]),
                "buildNumber": release["buildNumber"],
                "source_enabled": True,
                "checksums_enabled": True,
                "repo_path": release["branch"],
                "revision": release["mozillaRevision"],
                "product": release["product"],
                # if mozharness_revision is not passed, use 'revision'
                "mozharness_changeset": release.get('mh_changeset') or release['mozillaRevision'],
                "partial_updates": release['partial_updates'],
                "branch": release['branchShortName'],
                "updates_enabled": bool(release["partials"]),
                "l10n_config": get_l10n_config(
                    index=index, product=release["product"], branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['platforms'],
                    l10n_platforms=branchConfig['l10n_release_platforms'],
                    l10n_changesets=release['l10n_changesets']
                ),
                "en_US_config": get_en_US_config(
                    index=index, product=release["product"], branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['release_platforms']
                ),
                "verifyConfigs": {},
                "balrog_api_root": branchConfig["balrog_api_root"],
                "funsize_balrog_api_root": branchConfig["funsize_balrog_api_root"],
                "balrog_username": balrog_username,
                "balrog_password": balrog_password,
                "beetmover_aws_access_key_id": beetmover_aws_access_key_id,
                "beetmover_aws_secret_access_key": beetmover_aws_secret_access_key,
                # TODO: stagin specific, make them configurable
                "signing_class": "release-signing",
                "bouncer_enabled": branchConfig["bouncer_enabled"],
                "updates_builder_enabled": branchConfig["updates_builder_enabled"],
                "update_verify_enabled": branchConfig["update_verify_enabled"],
                "release_channels": release_channels,
                "final_verify_channels": final_verify_channels,
                "final_verify_platforms": branchConfig['release_platforms'],
                "uptake_monitoring_platforms": branchConfig['release_platforms'],
                "signing_pvt_key": signing_pvt_key,
                "build_tools_repo_path": branchConfig['build_tools_repo_path'],
                "push_to_candidates_enabled": branchConfig['push_to_candidates_enabled'],
                "postrelease_bouncer_aliases_enabled": postrelease_bouncer_aliases_enabled,
                "uptake_monitoring_enabled": branchConfig['uptake_monitoring_enabled'],
                "tuxedo_server_url": branchConfig['tuxedoServerUrl'],
                "postrelease_version_bump_enabled": postrelease_enabled,
                "postrelease_mark_as_shipped_enabled": postrelease_mark_as_shipped_enabled,
                "push_to_releases_enabled": push_to_releases_enabled,
                "push_to_releases_automatic": branchConfig['push_to_releases_automatic'],
                "beetmover_candidates_bucket": branchConfig["beetmover_buckets"][release["product"]],
                "partner_repacks_platforms": branchConfig.get("partner_repacks_platforms", []),
                "l10n_changesets": release['l10n_changesets'],
                "extra_balrog_submitter_params": extra_balrog_submitter_params,
                "publish_to_balrog_channels": publish_to_balrog_channels,
                "snap_enabled": branchConfig.get("snap_enabled", False),
            }

            validate_graph_kwargs(queue, gpg_key_path, **kwargs)
            graph = make_task_graph_strict_kwargs(**kwargs)
            rr.update_status(release, "Submitting task graph")
            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print scheduler.createTaskGraph(graph_id, graph)

            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server, from_=notify_from,
                                  to=notify_to, release=release,
                                  task_group_id=graph_id, l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release,
                'Failed to start release promotion (graph ID: %s). Error(s): %s' % (graph_id, exception)
            )
            log.exception('Failed to start release "%s" promotion for graph %s. Error(s): %s',
                          release['name'], graph_id, exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)