def get_release_items_from_runner_config(release_runner_ini):
    ini_items = {}
    ini_items['signing_pvt_key'] = release_runner_ini.get('signing', 'pvt_key')
    ini_items['docker_worker_key'] = release_runner_ini.get('release-runner', 'docker_worker_key')
    ini_items['balrog_username'] = release_runner_ini.get("balrog", "username")
    ini_items['balrog_password'] = release_runner_ini.get("balrog", "password")
    ini_items['beetmover_aws_access_key_id'] = release_runner_ini.get("beetmover", "aws_access_key_id")
    ini_items['beetmover_aws_secret_access_key'] = release_runner_ini.get("beetmover", "aws_secret_access_key")
    ini_items['extra_balrog_submitter_params'] = get_config(release_runner_ini, "balrog",
                                                            "extra_balrog_submitter_params", None)
    return ini_items
Example #2
def get_release_items_from_runner_config(release_runner_ini):
    ini_items = {}
    ini_items['signing_pvt_key'] = release_runner_ini.get('signing', 'pvt_key')
    ini_items['docker_worker_key'] = release_runner_ini.get(
        'release-runner', 'docker_worker_key')
    ini_items['balrog_username'] = release_runner_ini.get("balrog", "username")
    ini_items['balrog_password'] = release_runner_ini.get("balrog", "password")
    ini_items['beetmover_aws_access_key_id'] = release_runner_ini.get(
        "beetmover", "aws_access_key_id")
    ini_items['beetmover_aws_secret_access_key'] = release_runner_ini.get(
        "beetmover", "aws_secret_access_key")
    ini_items['extra_balrog_submitter_params'] = get_config(
        release_runner_ini, "balrog", "extra_balrog_submitter_params", None)
    return ini_items
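Both variants above rely on a get_config helper rather than calling ConfigParser.get directly, so that optional keys (such as extra_balrog_submitter_params) fall back to a default instead of raising. The helper itself is not shown on this page; a minimal sketch of what it is assumed to do:

from ConfigParser import NoOptionError, NoSectionError


def get_config(config, section, option, default):
    # Return the value if present, otherwise the supplied default instead of raising.
    try:
        return config.get(section, option)
    except (NoSectionError, NoOptionError):
        return default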
Example #3
def main(options):
    log.info('Loading config from %s' % options.config)
    config = load_config(options.config)

    if config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)

    check_buildbot()
    check_fabric()

    # Shorthand
    sendchange_master = config.get('release-runner', 'sendchange_master')
    api_root = config.get('api', 'api_root')
    username = config.get('api', 'username')
    password = config.get('api', 'password')
    hg_username = config.get('release-runner', 'hg_username')
    hg_ssh_key = config.get('release-runner', 'hg_ssh_key')
    buildbot_configs = config.get('release-runner', 'buildbot_configs')
    buildbot_configs_branch = config.get('release-runner',
                                         'buildbot_configs_branch')
    buildbotcustom = config.get('release-runner', 'buildbotcustom')
    buildbotcustom_branch = config.get('release-runner',
                                       'buildbotcustom_branch')
    tools = config.get('release-runner', 'tools')
    tools_branch = config.get('release-runner', 'tools_branch')
    masters_json = config.get('release-runner', 'masters_json')
    staging = config.getboolean('release-runner', 'staging')
    sleeptime = config.getint('release-runner', 'sleeptime')
    notify_from = get_config(config, 'release-runner', 'notify_from', None)
    notify_to = get_config(config, 'release-runner', 'notify_to', None)
    ssh_username = get_config(config, 'release-runner', 'ssh_username', None)
    ssh_key = get_config(config, 'release-runner', 'ssh_key', None)
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = get_config(config, 'release-runner', 'smtp_server',
                             'localhost')
    configs_workdir = 'buildbot-configs'
    custom_workdir = 'buildbotcustom'
    tools_workdir = 'tools'
    configs_pushRepo = make_hg_url(HG, get_repo_path(buildbot_configs),
                                   protocol='ssh')
    custom_pushRepo = make_hg_url(HG, get_repo_path(buildbotcustom),
                                  protocol='ssh')
    tools_pushRepo = make_hg_url(HG, get_repo_path(tools), protocol='ssh')

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests()
            if rr.new_releases:
                for release in rr.new_releases:
                    log.info('Got a new release request: %s' % release)
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except Exception:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)
Example #4
def main(options):
    log.info('Loading config from %s' % options.config)
    config = load_config(options.config)

    if config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    # Shorthand
    api_root = config.get('api', 'api_root')
    username = config.get('api', 'username')
    password = config.get('api', 'password')
    buildbot_configs = config.get('release-runner', 'buildbot_configs')
    buildbot_configs_branch = config.get('release-runner',
                                         'buildbot_configs_branch')
    sleeptime = config.getint('release-runner', 'sleeptime')
    notify_from = get_config(config, 'release-runner', 'notify_from', None)
    notify_to = get_config(config, 'release-runner', 'notify_to', None)
    docker_worker_key = get_config(config, 'release-runner',
                                   'docker_worker_key', None)
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = get_config(config, 'release-runner', 'smtp_server',
                             'localhost')
    tc_config = {
        "credentials": {
            "clientId": get_config(config, "taskcluster", "client_id", None),
            "accessToken": get_config(config, "taskcluster", "access_token", None),
        }
    }
    configs_workdir = 'buildbot-configs'
    balrog_username = get_config(config, "balrog", "username", None)
    balrog_password = get_config(config, "balrog", "password", None)

    # TODO: replace release sanity with direct checks of en-US and l10n revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(tc_config)
    index = Index(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests()
            if rr.new_releases:
                for release in rr.new_releases:
                    log.info('Got a new release request: %s' % release)
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except Exception:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial, args=(buildbot_configs, configs_workdir), kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config.sections():
        format_dict = dict(buildbot_configs=configs_workdir)
        for target in config.options('symlinks'):
            symlink = config.get('symlinks', target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)

    # TODO: this won't work for Thunderbird...do we care?
    branch = release["branch"].split("/")[-1]
    branchConfig = readBranchConfig(path.join(configs_workdir, "mozilla"), branch=branch)

    rc = 0
    for release in rr.new_releases:
        try:
            rr.update_status(release, 'Generating task graph')
            l10n_changesets = parsePlainL10nChangesets(rr.get_release_l10n(release["name"]))

            kwargs = {
                "public_key": docker_worker_key,
                "version": release["version"],
                "appVersion": getAppVersion(release["version"]),
                "buildNumber": release["buildNumber"],
                "source_enabled": True,
                "repo_path": release["branch"],
                "revision": release["mozillaRevision"],
                "product": release["product"],
                "partial_updates": getPartials(release),
                "branch": branch,
                "updates_enabled": bool(release["partials"]),
                "enUS_platforms": branchConfig["release_platforms"],
                "l10n_config": get_l10n_config(release, branchConfig, branch, l10n_changesets, index),
                "en_US_config": get_en_US_config(release, branchConfig, branch, index),
                "verifyConfigs": {},
                "balrog_api_root": branchConfig["balrog_api_root"],
                "balrog_username": balrog_username,
                "balrog_password": balrog_password,
                # TODO: staging-specific, make them configurable
                "signing_class": "dep-signing",
            }
            verifyConfigTemplate = "{branch}-{product}-{plat}.cfg"
            for plat in branchConfig["release_platforms"]:
                kwargs["verifyConfigs"][plat] = verifyConfigTemplate.format(
                    branch=kwargs['branch'],
                    product=kwargs['product'],
                    plat=plat,
                )

            validate_graph_kwargs(**kwargs)

            graph_id = slugId()
            graph = make_task_graph(**kwargs)

            rr.update_status(release, "Submitting task graph")

            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print scheduler.createTaskGraph(graph_id, graph)

            rr.mark_as_completed(release)
        except Exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.update_status(release, 'Failed to start release promotion')
            log.exception("Failed to start release promotion for {}: ".format(release))

    if rc != 0:
        sys.exit(rc)
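Example #4 feeds rr.get_release_l10n(release["name"]) through parsePlainL10nChangesets before building the graph kwargs. Assuming the usual plain-text format of one "locale changeset" pair per line (the real helper lives in Mozilla's tools repository and may differ), a hedged sketch of such a parser:

def parsePlainL10nChangesets(changesets_text):
    # Parse lines of the form "af 1234abcd" into {"af": "1234abcd"}.
    changesets = {}
    for line in changesets_text.strip().splitlines():
        locale, revision = line.rstrip().split(None, 1)
        changesets[locale] = revision
    return changesets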
Example #5
        sys.exit(0)
    elif options.action == "reload":
        pid = int(open(options.pidfile).read())
        os.kill(pid, signal.SIGHUP)
        sys.exit(0)

    if len(args) != 1:
        parser.error("Need just one server.ini file to read")

    config = load_config(args[0])
    if not config:
        parser.error("Error reading config file: %s" % args[0])

    # Set up options if they haven't been overridden on the cmdline
    if options.logfile is None:
        options.logfile = get_config(config, "logging", "logfile", None)
    if options.loglevel is None:
        loglevel = get_config(config, "logging", "loglevel", "INFO")
        options.loglevel = getattr(logging, loglevel)
    if options.log_maxfiles is None:
        options.log_maxfiles = get_config_int(config, "logging",
                                              "log_maxfiles", 50)
    if options.log_maxsize is None:
        # 10 MB log size by default
        options.log_maxsize = get_config_int(config, "logging", "log_maxsize",
                                             10 * (1024**2))
    if options.daemonize is None:
        options.daemonize = get_config_bool(config, "server", "daemonize",
                                            False)
    os.umask(0o077)
    setup_logging(
Example #6
        sys.exit(0)
    elif options.action == "reload":
        pid = int(open(options.pidfile).read())
        os.kill(pid, signal.SIGHUP)
        sys.exit(0)

    if len(args) != 1:
        parser.error("Need just one server.ini file to read")

    config = load_config(args[0])
    if not config:
        parser.error("Error reading config file: %s" % args[0])

    # Set up options if they haven't been overridden on the cmdline
    if options.logfile is None:
        options.logfile = get_config(config, "logging", "logfile", None)
    if options.loglevel is None:
        loglevel = get_config(config, "logging", "loglevel", "INFO")
        options.loglevel = getattr(logging, loglevel)
    if options.log_maxfiles is None:
        options.log_maxfiles = get_config_int(
            config, "logging", "log_maxfiles", 50)
    if options.log_maxsize is None:
        # 10 MB log size by default
        options.log_maxsize = get_config_int(
            config, "logging", "log_maxsize", 10 * (1024 ** 2))
    if options.daemonize is None:
        options.daemonize = get_config_bool(
            config, "server", "daemonize", False)
    os.umask(0o077)
    setup_logging(logfile=options.logfile,
Example #7
                      help='a task id of a task that shares the same release info')
    parser.add_option('--dry-run', dest='dry_run', action='store_true', default=False,
                      help="render the task graph from yaml tmpl but don't submit to taskcluster")

    options = parser.parse_args()[0]

    if not options.release_runner_ini:
        parser.error('Need to pass a release runner config')
    if not options.branch_and_product_config:
        parser.error('Need to pass a branch and product config')

    # load config files
    release_runner_config = load_config(options.release_runner_ini)
    tc_config = {
        "credentials": {
            "clientId": get_config(release_runner_config, "taskcluster", "client_id", None),
            "accessToken": get_config(release_runner_config, "taskcluster", "access_token", None),
        }
    }
    branch_product_config = load_branch_and_product_config(options.branch_and_product_config)

    if release_runner_config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(filename='releasetasks_graph_gen.log',
                        format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)


    # create releasetasks graph args from config files
Example #8
        help="render the task graph from yaml tmpl but don't submit to taskcluster")

    options = parser.parse_args()[0]

    if not options.release_runner_ini:
        parser.error('Need to pass a release runner config')
    if not options.branch_and_product_config:
        parser.error('Need to pass a branch and product config')

    # load config files
    release_runner_config = load_config(options.release_runner_ini)
    tc_config = {
        "credentials": {
            "clientId": get_config(release_runner_config, "taskcluster",
                                   "client_id", None),
            "accessToken": get_config(release_runner_config, "taskcluster",
                                      "access_token", None),
        }
    }
    branch_product_config = load_branch_and_product_config(
        options.branch_and_product_config)

    if release_runner_config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(filename='releasetasks_graph_gen.log',
                        format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
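Examples #7 and #8 both load a separate branch-and-product config in addition to the release-runner ini. The loader is not shown on this page; assuming that file is YAML (the --dry-run help above mentions rendering the task graph from a yaml template), a plausible sketch:

import yaml


def load_branch_and_product_config(config_path):
    # Read the per-branch/per-product settings used to fill in the releasetasks kwargs.
    with open(config_path) as f:
        return yaml.safe_load(f)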
Example #9
    if len(args) != 1:
        parser.error("Need just one server.ini file to read")

    # anything past here should only be done as user cltsign, remove
    # footgun
    if getpass.getuser() != 'cltsign':
        log.error("Must run as user 'cltsign' for remaining actions")
        sys.exit(1)

    config = load_config(args[0])
    if not config:
        parser.error("Error reading config file: %s" % args[0])

    # Set up options if they haven't been overridden on the cmdline
    if options.logfile is None:
        options.logfile = get_config(config, "logging", "logfile", None)
    if options.loglevel is None:
        loglevel = get_config(config, "logging", "loglevel", "INFO")
        options.loglevel = getattr(logging, loglevel)
    if options.log_maxfiles is None:
        options.log_maxfiles = get_config_int(
            config, "logging", "log_maxfiles", 50)
    if options.log_maxsize is None:
        # 10 MB log size by default
        options.log_maxsize = get_config_int(
            config, "logging", "log_maxsize", 10 * (1024 ** 2))
    if options.daemonize is None:
        options.daemonize = get_config_bool(
            config, "server", "daemonize", False)
    os.umask(0o077)
    setup_logging(logfile=options.logfile,
Example #10
def main(options):
    log.info('Loading config from %s' % options.config)
    config = load_config(options.config)

    if config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    # Shorthand
    api_root = config.get('api', 'api_root')
    username = config.get('api', 'username')
    password = config.get('api', 'password')
    buildbot_configs = config.get('release-runner', 'buildbot_configs')
    buildbot_configs_branch = config.get('release-runner',
                                         'buildbot_configs_branch')
    sleeptime = config.getint('release-runner', 'sleeptime')
    notify_from = get_config(config, 'release-runner', 'notify_from', None)
    notify_to = get_config(config, 'release-runner', 'notify_to', None)
    docker_worker_key = get_config(config, 'release-runner',
                                   'docker_worker_key', None)
    signing_pvt_key = get_config(config, 'signing', 'pvt_key', None)
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = get_config(config, 'release-runner', 'smtp_server',
                             'localhost')
    tc_config = {
        "credentials": {
            "clientId": get_config(config, "taskcluster", "client_id", None),
            "accessToken": get_config(config, "taskcluster", "access_token",
                                      None),
        }
    }
    configs_workdir = 'buildbot-configs'
    balrog_username = get_config(config, "balrog", "username", None)
    balrog_password = get_config(config, "balrog", "password", None)
    extra_balrog_submitter_params = get_config(
        config, "balrog", "extra_balrog_submitter_params", None)
    beetmover_aws_access_key_id = get_config(config, "beetmover",
                                             "aws_access_key_id", None)
    beetmover_aws_secret_access_key = get_config(config, "beetmover",
                                                 "aws_secret_access_key", None)
    gpg_key_path = get_config(config, "signing", "gpg_key_path", None)

    # TODO: replace release sanity with direct checks of en-US and l10n revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(tc_config)
    index = Index(tc_config)
    queue = Queue(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests()
            if rr.new_releases:
                for release in rr.new_releases:
                    log.info('Got a new release request: %s' % release)
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except Exception:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial,
          args=(buildbot_configs, configs_workdir),
          kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config.sections():
        format_dict = dict(buildbot_configs=configs_workdir)
        for target in config.options('symlinks'):
            symlink = config.get('symlinks', target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)

    # TODO: this won't work for Thunderbird...do we care?
    branch = release["branch"].split("/")[-1]
    branchConfig = readBranchConfig(path.join(configs_workdir, "mozilla"),
                                    branch=branch)

    release_channels = update_channels(
        release["version"], branchConfig["release_channel_mappings"])
    # Candidate releases are split into two graphs, and release-runner only handles the
    # first graph of tasks, so parts like postrelease, push_to_releases/mirrors, and
    # mirror-dependent channels are handled in the second generated graph outside of
    # release-runner. This is not elegant, but it should do the job for now.
    candidate_release = is_candidate_release(release_channels)
    if candidate_release:
        postrelease_enabled = False
        final_verify_channels = [
            c for c in release_channels
            if c not in branchConfig.get('mirror_requiring_channels', [])
        ]
        # TODO - use publish_to_balrog_channels once releasetasks publishes to balrog
        publish_to_balrog_channels = [
            c for c in release_channels
            if c not in branchConfig.get('mirror_requiring_channels', [])
        ]
        push_to_releases_enabled = False
    else:
        postrelease_enabled = branchConfig['postrelease_version_bump_enabled']
        final_verify_channels = release_channels
        publish_to_balrog_channels = release_channels
        push_to_releases_enabled = True

    rc = 0
    for release in rr.new_releases:
        graph_id = slugId()
        try:
            rr.update_status(release, 'Generating task graph')
            l10n_changesets = parsePlainL10nChangesets(
                rr.get_release_l10n(release["name"]))

            kwargs = {
                "public_key": docker_worker_key,
                "version": release["version"],
                "next_version": bump_version(release["version"]),
                "appVersion": getAppVersion(release["version"]),
                "buildNumber": release["buildNumber"],
                "source_enabled": True,
                "checksums_enabled": True,
                "repo_path": release["branch"],
                "revision": release["mozillaRevision"],
                "product": release["product"],
                # if mozharness_revision is not passed, use 'revision'
                "mozharness_changeset": release.get('mh_changeset') or release['mozillaRevision'],
                "partial_updates": getPartials(release),
                "branch": branch,
                "updates_enabled": bool(release["partials"]),
                "l10n_config": get_l10n_config(release, branchConfig, branch,
                                               l10n_changesets, index),
                "en_US_config": get_en_US_config(release, branchConfig, branch,
                                                 index),
                "verifyConfigs": {},
                "balrog_api_root": branchConfig["balrog_api_root"],
                "funsize_balrog_api_root": branchConfig["funsize_balrog_api_root"],
                "balrog_username": balrog_username,
                "balrog_password": balrog_password,
                "beetmover_aws_access_key_id": beetmover_aws_access_key_id,
                "beetmover_aws_secret_access_key": beetmover_aws_secret_access_key,
                # TODO: staging-specific, make them configurable
                "signing_class": "release-signing",
                "bouncer_enabled": branchConfig["bouncer_enabled"],
                "release_channels": release_channels,
                "final_verify_channels": final_verify_channels,
                "signing_pvt_key": signing_pvt_key,
                "build_tools_repo_path": branchConfig['build_tools_repo_path'],
                "push_to_candidates_enabled": branchConfig['push_to_candidates_enabled'],
                "postrelease_bouncer_aliases_enabled": branchConfig['postrelease_bouncer_aliases_enabled'],
                "tuxedo_server_url": branchConfig['tuxedoServerUrl'],
                "postrelease_version_bump_enabled": postrelease_enabled,
                "push_to_releases_enabled": push_to_releases_enabled,
                "push_to_releases_automatic": branchConfig['push_to_releases_automatic'],
                "beetmover_candidates_bucket": branchConfig["beetmover_buckets"][release["product"]],
            }
            if extra_balrog_submitter_params:
                kwargs["extra_balrog_submitter_params"] = extra_balrog_submitter_params

            validate_graph_kwargs(queue, gpg_key_path, **kwargs)
            graph = make_task_graph(**kwargs)
            rr.update_status(release, "Submitting task graph")
            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print scheduler.createTaskGraph(graph_id, graph)

            rr.mark_as_completed(release)
            email_release_drivers(smtp_server=smtp_server,
                                  from_=notify_from,
                                  to=notify_to,
                                  release=release,
                                  graph_id=graph_id)
        except Exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release,
                'Failed to start release promotion (graph ID: %s)' % graph_id)
            log.exception("Failed to start release promotion for graph %s %s",
                          graph_id, release)

    if rc != 0:
        sys.exit(rc)
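Example #10 derives release_channels from the version via update_channels() and then uses is_candidate_release() to decide which post-release and push-to-mirrors pieces belong in this first graph. Neither helper appears on this page; a hedged sketch, assuming the channel mappings are (version-regex, channels) pairs and that a candidate release is simply one that ships to more than one channel:

import re


def update_channels(version, mappings):
    # Return the channels whose version pattern matches, e.g. "40.0" -> ["beta", "release"].
    for pattern, channels in mappings:
        if re.match(pattern, version):
            return channels
    raise RuntimeError("Cannot find update channels for %s" % version)


def is_candidate_release(channels):
    # Assumption: shipping to more than one channel marks a candidate release.
    return bool(channels) and len(channels) > 1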
Example #11
def main(options):
    log.info('Loading config from %s' % options.config)
    config = load_config(options.config)

    if config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    # Shorthand
    api_root = config.get('api', 'api_root')
    username = config.get('api', 'username')
    password = config.get('api', 'password')
    buildbot_configs = config.get('release-runner', 'buildbot_configs')
    buildbot_configs_branch = config.get('release-runner',
                                         'buildbot_configs_branch')
    sleeptime = config.getint('release-runner', 'sleeptime')
    notify_from = get_config(config, 'release-runner', 'notify_from', None)
    notify_to = get_config(config, 'release-runner', 'notify_to_announce', None)
    docker_worker_key = get_config(config, 'release-runner',
                                   'docker_worker_key', None)
    signing_pvt_key = get_config(config, 'signing', 'pvt_key', None)
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = get_config(config, 'release-runner', 'smtp_server',
                             'localhost')
    tc_config = {
        "credentials": {
            "clientId": get_config(config, "taskcluster", "client_id", None),
            "accessToken": get_config(config, "taskcluster", "access_token", None),
        }
    }
    # Extend tc_config for retries, see Bug 1293744
    # https://github.com/taskcluster/taskcluster-client.py/blob/0.0.24/taskcluster/client.py#L30
    # This is a stopgap until Bug 1259627 is fixed.
    retrying_tc_config = tc_config.copy()
    retrying_tc_config.update({"maxRetries": 12})
    balrog_username = get_config(config, "balrog", "username", None)
    balrog_password = get_config(config, "balrog", "password", None)
    extra_balrog_submitter_params = get_config(config, "balrog", "extra_balrog_submitter_params", None)
    beetmover_aws_access_key_id = get_config(config, "beetmover", "aws_access_key_id", None)
    beetmover_aws_secret_access_key = get_config(config, "beetmover", "aws_secret_access_key", None)
    gpg_key_path = get_config(config, "signing", "gpg_key_path", None)

    # TODO: replace release sanity with direct checks of en-US and l10n revisions (and other things if needed)

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)
    scheduler = Scheduler(retrying_tc_config)
    index = Index(tc_config)
    queue = Queue(tc_config)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests()
            if rr.new_releases:
                new_releases = run_prebuild_sanity_checks(rr)
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except Exception:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)

    retry(mercurial, args=(buildbot_configs, CONFIGS_WORKDIR), kwargs=dict(branch=buildbot_configs_branch))

    if 'symlinks' in config.sections():
        format_dict = dict(buildbot_configs=CONFIGS_WORKDIR)
        for target in config.options('symlinks'):
            symlink = config.get('symlinks', target).format(**format_dict)
            if path.exists(symlink):
                log.warning("Skipping %s -> %s symlink" % (symlink, target))
            else:
                log.info("Adding %s -> %s symlink" % (symlink, target))
                os.symlink(target, symlink)
    rc = 0
    for release in new_releases:
        branchConfig = get_branch_config(release)
        # Candidate releases are split into two graphs, and release-runner only handles
        # the first graph of tasks, so parts like postrelease, push_to_releases/mirrors,
        # and mirror-dependent channels are handled in the second generated graph outside
        # of release-runner. This is not elegant, but it should do the job for now.
        release_channels = release['release_channels']
        candidate_release = is_candidate_release(release_channels)
        if candidate_release:
            postrelease_enabled = False
            postrelease_bouncer_aliases_enabled = False
            final_verify_channels = [
                c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            publish_to_balrog_channels = [
                c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
            ]
            push_to_releases_enabled = False
            postrelease_mark_as_shipped_enabled = False
        else:
            postrelease_enabled = branchConfig['postrelease_version_bump_enabled']
            postrelease_bouncer_aliases_enabled = branchConfig['postrelease_bouncer_aliases_enabled']
            postrelease_mark_as_shipped_enabled = branchConfig['postrelease_mark_as_shipped_enabled']
            final_verify_channels = release_channels
            publish_to_balrog_channels = release_channels
            push_to_releases_enabled = True

        ship_it_product_name = release['product']
        tc_product_name = branchConfig['stage_product'][ship_it_product_name]
        # XXX: Works with neither Fennec nor Thunderbird
        platforms = branchConfig['release_platforms']

        graph_id = None  # defined before the try so the except clause below can reference it
        try:
            if not are_en_us_builds_completed(index, release_name=release['name'], submitted_at=release['submittedAt'],
                                              branch=release['branchShortName'], revision=release['mozillaRevision'],
                                              tc_product_name=tc_product_name, platforms=platforms):
                log.info('Builds are not completed yet, skipping release "%s" for now', release['name'])
                rr.update_status(release, 'Waiting for builds to be completed')
                continue

            log.info('Every build is completed for release: %s', release['name'])
            graph_id = slugId()

            rr.update_status(release, 'Generating task graph')

            kwargs = {
                "public_key": docker_worker_key,
                "version": release["version"],
                # ESR should not use "esr" suffix here:
                "next_version": bump_version(release["version"].replace("esr", "")),
                "appVersion": getAppVersion(release["version"]),
                "buildNumber": release["buildNumber"],
                "source_enabled": True,
                "checksums_enabled": True,
                "repo_path": release["branch"],
                "revision": release["mozillaRevision"],
                "product": release["product"],
                # if mozharness_revision is not passed, use 'revision'
                "mozharness_changeset": release.get('mh_changeset') or release['mozillaRevision'],
                "partial_updates": release['partial_updates'],
                "branch": release['branchShortName'],
                "updates_enabled": bool(release["partials"]),
                "l10n_config": get_l10n_config(
                    index=index, product=release["product"], branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['platforms'],
                    l10n_platforms=branchConfig['l10n_release_platforms'],
                    l10n_changesets=release['l10n_changesets']
                ),
                "en_US_config": get_en_US_config(
                    index=index, product=release["product"], branch=release['branchShortName'],
                    revision=release['mozillaRevision'],
                    platforms=branchConfig['release_platforms']
                ),
                "verifyConfigs": {},
                "balrog_api_root": branchConfig["balrog_api_root"],
                "funsize_balrog_api_root": branchConfig["funsize_balrog_api_root"],
                "balrog_username": balrog_username,
                "balrog_password": balrog_password,
                "beetmover_aws_access_key_id": beetmover_aws_access_key_id,
                "beetmover_aws_secret_access_key": beetmover_aws_secret_access_key,
                # TODO: staging-specific, make them configurable
                "signing_class": "release-signing",
                "bouncer_enabled": branchConfig["bouncer_enabled"],
                "updates_builder_enabled": branchConfig["updates_builder_enabled"],
                "update_verify_enabled": branchConfig["update_verify_enabled"],
                "release_channels": release_channels,
                "final_verify_channels": final_verify_channels,
                "final_verify_platforms": branchConfig['release_platforms'],
                "uptake_monitoring_platforms": branchConfig['release_platforms'],
                "signing_pvt_key": signing_pvt_key,
                "build_tools_repo_path": branchConfig['build_tools_repo_path'],
                "push_to_candidates_enabled": branchConfig['push_to_candidates_enabled'],
                "postrelease_bouncer_aliases_enabled": postrelease_bouncer_aliases_enabled,
                "uptake_monitoring_enabled": branchConfig['uptake_monitoring_enabled'],
                "tuxedo_server_url": branchConfig['tuxedoServerUrl'],
                "postrelease_version_bump_enabled": postrelease_enabled,
                "postrelease_mark_as_shipped_enabled": postrelease_mark_as_shipped_enabled,
                "push_to_releases_enabled": push_to_releases_enabled,
                "push_to_releases_automatic": branchConfig['push_to_releases_automatic'],
                "beetmover_candidates_bucket": branchConfig["beetmover_buckets"][release["product"]],
                "partner_repacks_platforms": branchConfig.get("partner_repacks_platforms", []),
                "l10n_changesets": release['l10n_changesets'],
                "extra_balrog_submitter_params": extra_balrog_submitter_params,
                "publish_to_balrog_channels": publish_to_balrog_channels,
                "snap_enabled": branchConfig.get("snap_enabled", False),
            }

            validate_graph_kwargs(queue, gpg_key_path, **kwargs)
            graph = make_task_graph_strict_kwargs(**kwargs)
            rr.update_status(release, "Submitting task graph")
            log.info("Task graph generated!")
            import pprint
            log.debug(pprint.pformat(graph, indent=4, width=160))
            print scheduler.createTaskGraph(graph_id, graph)

            rr.mark_as_completed(release)
            l10n_url = rr.release_l10n_api.getL10nFullUrl(release['name'])
            email_release_drivers(smtp_server=smtp_server, from_=notify_from,
                                  to=notify_to, release=release,
                                  task_group_id=graph_id, l10n_url=l10n_url)
        except Exception as exception:
            # We explicitly do not raise an error here because there's no
            # reason not to start other releases if creating the Task Graph
            # fails for another one. We _do_ need to set this in order to exit
            # with the right code, though.
            rc = 2
            rr.mark_as_failed(
                release,
                'Failed to start release promotion (graph ID: %s). Error(s): %s' % (graph_id, exception)
            )
            log.exception('Failed to start release "%s" promotion for graph %s. Error(s): %s',
                          release['name'], graph_id, exception)
            log.debug('Release failed: %s', release)

    if rc != 0:
        sys.exit(rc)

    log.debug('Sleeping for %s seconds before polling again', sleeptime)
    time.sleep(sleeptime)
Example #12
def main(options):
    log.info('Loading config from %s' % options.config)
    config = load_config(options.config)

    if config.getboolean('release-runner', 'verbose'):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                        level=log_level)
    # Suppress logging of retry(), see bug 925321 for the details
    logging.getLogger("util.retry").setLevel(logging.WARN)

    check_buildbot()
    check_fabric()

    # Shorthand
    sendchange_master = config.get('release-runner', 'sendchange_master')
    api_root = config.get('api', 'api_root')
    username = config.get('api', 'username')
    password = config.get('api', 'password')
    hg_host = config.get('release-runner', 'hg_host')
    hg_username = config.get('release-runner', 'hg_username')
    hg_ssh_key = config.get('release-runner', 'hg_ssh_key')
    buildbot_configs = config.get('release-runner', 'buildbot_configs')
    buildbot_configs_branch = config.get('release-runner',
                                         'buildbot_configs_branch')
    buildbotcustom = config.get('release-runner', 'buildbotcustom')
    buildbotcustom_branch = config.get('release-runner',
                                       'buildbotcustom_branch')
    tools = config.get('release-runner', 'tools')
    tools_branch = config.get('release-runner', 'tools_branch')
    masters_json = config.get('release-runner', 'masters_json')
    staging = config.getboolean('release-runner', 'staging')
    sleeptime = config.getint('release-runner', 'sleeptime')
    notify_from = get_config(config, 'release-runner', 'notify_from', None)
    notify_to = get_config(config, 'release-runner', 'notify_to', None)
    ssh_username = get_config(config, 'release-runner', 'ssh_username', None)
    ssh_key = get_config(config, 'release-runner', 'ssh_key', None)
    if isinstance(notify_to, basestring):
        notify_to = [x.strip() for x in notify_to.split(',')]
    smtp_server = get_config(config, 'release-runner', 'smtp_server',
                             'localhost')
    configs_workdir = 'buildbot-configs'
    custom_workdir = 'buildbotcustom'
    tools_workdir = 'tools'
    if "://" in buildbot_configs and not buildbot_configs.startswith("file"):
        configs_pushRepo = make_hg_url(hg_host,
                                       get_repo_path(buildbot_configs),
                                       protocol='ssh')
    else:
        configs_pushRepo = buildbot_configs
    if "://" in buildbotcustom and not buildbotcustom.startswith("file"):
        custom_pushRepo = make_hg_url(hg_host,
                                      get_repo_path(buildbotcustom),
                                      protocol='ssh')
    else:
        custom_pushRepo = buildbotcustom
    if "://" in tools and not tools.startswith("file"):
        tools_pushRepo = make_hg_url(hg_host,
                                     get_repo_path(tools),
                                     protocol='ssh')
    else:
        tools_pushRepo = tools

    rr = ReleaseRunner(api_root=api_root, username=username, password=password)

    # Main loop waits for new releases, processes them and exits.
    while True:
        try:
            log.debug('Fetching release requests')
            rr.get_release_requests()
            if rr.new_releases:
                for release in rr.new_releases:
                    log.info('Got a new release request: %s' % release)
                break
            else:
                log.debug('Sleeping for %d seconds before polling again' %
                          sleeptime)
                time.sleep(sleeptime)
        except Exception:
            log.error("Caught exception when polling:", exc_info=True)
            sys.exit(5)
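Example #12 converts the configured repository URLs into ssh push URLs with get_repo_path() and make_hg_url(), keeping local or file:// paths untouched. For reference, a minimal sketch of what such helpers typically do (the buildtools implementations may differ and accept extra arguments):

def get_repo_path(url):
    # "https://hg.mozilla.org/build/tools" -> "build/tools"
    return url.split('://', 1)[-1].split('/', 1)[-1].strip('/')


def make_hg_url(hg_host, repo_path, protocol='https', revision=None):
    # ("hg.mozilla.org", "build/tools", protocol="ssh") -> "ssh://hg.mozilla.org/build/tools"
    url = '%s://%s/%s' % (protocol, hg_host, repo_path.strip('/'))
    if revision:
        url = '%s/rev/%s' % (url, revision)
    return url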
Example #13
    if len(args) != 1:
        parser.error("Need just one server.ini file to read")

    # anything past here should only be done as user cltsign, remove
    # footgun
    if getpass.getuser() != 'cltsign':
        print("Must run as user 'cltsign' for remaining actions")
        sys.exit(1)

    config = load_config(args[0])
    if not config:
        parser.error("Error reading config file: %s" % args[0])

    # Set up options if they haven't been overridden on the cmdline
    if options.logfile is None:
        options.logfile = get_config(config, "logging", "logfile", None)
    if options.loglevel is None:
        loglevel = get_config(config, "logging", "loglevel", "INFO")
        options.loglevel = getattr(logging, loglevel)
    if options.log_maxfiles is None:
        options.log_maxfiles = get_config_int(
            config, "logging", "log_maxfiles", 50)
    if options.log_maxsize is None:
        # 10 MB log size by default
        options.log_maxsize = get_config_int(
            config, "logging", "log_maxsize", 10 * (1024 ** 2))
    if options.daemonize is None:
        options.daemonize = get_config_bool(
            config, "server", "daemonize", False)
    os.umask(0o077)
    setup_logging(logfile=options.logfile,
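Examples #5, #6, #9, and #13 all end in a truncated setup_logging(...) call, configured from the logfile, loglevel, log_maxsize, and log_maxfiles options read above. A hedged sketch of what such a function could look like, using a size-based rotating file handler (the actual signing-server code may differ):

import logging
from logging.handlers import RotatingFileHandler


def setup_logging(logfile=None, loglevel=logging.INFO,
                  log_maxsize=10 * (1024 ** 2), log_maxfiles=50):
    # Log to a rotating file when a logfile is configured, otherwise to stderr.
    if logfile:
        handler = RotatingFileHandler(logfile, maxBytes=log_maxsize,
                                      backupCount=log_maxfiles)
    else:
        handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    root = logging.getLogger()
    root.setLevel(loglevel)
    root.addHandler(handler)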