Example #1
def upload_pillars():
    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    cfgsrv_token, cfgsrv_redis_url = util.read_cfgsrv_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if not util.in_production():
        cfgsrv_redis_url = "redis://%s:6379" % config.cloudmaster_address
    util.ssh_cloudmaster((
        'echo "salt_version: %s" > salt.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        ' && echo "" > $(hostname).sls""'
        ' && echo "in_production: %s" > global.sls '
        ' && echo "datacenter: %s" >> global.sls '
        ' && echo "cloudmaster_name: %s" >> global.sls '
        ' && echo "do_token: %s" > do_credential.sls'
        ' && echo "vultr_apikey: %s" > vultr_credential.sls'
        ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
        ' && echo "cfgsrv_redis_url: %s" >> cfgsrv_credential.sls'
        ' && echo "github_token: %s" > github_token.sls'
        ' && echo "loggly_token: %s" > loggly_token.sls'
        r' && echo "base: {\"*\": [salt, global], \"fp-*\": [cfgsrv_credential, vultr_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, cfgsrv_credential]}" > top.sls '
        ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls cfgsrv_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        ' && sudo chmod -R 600 /srv/pillar ') %
                         (config.salt_version, util.in_production(),
                          config.datacenter, config.cloudmaster_name, do_token,
                          vultr_apikey, cfgsrv_token, cfgsrv_redis_url,
                          github_token, loggly_token))
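For reference, the base: mapping that Example #1 echoes into /srv/pillar/top.sls describes which pillar files each class of minion receives. The snippet below is only a sketch that rebuilds the same mapping in Python and prints it as YAML (it assumes PyYAML is installed; the example itself does not need it):

# Sketch only: reproduces the top-file mapping written by Example #1,
# so the pillar-to-minion assignment is easier to read.
import yaml  # assumption: PyYAML is available

top_file = {
    "base": {
        "*": ["salt", "global"],
        "fp-*": ["cfgsrv_credential", "vultr_credential",
                 "github_token", "loggly_token"],
        "cm-*": ["do_credential", "vultr_credential", "cfgsrv_credential"],
    }
}

print(yaml.safe_dump(top_file, default_flow_style=False))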
Example #2
def upload_pillars():
    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    cfgsrv_token, cfgsrv_redis_url, cfgsrv_redis_test_pass \
        = util.read_cfgsrv_credential()
    secondary_redis_url = util.read_secondary_redis_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if util.in_production():
        slack_webhook_url = util.read_slack_webhook_url()
    else:
        slack_webhook_url = util.read_slack_staging_webhook_url()
    if not util.in_production():
        if util.in_staging():
            # Exception: make the staging cloudmasters use the redis instance
            # of the staging cloudmaster in Amsterdam, to be more like the
            # production setup.
            redis_address = '188.166.55.168'
        else:
            redis_address = config.cloudmaster_address
        cfgsrv_redis_url = "redis://*****:*****@%s:6379" % (cfgsrv_redis_test_pass,
                                                         redis_address)
    util.ssh_cloudmaster((
            'echo "salt_version: %s" > salt.sls '
            # Hack so every instance will read specific pillars from a file
            # named with the <instance_name>.sls scheme.
            r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
            ' && echo "" > $(hostname).sls""'
            ' && echo "in_staging: %s" > global.sls '
            ' && echo "in_production: %s" >> global.sls '
            ' && echo "datacenter: %s" >> global.sls '
            ' && echo "slack_webhook_url: %s" >> global.sls '
            ' && echo "cloudmaster_name: %s" >> global.sls '
            ' && echo "do_token: %s" > do_credential.sls'
            ' && echo "vultr_apikey: %s" > vultr_credential.sls'
            ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
            ' && echo "cfgsrv_redis_url: %s" >> cfgsrv_credential.sls'
            ' && echo "cfgsrv_redis_test_pass: \"%s\"" >> cfgsrv_credential.sls'
            ' && echo "secondary_redis_url: \"%s\"" >> secondary_redis_credential.sls'
            ' && echo "github_token: %s" > github_token.sls'
            ' && echo "loggly_token: %s" > loggly_token.sls'
            r' && echo "base: {\"*\": [salt, global], \"fp-*\": [cfgsrv_credential, vultr_credential, secondary_redis_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, cfgsrv_credential], \"cs-*\": [cfgsrv_credential]}" > top.sls '
            ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls cfgsrv_credential.sls secondary_redis_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
            ' && sudo chown -R root:root /srv/pillar '
            ' && sudo chmod -R 600 /srv/pillar '
            ) % (config.salt_version,
                 util.in_staging(),
                 util.in_production(),
                 config.datacenter,
                 slack_webhook_url,
                 config.cloudmaster_name,
                 do_token,
                 vultr_apikey,
                 cfgsrv_token,
                 cfgsrv_redis_url,
                 cfgsrv_redis_test_pass,
                 secondary_redis_url,
                 github_token,
                 loggly_token))
Example #3
def upload_pillars(as_root=False):
    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    linode_password, linode_apikey, linode_tokyo_apikey = \
        util.read_linode_credential()
    cfgsrv_token, cfgsrv_redis_url, cfgsrv_redis_test_pass \
        = util.read_cfgsrv_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if util.in_production():
        slack_webhook_url = util.read_slack_webhook_url()
    else:
        slack_webhook_url = util.read_slack_staging_webhook_url()
    if not util.in_production():
        if util.in_staging():
            # Exception: make the staging cloudmasters use the redis instance
            # of the staging cloudmaster in Amsterdam, to be more like the
            # production setup.
            redis_address = '188.166.55.168'
        else:
            redis_address = config.cloudmaster_address
        cfgsrv_redis_url = "redis://*****:*****@%s:6379" % (
            cfgsrv_redis_test_pass, redis_address)
    util.ssh_cloudmaster(
        (
            'echo "salt_version: %s" > salt.sls '
            # Hack so every instance will read specific pillars from a file
            # named with the <instance_name>.sls scheme.
            r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
            ' && echo "" > $(hostname).sls""'
            ' && echo "in_staging: %s" > global.sls '
            ' && echo "in_production: %s" >> global.sls '
            ' && echo "datacenter: %s" >> global.sls '
            ' && echo "cfgsrv_redis_url: %s" >> global.sls'
            ' && echo "slack_webhook_url: %s" >> global.sls '
            ' && echo "cloudmaster_name: %s" >> global.sls '
            ' && echo "do_token: %s" > do_credential.sls'
            ' && echo "vultr_apikey: %s" > vultr_credential.sls'
            ' && echo "linode_password: \'%s\'" > linode_credential.sls '
            ' && echo "linode_apikey: %s" >> linode_credential.sls '
            ' && echo "linode_tokyo_apikey: %s" >> linode_credential.sls '
            ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
            ' && echo "cfgsrv_redis_test_pass: \"%s\"" >> cfgsrv_credential.sls'
            ' && echo "github_token: %s" > github_token.sls'
            ' && echo "loggly_token: %s" > loggly_token.sls'
            r' && echo "base: {\"fp-*\": [cfgsrv_credential, vultr_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, linode_credential, cfgsrv_credential], \"cs-*\": [cfgsrv_credential], \"*\": [global, salt]}" > top.sls '
            ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls linode_credential.sls cfgsrv_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
            ' && sudo chown -R root:root /srv/pillar '
            ' && sudo chmod -R 600 /srv/pillar ') %
        (config.salt_version, util.in_staging(), util.in_production(),
         config.datacenter, cfgsrv_redis_url, slack_webhook_url,
         config.cloudmaster_name, do_token, vultr_apikey, linode_password,
         linode_apikey, linode_tokyo_apikey, cfgsrv_token,
         cfgsrv_redis_test_pass, github_token, loggly_token),
        as_root=as_root)
Example #4
def check_master_if_in_production():
    if util.in_production():
        status_output = normalize_status_output(subprocess.check_output(["git", "status"]))
        if status_output != EXPECTED_PRODUCTION_GIT_STATUS_OUTPUT:
            not_up_to_date()
        pull_output = subprocess.check_output(["git", "pull"])
        if pull_output != EXPECTED_PRODUCTION_GIT_PULL_OUTPUT:
            not_up_to_date()
Example #5
def check_master_if_in_production():
    if util.in_production():
        status_output = normalize_status_output(
            subprocess.check_output(['git', 'status']))
        if status_output != EXPECTED_PRODUCTION_GIT_STATUS_OUTPUT:
            not_up_to_date()
        pull_output = subprocess.check_output(['git', 'pull'])
        if pull_output != EXPECTED_PRODUCTION_GIT_PULL_OUTPUT:
            not_up_to_date()
Example #6
def upload_pillars():
    do_id, do_key, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    aws_id, aws_key = util.read_aws_credential()
    cfr_id, cfr_key = util.read_aws_credential(
            os.path.join(here.secrets_path,
                         'cloudfront.aws_credential'))
    cfl_id, cfl_key = util.read_cfl_credential()
    dsp_id, dsp_key = util.read_dnsimple_credential()
    cfgsrv_token, cfgsrv_redis_url = util.read_cfgsrv_credential()
    util.ssh_cloudmaster((
            'echo "branch: check-all-fallbacks" > $(hostname).sls '
            ' && echo "private_networking: %s" >> $(hostname).sls '
            ' && echo "default_profile: %s" >> $(hostname).sls '
            ' && echo "salt_version: %s" > salt.sls '
            ' && echo "in_production: %s" > global.sls '
            # Hack so every instance will read specific pillars from a file
            # named with the <instance_name>.sls scheme.
            r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
            ' && echo "do_id: %s"  > do_credential.sls'
            ' && echo "do_key: %s" >> do_credential.sls'
            ' && echo "do_token: %s" >> do_credential.sls'
            ' && echo "vultr_apikey: %s" > vultr_credential.sls'
            ' && echo "aws_id: %s"  > aws_credential.sls'
            ' && echo "aws_key: %s" >> aws_credential.sls'
            ' && echo "cfl_id: %s"  > cfl_credential.sls'
            ' && echo "cfl_key: %s" >> cfl_credential.sls'
            ' && echo "cfr_id: %s"  > cfr_credential.sls'
            ' && echo "cfr_key: %s" >> cfr_credential.sls'
            ' && echo "dsp_id: %s"  > dsp_credential.sls'
            ' && echo "dsp_key: %s" >> dsp_credential.sls'
            ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
            ' && echo "cfgsrv_redis_url: %s" >> cfgsrv_credential.sls'
           r' && echo "base: {\"*\": [salt, global], \"fp-*\": [aws_credential, cfgsrv_credential, vultr_credential], \"*cloudmaster*\": [aws_credential, do_credential, vultr_credential, cfr_credential, cfgsrv_credential], \"ps-*\": [cfl_credential, cfr_credential, dsp_credential]}" > top.sls '
            ' && sudo mv salt.sls global.sls top.sls $(hostname).sls aws_credential.sls cfl_credential.sls cfr_credential.sls do_credential.sls vultr_credential.sls dsp_credential.sls cfgsrv_credential.sls /srv/pillar/ '
            ' && sudo chown -R root:root /srv/pillar '
            ' && sudo chmod -R 600 /srv/pillar '
            ) % (config.private_networking,
                 config.default_profile,
                 config.salt_version,
                 util.in_production(),
                 do_id,
                 do_key,
                 do_token,
                 vultr_apikey,
                 aws_id,
                 aws_key,
                 cfl_id,
                 cfl_key,
                 cfr_id,
                 cfr_key,
                 dsp_id,
                 dsp_key,
                 cfgsrv_token,
                 cfgsrv_redis_url))
Example #7
def check_master_if_in_production():
    if util.in_production():
        status_output = normalize_status_output(
                subprocess.check_output(['git', 'status']))
        if status_output != EXPECTED_PRODUCTION_GIT_STATUS_OUTPUT:
            not_up_to_date()
        pull_output = subprocess.check_output(['git', 'pull'])
        if pull_output != EXPECTED_PRODUCTION_GIT_PULL_OUTPUT:
            not_up_to_date()
        lfs_status_output = normalize_status_output(
                subprocess.check_output(['git', 'lfs', 'status']))
        if lfs_status_output != EXPECTED_PRODUCTION_GIT_LFS_STATUS_OUTPUT:
            not_up_to_date()
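The check_master_if_in_production variants compare raw subprocess output against fixed EXPECTED_* constants. The normalize_status_output helper is not shown in these examples; the sketch below is purely an assumption of what such a helper might do (decode bytes and strip volatile whitespace) so the equality check is not tripped up by trailing whitespace:

# Hypothetical helper -- not part of the examples above.  One plausible
# normalize_status_output: decode bytes and strip trailing whitespace so
# the comparison with the EXPECTED_* constants is deterministic.
def normalize_status_output(raw):
    if isinstance(raw, bytes):
        raw = raw.decode("utf-8")
    return "\n".join(line.rstrip() for line in raw.strip().splitlines()) + "\n"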
Example #8
def upload_pillars():
    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    cfgsrv_token, cfgsrv_redis_url = util.read_cfgsrv_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if not util.in_production():
        cfgsrv_redis_url = "redis://%s:6379" % config.cloudmaster_address
    util.ssh_cloudmaster((
            'echo "salt_version: %s" > salt.sls '
            # Hack so every instance will read specific pillars from a file
            # named with the <instance_name>.sls scheme.
            r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
            ' && echo "" > $(hostname).sls""'
            ' && echo "in_production: %s" > global.sls '
            ' && echo "datacenter: %s" >> global.sls '
            ' && echo "cloudmaster_name: %s" >> global.sls '
            ' && echo "do_token: %s" > do_credential.sls'
            ' && echo "vultr_apikey: %s" > vultr_credential.sls'
            ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
            ' && echo "cfgsrv_redis_url: %s" >> cfgsrv_credential.sls'
            ' && echo "github_token: %s" > github_token.sls'
            ' && echo "loggly_token: %s" > loggly_token.sls'
            r' && echo "base: {\"*\": [salt, global], \"fp-*\": [cfgsrv_credential, vultr_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, cfgsrv_credential]}" > top.sls '
            ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls cfgsrv_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
            ' && sudo chown -R root:root /srv/pillar '
            ' && sudo chmod -R 600 /srv/pillar '
            ) % (config.salt_version,
                 util.in_production(),
                 config.datacenter,
                 config.cloudmaster_name,
                 do_token,
                 vultr_apikey,
                 cfgsrv_token,
                 cfgsrv_redis_url,
                 github_token,
                 loggly_token))
Example #9
def upload_pillars():
    do_id, do_key, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    aws_id, aws_key = util.read_aws_credential()
    cfr_id, cfr_key = util.read_aws_credential(
        os.path.join(here.secrets_path, 'cloudfront.aws_credential'))
    cfl_id, cfl_key = util.read_cfl_credential()
    dsp_id, dsp_key = util.read_dnsimple_credential()
    cfgsrv_token, cfgsrv_redis_url = util.read_cfgsrv_credential()
    util.ssh_cloudmaster((
        'echo "branch: check-all-fallbacks" > $(hostname).sls '
        ' && echo "private_networking: %s" >> $(hostname).sls '
        ' && echo "default_profile: %s" >> $(hostname).sls '
        ' && echo "salt_version: %s" > salt.sls '
        ' && echo "in_production: %s" > global.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        ' && echo "do_id: %s"  > do_credential.sls'
        ' && echo "do_key: %s" >> do_credential.sls'
        ' && echo "do_token: %s" >> do_credential.sls'
        ' && echo "vultr_apikey: %s" > vultr_credential.sls'
        ' && echo "aws_id: %s"  > aws_credential.sls'
        ' && echo "aws_key: %s" >> aws_credential.sls'
        ' && echo "cfl_id: %s"  > cfl_credential.sls'
        ' && echo "cfl_key: %s" >> cfl_credential.sls'
        ' && echo "cfr_id: %s"  > cfr_credential.sls'
        ' && echo "cfr_key: %s" >> cfr_credential.sls'
        ' && echo "dsp_id: %s"  > dsp_credential.sls'
        ' && echo "dsp_key: %s" >> dsp_credential.sls'
        ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
        ' && echo "cfgsrv_redis_url: %s" >> cfgsrv_credential.sls'
        r' && echo "base: {\"*\": [salt, global], \"fp-*\": [aws_credential, cfgsrv_credential, vultr_credential], \"*cloudmaster*\": [aws_credential, do_credential, vultr_credential, cfr_credential, cfgsrv_credential], \"ps-*\": [cfl_credential, cfr_credential, dsp_credential]}" > top.sls '
        ' && sudo mv salt.sls global.sls top.sls $(hostname).sls aws_credential.sls cfl_credential.sls cfr_credential.sls do_credential.sls vultr_credential.sls dsp_credential.sls cfgsrv_credential.sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        ' && sudo chmod -R 600 /srv/pillar ') %
                         (config.private_networking, config.default_profile,
                          config.salt_version, util.in_production(), do_id,
                          do_key, do_token, vultr_apikey, aws_id, aws_key,
                          cfl_id, cfl_key, cfr_id, cfr_key, dsp_id, dsp_key,
                          cfgsrv_token, cfgsrv_redis_url))
Example #10
def upload_pillars(as_root=False):
    if not util.in_dev() and not util.in_staging() and not util.in_production():
        assert util.in_production(), "Environment unknown!"

    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    linode_password, linode_apikey, linode_tokyo_apikey = util.read_linode_credential()
    cfgsrv_token, cfgsrv_redis_url, cfgsrv_redis_test_pass \
        = util.read_cfgsrv_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if util.in_production():
        slack_webhook_url = util.read_slack_webhook_url()
    else:
        slack_webhook_url = util.read_slack_staging_webhook_url()

    environment = "production"

    if util.in_staging():
        environment = "staging"
        cfgsrv_redis_url = "rediss://:[email protected]:6380"

    redis_host = cfgsrv_redis_url.split('@')[1]
    redis_domain = redis_host.split(":")[0]
    redis_via_stunnel_url = cfgsrv_redis_url.split('@')[0].replace("rediss", "redis") + "@localhost:6380"

    if util.in_dev():
        environment = "dev"
        redis_host = "%s:6379" % config.cloudmaster_address
        cfgsrv_redis_url = "redis://*****:*****@%s" % (cfgsrv_redis_test_pass, redis_host)
        redis_domain = "redis-staging.getlantern.org"
        # Bypass stunnel in dev environments because we're not encrypting connections to Redis
        redis_via_stunnel_url = cfgsrv_redis_url

    ssh_whitelist_query_token = util.read_ssh_whitelist_query_token()

    util.ssh_cloudmaster((
            'echo "salt_version: %s" > salt.sls '
            # Hack so every instance will read specific pillars from a file
            # named with the <instance_name>.sls scheme.
            r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
            ' && echo "" > $(hostname).sls""'
            ' && echo "environment: %s" > global.sls '
            ' && echo "in_dev: %s" >> global.sls '
            ' && echo "in_staging: %s" >> global.sls '
            ' && echo "in_production: %s" >> global.sls '
            ' && echo "datacenter: %s" >> global.sls '
            ' && echo "cfgsrv_redis_url: %s" >> global.sls'
            ' && echo "redis_via_stunnel_url: %s" >> global.sls'
            ' && echo "redis_host: %s" >> global.sls'
            ' && echo "redis_domain: %s" >> global.sls'
            ' && echo "slack_webhook_url: %s" >> global.sls '
            ' && echo "cloudmaster_name: %s" >> global.sls '
            ' && echo "ssh_whitelist_query_token: %s" >> global.sls '
            ' && echo "do_token: %s" > do_credential.sls'
            ' && echo "vultr_apikey: %s" > vultr_credential.sls'
            ' && echo "linode_password: \'%s\'" > linode_credential.sls '
            ' && echo "linode_apikey: %s" >> linode_credential.sls '
            ' && echo "linode_tokyo_apikey: %s" >> linode_credential.sls '
            ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
            ' && echo "cfgsrv_redis_test_pass: \"%s\"" >> cfgsrv_credential.sls'
            ' && echo "github_token: %s" > github_token.sls'
            ' && echo "loggly_token: %s" > loggly_token.sls'
            r' && echo "base: {\"fp-*\": [cfgsrv_credential, vultr_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, linode_credential, cfgsrv_credential], \"cs-*\": [cfgsrv_credential], \"*\": [global, salt]}" > top.sls '
            ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls linode_credential.sls cfgsrv_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
            ' && sudo chown -R root:root /srv/pillar '
            ' && sudo chmod -R 600 /srv/pillar '
            ) % (config.salt_version,
                 environment,
                 util.in_dev(),
                 util.in_staging(),
                 util.in_production(),
                 config.datacenter,
                 cfgsrv_redis_url,
                 redis_via_stunnel_url,
                 redis_host,
                 redis_domain,
                 slack_webhook_url,
                 config.cloudmaster_name,
                 ssh_whitelist_query_token,
                 do_token,
                 vultr_apikey,
                 linode_password,
                 linode_apikey,
                 linode_tokyo_apikey,
                 cfgsrv_token,
                 cfgsrv_redis_test_pass,
                 github_token,
                 loggly_token),
            as_root=as_root)
Example #11
def upload_pillars(as_root=False):
    if not util.in_dev() and not util.in_staging() and not util.in_production():
        assert util.in_production(), "Environment unknown!"

    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    linode_password, linode_apikey, linode_tokyo_apikey = \
        util.read_linode_credential()
    cfgsrv_token, cfgsrv_redis_url, cfgsrv_redis_test_pass \
        = util.read_cfgsrv_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if util.in_production():
        slack_webhook_url = util.read_slack_webhook_url()
    else:
        slack_webhook_url = util.read_slack_staging_webhook_url()

    environment = "production"

    if util.in_staging():
        environment = "staging"
        cfgsrv_redis_url = "rediss://:[email protected]:6380"

    redis_host = cfgsrv_redis_url.split('@')[1]
    redis_domain = redis_host.split(":")[0]
    redis_via_stunnel_url = cfgsrv_redis_url.split('@')[0].replace(
        "rediss", "redis") + "@localhost:6380"

    if util.in_dev():
        environment = "dev"
        redis_host = "%s:6379" % config.cloudmaster_address
        cfgsrv_redis_url = "redis://*****:*****@%s" % (cfgsrv_redis_test_pass,
                                                    redis_host)
        redis_domain = "redis-staging.getlantern.org"
        # Bypass stunnel in dev environments because we're not encrypting connections to Redis
        redis_via_stunnel_url = cfgsrv_redis_url

    ssh_whitelist_query_token = util.read_ssh_whitelist_query_token()

    util.ssh_cloudmaster(
        (
            'echo "salt_version: %s" > salt.sls '
            # Hack so every instance will read specific pillars from a file
            # named with the <instance_name>.sls scheme.
            r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
            ' && echo "" > $(hostname).sls""'
            ' && echo "environment: %s" > global.sls '
            ' && echo "in_dev: %s" >> global.sls '
            ' && echo "in_staging: %s" >> global.sls '
            ' && echo "in_production: %s" >> global.sls '
            ' && echo "datacenter: %s" >> global.sls '
            ' && echo "cfgsrv_redis_url: %s" >> global.sls'
            ' && echo "redis_via_stunnel_url: %s" >> global.sls'
            ' && echo "redis_host: %s" >> global.sls'
            ' && echo "redis_domain: %s" >> global.sls'
            ' && echo "slack_webhook_url: %s" >> global.sls '
            ' && echo "cloudmaster_name: %s" >> global.sls '
            ' && echo "ssh_whitelist_query_token: %s" >> global.sls '
            ' && echo "do_token: %s" > do_credential.sls'
            ' && echo "vultr_apikey: %s" > vultr_credential.sls'
            ' && echo "linode_password: \'%s\'" > linode_credential.sls '
            ' && echo "linode_apikey: %s" >> linode_credential.sls '
            ' && echo "linode_tokyo_apikey: %s" >> linode_credential.sls '
            ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
            ' && echo "cfgsrv_redis_test_pass: \"%s\"" >> cfgsrv_credential.sls'
            ' && echo "github_token: %s" > github_token.sls'
            ' && echo "loggly_token: %s" > loggly_token.sls'
            r' && echo "base: {\"fp-*\": [cfgsrv_credential, vultr_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, linode_credential, cfgsrv_credential], \"cs-*\": [cfgsrv_credential], \"*\": [global, salt]}" > top.sls '
            ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls linode_credential.sls cfgsrv_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
            ' && sudo chown -R root:root /srv/pillar '
            ' && sudo chmod -R 600 /srv/pillar ') %
        (config.salt_version, environment, util.in_dev(), util.in_staging(),
         util.in_production(), config.datacenter, cfgsrv_redis_url,
         redis_via_stunnel_url, redis_host, redis_domain, slack_webhook_url,
         config.cloudmaster_name, ssh_whitelist_query_token, do_token,
         vultr_apikey, linode_password, linode_apikey, linode_tokyo_apikey,
         cfgsrv_token, cfgsrv_redis_test_pass, github_token, loggly_token),
        as_root=as_root)
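The redis_* derivation in Examples #10 and #11 is easiest to follow with a concrete value. The walkthrough below uses a made-up password together with the staging host name that appears elsewhere in the code, and only restates the string operations already shown above:

# Worked example of the URL splitting in Examples #10/#11.
# "PASSWORD" and the exact host are placeholders, not real values.
cfgsrv_redis_url = "rediss://:PASSWORD@redis-staging.getlantern.org:6380"

redis_host = cfgsrv_redis_url.split('@')[1]      # "redis-staging.getlantern.org:6380"
redis_domain = redis_host.split(":")[0]          # "redis-staging.getlantern.org"
redis_via_stunnel_url = (cfgsrv_redis_url.split('@')[0]
                         .replace("rediss", "redis") + "@localhost:6380")
# redis_via_stunnel_url == "redis://:PASSWORD@localhost:6380", i.e. clients
# reach Redis through a local stunnel endpoint instead of the remote TLS port.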