def upload_pillars():
    """Generate salt pillar files on the cloudmaster and install them.

    Reads provider/service credentials from local secret files, then runs
    one SSH command on the cloudmaster that writes each pillar .sls file,
    moves them under /srv/pillar and locks the tree down to root-only.
    """
    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    cfgsrv_token, cfgsrv_redis_url = util.read_cfgsrv_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if not util.in_production():
        # Outside production, point the config server at the redis
        # instance on the cloudmaster itself.
        cfgsrv_redis_url = "redis://%s:6379" % config.cloudmaster_address
    util.ssh_cloudmaster((
        'echo "salt_version: %s" > salt.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        # Create an (initially empty) per-instance pillar file.  Fixed: a
        # stray trailing '""' after the redirection target was removed; it
        # expanded to nothing in the shell but was a confusing leftover.
        ' && echo "" > $(hostname).sls'
        ' && echo "in_production: %s" > global.sls '
        ' && echo "datacenter: %s" >> global.sls '
        ' && echo "cloudmaster_name: %s" >> global.sls '
        ' && echo "do_token: %s" > do_credential.sls'
        ' && echo "vultr_apikey: %s" > vultr_credential.sls'
        ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
        ' && echo "cfgsrv_redis_url: %s" >> cfgsrv_credential.sls'
        ' && echo "github_token: %s" > github_token.sls'
        ' && echo "loggly_token: %s" > loggly_token.sls'
        r' && echo "base: {\"*\": [salt, global], \"fp-*\": [cfgsrv_credential, vultr_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, cfgsrv_credential]}" > top.sls '
        ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls cfgsrv_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        ' && sudo chmod -R 600 /srv/pillar ')
        % (config.salt_version,
           util.in_production(),
           config.datacenter,
           config.cloudmaster_name,
           do_token,
           vultr_apikey,
           cfgsrv_token,
           cfgsrv_redis_url,
           github_token,
           loggly_token))
def upload_pillars():
    """Write the pillar .sls files on the cloudmaster and move them into
    /srv/pillar, owned by root with mode 600.
    """
    do_id, do_key, _ = util.read_do_credential()
    aws_id, aws_key = util.read_aws_credential()
    cfr_id, cfr_key = util.read_aws_credential(
        os.path.join(here.secrets_path, 'cloudfront.aws_credential'))
    cfl_id, cfl_key = util.read_cfl_credential()
    azure_ssh_pass = util.read_azure_ssh_pass()
    dsp_id, dsp_key = util.read_dnsimple_credential()
    # Template of the remote shell pipeline; secrets are interpolated with
    # old-style '%' formatting just before the SSH call.
    remote_cmd = (
        'echo "branch: check-all-fallbacks" > $(hostname).sls '
        ' && echo "private_networking: %s" >> $(hostname).sls '
        ' && echo "default_profile: %s" >> $(hostname).sls '
        ' && echo "azure_ssh_pass: %s" >> $(hostname).sls '
        ' && echo "salt_version: %s" > salt.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        ' && echo "do_id: %s" > do_credential.sls'
        ' && echo "do_key: %s" >> do_credential.sls'
        ' && echo "aws_id: %s" > aws_credential.sls'
        ' && echo "aws_key: %s" >> aws_credential.sls'
        ' && echo "cfl_id: %s" > cfl_credential.sls'
        ' && echo "cfl_key: %s" >> cfl_credential.sls'
        ' && echo "cfr_id: %s" > cfr_credential.sls'
        ' && echo "cfr_key: %s" >> cfr_credential.sls'
        ' && echo "dsp_id: %s" > dsp_credential.sls'
        ' && echo "dsp_key: %s" >> dsp_credential.sls'
        r' && echo "base: {\"*\": [salt], \"fp-*\": [aws_credential], \"*cloudmaster*\": [aws_credential, do_credential, cfr_credential], \"ps-*\": [cfl_credential, cfr_credential, dsp_credential]}" > top.sls '
        ' && sudo mv salt.sls top.sls $(hostname).sls aws_credential.sls cfl_credential.sls cfr_credential.sls do_credential.sls dsp_credential.sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        ' && sudo chmod -R 600 /srv/pillar ')
    fill_ins = (config.private_networking, config.default_profile,
                azure_ssh_pass, config.salt_version,
                do_id, do_key,
                aws_id, aws_key,
                cfl_id, cfl_key,
                cfr_id, cfr_key,
                dsp_id, dsp_key)
    util.ssh_cloudmaster(remote_cmd % fill_ins)
def upload_pillars():
    """Generate salt pillar files on the cloudmaster and install them
    under /srv/pillar with root-only permissions.
    """
    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    cfgsrv_token, cfgsrv_redis_url, cfgsrv_redis_test_pass \
        = util.read_cfgsrv_credential()
    secondary_redis_url = util.read_secondary_redis_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if util.in_production():
        slack_webhook_url = util.read_slack_webhook_url()
    else:
        slack_webhook_url = util.read_slack_staging_webhook_url()
    if not util.in_production():
        if util.in_staging():
            # Exception: make the staging cloudmasters use the redis instance
            # of the staging cloudmaster in Amsterdam, to be more like the
            # production setup.
            redis_address = '188.166.55.168'
        else:
            redis_address = config.cloudmaster_address
        # NOTE(review): the visible template has a single %s but two
        # arguments are supplied -- this looks like redacted credentials;
        # verify against the unredacted source.
        cfgsrv_redis_url = "redis://*****:*****@%s:6379" % (
            cfgsrv_redis_test_pass, redis_address)
    util.ssh_cloudmaster((
        'echo "salt_version: %s" > salt.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        # Fixed: removed a stray trailing '""' that followed the
        # redirection target (it expanded to nothing in the shell).
        ' && echo "" > $(hostname).sls'
        ' && echo "in_staging: %s" > global.sls '
        ' && echo "in_production: %s" >> global.sls '
        ' && echo "datacenter: %s" >> global.sls '
        ' && echo "slack_webhook_url: %s" >> global.sls '
        ' && echo "cloudmaster_name: %s" >> global.sls '
        ' && echo "do_token: %s" > do_credential.sls'
        ' && echo "vultr_apikey: %s" > vultr_credential.sls'
        ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
        ' && echo "cfgsrv_redis_url: %s" >> cfgsrv_credential.sls'
        ' && echo "cfgsrv_redis_test_pass: \"%s\"" >> cfgsrv_credential.sls'
        ' && echo "secondary_redis_url: \"%s\"" >> secondary_redis_credential.sls'
        ' && echo "github_token: %s" > github_token.sls'
        ' && echo "loggly_token: %s" > loggly_token.sls'
        r' && echo "base: {\"*\": [salt, global], \"fp-*\": [cfgsrv_credential, vultr_credential, secondary_redis_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, cfgsrv_credential], \"cs-*\": [cfgsrv_credential]}" > top.sls '
        ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls cfgsrv_credential.sls secondary_redis_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        ' && sudo chmod -R 600 /srv/pillar ')
        % (config.salt_version,
           util.in_staging(),
           util.in_production(),
           config.datacenter,
           slack_webhook_url,
           config.cloudmaster_name,
           do_token,
           vultr_apikey,
           cfgsrv_token,
           cfgsrv_redis_url,
           cfgsrv_redis_test_pass,
           secondary_redis_url,
           github_token,
           loggly_token))
def upload_pillars(as_root=False):
    """Generate salt pillar files on the cloudmaster and install them
    under /srv/pillar with root-only permissions.

    :param as_root: forwarded to util.ssh_cloudmaster; run the remote
        command as root when True.
    """
    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    linode_password, linode_apikey, linode_tokyo_apikey = \
        util.read_linode_credential()
    cfgsrv_token, cfgsrv_redis_url, cfgsrv_redis_test_pass \
        = util.read_cfgsrv_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if util.in_production():
        slack_webhook_url = util.read_slack_webhook_url()
    else:
        slack_webhook_url = util.read_slack_staging_webhook_url()
    if not util.in_production():
        if util.in_staging():
            # Exception: make the staging cloudmasters use the redis instance
            # of the staging cloudmaster in Amsterdam, to be more like the
            # production setup.
            redis_address = '188.166.55.168'
        else:
            redis_address = config.cloudmaster_address
        # NOTE(review): the visible template has a single %s but two
        # arguments are supplied -- this looks like redacted credentials;
        # verify against the unredacted source.
        cfgsrv_redis_url = "redis://*****:*****@%s:6379" % (
            cfgsrv_redis_test_pass, redis_address)
    util.ssh_cloudmaster((
        'echo "salt_version: %s" > salt.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        # Fixed: removed a stray trailing '""' that followed the
        # redirection target (it expanded to nothing in the shell).
        ' && echo "" > $(hostname).sls'
        ' && echo "in_staging: %s" > global.sls '
        ' && echo "in_production: %s" >> global.sls '
        ' && echo "datacenter: %s" >> global.sls '
        ' && echo "cfgsrv_redis_url: %s" >> global.sls'
        ' && echo "slack_webhook_url: %s" >> global.sls '
        ' && echo "cloudmaster_name: %s" >> global.sls '
        ' && echo "do_token: %s" > do_credential.sls'
        ' && echo "vultr_apikey: %s" > vultr_credential.sls'
        ' && echo "linode_password: \'%s\'" > linode_credential.sls '
        ' && echo "linode_apikey: %s" >> linode_credential.sls '
        ' && echo "linode_tokyo_apikey: %s" >> linode_credential.sls '
        ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
        ' && echo "cfgsrv_redis_test_pass: \"%s\"" >> cfgsrv_credential.sls'
        ' && echo "github_token: %s" > github_token.sls'
        ' && echo "loggly_token: %s" > loggly_token.sls'
        r' && echo "base: {\"fp-*\": [cfgsrv_credential, vultr_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, linode_credential, cfgsrv_credential], \"cs-*\": [cfgsrv_credential], \"*\": [global, salt]}" > top.sls '
        ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls linode_credential.sls cfgsrv_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        ' && sudo chmod -R 600 /srv/pillar ')
        % (config.salt_version,
           util.in_staging(),
           util.in_production(),
           config.datacenter,
           cfgsrv_redis_url,
           slack_webhook_url,
           config.cloudmaster_name,
           do_token,
           vultr_apikey,
           linode_password,
           linode_apikey,
           linode_tokyo_apikey,
           cfgsrv_token,
           cfgsrv_redis_test_pass,
           github_token,
           loggly_token),
        as_root=as_root)
def upload_pillars():
    """Build every pillar .sls file on the cloudmaster and move the set
    into /srv/pillar, root-owned with mode 600.
    """
    do_id, do_key, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    aws_id, aws_key = util.read_aws_credential()
    cfr_id, cfr_key = util.read_aws_credential(
        os.path.join(here.secrets_path, 'cloudfront.aws_credential'))
    cfl_id, cfl_key = util.read_cfl_credential()
    dsp_id, dsp_key = util.read_dnsimple_credential()
    cfgsrv_token, cfgsrv_redis_url = util.read_cfgsrv_credential()
    # The whole remote pipeline is one shell command chained with '&&';
    # secrets are interpolated below via '%' formatting.
    remote_cmd = (
        'echo "branch: check-all-fallbacks" > $(hostname).sls '
        ' && echo "private_networking: %s" >> $(hostname).sls '
        ' && echo "default_profile: %s" >> $(hostname).sls '
        ' && echo "salt_version: %s" > salt.sls '
        ' && echo "in_production: %s" > global.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        ' && echo "do_id: %s" > do_credential.sls'
        ' && echo "do_key: %s" >> do_credential.sls'
        ' && echo "do_token: %s" >> do_credential.sls'
        ' && echo "vultr_apikey: %s" > vultr_credential.sls'
        ' && echo "aws_id: %s" > aws_credential.sls'
        ' && echo "aws_key: %s" >> aws_credential.sls'
        ' && echo "cfl_id: %s" > cfl_credential.sls'
        ' && echo "cfl_key: %s" >> cfl_credential.sls'
        ' && echo "cfr_id: %s" > cfr_credential.sls'
        ' && echo "cfr_key: %s" >> cfr_credential.sls'
        ' && echo "dsp_id: %s" > dsp_credential.sls'
        ' && echo "dsp_key: %s" >> dsp_credential.sls'
        ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
        ' && echo "cfgsrv_redis_url: %s" >> cfgsrv_credential.sls'
        r' && echo "base: {\"*\": [salt, global], \"fp-*\": [aws_credential, cfgsrv_credential, vultr_credential], \"*cloudmaster*\": [aws_credential, do_credential, vultr_credential, cfr_credential, cfgsrv_credential], \"ps-*\": [cfl_credential, cfr_credential, dsp_credential]}" > top.sls '
        ' && sudo mv salt.sls global.sls top.sls $(hostname).sls aws_credential.sls cfl_credential.sls cfr_credential.sls do_credential.sls vultr_credential.sls dsp_credential.sls '
        'cfgsrv_credential.sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        ' && sudo chmod -R 600 /srv/pillar ')
    fill_ins = (config.private_networking, config.default_profile,
                config.salt_version, util.in_production(),
                do_id, do_key, do_token,
                vultr_apikey,
                aws_id, aws_key,
                cfl_id, cfl_key,
                cfr_id, cfr_key,
                dsp_id, dsp_key,
                cfgsrv_token, cfgsrv_redis_url)
    util.ssh_cloudmaster(remote_cmd % fill_ins)
def upload_cloudmaster_minion_config():
    """Install the salt minion configuration on the cloudmaster.

    Builds the minion file (salt master address plus custom grains) in
    the ubuntu user's home directory over SSH, then moves it to
    /etc/salt/minion, owned by root with mode 600.
    """
    # Fixed: removed an unused local `address = util.get_address()` that
    # was never referenced.
    do_id, do_key = util.read_do_credential()
    util.ssh_cloudmaster(
        (r"""(echo "master: salt" """
         + r""" && echo "grains:" """
         + r""" && echo " aws_region: %s " """
         + r""" && echo " aws_ami: %s " """
         + r""" && echo " do_id: %s " """
         + r""" && echo " do_key: %s " """
         + r""" && echo " do_region: %s " """
         + r""" && echo " controller: %s " """
         + r""" ) > /home/ubuntu/minion""")
        % (config.aws_region, region.get_ami(), do_id, do_key,
           config.do_region, config.controller))
    util.ssh_cloudmaster('sudo mv /home/ubuntu/minion /etc/salt/minion'
                         ' && sudo chown root:root /etc/salt/minion'
                         ' && sudo chmod 600 /etc/salt/minion')
def upload_cloudmaster_minion_config():
    """Write the cloudmaster's salt minion config to /etc/salt/minion.

    The file (salt master address plus custom grains) is first assembled
    in the ubuntu user's home directory via SSH, then moved into place
    with root ownership and mode 600.
    """
    # Fixed: removed an unused local `address = util.get_address()` that
    # was never referenced.
    do_id, do_key = util.read_do_credential()
    util.ssh_cloudmaster(
        (r"""(echo "master: salt" """
         + r""" && echo "grains:" """
         + r""" && echo " aws_region: %s " """
         + r""" && echo " aws_ami: %s " """
         + r""" && echo " do_id: %s " """
         + r""" && echo " do_key: %s " """
         + r""" && echo " do_region: %s " """
         + r""" && echo " controller: %s " """
         + r""" ) > /home/ubuntu/minion""")
        % (config.aws_region, region.get_ami(), do_id, do_key,
           config.do_region, config.controller))
    util.ssh_cloudmaster('sudo mv /home/ubuntu/minion /etc/salt/minion'
                         ' && sudo chown root:root /etc/salt/minion'
                         ' && sudo chmod 600 /etc/salt/minion')
def upload_pillars():
    """Create the pillar .sls files on the cloudmaster and install them
    under /srv/pillar, readable only by root.
    """
    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    cfgsrv_token, cfgsrv_redis_url = util.read_cfgsrv_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if not util.in_production():
        # Outside production, use the redis instance running on the
        # cloudmaster itself.
        cfgsrv_redis_url = "redis://%s:6379" % config.cloudmaster_address
    util.ssh_cloudmaster((
        'echo "salt_version: %s" > salt.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        # Fixed: a stray trailing '""' after the redirection target was
        # removed (it expanded to nothing but was a confusing leftover).
        ' && echo "" > $(hostname).sls'
        ' && echo "in_production: %s" > global.sls '
        ' && echo "datacenter: %s" >> global.sls '
        ' && echo "cloudmaster_name: %s" >> global.sls '
        ' && echo "do_token: %s" > do_credential.sls'
        ' && echo "vultr_apikey: %s" > vultr_credential.sls'
        ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
        ' && echo "cfgsrv_redis_url: %s" >> cfgsrv_credential.sls'
        ' && echo "github_token: %s" > github_token.sls'
        ' && echo "loggly_token: %s" > loggly_token.sls'
        r' && echo "base: {\"*\": [salt, global], \"fp-*\": [cfgsrv_credential, vultr_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, cfgsrv_credential]}" > top.sls '
        ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls cfgsrv_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        ' && sudo chmod -R 600 /srv/pillar ')
        % (config.salt_version,
           util.in_production(),
           config.datacenter,
           config.cloudmaster_name,
           do_token,
           vultr_apikey,
           cfgsrv_token,
           cfgsrv_redis_url,
           github_token,
           loggly_token))
"fp-from-old-controller-91-at-getlantern-dot-org-9d17-1-2014-4-8", "fp-from-old-controller-92-at-getlantern-dot-org-b568-1-2014-4-8", "fp-from-old-controller-93-at-getlantern-dot-org-acc7-1-2014-4-8", "fp-from-old-controller-94-at-getlantern-dot-org-7c06-1-2014-4-9", "fp-from-old-controller-95-at-getlantern-dot-org-960b-1-2014-4-9", "fp-from-old-controller-96-at-getlantern-dot-org-8f94-1-2014-4-8", "fp-from-old-controller-97-at-getlantern-dot-org-c643-1-2014-4-9", "fp-from-old-controller-98-at-getlantern-dot-org-8362-1-2014-4-9", "fp-from-old-controller-99-at-getlantern-dot-org-ff9f-1-2014-4-9", "fp-from-old-controller-9-at-getlantern-dot-org-33ef-1-2014-4-8", "fp-fte3-at-getlantern-dot-org-4853-3-2014-5-10", "fp-invite-at-getlantern-dot-org-7600-1-2014-2-17", "fp-ox-at-getlantern-dot-org-c336-1-2014-4-17", ] _, _, do_token = util.read_do_credential() mgr = digitalocean.Manager(token=do_token) droplets_by_name = {d.name: d for d in mgr.get_all_droplets()} def destroy(): for i, name in enumerate(all_fallbacks): print "killing %s of %s..." % (i+1, len(all_fallbacks)) try: droplets_by_name[name].destroy() time.sleep(30) except: traceback.print_exc() def run_command(ip, cmd):
def upload_pillars(as_root=False):
    """Generate all pillar .sls files on the cloudmaster and install
    them under /srv/pillar (root-only, mode 600).

    :param as_root: forwarded to util.ssh_cloudmaster; run the remote
        command as root when True.
    :raises AssertionError: if the environment is neither dev, staging
        nor production.
    """
    if not util.in_dev() and not util.in_staging() and not util.in_production():
        # Fail fast on an unrecognized environment.  An explicit raise
        # replaces the previous always-false `assert`, so the check also
        # survives `python -O` (exception type and message unchanged).
        raise AssertionError("Environment unknown!")
    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    linode_password, linode_apikey, linode_tokyo_apikey = \
        util.read_linode_credential()
    cfgsrv_token, cfgsrv_redis_url, cfgsrv_redis_test_pass \
        = util.read_cfgsrv_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if util.in_production():
        slack_webhook_url = util.read_slack_webhook_url()
    else:
        slack_webhook_url = util.read_slack_staging_webhook_url()
    environment = "production"
    # NOTE(review): in production neither branch below runs, so
    # redis_host, redis_domain and redis_via_stunnel_url stay unbound and
    # the '%' interpolation further down would raise NameError -- confirm
    # the production code path before relying on it.
    if util.in_staging():
        environment = "staging"
        cfgsrv_redis_url = "rediss://:[email protected]:6380"
        redis_host = cfgsrv_redis_url.split('@')[1]
        redis_domain = redis_host.split(":")[0]
        redis_via_stunnel_url = (cfgsrv_redis_url.split('@')[0]
                                 .replace("rediss", "redis")
                                 + "@localhost:6380")
    if util.in_dev():
        environment = "dev"
        redis_host = "%s:6379" % config.cloudmaster_address
        # NOTE(review): the visible template has a single %s but two
        # arguments are supplied -- looks like redacted credentials;
        # verify against the unredacted source.
        cfgsrv_redis_url = "redis://*****:*****@%s" % (cfgsrv_redis_test_pass,
                                                       redis_host)
        redis_domain = "redis-staging.getlantern.org"
        # Bypass stunnel in dev environments because we're not encrypting
        # connections to Redis
        redis_via_stunnel_url = cfgsrv_redis_url
    ssh_whitelist_query_token = util.read_ssh_whitelist_query_token()
    util.ssh_cloudmaster((
        'echo "salt_version: %s" > salt.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        # Fixed: removed a stray trailing '""' that followed the
        # redirection target (it expanded to nothing in the shell).
        ' && echo "" > $(hostname).sls'
        ' && echo "environment: %s" > global.sls '
        ' && echo "in_dev: %s" >> global.sls '
        ' && echo "in_staging: %s" >> global.sls '
        ' && echo "in_production: %s" >> global.sls '
        ' && echo "datacenter: %s" >> global.sls '
        ' && echo "cfgsrv_redis_url: %s" >> global.sls'
        ' && echo "redis_via_stunnel_url: %s" >> global.sls'
        ' && echo "redis_host: %s" >> global.sls'
        ' && echo "redis_domain: %s" >> global.sls'
        ' && echo "slack_webhook_url: %s" >> global.sls '
        ' && echo "cloudmaster_name: %s" >> global.sls '
        ' && echo "ssh_whitelist_query_token: %s" >> global.sls '
        ' && echo "do_token: %s" > do_credential.sls'
        ' && echo "vultr_apikey: %s" > vultr_credential.sls'
        ' && echo "linode_password: \'%s\'" > linode_credential.sls '
        ' && echo "linode_apikey: %s" >> linode_credential.sls '
        ' && echo "linode_tokyo_apikey: %s" >> linode_credential.sls '
        ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
        ' && echo "cfgsrv_redis_test_pass: \"%s\"" >> cfgsrv_credential.sls'
        ' && echo "github_token: %s" > github_token.sls'
        ' && echo "loggly_token: %s" > loggly_token.sls'
        r' && echo "base: {\"fp-*\": [cfgsrv_credential, vultr_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, linode_credential, cfgsrv_credential], \"cs-*\": [cfgsrv_credential], \"*\": [global, salt]}" > top.sls '
        ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls linode_credential.sls cfgsrv_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        ' && sudo chmod -R 600 /srv/pillar ')
        % (config.salt_version,
           environment,
           util.in_dev(),
           util.in_staging(),
           util.in_production(),
           config.datacenter,
           cfgsrv_redis_url,
           redis_via_stunnel_url,
           redis_host,
           redis_domain,
           slack_webhook_url,
           config.cloudmaster_name,
           ssh_whitelist_query_token,
           do_token,
           vultr_apikey,
           linode_password,
           linode_apikey,
           linode_tokyo_apikey,
           cfgsrv_token,
           cfgsrv_redis_test_pass,
           github_token,
           loggly_token),
        as_root=as_root)
"fp-from-old-controller-91-at-getlantern-dot-org-9d17-1-2014-4-8", "fp-from-old-controller-92-at-getlantern-dot-org-b568-1-2014-4-8", "fp-from-old-controller-93-at-getlantern-dot-org-acc7-1-2014-4-8", "fp-from-old-controller-94-at-getlantern-dot-org-7c06-1-2014-4-9", "fp-from-old-controller-95-at-getlantern-dot-org-960b-1-2014-4-9", "fp-from-old-controller-96-at-getlantern-dot-org-8f94-1-2014-4-8", "fp-from-old-controller-97-at-getlantern-dot-org-c643-1-2014-4-9", "fp-from-old-controller-98-at-getlantern-dot-org-8362-1-2014-4-9", "fp-from-old-controller-99-at-getlantern-dot-org-ff9f-1-2014-4-9", "fp-from-old-controller-9-at-getlantern-dot-org-33ef-1-2014-4-8", "fp-fte3-at-getlantern-dot-org-4853-3-2014-5-10", "fp-invite-at-getlantern-dot-org-7600-1-2014-2-17", "fp-ox-at-getlantern-dot-org-c336-1-2014-4-17", ] _, _, do_token = util.read_do_credential() mgr = digitalocean.Manager(token=do_token) droplets_by_name = {d.name: d for d in mgr.get_all_droplets()} def destroy(): for i, name in enumerate(all_fallbacks): print "killing %s of %s..." % (i + 1, len(all_fallbacks)) try: droplets_by_name[name].destroy() time.sleep(30) except: traceback.print_exc()
def upload_pillars(as_root=False):
    """Generate all pillar .sls files on the cloudmaster and install
    them under /srv/pillar (root-only, mode 600).

    :param as_root: forwarded to util.ssh_cloudmaster; run the remote
        command as root when True.
    :raises AssertionError: if the environment is neither dev, staging
        nor production.
    """
    if not util.in_dev() and not util.in_staging() and not util.in_production():
        # Fail fast on an unrecognized environment.  An explicit raise
        # replaces the previous always-false `assert`, so the check also
        # survives `python -O` (exception type and message unchanged).
        raise AssertionError("Environment unknown!")
    _, _, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    linode_password, linode_apikey, linode_tokyo_apikey = \
        util.read_linode_credential()
    cfgsrv_token, cfgsrv_redis_url, cfgsrv_redis_test_pass \
        = util.read_cfgsrv_credential()
    github_token = util.read_github_token()
    loggly_token = util.read_loggly_token()
    if util.in_production():
        slack_webhook_url = util.read_slack_webhook_url()
    else:
        slack_webhook_url = util.read_slack_staging_webhook_url()
    environment = "production"
    # NOTE(review): in production neither branch below runs, so
    # redis_host, redis_domain and redis_via_stunnel_url stay unbound and
    # the '%' interpolation further down would raise NameError -- confirm
    # the production code path before relying on it.
    if util.in_staging():
        environment = "staging"
        cfgsrv_redis_url = "rediss://:[email protected]:6380"
        redis_host = cfgsrv_redis_url.split('@')[1]
        redis_domain = redis_host.split(":")[0]
        redis_via_stunnel_url = (cfgsrv_redis_url.split('@')[0]
                                 .replace("rediss", "redis")
                                 + "@localhost:6380")
    if util.in_dev():
        environment = "dev"
        redis_host = "%s:6379" % config.cloudmaster_address
        # NOTE(review): the visible template has a single %s but two
        # arguments are supplied -- looks like redacted credentials;
        # verify against the unredacted source.
        cfgsrv_redis_url = "redis://*****:*****@%s" % (cfgsrv_redis_test_pass,
                                                       redis_host)
        redis_domain = "redis-staging.getlantern.org"
        # Bypass stunnel in dev environments because we're not encrypting
        # connections to Redis
        redis_via_stunnel_url = cfgsrv_redis_url
    ssh_whitelist_query_token = util.read_ssh_whitelist_query_token()
    util.ssh_cloudmaster((
        'echo "salt_version: %s" > salt.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        # Fixed: removed a stray trailing '""' that followed the
        # redirection target (it expanded to nothing in the shell).
        ' && echo "" > $(hostname).sls'
        ' && echo "environment: %s" > global.sls '
        ' && echo "in_dev: %s" >> global.sls '
        ' && echo "in_staging: %s" >> global.sls '
        ' && echo "in_production: %s" >> global.sls '
        ' && echo "datacenter: %s" >> global.sls '
        ' && echo "cfgsrv_redis_url: %s" >> global.sls'
        ' && echo "redis_via_stunnel_url: %s" >> global.sls'
        ' && echo "redis_host: %s" >> global.sls'
        ' && echo "redis_domain: %s" >> global.sls'
        ' && echo "slack_webhook_url: %s" >> global.sls '
        ' && echo "cloudmaster_name: %s" >> global.sls '
        ' && echo "ssh_whitelist_query_token: %s" >> global.sls '
        ' && echo "do_token: %s" > do_credential.sls'
        ' && echo "vultr_apikey: %s" > vultr_credential.sls'
        ' && echo "linode_password: \'%s\'" > linode_credential.sls '
        ' && echo "linode_apikey: %s" >> linode_credential.sls '
        ' && echo "linode_tokyo_apikey: %s" >> linode_credential.sls '
        ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
        ' && echo "cfgsrv_redis_test_pass: \"%s\"" >> cfgsrv_credential.sls'
        ' && echo "github_token: %s" > github_token.sls'
        ' && echo "loggly_token: %s" > loggly_token.sls'
        r' && echo "base: {\"fp-*\": [cfgsrv_credential, vultr_credential, github_token, loggly_token], \"cm-*\": [do_credential, vultr_credential, linode_credential, cfgsrv_credential], \"cs-*\": [cfgsrv_credential], \"*\": [global, salt]}" > top.sls '
        ' && sudo mv salt.sls global.sls top.sls do_credential.sls vultr_credential.sls linode_credential.sls cfgsrv_credential.sls github_token.sls loggly_token.sls $(hostname).sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        ' && sudo chmod -R 600 /srv/pillar ')
        % (config.salt_version,
           environment,
           util.in_dev(),
           util.in_staging(),
           util.in_production(),
           config.datacenter,
           cfgsrv_redis_url,
           redis_via_stunnel_url,
           redis_host,
           redis_domain,
           slack_webhook_url,
           config.cloudmaster_name,
           ssh_whitelist_query_token,
           do_token,
           vultr_apikey,
           linode_password,
           linode_apikey,
           linode_tokyo_apikey,
           cfgsrv_token,
           cfgsrv_redis_test_pass,
           github_token,
           loggly_token),
        as_root=as_root)
def launch_cloudmaster(): _, _, do_token = util.read_do_credential() mgr = do.Manager(token=do_token) delay = 2 class Done: pass for d in mgr.get_all_droplets(): if d.name == config.cloudmaster_name: if raw_input("Found an existing cloudmaster;" + " should I kill it? (y/N): ") != 'y': print "OK, bye." sys.exit(0) print "Killing..." ip_ = d.ip_address d.destroy() util.wait_droplet(d) print "Removing from known_hosts..." os.system('ssh-keygen -f "%s/.ssh/known_hosts" -R %s' % (os.path.expanduser("~"), ip_)) print "Digitan Ocean doesn't like us immediately creating" print "instances with the same name as one we just killed." print "Waiting for 20 seconds to be on the safe side..." time.sleep(20) print "Ordering the creation of the droplet..." droplet = do.Droplet( token=do_token, name=config.cloudmaster_name, region='sgp1', image='ubuntu-14-04-x64', size='1gb', ssh_keys=[97623], # cloudmaster key backups=False) droplet.create() print "Waiting for instance to run..." util.wait_droplet(droplet) print print "Trying to connect to server..." print "(You may see some connection refusals; this is normal.)" print delay = 1 while init_dir('/srv/salt'): time.sleep(delay) delay *= 1.5 print "Retrying..." print print "Initializing directories..." init_dir('/srv/pillar') init_dir('/home/lantern') init_dir('/etc/salt') print print "Uploading pillars..." update.upload_pillars() print "Setting cloudmaster minion config..." update.upload_cloudmaster_minion_config() print "Uploading salt configuration..." update.rsync_salt() print "Uploading secrets..." update.upload_secrets() print "Copying bootstrap file..." os.system( "scp -i %s %s root@%s:" % (config.key_path, here.bootstrap_path, util.get_cloudmaster_address())) print "Bootstrapping..." util.ssh_cloudmaster( "sudo SALT_VERSION=%s ./bootstrap.bash" % config.salt_version, ".log") print print "Done launching." update.print_errors()