def upload_pillars():
    """Upload salt pillar files to the cloudmaster over SSH.

    Reads DigitalOcean, AWS, CloudFront, CloudFlare, Azure and DNSimple
    credentials from local secret files, then runs a single shell pipeline
    on the cloudmaster that writes them into .sls pillar files under
    /srv/pillar/, readable by root only.

    NOTE: the %s placeholders in the shell string below must stay in exact
    positional lockstep with the argument tuple at the end (14 of each).
    """
    do_id, do_key, _ = util.read_do_credential()
    aws_id, aws_key = util.read_aws_credential()
    # CloudFront uses a separate AWS credential file.
    cfr_id, cfr_key = util.read_aws_credential(
        os.path.join(here.secrets_path, 'cloudfront.aws_credential'))
    cfl_id, cfl_key = util.read_cfl_credential()
    azure_ssh_pass = util.read_azure_ssh_pass()
    dsp_id, dsp_key = util.read_dnsimple_credential()
    util.ssh_cloudmaster((
        # Per-host pillar file, named after the cloudmaster's hostname.
        'echo "branch: check-all-fallbacks" > $(hostname).sls '
        ' && echo "private_networking: %s" >> $(hostname).sls '
        ' && echo "default_profile: %s" >> $(hostname).sls '
        ' && echo "azure_ssh_pass: %s" >> $(hostname).sls '
        ' && echo "salt_version: %s" > salt.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        ' && echo "do_id: %s" > do_credential.sls'
        ' && echo "do_key: %s" >> do_credential.sls'
        ' && echo "aws_id: %s" > aws_credential.sls'
        ' && echo "aws_key: %s" >> aws_credential.sls'
        ' && echo "cfl_id: %s" > cfl_credential.sls'
        ' && echo "cfl_key: %s" >> cfl_credential.sls'
        ' && echo "cfr_id: %s" > cfr_credential.sls'
        ' && echo "cfr_key: %s" >> cfr_credential.sls'
        ' && echo "dsp_id: %s" > dsp_credential.sls'
        ' && echo "dsp_key: %s" >> dsp_credential.sls'
        # top.sls maps minion-id globs to the pillar files they may read.
        r' && echo "base: {\"*\": [salt], \"fp-*\": [aws_credential], \"*cloudmaster*\": [aws_credential, do_credential, cfr_credential], \"ps-*\": [cfl_credential, cfr_credential, dsp_credential]}" > top.sls '
        ' && sudo mv salt.sls top.sls $(hostname).sls aws_credential.sls cfl_credential.sls cfr_credential.sls do_credential.sls dsp_credential.sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        # NOTE(review): chmod -R 600 also strips the execute bit from the
        # /srv/pillar directory itself; only root (which bypasses it) can
        # traverse it afterwards — presumably intentional, verify.
        ' && sudo chmod -R 600 /srv/pillar ')
        % (config.private_networking, config.default_profile,
           azure_ssh_pass, config.salt_version, do_id, do_key, aws_id,
           aws_key, cfl_id, cfl_key, cfr_id, cfr_key, dsp_id, dsp_key))
def upload_pillars():
    """Upload salt pillar files to the cloudmaster over SSH.

    Variant that additionally handles Vultr, config-server and DigitalOcean
    API-token credentials, plus a global.sls carrying the in_production
    flag.  All secrets are written into .sls files under /srv/pillar/ on
    the cloudmaster, readable by root only.

    NOTE: the %s placeholders in the shell string below must stay in exact
    positional lockstep with the argument tuple at the end (18 of each).
    """
    do_id, do_key, do_token = util.read_do_credential()
    vultr_apikey = util.read_vultr_credential()
    aws_id, aws_key = util.read_aws_credential()
    # CloudFront uses a separate AWS credential file.
    cfr_id, cfr_key = util.read_aws_credential(
        os.path.join(here.secrets_path, 'cloudfront.aws_credential'))
    cfl_id, cfl_key = util.read_cfl_credential()
    dsp_id, dsp_key = util.read_dnsimple_credential()
    cfgsrv_token, cfgsrv_redis_url = util.read_cfgsrv_credential()
    util.ssh_cloudmaster((
        # Per-host pillar file, named after the cloudmaster's hostname.
        'echo "branch: check-all-fallbacks" > $(hostname).sls '
        ' && echo "private_networking: %s" >> $(hostname).sls '
        ' && echo "default_profile: %s" >> $(hostname).sls '
        ' && echo "salt_version: %s" > salt.sls '
        ' && echo "in_production: %s" > global.sls '
        # Hack so every instance will read specific pillars from a file
        # named with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" >> salt.sls '
        ' && echo "do_id: %s" > do_credential.sls'
        ' && echo "do_key: %s" >> do_credential.sls'
        ' && echo "do_token: %s" >> do_credential.sls'
        ' && echo "vultr_apikey: %s" > vultr_credential.sls'
        ' && echo "aws_id: %s" > aws_credential.sls'
        ' && echo "aws_key: %s" >> aws_credential.sls'
        ' && echo "cfl_id: %s" > cfl_credential.sls'
        ' && echo "cfl_key: %s" >> cfl_credential.sls'
        ' && echo "cfr_id: %s" > cfr_credential.sls'
        ' && echo "cfr_key: %s" >> cfr_credential.sls'
        ' && echo "dsp_id: %s" > dsp_credential.sls'
        ' && echo "dsp_key: %s" >> dsp_credential.sls'
        ' && echo "cfgsrv_token: %s" > cfgsrv_credential.sls'
        ' && echo "cfgsrv_redis_url: %s" >> cfgsrv_credential.sls'
        # top.sls maps minion-id globs to the pillar files they may read.
        r' && echo "base: {\"*\": [salt, global], \"fp-*\": [aws_credential, cfgsrv_credential, vultr_credential], \"*cloudmaster*\": [aws_credential, do_credential, vultr_credential, cfr_credential, cfgsrv_credential], \"ps-*\": [cfl_credential, cfr_credential, dsp_credential]}" > top.sls '
        ' && sudo mv salt.sls global.sls top.sls $(hostname).sls aws_credential.sls cfl_credential.sls cfr_credential.sls do_credential.sls vultr_credential.sls dsp_credential.sls cfgsrv_credential.sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        # NOTE(review): chmod -R 600 also strips the execute bit from the
        # /srv/pillar directory itself — presumably intentional, verify.
        ' && sudo chmod -R 600 /srv/pillar '
        ) % (config.private_networking, config.default_profile,
             config.salt_version, util.in_production(), do_id, do_key,
             do_token, vultr_apikey, aws_id, aws_key, cfl_id, cfl_key,
             cfr_id, cfr_key, dsp_id, dsp_key, cfgsrv_token,
             cfgsrv_redis_url))
def trigger_launch(): aws_id, aws_key = util.read_aws_credential() aws_creds = {'aws_access_key_id': aws_id, 'aws_secret_access_key': aws_key} sqs = boto.sqs.connect_to_region(config.aws_region, **aws_creds) req_q = sqs.get_queue("%s_request" % config.controller) notify_q = sqs.get_queue("notify_%s" % config.controller) for q in [req_q, notify_q]: q.set_message_class(JSONMessage) msg = JSONMessage() msg.set_body({ 'launch-fp-as': '*****@*****.**', 'launch-refrtok': '<redacted>', 'launch-serial': 1 }) print "Sending request..." req_q.write(msg) return # Comment out to wait for response! print "Awaiting response..." while True: msg = notify_q.read() if msg is not None: print "Got message: %r" % msg.get_body() notify_q.delete_message(msg) return sys.stdout.write(".") sys.stdout.flush()
def upload_pillars():
    """Upload salt pillar files to the cloudmaster over SSH.

    Variant for the lanterndonors setup: ships the OAuth refresh token,
    AWS credentials, and installer bucket/filename into .sls files under
    /srv/pillar/ on the cloudmaster, readable by root only.

    NOTE: the %s placeholders in the shell string below must stay in exact
    positional lockstep with the argument tuple at the end (6 of each).
    """
    aws_id, aws_key = util.read_aws_credential()
    # `file()` is the Python 2 builtin; the handle is never closed
    # explicitly and is reclaimed by the GC.
    refr_tok = file(
        os.path.join(here.secrets_path,
                     'lantern_aws',
                     'lanterndonors.refresh_token')).read().strip()
    util.ssh_cloudmaster((
        'echo "lanterndonors_refrtok: %s" > cloudmaster.sls '
        ' && echo "salt_version: %s" > salt.sls '
        ' && echo "aws_id: %s" > aws_credential.sls'
        ' && echo "aws_key: %s" >> aws_credential.sls'
        # Hack so every instance will read specific pillars from a file named
        # with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" > fallback_proxy.sls '
        r' && echo "installer_bucket: %s" >> fallback_proxy.sls '
        r' && echo "installer_filename: %s" >> fallback_proxy.sls '
        # top.sls maps minion-id globs to the pillar files they may read.
        r' && echo "base: {\"*\": [salt, aws_credential], '
        r'\"cloudmaster\": [cloudmaster], '
        r'\"fp-*\": [fallback_proxy]}" '
        ' > top.sls '
        ' && sudo mv salt.sls top.sls cloudmaster.sls fallback_proxy.sls '
        ' aws_credential.sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        # NOTE(review): chmod -R 600 also strips the execute bit from the
        # /srv/pillar directory itself — presumably intentional, verify.
        ' && sudo chmod -R 600 /srv/pillar ')
        % (refr_tok, config.salt_version, aws_id, aws_key,
           config.installer_bucket, config.installer_filename))
def upload_pillars():
    """Upload salt pillar files to the cloudmaster over SSH.

    Near-duplicate of the other lanterndonors variant in this file: ships
    the OAuth refresh token, AWS credentials, and installer
    bucket/filename into .sls files under /srv/pillar/ on the
    cloudmaster, readable by root only.

    NOTE: the %s placeholders in the shell string below must stay in exact
    positional lockstep with the argument tuple at the end (6 of each).
    """
    aws_id, aws_key = util.read_aws_credential()
    # `file()` is the Python 2 builtin; the handle is never closed
    # explicitly and is reclaimed by the GC.
    refr_tok = file(os.path.join(here.secrets_path,
                                 'lantern_aws',
                                 'lanterndonors.refresh_token')).read().strip()
    util.ssh_cloudmaster((
        'echo "lanterndonors_refrtok: %s" > cloudmaster.sls '
        ' && echo "salt_version: %s" > salt.sls '
        ' && echo "aws_id: %s" > aws_credential.sls'
        ' && echo "aws_key: %s" >> aws_credential.sls'
        # Hack so every instance will read specific pillars from a file named
        # with the <instance_name>.sls scheme.
        r' && echo "include: [{{ grains[\"id\"] }}]" > fallback_proxy.sls '
        r' && echo "installer_bucket: %s" >> fallback_proxy.sls '
        r' && echo "installer_filename: %s" >> fallback_proxy.sls '
        # top.sls maps minion-id globs to the pillar files they may read.
        r' && echo "base: {\"*\": [salt, aws_credential], '
        r'\"cloudmaster\": [cloudmaster], '
        r'\"fp-*\": [fallback_proxy]}" '
        ' > top.sls '
        ' && sudo mv salt.sls top.sls cloudmaster.sls fallback_proxy.sls '
        ' aws_credential.sls /srv/pillar/ '
        ' && sudo chown -R root:root /srv/pillar '
        # NOTE(review): chmod -R 600 also strips the execute bit from the
        # /srv/pillar directory itself — presumably intentional, verify.
        ' && sudo chmod -R 600 /srv/pillar '
        ) % (refr_tok, config.salt_version, aws_id, aws_key,
             config.installer_bucket, config.installer_filename))
def send_message(d): aws_id, aws_key = util.read_aws_credential() aws_creds = {'aws_access_key_id': aws_id, 'aws_secret_access_key': aws_key} sqs = boto.sqs.connect_to_region(config.aws_region, **aws_creds) req_q = sqs.get_queue("%s_request" % config.controller) req_q.set_message_class(JSONMessage) msg = JSONMessage() msg.set_body(d) print "Sending request..." req_q.write(msg) print "Sent."
def connect(): aws_id, aws_key = util.read_aws_credential() print "Connecting to region %s..." % config.aws_region return boto.ec2.connect_to_region(config.aws_region, aws_access_key_id=aws_id, aws_secret_access_key=aws_key)