def launch(pname, instance_id=None, alt_config='standalone', *repolist):
    """creates and provisions a new masterless stack for project `pname`.

    `repolist` is an optional list of formula repositories to pin."""
    stackname = cfn.generate_stack_from_input(pname, instance_id, alt_config)
    project = core.project_data_for_stackname(stackname)

    # the chosen alt-config must exist and be masterless.
    # todo: can the choices presented to the user remove non-masterless alt-configs?
    alt_map = project['aws-alt']
    ensure(alt_map, "project has no alternate configurations")
    ensure(alt_config in alt_map, "unknown alt-config %r" % alt_config)
    ensure(alt_map[alt_config]['ec2']['masterless'], "alternative configuration %r has masterless=False" % alt_config)

    revisions = parse_validate_repolist(project, *repolist)

    # todo: this is good UX but was simply debug output that got left in.
    # a better summary of what is to be created could be printed out,
    # preferably after the templates are printed out but before confirmation.
    LOG.info('attempting to create masterless stack:')
    LOG.info('stackname:\t' + stackname)
    LOG.info('region:\t' + project['aws']['region'])
    LOG.info('formula_revisions:\t%s' % pformat(revisions))

    if core.is_master_server_stack(stackname):
        checks.ensure_can_access_builder_private(pname)
    checks.ensure_stack_does_not_exist(stackname)

    bootstrap.create_stack(stackname)

    LOG.info('updating stack %s', stackname)
    bootstrap.update_stack(stackname, service_list=['ec2', 'sqs', 's3'], formula_revisions=revisions)
def update_salt(stackname):
    "updates the Salt version installed on the instances for the given stack"
    # start instance if it is stopped
    # acquire a lock from Alfred (if possible) so instance isn't shutdown while being updated
    cfn._check_want_to_be_running(stackname, autostart=True)

    ctx = context_handler.load_context(stackname)
    if not ctx.get('ec2'):
        # nothing to upgrade without ec2 nodes
        LOG.info("no ec2 context. skipping: %s", stackname)
        return

    LOG.info("upgrading salt minion")
    project = core.project_data_for_stackname(stackname)
    ctx['project']['salt'] = project['salt']

    LOG.info("updating context")
    context_handler.write_context(stackname, ctx)

    LOG.info("updating buildvars")
    buildvars.refresh(stackname, ctx)

    LOG.info("updating nodes")
    bootstrap.update_ec2_stack(stackname, ctx, concurrency='serial')
    return True
def launch(pname, instance_id=None, alt_config='standalone', *repolist):
    """builds and updates a masterless stack for project `pname`."""
    stackname = cfn.generate_stack_from_input(pname, instance_id, alt_config)
    pdata = core.project_data_for_stackname(stackname)

    # reject projects without a usable masterless alt-config
    ensure(pdata['aws-alt'], "project has no alternate configurations")
    ensure(alt_config in pdata['aws-alt'], "unknown alt-config %r" % alt_config)
    alt = pdata['aws-alt'][alt_config]
    ensure(alt['ec2']['masterless'], "alternative configuration %r has masterless=False" % alt_config)

    formula_revisions = parse_validate_repolist(pdata, *repolist)

    # summarise what is about to happen
    LOG.info('attempting to create masterless stack:')
    LOG.info('stackname:\t' + stackname)
    LOG.info('region:\t' + pdata['aws']['region'])
    LOG.info('formula_revisions:\t%s' % pformat(formula_revisions))

    if core.is_master_server_stack(stackname):
        checks.ensure_can_access_builder_private(pname)
    checks.ensure_stack_does_not_exist(stackname)

    bootstrap.create_stack(stackname)
    LOG.info('updating stack %s', stackname)
    bootstrap.update_stack(stackname, service_list=['ec2', 'sqs', 's3'], formula_revisions=formula_revisions)
def remaster(stackname, new_master_stackname): "tell minion who their new master is. deletes any existing master key on minion" # TODO: turn this into a decorator import cfn # start the machine if it's stopped # you might also want to acquire a lock so alfred doesn't stop things cfn._check_want_to_be_running(stackname, 1) master_ip = _cached_master_ip(new_master_stackname) LOG.info('re-mastering %s to %s', stackname, master_ip) context = context_handler.load_context(stackname) # remove if no longer an issue # if context.get('ec2') == True: # # TODO: duplicates bad ec2 data wrangling in cfngen.build_context # # ec2 == True for some reason, which is completely useless # LOG.warn("bad context for stack: %s", stackname) # context['ec2'] = {} # context['project']['aws']['ec2'] = {} if not context.get('ec2'): LOG.info("no ec2 context, skipping %s", stackname) return if context['ec2'].get('master_ip') == master_ip: LOG.info("already remastered: %s", stackname) try: utils.confirm("Skip?") return except KeyboardInterrupt: LOG.info("not skipping") LOG.info("upgrading salt client") pdata = core.project_data_for_stackname(stackname) context['project']['salt'] = pdata['salt'] LOG.info("setting new master address") cfngen.set_master_address(pdata, context, master_ip) # mutates context # update context LOG.info("updating context") context_handler.write_context(stackname, context) # update buildvars LOG.info("updating buildvars") buildvars.refresh(stackname, context) # remove knowledge of old master def work(): remote_sudo("rm -f /etc/salt/pki/minion/minion_master.pub" ) # destroy the old master key we have LOG.info("removing old master key from minion") core.stack_all_ec2_nodes(stackname, work, username=config.BOOTSTRAP_USER) # update ec2 nodes LOG.info("updating nodes") bootstrap.update_ec2_stack(stackname, context, concurrency='serial') return True
def remaster(stackname, new_master_stackname):
    "tell minion who their new master is. deletes any existing master key on minion"
    # TODO: turn this into a decorator
    import cfn
    # machine must be running before we can talk to it.
    # a lock would also stop alfred shutting it down mid-update.
    cfn._check_want_to_be_running(stackname, 1)

    master_ip = _cached_master_ip(new_master_stackname)
    LOG.info('re-mastering %s to %s', stackname, master_ip)

    ctx = context_handler.load_context(stackname)
    if not ctx.get('ec2'):
        # no ec2 nodes, no minion to re-master
        LOG.info("no ec2 context, skipping %s", stackname)
        return

    if ctx['ec2'].get('master_ip') == master_ip:
        # already pointing at this master. offer to skip; Ctrl-C forces a re-run.
        LOG.info("already remastered: %s", stackname)
        try:
            utils.confirm("Skip?")
            return
        except KeyboardInterrupt:
            LOG.info("not skipping")

    LOG.info("upgrading salt client")
    project = core.project_data_for_stackname(stackname)
    ctx['project']['salt'] = project['salt']

    LOG.info("setting new master address")
    cfngen.set_master_address(project, ctx, master_ip) # mutates ctx

    LOG.info("updating context")
    context_handler.write_context(stackname, ctx)

    LOG.info("updating buildvars")
    buildvars.refresh(stackname, ctx)

    # every node must forget the old master's key
    def purge_old_master_key():
        sudo("rm -f /etc/salt/pki/minion/minion_master.pub")
    LOG.info("removing old master key from minion")
    core.stack_all_ec2_nodes(stackname, purge_old_master_key, username=config.BOOTSTRAP_USER)

    LOG.info("updating nodes")
    bootstrap.update_ec2_stack(stackname, ctx, concurrency='serial')
    return True
def _retrieve_build_vars(stackname): pdata = core.project_data_for_stackname(stackname) print 'looking for build vars ...' with hide('everything'): bvarst, bvars = valid(stackname) assert bvarst in [ABBREV, FULL], \ "the build-vars.json file for %r is not valid. use `./bldr buildvars.fix` to attempt to fix this." print 'found build vars' print return bvars
def launch(pname, instance_id=None, alt_config=None):
    """creates and provisions a stack for project `pname`, remembering it as the active stack."""
    stackname = generate_stack_from_input(pname, instance_id, alt_config)
    project = core.project_data_for_stackname(stackname)
    region = project['aws']['region']
    LOG.info('attempting to create %s (AWS region %s)', stackname, region)

    if core.is_master_server_stack(stackname):
        checks.ensure_can_access_builder_private(pname)

    bootstrap.create_stack(stackname)

    LOG.info('updating stack %s', stackname)
    # TODO: highstate.sh (think it's run inside here) doesn't detect:
    # [34.234.95.137] out: [CRITICAL] The Salt Master has rejected this minion's public key!
    bootstrap.update_stack(stackname, service_list=['ec2', 'sqs', 's3'])

    setdefault('.active-stack', stackname)
def remaster(stackname, new_master_stackname="master-server--2018-04-09-2"):
    "tell minion who their new master is. deletes any existing master key on minion"
    # start instance if it is stopped
    # acquire a lock from Alfred (if possible) so instance isn't shutdown while being updated
    cfn._check_want_to_be_running(stackname, autostart=True)

    master_ip = _cached_master_ip(new_master_stackname)
    LOG.info('re-mastering %r to %r', stackname, master_ip)

    ctx = context_handler.load_context(stackname)
    if not ctx.get('ec2'):
        # no ec2 nodes, no minion to re-master
        LOG.info("no ec2 context, skipping %s", stackname)
        return
    if ctx['ec2'].get('master_ip') == master_ip:
        LOG.info("already remastered: %s", stackname)
        return

    project = core.project_data_for_stackname(stackname)
    LOG.info("setting new master address")
    cfngen.set_master_address(project, ctx, master_ip) # mutates ctx

    LOG.info("updating context")
    context_handler.write_context(stackname, ctx)

    LOG.info("updating buildvars")
    buildvars.refresh(stackname, ctx)

    # remove knowledge of old master by destroying the minion's master pubkey
    def drop_master_key():
        remote_sudo("rm -f /etc/salt/pki/minion/minion_master.pub")
    LOG.info("removing old master key from minion")
    core.stack_all_ec2_nodes(stackname, drop_master_key, username=config.BOOTSTRAP_USER)

    LOG.info("updating nodes")
    # todo: how to pass in --dry-run to highstate.sh ?
    bootstrap.update_ec2_stack(stackname, ctx, concurrency='serial')
    return True
def launch(pname, instance_id=None, alt_config=None):
    """creates and provisions a stack for project `pname`.

    A stack that already exists is logged and left untouched."""
    try:
        stackname = generate_stack_from_input(pname, instance_id, alt_config)
    except checks.StackAlreadyExistsProblem as err:
        LOG.info('stack %s already exists', err.stackname)
        return

    project = core.project_data_for_stackname(stackname)
    LOG.info('attempting to create %s (AWS region %s)', stackname, project['aws']['region'])

    if core.is_master_server_stack(stackname):
        checks.ensure_can_access_builder_private(pname)

    bootstrap.create_stack(stackname)

    LOG.info('updating stack %s', stackname)
    # TODO: highstate.sh (think it's run inside here) doesn't detect:
    # [34.234.95.137] out: [CRITICAL] The Salt Master has rejected this minion's public key!
    bootstrap.update_stack(stackname, service_list=['ec2', 'sqs', 's3'])

    setdefault('.active-stack', stackname)
def launch(pname, instance_id=None, alt_config='standalone', *repolist):
    """creates and provisions a masterless stack for project `pname`.

    `instance_id` distinguishes multiple instances of the same project,
    `alt_config` selects an alternate project configuration (must have
    masterless=True) and `repolist` optionally pins formula repositories."""
    stackname = cfn.generate_stack_from_input(pname, instance_id, alt_config)
    pdata = core.project_data_for_stackname(stackname)
    # ensure given alt config has masterless=True
    ensure(pdata['aws-alt'], "project has no alternate configurations")
    ensure(alt_config in pdata['aws-alt'], "unknown alt-config %r" % alt_config)
    ensure(pdata['aws-alt'][alt_config]['ec2']['masterless'], "alternative configuration %r has masterless=False" % alt_config)
    formula_revisions = parse_validate_repolist(pdata, *repolist)
    # summary of what is about to be created
    LOG.info('attempting to create masterless stack:')
    LOG.info('stackname:\t' + stackname)
    LOG.info('region:\t' + pdata['aws']['region'])
    LOG.info('formula_revisions:\t%s' % pformat(formula_revisions))
    # master-server stacks additionally need access to the builder-private repo
    if core.is_master_server_stack(stackname):
        checks.ensure_can_access_builder_private(pname)
    checks.ensure_stack_does_not_exist(stackname)
    bootstrap.create_stack(stackname)
    LOG.info('updating stack %s', stackname)
    bootstrap.update_stack(stackname, service_list=['ec2', 'sqs', 's3'], formula_revisions=formula_revisions)
def launch(pname, instance_id=None):
    # creates and provisions a stack for project `pname`, remembering it as
    # the active stack. warns and bails when no master server exists yet.
    # NOTE: Python 2 era code (print statements).
    try:
        stackname = generate_stack_from_input(pname, instance_id)
        pdata = core.project_data_for_stackname(stackname)
        print 'attempting to create stack:'
        print ' stackname: ' + stackname
        print ' region: ' + pdata['aws']['region']
        print
        # master-server stacks need the org's builder-private repo for a deploy key
        if core.is_master_server_stack(stackname):
            if not checks.can_access_builder_private(pname):
                print "failed to access your organisation's 'builder-private' repository:"
                print ' ' + pdata['private-repo']
                print "you'll need access to this repository to add a deploy key later"
                print
                return
        bootstrap.create_update(stackname)
        setdefault('.active-stack', stackname)
    except core.NoMasterException as e:
        # no master server to attach minions to; tell the user how to make one
        LOG.warn(e.message)
        print "\n%s\ntry `./bldr master.create`'" % e.message
def launch(pname): try: stackname = create_stack(pname) pdata = core.project_data_for_stackname(stackname) print 'attempting to create stack:' print ' stackname: ' + stackname print ' region: ' + pdata['aws']['region'] print if core.is_master_server_stack(stackname): if not checks.can_access_builder_private(pname): print "failed to access your organisation's 'builder-private' repository:" print ' ' + pdata['private-repo'] print "you'll need access to this repository to add a deploy key later" print return stackname = create_update(stackname) if stackname: setdefault('.active-stack', stackname) except core.NoMasterException, e: LOG.warn(e.message) print "\n%s\ntry `./bldr master.create`'" % e.message