def _base_context(self, project_name='dummy1'):
    stackname = '%s--test' % project_name
    context = cfngen.build_context(project_name, stackname=stackname)
    context_handler.write_context(stackname, context)
    template = cfngen.render_template(context)
    cfngen.write_template(stackname, template)
    return context

def update_salt(stackname):
    "updates the Salt version installed on the instances for the given stack"
    # start instance if it is stopped
    # acquire a lock from Alfred (if possible) so the instance isn't shut down while being updated
    cfn._check_want_to_be_running(stackname, autostart=True)
    context = context_handler.load_context(stackname)
    if not context.get('ec2'):
        LOG.info("no ec2 context. skipping: %s", stackname)
        return

    pdata = core.project_data_for_stackname(stackname)
    LOG.info("upgrading salt minion")
    context['project']['salt'] = pdata['salt']

    LOG.info("updating context")
    context_handler.write_context(stackname, context)

    LOG.info("updating buildvars")
    buildvars.refresh(stackname, context)

    LOG.info("updating nodes")
    bootstrap.update_ec2_stack(stackname, context, concurrency='serial')
    return True

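# Hypothetical invocation of the task above; 'journal--end2end' is an example stack
# name, not taken from the source. `update_salt` returns True on success and None
# when the stack has no ec2 context.
update_salt('journal--end2end')
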
def remaster(stackname, new_master_stackname):
    "tell minion who their new master is. deletes any existing master key on minion"
    # TODO: turn this into a decorator
    import cfn
    # start the machine if it's stopped
    # you might also want to acquire a lock so alfred doesn't stop things
    cfn._check_want_to_be_running(stackname, 1)

    master_ip = _cached_master_ip(new_master_stackname)
    LOG.info('re-mastering %s to %s', stackname, master_ip)

    context = context_handler.load_context(stackname)

    # remove if no longer an issue
    # if context.get('ec2') == True:
    #     # TODO: duplicates bad ec2 data wrangling in cfngen.build_context
    #     # ec2 == True for some reason, which is completely useless
    #     LOG.warn("bad context for stack: %s", stackname)
    #     context['ec2'] = {}
    #     context['project']['aws']['ec2'] = {}

    if not context.get('ec2'):
        LOG.info("no ec2 context, skipping %s", stackname)
        return

    if context['ec2'].get('master_ip') == master_ip:
        LOG.info("already remastered: %s", stackname)
        try:
            utils.confirm("Skip?")
            return
        except KeyboardInterrupt:
            LOG.info("not skipping")

    LOG.info("upgrading salt client")
    pdata = core.project_data_for_stackname(stackname)
    context['project']['salt'] = pdata['salt']

    LOG.info("setting new master address")
    cfngen.set_master_address(pdata, context, master_ip) # mutates context

    # update context
    LOG.info("updating context")
    context_handler.write_context(stackname, context)

    # update buildvars
    LOG.info("updating buildvars")
    buildvars.refresh(stackname, context)

    # remove knowledge of old master
    def work():
        remote_sudo("rm -f /etc/salt/pki/minion/minion_master.pub") # destroy the old master key we have
    LOG.info("removing old master key from minion")
    core.stack_all_ec2_nodes(stackname, work, username=config.BOOTSTRAP_USER)

    # update ec2 nodes
    LOG.info("updating nodes")
    bootstrap.update_ec2_stack(stackname, context, concurrency='serial')
    return True

def remaster(stackname, new_master_stackname):
    "tell minion who their new master is. deletes any existing master key on minion"
    # TODO: turn this into a decorator
    import cfn
    # start the machine if it's stopped
    # you might also want to acquire a lock so alfred doesn't stop things
    cfn._check_want_to_be_running(stackname, 1)

    master_ip = _cached_master_ip(new_master_stackname)
    LOG.info('re-mastering %s to %s', stackname, master_ip)

    context = context_handler.load_context(stackname)

    # remove if no longer an issue
    # if context.get('ec2') == True:
    #     # TODO: duplicates bad ec2 data wrangling in cfngen.build_context
    #     # ec2 == True for some reason, which is completely useless
    #     LOG.warn("bad context for stack: %s", stackname)
    #     context['ec2'] = {}
    #     context['project']['aws']['ec2'] = {}

    if not context.get('ec2'):
        LOG.info("no ec2 context, skipping %s", stackname)
        return

    if context['ec2'].get('master_ip') == master_ip:
        LOG.info("already remastered: %s", stackname)
        try:
            utils.confirm("Skip?")
            return
        except KeyboardInterrupt:
            LOG.info("not skipping")

    LOG.info("upgrading salt client")
    pdata = core.project_data_for_stackname(stackname)
    context['project']['salt'] = pdata['salt']

    LOG.info("setting new master address")
    cfngen.set_master_address(pdata, context, master_ip) # mutates context

    # update context
    LOG.info("updating context")
    context_handler.write_context(stackname, context)

    # update buildvars
    LOG.info("updating buildvars")
    buildvars.refresh(stackname, context)

    # remove knowledge of old master
    def work():
        sudo("rm -f /etc/salt/pki/minion/minion_master.pub") # destroy the old master key we have
    LOG.info("removing old master key from minion")
    core.stack_all_ec2_nodes(stackname, work, username=config.BOOTSTRAP_USER)

    # update ec2 nodes
    LOG.info("updating nodes")
    bootstrap.update_ec2_stack(stackname, context, concurrency='serial')
    return True

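# A minimal sketch of the "turn this into a decorator" TODO in the `remaster` variants
# above, assuming the same cfn/context_handler/LOG helpers they already use.
# `requires_running_ec2` is a hypothetical name, not part of the codebase.
from functools import wraps

def requires_running_ec2(fn):
    "starts the instance if stopped and skips stacks that have no ec2 context"
    @wraps(fn)
    def wrapper(stackname, *args, **kwargs):
        cfn._check_want_to_be_running(stackname, autostart=True)
        context = context_handler.load_context(stackname)
        if not context.get('ec2'):
            LOG.info("no ec2 context, skipping %s", stackname)
            return None
        return fn(stackname, *args, **kwargs)
    return wrapper
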
def _base_context(self, project_name='dummy1', in_memory=False, existing_context=None):
    environment_name = base.generate_environment_name()
    stackname = '%s--%s' % (project_name, environment_name)
    context = cfngen.build_context(
        project_name,
        stackname=stackname,
        existing_context=existing_context if existing_context is not None else {})
    if not in_memory:
        context_handler.write_context(stackname, context)
    template = cloudformation.render_template(context)
    cloudformation.write_template(stackname, template)
    return context

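# Hypothetical test method on the class that defines `_base_context` above:
# `in_memory=True` skips persisting the context, so the test leaves no state behind.
def test_builds_context_without_persisting_it(self):
    context = self._base_context('dummy1', in_memory=True)
    self.assertIn('project', context)
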
def update_infrastructure(stackname, skip=None, start=['ec2']):
    """Limited update of the CloudFormation template and/or Terraform template.

    Resources can be added, but most of the existing ones are immutable.

    Some resources are updatable in place.

    We never add anything related to EC2 instances, as that is not supported
    (they would come up as part of the template but without any software on them).

    EC2 instances must be running while this is executed, or resources like their
    PublicIP will be inaccessible.

    Allows skipping the EC2, SQS, S3 updates by passing `skip=ec2\\,sqs\\,s3`

    By default starts EC2 instances, but this can be avoided by passing `start=`"""
    skip = skip.split(",") if skip else []
    start = start.split(",") if isinstance(start, str) else start or []

    (pname, _) = core.parse_stackname(stackname)
    more_context = {}
    context, delta, current_context = cfngen.regenerate_stack(stackname, **more_context)

    if _are_there_existing_servers(current_context) and 'ec2' in start:
        core_lifecycle.start(stackname)
    LOG.info("Create: %s", pformat(delta.plus))
    LOG.info("Update: %s", pformat(delta.edit))
    LOG.info("Delete: %s", pformat(delta.minus))
    LOG.info("Terraform delta: %s", delta.terraform)
    # see: `buildercore.config.BUILDER_NON_INTERACTIVE` for skipping confirmation prompts
    utils.confirm('Confirming changes to CloudFormation and Terraform templates?')

    context_handler.write_context(stackname, context)

    cloudformation.update_template(stackname, delta.cloudformation)
    terraform.update_template(stackname)

    # TODO: move inside bootstrap.update_stack
    # EC2
    if _are_there_existing_servers(context) and 'ec2' not in skip:
        # the /etc/buildvars.json file may need to be updated
        buildvars.refresh(stackname, context)
        update(stackname)

    # SQS
    if context.get('sqs', {}) and 'sqs' not in skip:
        bootstrap.update_stack(stackname, service_list=['sqs'])

    # S3
    if context.get('s3', {}) and 's3' not in skip:
        bootstrap.update_stack(stackname, service_list=['s3'])

def test_storing_a_context_on_s3_and_retrieving_it_from_a_new_client(self):
    stackname = 'dummy1--prod'
    context = cfngen.build_context('dummy1', stackname=stackname)
    context_handler.write_context(stackname, context)
    expected = context_handler.load_context(stackname)
    remove(context_handler.local_context_file(stackname))
    downloaded = context_handler.load_context(stackname)
    self.assertEqual(expected, downloaded)

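# An illustrative sketch (not the actual builder implementation) of the local-then-S3
# fallback the test above exercises: `load_context` prefers the local copy and, when
# the local file has been removed, falls back to the copy previously written to S3.
# `_download_context_from_s3` is a hypothetical helper.
import json, os

def load_context_sketch(stackname):
    path = context_handler.local_context_file(stackname)
    if not os.path.exists(path):
        # no local copy: fetch the context from S3 and cache it locally
        _download_context_from_s3(stackname, path)
    with open(path) as fh:
        return json.load(fh)
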
def update_infrastructure(stackname, skip=None, start=['ec2']):
    """Limited update of the CloudFormation template and/or Terraform template.

    Resources can be added, but most of the existing ones are immutable.

    Some resources are updatable in place.

    We never add anything related to EC2 instances, as that is not supported
    (they would come up as part of the template but without any software on them).

    EC2 instances must be running while this is executed, or resources like their
    PublicIP will be inaccessible.

    Allows skipping the EC2, SQS, S3 updates by passing `skip=ec2\\,sqs\\,s3`

    By default starts EC2 instances, but this can be avoided by passing `start=`"""
    skip = skip.split(",") if skip else []
    start = start.split(",") if isinstance(start, str) else start or []

    (pname, _) = core.parse_stackname(stackname)
    more_context = {}
    context, delta, current_context = cfngen.regenerate_stack(stackname, **more_context)

    if _are_there_existing_servers(current_context) and 'ec2' in start:
        core_lifecycle.start(stackname)
    LOG.info("Create: %s", pformat(delta.plus))
    LOG.info("Update: %s", pformat(delta.edit))
    LOG.info("Delete: %s", pformat(delta.minus))
    LOG.info("Terraform delta: %s", delta.terraform)
    utils.confirm('Confirming changes to CloudFormation and Terraform templates?')

    context_handler.write_context(stackname, context)

    cloudformation.update_template(stackname, delta.cloudformation)
    terraform.update_template(stackname)

    # TODO: move inside bootstrap.update_stack
    # EC2
    if _are_there_existing_servers(context) and 'ec2' not in skip:
        # the /etc/buildvars.json file may need to be updated
        buildvars.refresh(stackname, context)
        update(stackname)

    # SQS
    if context.get('sqs', {}) and 'sqs' not in skip:
        bootstrap.update_stack(stackname, service_list=['sqs'])

    # S3
    if context.get('s3', {}) and 's3' not in skip:
        bootstrap.update_stack(stackname, service_list=['s3'])

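# Hypothetical invocation; 'journal--end2end' is an example stack name. This applies the
# CloudFormation/Terraform and EC2 updates while skipping the SQS and S3 steps; passing
# `start=''` in addition would leave stopped EC2 instances stopped, per the docstring above.
update_infrastructure('journal--end2end', skip='sqs,s3')
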
def remaster(stackname, new_master_stackname="master-server--2018-04-09-2"): "tell minion who their new master is. deletes any existing master key on minion" # start instance if it is stopped # acquire a lock from Alfred (if possible) so instance isn't shutdown while being updated cfn._check_want_to_be_running(stackname, autostart=True) master_ip = _cached_master_ip(new_master_stackname) LOG.info('re-mastering %r to %r', stackname, master_ip) context = context_handler.load_context(stackname) if not context.get('ec2'): LOG.info("no ec2 context, skipping %s", stackname) return if context['ec2'].get('master_ip') == master_ip: LOG.info("already remastered: %s", stackname) return pdata = core.project_data_for_stackname(stackname) LOG.info("setting new master address") cfngen.set_master_address(pdata, context, master_ip) # mutates context LOG.info("updating context") context_handler.write_context(stackname, context) LOG.info("updating buildvars") buildvars.refresh(stackname, context) # remove knowledge of old master by destroying the minion's master pubkey def workerfn(): remote_sudo("rm -f /etc/salt/pki/minion/minion_master.pub") LOG.info("removing old master key from minion") core.stack_all_ec2_nodes(stackname, workerfn, username=config.BOOTSTRAP_USER) LOG.info("updating nodes") # todo: how to pass in --dry-run to highstate.sh ? bootstrap.update_ec2_stack(stackname, context, concurrency='serial') return True
def update_template(stackname):
    """Limited update of the CloudFormation template.

    Resources can be added, but most of the existing ones are immutable.

    Some resources are updatable in place.

    We never add anything related to EC2 instances, as that is not supported
    (they would come up as part of the template but without any software on them).

    EC2 instances must be running while this is executed, or resources like their
    PublicIP will be inaccessible."""
    (pname, _) = core.parse_stackname(stackname)
    more_context = cfngen.choose_config(stackname)
    context, delta_plus, delta_minus = cfngen.regenerate_stack(pname, **more_context)

    if context['ec2']:
        core_lifecycle.start(stackname)
    LOG.info("Create/update: %s", pformat(delta_plus))
    LOG.info("Delete: %s", pformat(delta_minus))
    utils.confirm('Confirming changes to the stack template? This will rewrite the context and the CloudFormation template')

    context_handler.write_context(stackname, context)

    if delta_plus['Resources'] or delta_plus['Outputs'] or delta_minus['Resources'] or delta_minus['Outputs']:
        new_template = cfngen.merge_delta(stackname, delta_plus, delta_minus)
        bootstrap.update_template(stackname, new_template)
        # the /etc/buildvars.json file may need to be updated
        buildvars.refresh(stackname, context)
    else:
        # attempting to apply an empty change set would result in an error
        LOG.info("Nothing to update on CloudFormation")

    update(stackname)
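# An illustrative (made up) shape for the deltas handled by this older variant: both are
# CloudFormation fragments keyed by 'Resources' and 'Outputs', which is why the emptiness
# check above inspects those two keys before calling `merge_delta`.
example_delta_plus = {
    'Resources': {'AttachedDB': {'Type': 'AWS::RDS::DBInstance'}}, # hypothetical new resource
    'Outputs': {},
}
example_delta_minus = {'Resources': {}, 'Outputs': {}}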