def launch(pname, instance_id=None, alt_config='standalone', *repolist):
    """Create and provision a masterless stack for `pname`.

    Any trailing `repolist` arguments are parsed into formula revisions
    that are passed through to the provisioning step.
    """
    stackname = cfn.generate_stack_from_input(pname, instance_id, alt_config)
    pdata = core.project_data_for_stackname(stackname)

    # the chosen alt-config must exist and must be masterless.
    # todo: can the choices presented to the user remove non-masterless alt-configs?
    ensure(pdata['aws-alt'], "project has no alternate configurations")
    ensure(alt_config in pdata['aws-alt'], "unknown alt-config %r" % alt_config)
    ensure(pdata['aws-alt'][alt_config]['ec2']['masterless'],
           "alternative configuration %r has masterless=False" % alt_config)

    formula_revisions = parse_validate_repolist(pdata, *repolist)

    # todo: this is good UX but was simply debug output that got left in.
    # a better summary of what is to be created could be printed out,
    # preferably after the templates are printed out but before confirmation.
    LOG.info('attempting to create masterless stack:')
    LOG.info('stackname:\t' + stackname)
    LOG.info('region:\t' + pdata['aws']['region'])
    LOG.info('formula_revisions:\t%s' % pformat(formula_revisions))

    if core.is_master_server_stack(stackname):
        checks.ensure_can_access_builder_private(pname)
    checks.ensure_stack_does_not_exist(stackname)

    bootstrap.create_stack(stackname)

    LOG.info('updating stack %s', stackname)
    bootstrap.update_stack(stackname,
                           service_list=['ec2', 'sqs', 's3'],
                           formula_revisions=formula_revisions)
def create_update(stackname):
    """Ensure `stackname` exists, then update (provision) it.

    Creates the stack only when it is not already active; the update step
    always runs. Returns `stackname` for chaining by callers.
    """
    if not core.stack_is_active(stackname):
        # fix: was a Python 2 `print` statement; the parenthesised form
        # behaves identically on Python 2 and works on Python 3.
        print('stack does not exist, creating')
        bootstrap.create_stack(stackname)
    print('updating stack')
    bootstrap.update_stack(stackname)
    return stackname
def launch(pname, instance_id=None, alt_config='standalone', *repolist):
    """Create and provision a masterless stack for project `pname`."""
    stackname = cfn.generate_stack_from_input(pname, instance_id, alt_config)
    pdata = core.project_data_for_stackname(stackname)

    # ensure given alt config has masterless=True
    ensure(pdata['aws-alt'], "project has no alternate configurations")
    ensure(alt_config in pdata['aws-alt'], "unknown alt-config %r" % alt_config)
    ensure(pdata['aws-alt'][alt_config]['ec2']['masterless'],
           "alternative configuration %r has masterless=False" % alt_config)

    formula_revisions = parse_validate_repolist(pdata, *repolist)

    # summarise what is about to happen before doing it
    LOG.info('attempting to create masterless stack:')
    LOG.info('stackname:\t' + stackname)
    LOG.info('region:\t' + pdata['aws']['region'])
    LOG.info('formula_revisions:\t%s' % pformat(formula_revisions))

    if core.is_master_server_stack(stackname):
        checks.ensure_can_access_builder_private(pname)
    checks.ensure_stack_does_not_exist(stackname)

    bootstrap.create_stack(stackname)

    LOG.info('updating stack %s', stackname)
    bootstrap.update_stack(stackname,
                           service_list=['ec2', 'sqs', 's3'],
                           formula_revisions=formula_revisions)
def test_create(self):
    """Exercise stack creation, buildvars, remote commands, file download
    and the stop-if-running-for lifecycle path against the 'dummy1' project."""
    with settings(abort_on_prompts=True):
        project = 'dummy1'
        stackname = '%s--%s' % (project, self.environment)

        # start from a clean slate and register for later teardown
        cfn.ensure_destroyed(stackname)
        self.stacknames.append(stackname)

        cfngen.generate_stack(project, stackname=stackname)
        bootstrap.create_stack(stackname)

        buildvars.switch_revision(stackname, 'master')
        buildvars.force(stackname, 'answer', 'forty-two')

        # run a command across all nodes, then against a single node
        cfn.cmd(stackname, "ls -l /", username=BOOTSTRAP_USER, concurrency='parallel')
        cfn.cmd(stackname, "ls -l /", username=BOOTSTRAP_USER, concurrency='parallel', node=1)

        # download to an explicit filename, a directory, and a new subfolder
        cfn.download_file(stackname, "/bin/ls", "ls", use_bootstrap_user="******")
        self.assertTrue(os.path.isfile("./ls"))
        cfn.download_file(stackname, "/bin/less", "venv/bin/", use_bootstrap_user="******")
        self.assertTrue(os.path.isfile("./venv/bin/less"))
        cfn.download_file(stackname, "/bin/pwd", "subfolder/pwd", use_bootstrap_user="******")
        self.assertTrue(os.path.isfile("./subfolder/pwd"))

        # should exercise the code but do nothing, as this test's instance
        # can't have been running for a year
        lifecycle.stop_if_running_for(stackname, minimum_minutes=60 * 24 * 365)
def set_up_stack(cls, project, explicitly_start=False):
    """Create (or re-use) a test stack for `project` and record it on `cls`.

    Sets `cls.stackname`, `cls.context`, `cls.cfn_template` and `cls.region`.
    When `explicitly_start` is True the instance is also started.

    fix: the statefile was read via `json.load(open(...))`, leaking the
    file handle; it is now opened in a `with` block.
    """
    switch_in_test_settings()

    # to re-use an existing stack, ensure cls.reuse_existing_stack is True
    # this will read the instance name from a temporary file (if it exists) and
    # look for that, creating it if doesn't exist yet
    # also ensure cls.cleanup is False so the instance isn't destroyed after tests complete
    cls.reuse_existing_stack = config.TWI_REUSE_STACK
    cls.cleanup = config.TWI_CLEANUP

    cls.stacknames = []
    cls.environment = generate_environment_name()
    # cls.temp_dir, cls.rm_temp_dir = utils.tempdir()

    # debugging only, where we keep an instance up between processes
    cls.state, cls.statefile = {}, '/tmp/.open-test-instances.txt'

    if cls.reuse_existing_stack and os.path.exists(cls.statefile):
        # evidence of a previous instance and we've been told to re-use old instances
        with open(cls.statefile, 'r') as fh:
            old_state = json.load(fh)
        old_env = old_state.get('environment')
        # test if the old stack still exists ...
        if old_env and core.describe_stack(project + "--" + old_env, allow_missing=True):
            cls.state = old_state
            cls.environment = old_env
        else:
            # nope. old statefile is bogus, delete it
            os.unlink(cls.statefile)

    cls.state['environment'] = cls.environment  # will be saved later

    with settings(abort_on_prompts=True):
        cls.stackname = '%s--%s' % (project, cls.environment)
        cls.stacknames.append(cls.stackname)

        if cls.cleanup:
            LOG.info("ensure_destroyed %s", cls.stackname)
            cfn.ensure_destroyed(cls.stackname)

        cls.context, cls.cfn_template, _ = cfngen.generate_stack(
            project, stackname=cls.stackname)
        cls.region = cls.context['aws']['region']

        LOG.info("create_stack %s", cls.stackname)
        bootstrap.create_stack(cls.stackname)

        if explicitly_start:
            LOG.info("start %s", cls.stackname)
            lifecycle.start(cls.stackname)
def test_blue_green_operations(self):
    """Run a command with blue-green concurrency against a clustered project.

    fix: `print output` was a Python 2 statement; now uses the function
    form, which is behavior-identical on Python 2 and valid on Python 3.
    """
    with settings(abort_on_prompts=True):
        project = 'project-with-cluster-integration-tests'
        stackname = '%s--%s' % (project, self.environment)

        # start clean and register for teardown
        cfn.ensure_destroyed(stackname)
        self.stacknames.append(stackname)

        cfngen.generate_stack(project, stackname=stackname)
        bootstrap.create_stack(stackname)

        output = cfn.cmd(stackname, 'ls -l /', username=BOOTSTRAP_USER, concurrency='blue-green')
        print(output)
def set_up_stack(cls, project, explicitly_start=False):
    """Create (or re-use) a test stack for `project` and record it on `cls`.

    Sets `cls.stackname`, `cls.context`, `cls.cfn_template` and `cls.region`.
    When `explicitly_start` is True the instance is also started.

    fix: the statefile was read via `json.load(open(...))`, leaking the
    file handle; it is now opened in a `with` block.
    """
    switch_in_test_settings()

    # to re-use an existing stack, ensure cls.reuse_existing_stack is True
    # this will read the instance name from a temporary file (if it exists) and
    # look for that, creating it if doesn't exist yet
    # also ensure cls.cleanup is False so the instance isn't destroyed after tests complete
    cls.reuse_existing_stack = config.TWI_REUSE_STACK
    cls.cleanup = config.TWI_CLEANUP

    cls.stacknames = []
    cls.environment = generate_environment_name()
    # cls.temp_dir, cls.rm_temp_dir = utils.tempdir()

    # debugging only, where we keep an instance up between processes
    cls.state, cls.statefile = {}, '/tmp/.open-test-instances.txt'

    if cls.reuse_existing_stack and os.path.exists(cls.statefile):
        # evidence of a previous instance and we've been told to re-use old instances
        with open(cls.statefile, 'r') as fh:
            old_state = json.load(fh)
        old_env = old_state.get('environment')
        # test if the old stack still exists ...
        if old_env and core.describe_stack(project + "--" + old_env, allow_missing=True):
            cls.state = old_state
            cls.environment = old_env
        else:
            # nope. old statefile is bogus, delete it
            os.unlink(cls.statefile)

    cls.state['environment'] = cls.environment  # will be saved later

    with settings(abort_on_prompts=True):
        cls.stackname = '%s--%s' % (project, cls.environment)
        cls.stacknames.append(cls.stackname)

        if cls.cleanup:
            LOG.info("ensure_destroyed %s", cls.stackname)
            cfn.ensure_destroyed(cls.stackname)

        cls.context, cls.cfn_template, _ = cfngen.generate_stack(project, stackname=cls.stackname)
        cls.region = cls.context['aws']['region']

        LOG.info("create_stack %s", cls.stackname)
        bootstrap.create_stack(cls.stackname)

        if explicitly_start:
            LOG.info("start %s", cls.stackname)
            lifecycle.start(cls.stackname)
def test_create(self):
    """Create a 'dummy1' stack, poke its buildvars, then stop and restart it."""
    with settings(abort_on_prompts=True):
        project = 'dummy1'
        stackname = '%s--%s' % (project, self.environment)

        cfn.ensure_destroyed(stackname)
        self.stacknames.append(stackname)  # ensures stack is destroyed

        cfngen.generate_stack(project, stackname=stackname)
        bootstrap.create_stack(stackname)

        buildvars.switch_revision(stackname, 'master')
        buildvars.force(stackname, 'answer', 'forty-two')

        # a full stop/start cycle
        lifecycle.stop(stackname)
        lifecycle.start(stackname)
def launch(pname, instance_id=None, alt_config=None):
    """Create and provision a stack for project `pname`, then remember it
    as the active stack."""
    stackname = generate_stack_from_input(pname, instance_id, alt_config)
    pdata = core.project_data_for_stackname(stackname)

    LOG.info('attempting to create %s (AWS region %s)', stackname, pdata['aws']['region'])

    if core.is_master_server_stack(stackname):
        checks.ensure_can_access_builder_private(pname)

    bootstrap.create_stack(stackname)

    LOG.info('updating stack %s', stackname)
    # TODO: highstate.sh (think it's run inside here) doesn't detect:
    # [34.234.95.137] out: [CRITICAL] The Salt Master has rejected this minion's public key!
    bootstrap.update_stack(stackname, service_list=['ec2', 'sqs', 's3'])

    setdefault('.active-stack', stackname)
def test_create(self):
    """Full create/command/download pass against 'dummy1', finishing with
    the stop-if-running-for lifecycle path."""
    with settings(abort_on_prompts=True):
        project = 'dummy1'
        stackname = '%s--%s' % (project, self.environment)

        cfn.ensure_destroyed(stackname)
        self.stacknames.append(stackname)

        cfngen.generate_stack(project, stackname=stackname)
        bootstrap.create_stack(stackname)

        buildvars.switch_revision(stackname, 'master')
        buildvars.force(stackname, 'answer', 'forty-two')

        # all nodes, then a specific node
        cfn.cmd(stackname, "ls -l /", username=BOOTSTRAP_USER, concurrency='parallel')
        cfn.cmd(stackname, "ls -l /", username=BOOTSTRAP_USER, concurrency='parallel', node=1)

        # download to a filename, a directory, and a not-yet-existing subfolder
        cfn.download_file(stackname, "/bin/ls", "ls", use_bootstrap_user="******")
        self.assertTrue(os.path.isfile("./ls"))
        cfn.download_file(stackname, "/bin/less", "venv/bin/", use_bootstrap_user="******")
        self.assertTrue(os.path.isfile("./venv/bin/less"))
        cfn.download_file(stackname, "/bin/pwd", "subfolder/pwd", use_bootstrap_user="******")
        self.assertTrue(os.path.isfile("./subfolder/pwd"))

        lifecycle.stop_if_running_for(
            stackname, minimum_minutes=60 * 24 * 365
        )  # should exercise the code but do nothing, as this test's instance can't have been running for a year
def launch(pname, instance_id=None, alt_config=None):
    """Create and provision a stack for project `pname`, then remember it
    as the active stack. A no-op if the stack already exists."""
    try:
        stackname = generate_stack_from_input(pname, instance_id, alt_config)
    except checks.StackAlreadyExistsProblem as e:
        # nothing to do; bail out quietly
        LOG.info('stack %s already exists', e.stackname)
        return

    pdata = core.project_data_for_stackname(stackname)
    LOG.info('attempting to create %s (AWS region %s)', stackname, pdata['aws']['region'])

    if core.is_master_server_stack(stackname):
        checks.ensure_can_access_builder_private(pname)

    bootstrap.create_stack(stackname)

    LOG.info('updating stack %s', stackname)
    # TODO: highstate.sh (think it's run inside here) doesn't detect:
    # [34.234.95.137] out: [CRITICAL] The Salt Master has rejected this minion's public key!
    bootstrap.update_stack(stackname, service_list=['ec2', 'sqs', 's3'])

    setdefault('.active-stack', stackname)
def test_create(self):
    """Create a 'dummy1' stack, cycle it through stop/start, then exercise
    remote commands and file downloads."""
    with settings(abort_on_prompts=True):
        project = 'dummy1'
        stackname = '%s--%s' % (project, self.environment)

        cfn.ensure_destroyed(stackname)
        self.stacknames.append(stackname)

        cfngen.generate_stack(project, stackname=stackname)
        bootstrap.create_stack(stackname)

        buildvars.switch_revision(stackname, 'master')
        buildvars.force(stackname, 'answer', 'forty-two')

        # full stop/start cycle before exercising commands
        lifecycle.stop(stackname)
        lifecycle.start(stackname)

        cfn.cmd(stackname, "ls -l /", username=BOOTSTRAP_USER, concurrency='parallel')

        # download to a filename, a directory, and a new subfolder
        cfn.download_file(stackname, "/bin/ls", "ls", use_bootstrap_user="******")
        self.assertTrue(os.path.isfile("./ls"))
        cfn.download_file(stackname, "/bin/less", "venv/bin/", use_bootstrap_user="******")
        self.assertTrue(os.path.isfile("./venv/bin/less"))
        cfn.download_file(stackname, "/bin/pwd", "subfolder/pwd", use_bootstrap_user="******")
        self.assertTrue(os.path.isfile("./subfolder/pwd"))
def test_bootstrap_create_stack_idempotence(self):
    "the same stack cannot be created multiple times"
    # NOTE(review): the stack named by self.stackname presumably already
    # exists (created in setup), so this call exercises the re-create path;
    # there is no explicit failure assertion here — confirm create_stack
    # itself raises or no-ops on a duplicate.
    bootstrap.create_stack(self.stackname)