def test_scaling_up_meta_update(self): t = template_format.parse(as_template) # Add CustomLB (just AWS::EC2::Instance) to template t['Resources']['MyCustomLB'] = { 'Type': 'AWS::EC2::Instance', 'ImageId': { 'Ref': 'ImageId' }, 'InstanceType': 'bar', 'Metadata': { 'IPs': { 'Fn::GetAtt': ['WebServerGroup', 'InstanceList'] } } } stack = utils.parse_stack(t, params=self.params) # Create initial group self._stub_lb_reload(1) now = timeutils.utcnow() self._stub_meta_expected(now, 'ExactCapacity : 1') self._stub_create(1) self.m.ReplayAll() rsrc = self.create_scaling_group(t, stack, 'WebServerGroup') stack.resources['WebServerGroup'] = rsrc self.assertEqual(1, len(grouputils.get_member_names(rsrc))) # Scale up one self._stub_lb_reload(2) self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2) self._stub_create(1) self.m.ReplayAll() up_policy = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy') alarm_url = up_policy.FnGetAtt('AlarmUrl') self.assertIsNotNone(alarm_url) up_policy.signal() self.assertEqual(2, len(grouputils.get_member_names(rsrc))) # Check CustomLB metadata was updated self.m.StubOutWithMock(instance.Instance, '_ipaddress') instance.Instance._ipaddress().MultipleTimes().AndReturn('127.0.0.1') self.m.ReplayAll() expected_meta = {'IPs': u'127.0.0.1,127.0.0.1'} self.assertEqual(expected_meta, stack['MyCustomLB'].metadata_get()) rsrc.delete() self.m.VerifyAll()
def test_scaling_up_meta_update(self): t = template_format.parse(as_template) # Add CustomLB (just AWS::EC2::Instance) to template t['Resources']['MyCustomLB'] = { 'Type': 'AWS::EC2::Instance', 'ImageId': {'Ref': 'ImageId'}, 'InstanceType': 'bar', 'Metadata': { 'IPs': {'Fn::GetAtt': ['WebServerGroup', 'InstanceList']} } } stack = utils.parse_stack(t, params=self.params) # Create initial group self._stub_lb_reload(1) now = timeutils.utcnow() self._stub_meta_expected(now, 'ExactCapacity : 1') self._stub_create(1) self.m.ReplayAll() rsrc = self.create_scaling_group(t, stack, 'WebServerGroup') stack.resources['WebServerGroup'] = rsrc self.assertEqual(1, len(grouputils.get_member_names(rsrc))) # Scale up one self._stub_lb_reload(2) self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2) self._stub_create(1) self.m.ReplayAll() up_policy = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy') alarm_url = up_policy.FnGetAtt('AlarmUrl') self.assertIsNotNone(alarm_url) up_policy.signal() self.assertEqual(2, len(grouputils.get_member_names(rsrc))) # Check CustomLB metadata was updated self.m.StubOutWithMock(instance.Instance, '_ipaddress') instance.Instance._ipaddress().MultipleTimes().AndReturn( '127.0.0.1') self.m.ReplayAll() expected_meta = {'IPs': u'127.0.0.1,127.0.0.1'} self.assertEqual(expected_meta, stack['MyCustomLB'].metadata_get()) rsrc.delete() self.m.VerifyAll()
def test_scaling_group_resume_fail(self):
    """A failing member resume marks the group (RESUME, FAILED).

    The error raised by the stubbed resume is wrapped in
    exception.ResourceFailure and surfaces in status_reason.
    """
    t = template_format.parse(as_template)
    stack = utils.parse_stack(t, params=self.params)

    # Create the group with one member.
    self._stub_lb_reload(1)
    now = timeutils.utcnow()
    self._stub_meta_expected(now, 'ExactCapacity : 1')
    self._stub_create(1)
    self.m.ReplayAll()
    rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
    self.assertEqual(utils.PhysName(stack.name, rsrc.name),
                     rsrc.FnGetRefId())
    self.assertEqual(1, len(grouputils.get_member_names(rsrc)))
    self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
    self.m.VerifyAll()
    self.m.UnsetStubs()

    # Re-stub so the member's resume raises 'oops'.
    self._stub_resume(with_error='oops')
    self.m.ReplayAll()

    # Pretend the group and its members were previously suspended.
    rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
    for i in rsrc.nested().values():
        i.state_set(rsrc.SUSPEND, rsrc.COMPLETE)

    sus_task = scheduler.TaskRunner(rsrc.resume)
    self.assertRaises(exception.ResourceFailure, sus_task, ())
    self.assertEqual((rsrc.RESUME, rsrc.FAILED), rsrc.state)
    self.assertEqual('Error: Resource RESUME failed: Error: oops',
                     rsrc.status_reason)

    rsrc.delete()
    self.m.VerifyAll()
def test_scaling_group_resume_multiple(self):
    """Resuming a two-member group resumes every member successfully."""
    t = template_format.parse(as_template)
    properties = t['Resources']['WebServerGroup']['Properties']
    properties['DesiredCapacity'] = '2'
    stack = utils.parse_stack(t, params=self.params)

    # Create the group with two members.
    self._stub_lb_reload(2)
    now = timeutils.utcnow()
    self._stub_meta_expected(now, 'ExactCapacity : 2')
    self._stub_create(2)
    self.m.ReplayAll()
    rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
    self.assertEqual(utils.PhysName(stack.name, rsrc.name),
                     rsrc.FnGetRefId())
    self.assertEqual(2, len(grouputils.get_member_names(rsrc)))
    self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
    self.m.VerifyAll()
    self.m.UnsetStubs()

    # One resume cookie per member.
    self._stub_resume(cookies=[('foo1', 'foo2', 'foo3'),
                               ('bar1', 'bar2', 'bar3')])
    self.m.ReplayAll()

    # Pretend the group and its members were previously suspended.
    rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
    for i in rsrc.nested().values():
        i.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
    scheduler.TaskRunner(rsrc.resume)()
    self.assertEqual((rsrc.RESUME, rsrc.COMPLETE), rsrc.state)

    rsrc.delete()
    self.m.VerifyAll()
def test_scaling_group_resume(self):
    """Resuming a single-member suspended group ends (RESUME, COMPLETE)."""
    t = template_format.parse(as_template)
    stack = utils.parse_stack(t, params=self.params)

    # Create the group with one member.
    self._stub_lb_reload(1)
    now = timeutils.utcnow()
    self._stub_meta_expected(now, 'ExactCapacity : 1')
    self._stub_create(1)
    self.m.ReplayAll()
    rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
    self.assertEqual(utils.PhysName(stack.name, rsrc.name),
                     rsrc.FnGetRefId())
    self.assertEqual(1, len(grouputils.get_member_names(rsrc)))
    self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
    self.m.VerifyAll()
    self.m.UnsetStubs()

    self._stub_resume()
    self.m.ReplayAll()

    # Pretend the group and its members were previously suspended.
    rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
    for i in rsrc.nested().values():
        i.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
    scheduler.TaskRunner(rsrc.resume)()
    self.assertEqual((rsrc.RESUME, rsrc.COMPLETE), rsrc.state)

    rsrc.delete()
    self.m.VerifyAll()
def test_normal_group(self):
    """Exercise every grouputils accessor against a two-member group."""
    group = mock.Mock()
    tmpl = template_format.parse(nested_stack)
    inner_stack = utils.parse_stack(tmpl)
    self.patchobject(group, 'nested', return_value=inner_stack)

    # group size
    self.assertEqual(2, grouputils.get_size(group))

    # member list comes back sorted by (created_time, name)
    by_age_then_name = sorted(six.itervalues(inner_stack),
                              key=lambda r: (r.created_time, r.name))
    self.assertEqual(by_age_then_name, grouputils.get_members(group))

    # refids, with and without exclusions
    self.assertEqual(['ID-r0', 'ID-r1'],
                     grouputils.get_member_refids(group))
    self.assertEqual(['ID-r0'],
                     grouputils.get_member_refids(group,
                                                  exclude=['ID-r1']))

    # names
    names = grouputils.get_member_names(group)
    self.assertEqual(['r0', 'r1'], names)

    # defn snippets as list: every member pairs with the same definition
    defn = rsrc_defn.ResourceDefinition(None,
                                        "OverwrittenFnGetRefIdType")
    self.assertEqual([(name, defn) for name in names],
                     grouputils.get_member_definitions(group))
def test_non_nested_resource(self):
    """All accessors degrade gracefully when nested() returns None."""
    group = mock.Mock()
    self.patchobject(group, 'nested', return_value=None)

    self.assertEqual(0, grouputils.get_size(group))
    # Every list-returning accessor yields an empty list.
    for accessor in (grouputils.get_members,
                     grouputils.get_member_refids,
                     grouputils.get_member_names):
        self.assertEqual([], accessor(group))
def test_non_nested_resource(self):
    """Accessors return empty results with no nested identifier or stack."""
    group = mock.Mock()
    group.nested_identifier.return_value = None
    group.nested.return_value = None

    self.assertEqual(0, grouputils.get_size(group))
    # Every list-returning accessor yields an empty list.
    for accessor in (grouputils.get_members,
                     grouputils.get_member_refids,
                     grouputils.get_member_names):
        self.assertEqual([], accessor(group))
def test_lb_reload_static_resolve(self):
    """LB reload passes a fully static-resolved snippet to handle_update.

    With AvailabilityZones set to Fn::GetAZs, the reload must hand
    handle_update a definition whose AZs are already resolved to the
    stubbed ['abc', 'xyz'] and whose Instances use the stubbed short id.
    """
    t = template_format.parse(as_template)
    properties = t['Resources']['ElasticLoadBalancer']['Properties']
    properties['AvailabilityZones'] = {'Fn::GetAZs': ''}

    self.m.StubOutWithMock(parser.Stack, 'get_availability_zones')
    parser.Stack.get_availability_zones().MultipleTimes().AndReturn(
        ['abc', 'xyz'])

    # Check that the Fn::GetAZs is correctly resolved
    expected = {
        u'Type': u'AWS::ElasticLoadBalancing::LoadBalancer',
        u'Properties': {
            'Instances': ['aaaabbbbcccc'],
            u'Listeners': [{
                u'InstancePort': u'80',
                u'LoadBalancerPort': u'80',
                u'Protocol': u'HTTP'
            }],
            u'AvailabilityZones': ['abc', 'xyz']
        }
    }

    # Fix the generated instance name so 'Instances' is predictable.
    self.m.StubOutWithMock(short_id, 'generate_id')
    short_id.generate_id().MultipleTimes().AndReturn('aaaabbbbcccc')

    now = timeutils.utcnow()
    self._stub_meta_expected(now, 'ExactCapacity : 1')
    self._stub_create(1)
    self.m.ReplayAll()
    stack = utils.parse_stack(t, params=self.params)

    lb = stack['ElasticLoadBalancer']
    self.m.StubOutWithMock(lb, 'handle_update')
    lb.handle_update(expected, mox.IgnoreArg(),
                     mox.IgnoreArg()).AndReturn(None)
    self.m.ReplayAll()

    rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
    self.assertEqual(utils.PhysName(stack.name, rsrc.name),
                     rsrc.FnGetRefId())
    self.assertEqual(1, len(grouputils.get_member_names(rsrc)))

    # Changing Cooldown triggers an in-place update (and the LB reload).
    props = copy.copy(rsrc.properties.data)
    props['Cooldown'] = '61'
    update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
                                                  rsrc.type(),
                                                  props)
    scheduler.TaskRunner(rsrc.update, update_snippet)()

    rsrc.delete()
    self.m.VerifyAll()
def test_group_with_failed_members(self):
    """Members in a FAILED state are excluded from every accessor."""
    group = mock.Mock()
    tmpl = template_format.parse(nested_stack)
    inner_stack = utils.parse_stack(tmpl)
    self.patchobject(group, 'nested', return_value=inner_stack)

    # Just failed for whatever reason
    failed = inner_stack.resources['r0']
    failed.status = failed.FAILED
    healthy = inner_stack.resources['r1']

    # Only the healthy member should be visible anywhere.
    self.assertEqual(1, grouputils.get_size(group))
    self.assertEqual([healthy], grouputils.get_members(group))
    self.assertEqual(['ID-r1'], grouputils.get_member_refids(group))
    self.assertEqual(['r1'], grouputils.get_member_names(group))
def test_lb_reload_static_resolve(self):
    """The LB reload receives a snippet with Fn::GetAZs fully resolved.

    Availability zones and the generated instance id are both stubbed so
    the exact snippet handed to handle_update can be asserted via mox.
    """
    t = template_format.parse(as_template)
    properties = t['Resources']['ElasticLoadBalancer']['Properties']
    properties['AvailabilityZones'] = {'Fn::GetAZs': ''}

    self.m.StubOutWithMock(parser.Stack, 'get_availability_zones')
    parser.Stack.get_availability_zones().MultipleTimes().AndReturn(
        ['abc', 'xyz'])

    # Check that the Fn::GetAZs is correctly resolved
    expected = {u'Type': u'AWS::ElasticLoadBalancing::LoadBalancer',
                u'Properties': {'Instances': ['aaaabbbbcccc'],
                                u'Listeners': [{u'InstancePort': u'80',
                                                u'LoadBalancerPort': u'80',
                                                u'Protocol': u'HTTP'}],
                                u'AvailabilityZones': ['abc', 'xyz']}}

    # Fix the generated instance name so 'Instances' is predictable.
    self.m.StubOutWithMock(short_id, 'generate_id')
    short_id.generate_id().MultipleTimes().AndReturn('aaaabbbbcccc')

    now = timeutils.utcnow()
    self._stub_meta_expected(now, 'ExactCapacity : 1')
    self._stub_create(1)
    self.m.ReplayAll()
    stack = utils.parse_stack(t, params=self.params)

    lb = stack['ElasticLoadBalancer']
    self.m.StubOutWithMock(lb, 'handle_update')
    lb.handle_update(expected, mox.IgnoreArg(),
                     mox.IgnoreArg()).AndReturn(None)
    self.m.ReplayAll()

    rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
    self.assertEqual(utils.PhysName(stack.name, rsrc.name),
                     rsrc.FnGetRefId())
    self.assertEqual(1, len(grouputils.get_member_names(rsrc)))

    # Changing Cooldown triggers an in-place update (and the LB reload).
    props = copy.copy(rsrc.properties.data)
    props['Cooldown'] = '61'
    update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
                                                  rsrc.type(),
                                                  props)
    scheduler.TaskRunner(rsrc.update, update_snippet)()

    rsrc.delete()
    self.m.VerifyAll()
def test_scaling_group_update_replace(self):
    """Changing AvailabilityZones forces replacement of the group.

    AvailabilityZones is not update-allowed, so rsrc.update must raise
    resource.UpdateReplace.
    """
    t = template_format.parse(as_template)
    stack = utils.parse_stack(t, params=self.params)

    # Create the group with one member.
    self._stub_lb_reload(1)
    now = timeutils.utcnow()
    self._stub_meta_expected(now, 'ExactCapacity : 1')
    self._stub_create(1)
    self.m.ReplayAll()
    rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
    self.assertEqual(utils.PhysName(stack.name, rsrc.name),
                     rsrc.FnGetRefId())
    self.assertEqual(1, len(grouputils.get_member_names(rsrc)))

    # Build an update snippet that only changes AvailabilityZones.
    props = copy.copy(rsrc.properties.data)
    props['AvailabilityZones'] = ['foo']
    update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
                                                  rsrc.type(),
                                                  props)
    updater = scheduler.TaskRunner(rsrc.update, update_snippet)
    self.assertRaises(resource.UpdateReplace, updater)

    rsrc.delete()
    self.m.VerifyAll()
def test_normal_group(self):
    """Verify size, member, refid and name accessors on a healthy group."""
    group = mock.Mock()
    t = template_format.parse(nested_stack)
    stack = utils.parse_stack(t)

    # group size
    self.patchobject(group, 'nested', return_value=stack)
    self.assertEqual(2, grouputils.get_size(group))

    # member list (sorted); list() instead of an identity comprehension
    members = list(six.itervalues(stack))
    expected = sorted(members, key=lambda r: (r.created_time, r.name))
    actual = grouputils.get_members(group)
    self.assertEqual(expected, actual)

    # refids
    actual_ids = grouputils.get_member_refids(group)
    self.assertEqual(['ID-r0', 'ID-r1'], actual_ids)
    partial_ids = grouputils.get_member_refids(group, exclude=['ID-r1'])
    self.assertEqual(['ID-r0'], partial_ids)

    # names
    self.assertEqual(['r0', 'r1'], grouputils.get_member_names(group))
def test_normal_group(self):
    """Verify size, member, refid and name accessors on a healthy group.

    Uses six.itervalues() rather than dict.itervalues(), which does not
    exist on Python 3 (and matches the sibling tests in this file).
    """
    group = mock.Mock()
    t = template_format.parse(nested_stack)
    stack = utils.parse_stack(t)

    # group size
    self.patchobject(group, 'nested', return_value=stack)
    self.assertEqual(2, grouputils.get_size(group))

    # member list (sorted)
    members = list(six.itervalues(stack))
    expected = sorted(members, key=lambda r: (r.created_time, r.name))
    actual = grouputils.get_members(group)
    self.assertEqual(expected, actual)

    # refids
    actual_ids = grouputils.get_member_refids(group)
    self.assertEqual(['ID-r0', 'ID-r1'], actual_ids)
    partial_ids = grouputils.get_member_refids(group, exclude=['ID-r1'])
    self.assertEqual(['ID-r0'], partial_ids)

    # names
    self.assertEqual(['r0', 'r1'], grouputils.get_member_names(group))
def _count_black_listed(self):
    """Return how many of the group's current member names are blacklisted."""
    current_names = set(grouputils.get_member_names(self))
    return len(self._name_blacklist() & current_names)
def test_scaling_policy_update(self):
    """An updated ScalingAdjustment takes effect on the next signal.

    Creates a group of 1, scales to 2, updates the policy's adjustment
    from 1 to 2, then (after faking cooldown expiry) signals again and
    expects the group to grow to 4.
    """
    t = template_format.parse(as_template)
    stack = utils.parse_stack(t, params=self.params)

    # Create initial group
    self._stub_lb_reload(1)
    now = timeutils.utcnow()
    self._stub_meta_expected(now, 'ExactCapacity : 1')
    self._stub_create(1)
    self.m.ReplayAll()
    rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
    stack.resources['WebServerGroup'] = rsrc
    self.assertEqual(1, len(grouputils.get_member_names(rsrc)))

    # Create initial scaling policy
    up_policy = self.create_scaling_policy(t, stack,
                                           'WebServerScaleUpPolicy')

    # Scale up one
    self._stub_lb_reload(2)
    self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
    self._stub_create(1)
    self.m.ReplayAll()

    # Trigger alarm
    up_policy.signal()
    self.assertEqual(2, len(grouputils.get_member_names(rsrc)))

    # Update scaling policy
    props = copy.copy(up_policy.properties.data)
    props['ScalingAdjustment'] = '2'
    update_snippet = rsrc_defn.ResourceDefinition(up_policy.name,
                                                  up_policy.type(),
                                                  props)
    scheduler.TaskRunner(up_policy.update, update_snippet)()
    self.assertEqual(2, up_policy.properties['ScalingAdjustment'])

    # Now move time on 61 seconds - Cooldown in template is 60
    # so this should trigger a scale-up
    previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
    self.m.VerifyAll()
    self.m.UnsetStubs()

    self.m.StubOutWithMock(resource.Resource, 'metadata_get')
    up_policy.metadata_get().AndReturn(previous_meta)
    rsrc.metadata_get().AndReturn(previous_meta)

    # stub for the metadata accesses while creating the two instances
    resource.Resource.metadata_get()
    resource.Resource.metadata_get()

    now = now + datetime.timedelta(seconds=61)
    self._stub_lb_reload(4, unset=False)
    self._stub_meta_expected(now, 'ChangeInCapacity : 2', 2)
    self._stub_create(2)
    self.m.ReplayAll()

    # Trigger alarm
    up_policy.signal()
    self.assertEqual(4, len(grouputils.get_member_names(rsrc)))

    rsrc.delete()
    self.m.VerifyAll()
def _count_black_listed(self):
    """Count the current member names that appear in the blacklist."""
    blacklisted = self._name_blacklist() & set(
        grouputils.get_member_names(self))
    return len(blacklisted)
def update_instance_group(self, init_template, updt_template,
                          num_updates_expected_on_updt,
                          num_creates_expected_on_updt,
                          num_deletes_expected_on_updt,
                          update_replace):
    """Drive a rolling update of JobServerGroup and check its effects.

    Creates a stack from init_template, updates it to updt_template, and
    asserts the expected number of updated/created/deleted members, that
    the launch configuration is replaced, and that member properties
    actually changed (ImageId when replacing, InstanceType otherwise).
    """
    # setup stack from the initial template
    tmpl = template_format.parse(init_template)
    stack = utils.parse_stack(tmpl)

    self.stub_KeypairConstraint_validate()
    self.stub_ImageConstraint_validate()
    self.stub_FlavorConstraint_validate()
    self.m.ReplayAll()

    stack.validate()
    self.m.VerifyAll()
    self.m.UnsetStubs()

    # test stack create
    size = int(stack['JobServerGroup'].properties['Size'])
    self._stub_grp_create(size)
    self.m.ReplayAll()
    stack.create()
    self.m.VerifyAll()
    self.assertEqual(('CREATE', 'COMPLETE'), stack.state)

    # test that update policy is loaded
    current_grp = stack['JobServerGroup']
    self.assertIn('RollingUpdate', current_grp.update_policy)
    current_policy = current_grp.update_policy['RollingUpdate']
    self.assertTrue(current_policy)
    self.assertTrue(len(current_policy) > 0)
    init_grp_tmpl = tmpl['Resources']['JobServerGroup']
    init_roll_updt = init_grp_tmpl['UpdatePolicy']['RollingUpdate']
    init_batch_sz = int(init_roll_updt['MaxBatchSize'])
    self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))

    # test that physical resource name of launch configuration is used
    conf = stack['JobServerConfig']
    conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack.name
    self.assertThat(conf.FnGetRefId(),
                    matchers.MatchesRegex(conf_name_pattern))

    # get launch conf name here to compare result after update
    conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')

    # test the number of instances created
    nested = stack['JobServerGroup'].nested()
    self.assertEqual(size, len(nested.resources))

    # clean up for next test
    self.m.UnsetStubs()

    # saves info from initial list of instances for comparison later
    init_instances = grouputils.get_members(current_grp)
    init_names = grouputils.get_member_names(current_grp)
    init_images = [(i.name, i.t['Properties']['ImageId'])
                   for i in init_instances]
    init_flavors = [(i.name, i.t['Properties']['InstanceType'])
                    for i in init_instances]

    # test stack update
    updated_tmpl = template_format.parse(updt_template)
    updated_stack = utils.parse_stack(updated_tmpl)
    new_grp_tmpl = updated_tmpl['Resources']['JobServerGroup']
    new_roll_updt = new_grp_tmpl['UpdatePolicy']['RollingUpdate']
    new_batch_sz = int(new_roll_updt['MaxBatchSize'])
    self.assertNotEqual(new_batch_sz, init_batch_sz)
    if update_replace:
        self._stub_grp_replace(size, size)
    else:
        self._stub_grp_update(num_creates_expected_on_updt,
                              num_deletes_expected_on_updt)
    self.stub_wallclock()
    self.m.ReplayAll()
    stack.update(updated_stack)
    self.m.VerifyAll()
    self.assertEqual(('UPDATE', 'COMPLETE'), stack.state)

    # test that the update policy is updated
    updated_grp = stack['JobServerGroup']
    self.assertIn('RollingUpdate', updated_grp.update_policy)
    updated_policy = updated_grp.update_policy['RollingUpdate']
    self.assertTrue(updated_policy)
    self.assertTrue(len(updated_policy) > 0)
    self.assertEqual(new_batch_sz, int(updated_policy['MaxBatchSize']))

    # test that the launch configuration is replaced
    updated_conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')
    self.assertNotEqual(conf_name, updated_conf_name)

    # test that the group size are the same
    updt_instances = grouputils.get_members(updated_grp)
    updt_names = grouputils.get_member_names(updated_grp)
    self.assertEqual(len(init_names), len(updt_names))

    # test that the appropriate number of instance names are the same
    matched_names = set(updt_names) & set(init_names)
    self.assertEqual(num_updates_expected_on_updt, len(matched_names))

    # test that the appropriate number of new instances are created
    self.assertEqual(num_creates_expected_on_updt,
                     len(set(updt_names) - set(init_names)))

    # test that the appropriate number of instances are deleted
    self.assertEqual(num_deletes_expected_on_updt,
                     len(set(init_names) - set(updt_names)))

    # test that the older instances are the ones being deleted
    # NOTE: previously this asserted the *list* was not a member of
    # updt_names, which was vacuously true; check each name instead.
    if num_deletes_expected_on_updt > 0:
        deletes_expected = init_names[:num_deletes_expected_on_updt]
        for name in deletes_expected:
            self.assertNotIn(name, updt_names)

    # test if instances are updated
    if update_replace:
        # test that the image id is changed for all instances
        updt_images = [(i.name, i.t['Properties']['ImageId'])
                       for i in updt_instances]
        self.assertEqual(0, len(set(updt_images) & set(init_images)))
    else:
        # test that instance type is changed for all instances
        updt_flavors = [(i.name, i.t['Properties']['InstanceType'])
                        for i in updt_instances]
        self.assertEqual(0, len(set(updt_flavors) & set(init_flavors)))
def test_scaling_policy_update(self):
    """Updating ScalingAdjustment changes how far the next signal scales.

    Group of 1 is scaled to 2, the policy adjustment is raised to 2,
    cooldown expiry is faked via stubbed metadata, and the next signal
    must take the group to 4.
    """
    t = template_format.parse(as_template)
    stack = utils.parse_stack(t, params=self.params)

    # Create initial group
    self._stub_lb_reload(1)
    now = timeutils.utcnow()
    self._stub_meta_expected(now, 'ExactCapacity : 1')
    self._stub_create(1)
    self.m.ReplayAll()
    rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
    stack.resources['WebServerGroup'] = rsrc
    self.assertEqual(1, len(grouputils.get_member_names(rsrc)))

    # Create initial scaling policy
    up_policy = self.create_scaling_policy(t, stack,
                                           'WebServerScaleUpPolicy')

    # Scale up one
    self._stub_lb_reload(2)
    self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
    self._stub_create(1)
    self.m.ReplayAll()

    # Trigger alarm
    up_policy.signal()
    self.assertEqual(2, len(grouputils.get_member_names(rsrc)))

    # Update scaling policy
    props = copy.copy(up_policy.properties.data)
    props['ScalingAdjustment'] = '2'
    update_snippet = rsrc_defn.ResourceDefinition(up_policy.name,
                                                  up_policy.type(),
                                                  props)
    scheduler.TaskRunner(up_policy.update, update_snippet)()
    self.assertEqual(2, up_policy.properties['ScalingAdjustment'])

    # Now move time on 61 seconds - Cooldown in template is 60
    # so this should trigger a scale-up
    previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
    self.m.VerifyAll()
    self.m.UnsetStubs()

    self.m.StubOutWithMock(resource.Resource, 'metadata_get')
    up_policy.metadata_get().AndReturn(previous_meta)
    rsrc.metadata_get().AndReturn(previous_meta)

    # stub for the metadata accesses while creating the two instances
    resource.Resource.metadata_get()
    resource.Resource.metadata_get()

    now = now + datetime.timedelta(seconds=61)
    self._stub_lb_reload(4, unset=False)
    self._stub_meta_expected(now, 'ChangeInCapacity : 2', 2)
    self._stub_create(2)
    self.m.ReplayAll()

    # Trigger alarm
    up_policy.signal()
    self.assertEqual(4, len(grouputils.get_member_names(rsrc)))

    rsrc.delete()
    self.m.VerifyAll()
def update_autoscaling_group(self, init_template, updt_template,
                             num_updates_expected_on_updt,
                             num_creates_expected_on_updt,
                             num_deletes_expected_on_updt,
                             num_reloads_expected_on_updt,
                             update_replace,
                             update_image_id=None):
    """Drive a rolling update of WebServerGroup and check its effects.

    Creates a stack from init_template, updates it to updt_template, and
    asserts the expected number of updated/created/deleted members, the
    expected LB reload count (via stubs), launch-config replacement, and
    that member properties changed (ImageId on replace, InstanceType
    otherwise).
    """
    # setup stack from the initial template
    tmpl = template_format.parse(init_template)
    stack = utils.parse_stack(tmpl)

    self.stub_KeypairConstraint_validate()
    self.stub_ImageConstraint_validate()
    self.stub_FlavorConstraint_validate()
    self.m.ReplayAll()

    stack.validate()
    self.m.VerifyAll()
    self.m.UnsetStubs()

    # test stack create
    size = int(stack['WebServerGroup'].properties['MinSize'])
    self._stub_grp_create(size)
    self.stub_ImageConstraint_validate()
    self.stub_FlavorConstraint_validate()
    self.m.ReplayAll()
    stack.create()
    self.m.VerifyAll()
    self.assertEqual(('CREATE', 'COMPLETE'), stack.state)

    # test that update policy is loaded
    current_grp = stack['WebServerGroup']
    self.assertIn('AutoScalingRollingUpdate', current_grp.update_policy)
    current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
    self.assertTrue(current_policy)
    self.assertTrue(len(current_policy) > 0)
    init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
    init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
    init_batch_sz = int(init_roll_updt['MaxBatchSize'])
    self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))

    # test that physical resource name of launch configuration is used
    conf = stack['LaunchConfig']
    conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
    self.assertThat(conf.FnGetRefId(),
                    matchers.MatchesRegex(conf_name_pattern))

    # get launch conf name here to compare result after update
    conf_name = self.get_launch_conf_name(stack, 'WebServerGroup')

    # test the number of instances created
    nested = stack['WebServerGroup'].nested()
    self.assertEqual(size, len(nested.resources))

    # clean up for next test
    self.m.UnsetStubs()

    # saves info from initial list of instances for comparison later
    init_instances = grouputils.get_members(current_grp)
    init_names = grouputils.get_member_names(current_grp)
    init_images = [(i.name, i.t['Properties']['ImageId'])
                   for i in init_instances]
    init_flavors = [(i.name, i.t['Properties']['InstanceType'])
                    for i in init_instances]

    # test stack update
    updated_tmpl = template_format.parse(updt_template)
    updated_stack = utils.parse_stack(updated_tmpl)
    new_grp_tmpl = updated_tmpl['Resources']['WebServerGroup']
    new_updt_pol = new_grp_tmpl['UpdatePolicy']['AutoScalingRollingUpdate']
    new_batch_sz = int(new_updt_pol['MaxBatchSize'])
    self.assertNotEqual(new_batch_sz, init_batch_sz)
    if update_replace:
        self._stub_grp_replace(size, size, num_reloads_expected_on_updt)
    else:
        self._stub_grp_update(num_creates_expected_on_updt,
                              num_deletes_expected_on_updt,
                              num_reloads_expected_on_updt)
    self.stub_wallclock()
    self.stub_ImageConstraint_validate()
    self.stub_KeypairConstraint_validate()
    self.stub_FlavorConstraint_validate()
    self.m.ReplayAll()
    stack.validate()
    stack.update(updated_stack)
    self.m.VerifyAll()
    self.assertEqual(('UPDATE', 'COMPLETE'), stack.state)

    # test that the update policy is updated
    updated_grp = stack['WebServerGroup']
    self.assertIn('AutoScalingRollingUpdate', updated_grp.update_policy)
    updated_policy = updated_grp.update_policy['AutoScalingRollingUpdate']
    self.assertTrue(updated_policy)
    self.assertTrue(len(updated_policy) > 0)
    self.assertEqual(new_batch_sz, int(updated_policy['MaxBatchSize']))

    # test that the launch configuration is replaced
    updated_conf_name = self.get_launch_conf_name(stack, 'WebServerGroup')
    self.assertNotEqual(conf_name, updated_conf_name)

    # test that the group size are the same
    # (the redundant early get_members() call was removed; the member
    # list is fetched once here, after the update completed)
    updt_instances = grouputils.get_members(updated_grp)
    updt_names = grouputils.get_member_names(updated_grp)
    self.assertEqual(len(init_names), len(updt_names))

    # test that appropriate number of instance names are the same
    matched_names = set(updt_names) & set(init_names)
    self.assertEqual(num_updates_expected_on_updt, len(matched_names))

    # test that the appropriate number of new instances are created
    self.assertEqual(num_creates_expected_on_updt,
                     len(set(updt_names) - set(init_names)))

    # test that the appropriate number of instances are deleted
    self.assertEqual(num_deletes_expected_on_updt,
                     len(set(init_names) - set(updt_names)))

    # test that the older instances are the ones being deleted
    # NOTE: previously this asserted the *list* was not a member of
    # updt_names, which was vacuously true; check each name instead.
    if num_deletes_expected_on_updt > 0:
        deletes_expected = init_names[:num_deletes_expected_on_updt]
        for name in deletes_expected:
            self.assertNotIn(name, updt_names)

    # test if instances are updated
    if update_replace:
        # test that the image id is changed for all instances
        updt_images = [(i.name, i.t['Properties']['ImageId'])
                       for i in updt_instances]
        self.assertEqual(0, len(set(updt_images) & set(init_images)))
    else:
        # test that instance type is changed for all instances
        updt_flavors = [(i.name, i.t['Properties']['InstanceType'])
                        for i in updt_instances]
        self.assertEqual(0, len(set(updt_flavors) & set(init_flavors)))