def test_scaling_group_update_ok_desired_remove(self):
    """Removing desired_capacity on update must keep existing members."""
    group_props = self.parsed['resources']['my-group']['properties']
    group_props['desired_capacity'] = 2

    rsrc = self.create_stack(self.parsed)['my-group']
    members_before = grouputils.get_members(rsrc)
    self.assertEqual(2, len(members_before))

    # Build an update definition identical to the current one except
    # that desired_capacity is no longer present.
    updated_props = copy.copy(rsrc.properties.data)
    del updated_props['desired_capacity']
    update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
                                                  updated_props)
    scheduler.TaskRunner(rsrc.update, update_snippet)()

    # The group must neither grow nor shrink, and the property is unset.
    self.assertEqual(members_before, grouputils.get_members(rsrc))
    self.assertIsNone(rsrc.properties['desired_capacity'])
def _lb_reload(self, exclude=frozenset(), refresh_data=True):
    """Reconfigure the group's load balancers with current member refids.

    Every load balancer named in the LoadBalancerNames property is
    reconfigured with the reference IDs of the group's non-failed
    members, except those listed in *exclude*.  When *refresh_data* is
    True, cached output / nested-stack data is invalidated first so the
    member list is re-read.
    """
    lb_names = self.properties.get(self.LOAD_BALANCER_NAMES) or []
    if lb_names:
        if refresh_data:
            # Drop cached stack outputs so get_output() re-fetches them.
            self._outputs = None
        try:
            all_refids = self.get_output(self.OUTPUT_MEMBER_IDS)
        except (exception.NotFound,
                exception.TemplateOutputError) as op_err:
            # The member-ids output may not exist yet (e.g. stack created
            # by an older version); fall back to walking the nested stack.
            LOG.debug('Falling back to grouputils due to %s', op_err)
            if refresh_data:
                self._nested = None
            instances = grouputils.get_members(self)
            all_refids = {i.name: i.FnGetRefId() for i in instances}
            names = [i.name for i in instances]
        else:
            # Output path succeeded: take the healthy member names from
            # the cached group data.
            group_data = self._group_data(refresh=refresh_data)
            names = group_data.member_names(include_failed=False)
        # Keep only members that are both known and not excluded.
        id_list = [
            all_refids[n] for n in names
            if n not in exclude and n in all_refids
        ]
        lbs = [self.stack[name] for name in lb_names]
        lbutils.reconfigure_loadbalancers(lbs, id_list)
def _resolve_attribute(self, name):
    """Resolves the resource's attributes.

    Heat extension: "InstanceList" returns comma delimited list of server
    ip addresses.
    """
    if name == self.INSTANCE_LIST:
        def listify(ips):
            # An empty member list resolves to None rather than ''.
            return u','.join(ips) or None

        try:
            output = self.get_output(name)
        except (exception.NotFound,
                exception.TemplateOutputError) as op_err:
            # Output missing (stack may predate it): fall through to the
            # grouputils-based fallback below.
            LOG.debug('Falling back to grouputils due to %s', op_err)
        else:
            if isinstance(output, dict):
                # New-style output maps member name -> IP; keep only
                # current, non-failed members, in member order.
                names = self._group_data().member_names(False)
                return listify(output[n] for n in names if n in output)
            else:
                # Old (list-style) output cannot be matched to members;
                # use the fallback below instead.
                LOG.debug('Falling back to grouputils due to '
                          'old (list-style) output format')
        # Fallback: query each member directly, substituting 0.0.0.0 for
        # any address that is not (yet) resolvable.
        return listify(inst.FnGetAtt('PublicIp') or '0.0.0.0'
                       for inst in grouputils.get_members(self))
def test_scaling_group_create_error(self):
    """Group create ends in (CREATE, FAILED) when instance creation raises.

    Instance.handle_create is stubbed to raise, so the scaling group's
    create must fail and leave no surviving members.
    """
    t = template_format.parse(as_template)
    stack = utils.parse_stack(t, params=self.params)
    # Stub instance creation to blow up; constraint validations are
    # stubbed so template validation itself still passes.
    self.m.StubOutWithMock(instance.Instance, 'handle_create')
    self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
    instance.Instance.handle_create().AndRaise(Exception)
    self.stub_ImageConstraint_validate()
    self.stub_FlavorConstraint_validate()
    self.stub_SnapshotConstraint_validate()
    self.m.ReplayAll()
    # The launch configuration itself is fine and creates normally.
    conf = stack['LaunchConfig']
    self.assertIsNone(conf.validate())
    scheduler.TaskRunner(conf.create)()
    self.assertEqual((conf.CREATE, conf.COMPLETE), conf.state)
    # The group create must surface the member failure.
    rsrc = stack['WebServerGroup']
    self.assertIsNone(rsrc.validate())
    self.assertRaises(exception.ResourceFailure,
                      scheduler.TaskRunner(rsrc.create))
    self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
    # No members should remain after the failed create.
    self.assertEqual([], grouputils.get_members(rsrc))
    self.m.VerifyAll()
def changing_instances(tmpl):
    """Return refids of members that *tmpl* would update or delete."""
    members = grouputils.get_members(self)
    existing_defs = {(m.name, m.t) for m in members}
    new_defs = set(tmpl.resource_definitions(self.nested()).items())
    # Symmetric difference covers both instances to be updated and
    # instances to be deleted.
    changed_names = {name for name, defn in existing_defs ^ new_defs}
    return {m.FnGetRefId() for m in members if m.name in changed_names}
def test_normal_group(self):
    """A healthy two-member group reports size, members, refids, names."""
    group = mock.Mock()
    t = template_format.parse(nested_stack)
    stack = utils.parse_stack(t)
    self.patchobject(group, 'nested', return_value=stack)

    # Size comes straight from the nested stack.
    self.assertEqual(2, grouputils.get_size(group))

    # Members are returned sorted by (created_time, name).
    by_creation = sorted(six.itervalues(stack),
                         key=lambda r: (r.created_time, r.name))
    self.assertEqual(by_creation, grouputils.get_members(group))

    # Reference IDs, with and without an exclusion.
    self.assertEqual(['ID-r0', 'ID-r1'],
                     grouputils.get_member_refids(group))
    self.assertEqual(['ID-r0'],
                     grouputils.get_member_refids(group,
                                                  exclude=['ID-r1']))

    # Member names.
    names = grouputils.get_member_names(group)
    self.assertEqual(['r0', 'r1'], names)

    # Definition snippets: one per member, all sharing the same template.
    expected_defn = rsrc_defn.ResourceDefinition(
        None, "OverwrittenFnGetRefIdType")
    self.assertEqual([(n, expected_defn) for n in names],
                     grouputils.get_member_definitions(group))
def _resolve_attribute(self, name):
    """Resolves the resource's attributes.

    Heat extension: "InstanceList" returns comma delimited list of server
    ip addresses.
    """
    if name == self.INSTANCE_LIST:
        # FnGetAtt('PublicIp') can be None for a member whose address is
        # not (yet) assigned; joining None raises TypeError, so default
        # to 0.0.0.0 as the guarded variant of this method does.
        ips = (inst.FnGetAtt('PublicIp') or '0.0.0.0'
               for inst in grouputils.get_members(self))
        # An empty group resolves to None rather than an empty string.
        return u','.join(ips) or None
def test_non_nested_resource(self):
    """Every grouputils accessor degrades gracefully without a nested stack."""
    group = mock.Mock()
    self.patchobject(group, 'nested', return_value=None)

    self.assertEqual(0, grouputils.get_size(group))
    for accessor in (grouputils.get_members,
                     grouputils.get_member_refids,
                     grouputils.get_member_names):
        self.assertEqual([], accessor(group))
def _resolve_attribute(self, name):
    """Resolves the resource's attributes.

    heat extension: "InstanceList" returns comma delimited list of server
    ip addresses.
    """
    if name != self.INSTANCE_LIST:
        # Unknown attributes resolve to None.
        return None
    addresses = [inst.FnGetAtt("PublicIp")
                 for inst in grouputils.get_members(self)]
    # An empty group resolves to None rather than an empty string.
    return u",".join(addresses) or None
def test_scaling_group_update_ok_maxsize(self):
    """Lowering max_size above the current size updates without adjusting."""
    group_props = self.parsed['resources']['my-group']['properties']
    group_props['min_size'] = 1
    group_props['max_size'] = 3

    rsrc = self.create_stack(self.parsed)['my-group']
    members_before = grouputils.get_members(rsrc)
    self.assertEqual(1, len(members_before))

    # Reduce the max size to 2; the update should complete without
    # adjusting the group.
    new_props = copy.copy(rsrc.properties.data)
    new_props['max_size'] = 2
    defn = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), new_props)
    scheduler.TaskRunner(rsrc.update, defn)()

    self.assertEqual(members_before, grouputils.get_members(rsrc))
    self.assertEqual(2, rsrc.properties['max_size'])
def _resolve_attribute(self, name):
    """Resolves the resource's attributes.

    Heat extension: "InstanceList" returns comma delimited list of server
    ip addresses.
    """
    if name != self.INSTANCE_LIST:
        # Unknown attributes resolve to None.
        return None
    # Substitute 0.0.0.0 for members whose address is not resolvable.
    addresses = [member.FnGetAtt('PublicIp') or '0.0.0.0'
                 for member in grouputils.get_members(self)]
    # An empty group resolves to None rather than an empty string.
    return u','.join(addresses) or None
def test_non_nested_resource(self):
    """grouputils accessors degrade gracefully when no nested stack exists."""
    group = mock.Mock()
    group.nested_identifier.return_value = None
    group.nested.return_value = None

    self.assertEqual(0, grouputils.get_size(group))
    for accessor in (grouputils.get_members,
                     grouputils.get_member_refids,
                     grouputils.get_member_names):
        self.assertEqual([], accessor(group))
def test_scaling_adjust_down_empty(self):
    """A group whose min_size is lowered to 0 can shrink to no members."""
    group_props = self.parsed['resources']['my-group']['properties']
    group_props['min_size'] = 1
    group_props['max_size'] = 1

    rsrc = self.create_stack(self.parsed)['my-group']
    members_before = grouputils.get_members(rsrc)
    self.assertEqual(1, len(members_before))

    # Reduce the min size to 0; the update itself should complete
    # without adjusting the group.
    new_props = copy.copy(rsrc.properties.data)
    new_props['min_size'] = 0
    defn = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), new_props)
    scheduler.TaskRunner(rsrc.update, defn)()
    self.assertEqual(members_before, grouputils.get_members(rsrc))

    # Trigger an adjustment down to 0: no instances should remain.
    rsrc.adjust(-1)
    self.assertEqual(0, grouputils.get_size(rsrc))
def FnGetAtt(self, key, *path):
    """Resolve a group attribute.

    CurrentSize returns the member count.  With a non-empty *path*,
    Outputs returns {member name: attribute} and OutputsList returns
    the attribute values alone.  Any other key raises
    InvalidTemplateAttribute.
    """
    if key == self.CURRENT_SIZE:
        return grouputils.get_size(self)
    if path:
        # Lazily pair each member name with the requested attribute;
        # only the matching branch below consumes the generator.
        member_attrs = ((m.name, m.FnGetAtt(*path))
                        for m in grouputils.get_members(self))
        if key == self.OUTPUTS:
            return dict(member_attrs)
        if key == self.OUTPUTS_LIST:
            return [attr for _name, attr in member_attrs]
    raise exception.InvalidTemplateAttribute(resource=self.name, key=key)
def test_group_with_failed_members(self):
    """Members in a FAILED state are excluded from listings and refids."""
    group = mock.Mock()
    t = template_format.parse(nested_stack)
    stack = utils.parse_stack(t)
    self.patchobject(group, 'nested', return_value=stack)

    # Mark r0 as failed (for whatever reason); r1 stays healthy.
    failed = stack.resources['r0']
    failed.status = failed.FAILED
    healthy = stack.resources['r1']

    self.assertEqual([healthy], grouputils.get_members(group))
    self.assertEqual(['ID-r1'], grouputils.get_member_refids(group))
def get_attribute(self, key, *path):
    """Resolve a group attribute via the nested stack.

    Handles the current size, member refids (list and map forms),
    per-member outputs (dict and list forms) and "resource.<name>..."
    lookups; any other key raises InvalidTemplateAttribute.
    """
    if key == self.CURRENT_SIZE:
        return grouputils.get_size(self)
    if key == self.REFS:
        return grouputils.get_member_refids(self)
    if key == self.REFS_MAP:
        return {member.name: member.resource_id
                for member in grouputils.get_members(self)}
    if path:
        # Lazily pair each member name with the requested attribute;
        # only the matching branch consumes the generator.
        pairs = ((member.name, member.FnGetAtt(*path))
                 for member in grouputils.get_members(self))
        if key == self.OUTPUTS:
            return dict(pairs)
        if key == self.OUTPUTS_LIST:
            return [attr for _name, attr in pairs]
    if key.startswith("resource."):
        return grouputils.get_nested_attrs(self, key, True, *path)
    raise exception.InvalidTemplateAttribute(resource=self.name, key=key)
def test_normal_group(self):
    """Members of a healthy group come back sorted, with their refids."""
    group = mock.Mock()
    t = template_format.parse(nested_stack)
    stack = utils.parse_stack(t)
    group.nested.return_value = stack

    # Member list is sorted by (created_time, name).
    by_creation = sorted(stack.values(),
                         key=lambda r: (r.created_time, r.name))
    self.assertEqual(by_creation, grouputils.get_members(group))

    # Reference IDs follow the same order.
    self.assertEqual(['ID-r0', 'ID-r1'],
                     grouputils.get_member_refids(group))
def test_normal_group(self):
    """Member list is sorted; refids honour the exclude filter."""
    group = mock.Mock()
    t = template_format.parse(nested_stack)
    stack = utils.parse_stack(t)
    group.nested.return_value = stack

    # Member list is sorted by (created_time, name).
    by_creation = sorted(six.itervalues(stack),
                         key=lambda r: (r.created_time, r.name))
    self.assertEqual(by_creation, grouputils.get_members(group))

    # Reference IDs, with and without an exclusion.
    self.assertEqual(['ID-r0', 'ID-r1'],
                     grouputils.get_member_refids(group))
    self.assertEqual(['ID-r0'],
                     grouputils.get_member_refids(group,
                                                  exclude=['ID-r1']))
def _create_template(self, num_instances, num_replace=0,
                     template_version=('HeatTemplateFormatVersion',
                                       '2012-12-12')):
    """Create a template to represent autoscaled instances.

    Also see heat.scaling.template.member_definitions.
    """
    instance_definition = self._get_resource_definition()
    # Include failed members so their definitions are replaced rather
    # than silently dropped from the new template.
    old_resources = grouputils.get_member_definitions(self,
                                                      include_failed=True)
    # WRS: Detect a scale down. Issue a vote
    # If any vote is rejected, set new_resources to be same size as old
    existing = grouputils.get_members(self)
    if num_instances < len(existing):
        LOG.info("WRS downscale detected, vote initiated")
        # Ask each member slated for removal; a single rejection cancels
        # the whole downscale by restoring the original instance count.
        for i in range(num_instances, len(existing)):
            if existing[i].wrs_vote() is False:
                LOG.info("WRS downscale blocked by vote")
                num_instances = len(existing)
                break
    definitions = list(template.member_definitions(
        old_resources, instance_definition, num_instances, num_replace,
        short_id.generate_id, delete_oldest=False))
    child_env = environment.get_child_environment(
        self.stack.env, self.child_params(),
        item_to_remove=self.resource_info)
    tmpl = template.make_template(definitions, version=template_version,
                                  child_env=child_env)
    # Subclasses use HOT templates
    att_func = 'get_attr'
    if att_func not in tmpl.functions:
        # CFN-style template: fall back to the CFN attribute function.
        att_func = 'Fn::GetAtt'
    get_attr = functools.partial(tmpl.functions[att_func], None, att_func)
    # Re-export each member's outputs from the generated nested template.
    for odefn in self._nested_output_defns([k for k, d in definitions],
                                           get_attr):
        tmpl.add_output(odefn)
    return tmpl
def test_vpc_zone_identifier(self):
    """VPCZoneIdentifier's subnet is propagated to each member's SubnetId."""
    t = template_format.parse(as_template)
    properties = t['Resources']['WebServerGroup']['Properties']
    properties['VPCZoneIdentifier'] = ['xxxx']
    stack = utils.parse_stack(t, params=self.params)
    # Expect one LB reload, one metadata update and one instance create
    # for the single member.
    self._stub_lb_reload(1)
    now = timeutils.utcnow()
    self._stub_meta_expected(now, 'ExactCapacity : 1')
    self._stub_create(1)
    self.m.ReplayAll()
    rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
    instances = grouputils.get_members(rsrc)
    self.assertEqual(1, len(instances))
    # The subnet listed in VPCZoneIdentifier becomes the instance SubnetId.
    self.assertEqual('xxxx', instances[0].properties['SubnetId'])
    rsrc.delete()
    self.m.VerifyAll()
def test_normal_group(self):
    """Size, sorted members, refids and names of a healthy group.

    Uses stack.values() rather than the Python-2-only stack.itervalues()
    so the test also runs under Python 3.
    """
    group = mock.Mock()
    t = template_format.parse(nested_stack)
    stack = utils.parse_stack(t)

    # group size
    self.patchobject(group, 'nested', return_value=stack)
    self.assertEqual(2, grouputils.get_size(group))

    # member list (sorted by creation time, then name)
    members = list(stack.values())
    expected = sorted(members, key=lambda r: (r.created_time, r.name))
    actual = grouputils.get_members(group)
    self.assertEqual(expected, actual)

    # refids, with and without an exclusion
    actual_ids = grouputils.get_member_refids(group)
    self.assertEqual(['ID-r0', 'ID-r1'], actual_ids)
    partial_ids = grouputils.get_member_refids(group, exclude=['ID-r1'])
    self.assertEqual(['ID-r0'], partial_ids)

    # names
    self.assertEqual(['r0', 'r1'], grouputils.get_member_names(group))
def _lb_reload(self, exclude=frozenset(), refresh_data=True):
    """Push the group's current member refids to its load balancers.

    Reconfigures each load balancer named in LoadBalancerNames with the
    refids of the group's non-failed members, skipping names listed in
    *exclude*.  *refresh_data* forces cached outputs / nested-stack data
    to be invalidated and re-read first.
    """
    lb_names = self.properties.get(self.LOAD_BALANCER_NAMES) or []
    if lb_names:
        if refresh_data:
            # Invalidate cached outputs so get_output() re-fetches them.
            self._outputs = None
        try:
            all_refids = self.get_output(self.OUTPUT_MEMBER_IDS)
        except (exception.NotFound,
                exception.TemplateOutputError) as op_err:
            # Member-ids output unavailable (e.g. pre-upgrade stack):
            # fall back to reading the nested stack directly.
            LOG.debug('Falling back to grouputils due to %s', op_err)
            if refresh_data:
                self._nested = None
            instances = grouputils.get_members(self)
            all_refids = {i.name: i.FnGetRefId() for i in instances}
            names = [i.name for i in instances]
        else:
            # Output path succeeded: take healthy member names from the
            # cached group data.
            group_data = self._group_data(refresh=refresh_data)
            names = group_data.member_names(include_failed=False)
        # Keep only members that are both known and not excluded.
        id_list = [all_refids[n] for n in names
                   if n not in exclude and n in all_refids]
        lbs = [self.stack[name] for name in lb_names]
        lbutils.reconfigure_loadbalancers(lbs, id_list)
def _get_instance_templates(self):
    """Return (name, template) pairs for each current group member."""
    members = grouputils.get_members(self)
    return [(member.name, member.t) for member in members]
def _get_resources(self):
    """Return (name, rendered HOT template) pairs for each group member."""
    members = grouputils.get_members(self)
    return [(m.name, m.t.render_hot()) for m in members]
def update_instance_group(self, init_template, updt_template,
                          num_updates_expected_on_updt,
                          num_creates_expected_on_updt,
                          num_deletes_expected_on_updt,
                          update_replace):
    """Drive a rolling update of an InstanceGroup and verify the outcome.

    Creates a stack from *init_template*, updates it to *updt_template*,
    and asserts the expected numbers of updated/created/deleted members.
    *update_replace* selects whether the update replaces instances (image
    change) or updates them in place (flavor change).
    """
    # setup stack from the initial template
    tmpl = template_format.parse(init_template)
    stack = utils.parse_stack(tmpl)
    self.stub_KeypairConstraint_validate()
    self.stub_ImageConstraint_validate()
    self.stub_FlavorConstraint_validate()
    self.m.ReplayAll()
    stack.validate()
    self.m.VerifyAll()
    self.m.UnsetStubs()

    # test stack create
    size = int(stack['JobServerGroup'].properties['Size'])
    self._stub_grp_create(size)
    self.m.ReplayAll()
    stack.create()
    self.m.VerifyAll()
    self.assertEqual(('CREATE', 'COMPLETE'), stack.state)

    # test that update policy is loaded
    current_grp = stack['JobServerGroup']
    self.assertIn('RollingUpdate', current_grp.update_policy)
    current_policy = current_grp.update_policy['RollingUpdate']
    self.assertTrue(current_policy)
    self.assertTrue(len(current_policy) > 0)
    init_grp_tmpl = tmpl['Resources']['JobServerGroup']
    init_roll_updt = init_grp_tmpl['UpdatePolicy']['RollingUpdate']
    init_batch_sz = int(init_roll_updt['MaxBatchSize'])
    self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))

    # test that physical resource name of launch configuration is used
    conf = stack['JobServerConfig']
    conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack.name
    self.assertThat(conf.FnGetRefId(),
                    matchers.MatchesRegex(conf_name_pattern))

    # get launch conf name here to compare result after update
    conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')

    # test the number of instances created
    nested = stack['JobServerGroup'].nested()
    self.assertEqual(size, len(nested.resources))

    # clean up for next test
    self.m.UnsetStubs()

    # saves info from initial list of instances for comparison later
    init_instances = grouputils.get_members(current_grp)
    init_names = grouputils.get_member_names(current_grp)
    init_images = [(i.name, i.t['Properties']['ImageId'])
                   for i in init_instances]
    init_flavors = [(i.name, i.t['Properties']['InstanceType'])
                    for i in init_instances]

    # test stack update
    updated_tmpl = template_format.parse(updt_template)
    updated_stack = utils.parse_stack(updated_tmpl)
    new_grp_tmpl = updated_tmpl['Resources']['JobServerGroup']
    new_roll_updt = new_grp_tmpl['UpdatePolicy']['RollingUpdate']
    new_batch_sz = int(new_roll_updt['MaxBatchSize'])
    # Sanity check: the update must actually change the batch size.
    self.assertNotEqual(new_batch_sz, init_batch_sz)
    if update_replace:
        self._stub_grp_replace(size, size)
    else:
        self._stub_grp_update(num_creates_expected_on_updt,
                              num_deletes_expected_on_updt)
    self.stub_wallclock()
    self.m.ReplayAll()
    stack.update(updated_stack)
    self.m.VerifyAll()
    self.assertEqual(('UPDATE', 'COMPLETE'), stack.state)

    # test that the update policy is updated
    updated_grp = stack['JobServerGroup']
    self.assertIn('RollingUpdate', updated_grp.update_policy)
    updated_policy = updated_grp.update_policy['RollingUpdate']
    self.assertTrue(updated_policy)
    self.assertTrue(len(updated_policy) > 0)
    self.assertEqual(new_batch_sz, int(updated_policy['MaxBatchSize']))

    # test that the launch configuration is replaced
    updated_conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')
    self.assertNotEqual(conf_name, updated_conf_name)

    # test that the group size are the same
    updt_instances = grouputils.get_members(updated_grp)
    updt_names = grouputils.get_member_names(updated_grp)
    self.assertEqual(len(init_names), len(updt_names))

    # test that the appropriate number of instance names are the same
    matched_names = set(updt_names) & set(init_names)
    self.assertEqual(num_updates_expected_on_updt, len(matched_names))

    # test that the appropriate number of new instances are created
    self.assertEqual(num_creates_expected_on_updt,
                     len(set(updt_names) - set(init_names)))

    # test that the appropriate number of instances are deleted
    self.assertEqual(num_deletes_expected_on_updt,
                     len(set(init_names) - set(updt_names)))

    # test that the older instances are the ones being deleted
    if num_deletes_expected_on_updt > 0:
        deletes_expected = init_names[:num_deletes_expected_on_updt]
        self.assertNotIn(deletes_expected, updt_names)

    # test if instances are updated
    if update_replace:
        # test that the image id is changed for all instances
        updt_images = [(i.name, i.t['Properties']['ImageId'])
                       for i in updt_instances]
        self.assertEqual(0, len(set(updt_images) & set(init_images)))
    else:
        # test that instance type is changed for all instances
        updt_flavors = [(i.name, i.t['Properties']['InstanceType'])
                        for i in updt_instances]
        self.assertEqual(0, len(set(updt_flavors) & set(init_flavors)))
def update_autoscaling_group(self, init_template, updt_template,
                             num_updates_expected_on_updt,
                             num_creates_expected_on_updt,
                             num_deletes_expected_on_updt,
                             num_reloads_expected_on_updt,
                             update_replace,
                             update_image_id=None):
    """Drive a rolling update of an AutoScalingGroup and verify the outcome.

    Creates a stack from *init_template*, updates it to *updt_template*,
    and asserts the expected numbers of updated/created/deleted members
    and load-balancer reloads.  *update_replace* selects replacement
    (image change) vs in-place update (flavor change).

    NOTE(review): *update_image_id* is not referenced in this body —
    presumably kept for caller compatibility; confirm before removing.
    """
    # setup stack from the initial template
    tmpl = template_format.parse(init_template)
    stack = utils.parse_stack(tmpl)
    self.stub_KeypairConstraint_validate()
    self.stub_ImageConstraint_validate()
    self.stub_FlavorConstraint_validate()
    self.m.ReplayAll()
    stack.validate()
    self.m.VerifyAll()
    self.m.UnsetStubs()

    # test stack create
    size = int(stack['WebServerGroup'].properties['MinSize'])
    self._stub_grp_create(size)
    self.stub_ImageConstraint_validate()
    self.stub_FlavorConstraint_validate()
    self.m.ReplayAll()
    stack.create()
    self.m.VerifyAll()
    self.assertEqual(('CREATE', 'COMPLETE'), stack.state)

    # test that update policy is loaded
    current_grp = stack['WebServerGroup']
    self.assertTrue(
        'AutoScalingRollingUpdate' in current_grp.update_policy)
    current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
    self.assertTrue(current_policy)
    self.assertTrue(len(current_policy) > 0)
    init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
    init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
    init_batch_sz = int(init_roll_updt['MaxBatchSize'])
    self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))

    # test that physical resource name of launch configuration is used
    conf = stack['LaunchConfig']
    conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
    self.assertThat(conf.FnGetRefId(),
                    matchers.MatchesRegex(conf_name_pattern))

    # get launch conf name here to compare result after update
    conf_name = self.get_launch_conf_name(stack, 'WebServerGroup')

    # test the number of instances created
    nested = stack['WebServerGroup'].nested()
    self.assertEqual(size, len(nested.resources))

    # clean up for next test
    self.m.UnsetStubs()

    # saves info from initial list of instances for comparison later
    init_instances = grouputils.get_members(current_grp)
    init_names = grouputils.get_member_names(current_grp)
    init_images = [(i.name, i.t['Properties']['ImageId'])
                   for i in init_instances]
    init_flavors = [(i.name, i.t['Properties']['InstanceType'])
                    for i in init_instances]

    # test stack update
    updated_tmpl = template_format.parse(updt_template)
    updated_stack = utils.parse_stack(updated_tmpl)
    new_grp_tmpl = updated_tmpl['Resources']['WebServerGroup']
    new_updt_pol = new_grp_tmpl['UpdatePolicy']['AutoScalingRollingUpdate']
    new_batch_sz = int(new_updt_pol['MaxBatchSize'])
    # Sanity check: the update must actually change the batch size.
    self.assertNotEqual(new_batch_sz, init_batch_sz)
    if update_replace:
        self._stub_grp_replace(size, size, num_reloads_expected_on_updt)
    else:
        self._stub_grp_update(num_creates_expected_on_updt,
                              num_deletes_expected_on_updt,
                              num_reloads_expected_on_updt)
    self.stub_wallclock()
    self.stub_ImageConstraint_validate()
    self.stub_KeypairConstraint_validate()
    self.stub_FlavorConstraint_validate()
    self.m.ReplayAll()
    stack.validate()
    stack.update(updated_stack)
    self.m.VerifyAll()
    self.assertEqual(('UPDATE', 'COMPLETE'), stack.state)

    # test that the update policy is updated
    updated_grp = stack['WebServerGroup']
    updt_instances = grouputils.get_members(updated_grp)
    self.assertTrue(
        'AutoScalingRollingUpdate' in updated_grp.update_policy)
    updated_policy = updated_grp.update_policy['AutoScalingRollingUpdate']
    self.assertTrue(updated_policy)
    self.assertTrue(len(updated_policy) > 0)
    self.assertEqual(new_batch_sz, int(updated_policy['MaxBatchSize']))

    # test that the launch configuration is replaced
    updated_conf_name = self.get_launch_conf_name(stack, 'WebServerGroup')
    self.assertNotEqual(conf_name, updated_conf_name)

    # test that the group size are the same
    updt_instances = grouputils.get_members(updated_grp)
    updt_names = grouputils.get_member_names(updated_grp)
    self.assertEqual(len(init_names), len(updt_names))

    # test that appropriate number of instance names are the same
    matched_names = set(updt_names) & set(init_names)
    self.assertEqual(num_updates_expected_on_updt, len(matched_names))

    # test that the appropriate number of new instances are created
    self.assertEqual(num_creates_expected_on_updt,
                     len(set(updt_names) - set(init_names)))

    # test that the appropriate number of instances are deleted
    self.assertEqual(num_deletes_expected_on_updt,
                     len(set(init_names) - set(updt_names)))

    # test that the older instances are the ones being deleted
    if num_deletes_expected_on_updt > 0:
        deletes_expected = init_names[:num_deletes_expected_on_updt]
        self.assertNotIn(deletes_expected, updt_names)

    # test if instances are updated
    if update_replace:
        # test that the image id is changed for all instances
        updt_images = [(i.name, i.t['Properties']['ImageId'])
                       for i in updt_instances]
        self.assertEqual(0, len(set(updt_images) & set(init_images)))
    else:
        # test that instance type is changed for all instances
        updt_flavors = [(i.name, i.t['Properties']['InstanceType'])
                        for i in updt_instances]
        self.assertEqual(0, len(set(updt_flavors) & set(init_flavors)))
def get_attribute(self, key, *path):
    """Resolve a group attribute, preferring nested-stack outputs.

    First maps the requested key to the canonical output name (refs ->
    refs_map, outputs_list -> outputs, "resource.<n>..." -> outputs or
    refs_map) and tries get_output().  If the output is missing or
    malformed, falls back to reading the nested stack via grouputils.
    Unrecognised keys raise InvalidTemplateAttribute.
    """
    if key == self.CURRENT_SIZE:
        return grouputils.get_size(self)
    op_key = key
    op_path = path
    keycomponents = None
    # Canonicalise the key to the output that actually stores the data.
    if key == self.OUTPUTS_LIST:
        op_key = self.OUTPUTS
    elif key == self.REFS:
        op_key = self.REFS_MAP
    elif key.startswith("resource."):
        # "resource.<index>[.<attr>]": an attribute lookup goes through
        # the outputs map, a bare member lookup through the refs map.
        keycomponents = key.split('.', 2)
        if len(keycomponents) > 2:
            op_path = (keycomponents[2],) + path
        op_key = self.OUTPUTS if op_path else self.REFS_MAP
    try:
        output = self.get_output(self._attribute_output_name(op_key,
                                                             *op_path))
    except (exception.NotFound,
            exception.TemplateOutputError) as op_err:
        # Output unavailable: answer from the nested stack directly.
        LOG.debug('Falling back to grouputils due to %s', op_err)
        if key == self.REFS:
            return grouputils.get_member_refids(self)
        if key == self.REFS_MAP:
            members = grouputils.get_members(self)
            return {m.name: m.resource_id for m in members}
        if path and key in {self.OUTPUTS, self.OUTPUTS_LIST}:
            members = grouputils.get_members(self)
            attrs = ((rsrc.name, rsrc.FnGetAtt(*path))
                     for rsrc in members)
            if key == self.OUTPUTS:
                return dict(attrs)
            if key == self.OUTPUTS_LIST:
                return [value for name, value in attrs]
        if keycomponents is not None:
            return grouputils.get_nested_attrs(self, key, True, *path)
    else:
        # Output path succeeded: filter it down to the current,
        # non-failed member names.
        if key in {self.REFS, self.REFS_MAP}:
            names = self._group_data().member_names(False)
            if key == self.REFS:
                return [output[n] for n in names if n in output]
            else:
                return {n: output[n] for n in names if n in output}
        if path and key in {self.OUTPUTS_LIST, self.OUTPUTS}:
            names = self._group_data().member_names(False)
            if key == self.OUTPUTS_LIST:
                return [output[n] for n in names if n in output]
            else:
                return {n: output[n] for n in names if n in output}
        if keycomponents is not None:
            # "resource.<index>...": index into the ordered member names.
            # NOTE(review): int(index) may raise ValueError for a
            # non-numeric index, which is not caught here — confirm
            # callers always pass numeric indices.
            names = list(self._group_data().member_names(False))
            index = keycomponents[1]
            try:
                resource_name = names[int(index)]
                return output[resource_name]
            except (IndexError, KeyError):
                raise exception.NotFound(_("Member '%(mem)s' not found "
                                           "in group resource "
                                           "'%(grp)s'.")
                                         % {'mem': index,
                                            'grp': self.name})
    raise exception.InvalidTemplateAttribute(resource=self.name, key=key)
def get_attribute(self, key, *path):  # noqa: C901
    """Resolve a group attribute, preferring nested-stack outputs.

    Maps the requested key to the canonical output name (refs ->
    refs_map, outputs_list -> outputs, "resource.<n>..." -> outputs or
    refs_map) and tries get_output(); when the output is missing or
    malformed it falls back to reading the nested stack via grouputils.
    Unrecognised keys raise InvalidTemplateAttribute.
    """
    if key == self.CURRENT_SIZE:
        return grouputils.get_size(self)
    op_key = key
    op_path = path
    keycomponents = None
    # Canonicalise the key to the output that actually stores the data.
    if key == self.OUTPUTS_LIST:
        op_key = self.OUTPUTS
    elif key == self.REFS:
        op_key = self.REFS_MAP
    elif key.startswith("resource."):
        # "resource.<index>[.<attr>]": attribute lookups go through the
        # outputs map, bare member lookups through the refs map.
        keycomponents = key.split('.', 2)
        if len(keycomponents) > 2:
            op_path = (keycomponents[2], ) + path
        op_key = self.OUTPUTS if op_path else self.REFS_MAP
    try:
        output = self.get_output(
            self._attribute_output_name(op_key, *op_path))
    except (exception.NotFound,
            exception.TemplateOutputError) as op_err:
        # Output unavailable: answer from the nested stack directly.
        LOG.debug('Falling back to grouputils due to %s', op_err)
        if key == self.REFS:
            return grouputils.get_member_refids(self)
        if key == self.REFS_MAP:
            members = grouputils.get_members(self)
            return {m.name: m.resource_id for m in members}
        if path and key in {self.OUTPUTS, self.OUTPUTS_LIST}:
            members = grouputils.get_members(self)
            attrs = ((rsrc.name, rsrc.FnGetAtt(*path))
                     for rsrc in members)
            if key == self.OUTPUTS:
                return dict(attrs)
            if key == self.OUTPUTS_LIST:
                return [value for name, value in attrs]
        if keycomponents is not None:
            return grouputils.get_nested_attrs(self, key, True, *path)
    else:
        # Output path succeeded: filter it down to the current,
        # non-failed member names.
        if key in {self.REFS, self.REFS_MAP}:
            names = self._group_data().member_names(False)
            if key == self.REFS:
                return [output[n] for n in names if n in output]
            else:
                return {n: output[n] for n in names if n in output}
        if path and key in {self.OUTPUTS_LIST, self.OUTPUTS}:
            names = self._group_data().member_names(False)
            if key == self.OUTPUTS_LIST:
                return [output[n] for n in names if n in output]
            else:
                return {n: output[n] for n in names if n in output}
        if keycomponents is not None:
            # "resource.<index>...": index into the ordered member names.
            # NOTE(review): int(index) may raise ValueError for a
            # non-numeric index, which is not caught here — confirm
            # callers always pass numeric indices.
            names = list(self._group_data().member_names(False))
            index = keycomponents[1]
            try:
                resource_name = names[int(index)]
                return output[resource_name]
            except (IndexError, KeyError):
                raise exception.NotFound(
                    _("Member '%(mem)s' not found "
                      "in group resource '%(grp)s'.") % {
                        'mem': index,
                        'grp': self.name
                    })
    raise exception.InvalidTemplateAttribute(resource=self.name, key=key)