    def test_autoscaling_group_update_policy_removed(self):

        # setup stack from the initial template
        tmpl = template_format.parse(asg_tmpl_with_updt_policy)
        stack = utils.parse_stack(tmpl)
        self.stub_ImageConstraint_validate()
        self.stub_KeypairConstraint_validate()
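        # mox: expectations are recorded above; ReplayAll() switches the
        # mocks into replay mode for the code under test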
        self.m.ReplayAll()

        stack.validate()
        self.m.VerifyAll()
        self.m.UnsetStubs()

        # test stack create
        size = int(stack['WebServerGroup'].properties['MinSize'])
        self._stub_grp_create(size)
        self.stub_ImageConstraint_validate()
        self.m.ReplayAll()
        stack.create()
        self.m.VerifyAll()
        self.assertEqual(('CREATE', 'COMPLETE'), stack.state)

        # test that update policy is loaded
        current_grp = stack['WebServerGroup']
        self.assertIn('AutoScalingRollingUpdate', current_grp.update_policy)
        current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
        self.assertTrue(current_policy)
        self.assertTrue(len(current_policy) > 0)
        init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
        init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
        init_batch_sz = int(init_roll_updt['MaxBatchSize'])
        self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))

        # test that physical resource name of launch configuration is used
        conf = stack['LaunchConfig']
        conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
        self.assertThat(conf.FnGetRefId(),
                        matchers.MatchesRegex(conf_name_pattern))

        # test the number of instances created
        nested = stack['WebServerGroup'].nested()
        self.assertEqual(size, len(nested.resources))

        # clean up for next test
        self.m.UnsetStubs()

        # test stack update
        updated_tmpl = template_format.parse(asg_tmpl_without_updt_policy)
        updated_stack = utils.parse_stack(updated_tmpl)
        self._stub_grp_replace(num_creates_expected_on_updt=0,
                               num_deletes_expected_on_updt=0,
                               num_reloads_expected_on_updt=1)
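        # removing only the update policy should leave the instances alone:
        # no creates or deletes are expected, just one reload of the group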
        self.m.ReplayAll()
        stack.update(updated_stack)
        self.m.VerifyAll()
        self.assertEqual(('UPDATE', 'COMPLETE'), stack.state)

        # test that update policy is removed
        updated_grp = stack['WebServerGroup']
        self.assertFalse(updated_grp.update_policy['AutoScalingRollingUpdate'])
Example #2
    def create_pool(self, resolve_neutron=True, with_vip_subnet=False):
        if resolve_neutron:
            if with_vip_subnet:
                snippet = template_format.parse(pool_template_with_vip_subnet)
            else:
                snippet = template_format.parse(pool_template)
        else:
            snippet = template_format.parse(pool_template_deprecated)
        self.stack = utils.parse_stack(snippet)
        self.tmpl = snippet
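        # canned Neutron responses; the mock_create/mock_show/mock_show_vip
        # attributes are assumed to be patched in this test case's setUp()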
        self.mock_create.return_value = {'pool': {'id': '5678'}}
        self.mock_show.return_value = {'pool': {'status': 'ACTIVE'}}
        self.mock_show_vip.return_value = {'vip': {'status': 'ACTIVE'}}
        self.pool_create_snippet = {
            'pool': {
                'subnet_id': 'sub123', 'protocol': u'HTTP',
                'name': utils.PhysName(self.stack.name, 'pool'),
                'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
        self.vip_create_snippet = {
            'vip': {
                'protocol': u'HTTP', 'name': 'pool.vip',
                'admin_state_up': True, 'subnet_id': u'sub123',
                'pool_id': '5678', 'protocol_port': 80}}

        if with_vip_subnet:
            self.stub_SubnetConstraint_validate()
            self.vip_create_snippet['vip']['subnet_id'] = 'sub9999'
        self.mock_create_vip.return_value = {'vip': {'id': 'xyz'}}
        resource_defns = self.stack.t.resource_definitions(self.stack)
        return loadbalancer.Pool(
            'pool', resource_defns['pool'], self.stack)
Example #3
    def validate_update_policy_diff(self, current, updated):
        # load current stack
        current_tmpl = template_format.parse(current)
        current_stack = utils.parse_stack(current_tmpl,
                                          params=inline_templates.as_params)

        # get the json snippet for the current InstanceGroup resource
        current_grp = current_stack['WebServerGroup']
        current_snippets = dict((n, r.frozen_definition())
                                for n, r in current_stack.items())
        current_grp_json = current_snippets[current_grp.name]

        # load the updated stack
        updated_tmpl = template_format.parse(updated)
        updated_stack = utils.parse_stack(updated_tmpl,
                                          params=inline_templates.as_params)

        # get the updated json snippet for the InstanceGroup resource in the
        # context of the current stack
        updated_grp = updated_stack['WebServerGroup']
        updated_grp_json = updated_grp.t.freeze()

        # identify the template difference
        tmpl_diff = updated_grp.update_template_diff(
            updated_grp_json, current_grp_json)
        self.assertTrue(tmpl_diff.update_policy_changed())

        # test application of the new update policy in handle_update
        current_grp._try_rolling_update = mock.MagicMock()
        current_grp.resize = mock.MagicMock()
        current_grp.handle_update(updated_grp_json, tmpl_diff, None)
        self.assertEqual(updated_grp_json._update_policy or {},
                         current_grp.update_policy.data)
Example #4
    def test_mem_alarm_high_check_not_required_parameters(self):
        snippet = template_format.parse(not_string_alarm_template)
        snippet['Resources']['MEMAlarmHigh']['Properties'].pop('meter_name')
        stack = utils.parse_stack(snippet)

        resource_defns = stack.t.resource_definitions(stack)
        rsrc = alarm.CeilometerAlarm(
            'MEMAlarmHigh', resource_defns['MEMAlarmHigh'], stack)
        error = self.assertRaises(exception.StackValidationFailed,
                                  rsrc.validate)
        self.assertEqual(
            "Property error: Resources.MEMAlarmHigh.Properties: "
            "Property meter_name not assigned",
            six.text_type(error))

        for p in ('period', 'evaluation_periods', 'statistic',
                  'comparison_operator'):
            snippet = template_format.parse(not_string_alarm_template)
            snippet['Resources']['MEMAlarmHigh']['Properties'].pop(p)
            stack = utils.parse_stack(snippet)

            resource_defns = stack.t.resource_definitions(stack)
            rsrc = alarm.CeilometerAlarm(
                'MEMAlarmHigh', resource_defns['MEMAlarmHigh'], stack)
            self.assertIsNone(rsrc.validate())
Example #5
    def test_instance_group(self):

        # setup stack from the initial template
        tmpl = template_format.parse(ig_template_before)
        stack = parse_stack(tmpl)

        # test stack create
        # test the number of instance creation
        # test that physical resource name of launch configuration is used
        size = int(stack.resources['JobServerGroup'].properties['Size'])
        self._stub_create(size)
        self.m.ReplayAll()
        stack.create()
        self.m.VerifyAll()
        self.assertEqual(stack.status, stack.COMPLETE)
        conf = stack.resources['JobServerConfig']
        conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack.name
        regex_pattern = re.compile(conf_name_pattern)
        self.assertTrue(regex_pattern.match(conf.FnGetRefId()))

        # test stack update
        # test that launch configuration is replaced
        conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')
        updated_tmpl = template_format.parse(ig_template_after)
        updated_stack = parse_stack(updated_tmpl)
        stack.update(updated_stack)
        updated_conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')
        self.assertNotEqual(conf_name, updated_conf_name)
Example #6
    def test_loadbalancer(self):
        self._mock_get_image_id_success(u'F20-x86_64-cfntools', 746)
        self._create_stubs()
        self.m.ReplayAll()

        t = template_format.parse(lb_template)
        s = utils.parse_stack(t)
        s.store()

        rsrc = self.create_loadbalancer(t, s, 'LoadBalancer')

        hc = {
            'Target': 'HTTP:80/',
            'HealthyThreshold': '3',
            'UnhealthyThreshold': '5',
            'Interval': '30',
            'Timeout': '5'}
        rsrc.t['Properties']['HealthCheck'] = hc
        self.assertIsNone(rsrc.validate())

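        # a Timeout (35s) larger than the Interval (30s) must fail validation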
        hc['Timeout'] = 35
        self.assertEqual(
            {'Error': 'Interval must be larger than Timeout'},
            rsrc.validate())
        hc['Timeout'] = 5

        self.assertEqual('LoadBalancer', rsrc.FnGetRefId())

        templ = template_format.parse(lb.lb_template_default)
        ha_cfg = rsrc._haproxy_config(templ, rsrc.properties['Instances'])

        self.assertRegexpMatches(ha_cfg, r'bind \*:80')
        self.assertRegexpMatches(ha_cfg, r'server server1 1\.2\.3\.4:80 '
                                 r'check inter 30s fall 5 rise 3')
        self.assertRegexpMatches(ha_cfg, 'timeout check 5s')

        id_list = []
        resource_defns = s.t.resource_definitions(s)
        for inst_name in ['WikiServerOne1', 'WikiServerOne2']:
            inst = instance.Instance(inst_name,
                                     resource_defns['WikiServerOne'],
                                     s)
            id_list.append(inst.FnGetRefId())

        prop_diff = {'Instances': id_list}
        props = copy.copy(rsrc.properties.data)
        props.update(prop_diff)
        update_defn = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
                                                   props)
        rsrc.handle_update(update_defn, {}, prop_diff)

        self.assertEqual('4.5.6.7', rsrc.FnGetAtt('DNSName'))
        self.assertEqual('', rsrc.FnGetAtt('SourceSecurityGroup.GroupName'))

        self.assertRaises(exception.InvalidTemplateAttribute,
                          rsrc.FnGetAtt, 'Foo')

        self.assertIsNone(rsrc.handle_update(rsrc.t, {}, {}))

        self.m.VerifyAll()
Example #7
    def validate_update_policy_diff(self, current, updated):

        # load current stack
        current_tmpl = template_format.parse(current)
        current_stack = utils.parse_stack(current_tmpl)

        # get the json snippet for the current InstanceGroup resource
        current_grp = current_stack['JobServerGroup']
        current_snippets = dict((n, r.parsed_template())
                                for n, r in current_stack.items())
        current_grp_json = current_snippets[current_grp.name]

        # load the updated stack
        updated_tmpl = template_format.parse(updated)
        updated_stack = utils.parse_stack(updated_tmpl)

        # get the updated json snippet for the InstanceGroup resource in the
        # context of the current stack
        updated_grp = updated_stack['JobServerGroup']
        updated_grp_json = current_stack.resolve_runtime_data(updated_grp.t)

        # identify the template difference
        tmpl_diff = updated_grp.update_template_diff(
            updated_grp_json, current_grp_json)
        updated_policy = (updated_grp.t['UpdatePolicy']
                          if 'UpdatePolicy' in updated_grp.t else None)
        expected = {u'UpdatePolicy': updated_policy}
        self.assertEqual(expected, tmpl_diff)
Example #8
    def test_update_in_place(self):
        t = template_format.parse(wp_template)
        self.parse_stack(t)
        queue = self.stack["MyQueue2"]
        queue.resource_id_set(queue.properties.get("name"))
        self.m.StubOutWithMock(queue, "marconi")
        queue.marconi().MultipleTimes().AndReturn(self.fc)
        fake_q = FakeQueue("myqueue", auto_create=False)
        self.m.StubOutWithMock(self.fc, "queue")
        self.fc.queue("myqueue", auto_create=False).MultipleTimes().AndReturn(fake_q)
        self.m.StubOutWithMock(fake_q, "metadata")
        fake_q.metadata(new_meta={"key1": {"key2": "value", "key3": [1, 2]}})

        # Expected to be called during update
        fake_q.metadata(new_meta={"key1": "value"})

        self.m.ReplayAll()

        t = template_format.parse(wp_template)
        new_queue = t["Resources"]["MyQueue2"]
        new_queue["Properties"]["metadata"] = {"key1": "value"}

        scheduler.TaskRunner(queue.create)()
        scheduler.TaskRunner(queue.update, new_queue)()
        self.m.VerifyAll()
Example #9
    def create_vpnservice(self, resolve_neutron=True, resolve_router=True):
        self.stub_SubnetConstraint_validate()
        self.stub_RouterConstraint_validate()
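        # record the subnet and router name-to-ID lookups that mox should
        # expect before the VPN service is created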
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'subnet',
            'sub123',
            cmd_resource=None,
        ).AndReturn('sub123')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'router',
            'rou123',
            cmd_resource=None,
        ).AndReturn('rou123')
        if resolve_neutron:
            snippet = template_format.parse(vpnservice_template)
        else:
            snippet = template_format.parse(vpnservice_template_deprecated)
        if resolve_router:
            props = snippet['resources']['VPNService']['properties']
            props['router'] = 'rou123'
            del props['router_id']
        neutronclient.Client.create_vpnservice(
            self.VPN_SERVICE_CONF).AndReturn({'vpnservice': {'id': 'vpn123'}})

        self.stack = utils.parse_stack(snippet)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        return vpnservice.VPNService('vpnservice',
                                     resource_defns['VPNService'],
                                     self.stack)
Example #10
    def test_update_replace(self):
        t = template_format.parse(wp_template)
        self.parse_stack(t)
        queue = self.stack['MyQueue2']
        queue.resource_id_set(queue.properties.get('name'))
        self.m.StubOutWithMock(queue, 'client')
        queue.client().MultipleTimes().AndReturn(self.fc)
        fake_q = FakeQueue('myqueue', auto_create=False)
        self.m.StubOutWithMock(self.fc, 'queue')
        self.fc.queue('myqueue',
                      auto_create=False).MultipleTimes().AndReturn(fake_q)

        self.m.ReplayAll()

        t = template_format.parse(wp_template)
        t['Resources']['MyQueue2']['Properties']['name'] = 'new_queue'
        resource_defns = template.Template(t).resource_definitions(self.stack)
        new_queue = resource_defns['MyQueue2']

        scheduler.TaskRunner(queue.create)()
        err = self.assertRaises(resource.UpdateReplace,
                                scheduler.TaskRunner(queue.update,
                                                     new_queue))
        msg = 'The Resource MyQueue2 requires replacement.'
        self.assertEqual(msg, six.text_type(err))

        self.m.VerifyAll()
Example #11
    def test_update_in_place(self):
        t = template_format.parse(wp_template)
        self.parse_stack(t)
        queue = self.stack['MyQueue2']
        queue.resource_id_set(queue.properties.get('name'))
        self.m.StubOutWithMock(queue, 'client')
        queue.client().MultipleTimes().AndReturn(self.fc)
        fake_q = FakeQueue('myqueue', auto_create=False)
        self.m.StubOutWithMock(self.fc, 'queue')
        self.fc.queue('myqueue',
                      auto_create=False).MultipleTimes().AndReturn(fake_q)
        self.m.StubOutWithMock(fake_q, 'metadata')
        fake_q.metadata(new_meta={"key1": {"key2": "value", "key3": [1, 2]}})

        # Expected to be called during update
        fake_q.metadata(new_meta={'key1': 'value'})

        self.m.ReplayAll()

        t = template_format.parse(wp_template)
        new_queue = t['Resources']['MyQueue2']
        new_queue['Properties']['metadata'] = {'key1': 'value'}
        resource_defns = template.Template(t).resource_definitions(self.stack)

        scheduler.TaskRunner(queue.create)()
        scheduler.TaskRunner(queue.update, resource_defns['MyQueue2'])()
        self.m.VerifyAll()
Example #12
    def test_update_replace(self, mock_client):
        t = template_format.parse(subscr_template)
        self.parse_stack(t)

        subscr = self.stack['MySubscription']
        subscr_id = "58138648c1e2eb7355d62137"

        self.m.StubOutWithMock(subscr, 'client')
        subscr.client().MultipleTimes().AndReturn(self.fc)

        fake_subscr = FakeSubscription(subscr.properties['queue_name'],
                                       subscr_id)
        self.m.StubOutWithMock(self.fc, 'subscription')
        self.fc.subscription(subscr.properties['queue_name'],
                             options={'key1': 'value1'},
                             subscriber=u'mailto:[email protected]',
                             ttl=3600).AndReturn(fake_subscr)

        self.m.ReplayAll()

        t = template_format.parse(subscr_template)
        t['Resources']['MySubscription']['Properties']['queue_name'] = 'foo'
        resource_defns = template.Template(t).resource_definitions(self.stack)
        new_subscr = resource_defns['MySubscription']

        scheduler.TaskRunner(subscr.create)()
        err = self.assertRaises(resource.UpdateReplace,
                                scheduler.TaskRunner(subscr.update,
                                                     new_subscr))
        msg = 'The Resource MySubscription requires replacement.'
        self.assertEqual(msg, six.text_type(err))

        self.m.VerifyAll()
Example #13
    def test_update_in_place(self, mock_client):
        t = template_format.parse(subscr_template)
        self.parse_stack(t)

        subscr = self.stack['MySubscription']
        subscr_id = "58138648c1e2eb7355d62137"

        self.m.StubOutWithMock(subscr, 'client')
        subscr.client().MultipleTimes().AndReturn(self.fc)

        fake_subscr = FakeSubscription(subscr.properties['queue_name'],
                                       subscr_id)
        self.m.StubOutWithMock(self.fc, 'subscription')
        self.fc.subscription(subscr.properties['queue_name'],
                             options={'key1': 'value1'},
                             subscriber=u'mailto:[email protected]',
                             ttl=3600).AndReturn(fake_subscr)
        self.fc.subscription(subscr.properties['queue_name'],
                             id=subscr_id,
                             auto_create=False).AndReturn(fake_subscr)
        self.m.StubOutWithMock(fake_subscr, 'update')
        fake_subscr.update({'ttl': 3601, 'options': {'key1': 'value1'},
                            'subscriber': 'mailto:[email protected]'})

        self.m.ReplayAll()

        t = template_format.parse(subscr_template)
        new_subscr = t['Resources']['MySubscription']
        new_subscr['Properties']['ttl'] = "3601"
        resource_defns = template.Template(t).resource_definitions(self.stack)

        scheduler.TaskRunner(subscr.create)()
        scheduler.TaskRunner(subscr.update, resource_defns['MySubscription'])()

        self.m.VerifyAll()
Example #14
    def _create_with_remote_credential(self, credential_secret_id=None,
                                       ca_cert=None, insecure=False):

        t = template_format.parse(parent_stack_template)
        properties = t['resources']['remote_stack']['properties']
        if credential_secret_id:
            properties['context']['credential_secret_id'] = (
                credential_secret_id)
        if ca_cert:
            properties['context']['ca_cert'] = (
                ca_cert)
        if insecure:
            properties['context']['insecure'] = insecure
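        # re-serialize the mutated template so it can be passed on as a
        # template string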
        t = json.dumps(t)
        self.patchobject(policy.Enforcer, 'check_is_admin')

        rsrc = self.create_remote_stack(stack_template=t)
        env = environment.get_child_environment(rsrc.stack.env,
                                                {'name': 'foo'})
        args = {
            'stack_name': rsrc.physical_resource_name(),
            'template': template_format.parse(remote_template),
            'timeout_mins': 60,
            'disable_rollback': True,
            'parameters': {'name': 'foo'},
            'files': self.files,
            'environment': env.user_env_as_dict(),
        }
        self.heat.stacks.create.assert_called_with(**args)
        self.assertEqual(2, len(self.heat.stacks.get.call_args_list))
        rsrc.validate()
        return rsrc
Example #15
    def prepare_create_network_gateway(self, resolve_neutron=True):
        self.mockclient.create_network_gateway.return_value = {
            'network_gateway': {
                'id': 'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                'name': 'NetworkGateway',
                'default': False,
                'tenant_id': '96ba52dc-c5c5-44c6-9a9d-d3ba1a03f77f',
                'devices': [{
                    'id': 'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                    'interface_name': 'breth1'}]
            }
        }
        self.mockclient.connect_network_gateway.return_value = {
            'connection_info': {
                'network_gateway_id': 'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                'network_id': '6af055d3-26f6-48dd-a597-7611d7e58d35',
                'port_id': '32acc49c-899e-44ea-8177-6f4157e12eb4'
            }
        }
        self.stub_NetworkConstraint_validate()
        if resolve_neutron:
            t = template_format.parse(gw_template)
        else:
            t = template_format.parse(gw_template_deprecated)

        self.stack = utils.parse_stack(t)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        rsrc = network_gateway.NetworkGateway(
            'test_network_gateway',
            resource_defns['NetworkGateway'], self.stack)
        return rsrc
Example #16
    def test_instance_group_update_policy_check_timeout(self):

        # setup stack from the initial template
        tmpl = template_format.parse(ig_tmpl_with_updt_policy)
        stack = utils.parse_stack(tmpl)

        # test stack create
        size = int(stack['JobServerGroup'].properties['Size'])
        self._stub_grp_create(size)
        self.m.ReplayAll()
        stack.create()
        self.m.VerifyAll()
        self.assertEqual(('CREATE', 'COMPLETE'), stack.state)

        # test that update policy is loaded
        current_grp = stack['JobServerGroup']
        self.assertIn('RollingUpdate', current_grp.update_policy)
        current_policy = current_grp.update_policy['RollingUpdate']
        self.assertTrue(current_policy)
        self.assertTrue(len(current_policy) > 0)
        init_grp_tmpl = tmpl['Resources']['JobServerGroup']
        init_roll_updt = init_grp_tmpl['UpdatePolicy']['RollingUpdate']
        init_batch_sz = int(init_roll_updt['MaxBatchSize'])
        self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))

        # test the number of instances created
        nested = stack['JobServerGroup'].nested()
        self.assertEqual(size, len(nested.resources))

        # clean up for next test
        self.m.UnsetStubs()

        # modify the pause time and test for error
        new_pause_time = 'PT30M'
        updt_template = json.loads(copy.deepcopy(ig_tmpl_with_updt_policy))
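        # note: the template constant is a JSON string, so copy.deepcopy() is
        # effectively a no-op here; json.loads() already returns a fresh dict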
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['RollingUpdate']
        policy['PauseTime'] = new_pause_time
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = 'bar'
        updated_tmpl = template_format.parse(json.dumps(updt_template))
        updated_stack = utils.parse_stack(updated_tmpl)
        stack.update(updated_stack)
        self.assertEqual(('UPDATE', 'FAILED'), stack.state)

        # test that the update policy is updated
        updated_grp = stack['JobServerGroup']
        self.assertIn('RollingUpdate', updated_grp.update_policy)
        updated_policy = updated_grp.update_policy['RollingUpdate']
        self.assertTrue(updated_policy)
        self.assertTrue(len(updated_policy) > 0)
        self.assertEqual(new_pause_time, updated_policy['PauseTime'])

        # test that error message match
        expected_error_message = ('The current UpdatePolicy will result '
                                  'in stack update timeout.')
        self.assertIn(expected_error_message, stack.status_reason)
Example #17
    def test_loadbalancer(self):
        self._create_stubs()

        self.m.ReplayAll()

        t = template_format.parse(lb_template)
        s = utils.parse_stack(t)
        s.store()

        rsrc = self.create_loadbalancer(t, s, 'LoadBalancer')

        hc = {
            'Target': 'HTTP:80/',
            'HealthyThreshold': '3',
            'UnhealthyThreshold': '5',
            'Interval': '30',
            'Timeout': '5'}
        rsrc.t['Properties']['HealthCheck'] = hc
        self.assertIsNone(rsrc.validate())

        hc['Timeout'] = 35
        self.assertEqual(
            {'Error': 'Interval must be larger than Timeout'},
            rsrc.validate())
        hc['Timeout'] = 5

        self.assertEqual('LoadBalancer', rsrc.FnGetRefId())

        templ = template_format.parse(lb.lb_template_default)
        ha_cfg = rsrc._haproxy_config(templ, rsrc.properties['Instances'])

        self.assertRegexpMatches(ha_cfg, r'bind \*:80')
        self.assertRegexpMatches(ha_cfg, r'server server1 1\.2\.3\.4:80 '
                                 r'check inter 30s fall 5 rise 3')
        self.assertRegexpMatches(ha_cfg, 'timeout check 5s')

        id_list = []
        for inst_name in ['WikiServerOne1', 'WikiServerOne2']:
            inst = instance.Instance(inst_name,
                                     s.t['Resources']['WikiServerOne'],
                                     s)
            id_list.append(inst.FnGetRefId())

        rsrc.handle_update(rsrc.json_snippet, {}, {'Instances': id_list})

        self.assertEqual('4.5.6.7', rsrc.FnGetAtt('DNSName'))
        self.assertEqual('', rsrc.FnGetAtt('SourceSecurityGroup.GroupName'))

        self.assertRaises(exception.InvalidTemplateAttribute,
                          rsrc.FnGetAtt, 'Foo')

        self.assertIsNone(rsrc.handle_update({}, {}, {}))

        self.m.VerifyAll()
Example #18
    def setUp(self):
        super(StackResourceTest, self).setUp()
        utils.setup_dummy_db()
        resource._register_class("some_magic_type", MyStackResource)
        resource._register_class("GenericResource", generic_rsrc.GenericResource)
        t = parser.Template({"Resources": {"provider_resource": ws_res_snippet}})
        self.parent_stack = parser.Stack(utils.dummy_context(), "test_stack", t, stack_id=str(uuid.uuid4()))
        self.parent_resource = MyStackResource("test", ws_res_snippet, self.parent_stack)
        self.templ = template_format.parse(param_template)
        self.simple_template = template_format.parse(simple_template)
Example #19
    def test_with_update_policy_inst_group(self):
        t = template_format.parse(inline_templates.as_heat_template)
        ag = t["resources"]["my-group"]
        ag["update_policy"] = {
            "RollingUpdate": {"MinInstancesInService": "1", "MaxBatchSize": "2", "PauseTime": "PT1S"}
        }
        tmpl = template_format.parse(json.dumps(t))
        stack = utils.parse_stack(tmpl)
        exc = self.assertRaises(exception.StackValidationFailed, stack.validate)
        self.assertIn("Unknown Property RollingUpdate", six.text_type(exc))
Example #20
    def create_pool(self, resolve_neutron=True, with_vip_subnet=False):
        clients.OpenStackClients.keystone().AndReturn(
            fakes.FakeKeystoneClient())
        neutronclient.Client.create_pool({
            'pool': {
                'subnet_id': 'sub123', 'protocol': u'HTTP',
                'name': utils.PhysName('test_stack', 'pool'),
                'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
        ).AndReturn({'pool': {'id': '5678'}})
        neutronclient.Client.show_pool('5678').AndReturn(
            {'pool': {'status': 'ACTIVE'}})
        neutronclient.Client.show_vip('xyz').AndReturn(
            {'vip': {'status': 'ACTIVE'}})
        stvipvsn = {
            'vip': {
                'protocol': u'HTTP', 'name': 'pool.vip',
                'admin_state_up': True, 'subnet_id': u'sub9999',
                'pool_id': '5678', 'protocol_port': 80}
        }

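        # stvipvsn: VIP created on its own subnet; stvippsn: VIP falling back
        # to the pool's subnet (naming assumed from how each is used below)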
        stvippsn = copy.deepcopy(stvipvsn)
        stvippsn['vip']['subnet_id'] = 'sub123'

        if resolve_neutron and with_vip_subnet:
            neutron_utils.neutronV20.find_resourceid_by_name_or_id(
                mox.IsA(neutronclient.Client),
                'subnet',
                'sub123'
            ).AndReturn('sub123')
            neutron_utils.neutronV20.find_resourceid_by_name_or_id(
                mox.IsA(neutronclient.Client),
                'subnet',
                'sub9999'
            ).AndReturn('sub9999')
            snippet = template_format.parse(pool_template_with_vip_subnet)
            neutronclient.Client.create_vip(stvipvsn
                                            ).AndReturn({'vip': {'id': 'xyz'}})

        elif resolve_neutron and not with_vip_subnet:
            neutron_utils.neutronV20.find_resourceid_by_name_or_id(
                mox.IsA(neutronclient.Client),
                'subnet',
                'sub123'
            ).AndReturn('sub123')
            snippet = template_format.parse(pool_template)
            neutronclient.Client.create_vip(stvippsn
                                            ).AndReturn({'vip': {'id': 'xyz'}})
        else:
            snippet = template_format.parse(pool_template_deprecated)
            neutronclient.Client.create_vip(stvippsn
                                            ).AndReturn({'vip': {'id': 'xyz'}})
        stack = utils.parse_stack(snippet)
        resource_defns = stack.t.resource_definitions(stack)
        return loadbalancer.Pool(
            'pool', resource_defns['pool'], stack)
Example #21
    def test_signal_with_body_as_input_and_delete_with_executions(self):
        tmpl = template_format.parse(workflow_template_full)
        stack = utils.parse_stack(tmpl, params={
            'parameters': {'use_request_body_as_input': 'true'}
        })
        rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
        wf = workflow.Workflow('create_vm', rsrc_defns, stack)
        self.mistral.workflows.create.return_value = [
            FakeWorkflow('create_vm')]
        scheduler.TaskRunner(wf.create)()
        details = {'flavor': '3'}
        execution = mock.Mock()
        execution.id = '12345'
        exec_manager = executions.ExecutionManager(wf.client('mistral'))
        self.mistral.executions.create.side_effect = (
            lambda *args, **kw: exec_manager.create(*args, **kw))
        self.patchobject(exec_manager, '_create', return_value=execution)
        scheduler.TaskRunner(wf.signal, details)()
        call_args = self.mistral.executions.create.call_args
        args, _ = call_args
        expected_args = (
            '{"image": "31d8eeaf-686e-4e95-bb27-765014b9f20b", '
            '"name": "create_test_server", "flavor": "3"}')
        self.validate_json_inputs(args[1], expected_args)
        self.assertEqual({'executions': '12345'}, wf.data())
        # Updating the workflow changing "use_request_body_as_input" to
        # false and signaling again with the expected request body format.
        t = template_format.parse(workflow_updating_request_body_property)
        new_stack = utils.parse_stack(t)
        rsrc_defns = new_stack.t.resource_definitions(new_stack)
        self.mistral.workflows.update.return_value = [
            FakeWorkflow('test_stack-workflow-b5fiekdsa355')]
        scheduler.TaskRunner(wf.update, rsrc_defns['create_vm'])()
        self.assertTrue(self.mistral.workflows.update.called)
        self.assertEqual((wf.UPDATE, wf.COMPLETE), wf.state)
        details = {'input': {'flavor': '4'}}
        execution = mock.Mock()
        execution.id = '54321'
        exec_manager = executions.ExecutionManager(wf.client('mistral'))
        self.mistral.executions.create.side_effect = (
            lambda *args, **kw: exec_manager.create(*args, **kw))
        self.patchobject(exec_manager, '_create', return_value=execution)
        scheduler.TaskRunner(wf.signal, details)()
        call_args = self.mistral.executions.create.call_args
        args, _ = call_args
        expected_args = (
            '{"image": "31d8eeaf-686e-4e95-bb27-765014b9f20b", '
            '"name": "create_test_server", "flavor": "4"}')
        self.validate_json_inputs(args[1], expected_args)
        self.assertEqual({'executions': '54321,12345',
                          'name': 'test_stack-workflow-b5fiekdsa355'},
                         wf.data())
        scheduler.TaskRunner(wf.delete)()
        self.assertEqual(2, self.mistral.executions.delete.call_count)
        self.assertEqual((wf.DELETE, wf.COMPLETE), wf.state)
Example #22
    def test_minimal_yaml(self):
        yaml1 = ''
        yaml2 = '''HeatTemplateFormatVersion: '2012-12-12'
Parameters: {}
Mappings: {}
Resources: {}
Outputs: {}
'''
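        # template_format.parse() fills in the default template skeleton, so
        # an empty document should parse the same as the explicit empty one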
        tpl1 = template_format.parse(yaml1)
        tpl2 = template_format.parse(yaml2)
        self.assertEqual(tpl1, tpl2)
Example #23
    def test_autoscaling_group_update_policy_removed(self):

        # setup stack from the initial template
        tmpl = template_format.parse(asg_tmpl_with_updt_policy)
        stack = utils.parse_stack(tmpl)
        stack.validate()

        # test stack create
        size = int(stack["WebServerGroup"].properties["MinSize"])
        self._stub_grp_create(size)
        self._mock_get_image_id_success("F20-x86_64-cfntools", "image_id")
        self.m.ReplayAll()
        stack.create()
        self.m.VerifyAll()
        self.assertEqual(("CREATE", "COMPLETE"), stack.state)

        # test that update policy is loaded
        current_grp = stack["WebServerGroup"]
        self.assertIn("AutoScalingRollingUpdate", current_grp.update_policy)
        current_policy = current_grp.update_policy["AutoScalingRollingUpdate"]
        self.assertTrue(current_policy)
        self.assertTrue(len(current_policy) > 0)
        init_updt_policy = tmpl["Resources"]["WebServerGroup"]["UpdatePolicy"]
        init_roll_updt = init_updt_policy["AutoScalingRollingUpdate"]
        init_batch_sz = int(init_roll_updt["MaxBatchSize"])
        self.assertEqual(init_batch_sz, int(current_policy["MaxBatchSize"]))

        # test that physical resource name of launch configuration is used
        conf = stack["LaunchConfig"]
        conf_name_pattern = "%s-LaunchConfig-[a-zA-Z0-9]+$" % stack.name
        self.assertThat(conf.FnGetRefId(), MatchesRegex(conf_name_pattern))

        # test the number of instances created
        nested = stack["WebServerGroup"].nested()
        self.assertEqual(size, len(nested.resources))

        # clean up for next test
        self.m.UnsetStubs()

        # test stack update
        updated_tmpl = template_format.parse(asg_tmpl_without_updt_policy)
        updated_stack = utils.parse_stack(updated_tmpl)
        self._stub_grp_replace(
            num_creates_expected_on_updt=0, num_deletes_expected_on_updt=0, num_reloads_expected_on_updt=1
        )
        self.m.ReplayAll()
        stack.update(updated_stack)
        self.m.VerifyAll()
        self.assertEqual(("UPDATE", "COMPLETE"), stack.state)

        # test that update policy is removed
        updated_grp = stack["WebServerGroup"]
        self.assertFalse(updated_grp.update_policy["AutoScalingRollingUpdate"])
Example #24
    def create_pool(self, resolve_neutron=True, with_vip_subnet=False):
        if resolve_neutron:
            if with_vip_subnet:
                snippet = template_format.parse(pool_template_with_vip_subnet)
            else:
                snippet = template_format.parse(pool_template)
        else:
            snippet = template_format.parse(pool_template_deprecated)
        self.stack = utils.parse_stack(snippet)
        self.tmpl = snippet
        neutronclient.Client.create_pool({
            'pool': {
                'subnet_id': 'sub123', 'protocol': u'HTTP',
                'name': utils.PhysName(self.stack.name, 'pool'),
                'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
        ).AndReturn({'pool': {'id': '5678'}})
        neutronclient.Client.show_pool('5678').AndReturn(
            {'pool': {'status': 'ACTIVE'}})
        neutronclient.Client.show_vip('xyz').AndReturn(
            {'vip': {'status': 'ACTIVE'}})
        stvipvsn = {
            'vip': {
                'protocol': u'HTTP', 'name': 'pool.vip',
                'admin_state_up': True, 'subnet_id': u'sub9999',
                'pool_id': '5678', 'protocol_port': 80}
        }

        stvippsn = copy.deepcopy(stvipvsn)
        stvippsn['vip']['subnet_id'] = 'sub123'
        self.stub_SubnetConstraint_validate()

        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'subnet',
            'sub123',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('sub123')
        if resolve_neutron and with_vip_subnet:
            neutronV20.find_resourceid_by_name_or_id(
                mox.IsA(neutronclient.Client),
                'subnet',
                'sub9999',
                cmd_resource=None,
            ).AndReturn('sub9999')
            neutronclient.Client.create_vip(stvipvsn
                                            ).AndReturn({'vip': {'id': 'xyz'}})
        else:
            neutronclient.Client.create_vip(stvippsn
                                            ).AndReturn({'vip': {'id': 'xyz'}})
        resource_defns = self.stack.t.resource_definitions(self.stack)
        return loadbalancer.Pool(
            'pool', resource_defns['pool'], self.stack)
Example #25
def get_stack(stack_name, ctx, template=None, with_params=True):
    if template is None:
        t = template_format.parse(wp_template)
        if with_params:
            env = environment.Environment({'KeyName': 'test'})
            tmpl = templatem.Template(t, env=env)
        else:
            tmpl = templatem.Template(t)
    else:
        t = template_format.parse(template)
        tmpl = templatem.Template(t)
    stack = parser.Stack(ctx, stack_name, tmpl)
    return stack
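# For illustration only, a hypothetical call site for the helper above,
# assuming a dummy request context as used elsewhere in these examples:
#     stack = get_stack('test_stack', utils.dummy_context())
#     stack.store()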
Example #26
def get_stack(stack_name, ctx, template=None, with_params=True, convergence=False):
    if template is None:
        t = template_format.parse(wp_template)
        if with_params:
            env = environment.Environment({"KeyName": "test"})
            tmpl = templatem.Template(t, env=env)
        else:
            tmpl = templatem.Template(t)
    else:
        t = template_format.parse(template)
        tmpl = templatem.Template(t)
    stack = parser.Stack(ctx, stack_name, tmpl, convergence=convergence)
    return stack
Example #27
    def compare_json_vs_yaml(self, json_str, yml_str, file_name):
        yml = template_format.parse(yml_str)

        self.assertEqual(u"2012-12-12", yml[u"HeatTemplateFormatVersion"], file_name)
        self.assertFalse(u"AWSTemplateFormatVersion" in yml, file_name)
        del (yml[u"HeatTemplateFormatVersion"])

        jsn = template_format.parse(json_str)

        if u"AWSTemplateFormatVersion" in jsn:
            del (jsn[u"AWSTemplateFormatVersion"])

        self.assertEqual(yml, jsn, file_name)
Example #28
    def compare_json_vs_yaml(self, json_str, yml_str):
        yml = template_format.parse(yml_str)

        self.assertEqual(u'2012-12-12', yml[u'HeatTemplateFormatVersion'])
        self.assertNotIn(u'AWSTemplateFormatVersion', yml)
        del(yml[u'HeatTemplateFormatVersion'])

        jsn = template_format.parse(json_str)

        if u'AWSTemplateFormatVersion' in jsn:
            del(jsn[u'AWSTemplateFormatVersion'])

        self.assertEqual(yml, jsn)
Example #29
    def create_vpnservice(self, resolve_neutron=True):
        if resolve_neutron:
            neutron_utils.neutronV20.find_resourceid_by_name_or_id(
                mox.IsA(neutronclient.Client), "subnet", "sub123"
            ).AndReturn("sub123")
            snippet = template_format.parse(vpnservice_template)
        else:
            snippet = template_format.parse(vpnservice_template_deprecated)
        neutronclient.Client.create_vpnservice(self.VPN_SERVICE_CONF).AndReturn({"vpnservice": {"id": "vpn123"}})

        self.stack = utils.parse_stack(snippet)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        return vpnservice.VPNService("vpnservice", resource_defns["VPNService"], self.stack)
Example #30
    def prepare_create_network_gateway(self, resolve_neutron=True):
        clients.OpenStackClients.keystone().AndReturn(
            fakes.FakeKeystoneClient())
        neutronclient.Client.create_network_gateway({
            'network_gateway': {
                'name': u'NetworkGateway',
                'devices': [{'id': u'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                             'interface_name': u'breth1'}]
            }
        }
        ).AndReturn({
            'network_gateway': {
                'id': 'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                'name': 'NetworkGateway',
                'default': False,
                'tenant_id': '96ba52dc-c5c5-44c6-9a9d-d3ba1a03f77f',
                'devices': [{
                    'id': 'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                    'interface_name': 'breth1'}]
            }
        }
        )
        neutronclient.Client.connect_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', {
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'segmentation_id': 10,
                'segmentation_type': u'vlan'
            }
        ).AndReturn({
            'connection_info': {
                'network_gateway_id': u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'port_id': u'32acc49c-899e-44ea-8177-6f4157e12eb4'
            }
        })
        if resolve_neutron:
            neutron_utils.neutronV20.find_resourceid_by_name_or_id(
                mox.IsA(neutronclient.Client),
                'network',
                '6af055d3-26f6-48dd-a597-7611d7e58d35'
            ).AndReturn('6af055d3-26f6-48dd-a597-7611d7e58d35')
            t = template_format.parse(gw_template)
        else:
            t = template_format.parse(gw_template_deprecated)

        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        rsrc = network_gateway.NetworkGateway(
            'test_network_gateway',
            resource_defns['NetworkGateway'], stack)
        return rsrc
Example #31
    def test_mem_alarm_high_update_no_replace(self):
        '''
        Make sure that we can change the update-able properties
        without replacing the Alarm rsrc.
        '''
        # short circuit the alarm's references
        t = template_format.parse(alarm_template)
        properties = t['Resources']['MEMAlarmHigh']['Properties']
        properties['alarm_actions'] = ['signal_handler']
        properties['matching_metadata'] = {'a': 'v'}
        properties['query'] = [dict(field='b', op='eq', value='w')]

        self.stack = self.create_stack(template=json.dumps(t))
        self.m.StubOutWithMock(self.fa.alarms, 'update')
        schema = props.schemata(alarm.CeilometerAlarm.properties_schema)
        exns = [
            'period', 'evaluation_periods', 'threshold', 'statistic',
            'comparison_operator', 'meter_name', 'matching_metadata', 'query'
        ]
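        # expect alarms.update() to receive IgnoreArg() for every
        # update-allowed property that is not folded into the threshold rule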
        al2 = dict((k, mox.IgnoreArg()) for k, s in schema.items()
                   if s.update_allowed and k not in exns)
        al2['alarm_id'] = mox.IgnoreArg()
        al2['type'] = 'threshold'
        al2['threshold_rule'] = dict(meter_name=properties['meter_name'],
                                     period=90,
                                     evaluation_periods=2,
                                     threshold=39,
                                     statistic='max',
                                     comparison_operator='lt',
                                     query=[
                                         dict(field='c', op='ne', value='z'),
                                         dict(field='metadata.metering.x',
                                              op='eq',
                                              value='y')
                                     ])
        self.fa.alarms.update(**al2).AndReturn(None)

        self.m.ReplayAll()
        self.stack.create()
        rsrc = self.stack['MEMAlarmHigh']

        properties = copy.copy(rsrc.properties.data)
        properties.update({
            'comparison_operator': 'lt',
            'description': 'fruity',
            'evaluation_periods': '2',
            'period': '90',
            'enabled': True,
            'repeat_actions': True,
            'statistic': 'max',
            'threshold': '39',
            'insufficient_data_actions': [],
            'alarm_actions': [],
            'ok_actions': ['signal_handler'],
            'matching_metadata': {
                'x': 'y'
            },
            'query': [dict(field='c', op='ne', value='z')]
        })
        snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
                                               properties)

        scheduler.TaskRunner(rsrc.update, snippet)()

        self.m.VerifyAll()
Example #32
    def test_security_group(self):

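        # canned show_security_group() response used in the update phase
        # below; its rules (bbbb..gggg) are deleted when the group is updated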
        show_created = {
            'security_group': {
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'name': 'sc1',
                'description': '',
                'security_group_rules': [{
                    'direction': 'ingress',
                    'protocol': 'tcp',
                    'port_range_max': '22',
                    'id': 'bbbb',
                    'ethertype': 'IPv4',
                    'security_group_id': 'aaaa',
                    'remote_group_id': None,
                    'remote_ip_prefix': '0.0.0.0/0',
                    'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                    'port_range_min': '22'
                }, {
                    'direction': 'ingress',
                    'protocol': 'tcp',
                    'port_range_max': '80',
                    'id': 'cccc',
                    'ethertype': 'IPv4',
                    'security_group_id': 'aaaa',
                    'remote_group_id': None,
                    'remote_ip_prefix': '0.0.0.0/0',
                    'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                    'port_range_min': '80'
                }, {
                    'direction': 'ingress',
                    'protocol': 'tcp',
                    'port_range_max': None,
                    'id': 'dddd',
                    'ethertype': 'IPv4',
                    'security_group_id': 'aaaa',
                    'remote_group_id': 'wwww',
                    'remote_ip_prefix': None,
                    'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                    'port_range_min': None
                }, {
                    'direction': 'egress',
                    'protocol': 'tcp',
                    'port_range_max': '22',
                    'id': 'eeee',
                    'ethertype': 'IPv4',
                    'security_group_id': 'aaaa',
                    'remote_group_id': None,
                    'remote_ip_prefix': '10.0.1.0/24',
                    'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                    'port_range_min': '22'
                }, {
                    'direction': 'egress',
                    'protocol': None,
                    'port_range_max': None,
                    'id': 'ffff',
                    'ethertype': 'IPv4',
                    'security_group_id': 'aaaa',
                    'remote_group_id': 'xxxx',
                    'remote_ip_prefix': None,
                    'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                    'port_range_min': None
                }, {
                    'direction': 'egress',
                    'protocol': None,
                    'port_range_max': None,
                    'id': 'gggg',
                    'ethertype': 'IPv4',
                    'security_group_id': 'aaaa',
                    'remote_group_id': 'aaaa',
                    'remote_ip_prefix': None,
                    'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                    'port_range_min': None
                }],
                'id': 'aaaa'
            }
        }

        # create script
        sg_name = utils.PhysName('test_stack', 'the_sg')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'security_group',
            'wwww',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('wwww')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'security_group',
            'xxxx',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('xxxx')
        neutronclient.Client.create_security_group({
            'security_group': {
                'name': sg_name,
                'description': 'HTTP and SSH access'
            }
        }).AndReturn({
            'security_group': {
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'name': sg_name,
                'description': 'HTTP and SSH access',
                'security_group_rules': [{
                    "direction": "egress",
                    "ethertype": "IPv4",
                    "id": "aaaa-1",
                    "port_range_max": None,
                    "port_range_min": None,
                    "protocol": None,
                    "remote_group_id": None,
                    "remote_ip_prefix": None,
                    "security_group_id": "aaaa",
                    "tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
                }, {
                    "direction": "egress",
                    "ethertype": "IPv6",
                    "id": "aaaa-2",
                    "port_range_max": None,
                    "port_range_min": None,
                    "protocol": None,
                    "remote_group_id": None,
                    "remote_ip_prefix": None,
                    "security_group_id": "aaaa",
                    "tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
                }],
                'id': 'aaaa'
            }
        })

        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '0.0.0.0/0',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '0.0.0.0/0',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa',
                'id': 'bbbb'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '0.0.0.0/0',
                'port_range_min': '80',
                'ethertype': 'IPv4',
                'port_range_max': '80',
                'protocol': 'tcp',
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '0.0.0.0/0',
                'port_range_min': '80',
                'ethertype': 'IPv4',
                'port_range_max': '80',
                'protocol': 'tcp',
                'security_group_id': 'aaaa',
                'id': 'cccc'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': 'wwww',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': 'tcp',
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': 'wwww',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': 'tcp',
                'security_group_id': 'aaaa',
                'id': 'dddd'
            }
        })
        neutronclient.Client.show_security_group('aaaa').AndReturn({
            'security_group': {
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'name': sg_name,
                'description': 'HTTP and SSH access',
                'security_group_rules': [{
                    "direction": "egress",
                    "ethertype": "IPv4",
                    "id": "aaaa-1",
                    "port_range_max": None,
                    "port_range_min": None,
                    "protocol": None,
                    "remote_group_id": None,
                    "remote_ip_prefix": None,
                    "security_group_id": "aaaa",
                    "tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
                }, {
                    "direction": "egress",
                    "ethertype": "IPv6",
                    "id": "aaaa-2",
                    "port_range_max": None,
                    "port_range_min": None,
                    "protocol": None,
                    "remote_group_id": None,
                    "remote_ip_prefix": None,
                    "security_group_id": "aaaa",
                    "tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
                }],
                'id': 'aaaa'
            }
        })
        neutronclient.Client.delete_security_group_rule('aaaa-1').AndReturn(
            None)
        neutronclient.Client.delete_security_group_rule('aaaa-2').AndReturn(
            None)
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': None,
                'remote_ip_prefix': '10.0.1.0/24',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': None,
                'remote_ip_prefix': '10.0.1.0/24',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa',
                'id': 'eeee'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': 'xxxx',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': 'xxxx',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa',
                'id': 'ffff'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': 'aaaa',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': 'aaaa',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa',
                'id': 'gggg'
            }
        })

        # update script
        neutronclient.Client.update_security_group(
            'aaaa', {
                'security_group': {
                    'description': 'SSH access for private network',
                    'name': 'myrules'
                }
            }).AndReturn({
                'security_group': {
                    'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                    'name': 'myrules',
                    'description': 'SSH access for private network',
                    'security_group_rules': [],
                    'id': 'aaaa'
                }
            })

        neutronclient.Client.show_security_group('aaaa').AndReturn(
            show_created)
        neutronclient.Client.delete_security_group_rule('bbbb').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('cccc').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('dddd').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('eeee').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('ffff').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('gggg').AndReturn(None)
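        # with the old rules purged, the group's rule set is recreated from
        # the updated template: blanket IPv4/IPv6 egress rules plus the new
        # SSH ingress rule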

        neutronclient.Client.show_security_group('aaaa').AndReturn({
            'security_group': {
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'name': 'sc1',
                'description': '',
                'security_group_rules': [],
                'id': 'aaaa'
            }
        })

        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'egress',
                'ethertype': 'IPv4',
                'security_group_id': 'aaaa',
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': None,
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa',
                'id': 'hhhh'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'egress',
                'ethertype': 'IPv6',
                'security_group_id': 'aaaa',
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': None,
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv6',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa',
                'id': 'iiii'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '10.0.0.10/24',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '10.0.0.10/24',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa',
                'id': 'jjjj'
            }
        })

        # delete script
        neutronclient.Client.show_security_group('aaaa').AndReturn(
            show_created)
        neutronclient.Client.delete_security_group_rule('bbbb').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('cccc').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('dddd').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('eeee').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('ffff').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('gggg').AndReturn(None)
        neutronclient.Client.delete_security_group('aaaa').AndReturn(None)

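        # switch mox from record to replay mode: the stack operations below
        # must issue exactly the client calls recorded above, in order, and
        # VerifyAll() confirms every expectation was consumed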
        self.m.ReplayAll()
        stack = self.create_stack(self.test_template)

        sg = stack['the_sg']
        self.assertResourceState(sg, 'aaaa')

        updated_tmpl = template_format.parse(self.test_template_update)
        updated_stack = utils.parse_stack(updated_tmpl)
        stack.update(updated_stack)

        stack.delete()
        self.m.VerifyAll()
Example #33
    def test_port_needs_update_network(self):
        props = {'network_id': u'net1234',
                 'name': 'test_port',
                 'admin_state_up': True,
                 'device_owner': u'network:dhcp'}
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')

        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'old_network',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'new_network',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net5678')
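        # 'old_network' resolves to the port's current network (net1234), so
        # switching to it must not replace the port; 'new_network' resolves
        # to net5678 and must force a replacement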

        create_props = props.copy()
        neutronclient.Client.create_port(
            {'port': create_props}
        ).AndReturn({'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "fixed_ips": {
                "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
                "ip_address": "10.0.0.2"
            }
        }})
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net5678',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net5678')

        call_dict = copy.deepcopy(props)
        call_dict['security_groups'] = [
            '0389f747-7785-4757-b7bb-2ab07e4b09c3']
        del call_dict['network_id']

        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})

        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})

        neutronclient.Client.update_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
            {'port': {'fixed_ips': []}}
        ).AndReturn(None)
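        # before the replacement is attempted, the port's fixed IPs are
        # expected to be cleared so the addresses can be reused by the
        # replacement port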

        self.m.ReplayAll()

        # create port
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties'].pop('fixed_ips')
        t['resources']['port']['properties']['name'] = 'test_port'
        stack = utils.parse_stack(t)

        port = stack['port']
        scheduler.TaskRunner(port.create)()

        # Switch from network_id=ID to network=ID (no replace)
        new_props = props.copy()
        new_props['network'] = new_props.pop('network_id')
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)
        scheduler.TaskRunner(port.update, update_snippet)()
        self.assertEqual((port.UPDATE, port.COMPLETE), port.state)

        # Switch from network=ID to network=NAME (no replace)
        new_props['network'] = 'old_network'
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)

        scheduler.TaskRunner(port.update, update_snippet)()
        self.assertEqual((port.UPDATE, port.COMPLETE), port.state)

        # Switch to a different network (replace)
        new_props['network'] = 'new_network'
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)
        updater = scheduler.TaskRunner(port.update, update_snippet)
        self.assertRaises(exception.UpdateReplace, updater)

        self.m.VerifyAll()
Example #34
    def test_refid(self):
        t = template_format.parse(self.test_template)
        stack = self.parse_stack(t)
        nested_stack = stack['the_nested']
        self.assertEqual('the_nested', nested_stack.FnGetRefId())
Example #35
    def test_user_refid_rsrc_id(self):
        t = template_format.parse(user_template)
        stack = utils.parse_stack(t)
        rsrc = stack['CfnUser']
        rsrc.resource_id = 'phy-rsrc-id'
        self.assertEqual('phy-rsrc-id', rsrc.FnGetRefId())
Example #36
    def test_security_group_nova_with_egress_rules(self):
        t = template_format.parse(self.test_template_nova_with_egress)
        stack = self.parse_stack(t)

        sg = stack['the_sg']
        self.assertRaises(exception.EgressRuleNotAllowed, sg.validate)
Example #37
    def _test_extraroute(self, ipv6=False):

        if ipv6:
            route1 = {
                "destination": "ffff:f53b:82e4::56/46",
                "nexthop": "dce7:f53b:82e4::56"
            }
            route2 = {
                "destination": "ffff:f53b:ffff::56/46",
                "nexthop": "dce7:f53b:82e4::56"
            }
        else:
            route1 = {"destination": "192.168.0.0/24", "nexthop": "1.1.1.1"}
            route2 = {"destination": "192.168.255.0/24", "nexthop": "1.1.1.1"}

        self.stub_RouterConstraint_validate()

        self.mockclient.show_router.side_effect = [
            # add first route
            {
                'router': {
                    'routes': []
                }
            },
            # add second route
            {
                'router': {
                    'routes': [route1.copy()]
                }
            },
            # first delete
            {
                'router': {
                    'routes': [route1.copy(), route2.copy()]
                }
            },
            # second delete
            {
                'router': {
                    'routes': [route2.copy()]
                }
            },
        ]
        self.mockclient.update_router.return_value = None

        t = template_format.parse(neutron_template)
        stack = utils.parse_stack(t)

        if ipv6:
            rsrc1 = self.create_extraroute(
                t,
                stack,
                'extraroute1',
                properties={
                    'router_id': '3e46229d-8fce-4733-819a-b5fe630550f8',
                    'destination': 'ffff:f53b:82e4::56/46',
                    'nexthop': 'dce7:f53b:82e4::56'
                })

            self.create_extraroute(
                t,
                stack,
                'extraroute2',
                properties={
                    'router_id': '3e46229d-8fce-4733-819a-b5fe630550f8',
                    'destination': 'ffff:f53b:ffff::56/46',
                    'nexthop': 'dce7:f53b:82e4::56'
                })
        else:
            rsrc1 = self.create_extraroute(
                t,
                stack,
                'extraroute1',
                properties={
                    'router_id': '3e46229d-8fce-4733-819a-b5fe630550f8',
                    'destination': '192.168.0.0/24',
                    'nexthop': '1.1.1.1'
                })

            self.create_extraroute(
                t,
                stack,
                'extraroute2',
                properties={
                    'router_id': '3e46229d-8fce-4733-819a-b5fe630550f8',
                    'destination': '192.168.255.0/24',
                    'nexthop': '1.1.1.1'
                })

        scheduler.TaskRunner(rsrc1.delete)()
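        # reset the state so the route can be deleted a second time,
        # exercising the already-deleted path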
        rsrc1.state_set(rsrc1.CREATE, rsrc1.COMPLETE, 'to delete again')
        scheduler.TaskRunner(rsrc1.delete)()

        self.mockclient.show_router.assert_called_with(
            '3e46229d-8fce-4733-819a-b5fe630550f8')
        self.mockclient.update_router.assert_has_calls([
            # add first route
            mock.call('3e46229d-8fce-4733-819a-b5fe630550f8',
                      {'router': {
                          'routes': [route1.copy()]
                      }}),
            # add second route
            mock.call(
                '3e46229d-8fce-4733-819a-b5fe630550f8',
                {'router': {
                    'routes': [route1.copy(), route2.copy()]
                }}),
            # first delete
            mock.call('3e46229d-8fce-4733-819a-b5fe630550f8',
                      {'router': {
                          'routes': [route2.copy()]
                      }}),
        ])
    def update_autoscaling_group(self, init_template, updt_template,
                                 num_updates_expected_on_updt,
                                 num_creates_expected_on_updt,
                                 num_deletes_expected_on_updt,
                                 num_reloads_expected_on_updt,
                                 update_replace):

        # setup stack from the initial template
        tmpl = template_format.parse(init_template)
        stack = utils.parse_stack(tmpl)
        stack.validate()

        # test stack create
        size = int(stack['WebServerGroup'].properties['MinSize'])
        self._stub_grp_create(size)
        self.m.ReplayAll()
        stack.create()
        self.m.VerifyAll()
        self.assertEqual(('CREATE', 'COMPLETE'), stack.state)

        # test that update policy is loaded
        current_grp = stack['WebServerGroup']
        self.assertIn('AutoScalingRollingUpdate', current_grp.update_policy)
        current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
        self.assertTrue(current_policy)
        self.assertTrue(len(current_policy) > 0)
        init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
        init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
        init_batch_sz = int(init_roll_updt['MaxBatchSize'])
        self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))

        # test that physical resource name of launch configuration is used
        conf = stack['LaunchConfig']
        conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
        self.assertThat(conf.FnGetRefId(), MatchesRegex(conf_name_pattern))

        # get launch conf name here to compare result after update
        conf_name = self.get_launch_conf_name(stack, 'WebServerGroup')

        # test the number of instances created
        nested = stack['WebServerGroup'].nested()
        self.assertEqual(size, len(nested.resources))

        # clean up for next test
        self.m.UnsetStubs()

        # saves info from initial list of instances for comparison later
        init_instances = current_grp.get_instances()
        init_names = current_grp.get_instance_names()
        init_images = [(i.name, i.t['Properties']['ImageId'])
                       for i in init_instances]
        init_flavors = [(i.name, i.t['Properties']['InstanceType'])
                        for i in init_instances]

        # test stack update
        updated_tmpl = template_format.parse(updt_template)
        updated_stack = utils.parse_stack(updated_tmpl)
        new_grp_tmpl = updated_tmpl['Resources']['WebServerGroup']
        new_updt_pol = new_grp_tmpl['UpdatePolicy']['AutoScalingRollingUpdate']
        new_batch_sz = int(new_updt_pol['MaxBatchSize'])
        self.assertNotEqual(new_batch_sz, init_batch_sz)
        self._stub_validate()
        if update_replace:
            self._stub_grp_replace(size, size, num_reloads_expected_on_updt)
        else:
            self._stub_grp_update(num_creates_expected_on_updt,
                                  num_deletes_expected_on_updt,
                                  num_reloads_expected_on_updt)
        self.stub_wallclock()
        self.m.ReplayAll()
        stack.update(updated_stack)
        self.m.VerifyAll()
        self.assertEqual(('UPDATE', 'COMPLETE'), stack.state)

        # test that the update policy is updated
        updated_grp = stack['WebServerGroup']
        updt_instances = updated_grp.get_instances()
        self.assertIn('AutoScalingRollingUpdate', updated_grp.update_policy)
        updated_policy = updated_grp.update_policy['AutoScalingRollingUpdate']
        self.assertTrue(updated_policy)
        self.assertTrue(len(updated_policy) > 0)
        self.assertEqual(new_batch_sz, int(updated_policy['MaxBatchSize']))

        # test that the launch configuration is replaced
        updated_conf_name = self.get_launch_conf_name(stack, 'WebServerGroup')
        self.assertNotEqual(conf_name, updated_conf_name)

        # test that the group size is unchanged
        updt_names = updated_grp.get_instance_names()
        self.assertEqual(len(init_names), len(updt_names))

        # test that appropriate number of instance names are the same
        matched_names = set(updt_names) & set(init_names)
        self.assertEqual(num_updates_expected_on_updt, len(matched_names))

        # test that the appropriate number of new instances are created
        self.assertEqual(len(set(updt_names) - set(init_names)),
                         num_creates_expected_on_updt)

        # test that the appropriate number of instances are deleted
        self.assertEqual(len(set(init_names) - set(updt_names)),
                         num_deletes_expected_on_updt)

        # test that the older instances are the ones being deleted
        if num_deletes_expected_on_updt > 0:
            deleted_names = init_names[:num_deletes_expected_on_updt]
            for name in deleted_names:
                self.assertNotIn(name, updt_names)

        # test if instances are updated
        if update_replace:
            # test that the image id is changed for all instances
            updt_images = [(i.name, i.t['Properties']['ImageId'])
                           for i in updt_instances]
            self.assertEqual(0, len(set(updt_images) & set(init_images)))
        else:
            # test that instance type is changed for all instances
            updt_flavors = [(i.name, i.t['Properties']['InstanceType'])
                            for i in updt_instances]
            self.assertEqual(0, len(set(updt_flavors) & set(init_flavors)))
    def test_autoscaling_group_update_policy_check_timeout(self):

        # setup stack from the initial template
        tmpl = template_format.parse(asg_tmpl_with_updt_policy)
        stack = utils.parse_stack(tmpl)

        # test stack create
        size = int(stack['WebServerGroup'].properties['MinSize'])
        self._stub_grp_create(size)
        self.m.ReplayAll()
        stack.create()
        self.m.VerifyAll()
        self.assertEqual(('CREATE', 'COMPLETE'), stack.state)

        # test that update policy is loaded
        current_grp = stack['WebServerGroup']
        self.assertIn('AutoScalingRollingUpdate', current_grp.update_policy)
        current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
        self.assertTrue(current_policy)
        self.assertTrue(len(current_policy) > 0)
        init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
        init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
        init_batch_sz = int(init_roll_updt['MaxBatchSize'])
        self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))

        # test the number of instances created
        nested = stack['WebServerGroup'].nested()
        self.assertEqual(size, len(nested.resources))

        # clean up for next test
        self.m.UnsetStubs()

        # modify the pause time and test for error
        new_pause_time = 'PT30M'
        updt_template = json.loads(copy.deepcopy(asg_tmpl_with_updt_policy))
        group = updt_template['Resources']['WebServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['PauseTime'] = new_pause_time
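        # a 30-minute pause per batch pushes the rolling update past the
        # stack timeout, so the update below is expected to fail with the
        # timeout error asserted at the end of this test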
        config = updt_template['Resources']['LaunchConfig']
        config['Properties']['ImageId'] = 'bar'
        updated_tmpl = template_format.parse(json.dumps(updt_template))
        updated_stack = utils.parse_stack(updated_tmpl)
        self._stub_grp_replace(num_creates_expected_on_updt=0,
                               num_deletes_expected_on_updt=0,
                               num_reloads_expected_on_updt=1)
        self.m.ReplayAll()
        stack.update(updated_stack)
        self.m.VerifyAll()
        self.assertEqual(('UPDATE', 'FAILED'), stack.state)

        # test that the update policy is updated
        updated_grp = stack['WebServerGroup']
        self.assertIn('AutoScalingRollingUpdate', updated_grp.update_policy)
        updated_policy = updated_grp.update_policy['AutoScalingRollingUpdate']
        self.assertTrue(updated_policy)
        self.assertTrue(len(updated_policy) > 0)
        self.assertEqual(new_pause_time, updated_policy['PauseTime'])

        # test that the error message matches
        expected_error_message = ('The current UpdatePolicy will result '
                                  'in stack update timeout.')
        self.assertIn(expected_error_message, stack.status_reason)

    def test_parse_without_update_policy(self):
        tmpl = template_format.parse(asg_tmpl_without_updt_policy)
        stack = utils.parse_stack(tmpl)
        stack.validate()
        grp = stack['WebServerGroup']
        self.assertFalse(grp.update_policy['AutoScalingRollingUpdate'])

    def test_parse_with_bad_update_policy(self):
        tmpl = template_format.parse(asg_tmpl_with_bad_updt_policy)
        stack = utils.parse_stack(tmpl)
        self.assertRaises(exception.StackValidationFailed, stack.validate)
Example #42
    def test_net(self):
        # Create script
        neutronclient.Client.create_network({
            'network': {
                'name': u'the_network',
                'admin_state_up': True,
                'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
                'port_security_enabled': False,
                'shared': True}
        }).AndReturn({"network": {
            "status": "BUILD",
            "subnets": [],
            "name": "name",
            "admin_state_up": True,
            "shared": True,
            "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "mtu": 0
        }})

        neutronclient.Client.list_dhcp_agent_hosting_networks(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({"agents": []})

        neutronclient.Client.add_network_to_dhcp_agent(
            '28c25a04-3f73-45a7-a2b4-59e183943ddc',
            {'network_id': u'fc68ea2c-b60b-4b4f-bd82-94ec81110766'}
        ).AndReturn(None)

        neutronclient.Client.show_network(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({"network": {
            "status": "BUILD",
            "subnets": [],
            "name": "name",
            "admin_state_up": True,
            "shared": True,
            "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "mtu": 0
        }})

        neutronclient.Client.show_network(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({"network": {
            "status": "ACTIVE",
            "subnets": [],
            "name": "name",
            "admin_state_up": True,
            "shared": True,
            "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "mtu": 0
        }})

        neutronclient.Client.show_network(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndRaise(qe.NetworkNotFoundClient(status_code=404))
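        # this 404 makes the first FnGetAtt('status') call below return None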

        neutronclient.Client.show_network(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({"network": {
            "status": "ACTIVE",
            "subnets": [],
            "name": "name",
            "admin_state_up": True,
            "shared": True,
            "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "mtu": 0
        }})

        neutronclient.Client.show_network(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({"network": {
            "status": "ACTIVE",
            "subnets": [],
            "name": "name",
            "admin_state_up": True,
            "shared": True,
            "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "mtu": 0
        }})

        # Update script
        neutronclient.Client.list_dhcp_agent_hosting_networks(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({
            "agents": [{
                "admin_state_up": True,
                "agent_type": "DHCP agent",
                "alive": True,
                "binary": "neutron-dhcp-agent",
                "configurations": {
                    "dhcp_driver": "DummyDriver",
                    "dhcp_lease_duration": 86400,
                    "networks": 0,
                    "ports": 0,
                    "subnets": 0,
                    "use_namespaces": True},
                "created_at": "2014-03-20 05:12:34",
                "description": None,
                "heartbeat_timestamp": "2014-03-20 05:12:34",
                "host": "hostname",
                "id": "28c25a04-3f73-45a7-a2b4-59e183943ddc",
                "started_at": "2014-03-20 05:12:34",
                "topic": "dhcp_agent"
            }]
        })

        neutronclient.Client.add_network_to_dhcp_agent(
            'bb09cfcd-5277-473d-8336-d4ed8628ae68',
            {'network_id': u'fc68ea2c-b60b-4b4f-bd82-94ec81110766'}
        ).AndReturn(None)

        neutronclient.Client.remove_network_from_dhcp_agent(
            '28c25a04-3f73-45a7-a2b4-59e183943ddc',
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn(None)
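        # the dhcp_agent_ids change moves the network from the original DHCP
        # agent to the newly listed one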

        neutronclient.Client.update_network(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
            {'network': {
                'shared': True,
                'name': 'mynet',
                'admin_state_up': True,
                'port_security_enabled': False
            }}).AndReturn(None)

        # Delete script
        neutronclient.Client.delete_network(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn(None)

        neutronclient.Client.show_network(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndRaise(qe.NetworkNotFoundClient(status_code=404))

        neutronclient.Client.delete_network(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndRaise(qe.NetworkNotFoundClient(status_code=404))
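        # the 404 from show_network confirms the delete completed; the second
        # delete_network raising 404 lets a repeated delete succeed as a no-op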

        self.m.ReplayAll()
        t = template_format.parse(neutron_template)
        stack = utils.parse_stack(t)
        rsrc = self.create_net(t, stack, 'network')

        # assert the implicit dependency between the gateway and the interface
        deps = stack.dependencies[stack['router_interface']]
        self.assertIn(stack['gateway'], deps)

        # assert the implicit dependency between the gateway and the subnet
        deps = stack.dependencies[stack['subnet']]
        self.assertIn(stack['gateway'], deps)

        rsrc.validate()

        ref_id = rsrc.FnGetRefId()
        self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766', ref_id)

        self.assertIsNone(rsrc.FnGetAtt('status'))
        self.assertEqual('ACTIVE', rsrc.FnGetAtt('status'))
        self.assertEqual(0, rsrc.FnGetAtt('mtu'))
        self.assertRaises(
            exception.InvalidTemplateAttribute, rsrc.FnGetAtt, 'Foo')
        prop_diff = {
            "name": "mynet",
            "dhcp_agent_ids": [
                "bb09cfcd-5277-473d-8336-d4ed8628ae68"
            ]
        }
        props = copy.copy(rsrc.properties.data)
        props.update(prop_diff)
        update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
                                                      props)
        rsrc.handle_update(update_snippet, {}, prop_diff)

        scheduler.TaskRunner(rsrc.delete)()
        rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE, 'to delete again')
        scheduler.TaskRunner(rsrc.delete)()
        self.m.VerifyAll()
Example #43
    def test_update_port(self):
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties'].pop('fixed_ips')
        stack = utils.parse_stack(t)

        self.patchobject(neutronV20, 'find_resourceid_by_name_or_id',
                         return_value='net1234')
        create_port = self.patchobject(neutronclient.Client, 'create_port')
        update_port = self.patchobject(neutronclient.Client, 'update_port')
        fake_groups_list = {
            'security_groups': [
                {
                    'tenant_id': 'dc4b074874244f7693dd65583733a758',
                    'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
                    'name': 'default',
                    'security_group_rules': [],
                    'description': 'no protocol'
                }
            ]
        }
        self.patchobject(neutronclient.Client, 'list_security_groups',
                         return_value=fake_groups_list)

        props = {'network_id': u'net1234',
                 'name': utils.PhysName(stack.name, 'port'),
                 'admin_state_up': True,
                 'device_owner': u'network:dhcp'}

        update_props = props.copy()
        update_props['security_groups'] = self.secgrp
        update_props['value_specs'] = self.value_specs
        if self.fixed_ips:
            update_props['fixed_ips'] = self.fixed_ips
        update_props['allowed_address_pairs'] = self.addr_pair
        update_props['binding:vnic_type'] = self.vnic_type

        update_dict = update_props.copy()

        if update_props['security_groups'] is None:
            update_dict['security_groups'] = ['default']

        if update_props['name'] is None:
            update_dict['name'] = utils.PhysName(stack.name, 'test_subnet')

        value_specs = update_dict.pop('value_specs')
        if value_specs:
            for key, value in six.iteritems(value_specs):
                update_dict[key] = value

        # create port
        port = stack['port']
        self.assertIsNone(scheduler.TaskRunner(port.handle_create)())
        create_port.assert_called_once_with({'port': props})
        # update port
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      update_props)
        self.assertIsNone(scheduler.TaskRunner(port.handle_update,
                                               update_snippet, {},
                                               update_props)())

        update_port.assert_called_once_with(port.resource_id,
                                            {'port': update_dict})
        # update with empty prop_diff
        scheduler.TaskRunner(port.handle_update, update_snippet, {}, {})()
        self.assertEqual(1, update_port.call_count)
Example #44
    def test_get_port_attributes(self):
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties'].pop('fixed_ips')
        stack = utils.parse_stack(t)

        subnet_dict = {'name': 'test-subnet', 'enable_dhcp': True,
                       'network_id': 'net1234', 'dns_nameservers': [],
                       'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
                       'ipv6_ra_mode': None, 'cidr': '10.0.0.0/24',
                       'allocation_pools': [{'start': '10.0.0.2',
                                             'end': u'10.0.0.254'}],
                       'gateway_ip': '10.0.0.1', 'ipv6_address_mode': None,
                       'ip_version': 4, 'host_routes': [],
                       'id': '6dd609ad-d52a-4587-b1a0-b335f76062a5'}
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronclient.Client.create_port({'port': {
            'network_id': u'net1234',
            'name': utils.PhysName(stack.name, 'port'),
            'admin_state_up': True,
            'device_owner': u'network:dhcp'}}
        ).AndReturn({'port': {
            'status': 'BUILD',
            'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        }})
        neutronclient.Client.show_subnet(
            'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e'
        ).AndReturn({'subnet': subnet_dict})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).MultipleTimes().AndReturn({'port': {
            'status': 'DOWN',
            'name': utils.PhysName(stack.name, 'port'),
            'allowed_address_pairs': [],
            'admin_state_up': True,
            'network_id': 'net1234',
            'device_id': 'dc68eg2c-b60g-4b3f-bd82-67ec87650532',
            'mac_address': 'fa:16:3e:75:67:60',
            'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
            'security_groups': ['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
            'fixed_ips': [{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
                           'ip_address': '10.0.0.2'}]
        }})
        self.m.ReplayAll()

        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.assertEqual('DOWN', port.FnGetAtt('status'))
        self.assertEqual([], port.FnGetAtt('allowed_address_pairs'))
        self.assertTrue(port.FnGetAtt('admin_state_up'))
        self.assertEqual('net1234', port.FnGetAtt('network_id'))
        self.assertEqual('fa:16:3e:75:67:60', port.FnGetAtt('mac_address'))
        self.assertEqual(utils.PhysName(stack.name, 'port'),
                         port.FnGetAtt('name'))
        self.assertEqual('dc68eg2c-b60g-4b3f-bd82-67ec87650532',
                         port.FnGetAtt('device_id'))
        self.assertEqual('58a61fc3992944ce971404a2ece6ff98',
                         port.FnGetAtt('tenant_id'))
        self.assertEqual(['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
                         port.FnGetAtt('security_groups'))
        self.assertEqual([{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
                           'ip_address': '10.0.0.2'}],
                         port.FnGetAtt('fixed_ips'))
        self.assertEqual([subnet_dict], port.FnGetAtt('subnets'))
        self.assertRaises(exception.InvalidTemplateAttribute,
                          port.FnGetAtt, 'Foo')
        self.m.VerifyAll()
Example #45
    def test_parse_yaml_template(self):
        tmpl_str = 'heat_template_version: 2013-05-23'
        expected = {'heat_template_version': '2013-05-23'}
        self.assertEqual(expected, template_format.parse(tmpl_str))
Example #46
    def test_validate_failed_with_string_None_protocol(self):
        snippet = template_format.parse(firewall_rule_template)
        stack = utils.parse_stack(snippet)
        rsrc = stack['firewall_rule']
        rsrc.t['Properties']['protocol'] = 'None'
        self.assertRaises(exception.StackValidationFailed, rsrc.validate)
Example #47
    def test_get_attribute_autoscaling(self):
        t = template_format.parse(heat_autoscaling_group_template)
        tmpl = templatem.Template(t)
        stack = parser.Stack(utils.dummy_context(), 'test_att', tmpl)
        rsrc = stack['my_autoscaling_group']
        self.assertEqual(0, rsrc.FnGetAtt(rsrc.CURRENT_SIZE))
Example #48
    def create_stack(self, templ):
        t = template_format.parse(templ)
        self.stack = self.parse_stack(t)
        self.assertIsNone(self.stack.create())
        return self.stack
Example #49
    def validate_stack(self, template):
        t = template_format.parse(template)
        stack = self.parse_stack(t)
        res = stack.validate()
        self.assertIsNone(res)
        return stack
def mock_template(templ_vers, templ_class, test_templ=f5_bigip_defn):
    '''Mock a Heat template for the Kilo version.'''
    templ_dict = template_format.parse(test_templ)
    return templ_dict
Example #51
    def create_stack(self, template):
        t = template_format.parse(template)
        stack = utils.parse_stack(t)
        stack.create()
        self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
        return stack
Example #52
    def test_remote_stack_refid(self):
        t = template_format.parse(parent_stack_template)
        stack = utils.parse_stack(t)
        rsrc = stack['remote_stack']
        rsrc.resource_id = 'xyz'
        self.assertEqual('xyz', rsrc.FnGetRefId())
Example #53
    def test_child_template_when_fetching_file_fails(self):
        urlfetch.get.side_effect = exceptions.RequestException()
        t = template_format.parse(self.test_template)
        stack = self.parse_stack(t)
        nested_stack = stack['the_nested']
        self.assertRaises(ValueError, nested_stack.child_template)
    def _create_test_instance_with_nic(self, return_server, name):
        stack_name = '%s_s' % name
        t = template_format.parse(wp_template_with_nic)
        kwargs = {'KeyName': 'test',
                  'InstanceType': 'm1.large',
                  'SubnetId': '4156c7a5-e8c4-4aff-a6e1-8f3c7bc83861'}
        tmpl = template.Template(t,
                                 env=environment.Environment(kwargs))
        self.stack = parser.Stack(utils.dummy_context(), stack_name, tmpl,
                                  stack_id=str(uuid.uuid4()))
        image_id = 'CentOS 5.2'
        t['Resources']['WebServer']['Properties']['ImageId'] = image_id

        resource_defns = self.stack.t.resource_definitions(self.stack)
        nic = net_interfaces.NetworkInterface('%s_nic' % name,
                                              resource_defns['nic1'],
                                              self.stack)

        instance = instances.Instance('%s_name' % name,
                                      resource_defns['WebServer'], self.stack)
        metadata = instance.metadata_get()

        self._mock_get_image_id_success(image_id, 1)
        self.stub_SubnetConstraint_validate()
        self.m.StubOutWithMock(nic, 'client')
        nic.client().AndReturn(FakeNeutron())

        self.m.StubOutWithMock(neutron.NeutronClientPlugin, '_create')
        neutron.NeutronClientPlugin._create().MultipleTimes().AndReturn(
            FakeNeutron())

        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().AndReturn(self.fc)

        # need to resolve the template functions
        server_userdata = instance.client_plugin().build_userdata(
            metadata,
            instance.t['Properties']['UserData'],
            'ec2-user')
        self.m.StubOutWithMock(nova.NovaClientPlugin, 'build_userdata')
        nova.NovaClientPlugin.build_userdata(
            metadata,
            instance.t['Properties']['UserData'],
            'ec2-user').AndReturn(server_userdata)

        self.m.StubOutWithMock(self.fc.servers, 'create')
        self.fc.servers.create(
            image=1, flavor=3, key_name='test',
            name=utils.PhysName(stack_name, instance.name),
            security_groups=None,
            userdata=server_userdata, scheduler_hints=None, meta=None,
            nics=[{'port-id': '64d913c1-bcb1-42d2-8f0a-9593dbcaf251'}],
            availability_zone=None,
            block_device_mapping=None).AndReturn(
                return_server)
        self.m.ReplayAll()

        # create network interface
        scheduler.TaskRunner(nic.create)()
        self.stack.resources["nic1"] = nic

        scheduler.TaskRunner(instance.create)()
        return instance
Example #55
    def test_port_needs_update_network(self):
        net1 = '9cfe6c74-c105-4906-9a1f-81d9064e9bca'
        net2 = '0064eec9-5681-4ba7-a745-6f8e32db9503'
        props = {
            'network_id': net1,
            'name': 'test_port',
            'device_owner': u'network:dhcp'
        }
        create_kwargs = props.copy()
        create_kwargs['admin_state_up'] = True

        self.find_mock.side_effect = [net1] * 8 + [net2] * 2 + [net1]
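        # each name/ID lookup consumes one entry: eight resolve to net1, two
        # resolve to net2 (forcing replacement), and the final lookup resolves
        # to net1 again; 11 calls are asserted at the end of the test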
        self.create_mock.return_value = {
            'port': {
                "status": "ACTIVE",
                "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
            }
        }
        self.port_show_mock.return_value = {
            'port': {
                "status": "ACTIVE",
                "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
                "fixed_ips": {
                    "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
                    "ip_address": "10.0.0.2"
                }
            }
        }

        # create port with network_id
        tmpl = neutron_port_template.replace(
            'network: net1234',
            'network_id: 9cfe6c74-c105-4906-9a1f-81d9064e9bca')
        t = template_format.parse(tmpl)
        t['resources']['port']['properties'].pop('fixed_ips')
        t['resources']['port']['properties']['name'] = 'test_port'
        stack = utils.parse_stack(t)

        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.assertEqual((port.CREATE, port.COMPLETE), port.state)
        self.create_mock.assert_called_once_with({'port': create_kwargs})

        # Switch from network_id=ID to network=ID (no replace)
        new_props = props.copy()
        new_props['network'] = new_props.pop('network_id')
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)

        scheduler.TaskRunner(port.update, update_snippet)()
        self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
        self.assertEqual(0, self.update_mock.call_count)

        # Switch from network=ID to network=NAME (no replace)
        new_props['network'] = 'net1234'
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)

        scheduler.TaskRunner(port.update, update_snippet)()
        self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
        self.assertEqual(0, self.update_mock.call_count)

        # Switch to a different network (replace)
        new_props['network'] = 'net5678'
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)
        updater = scheduler.TaskRunner(port.update, update_snippet)
        self.assertRaises(resource.UpdateReplace, updater)
        self.assertEqual(11, self.find_mock.call_count)
Example #56
    def test_lb(self):

        tmpl = template_format.parse(as_template)

        network_body = {
            "network": {
                "id": str(uuid.uuid4()),
                "name": "testnet",
                "admin_state_up": True
            }
        }
        subnet_body = {
            "subnet": {
                "name": "testsubnet",
                "id": str(uuid.uuid4()),
                "network_id": network_body['network']['id'],
                "ip_version": 4,
                "cidr": "10.0.3.0/24",
                "allocation_pools": [{
                    "start": "10.0.3.20",
                    "end": "10.0.3.150"
                }],
                "gateway_ip": "10.0.3.1"
            }
        }

        self.params["SubnetId"] = subnet_body['subnet']['id']
        mon_block = {
            'health_monitor': tmpl['Resources']['myMonitor']['Properties']
        }
        mon_block['health_monitor']['admin_state_up'] = True
        mon_ret_block = copy.deepcopy(mon_block)
        mon_ret_block['health_monitor']['id'] = str(uuid.uuid4())
        mon_ret_block['health_monitor']['status'] = 'ACTIVE'

        pool_block = {'pool': {}}
        tmp_pool_block = tmpl['Resources']['myPool']['Properties']
        for val in ['lb_method', 'protocol', 'name', 'description']:
            pool_block['pool'][val] = tmp_pool_block[val]
        pool_block['pool']['admin_state_up'] = True
        pool_block['pool']['subnet_id'] = self.params['SubnetId']
        pool_ret_block = copy.deepcopy(pool_block)
        pool_ret_block['pool']['id'] = str(uuid.uuid4())
        pool_ret_block['pool']['status'] = 'ACTIVE'

        tmp_vip_block = tmp_pool_block.pop('vip')
        vip_block = {
            'vip': {
                'protocol': pool_block['pool']['protocol'],
                'description': tmp_vip_block['description'],
                'admin_state_up': True,
                'subnet_id': self.params['SubnetId'],
                'connection_limit': tmp_vip_block['connection_limit'],
                'pool_id': pool_ret_block['pool']['id'],
                'address': tmp_vip_block['address'],
                'protocol_port': tmp_vip_block['protocol_port'],
                'name': tmp_vip_block['name']
            }
        }
        vip_ret_block = copy.deepcopy(vip_block)
        vip_ret_block['vip']['id'] = str(uuid.uuid4())
        vip_ret_block['vip']['status'] = 'ACTIVE'

        port_block = {
            'port': {
                'network_id': network_body['network']['id'],
                'fixed_ips': [{
                    'subnet_id': subnet_body['subnet']['id'],
                }],
                'admin_state_up': True
            }
        }
        port_ret_block = copy.deepcopy(port_block)
        port_ret_block['port']['id'] = str(uuid.uuid4())

        membera_block = {
            'member': {
                'protocol_port': 8080,
                'pool_id': pool_ret_block['pool']['id'],
                'address': '1.2.3.4'
            }
        }
        membera_ret_block = copy.deepcopy(membera_block)
        membera_ret_block['member']['id'] = str(uuid.uuid4())

        memberb_block = {
            'member': {
                'protocol_port': 8080,
                'pool_id': pool_ret_block['pool']['id'],
                'address': '1.2.3.5'
            }
        }
        memberb_ret_block = copy.deepcopy(memberb_block)
        memberb_ret_block['member']['id'] = str(uuid.uuid4())

        memberc_block = {
            'member': {
                'protocol_port': 8080,
                'pool_id': pool_ret_block['pool']['id'],
                'address': '1.2.3.6'
            }
        }
        memberc_ret_block = copy.deepcopy(memberc_block)
        memberc_ret_block['member']['id'] = str(uuid.uuid4())
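        # one pool member per autoscaled instance, all listening on port 8080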

        class id_type(object):
            def __init__(self, id, name):
                self.id = id
                self.name = name

        instances = {}

        clients.OpenStackClients.keystone().AndReturn(
            fakes.FakeKeystoneClient())

        clients.neutronclient.Client.create_health_monitor(mon_block).\
            AndReturn(mon_ret_block)

        clients.neutronclient.Client.create_pool(pool_block).\
            AndReturn(pool_ret_block)

        clients.neutronclient.Client.associate_health_monitor(
            pool_ret_block['pool']['id'], {
                'health_monitor': {
                    'id': mon_ret_block['health_monitor']['id']
                }
            }).AndReturn(None)

        clients.neutronclient.Client.create_vip(vip_block).\
            AndReturn(vip_ret_block)

        clients.neutronclient.Client.show_pool(pool_ret_block['pool']['id']).\
            AndReturn(pool_ret_block)

        clients.neutronclient.Client.show_vip(vip_ret_block['vip']['id']).\
            AndReturn(vip_ret_block)

        clients.OpenStackClients.keystone().AndReturn(
            fakes.FakeKeystoneClient())

        clients.OpenStackClients.keystone().AndReturn(
            fakes.FakeKeystoneClient())

        parser.Stack.validate()
        instid = str(uuid.uuid4())
        instance.Instance.handle_create().AndReturn(instid)
        instance.Instance.check_create_complete(mox.IgnoreArg())\
            .AndReturn(False)
        instance.Instance.check_create_complete(mox.IgnoreArg())\
            .AndReturn(True)

        image.ImageConstraint.validate(
            mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(True)

        nova_utils.server_to_ipaddress(mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn('1.2.3.4')

        clients.neutronclient.Client.create_member(membera_block).\
            AndReturn(membera_ret_block)

        instances[instid] = membera_ret_block['member']['id']

        # Start of update
        clients.OpenStackClients.keystone().AndReturn(
            fakes.FakeKeystoneClient())

        parser.Stack.validate()
        instid = str(uuid.uuid4())
        instance.Instance.handle_create().AndReturn(instid)
        instance.Instance.check_create_complete(mox.IgnoreArg())\
            .AndReturn(False)
        instance.Instance.check_create_complete(mox.IgnoreArg())\
            .AndReturn(True)
        instances[instid] = memberb_ret_block['member']['id']

        instid = str(uuid.uuid4())
        instance.Instance.handle_create().AndReturn(instid)
        instance.Instance.check_create_complete(mox.IgnoreArg())\
            .AndReturn(False)
        instance.Instance.check_create_complete(mox.IgnoreArg())\
            .AndReturn(True)

        nova_utils.server_to_ipaddress(mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn('1.2.3.5')

        clients.neutronclient.Client.create_member(memberb_block).\
            AndReturn(memberb_ret_block)

        nova_utils.server_to_ipaddress(mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn('1.2.3.6')

        clients.neutronclient.Client.create_member(memberc_block).\
            AndReturn(memberc_ret_block)

        self.m.ReplayAll()

        # Start of stack create
        env = {'parameters': self.params}
        tmpl = template_format.parse(as_template)

        stack = parser.Stack(self.ctx, 'update_test_stack',
                             template.Template(tmpl),
                             environment.Environment(env))

        stack.store()
        stack.create()
        self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
                         stack.state)

        # Start of stack update
        stack2 = parser.Stack.load(self.ctx, stack_id=stack.id)

        tmpl2 = copy.deepcopy(tmpl)
        tmpl2['Resources']['SvrGrp']['Properties']['DesiredCapacity'] = '3'

        update_stack = parser.Stack(self.ctx, 'update_test_stack',
                                    template.Template(tmpl2),
                                    environment.Environment(env))
        stack2.update(update_stack)
        self.assertEqual((parser.Stack.UPDATE, parser.Stack.COMPLETE),
                         stack2.state)

        members = db_api.resource_data_get_all(stack['ElasticLoadBalancer'])
        self.assertEqual(3, len(members.keys()))

        self.m.VerifyAll()
Example #57
class CloudNetworkTest(common.HeatTestCase):

    _template = template_format.parse("""
    heat_template_version: 2013-05-23
    description: Test stack for Rackspace Cloud Networks
    resources:
      cnw:
        type: Rackspace::Cloud::Network
        properties:
          label: test_network
          cidr: 172.16.0.0/24
    """)

    def setUp(self):
        super(CloudNetworkTest, self).setUp()
        resource._register_class("Rackspace::Cloud::Network",
                                 cloudnetworks.CloudNetwork)

    def _parse_stack(self):
        self.stack = utils.parse_stack(self._template,
                                       stack_name=self.__class__.__name__)

    def _setup_stack(self, mock_client, *args):
        self.fake_cnw = FakeClient(*args)
        mock_client.return_value = self.fake_cnw
        self._parse_stack()
        self.stack.create()
        self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
                         self.stack.state)
        res = self.stack['cnw']
        self.assertEqual((res.CREATE, res.COMPLETE), res.state)

    def test_attributes(self, mock_client):
        self._setup_stack(mock_client)
        res = self.stack['cnw']
        template_resource = self._template['resources']['cnw']
        expect_label = template_resource['properties']['label']
        expect_cidr = template_resource['properties']['cidr']
        self.assertEqual(expect_label, res.FnGetAtt('label'))
        self.assertEqual(expect_cidr, res.FnGetAtt('cidr'))

    def test_create_bad_cidr(self, mock_client):
        prop = self._template['resources']['cnw']['properties']
        prop['cidr'] = "bad cidr"
        self._parse_stack()
        exc = self.assertRaises(exception.StackValidationFailed,
                                self.stack.validate)
        self.assertIn("Invalid net cidr", six.text_type(exc))
        # reset property
        prop['cidr'] = "172.16.0.0/24"

    def test_check(self, mock_client):
        self._setup_stack(mock_client)
        res = self.stack['cnw']
        scheduler.TaskRunner(res.check)()
        self.assertEqual((res.CHECK, res.COMPLETE), res.state)

        self.fake_cnw.networks = []
        exc = self.assertRaises(exception.ResourceFailure,
                                scheduler.TaskRunner(res.check))
        self.assertEqual((res.CHECK, res.FAILED), res.state)
        self.assertIn('No network', str(exc))

    def test_delete(self, mock_client):
        self._setup_stack(mock_client)
        res = self.stack['cnw']
        res_id = res.FnGetRefId()
        scheduler.TaskRunner(res.delete)()
        self.assertEqual((res.DELETE, res.COMPLETE), res.state)
        exc = self.assertRaises(NotFound, self.fake_cnw.get, res_id)
        self.assertIn(res_id, six.text_type(exc))

    def test_delete_no_network_created(self, mock_client):
        self.fake_cnw = FakeClientRaiseException()
        mock_client.return_value = self.fake_cnw
        self._parse_stack()
        self.stack.create()
        self.assertEqual((self.stack.CREATE, self.stack.FAILED),
                         self.stack.state)
        res = self.stack['cnw']
        self.assertEqual((res.CREATE, res.FAILED), res.state)
        scheduler.TaskRunner(res.delete)()
        self.assertEqual((res.DELETE, res.COMPLETE), res.state)

    def test_delete_in_use(self, mock_client):
        self._setup_stack(mock_client)
        res = self.stack['cnw']
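        # delete() raises NetworkInUse once and then succeeds; get() then
        # raises NotFound, so check_delete_complete sees the network as gone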
        fake_network = res.network()
        fake_network.delete = mock.Mock()
        fake_network.delete.side_effect = [cloudnetworks.NetworkInUse(), True]
        mock_client.return_value = fake_network
        fake_network.get = mock.Mock()
        fake_network.get.side_effect = [cloudnetworks.NotFound()]

        scheduler.TaskRunner(res.delete)()
        self.assertEqual((res.DELETE, res.COMPLETE), res.state)

    def test_delete_not_complete(self, mock_client):
        self._setup_stack(mock_client)
        res = self.stack['cnw']
        mock_client.get = mock.Mock()

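        # handle_delete returns an in-progress task; while the network is
        # still retrievable, check_delete_complete must report False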
        task = res.handle_delete()
        self.assertFalse(res.check_delete_complete(task))

    def test_delete_not_found(self, mock_client):
        self._setup_stack(mock_client)
        self.fake_cnw.networks = []
        res = self.stack['cnw']
        scheduler.TaskRunner(res.delete)()
        self.assertEqual((res.DELETE, res.COMPLETE), res.state)
Example #58
    def test_router(self):
        t = template_format.parse(neutron_template)
        stack = utils.parse_stack(t)

        neutronclient.Client.create_router({
            'router': {
                'name': utils.PhysName(stack.name, 'router'),
                'admin_state_up': True,
            }
        }).AndReturn({
            "router": {
                "status": "BUILD",
                "external_gateway_info": None,
                "name": utils.PhysName(stack.name, 'router'),
                "admin_state_up": True,
                "tenant_id": "3e21026f2dc94372b105808c0e721661",
                "id": "3e46229d-8fce-4733-819a-b5fe630550f8",
            }
        })
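        # Create script: no agent hosts the router yet, so it is scheduled
        # onto the requested L3 agent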
        neutronclient.Client.list_l3_agent_hosting_routers(
            u'3e46229d-8fce-4733-819a-b5fe630550f8').AndReturn({"agents": []})
        neutronclient.Client.add_router_to_l3_agent(
            u'792ff887-6c85-4a56-b518-23f24fa65581', {
                'router_id': u'3e46229d-8fce-4733-819a-b5fe630550f8'
            }).AndReturn(None)
        neutronclient.Client.show_router(
            '3e46229d-8fce-4733-819a-b5fe630550f8').AndReturn({
                "router": {
                    "status": "BUILD",
                    "external_gateway_info": None,
                    "name": utils.PhysName(stack.name, 'router'),
                    "admin_state_up": True,
                    "tenant_id": "3e21026f2dc94372b105808c0e721661",
                    "routes": [],
                    "id": "3e46229d-8fce-4733-819a-b5fe630550f8"
                }
            })
        neutronclient.Client.show_router(
            '3e46229d-8fce-4733-819a-b5fe630550f8').AndReturn({
                "router": {
                    "status": "ACTIVE",
                    "external_gateway_info": None,
                    "name": utils.PhysName(stack.name, 'router'),
                    "admin_state_up": True,
                    "tenant_id": "3e21026f2dc94372b105808c0e721661",
                    "routes": [],
                    "id": "3e46229d-8fce-4733-819a-b5fe630550f8"
                }
            })
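        # this 404 makes the first attribute fetch below resolve to None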
        neutronclient.Client.show_router(
            '3e46229d-8fce-4733-819a-b5fe630550f8').AndRaise(
                qe.NeutronClientException(status_code=404))
        neutronclient.Client.show_router(
            '3e46229d-8fce-4733-819a-b5fe630550f8').AndReturn({
                "router": {
                    "status": "ACTIVE",
                    "external_gateway_info": None,
                    "name": utils.PhysName(stack.name, 'router'),
                    "admin_state_up": True,
                    "tenant_id": "3e21026f2dc94372b105808c0e721661",
                    "routes": [],
                    "id": "3e46229d-8fce-4733-819a-b5fe630550f8"
                }
            })

        # Update script
        neutronclient.Client.list_l3_agent_hosting_routers(
            u'3e46229d-8fce-4733-819a-b5fe630550f8').AndReturn({
                "agents": [{
                    "admin_state_up": True,
                    "agent_type": "L3 agent",
                    "alive": True,
                    "binary": "neutron-l3-agent",
                    "configurations": {
                        "ex_gw_ports": 1,
                        "floating_ips": 0,
                        "gateway_external_network_id": "",
                        "handle_internal_only_routers": True,
                        "interface_driver": "DummyDriver",
                        "interfaces": 1,
                        "router_id": "",
                        "routers": 1,
                        "use_namespaces": True
                    },
                    "created_at": "2014-03-11 05:00:05",
                    "description": None,
                    "heartbeat_timestamp": "2014-03-11 05:01:49",
                    "host": "l3_agent_host",
                    "id": "792ff887-6c85-4a56-b518-23f24fa65581",
                    "started_at": "2014-03-11 05:00:05",
                    "topic": "l3_agent"
                }]
            })
        neutronclient.Client.remove_router_from_l3_agent(
            u'792ff887-6c85-4a56-b518-23f24fa65581',
            u'3e46229d-8fce-4733-819a-b5fe630550f8').AndReturn(None)
        neutronclient.Client.add_router_to_l3_agent(
            u'63b3fd83-2c5f-4dad-b3ae-e0f83a40f216', {
                'router_id': u'3e46229d-8fce-4733-819a-b5fe630550f8'
            }).AndReturn(None)
        neutronclient.Client.update_router(
            '3e46229d-8fce-4733-819a-b5fe630550f8',
            {'router': {
                'name': 'myrouter',
                'admin_state_up': False
            }})
        # Update again script
        neutronclient.Client.list_l3_agent_hosting_routers(
            u'3e46229d-8fce-4733-819a-b5fe630550f8').AndReturn({
                "agents": [{
                    "admin_state_up": True,
                    "agent_type": "L3 agent",
                    "alive": True,
                    "binary": "neutron-l3-agent",
                    "configurations": {
                        "ex_gw_ports": 1,
                        "floating_ips": 0,
                        "gateway_external_network_id": "",
                        "handle_internal_only_routers": True,
                        "interface_driver": "DummyDriver",
                        "interfaces": 1,
                        "router_id": "",
                        "routers": 1,
                        "use_namespaces": True
                    },
                    "created_at": "2014-03-11 05:00:05",
                    "description": None,
                    "heartbeat_timestamp": "2014-03-11 05:01:49",
                    "host": "l3_agent_host",
                    "id": "63b3fd83-2c5f-4dad-b3ae-e0f83a40f216",
                    "started_at": "2014-03-11 05:00:05",
                    "topic": "l3_agent"
                }]
            })
        neutronclient.Client.remove_router_from_l3_agent(
            u'63b3fd83-2c5f-4dad-b3ae-e0f83a40f216',
            u'3e46229d-8fce-4733-819a-b5fe630550f8').AndReturn(None)
        neutronclient.Client.add_router_to_l3_agent(
            u'4c692423-2c5f-4dad-b3ae-e2339f58539f', {
                'router_id': u'3e46229d-8fce-4733-819a-b5fe630550f8'
            }).AndReturn(None)
        neutronclient.Client.add_router_to_l3_agent(
            u'8363b3fd-2c5f-4dad-b3ae-0f216e0f83a4', {
                'router_id': u'3e46229d-8fce-4733-819a-b5fe630550f8'
            }).AndReturn(None)
        # Delete script
        neutronclient.Client.delete_router(
            '3e46229d-8fce-4733-819a-b5fe630550f8').AndReturn(None)

        neutronclient.Client.show_router(
            '3e46229d-8fce-4733-819a-b5fe630550f8').AndRaise(
                qe.NeutronClientException(status_code=404))

        neutronclient.Client.delete_router(
            '3e46229d-8fce-4733-819a-b5fe630550f8').AndRaise(
                qe.NeutronClientException(status_code=404))

        self.m.ReplayAll()
        rsrc = self.create_router(t, stack, 'router')

        rsrc.validate()

        ref_id = rsrc.FnGetRefId()
        self.assertEqual('3e46229d-8fce-4733-819a-b5fe630550f8', ref_id)
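        # the stubbed 404 yields None on the first tenant_id fetch; the
        # following ACTIVE response yields the real value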
        self.assertIsNone(rsrc.FnGetAtt('tenant_id'))
        self.assertEqual('3e21026f2dc94372b105808c0e721661',
                         rsrc.FnGetAtt('tenant_id'))

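        # first update: rename the router, disable it, and move it to a
        # different L3 agent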
        prop_diff = {
            "admin_state_up": False,
            "name": "myrouter",
            "l3_agent_ids": ["63b3fd83-2c5f-4dad-b3ae-e0f83a40f216"]
        }
        props = copy.copy(rsrc.properties.data)
        props.update(prop_diff)
        update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
                                                      props)
        rsrc.handle_update(update_snippet, {}, prop_diff)

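        # second update: rehost the router onto two agents at once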
        prop_diff = {
            "l3_agent_ids": [
                "4c692423-2c5f-4dad-b3ae-e2339f58539f",
                "8363b3fd-2c5f-4dad-b3ae-0f216e0f83a4"
            ]
        }
        props = copy.copy(rsrc.properties.data)
        props.update(prop_diff)
        update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
                                                      props)
        rsrc.handle_update(update_snippet, {}, prop_diff)

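        # delete twice: the second run consumes the stubbed 404s and must
        # still complete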
        self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
        rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE, 'to delete again')
        self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
        self.m.VerifyAll()
Example #59
    def test_router_interface_validate(self):
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'port',
            '9577cafd-8e98-4059-a2e6-8a771b4d318e',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('9577cafd-8e98-4059-a2e6-8a771b4d318e')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'router',
            'ae478782-53c0-4434-ab16-49900c88016c',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('ae478782-53c0-4434-ab16-49900c88016c')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'subnet',
            '9577cafd-8e98-4059-a2e6-8a771b4d318e',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('9577cafd-8e98-4059-a2e6-8a771b4d318e')
        self.m.ReplayAll()
        t = template_format.parse(neutron_template)
        json = t['resources']['router_interface']
        # subnet_id and port_id together are a property conflict
        json['properties'] = {
            'router_id': 'ae478782-53c0-4434-ab16-49900c88016c',
            'subnet_id': '9577cafd-8e98-4059-a2e6-8a771b4d318e',
            'port_id': '9577cafd-8e98-4059-a2e6-8a771b4d318e'
        }
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        res = router.RouterInterface('router_interface',
                                     resource_defns['router_interface'], stack)
        self.assertRaises(exception.ResourcePropertyConflict, res.validate)
        # port_id alone is valid
        json['properties'] = {
            'router_id': 'ae478782-53c0-4434-ab16-49900c88016c',
            'port_id': '9577cafd-8e98-4059-a2e6-8a771b4d318e'
        }
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        res = router.RouterInterface('router_interface',
                                     resource_defns['router_interface'], stack)
        self.assertIsNone(res.validate())
        # subnet_id alone is valid
        json['properties'] = {
            'router_id': 'ae478782-53c0-4434-ab16-49900c88016c',
            'subnet_id': '9577cafd-8e98-4059-a2e6-8a771b4d318e'
        }
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        res = router.RouterInterface('router_interface',
                                     resource_defns['router_interface'], stack)
        self.assertIsNone(res.validate())
        # neither subnet_id nor port_id fails validation
        json['properties'] = {
            'router_id': 'ae478782-53c0-4434-ab16-49900c88016c'
        }
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        res = router.RouterInterface('router_interface',
                                     resource_defns['router_interface'], stack)
        ex = self.assertRaises(exception.PropertyUnspecifiedError,
                               res.validate)
        self.assertEqual(
            "At least one of the following properties "
            "must be specified: subnet, port.", six.text_type(ex))
        self.m.VerifyAll()
Example #60
    def child_template(self):
        # parse the nested template lazily and cache the parsed form
        if not self._parsed_nested:
            self._parsed_nested = template_format.parse(self.template_data())
        return self._parsed_nested