def test_autoscaling_group_update_policy_removed(self):

        # setup stack from the initial template
        tmpl = template_format.parse(asg_tmpl_with_updt_policy)
        stack = utils.parse_stack(tmpl)
        self.stub_ImageConstraint_validate()
        self.stub_KeypairConstraint_validate()
        self.m.ReplayAll()

        stack.validate()
        self.m.VerifyAll()
        self.m.UnsetStubs()

        # test stack create
        size = int(stack['WebServerGroup'].properties['MinSize'])
        self._stub_grp_create(size)
        self.stub_ImageConstraint_validate()
        self.m.ReplayAll()
        stack.create()
        self.m.VerifyAll()
        self.assertEqual(('CREATE', 'COMPLETE'), stack.state)

        # test that update policy is loaded
        current_grp = stack['WebServerGroup']
        self.assertIn('AutoScalingRollingUpdate', current_grp.update_policy)
        current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
        self.assertTrue(current_policy)
        self.assertGreater(len(current_policy), 0)
        init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
        init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
        init_batch_sz = int(init_roll_updt['MaxBatchSize'])
        self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))

        # test that physical resource name of launch configuration is used
        conf = stack['LaunchConfig']
        conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
        self.assertThat(conf.FnGetRefId(),
                        matchers.MatchesRegex(conf_name_pattern))

        # test the number of instances created
        nested = stack['WebServerGroup'].nested()
        self.assertEqual(size, len(nested.resources))

        # clean up for next test
        self.m.UnsetStubs()

        # test stack update
        updated_tmpl = template_format.parse(asg_tmpl_without_updt_policy)
        updated_stack = utils.parse_stack(updated_tmpl)
        self._stub_grp_replace(num_creates_expected_on_updt=0,
                               num_deletes_expected_on_updt=0,
                               num_reloads_expected_on_updt=1)
        self.m.ReplayAll()
        stack.update(updated_stack)
        self.m.VerifyAll()
        self.assertEqual(('UPDATE', 'COMPLETE'), stack.state)

        # test that update policy is removed
        updated_grp = stack['WebServerGroup']
        self.assertFalse(updated_grp.update_policy['AutoScalingRollingUpdate'])
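The _stub_grp_create and _stub_grp_replace helpers above are defined elsewhere in the test class and are not shown on this page. A minimal sketch of the mox pattern such a helper might follow, assuming the old heat.engine.resources.instance module (the names and signature here are illustrative assumptions, not the actual helper):

    def _stub_grp_create(self, capacity):
        # Hypothetical sketch: make every nested Instance appear to be
        # created successfully, one expectation per group member.
        self.m.StubOutWithMock(instance.Instance, 'handle_create')
        self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
        for _ in range(capacity):
            instance.Instance.handle_create().AndReturn(None)
            instance.Instance.check_create_complete(None).AndReturn(True)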
    def test_instance_group(self):

        # setup stack from the initial template
        tmpl = template_format.parse(ig_template_before)
        stack = parse_stack(tmpl)

        # test stack create
        # test the number of instance creation
        # test that physical resource name of launch configuration is used
        size = int(stack.resources['JobServerGroup'].properties['Size'])
        self._stub_create(size)
        self.m.ReplayAll()
        stack.create()
        self.m.VerifyAll()
        self.assertEqual(stack.COMPLETE, stack.status)
        conf = stack.resources['JobServerConfig']
        conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack.name
        regex_pattern = re.compile(conf_name_pattern)
        self.assertTrue(regex_pattern.match(conf.FnGetRefId()))

        # test stack update
        # test that launch configuration is replaced
        conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')
        updated_tmpl = template_format.parse(ig_template_after)
        updated_stack = parse_stack(updated_tmpl)
        stack.update(updated_stack)
        updated_conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')
        self.assertNotEqual(conf_name, updated_conf_name)
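Nearly every snippet on this page funnels through utils.parse_stack (or a local parse_stack wrapper). As a rough sketch of what such a helper typically does, assuming Heat's parser, template and environment modules from the same era (an approximation for orientation, not the exact utility):

    from heat.engine import environment
    from heat.engine import parser
    from heat.engine import template

    def parse_stack(t, params=None, stack_name='test_stack'):
        # Approximate sketch: wrap a parsed template dict in a Stack
        # with a dummy request context so individual resources can be
        # validated, created and updated in isolation.
        ctx = utils.dummy_context()
        tmpl = template.Template(t, env=environment.Environment(params))
        stack = parser.Stack(ctx, stack_name, tmpl)
        stack.store()
        return stack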
Example #3
    def test_mem_alarm_high_check_not_required_parameters(self):
        snippet = template_format.parse(not_string_alarm_template)
        snippet['Resources']['MEMAlarmHigh']['Properties'].pop('meter_name')
        stack = utils.parse_stack(snippet)

        resource_defns = stack.t.resource_definitions(stack)
        rsrc = alarm.CeilometerAlarm(
            'MEMAlarmHigh', resource_defns['MEMAlarmHigh'], stack)
        error = self.assertRaises(exception.StackValidationFailed,
                                  rsrc.validate)
        self.assertEqual(
            "Property error: Resources.MEMAlarmHigh.Properties: "
            "Property meter_name not assigned",
            six.text_type(error))

        for p in ('period', 'evaluation_periods', 'statistic',
                  'comparison_operator'):
            snippet = template_format.parse(not_string_alarm_template)
            snippet['Resources']['MEMAlarmHigh']['Properties'].pop(p)
            stack = utils.parse_stack(snippet)

            resource_defns = stack.t.resource_definitions(stack)
            rsrc = alarm.CeilometerAlarm(
                'MEMAlarmHigh', resource_defns['MEMAlarmHigh'], stack)
            self.assertIsNone(rsrc.validate())
Example #4
    def check_with_update(self, with_policy=False, with_diff=False):
        current = copy.deepcopy(template)
        self.current_stack = utils.parse_stack(current)
        self.current_grp = self.current_stack["group1"]
        current_grp_json = function.resolve(self.current_grp.t)
        prop_diff, tmpl_diff = None, None
        updated = tmpl_with_updt_policy() if with_policy else copy.deepcopy(template)
        if with_diff:
            res_def = updated["resources"]["group1"]["properties"]["resource_def"]
            res_def["properties"]["Foo"] = "baz"
            prop_diff = {
                "count": 2,
                "resource_def": {"properties": {"Foo": "baz"}, "type": "OverwrittenFnGetRefIdType"},
            }
        updated_stack = utils.parse_stack(updated)
        updated_grp = updated_stack["group1"]
        updated_grp_json = function.resolve(updated_grp.t)
        tmpl_diff = updated_grp.update_template_diff(updated_grp_json, current_grp_json)

        updated_policy = updated_grp_json.get("UpdatePolicy")
        update_snippet = rsrc_defn.ResourceDefinition(
            self.current_grp.name,
            self.current_grp.type(),
            properties=updated_grp_json["Properties"],
            update_policy=updated_policy,
        )
        self.current_grp._replace = mock.Mock(return_value=[])
        self.current_grp._assemble_nested = mock.Mock()
        self.patchobject(scheduler.TaskRunner, "start")
        self.current_grp.handle_update(update_snippet, tmpl_diff, prop_diff)
    def check_with_update(self, with_policy=False, with_diff=False):
        current = copy.deepcopy(template)
        self.current_stack = utils.parse_stack(current)
        self.current_grp = self.current_stack['group1']
        current_grp_json = function.resolve(self.current_grp.t)
        prop_diff, tmpl_diff = None, None
        updated = (tmpl_with_updt_policy() if with_policy
                   else copy.deepcopy(template))
        if with_diff:
            res_def = updated['resources']['group1'][
                'properties']['resource_def']
            res_def['properties']['Foo'] = 'baz'
            prop_diff = {'count': 2,
                         'resource_def': {'properties': {'Foo': 'baz'},
                                          'type': 'OverwrittenFnGetRefIdType'}}
        updated_stack = utils.parse_stack(updated)
        updated_grp = updated_stack['group1']
        updated_grp_json = function.resolve(updated_grp.t)
        tmpl_diff = updated_grp.update_template_diff(
            updated_grp_json, current_grp_json)

        updated_policy = updated_grp_json.get('UpdatePolicy')
        update_snippet = rsrc_defn.ResourceDefinition(
            self.current_grp.name,
            self.current_grp.type(),
            properties=updated_grp_json['Properties'],
            update_policy=updated_policy)
        self.current_grp._replace = mock.Mock(return_value=[])
        self.current_grp._assemble_nested_for_size = mock.Mock()
        self.patchobject(scheduler.TaskRunner, 'start')
        self.current_grp.handle_update(update_snippet, tmpl_diff, prop_diff)
    def validate_update_policy_diff(self, current, updated):
        # load current stack
        current_stack = utils.parse_stack(current)
        current_grp = current_stack['group1']
        current_grp_json = function.resolve(current_grp.t)

        updated_stack = utils.parse_stack(updated)
        updated_grp = updated_stack['group1']
        updated_grp_json = function.resolve(updated_grp.t)

        # identify the template difference
        tmpl_diff = updated_grp.update_template_diff(
            updated_grp_json, current_grp_json)
        updated_policy = (updated_grp_json['UpdatePolicy']
                          if 'UpdatePolicy' in updated_grp_json else None)
        expected = {u'UpdatePolicy': updated_policy}
        self.assertEqual(expected, tmpl_diff)

        # test application of the new update policy in handle_update
        update_snippet = rsrc_defn.ResourceDefinition(
            current_grp.name,
            current_grp.type(),
            properties=updated_grp_json['Properties'],
            update_policy=updated_policy)

        current_grp._try_rolling_update = mock.Mock()
        current_grp._assemble_nested_for_size = mock.Mock()
        self.patchobject(scheduler.TaskRunner, 'start')
        current_grp.handle_update(update_snippet, tmpl_diff, None)
        if updated_policy is None:
            self.assertEqual({}, current_grp.update_policy.data)
        else:
            self.assertEqual(updated_policy, current_grp.update_policy.data)
    def validate_update_policy_diff(self, current, updated):

        # load current stack
        current_tmpl = template_format.parse(current)
        current_stack = utils.parse_stack(current_tmpl)

        # get the json snippet for the current InstanceGroup resource
        current_grp = current_stack['JobServerGroup']
        current_snippets = dict((n, r.parsed_template())
                                for n, r in current_stack.items())
        current_grp_json = current_snippets[current_grp.name]

        # load the updated stack
        updated_tmpl = template_format.parse(updated)
        updated_stack = utils.parse_stack(updated_tmpl)

        # get the updated json snippet for the InstanceGroup resource in the
        # context of the current stack
        updated_grp = updated_stack['JobServerGroup']
        updated_grp_json = current_stack.resolve_runtime_data(updated_grp.t)

        # identify the template difference
        tmpl_diff = updated_grp.update_template_diff(
            updated_grp_json, current_grp_json)
        updated_policy = (updated_grp.t['UpdatePolicy']
                          if 'UpdatePolicy' in updated_grp.t else None)
        expected = {u'UpdatePolicy': updated_policy}
        self.assertEqual(expected, tmpl_diff)
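A typical caller pairs a template that lacks an UpdatePolicy with one that adds it; a hypothetical invocation of the helper above (the template variable names are assumed, not taken from this page):

    def test_update_policy_added(self):
        # Hypothetical usage: diff a template without an UpdatePolicy
        # against one that adds it, and verify handle_update applies it.
        self.validate_update_policy_diff(ig_tmpl_without_updt_policy,
                                         ig_tmpl_with_updt_policy)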
Example #8
    def compare_stacks(self, json_file, yaml_file, parameters):
        t1 = self.load_template(json_file)
        template_format.default_for_missing(t1, 'AWSTemplateFormatVersion',
                                            template_format.CFN_VERSIONS)
        del t1[u'AWSTemplateFormatVersion']

        t2 = self.load_template(yaml_file)
        del t2[u'HeatTemplateFormatVersion']

        stack1 = utils.parse_stack(t1, parameters)
        stack2 = utils.parse_stack(t2, parameters)

        # compare resources separately so that resolved static data
        # is compared
        t1nr = dict(stack1.t.t)
        del t1nr['Resources']

        t2nr = dict(stack2.t.t)
        del t2nr['Resources']
        self.assertEqual(t1nr, t2nr)

        self.assertEqual(set(stack1.resources.keys()),
                         set(stack2.resources.keys()))
        for key in stack1.resources:
            self.assertEqual(stack1.resources[key].t, stack2.resources[key].t)
Example #9
 def test_router_interface_validate(self):
     t = template_format.parse(neutron_template)
     json = t["Resources"]["router_interface"]
     json["Properties"] = {
         "router_id": "ae478782-53c0-4434-ab16-49900c88016c",
         "subnet_id": "9577cafd-8e98-4059-a2e6-8a771b4d318e",
         "port_id": "9577cafd-8e98-4059-a2e6-8a771b4d318e",
     }
     stack = utils.parse_stack(t)
     res = router.RouterInterface("router_interface", json, stack)
     self.assertRaises(exception.ResourcePropertyConflict, res.validate)
     json["Properties"] = {
         "router_id": "ae478782-53c0-4434-ab16-49900c88016c",
         "port_id": "9577cafd-8e98-4059-a2e6-8a771b4d318e",
     }
     stack = utils.parse_stack(t)
     res = router.RouterInterface("router_interface", json, stack)
     self.assertIsNone(res.validate())
     json["Properties"] = {
         "router_id": "ae478782-53c0-4434-ab16-49900c88016c",
         "subnet_id": "9577cafd-8e98-4059-a2e6-8a771b4d318e",
     }
     stack = utils.parse_stack(t)
     res = router.RouterInterface("router_interface", json, stack)
     self.assertIsNone(res.validate())
     json["Properties"] = {"router_id": "ae478782-53c0-4434-ab16-49900c88016c"}
     stack = utils.parse_stack(t)
     res = router.RouterInterface("router_interface", json, stack)
     ex = self.assertRaises(exception.StackValidationFailed, res.validate)
     self.assertEqual("Either subnet_id or port_id must be specified.", str(ex))
Example #10
    def validate_update_policy_diff(self, current, updated):
        # load current stack
        current_tmpl = template_format.parse(current)
        current_stack = utils.parse_stack(current_tmpl,
                                          params=inline_templates.as_params)

        # get the json snippet for the current InstanceGroup resource
        current_grp = current_stack['WebServerGroup']
        current_snippets = dict((n, r.frozen_definition())
                                for n, r in current_stack.items())
        current_grp_json = current_snippets[current_grp.name]

        # load the updated stack
        updated_tmpl = template_format.parse(updated)
        updated_stack = utils.parse_stack(updated_tmpl,
                                          params=inline_templates.as_params)

        # get the updated json snippet for the InstanceGroup resource in the
        # context of the current stack
        updated_grp = updated_stack['WebServerGroup']
        updated_grp_json = updated_grp.t.freeze()

        # identify the template difference
        tmpl_diff = updated_grp.update_template_diff(
            updated_grp_json, current_grp_json)
        self.assertTrue(tmpl_diff.update_policy_changed())

        # test application of the new update policy in handle_update
        current_grp._try_rolling_update = mock.MagicMock()
        current_grp.resize = mock.MagicMock()
        current_grp.handle_update(updated_grp_json, tmpl_diff, None)
        self.assertEqual(updated_grp_json._update_policy or {},
                         current_grp.update_policy.data)
    def test_instance_group_update_policy_check_timeout(self):

        # setup stack from the initial template
        tmpl = template_format.parse(ig_tmpl_with_updt_policy)
        stack = utils.parse_stack(tmpl)

        # test stack create
        size = int(stack['JobServerGroup'].properties['Size'])
        self._stub_grp_create(size)
        self.m.ReplayAll()
        stack.create()
        self.m.VerifyAll()
        self.assertEqual(('CREATE', 'COMPLETE'), stack.state)

        # test that update policy is loaded
        current_grp = stack['JobServerGroup']
        self.assertIn('RollingUpdate', current_grp.update_policy)
        current_policy = current_grp.update_policy['RollingUpdate']
        self.assertTrue(current_policy)
        self.assertGreater(len(current_policy), 0)
        init_grp_tmpl = tmpl['Resources']['JobServerGroup']
        init_roll_updt = init_grp_tmpl['UpdatePolicy']['RollingUpdate']
        init_batch_sz = int(init_roll_updt['MaxBatchSize'])
        self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))

        # test the number of instances created
        nested = stack['JobServerGroup'].nested()
        self.assertEqual(size, len(nested.resources))

        # clean up for next test
        self.m.UnsetStubs()

        # modify the pause time and test for error
        new_pause_time = 'PT30M'
        updt_template = json.loads(ig_tmpl_with_updt_policy)
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['RollingUpdate']
        policy['PauseTime'] = new_pause_time
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = 'bar'
        updated_tmpl = template_format.parse(json.dumps(updt_template))
        updated_stack = utils.parse_stack(updated_tmpl)
        stack.update(updated_stack)
        self.assertEqual(('UPDATE', 'FAILED'), stack.state)

        # test that the update policy is updated
        updated_grp = stack['JobServerGroup']
        self.assertIn('RollingUpdate', updated_grp.update_policy)
        updated_policy = updated_grp.update_policy['RollingUpdate']
        self.assertTrue(updated_policy)
        self.assertGreater(len(updated_policy), 0)
        self.assertEqual(new_pause_time, updated_policy['PauseTime'])

        # test that error message match
        expected_error_message = ('The current UpdatePolicy will result '
                                  'in stack update timeout.')
        self.assertIn(expected_error_message, stack.status_reason)
Example #12
 def test_signal_with_body_as_input_and_delete_with_executions(self):
     tmpl = template_format.parse(workflow_template_full)
     stack = utils.parse_stack(tmpl, params={
         'parameters': {'use_request_body_as_input': 'true'}
     })
     rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
     wf = workflow.Workflow('create_vm', rsrc_defns, stack)
     self.mistral.workflows.create.return_value = [
         FakeWorkflow('create_vm')]
     scheduler.TaskRunner(wf.create)()
     details = {'flavor': '3'}
     execution = mock.Mock()
     execution.id = '12345'
     exec_manager = executions.ExecutionManager(wf.client('mistral'))
     self.mistral.executions.create.side_effect = (
         lambda *args, **kw: exec_manager.create(*args, **kw))
     self.patchobject(exec_manager, '_create', return_value=execution)
     scheduler.TaskRunner(wf.signal, details)()
     call_args = self.mistral.executions.create.call_args
     args, _ = call_args
     expected_args = (
         '{"image": "31d8eeaf-686e-4e95-bb27-765014b9f20b", '
         '"name": "create_test_server", "flavor": "3"}')
     self.validate_json_inputs(args[1], expected_args)
     self.assertEqual({'executions': '12345'}, wf.data())
     # Updating the workflow changing "use_request_body_as_input" to
     # false and signaling again with the expected request body format.
     t = template_format.parse(workflow_updating_request_body_property)
     new_stack = utils.parse_stack(t)
     rsrc_defns = new_stack.t.resource_definitions(new_stack)
     self.mistral.workflows.update.return_value = [
         FakeWorkflow('test_stack-workflow-b5fiekdsa355')]
     scheduler.TaskRunner(wf.update, rsrc_defns['create_vm'])()
     self.assertTrue(self.mistral.workflows.update.called)
     self.assertEqual((wf.UPDATE, wf.COMPLETE), wf.state)
     details = {'input': {'flavor': '4'}}
     execution = mock.Mock()
     execution.id = '54321'
     exec_manager = executions.ExecutionManager(wf.client('mistral'))
     self.mistral.executions.create.side_effect = (
         lambda *args, **kw: exec_manager.create(*args, **kw))
     self.patchobject(exec_manager, '_create', return_value=execution)
     scheduler.TaskRunner(wf.signal, details)()
     call_args = self.mistral.executions.create.call_args
     args, _ = call_args
     expected_args = (
         '{"image": "31d8eeaf-686e-4e95-bb27-765014b9f20b", '
         '"name": "create_test_server", "flavor": "4"}')
     self.validate_json_inputs(args[1], expected_args)
     self.assertEqual(
         {'executions': '54321,12345',
          'name': 'test_stack-workflow-b5fiekdsa355'},
         wf.data())
     scheduler.TaskRunner(wf.delete)()
     self.assertEqual(2, self.mistral.executions.delete.call_count)
     self.assertEqual((wf.DELETE, wf.COMPLETE), wf.state)
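The validate_json_inputs helper used above is defined elsewhere; a plausible minimal implementation, assuming it only needs an order-insensitive comparison of two JSON strings:

    import json

    def validate_json_inputs(self, actual, expected):
        # Hypothetical sketch: parse both JSON strings so that key
        # ordering differences do not cause spurious failures.
        self.assertEqual(json.loads(expected), json.loads(actual))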
    def test_autoscaling_group_update_policy_removed(self):

        # setup stack from the initial template
        tmpl = template_format.parse(asg_tmpl_with_updt_policy)
        stack = utils.parse_stack(tmpl)
        stack.validate()

        # test stack create
        size = int(stack["WebServerGroup"].properties["MinSize"])
        self._stub_grp_create(size)
        self._mock_get_image_id_success("F20-x86_64-cfntools", "image_id")
        self.m.ReplayAll()
        stack.create()
        self.m.VerifyAll()
        self.assertEqual(("CREATE", "COMPLETE"), stack.state)

        # test that update policy is loaded
        current_grp = stack["WebServerGroup"]
        self.assertIn("AutoScalingRollingUpdate", current_grp.update_policy)
        current_policy = current_grp.update_policy["AutoScalingRollingUpdate"]
        self.assertTrue(current_policy)
        self.assertGreater(len(current_policy), 0)
        init_updt_policy = tmpl["Resources"]["WebServerGroup"]["UpdatePolicy"]
        init_roll_updt = init_updt_policy["AutoScalingRollingUpdate"]
        init_batch_sz = int(init_roll_updt["MaxBatchSize"])
        self.assertEqual(init_batch_sz, int(current_policy["MaxBatchSize"]))

        # test that physical resource name of launch configuration is used
        conf = stack["LaunchConfig"]
        conf_name_pattern = "%s-LaunchConfig-[a-zA-Z0-9]+$" % stack.name
        self.assertThat(conf.FnGetRefId(), MatchesRegex(conf_name_pattern))

        # test the number of instances created
        nested = stack["WebServerGroup"].nested()
        self.assertEqual(size, len(nested.resources))

        # clean up for next test
        self.m.UnsetStubs()

        # test stack update
        updated_tmpl = template_format.parse(asg_tmpl_without_updt_policy)
        updated_stack = utils.parse_stack(updated_tmpl)
        self._stub_grp_replace(
            num_creates_expected_on_updt=0, num_deletes_expected_on_updt=0, num_reloads_expected_on_updt=1
        )
        self.m.ReplayAll()
        stack.update(updated_stack)
        self.m.VerifyAll()
        self.assertEqual(("UPDATE", "COMPLETE"), stack.state)

        # test that update policy is removed
        updated_grp = stack["WebServerGroup"]
        self.assertFalse(updated_grp.update_policy["AutoScalingRollingUpdate"])
 def test_floating_ip_validate(self):
     t = template_format.parse(neutron_floating_no_assoc_template)
     stack = utils.parse_stack(t)
     fip = stack['floating_ip']
     self.assertIsNone(fip.validate())
     del t['resources']['floating_ip']['properties']['port_id']
     t['resources']['floating_ip']['properties'][
         'fixed_ip_address'] = '10.0.0.12'
     stack = utils.parse_stack(t)
     fip = stack['floating_ip']
     self.assertRaises(exception.ResourcePropertyDependency,
                       fip.validate)
    def test_update_pool_with_references_to_health_monitors(self):
        clients.OpenStackClients.keystone().MultipleTimes().AndReturn(
            fakes.FakeKeystoneClient())
        neutronclient.Client.create_health_monitor({
            'health_monitor': {
                'delay': 3, 'max_retries': 5, 'type': u'HTTP',
                'timeout': 10, 'admin_state_up': True}}
        ).AndReturn({'health_monitor': {'id': '5555'}})

        neutronclient.Client.create_health_monitor({
            'health_monitor': {
                'delay': 3, 'max_retries': 5, 'type': u'HTTP',
                'timeout': 10, 'admin_state_up': True}}
        ).AndReturn({'health_monitor': {'id': '6666'}})
        neutronclient.Client.create_pool({
            'pool': {
                'subnet_id': 'sub123', 'protocol': u'HTTP',
                'name': utils.PhysName('test_stack', 'pool'),
                'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
        ).AndReturn({'pool': {'id': '5678'}})
        neutronclient.Client.associate_health_monitor(
            '5678', {'health_monitor': {'id': '5555'}}).InAnyOrder()
        neutronclient.Client.associate_health_monitor(
            '5678', {'health_monitor': {'id': '6666'}}).InAnyOrder()
        neutronclient.Client.create_vip({
            'vip': {
                'protocol': u'HTTP', 'name': 'pool.vip',
                'admin_state_up': True, 'subnet_id': u'sub123',
                'pool_id': '5678', 'protocol_port': 80}}
        ).AndReturn({'vip': {'id': 'xyz'}})
        neutronclient.Client.show_pool('5678').AndReturn(
            {'pool': {'status': 'ACTIVE'}})
        neutronclient.Client.show_vip('xyz').AndReturn(
            {'vip': {'status': 'ACTIVE'}})

        neutronclient.Client.disassociate_health_monitor(
            '5678', mox.IsA(unicode))

        self.m.ReplayAll()
        snippet = template_format.parse(pool_with_health_monitors_template)
        self.stack = utils.parse_stack(snippet)
        self.stack.create()
        self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
                         self.stack.state)

        snippet['Resources']['pool']['Properties']['monitors'] = [
            {u'Ref': u'monitor1'}]
        updated_stack = utils.parse_stack(snippet)
        self.stack.update(updated_stack)
        self.assertEqual((self.stack.UPDATE, self.stack.COMPLETE),
                         self.stack.state)
        self.m.VerifyAll()
Example #16
    def test_router_interface_validate(self):
        def find_rsrc(resource, name_or_id, cmd_resource=None):
            id_mapping = {
                'router': 'ae478782-53c0-4434-ab16-49900c88016c',
                'subnet': '8577cafd-8e98-4059-a2e6-8a771b4d318e',
                'port': '9577cafd-8e98-4059-a2e6-8a771b4d318e'}
            return id_mapping.get(resource)

        self.find_rsrc_mock.side_effect = find_rsrc

        t = template_format.parse(neutron_template)
        json = t['resources']['router_interface']
        json['properties'] = {
            'router_id': 'ae478782-53c0-4434-ab16-49900c88016c',
            'subnet_id': '8577cafd-8e98-4059-a2e6-8a771b4d318e',
            'port_id': '9577cafd-8e98-4059-a2e6-8a771b4d318e'}
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        res = router.RouterInterface('router_interface',
                                     resource_defns['router_interface'],
                                     stack)
        self.assertRaises(exception.ResourcePropertyConflict, res.validate)
        json['properties'] = {
            'router_id': 'ae478782-53c0-4434-ab16-49900c88016c',
            'port_id': '9577cafd-8e98-4059-a2e6-8a771b4d318e'}
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        res = router.RouterInterface('router_interface',
                                     resource_defns['router_interface'],
                                     stack)
        self.assertIsNone(res.validate())
        json['properties'] = {
            'router_id': 'ae478782-53c0-4434-ab16-49900c88016c',
            'subnet_id': '8577cafd-8e98-4059-a2e6-8a771b4d318e'}
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        res = router.RouterInterface('router_interface',
                                     resource_defns['router_interface'],
                                     stack)
        self.assertIsNone(res.validate())
        json['properties'] = {
            'router_id': 'ae478782-53c0-4434-ab16-49900c88016c'}
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        res = router.RouterInterface('router_interface',
                                     resource_defns['router_interface'],
                                     stack)
        ex = self.assertRaises(exception.PropertyUnspecifiedError,
                               res.validate)
        self.assertEqual("At least one of the following properties "
                         "must be specified: subnet, port.",
                         six.text_type(ex))
    def test_parse_with_bad_pausetime_in_update_policy(self):
        tmpl = template_format.parse(ig_tmpl_with_updt_policy)
        group = tmpl['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['RollingUpdate']

        # test against some random string
        policy['PauseTime'] = 'ABCD1234'
        stack = utils.parse_stack(tmpl)
        self.assertRaises(exception.StackValidationFailed, stack.validate)

        # test unsupported designator
        policy['PauseTime'] = 'P1YT1H'
        stack = utils.parse_stack(tmpl)
        self.assertRaises(exception.StackValidationFailed, stack.validate)
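For orientation, PauseTime takes an ISO 8601 duration limited to time designators; a hypothetical check in the same spirit as the validation these assertions exercise (an illustration, not Heat's actual implementation):

    import re

    # Accept time-only durations such as 'PT10M' or 'PT1M30S'; reject
    # date designators ('P1YT1H') and arbitrary strings ('ABCD1234').
    PAUSE_TIME_RE = re.compile(r'^PT(\d+H)?(\d+M)?(\d+S)?$')

    def valid_pause_time(value):
        return bool(PAUSE_TIME_RE.match(value)) and value != 'PT'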
 def _create_stack(self, ext_func, tmpl=inline_templates.RBAC_TEMPLATE):
     self.t = template_format.parse(tmpl)
     self.stack = utils.parse_stack(self.t)
     self.rbac = self.stack['rbac']
     self.neutron_client = mock.MagicMock()
     self.rbac.client = mock.MagicMock()
     self.rbac.client.return_value = self.neutron_client
Example #19
    def test_snapshot_no_volume(self):
        stack_name = 'test_volume_stack'
        fv = FakeVolume('creating', 'error')

        self._mock_create_volume(fv, stack_name)

        self.cinder_fc.volumes.get('vol-123').AndReturn(fv)

        self.m.ReplayAll()

        t = template_format.parse(volume_template)
        t['Resources']['DataVolume']['DeletionPolicy'] = 'Snapshot'
        t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
        stack = utils.parse_stack(t, stack_name=stack_name)
        rsrc = vol.Volume('DataVolume',
                          t['Resources']['DataVolume'],
                          stack)

        create = scheduler.TaskRunner(rsrc.create)
        self.assertRaises(exception.ResourceFailure, create)

        self._stubout_delete_volume(fv)
        scheduler.TaskRunner(rsrc.destroy)()

        self.m.VerifyAll()
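FakeVolume (and the related FakeBackupRestore) are local test doubles defined outside these snippets. A minimal sketch of the idea, with attribute names assumed rather than taken from the original:

    class FakeVolume(object):
        # Hypothetical sketch: a volume double whose status moves from
        # an initial to a final value when the test advances it.
        def __init__(self, initial_status, final_status, id='vol-123'):
            self.status = initial_status
            self.final_status = final_status
            self.id = id

        def get(self):
            # Simulate the backend finishing its state transition.
            self.status = self.final_status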
Example #20
    def test_create_from_snapshot_error(self):
        stack_name = 'test_volume_stack'
        fv = FakeVolumeWithStateTransition('restoring-backup', 'error')
        fvbr = FakeBackupRestore('vol-123')

        # create script
        clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
            self.cinder_fc)
        self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
        self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
        self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
        self.m.StubOutWithMock(fv, 'update')
        vol_name = utils.PhysName(stack_name, 'DataVolume')
        fv.update(
            display_description=vol_name,
            display_name=vol_name)

        self.m.ReplayAll()

        t = template_format.parse(volume_template)
        t['Resources']['DataVolume']['Properties']['SnapshotId'] = 'backup-123'
        t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
        stack = utils.parse_stack(t, stack_name=stack_name)

        rsrc = vol.Volume('DataVolume',
                          t['Resources']['DataVolume'],
                          stack)
        create = scheduler.TaskRunner(rsrc.create)
        self.assertRaises(exception.ResourceFailure, create)

        self.m.VerifyAll()
Example #21
    def test_create_from_snapshot(self):
        stack_name = 'test_volume_stack'
        fv = FakeVolumeWithStateTransition('restoring-backup', 'available')
        fvbr = FakeBackupRestore('vol-123')

        # create script
        clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
            self.cinder_fc)
        self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
        self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
        self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
        self.m.StubOutWithMock(fv, 'update')
        vol_name = utils.PhysName(stack_name, 'DataVolume')
        fv.update(
            display_description=vol_name,
            display_name=vol_name)

        self.m.ReplayAll()

        t = template_format.parse(volume_template)
        t['Resources']['DataVolume']['Properties']['SnapshotId'] = 'backup-123'
        stack = utils.parse_stack(t, stack_name=stack_name)

        self.create_volume(t, stack, 'DataVolume')
        self.assertEqual('available', fv.status)

        self.m.VerifyAll()
Example #22
    def test_volume_detach_non_exist(self):
        fv = FakeVolume('creating', 'available')
        fva = FakeVolume('in-use', 'available')
        stack_name = 'test_volume_detach_stack'

        self._mock_create_volume(fv, stack_name)

        self._mock_create_server_volume_script(fva)

        # delete script
        self.fc.volumes.get_server_volume(u'WikiDatabase',
                                          'vol-123').AndReturn(fva)
        self.cinder_fc.volumes.get(fva.id).AndRaise(
            clients.cinderclient.exceptions.NotFound('Not found'))

        self.m.ReplayAll()

        t = template_format.parse(volume_template)
        t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
        stack = utils.parse_stack(t, stack_name=stack_name)

        scheduler.TaskRunner(stack['DataVolume'].create)()
        rsrc = self.create_attachment(t, stack, 'MountPoint')

        scheduler.TaskRunner(rsrc.delete)()

        self.m.VerifyAll()
Example #23
    def test_volume_detach_with_error(self):
        fv = FakeVolume('creating', 'available')
        fva = FakeVolume('attaching', 'in-use')
        stack_name = 'test_volume_attach_stack'

        self._mock_create_volume(fv, stack_name)

        self._mock_create_server_volume_script(fva)

        # delete script
        fva = FakeVolume('in-use', 'error')
        self.fc.volumes.get_server_volume(u'WikiDatabase',
                                          'vol-123').AndReturn(fva)
        self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
        self.fc.volumes.delete_server_volume('WikiDatabase',
                                             'vol-123').AndReturn(None)
        self.m.ReplayAll()

        t = template_format.parse(volume_template)
        t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
        stack = utils.parse_stack(t, stack_name=stack_name)

        scheduler.TaskRunner(stack['DataVolume'].create)()
        self.assertEqual('available', fv.status)
        rsrc = self.create_attachment(t, stack, 'MountPoint')
        detach_task = scheduler.TaskRunner(rsrc.delete)

        self.assertRaises(exception.ResourceFailure, detach_task)

        self.m.VerifyAll()
Example #24
    def test_update_group_replace(self):
        """Make sure that during a group update the non updatable
        properties cause a replacement.
        """
        t = template_format.parse(ig_template)
        properties = t['Resources']['JobServerGroup']['Properties']
        properties['Size'] = '2'
        stack = utils.parse_stack(t)

        self._stub_create(2)

        self.m.ReplayAll()
        self.create_resource(t, stack, 'JobServerConfig')
        rsrc = self.create_resource(t, stack, 'JobServerGroup')

        self.m.ReplayAll()

        props = copy.copy(rsrc.properties.data)
        props['AvailabilityZones'] = ['wibble']
        update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
                                                      rsrc.type(),
                                                      props)
        updater = scheduler.TaskRunner(rsrc.update, update_snippet)
        self.assertRaises(resource.UpdateReplace, updater)

        rsrc.delete()
        self.m.VerifyAll()
Example #25
    def test_volume(self):
        fv = FakeVolume('creating', 'available')
        stack_name = 'test_volume_stack'

        # create script
        self._mock_create_volume(fv, stack_name)

        # delete script
        self.cinder_fc.volumes.get('vol-123').AndReturn(fv)

        self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
        self.m.ReplayAll()

        t = template_format.parse(volume_template)
        stack = utils.parse_stack(t, stack_name=stack_name)

        rsrc = self.create_volume(t, stack, 'DataVolume')
        self.assertEqual('available', fv.status)

        self.assertRaises(resource.UpdateReplace,
                          rsrc.handle_update, {}, {}, {})

        fv.status = 'in-use'
        self.assertRaises(exception.ResourceFailure,
                          scheduler.TaskRunner(rsrc.destroy))

        self._stubout_delete_volume(fv)
        fv.status = 'available'
        scheduler.TaskRunner(rsrc.destroy)()

        # Test when volume already deleted
        rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE)
        scheduler.TaskRunner(rsrc.destroy)()

        self.m.VerifyAll()
Example #26
    def test_cinder_default(self):
        fv = FakeVolume('creating', 'available')
        stack_name = 'test_volume_stack'

        clients.OpenStackClients.cinder().MultipleTimes().AndReturn(
            self.cinder_fc)
        vol_name = utils.PhysName(stack_name, 'DataVolume')
        self.cinder_fc.volumes.create(
            size=1, availability_zone='nova',
            display_description=None,
            display_name=vol_name).AndReturn(fv)

        self.m.ReplayAll()

        t = template_format.parse(volume_template)
        t['Resources']['DataVolume']['Properties'] = {
            'size': '1',
            'availability_zone': 'nova',
        }
        stack = utils.parse_stack(t, stack_name=stack_name)

        rsrc = vol.CinderVolume('DataVolume',
                                t['Resources']['DataVolume'],
                                stack)
        self.assertIsNone(rsrc.validate())
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        self.assertEqual('available', fv.status)

        self.m.VerifyAll()
Example #27
    def test_create_instance_error_causes_group_error(self):
        """
        If a resource in an instance group fails to be created, the instance
        group itself will fail and the broken inner resource will remain.
        """
        t = template_format.parse(ig_template)
        stack = utils.parse_stack(t)

        self.m.StubOutWithMock(parser.Stack, 'validate')
        parser.Stack.validate().MultipleTimes().AndReturn(None)
        self.stub_ImageConstraint_validate()
        self.stub_KeypairConstraint_validate()
        self.stub_FlavorConstraint_validate()
        self.stub_SnapshotConstraint_validate()

        self.m.StubOutWithMock(instance.Instance, 'handle_create')
        instance.Instance.handle_create().AndRaise(Exception)

        self.m.ReplayAll()
        self.create_resource(t, stack, 'JobServerConfig')
        self.assertRaises(
            exception.ResourceFailure,
            self.create_resource, t, stack, 'JobServerGroup')

        rsrc = stack['JobServerGroup']
        self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)

        # The failed inner resource remains
        self.assertEqual(1, len(rsrc.nested().resources))
        child_resource = list(rsrc.nested().resources.values())[0]
        self.assertEqual((child_resource.CREATE, child_resource.FAILED),
                         child_resource.state)

        self.m.VerifyAll()
Example #28
    def test_restore_prev_rsrc(self):
        t = template_format.parse(neutron_port_template)
        stack = utils.parse_stack(t)
        new_port = stack["port"]
        new_port.resource_id = "new_res_id"
        # mock backup stack to return only one mocked old_port
        old_port = mock.Mock()
        new_port.stack._backup_stack = mock.Mock()
        new_port.stack._backup_stack().resources.get.return_value = old_port
        old_port.resource_id = "old_res_id"
        _value = {"subnet_id": "test_subnet", "ip_address": "42.42.42.42"}
        old_port.data = mock.Mock(return_value={"port_fip": jsonutils.dumps(_value)})

        n_client = mock.Mock()
        new_port.client = mock.Mock(return_value=n_client)

        # execute prepare_for_replace
        new_port.restore_prev_rsrc()

        # check, that ports were updated: old port get ip and
        # same ip was removed from old port
        expected_new_props = {"port": {"fixed_ips": []}}
        expected_old_props = {"port": {"fixed_ips": _value}}
        n_client.update_port.assert_has_calls(
            [mock.call("new_res_id", expected_new_props), mock.call("old_res_id", expected_old_props)]
        )
Example #29
    def test_port_security_enabled(self):
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client), "network", "abcd1234"
        ).MultipleTimes().AndReturn("abcd1234")

        neutronclient.Client.create_port(
            {
                "port": {
                    "network_id": u"abcd1234",
                    "port_security_enabled": False,
                    "name": utils.PhysName("test_stack", "port"),
                    "admin_state_up": True,
                }
            }
        ).AndReturn({"port": {"status": "BUILD", "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}})

        neutronclient.Client.show_port("fc68ea2c-b60b-4b4f-bd82-94ec81110766").AndReturn(
            {"port": {"status": "ACTIVE", "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}}
        )

        self.m.ReplayAll()

        t = template_format.parse(neutron_port_security_template)
        stack = utils.parse_stack(t)

        port = stack["port"]
        scheduler.TaskRunner(port.create)()
        self.m.VerifyAll()
Example #30
    def test_missing_subnet_id(self):
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client), "network", "net1234"
        ).MultipleTimes().AndReturn("net1234")
        neutronclient.Client.create_port(
            {
                "port": {
                    "network_id": u"net1234",
                    "fixed_ips": [{"ip_address": u"10.0.3.21"}],
                    "name": utils.PhysName("test_stack", "port"),
                    "admin_state_up": True,
                    "device_owner": u"network:dhcp",
                }
            }
        ).AndReturn({"port": {"status": "BUILD", "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}})
        neutronclient.Client.show_port("fc68ea2c-b60b-4b4f-bd82-94ec81110766").AndReturn(
            {"port": {"status": "ACTIVE", "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}}
        )

        self.m.ReplayAll()

        t = template_format.parse(neutron_port_template)
        t["resources"]["port"]["properties"]["fixed_ips"][0].pop("subnet")
        stack = utils.parse_stack(t)

        port = stack["port"]
        scheduler.TaskRunner(port.create)()

        self.m.VerifyAll()
 def test_validate_minimal(self):
     self.t = template_format.parse(inline_templates.SPOOL_MINIMAL_TEMPLATE)
     self.stack = utils.parse_stack(self.t)
     rsrc = self.stack['sub_pool']
     self.assertIsNone(rsrc.validate())
Example #32
    def test_metadata_software_deployments(self):
        stack_name = 'test_metadata_software_deployments'
        t = template_format.parse(tools.wp_template)
        stack = utils.parse_stack(t, stack_name=stack_name)

        tools.setup_mocks(self.m, stack)
        self.m.ReplayAll()
        stack.store()
        stack.create()
        server = stack['WebServer']
        server_id = server.resource_id

        stack_user_project_id = str(uuid.uuid4())
        d1 = self._create_software_deployment(
            config_group='mygroup',
            server_id=server_id,
            config_name='02_second',
            stack_user_project_id=stack_user_project_id)
        d2 = self._create_software_deployment(
            config_group='mygroup',
            server_id=server_id,
            config_name='01_first',
            stack_user_project_id=stack_user_project_id)
        d3 = self._create_software_deployment(
            config_group='myothergroup',
            server_id=server_id,
            config_name='03_third',
            stack_user_project_id=stack_user_project_id)
        metadata = self.engine.metadata_software_deployments(
            self.ctx, server_id=server_id)
        self.assertEqual(3, len(metadata))
        self.assertEqual('mygroup', metadata[1]['group'])
        self.assertEqual('mygroup', metadata[0]['group'])
        self.assertEqual('myothergroup', metadata[2]['group'])
        self.assertEqual(d1['config_id'], metadata[1]['id'])
        self.assertEqual(d2['config_id'], metadata[0]['id'])
        self.assertEqual(d3['config_id'], metadata[2]['id'])
        self.assertEqual('01_first', metadata[0]['name'])
        self.assertEqual('02_second', metadata[1]['name'])
        self.assertEqual('03_third', metadata[2]['name'])

        # assert that metadata via metadata_software_deployments matches
        # metadata via server resource
        rsrcs = resource_objects.Resource.get_all_by_physical_resource_id(
            self.ctx, server_id)
        self.assertEqual(metadata, rsrcs[0].rsrc_metadata.get('deployments'))

        deployments = self.engine.metadata_software_deployments(
            self.ctx, server_id=str(uuid.uuid4()))
        self.assertEqual([], deployments)

        # assert get results when the context tenant_id matches
        # the stored stack_user_project_id
        ctx = utils.dummy_context(tenant_id=stack_user_project_id)
        metadata = self.engine.metadata_software_deployments(
            ctx, server_id=server_id)
        self.assertEqual(3, len(metadata))

        # assert get no results when the context tenant_id is unknown
        ctx = utils.dummy_context(tenant_id=str(uuid.uuid4()))
        metadata = self.engine.metadata_software_deployments(
            ctx, server_id=server_id)
        self.assertEqual(0, len(metadata))

        # assert None config is filtered out
        obj_conf = self._create_dummy_config_object()
        side_effect = [obj_conf, obj_conf, None]
        self.patchobject(software_config_object.SoftwareConfig,
                         '_from_db_object',
                         side_effect=side_effect)
        metadata = self.engine.metadata_software_deployments(
            self.ctx, server_id=server_id)
        self.assertEqual(2, len(metadata))
 def test_parse_without_update_policy(self):
     tmpl = template_format.parse(asg_tmpl_without_updt_policy)
     stack = utils.parse_stack(tmpl)
     grp = stack.resources['WebServerGroup']
     self.assertFalse(grp.update_policy['AutoScalingRollingUpdate'])
Example #34
 def _init_ngt(self, template):
     self.stack = utils.parse_stack(template)
     return self.stack['node-group']
 def test_floating_ip_assoc_refid_rsrc_id(self):
     t = template_format.parse(floating_ip_template_with_assoc)
     stack = utils.parse_stack(t)
     rsrc = stack['MyFloatingIPAssociation']
     rsrc.resource_id = 'phy-rsrc-id'
     self.assertEqual('phy-rsrc-id', rsrc.FnGetRefId())
Example #36
    def test_get_port_attributes(self):
        subnet_dict = {
            'name': 'test-subnet',
            'enable_dhcp': True,
            'network_id': 'net1234',
            'dns_nameservers': [],
            'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
            'ipv6_ra_mode': None,
            'cidr': '10.0.0.0/24',
            'allocation_pools': [{
                'start': '10.0.0.2',
                'end': u'10.0.0.254'
            }],
            'gateway_ip': '10.0.0.1',
            'ipv6_address_mode': None,
            'ip_version': 4,
            'host_routes': [],
            'id': '6dd609ad-d52a-4587-b1a0-b335f76062a5'
        }
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client), 'network',
            'net1234').MultipleTimes().AndReturn('net1234')
        neutronclient.Client.create_port({
            'port': {
                'network_id': u'net1234',
                'name': utils.PhysName('test_stack', 'port'),
                'admin_state_up': True,
                'device_owner': u'network:dhcp'
            }
        }).AndReturn({
            'port': {
                'status': 'BUILD',
                'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
            }
        })
        neutronclient.Client.show_subnet(
            'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e').AndReturn(
                {'subnet': subnet_dict})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766').MultipleTimes().AndReturn({
                'port': {
                    'status': 'DOWN',
                    'name': utils.PhysName('test_stack', 'port'),
                    'allowed_address_pairs': [],
                    'admin_state_up': True,
                    'network_id': 'net1234',
                    'device_id': 'dc68eg2c-b60g-4b3f-bd82-67ec87650532',
                    'mac_address': 'fa:16:3e:75:67:60',
                    'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
                    'security_groups': ['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
                    'fixed_ips': [{
                        'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
                        'ip_address': '10.0.0.2'
                    }]
                }
            })
        self.m.ReplayAll()

        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties'].pop('fixed_ips')
        stack = utils.parse_stack(t)

        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.assertEqual('DOWN', port.FnGetAtt('status'))
        self.assertEqual([], port.FnGetAtt('allowed_address_pairs'))
        self.assertTrue(port.FnGetAtt('admin_state_up'))
        self.assertEqual('net1234', port.FnGetAtt('network_id'))
        self.assertEqual('fa:16:3e:75:67:60', port.FnGetAtt('mac_address'))
        self.assertEqual(utils.PhysName('test_stack', 'port'),
                         port.FnGetAtt('name'))
        self.assertEqual('dc68eg2c-b60g-4b3f-bd82-67ec87650532',
                         port.FnGetAtt('device_id'))
        self.assertEqual('58a61fc3992944ce971404a2ece6ff98',
                         port.FnGetAtt('tenant_id'))
        self.assertEqual(['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
                         port.FnGetAtt('security_groups'))
        self.assertEqual([{
            'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
            'ip_address': '10.0.0.2'
        }], port.FnGetAtt('fixed_ips'))
        self.assertEqual([subnet_dict], port.FnGetAtt('subnets'))
        self.assertRaises(exception.InvalidTemplateAttribute, port.FnGetAtt,
                          'Foo')
        self.m.VerifyAll()
 def test_min_max_size(self):
     self.parsed['resources']['my-group']['properties']['max_size'] = -1
     stack = utils.parse_stack(self.parsed)
     self.assertRaises(exception.StackValidationFailed,
                       stack['my-group'].validate)
 def test_alarm_attribute(self):
     stack = utils.parse_stack(self.parsed)
     stack.create()
     policy = stack['my-policy']
     self.assertIn("my-policy", policy.FnGetAtt('alarm_url'))
 def create_stack(self, t):
     stack = utils.parse_stack(t)
     stack.create()
     self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
     return stack
Example #40
 def test_FnGetRefId_resource_ip(self, mock_ipaddr):
     t = template_format.parse(ipassoc_template_validate)
     stack = utils.parse_stack(t)
     rsrc = stack['eip']
     mock_ipaddr.return_value = 'x.x.x.x'
     self.assertEqual('x.x.x.x', rsrc.FnGetRefId())
Example #41
 def test_eip_allocation_refid_resource_id(self):
     t = template_format.parse(eip_template_ipassoc)
     stack = utils.parse_stack(t)
     rsrc = stack['IPAssoc']
     rsrc.resource_id = 'phy-rsrc-id'
     self.assertEqual('phy-rsrc-id', rsrc.FnGetRefId())
Example #42
    def create_pool(self, resolve_neutron=True, with_vip_subnet=False):
        clients.OpenStackClients.keystone().AndReturn(
            fakes.FakeKeystoneClient())
        neutronclient.Client.create_pool({
            'pool': {
                'subnet_id': 'sub123',
                'protocol': u'HTTP',
                'name': utils.PhysName('test_stack', 'pool'),
                'lb_method': 'ROUND_ROBIN',
                'admin_state_up': True
            }
        }).AndReturn({'pool': {
            'id': '5678'
        }})
        neutronclient.Client.show_pool('5678').AndReturn(
            {'pool': {
                'status': 'ACTIVE'
            }})
        neutronclient.Client.show_vip('xyz').AndReturn(
            {'vip': {
                'status': 'ACTIVE'
            }})
        stvipvsn = {
            'vip': {
                'protocol': u'HTTP',
                'name': 'pool.vip',
                'admin_state_up': True,
                'subnet_id': u'sub9999',
                'pool_id': '5678',
                'protocol_port': 80
            }
        }

        stvippsn = copy.deepcopy(stvipvsn)
        stvippsn['vip']['subnet_id'] = 'sub123'

        if resolve_neutron and with_vip_subnet:
            neutron_utils.neutronV20.find_resourceid_by_name_or_id(
                mox.IsA(neutronclient.Client), 'subnet',
                'sub123').AndReturn('sub123')
            neutron_utils.neutronV20.find_resourceid_by_name_or_id(
                mox.IsA(neutronclient.Client), 'subnet',
                'sub9999').AndReturn('sub9999')
            snippet = template_format.parse(pool_template_with_vip_subnet)
            neutronclient.Client.create_vip(stvipvsn).AndReturn(
                {'vip': {
                    'id': 'xyz'
                }})

        elif resolve_neutron and not with_vip_subnet:
            neutron_utils.neutronV20.find_resourceid_by_name_or_id(
                mox.IsA(neutronclient.Client), 'subnet',
                'sub123').AndReturn('sub123')
            snippet = template_format.parse(pool_template)
            neutronclient.Client.create_vip(stvippsn).AndReturn(
                {'vip': {
                    'id': 'xyz'
                }})
        else:
            snippet = template_format.parse(pool_template_deprecated)
            neutronclient.Client.create_vip(stvippsn).AndReturn(
                {'vip': {
                    'id': 'xyz'
                }})
        stack = utils.parse_stack(snippet)
        resource_defns = stack.t.resource_definitions(stack)
        return loadbalancer.Pool('pool', resource_defns['pool'], stack)
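A hypothetical caller of the create_pool helper above would then drive the resource through the scheduler, along the lines of:

    # Hypothetical usage of the helper above:
    rsrc = self.create_pool(resolve_neutron=True, with_vip_subnet=True)
    self.m.ReplayAll()
    scheduler.TaskRunner(rsrc.create)()
    self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
    self.m.VerifyAll()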
Example #43
 def test_scaling_policy_refid_signed_url(self, mock_get_ec2_url):
     t = template_format.parse(as_template)
     stack = utils.parse_stack(t, params=as_params)
     rsrc = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
     mock_get_ec2_url.return_value = 'http://signed_url'
     self.assertEqual('http://signed_url', rsrc.FnGetRefId())
Example #44
 def setUp(self):
     super(TestScalingGroupTags, self).setUp()
     t = template_format.parse(as_template)
     stack = utils.parse_stack(t, params=inline_templates.as_params)
     self.group = stack['WebServerGroup']
Example #45
    def test_group_create_no_personality(self):

        template = template_format.parse('''
HeatTemplateFormatVersion: "2012-12-12"
Description: "Rackspace Auto Scale"
Parameters: {}
Resources:
    my_group:
        Type: Rackspace::AutoScale::Group
        Properties:
            groupConfiguration:
                name: "My Group"
                cooldown: 60
                minEntities: 1
                maxEntities: 25
                metadata:
                    group: metadata
            launchConfiguration:
                type: "launch_server"
                args:
                    server:
                        name: autoscaled-server
                        flavorRef: flavor-ref
                        imageRef: image-ref
                        key_name: my-key
                        metadata:
                            server: metadata
                        networks:
                            - uuid: "00000000-0000-0000-0000-000000000000"
                            - uuid: "11111111-1111-1111-1111-111111111111"
''')

        self.stack = utils.parse_stack(template)
        self.stack.create()
        self.assertEqual(
            ('CREATE', 'COMPLETE'), self.stack.state,
            self.stack.status_reason)

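        # exactly one group should be created, with launch arguments
        # mirroring the template and personality left unset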
        self.assertEqual(1, len(self.fake_auto_scale.groups))
        self.assertEqual(
            {
                'cooldown': 60,
                'config_drive': False,
                'user_data': None,
                'disk_config': None,
                'flavor': 'flavor-ref',
                'image': 'image-ref',
                'launch_config_type': 'launch_server',
                'load_balancers': [],
                'key_name': "my-key",
                'max_entities': 25,
                'group_metadata': {'group': 'metadata'},
                'metadata': {'server': 'metadata'},
                'min_entities': 1,
                'name': 'My Group',
                'networks': [{'uuid': '00000000-0000-0000-0000-000000000000'},
                             {'uuid': '11111111-1111-1111-1111-111111111111'}],
                'personality': None,
                'server_name': u'autoscaled-server'},
            self.fake_auto_scale.groups['0'].kwargs)

        resource = self.stack['my_group']
        self.assertEqual('0', resource.FnGetRefId())
Example #46
    def test_security_group(self):

        show_created = {'security_group': {
            'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
            'name': 'sc1',
            'description': '',
            'security_group_rules': [{
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_max': '22',
                'id': 'bbbb',
                'ethertype': 'IPv4',
                'security_group_id': 'aaaa',
                'remote_group_id': None,
                'remote_ip_prefix': '0.0.0.0/0',
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'port_range_min': '22'
            }, {
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_max': '80',
                'id': 'cccc',
                'ethertype': 'IPv4',
                'security_group_id': 'aaaa',
                'remote_group_id': None,
                'remote_ip_prefix': '0.0.0.0/0',
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'port_range_min': '80'
            }, {
                'direction': 'ingress',
                'protocol': 'tcp',
                'port_range_max': None,
                'id': 'dddd',
                'ethertype': 'IPv4',
                'security_group_id': 'aaaa',
                'remote_group_id': 'wwww',
                'remote_ip_prefix': None,
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'port_range_min': None
            }, {
                'direction': 'egress',
                'protocol': 'tcp',
                'port_range_max': '22',
                'id': 'eeee',
                'ethertype': 'IPv4',
                'security_group_id': 'aaaa',
                'remote_group_id': None,
                'remote_ip_prefix': '10.0.1.0/24',
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'port_range_min': '22'
            }, {
                'direction': 'egress',
                'protocol': None,
                'port_range_max': None,
                'id': 'ffff',
                'ethertype': 'IPv4',
                'security_group_id': 'aaaa',
                'remote_group_id': 'xxxx',
                'remote_ip_prefix': None,
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'port_range_min': None
            }, {
                'direction': 'egress',
                'protocol': None,
                'port_range_max': None,
                'id': 'gggg',
                'ethertype': 'IPv4',
                'security_group_id': 'aaaa',
                'remote_group_id': 'aaaa',
                'remote_ip_prefix': None,
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'port_range_min': None
            }],
            'id': 'aaaa'}
        }

        # create script
        clients.OpenStackClients.keystone().AndReturn(
            FakeKeystoneClient())
        sg_name = utils.PhysName('test_stack', 'the_sg')
        neutronclient.Client.create_security_group({
            'security_group': {
                'name': sg_name,
                'description': 'HTTP and SSH access'
            }
        }).AndReturn({
            'security_group': {
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'name': sg_name,
                'description': 'HTTP and SSH access',
                'security_group_rules': [{
                    "direction": "egress",
                    "ethertype": "IPv4",
                    "id": "aaaa-1",
                    "port_range_max": None,
                    "port_range_min": None,
                    "protocol": None,
                    "remote_group_id": None,
                    "remote_ip_prefix": None,
                    "security_group_id": "aaaa",
                    "tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
                }, {
                    "direction": "egress",
                    "ethertype": "IPv6",
                    "id": "aaaa-2",
                    "port_range_max": None,
                    "port_range_min": None,
                    "protocol": None,
                    "remote_group_id": None,
                    "remote_ip_prefix": None,
                    "security_group_id": "aaaa",
                    "tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
                }],
                'id': 'aaaa'
            }
        })

        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '0.0.0.0/0',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '0.0.0.0/0',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa',
                'id': 'bbbb'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '0.0.0.0/0',
                'port_range_min': '80',
                'ethertype': 'IPv4',
                'port_range_max': '80',
                'protocol': 'tcp',
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '0.0.0.0/0',
                'port_range_min': '80',
                'ethertype': 'IPv4',
                'port_range_max': '80',
                'protocol': 'tcp',
                'security_group_id': 'aaaa',
                'id': 'cccc'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': 'wwww',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': 'tcp',
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': 'wwww',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': 'tcp',
                'security_group_id': 'aaaa',
                'id': 'dddd'
            }
        })
        neutronclient.Client.show_security_group('aaaa').AndReturn({
            'security_group': {
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'name': sg_name,
                'description': 'HTTP and SSH access',
                'security_group_rules': [{
                    "direction": "egress",
                    "ethertype": "IPv4",
                    "id": "aaaa-1",
                    "port_range_max": None,
                    "port_range_min": None,
                    "protocol": None,
                    "remote_group_id": None,
                    "remote_ip_prefix": None,
                    "security_group_id": "aaaa",
                    "tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
                }, {
                    "direction": "egress",
                    "ethertype": "IPv6",
                    "id": "aaaa-2",
                    "port_range_max": None,
                    "port_range_min": None,
                    "protocol": None,
                    "remote_group_id": None,
                    "remote_ip_prefix": None,
                    "security_group_id": "aaaa",
                    "tenant_id": "f18ca530cc05425e8bac0a5ff92f7e88"
                }],
                'id': 'aaaa'
            }
        })
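        # the default egress rules Neutron adds on creation are deleted so
        # the template's own egress rules can take effect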
        neutronclient.Client.delete_security_group_rule('aaaa-1').AndReturn(
            None)
        neutronclient.Client.delete_security_group_rule('aaaa-2').AndReturn(
            None)
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': None,
                'remote_ip_prefix': '10.0.1.0/24',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': None,
                'remote_ip_prefix': '10.0.1.0/24',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa',
                'id': 'eeee'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': 'xxxx',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': 'xxxx',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa',
                'id': 'ffff'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': 'aaaa',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': 'aaaa',
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa',
                'id': 'gggg'
            }
        })

        # update script
        neutronclient.Client.update_security_group(
            'aaaa',
            {'security_group': {
                'description': 'SSH access for private network',
                'name': 'myrules'}}
        ).AndReturn({
            'security_group': {
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'name': 'myrules',
                'description': 'SSH access for private network',
                'security_group_rules': [],
                'id': 'aaaa'
            }
        })

        neutronclient.Client.show_security_group('aaaa').AndReturn(
            show_created)
        neutronclient.Client.delete_security_group_rule('bbbb').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('cccc').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('dddd').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('eeee').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('ffff').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('gggg').AndReturn(None)

        neutronclient.Client.show_security_group('aaaa').AndReturn({
            'security_group': {
                'tenant_id': 'f18ca530cc05425e8bac0a5ff92f7e88',
                'name': 'sc1',
                'description': '',
                'security_group_rules': [],
                'id': 'aaaa'
            }
        })

        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'egress',
                'ethertype': 'IPv4',
                'security_group_id': 'aaaa',
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': None,
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv4',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa',
                'id': 'hhhh'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'egress',
                'ethertype': 'IPv6',
                'security_group_id': 'aaaa',
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'egress',
                'remote_group_id': None,
                'remote_ip_prefix': None,
                'port_range_min': None,
                'ethertype': 'IPv6',
                'port_range_max': None,
                'protocol': None,
                'security_group_id': 'aaaa',
                'id': 'iiii'
            }
        })
        neutronclient.Client.create_security_group_rule({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '10.0.0.10/24',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa'
            }
        }).AndReturn({
            'security_group_rule': {
                'direction': 'ingress',
                'remote_group_id': None,
                'remote_ip_prefix': '10.0.0.10/24',
                'port_range_min': '22',
                'ethertype': 'IPv4',
                'port_range_max': '22',
                'protocol': 'tcp',
                'security_group_id': 'aaaa',
                'id': 'jjjj'
            }
        })

        # delete script
        neutronclient.Client.show_security_group('aaaa').AndReturn(
            show_created)
        neutronclient.Client.delete_security_group_rule('bbbb').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('cccc').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('dddd').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('eeee').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('ffff').AndReturn(None)
        neutronclient.Client.delete_security_group_rule('gggg').AndReturn(None)
        neutronclient.Client.delete_security_group('aaaa').AndReturn(None)

        self.m.ReplayAll()
        stack = self.create_stack(self.test_template)

        sg = stack['the_sg']
        self.assertResourceState(sg, 'aaaa')

        updated_tmpl = template_format.parse(self.test_template_update)
        updated_stack = utils.parse_stack(updated_tmpl)
        stack.update(updated_stack)

        stack.delete()
        self.m.VerifyAll()
Example #47
    def create_stack(self, template):
        t = template_format.parse(template)
        stack = utils.parse_stack(t)
        stack.create()
        self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
        return stack
Example #48
    def test_create_and_update_port(self):
        props = {
            'network_id': u'net1234',
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True,
            'device_owner': u'network:dhcp'
        }
        new_props = props.copy()
        new_props['name'] = "new_name"
        new_props['security_groups'] = ['8a2f582a-e1cd-480f-b85d-b02631c10656']
        new_props_update = new_props.copy()
        new_props_update.pop('network_id')

        new_props1 = new_props.copy()
        new_props1.pop('security_groups')
        new_props_update1 = new_props_update.copy()
        new_props_update1['security_groups'] = [
            '0389f747-7785-4757-b7bb-2ab07e4b09c3'
        ]

        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client), 'network',
            'net1234').MultipleTimes().AndReturn('net1234')
        neutronclient.Client.create_port({
            'port': props
        }).AndReturn({
            'port': {
                "status": "BUILD",
                "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
            }
        })
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766').MultipleTimes().AndReturn({
                'port': {
                    "status": "ACTIVE",
                    "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
                    "fixed_ips": {
                        "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
                        "ip_address": "10.0.0.2"
                    }
                }
            })
        neutronclient.Client.update_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766', {
                'port': new_props_update
            }).AndReturn(None)

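        # the second update drops the security_groups property, so the
        # resource falls back to the 'default' group and resolves it to its
        # UUID via list_security_groups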
        fake_groups_list = {
            'security_groups': [{
                'tenant_id': 'dc4b074874244f7693dd65583733a758',
                'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
                'name': 'default',
                'security_group_rules': [],
                'description': 'no protocol'
            }]
        }
        self.m.StubOutWithMock(neutronclient.Client, 'list_security_groups')
        neutronclient.Client.list_security_groups().AndReturn(fake_groups_list)
        neutronclient.Client.update_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766', {
                'port': new_props_update1
            }).AndReturn(None)

        self.m.ReplayAll()

        # create port
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties'].pop('fixed_ips')
        stack = utils.parse_stack(t)

        port = stack['port']
        scheduler.TaskRunner(port.create)()

        # update port
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)
        scheduler.TaskRunner(port.update, update_snippet)()
        # update again to test the port without a security group
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props1)
        scheduler.TaskRunner(port.update, update_snippet)()

        self.m.VerifyAll()
Example #49
    def test_update_port(self):
        t = template_format.parse(neutron_port_template)
        stack = utils.parse_stack(t)

        self.patchobject(neutronV20, 'find_resourceid_by_name_or_id',
                         return_value='net1234')
        create_port = self.patchobject(neutronclient.Client, 'create_port')
        update_port = self.patchobject(neutronclient.Client, 'update_port')
        fake_groups_list = {
            'security_groups': [
                {
                    'tenant_id': 'dc4b074874244f7693dd65583733a758',
                    'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
                    'name': 'default',
                    'security_group_rules': [],
                    'description': 'no protocol'
                }
            ]
        }
        self.patchobject(neutronclient.Client, 'list_security_groups',
                         return_value=fake_groups_list)
        set_tag_mock = self.patchobject(neutronclient.Client, 'replace_tag')

        props = {'network_id': u'net1234',
                 'name': str(utils.PhysName(stack.name, 'port')),
                 'admin_state_up': True,
                 'device_owner': u'network:dhcp'}

        update_props = props.copy()
        update_props['security_groups'] = self.secgrp
        update_props['value_specs'] = self.value_specs
        update_props['tags'] = ['test_tag']
        if self.fixed_ips:
            update_props['fixed_ips'] = self.fixed_ips
        update_props['allowed_address_pairs'] = self.addr_pair
        update_props['binding:vnic_type'] = self.vnic_type

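        # build the expected update payload: a None security_groups property
        # falls back to the 'default' group, value_specs are flattened into
        # top-level keys, and tags are applied separately via replace_tag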
        update_dict = update_props.copy()

        if update_props['security_groups'] is None:
            update_dict['security_groups'] = ['default']

        if update_props['name'] is None:
            update_dict['name'] = utils.PhysName(stack.name, 'test_subnet')

        value_specs = update_dict.pop('value_specs')
        if value_specs:
            for key, value in six.iteritems(value_specs):
                update_dict[key] = value

        tags = update_dict.pop('tags')

        # create port
        port = stack['port']
        self.assertIsNone(scheduler.TaskRunner(port.handle_create)())
        create_port.assert_called_once_with({'port': props})
        # update port
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      update_props)
        self.assertIsNone(scheduler.TaskRunner(port.handle_update,
                                               update_snippet, {},
                                               update_props)())

        self.assertEqual(1, update_port.call_count)
        set_tag_mock.assert_called_with('ports', port.resource_id,
                                        {'tags': tags})
        # check that the update does not trigger an UpdateReplace
        create_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      props)
        after_props, before_props = port._prepare_update_props(update_snippet,
                                                               create_snippet)
        self.assertIsNotNone(
            port.update_template_diff_properties(after_props, before_props))

        # With fixed_ips removed
        scheduler.TaskRunner(port.handle_update, update_snippet,
                             {}, {'fixed_ips': None})()

        # update with empty prop_diff
        scheduler.TaskRunner(port.handle_update, update_snippet, {}, {})()
        self.assertEqual(1, update_port.call_count)
Example #50
    def test_validate(self):
        stack = utils.parse_stack(self.template)
        snip = stack.t.resource_definitions(stack)['deploy_mysql']
        resg = sd.SoftwareDeployments('deploy_mysql', snip, stack)
        self.assertIsNone(resg.validate())
Example #51
    def test_stack_update_existing_registry(self):
        # Update with the existing flag set and ensure the
        # environment registry is preserved.

        stack_name = 'service_update_test_stack_existing_registry'
        initial_registry = {'OS::Foo': 'foo.yaml',
                            'OS::Foo2': 'foo2.yaml',
                            'resources': {
                                'myserver': {'OS::Server': 'myserver.yaml'}}}
        initial_params = {'encrypted_param_names': [],
                          'parameter_defaults': {},
                          'parameters': {},
                          'event_sinks': [],
                          'resource_registry': initial_registry}
        initial_files = {'foo.yaml': 'foo',
                         'foo2.yaml': 'foo2',
                         'myserver.yaml': 'myserver'}
        update_registry = {'OS::Foo2': 'newfoo2.yaml',
                           'resources': {
                               'myother': {'OS::Other': 'myother.yaml'}}}
        update_params = {'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'parameters': {},
                         'resource_registry': update_registry}
        update_files = {'newfoo2.yaml': 'newfoo',
                        'myother.yaml': 'myother'}
        api_args = {rpc_api.PARAM_TIMEOUT: 60,
                    rpc_api.PARAM_EXISTING: True}
        t = template_format.parse(tools.wp_template)

        stk = utils.parse_stack(t, stack_name=stack_name,
                                params=initial_params, files=initial_files)
        stk.set_stack_user_project_id('1234')
        self.assertEqual(initial_params, stk.t.env.env_as_dict())

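        # the new registry should be merged over the initial one: OS::Foo is
        # kept, OS::Foo2 is overridden, and both resource mappings survive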
        expected_reg = {'OS::Foo': 'foo.yaml',
                        'OS::Foo2': 'newfoo2.yaml',
                        'resources': {
                            'myother': {'OS::Other': 'myother.yaml'},
                            'myserver': {'OS::Server': 'myserver.yaml'}}}
        expected_env = {'encrypted_param_names': [],
                        'parameter_defaults': {},
                        'parameters': {},
                        'event_sinks': [],
                        'resource_registry': expected_reg}
        # FIXME(shardy): Currently we don't prune unused old files
        expected_files = {'foo.yaml': 'foo',
                          'foo2.yaml': 'foo2',
                          'myserver.yaml': 'myserver',
                          'newfoo2.yaml': 'newfoo',
                          'myother.yaml': 'myother'}
        with mock.patch('heat.engine.stack.Stack') as mock_stack:
            stk.update = mock.Mock()
            mock_stack.load.return_value = stk
            mock_stack.validate.return_value = None
            result = self.man.update_stack(self.ctx, stk.identifier(),
                                           t,
                                           update_params,
                                           update_files,
                                           api_args)
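            # the new template is passed to Stack() as the third positional
            # argument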
            tmpl = mock_stack.call_args[0][2]
            self.assertEqual(expected_env,
                             tmpl.env.env_as_dict())
            self.assertEqual(expected_files,
                             tmpl.files.files)
            self.assertEqual(stk.identifier(), result)
Example #52
    def test_parse_with_bad_update_policy(self):
        tmpl = template_format.parse(ig_tmpl_with_bad_updt_policy)
        stack = utils.parse_stack(tmpl)
        self.assertRaises(exception.StackValidationFailed, stack.validate)
Example #53
    def test_properties_are_prepared_for_session_persistence(self):
        neutronV20.find_resourceid_by_name_or_id(mox.IsA(neutronclient.Client),
                                                 'subnet',
                                                 'sub123').AndReturn('sub123')

        neutronclient.Client.create_pool({
            'pool': {
                'subnet_id': 'sub123',
                'protocol': u'HTTP',
                'name': utils.PhysName('test_stack', 'pool'),
                'lb_method': 'ROUND_ROBIN',
                'admin_state_up': True
            }
        }).AndReturn({'pool': {
            'id': '5678'
        }})
        neutronclient.Client.create_vip({
            'vip': {
                'protocol': u'HTTP',
                'name': 'pool.vip',
                'admin_state_up': True,
                'subnet_id': u'sub123',
                'pool_id': '5678',
                'protocol_port': 80,
                'session_persistence': {
                    'type': 'HTTP_COOKIE'
                }
            }
        }).AndReturn({'vip': {
            'id': 'xyz'
        }})
        neutronclient.Client.show_pool('5678').AndReturn(
            {'pool': {
                'status': 'ACTIVE'
            }})
        neutronclient.Client.show_vip('xyz').AndReturn(
            {'vip': {
                'status': 'ACTIVE'
            }})

        snippet = template_format.parse(pool_with_session_persistence_template)
        pool = snippet['Resources']['pool']
        persistence = pool['Properties']['vip']['session_persistence']

        # change the persistence type to HTTP_COOKIE, which does not require
        # cookie_name
        persistence['type'] = 'HTTP_COOKIE'
        del persistence['cookie_name']

        stack = utils.parse_stack(snippet)
        resource_defns = stack.t.resource_definitions(stack)
        resource = loadbalancer.Pool('pool', resource_defns['pool'], stack)

        # assert that properties contain a cookie_name key with a None value
        persistence = resource.properties['vip']['session_persistence']
        self.assertIn('cookie_name', persistence)
        self.assertIsNone(persistence['cookie_name'])

        self.m.ReplayAll()
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE), resource.state)
        self.m.VerifyAll()
Example #54
    def test_update_monitors(self):
        neutronV20.find_resourceid_by_name_or_id(mox.IsA(neutronclient.Client),
                                                 'subnet',
                                                 'sub123').AndReturn('sub123')
        neutronclient.Client.create_pool({
            'pool': {
                'subnet_id': 'sub123',
                'protocol': u'HTTP',
                'name': utils.PhysName('test_stack', 'pool'),
                'lb_method': 'ROUND_ROBIN',
                'admin_state_up': True
            }
        }).AndReturn({'pool': {
            'id': '5678'
        }})
        neutronclient.Client.associate_health_monitor(
            '5678', {'health_monitor': {
                'id': 'mon123'
            }})
        neutronclient.Client.associate_health_monitor(
            '5678', {'health_monitor': {
                'id': 'mon456'
            }})
        neutronclient.Client.create_vip({
            'vip': {
                'protocol': u'HTTP',
                'name': 'pool.vip',
                'admin_state_up': True,
                'subnet_id': u'sub123',
                'pool_id': '5678',
                'protocol_port': 80
            }
        }).AndReturn({'vip': {
            'id': 'xyz'
        }})
        neutronclient.Client.show_pool('5678').AndReturn(
            {'pool': {
                'status': 'ACTIVE'
            }})
        neutronclient.Client.show_vip('xyz').AndReturn(
            {'vip': {
                'status': 'ACTIVE'
            }})
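        # on update, monitor mon456 should be disassociated and mon789
        # associated in its place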
        neutronclient.Client.disassociate_health_monitor('5678', 'mon456')
        neutronclient.Client.associate_health_monitor(
            '5678', {'health_monitor': {
                'id': 'mon789'
            }})

        snippet = template_format.parse(pool_template)
        stack = utils.parse_stack(snippet)
        snippet['Resources']['pool']['Properties']['monitors'] = [
            'mon123', 'mon456'
        ]
        resource_defns = stack.t.resource_definitions(stack)
        rsrc = loadbalancer.Pool('pool', resource_defns['pool'], stack)
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()

        update_template = copy.deepcopy(rsrc.t)
        update_template['Properties']['monitors'] = ['mon123', 'mon789']
        scheduler.TaskRunner(rsrc.update, update_template)()

        self.m.VerifyAll()
Example #55
    def test_subnet_get_live_state(self):
        template = """
        heat_template_version: 2015-04-30
        resources:
          net:
            type: OS::Neutron::Net
            properties:
              name: test
          subnet:
            type: OS::Neutron::Subnet
            properties:
              network_id: { get_resource: net }
              cidr: 10.0.0.0/25
              value_specs:
                test_value_spec: value_spec_value
        """
        t = template_format.parse(template)
        stack = utils.parse_stack(t)
        rsrc = stack['subnet']
        stack.create()

        subnet_resp = {
            'subnet': {
                'name': 'subnet-subnet-la5usdgifhrd',
                'enable_dhcp': True,
                'network_id': 'dffd43b3-6206-4402-87e6-8a16ddf3bd68',
                'tenant_id': '30f466e3d14b4251853899f9c26e2b66',
                'dns_nameservers': [],
                'ipv6_ra_mode': None,
                'allocation_pools': [{
                    'start': '10.0.0.2',
                    'end': '10.0.0.126'
                }],
                'gateway_ip': '10.0.0.1',
                'ipv6_address_mode': None,
                'ip_version': 4,
                'host_routes': [],
                'prefixlen': None,
                'cidr': '10.0.0.0/25',
                'id': 'b255342b-31b7-4674-8ea4-a144bca658b0',
                'subnetpool_id': None,
                'test_value_spec': 'value_spec_value'
            }
        }
        rsrc.client().show_subnet = mock.MagicMock(return_value=subnet_resp)
        rsrc.resource_id = '1234'

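        # get_live_state should reduce the raw subnet response to its
        # updatable properties, folding unrecognised keys into value_specs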
        reality = rsrc.get_live_state(rsrc.properties)
        expected = {
            'name': 'subnet-subnet-la5usdgifhrd',
            'enable_dhcp': True,
            'dns_nameservers': [],
            'allocation_pools': [{
                'start': '10.0.0.2',
                'end': '10.0.0.126'
            }],
            'gateway_ip': '10.0.0.1',
            'host_routes': [],
            'value_specs': {
                'test_value_spec': 'value_spec_value'
            }
        }

        self.assertEqual(set(expected.keys()), set(reality.keys()))
        for key in expected:
            self.assertEqual(expected[key], reality[key])
Example #56
    def test_user_refid_rsrc_id(self):
        t = template_format.parse(user_template)
        stack = utils.parse_stack(t)
        rsrc = stack['CfnUser']
        rsrc.resource_id = 'phy-rsrc-id'
        self.assertEqual('phy-rsrc-id', rsrc.FnGetRefId())
Example #57
    def _setup_test_stack(self, template):
        self.stack = utils.parse_stack(template)
        self.stack.create()
        self.assertEqual(('CREATE', 'COMPLETE'), self.stack.state,
                         self.stack.status_reason)
Example #58
    def _init_ct(self, template):
        self.stack = utils.parse_stack(template)
        return self.stack['cluster-template']
Example #59
    def test_vnic_create_update(self):
        port_prop = {
            'network_id': u'net1234',
            'fixed_ips': [{
                'subnet_id': u'sub1234',
                'ip_address': u'10.0.3.21'
            }],
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True,
            'device_owner': 'network:dhcp',
            'binding:vnic_type': 'direct'
        }
        new_port_prop = port_prop.copy()
        new_port_prop['binding:vnic_type'] = 'normal'
        new_port_prop['name'] = "new_name"
        new_port_prop['security_groups'] = [
            '8a2f582a-e1cd-480f-b85d-b02631c10656'
        ]
        new_port_prop.pop('network_id')

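        # prop_update is the payload expected by update_port;
        # replacement_policy and network are Heat-side inputs only, so they
        # are added to the resource definition but never sent to Neutron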
        prop_update = copy.deepcopy(new_port_prop)
        new_port_prop['replacement_policy'] = 'AUTO'
        new_port_prop['network'] = u'net1234'

        neutronV20.find_resourceid_by_name_or_id(mox.IsA(
            neutronclient.Client), 'network', 'net1234').AndReturn('net1234')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client), 'subnet',
            'sub1234').MultipleTimes().AndReturn('sub1234')
        neutronclient.Client.create_port({
            'port': port_prop
        }).AndReturn({
            'port': {
                "status": "BUILD",
                "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
            }
        })
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766').AndReturn({
                'port': {
                    "status": "ACTIVE",
                    "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
                }
            })
        self.stub_SubnetConstraint_validate()
        self.stub_NetworkConstraint_validate()
        neutronclient.Client.update_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766', {
                'port': prop_update
            }).AndReturn(None)
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766').AndReturn({
                'port': {
                    "status": "ACTIVE",
                    "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
                }
            })

        prop_update2 = copy.deepcopy(prop_update)
        prop_update2['binding:vnic_type'] = 'direct'
        neutronclient.Client.update_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766', {
                'port': prop_update2
            }).AndReturn(None)

        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766').AndReturn({
                'port': {
                    "status": "ACTIVE",
                    "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
                }
            })
        self.m.ReplayAll()
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties']['binding:vnic_type'] = 'direct'
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.assertEqual('direct', port.properties['binding:vnic_type'])

        # update to normal
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_port_prop)
        new_port_prop2 = copy.deepcopy(new_port_prop)
        scheduler.TaskRunner(port.update, update_snippet)()
        self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
        self.assertEqual('normal', port.properties['binding:vnic_type'])

        # update back to direct
        new_port_prop2['binding:vnic_type'] = 'direct'
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_port_prop2)
        scheduler.TaskRunner(port.update, update_snippet)()
        self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
        self.assertEqual('direct', port.properties['binding:vnic_type'])

        self.m.VerifyAll()
Example #60
    def test_scaling_policy_refid_rsrc_name(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        rsrc = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
        rsrc.resource_id = None
        self.assertEqual('WebServerScaleUpPolicy', rsrc.FnGetRefId())