Code example #1
File: test_autoscaling.py Project: zzxwill/heat
    def test_scaling_meta_update(self):
        """Use heatclient to signal the up and down policy.

        Then confirm that the metadata in the custom_lb is updated each
        time.
        """
        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up one, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 3))

        # Scale down two, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleDownPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 1))
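
Every example on this page polls with test.call_until_true(timeout, interval, predicate, *args), retrying the predicate until it returns True or the timeout expires. The helper itself is not part of the excerpts; below is a minimal sketch with the same calling convention (an assumption based on how it is called above, not the project's actual implementation):

import time


def call_until_true(duration, sleep_for, func, *args, **kwargs):
    # Keep calling func(*args, **kwargs) until it returns True or the
    # duration (seconds) runs out; wait sleep_for seconds between attempts.
    deadline = time.time() + duration
    while time.time() < deadline:
        if func(*args, **kwargs):
            return True
        time.sleep(sleep_for)
    return False

Read this way, the assertion above means "the instance count must return to 1 within build_timeout seconds, checked every build_interval seconds."
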
Code example #2
    def publish_template(self, name, contents):
        oc = self.object_client

        # post the object
        oc.put_object(self.object_container_name, name, contents)
        # TODO(asalkeld) see if this is causing problems.
        # self.addCleanup(self.object_client.delete_object,
        #                self.object_container_name, name)

        # make the tempurl
        key_header = 'x-account-meta-temp-url-key'
        if key_header not in oc.head_account():
            swift_key = hashlib.sha224(
                str(random.getrandbits(256))).hexdigest()[:32]
            LOG.warn('setting swift key to %s' % swift_key)
            oc.post_account({key_header: swift_key})
        key = oc.head_account()[key_header]
        path = '/v1/AUTH_%s/%s/%s' % (self.project_id,
                                      self.object_container_name, name)
        timeout = self.conf.build_timeout * 10
        tempurl = swiftclient_utils.generate_temp_url(path, timeout,
                                                      key, 'GET')
        sw_url = parse.urlparse(oc.url)
        full_url = '%s://%s%s' % (sw_url.scheme, sw_url.netloc, tempurl)

        def download():
            r = requests.get(full_url)
            LOG.info('GET: %s -> %s' % (full_url, r.status_code))
            return r.status_code == requests.codes.ok

        # make sure that the object is available.
        test.call_until_true(self.conf.build_timeout,
                             self.conf.build_interval, download)

        return full_url
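
The temp URL produced by swiftclient_utils.generate_temp_url(path, timeout, key, 'GET') follows the standard Swift TempURL scheme: an HMAC-SHA1 signature over the method, expiry timestamp and object path, appended to the path as query parameters. A rough sketch of that computation (an illustration of the scheme, not the python-swiftclient implementation):

import hmac
import time
from hashlib import sha1


def generate_temp_url_sketch(path, seconds, key, method):
    # The expiry is an absolute Unix timestamp.
    expires = int(time.time() + seconds)
    # Swift signs "<METHOD>\n<expires>\n<path>" with the account temp-url key.
    hmac_body = '%s\n%s\n%s' % (method.upper(), expires, path)
    sig = hmac.new(key.encode(), hmac_body.encode(), sha1).hexdigest()
    return '%s?temp_url_sig=%s&temp_url_expires=%s' % (path, sig, expires)

The download() polling loop is still needed because the freshly uploaded object may not be readable through the temp URL immediately.
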
Code example #3
    def test_autoscaling_loadbalancer_neutron(self):
        """Check work of AutoScaing and Neutron LBaaS v1 resource in Heat.

        The scenario is the following:
            1. Launch a stack with a load balancer and autoscaling group
               of one server, wait until stack create is complete.
            2. Check that there is only one distinctive response from
               loadbalanced IP.
            3. Signal the scale_up policy, wait until all resources in
               autoscaling group are complete.
            4. Check that now there are two distinctive responses from
               loadbalanced IP.
        """

        parameters = {
            'flavor': self.conf.minimal_instance_type,
            'image': self.conf.minimal_image_ref,
            'net': self.conf.fixed_network_name,
            'subnet': self.conf.fixed_subnet_name,
            'public_net': self.conf.floating_network_name,
            'app_port': 8080,
            'lb_port': 80,
            'timeout': 600
        }

        app_server_template = self._load_template(
            __file__, self.app_server_template_name, self.sub_dir
        )
        webapp_template = self._load_template(
            __file__, self.webapp_template_name, self.sub_dir
        )
        files = {'appserver.yaml': app_server_template,
                 'webapp.yaml': webapp_template}
        env = {'resource_registry':
               {'OS::Test::NeutronAppServer': 'appserver.yaml',
                'OS::Test::WebAppConfig': 'webapp.yaml'}}
        # Launch stack
        sid = self.launch_stack(
            template_name=self.template_name,
            parameters=parameters,
            files=files,
            environment=env
        )
        stack = self.client.stacks.get(sid)
        lb_url = self._stack_output(stack, 'lburl')
        # Check number of distinctive responses, must be 1
        self.check_num_responses(lb_url, 1)

        # Signal the scaling hook
        self.client.resources.signal(sid, 'scale_up')

        # Wait for AutoScalingGroup update to finish
        asg = self.client.resources.get(sid, 'asg')
        test.call_until_true(self.conf.build_timeout,
                             self.conf.build_interval,
                             self.check_autoscale_complete,
                             asg.physical_resource_id, 2)

        # Check number of distinctive responses, must now be 2
        self.check_num_responses(lb_url, 2)
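
check_num_responses(lb_url, expected) is defined elsewhere in the test class; the docstring implies it fetches the load-balanced URL repeatedly and counts how many distinct responses come back, one per pool member. A hypothetical equivalent (the retry strategy and exact signature are assumptions, not the project's helper):

import requests


def check_num_responses(lb_url, expected_num, attempts=10):
    # Each backend is assumed to answer with a unique body, so the number
    # of distinct bodies approximates the number of members behind the LB.
    distinct = set()
    for _ in range(attempts):
        resp = requests.get(lb_url)
        if resp.status_code == requests.codes.ok:
            distinct.add(resp.text)
    return len(distinct) == expected_num
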
Code example #4
    def test_autoscaling_loadbalancer_neutron(self):
        """Check work of AutoScaing and Neutron LBaaS v1 resource in Heat.

        The scenario is the following:
            1. Launch a stack with a load balancer and autoscaling group
               of one server, wait until stack create is complete.
            2. Check that there is only one distinctive response from
               loadbalanced IP.
            3. Signal the scale_up policy, wait until all resources in
               autoscaling group are complete.
            4. Check that now there are two distinctive responses from
               loadbalanced IP.
        """

        parameters = {
            'flavor': self.conf.minimal_instance_type,
            'image': self.conf.minimal_image_ref,
            'net': self.conf.fixed_network_name,
            'subnet': self.conf.fixed_subnet_name,
            'public_net': self.conf.floating_network_name,
            'app_port': 8080,
            'lb_port': 80,
            'timeout': 600
        }

        app_server_template = self._load_template(
            __file__, self.app_server_template_name, self.sub_dir
        )
        webapp_template = self._load_template(
            __file__, self.webapp_template_name, self.sub_dir
        )
        files = {'appserver.yaml': app_server_template,
                 'webapp.yaml': webapp_template}
        env = {'resource_registry':
               {'OS::Test::NeutronAppServer': 'appserver.yaml',
                'OS::Test::WebAppConfig': 'webapp.yaml'}}
        # Launch stack
        sid = self.launch_stack(
            template_name=self.template_name,
            parameters=parameters,
            files=files,
            environment=env
        )
        stack = self.client.stacks.get(sid)
        lb_url = self._stack_output(stack, 'lburl')
        # Check number of distinctive responses, must be 1
        self.check_num_responses(lb_url, 1)

        # Signal the scaling hook
        self.client.resources.signal(sid, 'scale_up')

        # Wait for AutoScalingGroup update to finish
        asg = self.client.resources.get(sid, 'asg')
        test.call_until_true(self.conf.build_timeout,
                             self.conf.build_interval,
                             self.autoscale_complete,
                             asg.physical_resource_id, 2)

        # Check number of distinctive responses, must now be 2
        self.check_num_responses(lb_url, 2)
Code example #5
    def test_asg_notifications(self):
        stack_identifier = self.stack_create(template=self.asg_template)

        for output in self.client.stacks.get(stack_identifier).outputs:
            if output['output_key'] == 'scale_dn_url':
                scale_down_url = output['output_value']
            else:
                scale_up_url = output['output_value']

        notifications = []
        handler = NotificationHandler(stack_identifier.split('/')[0],
                                      ASG_NOTIFICATIONS)

        with self.conn.Consumer(self.queue,
                                callbacks=[handler.process_message],
                                auto_declare=False):

            requests.post(scale_up_url, verify=self.verify_cert)
            test.call_until_true(20, 0, self.consume_events, handler, 2)
            notifications += handler.notifications

            handler.clear()
            requests.post(scale_down_url, verify=self.verify_cert)
            test.call_until_true(20, 0, self.consume_events, handler, 2)
            notifications += handler.notifications

        self.assertEqual(2, notifications.count(ASG_NOTIFICATIONS[0]))
        self.assertEqual(2, notifications.count(ASG_NOTIFICATIONS[1]))
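
NotificationHandler and consume_events are defined outside these excerpts. From the way they are used, the handler is a kombu consumer callback that records only the expected autoscaling event types for the stack under test, and consume_events(handler, count) drains the connection until that many events have been collected. A heavily simplified, assumed sketch of such a handler (not Heat's own class):

class NotificationHandler(object):

    def __init__(self, stack_name, expected_events):
        self._stack_name = stack_name
        self._expected = set(expected_events)
        self.notifications = []

    def process_message(self, body, message):
        # Kombu passes (body, message); keep only the expected event types
        # that belong to the stack under test, then ack the message.
        payload = body.get('payload', {})
        if (body.get('event_type') in self._expected and
                payload.get('stack_name') == self._stack_name):
            self.notifications.append(body['event_type'])
        message.ack()

    def clear(self):
        self.notifications = []
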
Code example #6
File: test_autoscaling.py Project: Dynavisor/heat
    def test_signal_during_suspend(self):
        """Prove that a signal will fail when the stack is in suspend."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # suspend the top level stack.
        self.client.actions.suspend(stack_id=stack_identifier)
        self._wait_for_resource_status(
            stack_identifier, 'JobServerGroup', 'SUSPEND_COMPLETE')

        # Send a signal and confirm nothing happened.
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        ev = self.wait_for_event_with_reason(
            stack_identifier,
            reason='Cannot signal resource during SUSPEND',
            rsrc_name='ScaleUpPolicy')
        self.assertEqual('SUSPEND_COMPLETE', ev[0].resource_status)

        # still SUSPEND_COMPLETE (not gone to UPDATE_COMPLETE)
        self._wait_for_stack_status(nested_ident, 'SUSPEND_COMPLETE')
        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
        # still 2 instances.
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))
Code example #7
    def publish_template(self, name, contents):
        oc = self.object_client

        # post the object
        oc.put_object(self.object_container_name, name, contents)
        # TODO(asalkeld) see if this is causing problems.
        # self.addCleanup(self.object_client.delete_object,
        #                self.object_container_name, name)

        # make the tempurl
        key_header = 'x-account-meta-temp-url-key'
        if key_header not in oc.head_account():
            swift_key = hashlib.sha224(str(
                random.getrandbits(256))).hexdigest()[:32]
            LOG.warn('setting swift key to %s' % swift_key)
            oc.post_account({key_header: swift_key})
        key = oc.head_account()[key_header]
        path = '/v1/AUTH_%s/%s/%s' % (self.project_id,
                                      self.object_container_name, name)
        timeout = self.conf.build_timeout * 10
        tempurl = swiftclient_utils.generate_temp_url(path, timeout, key,
                                                      'GET')
        sw_url = parse.urlparse(oc.url)
        full_url = '%s://%s%s' % (sw_url.scheme, sw_url.netloc, tempurl)

        def download():
            r = requests.get(full_url)
            LOG.info('GET: %s -> %s' % (full_url, r.status_code))
            return r.status_code == requests.codes.ok

        # make sure that the object is available.
        test.call_until_true(self.conf.build_timeout, self.conf.build_interval,
                             download)

        return full_url
Code example #8
File: test_notifications.py Project: zzjeric/heat
    def test_asg_notifications(self):
        stack_identifier = self.stack_create(template=self.asg_template)

        for output in self.client.stacks.get(stack_identifier).outputs:
            if output['output_key'] == 'scale_dn_url':
                scale_down_url = output['output_value']
            else:
                scale_up_url = output['output_value']

        notifications = []
        handler = NotificationHandler(
            stack_identifier.split('/')[0], ASG_NOTIFICATIONS)

        with self.conn.Consumer(self.queue,
                                callbacks=[handler.process_message],
                                auto_declare=False):

            requests.post(scale_up_url, verify=self.verify_cert)
            self.assertTrue(
                test.call_until_true(20, 0, self.consume_events, handler, 2))
            notifications += handler.notifications

            handler.clear()
            requests.post(scale_down_url, verify=self.verify_cert)
            self.assertTrue(
                test.call_until_true(20, 0, self.consume_events, handler, 2))
            notifications += handler.notifications

        self.assertEqual(2, notifications.count(ASG_NOTIFICATIONS[0]))
        self.assertEqual(2, notifications.count(ASG_NOTIFICATIONS[1]))
Code example #9
    def test_scaling_meta_update(self):
        """Use heatclient to signal the up and down policy.

        Then confirm that the metadata in the custom_lb is updated each
        time.
        """
        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(
            test.call_until_true(self.build_timeout, self.build_interval,
                                 self.check_instance_count, stack_identifier,
                                 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up one, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(
            test.call_until_true(self.build_timeout, self.build_interval,
                                 self.check_instance_count, stack_identifier,
                                 3))

        # Scale down two, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleDownPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(
            test.call_until_true(self.build_timeout, self.build_interval,
                                 self.check_instance_count, stack_identifier,
                                 1))
Code example #10
File: test_autoscaling.py Project: zzxwill/heat
    def test_signal_during_suspend(self):
        """Prove that a signal will fail when the stack is in suspend."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # suspend the top level stack.
        self.client.actions.suspend(stack_id=stack_identifier)
        self._wait_for_resource_status(
            stack_identifier, 'JobServerGroup', 'SUSPEND_COMPLETE')

        # Send a signal and confirm nothing happened.
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        ev = self.wait_for_event_with_reason(
            stack_identifier,
            reason='Cannot signal resource during SUSPEND',
            rsrc_name='ScaleUpPolicy')
        self.assertEqual('SUSPEND_COMPLETE', ev[0].resource_status)

        # still SUSPEND_COMPLETE (not gone to UPDATE_COMPLETE)
        self._wait_for_stack_status(nested_ident, 'SUSPEND_COMPLETE')
        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
        # still 2 instances.
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))
Code example #11
File: test_create_update.py Project: liuchin/heat
    def test_stack_update_with_conditions(self):
        """Update manages new conditions added.

        When a new resource is added during updates, the stack handles the new
        conditions correctly, and doesn't fail to load them while the update is
        still in progress.
        """
        stack_identifier = self.stack_create(
            template=test_template_one_resource)

        updated_template = copy.deepcopy(test_template_two_resource)
        updated_template['conditions'] = {'cond1': True}
        updated_template['resources']['test3'] = {
            'type': 'OS::Heat::TestResource',
            'properties': {
                'value': {
                    'if': ['cond1', 'val3', 'val4']
                }
            }
        }
        test2_props = updated_template['resources']['test2']['properties']
        test2_props['action_wait_secs'] = {'create': 30}

        self.update_stack(stack_identifier,
                          template=updated_template,
                          expected_status='UPDATE_IN_PROGRESS')

        def check_resources():
            resources = self.list_resources(stack_identifier)
            if len(resources) < 2:
                return False
            self.assertIn('test3', resources)
            return True

        self.assertTrue(test.call_until_true(20, 2, check_resources))
Code example #12
    def test_alarm(self):
        """Confirm we can create an alarm and trigger it."""
        # create metric
        metric = self.metric_client.metric.create({
            'name': 'my_metric',
            'archive_policy_name': 'high',
        })

        # create the stack
        parameters = {'metric_id': metric['id']}
        stack_identifier = self.stack_create(template=self.template,
                                             parameters=parameters)
        measures = [{
            'timestamp': timeutils.isotime(datetime.datetime.now()),
            'value': 100
        }, {
            'timestamp': timeutils.isotime(
                datetime.datetime.now() + datetime.timedelta(minutes=1)),
            'value': 100
        }]
        # send measures(should cause the alarm to fire)
        self.metric_client.metric.add_measures(metric['id'], measures)

        # confirm we get a scaleup.
        # Note: there is little point waiting more than 60s+time to scale up.
        self.assertTrue(
            test.call_until_true(120, 2, self.check_instance_count,
                                 stack_identifier, 2))

        # cleanup metric
        self.metric_client.metric.delete(metric['id'])
Code example #13
File: test_create_update.py Project: openstack/heat
    def _test_conditional(self, test3_resource):
        """Update manages new conditions added.

        When a new resource is added during updates, the stack handles the new
        conditions correctly, and doesn't fail to load them while the update is
        still in progress.
        """
        stack_identifier = self.stack_create(
            template=test_template_one_resource)

        updated_template = copy.deepcopy(test_template_two_resource)
        updated_template['conditions'] = {'cond1': True}
        updated_template['resources']['test3'] = test3_resource
        test2_props = updated_template['resources']['test2']['properties']
        test2_props['action_wait_secs'] = {'create': 30}

        self.update_stack(stack_identifier,
                          template=updated_template,
                          expected_status='UPDATE_IN_PROGRESS')

        def check_resources():
            def is_complete(r):
                return r.resource_status in {'CREATE_COMPLETE',
                                             'UPDATE_COMPLETE'}

            resources = self.list_resources(stack_identifier, is_complete)
            if len(resources) < 2:
                return False
            self.assertIn('test3', resources)
            return True

        self.assertTrue(test.call_until_true(20, 2, check_resources))
Code example #14
File: test_aodh_alarm.py Project: aaratn/heat
    def test_alarm(self):
        """Confirm we can create an alarm and trigger it."""
        # create metric
        metric = self.metric_client.metric.create({
            'name': 'my_metric',
            'archive_policy_name': 'high',
        })

        # create the stack
        parameters = {'metric_id': metric['id']}
        stack_identifier = self.stack_create(template=self.template,
                                             parameters=parameters)
        measures = [{'timestamp': timeutils.isotime(datetime.datetime.now()),
                     'value': 100}, {'timestamp': timeutils.isotime(
                         datetime.datetime.now() + datetime.timedelta(
                             minutes=1)), 'value': 100}]
        # send measures(should cause the alarm to fire)
        self.metric_client.metric.add_measures(metric['id'], measures)

        # confirm we get a scaleup.
        # Note: there is little point waiting more than 60s+time to scale up.
        self.assertTrue(test.call_until_true(
            120, 2, self.check_instance_count, stack_identifier, 2))

        # cleanup metric
        self.metric_client.metric.delete(metric['id'])
Code example #15
    def _test_conditional(self, test3_resource):
        """Update manages new conditions added.

        When a new resource is added during updates, the stack handles the new
        conditions correctly, and doesn't fail to load them while the update is
        still in progress.
        """
        stack_identifier = self.stack_create(
            template=test_template_one_resource)

        updated_template = copy.deepcopy(test_template_two_resource)
        updated_template['conditions'] = {'cond1': True}
        updated_template['resources']['test3'] = test3_resource
        test2_props = updated_template['resources']['test2']['properties']
        test2_props['action_wait_secs'] = {'create': 30}

        self.update_stack(stack_identifier,
                          template=updated_template,
                          expected_status='UPDATE_IN_PROGRESS')

        def check_resources():
            def is_complete(r):
                return r.resource_status in {
                    'CREATE_COMPLETE', 'UPDATE_COMPLETE'
                }

            resources = self.list_resources(stack_identifier, is_complete)
            if len(resources) < 2:
                return False
            self.assertIn('test3', resources)
            return True

        self.assertTrue(test.call_until_true(20, 2, check_resources))
Code example #16
    def test_asg_cooldown(self):
        cooldown_tmpl = self.template.replace('cooldown: 0', 'cooldown: 10')
        stack_id = self.stack_create(template=cooldown_tmpl,
                                     expected_status='CREATE_COMPLETE')
        stack = self.client.stacks.get(stack_id)
        asg_size = self._stack_output(stack, 'asg_size')
        # Ensure that initial desired capacity is met
        self.assertEqual(3, asg_size)

        # send scale up signal.
        # Since cooldown is in effect, number of resources should not change
        asg = self.client.resources.get(stack_id, 'random_group')
        expected_resources = 3
        self.client.resources.signal(stack_id, 'scale_up_policy')
        test.call_until_true(self.conf.build_timeout, self.conf.build_interval,
                             self.check_autoscale_complete,
                             asg.physical_resource_id, expected_resources)
Code example #17
    def test_cancel_update_without_rollback(self):
        stack_id = self._test_cancel_update(rollback=False,
                                            expected_status='UPDATE_FAILED')
        self.assertTrue(
            test.call_until_true(60, 2, self.verify_resource_status, stack_id,
                                 'test1', 'UPDATE_COMPLETE'))
        eventlet.sleep(2)
        self.assertTrue(
            self.verify_resource_status(stack_id, 'test2', 'CREATE_COMPLETE'))
Code example #18
File: test_heat_autoscaling.py Project: hongbin/heat
    def test_asg_scale_down_min_size(self):
        stack_id = self.stack_create(template=self.template,
                                     expected_status='CREATE_COMPLETE')
        stack = self.client.stacks.get(stack_id)
        asg_size = self._stack_output(stack, 'asg_size')
        # Ensure that initial desired capacity is met
        self.assertEqual(3, asg_size)

        # send scale down signals and ensure that asg honors min_size
        asg = self.client.resources.get(stack_id, 'random_group')
        min_size = 2
        for num in range(asg_size-1, 0, -1):
            expected_resources = num if num >= min_size else min_size
            self.client.resources.signal(stack_id, 'scale_down_policy')
            test.call_until_true(self.conf.build_timeout,
                                 self.conf.build_interval,
                                 self.check_autoscale_complete,
                                 asg.physical_resource_id, expected_resources)
Code example #19
File: test_stack_cancel.py Project: aaratn/heat
    def test_cancel_update_without_rollback(self):
        stack_id = self._test_cancel_update(rollback=False,
                                            expected_status='UPDATE_FAILED')
        self.assertTrue(test.call_until_true(
            60, 2, self.verify_resource_status,
            stack_id, 'test1', 'UPDATE_COMPLETE'))
        eventlet.sleep(2)
        self.assertTrue(self.verify_resource_status(stack_id, 'test2',
                                                    'CREATE_COMPLETE'))
Code example #20
    def test_asg_scale_down_min_size(self):
        stack_id = self.stack_create(template=self.template,
                                     expected_status='CREATE_COMPLETE')
        stack = self.client.stacks.get(stack_id)
        asg_size = self._stack_output(stack, 'asg_size')
        # Ensure that initial desired capacity is met
        self.assertEqual(3, asg_size)

        # send scale down signals and ensure that asg honors min_size
        asg = self.client.resources.get(stack_id, 'random_group')
        min_size = 2
        for num in range(asg_size - 1, 0, -1):
            expected_resources = num if num >= min_size else min_size
            self.client.resources.signal(stack_id, 'scale_down_policy')
            test.call_until_true(self.conf.build_timeout,
                                 self.conf.build_interval,
                                 self.check_autoscale_complete,
                                 asg.physical_resource_id, expected_resources)
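
check_autoscale_complete(asg.physical_resource_id, expected_resources) is the predicate polled in several of these examples; judging by its arguments it inspects the group's nested stack and reports whether scaling has settled at the expected size. A hypothetical equivalent written as a free function (the status check is an assumption; in the tests it is a method on the test class):

def check_autoscale_complete(heat_client, nested_stack_id,
                             expected_resources):
    # True once the nested group stack holds the expected number of member
    # resources and none of them is still in an IN_PROGRESS state.
    resources = list(heat_client.resources.list(nested_stack_id))
    if len(resources) != expected_resources:
        return False
    return all(r.resource_status.endswith('_COMPLETE') for r in resources)
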
Code example #21
File: test_heat_autoscaling.py Project: hongbin/heat
    def test_asg_cooldown(self):
        cooldown_tmpl = self.template.replace('cooldown: 0',
                                              'cooldown: 10')
        stack_id = self.stack_create(template=cooldown_tmpl,
                                     expected_status='CREATE_COMPLETE')
        stack = self.client.stacks.get(stack_id)
        asg_size = self._stack_output(stack, 'asg_size')
        # Ensure that initial desired capacity is met
        self.assertEqual(3, asg_size)

        # send scale up signal.
        # Since cooldown is in effect, number of resources should not change
        asg = self.client.resources.get(stack_id, 'random_group')
        expected_resources = 3
        self.client.resources.signal(stack_id, 'scale_up_policy')
        test.call_until_true(self.conf.build_timeout,
                             self.conf.build_interval,
                             self.check_autoscale_complete,
                             asg.physical_resource_id, expected_resources)
Code example #22
    def test_signal_during_suspend(self):
        """Prove that a signal will fail when the stack is in suspend."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(
            test.call_until_true(self.build_timeout, self.build_interval,
                                 self.check_instance_count, stack_identifier,
                                 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # suspend the top level stack.
        self.client.actions.suspend(stack_id=stack_identifier)

        # Wait for stack to reach SUSPEND_COMPLETE
        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')

        # Send a signal and an exception will raise
        ex = self.assertRaises(exc.BadRequest, self.client.resources.signal,
                               stack_identifier, 'ScaleUpPolicy')

        error_msg = 'Signal resource during SUSPEND is not supported'
        self.assertIn(error_msg, six.text_type(ex))
        ev = self.wait_for_event_with_reason(
            stack_identifier,
            reason='Cannot signal resource during SUSPEND',
            rsrc_name='ScaleUpPolicy')
        self.assertEqual('SUSPEND_COMPLETE', ev[0].resource_status)

        # still SUSPEND_COMPLETE (not gone to UPDATE_COMPLETE)
        self._wait_for_stack_status(nested_ident, 'SUSPEND_COMPLETE')
        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
        # still 2 instances.
        self.assertTrue(
            test.call_until_true(self.build_timeout, self.build_interval,
                                 self.check_instance_count, stack_identifier,
                                 2))
Code example #23
    def test_events(self):
        queue_id = str(uuid.uuid4())
        environment = {
            'event_sinks': [{
                'type': 'zaqar-queue',
                'target': queue_id,
                'ttl': 120
            }]
        }
        stack_identifier = self.stack_create(template=self.template,
                                             environment=environment)
        stack_name, stack_id = stack_identifier.split('/')
        conf = {
            'auth_opts': {
                'backend': 'keystone',
                'options': {
                    'os_username': self.conf.username,
                    'os_password': self.conf.password,
                    'os_project_name': self.conf.project_name,
                    'os_auth_url': self.conf.auth_url,
                    'os_user_domain_id': self.conf.user_domain_id,
                    'os_project_domain_id': self.conf.project_domain_id,
                    'os_user_domain_name': self.conf.user_domain_name,
                    'os_project_domain_name': self.conf.project_domain_name
                }
            }
        }

        zaqar = zaqarclient.Client(conf=conf, version=1.1)
        queue = zaqar.queue(queue_id)

        def validate_messages():
            messages = list(queue.messages())
            if len(messages) < 4:
                return False

            types = [m.body['type'] for m in messages]
            self.assertEqual(['os.heat.event'] * 4, types)
            resources = set(
                [m.body['payload']['resource_name'] for m in messages])
            self.assertEqual(set([stack_name, 'test_resource']), resources)
            stack_ids = [m.body['payload']['stack_id'] for m in messages]
            self.assertEqual([stack_id] * 4, stack_ids)
            statuses = [m.body['payload']['resource_status'] for m in messages]
            statuses.sort()
            self.assertEqual(
                ['COMPLETE', 'COMPLETE', 'IN_PROGRESS', 'IN_PROGRESS'],
                statuses)
            actions = [m.body['payload']['resource_action'] for m in messages]
            self.assertEqual(['CREATE'] * 4, actions)
            return True

        self.assertTrue(test.call_until_true(20, 0, validate_messages))
Code example #24
File: test_autoscaling.py Project: aaratn/heat
    def test_signal_during_suspend(self):
        """Prove that a signal will fail when the stack is in suspend."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # suspend the top level stack.
        self.client.actions.suspend(stack_id=stack_identifier)

        # Wait for stack to reach SUSPEND_COMPLETE
        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')

        # Send a signal and an exception will raise
        ex = self.assertRaises(exc.BadRequest,
                               self.client.resources.signal,
                               stack_identifier, 'ScaleUpPolicy')

        error_msg = 'Signal resource during SUSPEND is not supported'
        self.assertIn(error_msg, six.text_type(ex))
        ev = self.wait_for_event_with_reason(
            stack_identifier,
            reason='Cannot signal resource during SUSPEND',
            rsrc_name='ScaleUpPolicy')
        self.assertEqual('SUSPEND_COMPLETE', ev[0].resource_status)

        # still SUSPEND_COMPLETE (not gone to UPDATE_COMPLETE)
        self._wait_for_stack_status(nested_ident, 'SUSPEND_COMPLETE')
        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
        # still 2 instances.
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))
Code example #25
    def test_signal_with_policy_update(self):
        """Prove that an updated policy is used in the next signal."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(
            test.call_until_true(self.build_timeout, self.build_interval,
                                 self.check_instance_count, stack_identifier,
                                 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up one, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(
            test.call_until_true(self.build_timeout, self.build_interval,
                                 self.check_instance_count, stack_identifier,
                                 3))

        # increase the adjustment to "+2" and remove the DesiredCapacity
        # so we don't go from 3 to 2.
        new_template = self.template.replace(
            '"ScalingAdjustment": "1"', '"ScalingAdjustment": "2"').replace(
                '"DesiredCapacity" : {"Ref": "size"},', '')

        self.update_stack(stack_identifier,
                          template=new_template,
                          environment=self.env,
                          files=self.files)

        # Scale up two, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(
            test.call_until_true(self.build_timeout, self.build_interval,
                                 self.check_instance_count, stack_identifier,
                                 5))
Code example #26
File: test_stack_cancel.py Project: aaratn/heat
    def test_cancel_create_without_rollback(self):
        before, after = get_templates(delay_s=30)
        stack_id = self.stack_create(template=before,
                                     expected_status='CREATE_IN_PROGRESS')
        self._wait_for_resource_status(stack_id, 'test1', 'CREATE_IN_PROGRESS')
        self.cancel_update_stack(stack_id, rollback=False,
                                 expected_status='CREATE_FAILED')
        self.assertTrue(test.call_until_true(
            60, 2, self.verify_resource_status,
            stack_id, 'test1', 'CREATE_COMPLETE'))
        eventlet.sleep(2)
        self.assertTrue(self.verify_resource_status(stack_id, 'test2',
                                                    'INIT_COMPLETE'))
Code example #27
    def test_cancel_create_without_rollback(self):
        before, after = get_templates(delay_s=30)
        stack_id = self.stack_create(template=before,
                                     expected_status='CREATE_IN_PROGRESS')
        self._wait_for_resource_status(stack_id, 'test1', 'CREATE_IN_PROGRESS')
        self.cancel_update_stack(stack_id,
                                 rollback=False,
                                 expected_status='CREATE_FAILED')
        self.assertTrue(
            test.call_until_true(60, 2, self.verify_resource_status, stack_id,
                                 'test1', 'CREATE_COMPLETE'))
        eventlet.sleep(2)
        self.assertTrue(
            self.verify_resource_status(stack_id, 'test2', 'INIT_COMPLETE'))
Code example #28
File: test_autoscaling.py Project: zzxwill/heat
    def test_signal_with_policy_update(self):
        """Prove that an updated policy is used in the next signal."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up one, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 3))

        # increase the adjustment to "+2" and remove the DesiredCapacity
        # so we don't go from 3 to 2.
        new_template = self.template.replace(
            '"ScalingAdjustment": "1"',
            '"ScalingAdjustment": "2"').replace(
                '"DesiredCapacity" : {"Ref": "size"},', '')

        self.update_stack(stack_identifier, template=new_template,
                          environment=self.env, files=self.files)

        # Scale up two, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 5))
Code example #29
    def test_alarm(self):
        """Confirm we can create an alarm and trigger it."""

        # 1. create the stack
        stack_identifier = self.stack_create(template=self.template)

        # 2. send ceilometer a metric (should cause the alarm to fire)
        sample = {}
        sample['counter_type'] = 'gauge'
        sample['counter_name'] = 'test_meter'
        sample['counter_volume'] = 1
        sample['counter_unit'] = 'count'
        sample['resource_metadata'] = {'metering.stack_id':
                                       stack_identifier.split('/')[-1]}
        sample['resource_id'] = 'shouldnt_matter'
        self.metering_client.samples.create(**sample)

        # 3. confirm we get a scaleup.
        # Note: there is little point waiting more than 60s+time to scale up.
        self.assertTrue(test.call_until_true(
            120, 2, self.check_instance_count, stack_identifier, 2))
Code example #30
File: test_software_config.py Project: aaratn/heat
    def test_deployments_timeout_failed(self):
        parms = {'flavor': self.conf.minimal_instance_type,
                 'network': self.conf.fixed_network_name,
                 'image': self.conf.minimal_image_ref}
        stack_identifier = self.stack_create(
            parameters=parms,
            template=self.server_template,
            enable_cleanup=self.enable_cleanup)
        server_stack = self.client.stacks.get(stack_identifier)
        server = server_stack.outputs[0]['output_value']
        config_stack = self.deploy_config(server, 3, 1)
        self._wait_for_stack_status(config_stack, 'CREATE_FAILED')
        kwargs = {'server_id': server}

        def check_deployment_status():
            sd_list = self.client.software_deployments.list(**kwargs)
            for sd in sd_list:
                if sd.status != 'FAILED':
                    return False
            return True

        self.assertTrue(test.call_until_true(
            20, 0, check_deployment_status))
Code example #31
    def test_deployments_timeout_failed(self):
        parms = {
            'flavor': self.conf.minimal_instance_type,
            'network': self.conf.fixed_network_name,
            'image': self.conf.minimal_image_ref
        }
        stack_identifier = self.stack_create(
            parameters=parms,
            template=self.server_template,
            enable_cleanup=self.enable_cleanup)
        server_stack = self.client.stacks.get(stack_identifier)
        server = server_stack.outputs[0]['output_value']
        config_stack = self.deploy_config(server, 3, 1)
        self._wait_for_stack_status(config_stack, 'CREATE_FAILED')
        kwargs = {'server_id': server}

        def check_deployment_status():
            sd_list = self.client.software_deployments.list(**kwargs)
            for sd in sd_list:
                if sd.status != 'FAILED':
                    return False
            return True

        self.assertTrue(test.call_until_true(20, 0, check_deployment_status))