def test_asg_notifications(self):
        stack_identifier = self.stack_create(template=self.asg_template)

        for output in self.client.stacks.get(stack_identifier).outputs:
            if output['output_key'] == 'scale_dn_url':
                scale_down_url = output['output_value']
            else:
                scale_up_url = output['output_value']

        notifications = []
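        # The handler records notifications emitted for this stack that match
        # the expected autoscaling notification types.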
        handler = NotificationHandler(stack_identifier.split('/')[0],
                                      ASG_NOTIFICATIONS)

        with self.conn.Consumer(self.queue,
                                callbacks=[handler.process_message],
                                auto_declare=False):

            requests.post(scale_up_url, verify=self.verify_cert)
            self.assertTrue(
                test.call_until_true(20, 0, self.consume_events, handler, 2))
            notifications += handler.notifications

            handler.clear()
            requests.post(scale_down_url, verify=self.verify_cert)
            self.assertTrue(
                test.call_until_true(20, 0, self.consume_events, handler, 2))
            notifications += handler.notifications

        self.assertEqual(2, notifications.count(ASG_NOTIFICATIONS[0]))
        self.assertEqual(2, notifications.count(ASG_NOTIFICATIONS[1]))
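
The polling in this example (and the ones below) goes through test.call_until_true from the integration-test helpers. The sketch below shows roughly how such a helper behaves, inferred only from how it is called here (timeout, sleep interval, predicate, predicate arguments); the framework's real implementation may differ.

import time


def call_until_true(duration, sleep_for, func, *args, **kwargs):
    # Call func repeatedly until it returns a truthy value or the timeout
    # expires; return True on success, False on timeout. This is a sketch of
    # the assumed behaviour, not the framework's actual code.
    deadline = time.time() + duration
    while time.time() < deadline:
        if func(*args, **kwargs):
            return True
        time.sleep(sleep_for)
    return False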
Example #2
    def test_autoscaling_loadbalancer_neutron(self):
        """Check work of AutoScaing and Neutron LBaaS v2 resource in Heat.

        The scenario is the following:
            1. Launch a stack with a load balancer and autoscaling group
               of one server, wait until stack create is complete.
            2. Check that there is only one distinctive response from
               loadbalanced IP.
            3. Signal the scale_up policy, wait until all resources in
               autoscaling group are complete.
            4. Check that now there are two distinctive responses from
               loadbalanced IP.
        """

        parameters = {
            'flavor': self.conf.minimal_instance_type,
            'image': self.conf.minimal_image_ref,
            'net': self.conf.fixed_network_name,
            'subnet': self.conf.fixed_subnet_name,
            'public_net': self.conf.floating_network_name
        }

        app_server_template = self._load_template(
            __file__, self.app_server_template_name, self.sub_dir)
        webapp_template = self._load_template(__file__,
                                              self.webapp_template_name,
                                              self.sub_dir)
        files = {
            'appserver.yaml': app_server_template,
            'webapp.yaml': webapp_template
        }
        env = {
            'resource_registry': {
                'OS::Test::NeutronAppServer': 'appserver.yaml',
                'OS::Test::WebAppConfig': 'webapp.yaml'
            }
        }

        # Launch stack
        sid = self.launch_stack(template_name=self.template_name,
                                parameters=parameters,
                                files=files,
                                environment=env)
        stack = self.client.stacks.get(sid)
        lb_url = self._stack_output(stack, 'lburl')
        # Check number of distinctive responses, must be 1
        self.check_num_responses(lb_url, 1)

        # Signal the scaling hook
        self.client.resources.signal(sid, 'scale_up')

        # Wait for AutoScalingGroup update to finish
        asg = self.client.resources.get(sid, 'asg')
        self.assertTrue(
            test.call_until_true(self.conf.build_timeout,
                                 self.conf.build_interval,
                                 self.check_autoscale_complete,
                                 asg.physical_resource_id, 2, sid, 'asg'))

        # Check number of distinctive responses, must now be 2
        self.check_num_responses(lb_url, 2)
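
check_num_responses is a helper on the scenario base class and is not shown here. A minimal sketch, assuming it simply hits the load-balanced URL a number of times and counts the distinct response bodies (the retries parameter and the inline use of requests are illustrative assumptions, not the real helper's signature):

    def check_num_responses(self, url, expected_num, retries=10):
        # Sketch only: each backend server is assumed to answer with its own
        # distinguishable body, so the number of distinct bodies seen equals
        # the number of servers behind the load balancer.
        import requests

        seen = set()
        for _ in range(retries):
            seen.add(requests.get(url).text)
        self.assertEqual(expected_num, len(seen))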
Example #3
    def test_stack_update_with_conditions(self):
        """Update manages new conditions added.

        When a new resource is added during updates, the stacks handles the new
        conditions correctly, and doesn't fail to load them while the update is
        still in progress.
        """
        stack_identifier = self.stack_create(
            template=test_template_one_resource)

        updated_template = copy.deepcopy(test_template_two_resource)
        updated_template['conditions'] = {'cond1': True}
        updated_template['resources']['test3'] = {
            'type': 'OS::Heat::TestResource',
            'properties': {
                'value': {
                    'if': ['cond1', 'val3', 'val4']
                }
            }
        }
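        # Slow down creation of test2 so the update stays UPDATE_IN_PROGRESS
        # long enough for the resource listing below to observe it.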
        test2_props = updated_template['resources']['test2']['properties']
        test2_props['action_wait_secs'] = {'create': 30}

        self.update_stack(stack_identifier,
                          template=updated_template,
                          expected_status='UPDATE_IN_PROGRESS')

        def check_resources():
            resources = self.list_resources(stack_identifier)
            if len(resources) < 2:
                return False
            self.assertIn('test3', resources)
            return True

        self.assertTrue(test.call_until_true(20, 2, check_resources))
Example #4
    def test_alarm(self):
        """Confirm we can create an alarm and trigger it."""
        # create metric
        metric = self.metric_client.metric.create({
            'name': 'my_metric',
            'archive_policy_name': 'high',
        })

        # create the stack
        parameters = {'metric_id': metric['id']}
        stack_identifier = self.stack_create(template=self.template,
                                             parameters=parameters)
        measures = [{
            'timestamp': timeutils.isotime(datetime.datetime.now()),
            'value': 100
        }, {
            'timestamp': timeutils.isotime(datetime.datetime.now() +
                                           datetime.timedelta(minutes=1)),
            'value': 100
        }]
        # send measures (this should cause the alarm to fire)
        self.metric_client.metric.add_measures(metric['id'], measures)

        # confirm we get a scale-up.
        # Note: there is little point in waiting more than 60s plus the time
        # it takes to scale up.
        self.assertTrue(
            test.call_until_true(120, 2, self.check_instance_count,
                                 stack_identifier, 2))

        # cleanup metric
        self.metric_client.metric.delete(metric['id'])
Example #5
    def test_events(self):
        queue_id = str(uuid.uuid4())
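        # Register a Zaqar queue as an event sink so that stack events are
        # published to the queue as they happen.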
        environment = {
            'event_sinks': [{
                'type': 'zaqar-queue',
                'target': queue_id,
                'ttl': 120
            }]
        }
        stack_identifier = self.stack_create(template=self.template,
                                             environment=environment)
        stack_name, stack_id = stack_identifier.split('/')
        conf = {
            'auth_opts': {
                'backend': 'keystone',
                'options': {
                    'os_username': self.conf.username,
                    'os_password': self.conf.password,
                    'os_project_name': self.conf.project_name,
                    'os_auth_url': self.conf.auth_url,
                    'os_user_domain_id': self.conf.user_domain_id,
                    'os_project_domain_id': self.conf.project_domain_id,
                    'os_user_domain_name': self.conf.user_domain_name,
                    'os_project_domain_name': self.conf.project_domain_name,
                    'insecure': self.conf.disable_ssl_certificate_validation,
                    'cacert': self.conf.ca_file
                }
            }
        }

        zaqar = zaqarclient.Client(conf=conf)
        queue = zaqar.queue(queue_id)

        def validate_messages():
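            # Four CREATE events are expected: IN_PROGRESS and COMPLETE for
            # both the stack itself and its 'test_resource'.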
            messages = list(queue.messages())
            if len(messages) < 4:
                return False

            types = [m.body['type'] for m in messages]
            self.assertEqual(['os.heat.event'] * 4, types)
            resources = set(
                [m.body['payload']['resource_name'] for m in messages])
            self.assertEqual(set([stack_name, 'test_resource']), resources)
            stack_ids = [m.body['payload']['stack_id'] for m in messages]
            self.assertEqual([stack_id] * 4, stack_ids)
            statuses = [m.body['payload']['resource_status'] for m in messages]
            statuses.sort()
            self.assertEqual(
                ['COMPLETE', 'COMPLETE', 'IN_PROGRESS', 'IN_PROGRESS'],
                statuses)
            actions = [m.body['payload']['resource_action'] for m in messages]
            self.assertEqual(['CREATE'] * 4, actions)
            return True

        self.assertTrue(test.call_until_true(20, 0, validate_messages))
Example #6
    def test_asg_scale_down_min_size(self):
        stack_id = self.stack_create(template=self.template,
                                     expected_status='CREATE_COMPLETE')
        stack = self.client.stacks.get(stack_id)
        asg_size = self._stack_output(stack, 'asg_size')
        # Ensure that initial desired capacity is met
        self.assertEqual(3, asg_size)

        # send scale down signals and ensure that asg honors min_size
        asg = self.client.resources.get(stack_id, 'random_group')
        min_size = 2
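        # Each scale-down signal should remove one instance until the group
        # reaches min_size; signalling again must not shrink it further.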
        for num in range(asg_size-1, 0, -1):
            expected_resources = num if num >= min_size else min_size
            self.client.resources.signal(stack_id, 'scale_down_policy')
            self.assertTrue(
                test.call_until_true(self.conf.build_timeout,
                                     self.conf.build_interval,
                                     self.check_autoscale_complete,
                                     asg.physical_resource_id,
                                     expected_resources, stack_id,
                                     'scale_down_policy'))
Example #7
    def test_asg_cooldown(self):
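        # Raise the policy cooldown from 0 to 60 seconds; with the cooldown
        # in effect, the scale-up signal sent below must not change the
        # group size.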
        cooldown_tmpl = self.template.replace('cooldown: 0',
                                              'cooldown: 60')
        stack_id = self.stack_create(template=cooldown_tmpl,
                                     expected_status='CREATE_COMPLETE')
        stack = self.client.stacks.get(stack_id)
        asg_size = self._stack_output(stack, 'asg_size')
        # Ensure that initial desired capacity is met
        self.assertEqual(3, asg_size)

        # send scale up signal.
        # Since cooldown is in effect, number of resources should not change
        asg = self.client.resources.get(stack_id, 'random_group')
        expected_resources = 3
        self.client.resources.signal(stack_id, 'scale_up_policy')
        self.assertTrue(
            test.call_until_true(self.conf.build_timeout,
                                 self.conf.build_interval,
                                 self.check_autoscale_complete,
                                 asg.physical_resource_id,
                                 expected_resources, stack_id,
                                 'scale_up_policy'))
Example #8
    def test_deployments_timeout_failed(self):
        parms = {
            'flavor': self.conf.minimal_instance_type,
            'network': self.conf.fixed_network_name,
            'image': self.conf.minimal_image_ref
        }
        stack_identifier = self.stack_create(
            parameters=parms,
            template=self.server_template,
            enable_cleanup=self.enable_cleanup)
        server_stack = self.client.stacks.get(stack_identifier)
        server = server_stack.outputs[0]['output_value']
        config_stack = self.deploy_config(server, 3, 1)
        self._wait_for_stack_status(config_stack, 'CREATE_FAILED')
        kwargs = {'server_id': server}

        # Every software deployment on the server should end up FAILED once
        # the config's timeout expires.
        def check_deployment_status():
            sd_list = self.client.software_deployments.list(**kwargs)
            for sd in sd_list:
                if sd.status != 'FAILED':
                    return False
            return True

        self.assertTrue(test.call_until_true(20, 0, check_deployment_status))