Example No. 1
 def _create_metrics_for_match_by_sub_expressions_list(
         self, num, alarm_definition_id):
     # create some metrics
     metric1 = helpers.create_metric(name='cpu.idle_perc',
                                     dimensions={
                                         'service': 'monitoring',
                                         'hostname': 'mini-mon',
                                         'device': '/dev/sda1'
                                     })
     metric2 = helpers.create_metric(name='cpu.idle_perc',
                                     dimensions={
                                         'service': 'monitoring',
                                         'hostname': 'devstack',
                                         'device': '/dev/sda1'
                                     })
     metric3 = helpers.create_metric(name='cpu.idle_perc',
                                     dimensions={
                                         'service': 'monitoring',
                                         'hostname': 'mini-mon',
                                         'device': 'tmpfs'
                                     })
     metric4 = helpers.create_metric(name='cpu.idle_perc',
                                     dimensions={
                                         'service': 'monitoring',
                                         'hostname': 'devstack',
                                         'device': 'tmpfs'
                                     })
     self.monasca_client.create_metrics(metric1)
     self.monasca_client.create_metrics(metric2)
     self.monasca_client.create_metrics(metric3)
     self.monasca_client.create_metrics(metric4)
     self._wait_for_alarms(num, alarm_definition_id)
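Every example in this collection passes the dict returned by helpers.create_metric straight to monasca_client.create_metrics. The helper's exact defaults are not shown here; the following is a minimal sketch of what it presumably assembles, with the field names matching the Monasca POST /v2.0/metrics body and all default values being assumptions rather than the tempest helper's own:

    import time

    def create_metric(name='test_metric', dimensions=None, timestamp=None,
                      value=1.0, value_meta=None):
        # Build the metric payload dict expected by the Monasca metrics API.
        # Defaults here are illustrative, not the real helper's defaults.
        metric = {
            'name': name,
            'dimensions': dimensions if dimensions is not None else {},
            'timestamp': timestamp or int(round(time.time() * 1000)),
            'value': value,
        }
        if value_meta is not None:
            metric['value_meta'] = value_meta
        return metric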
Example No. 2
 def _create_metrics_for_match_by_sub_expressions(self, num,
                                                  alarm_definition_id):
     metric1 = helpers.create_metric(name='cpu.idle_perc',
                                     dimensions={
                                         'service': 'monitoring',
                                         'hostname': 'mini-mon'
                                     })
     metric2 = helpers.create_metric(name='cpu.idle_perc',
                                     dimensions={
                                         'service': 'monitoring',
                                         'hostname': 'devstack'
                                     })
     self.monasca_client.create_metrics(metric1)
     self.monasca_client.create_metrics(metric2)
     metric3 = helpers.create_metric(name='cpu.user_perc',
                                     dimensions={
                                         'service': 'monitoring',
                                         'hostname': 'mini-mon'
                                     })
     metric4 = helpers.create_metric(name='cpu.user_perc',
                                     dimensions={
                                         'service': 'monitoring',
                                         'hostname': 'devstack'
                                     })
     self.monasca_client.create_metrics(metric3)
     self.monasca_client.create_metrics(metric4)
     self._waiting_for_alarms(num, alarm_definition_id)
Example No. 3
 def test_create_metrics(self):
     metrics = [
         helpers.create_metric(),
         helpers.create_metric()
     ]
     resp, body = self.monasca_client.create_metrics(metrics)
     self.assertEqual(204, resp.status)
Example No. 4
 def _create_metrics_for_match_by_sub_expressions_list(self, num,
                                                       alarm_definition_id):
     # create some metrics
     metric1 = helpers.create_metric(
         name='cpu.idle_perc',
         dimensions={'service': 'monitoring',
                     'hostname': 'mini-mon',
                     'device': '/dev/sda1'})
     metric2 = helpers.create_metric(
         name='cpu.idle_perc',
         dimensions={'service': 'monitoring',
                     'hostname': 'devstack',
                     'device': '/dev/sda1'})
     metric3 = helpers.create_metric(
         name='cpu.idle_perc',
         dimensions={'service': 'monitoring',
                     'hostname': 'mini-mon',
                     'device': 'tmpfs'})
     metric4 = helpers.create_metric(
         name='cpu.idle_perc',
         dimensions={'service': 'monitoring',
                     'hostname': 'devstack',
                     'device': 'tmpfs'})
     self.monasca_client.create_metrics(metric1)
     self.monasca_client.create_metrics(metric2)
     self.monasca_client.create_metrics(metric3)
     self.monasca_client.create_metrics(metric4)
     self._waiting_for_alarms(num, alarm_definition_id)
Example No. 5
    def test_list_metrics_with_offset_limit(self):
        name = data_utils.rand_name()
        key1 = data_utils.rand_name()
        key2 = data_utils.rand_name()

        metrics = [
            helpers.create_metric(name=name, dimensions={
                key1: 'value-1', key2: 'value-1'}),
            helpers.create_metric(name=name, dimensions={
                key1: 'value-2', key2: 'value-2'}),
            helpers.create_metric(name=name, dimensions={
                key1: 'value-3', key2: 'value-3'}),
            helpers.create_metric(name=name, dimensions={
                key1: 'value-4', key2: 'value-4'})
        ]
        self.monasca_client.create_metrics(metrics)
        query_param = '?name=' + name
        for i in xrange(constants.MAX_RETRIES):
            resp, response_body = self.monasca_client.list_metrics(query_param)
            elements = response_body['elements']
            if elements and len(elements) == 4:
                break
            time.sleep(constants.RETRY_WAIT_SECS)
            if i == constants.MAX_RETRIES - 1:
                error_msg = ("Failed test_list_metrics_with_offset_limit: "
                             "timeout on waiting for metrics: 4 metrics "
                             "are needed. Current number of elements = "
                             "{}").format(len(elements))
                self.fail(error_msg)

        first_element = elements[0]
        query_parms = '?name=' + name + '&limit=4'
        resp, response_body = self.monasca_client.list_metrics(query_parms)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        self.assertEqual(4, len(elements))
        self.assertEqual(first_element, elements[0])

        for metric_index in xrange(len(elements) - 1):
            metric = elements[metric_index]
            max_limit = 3 - metric_index

            for limit in xrange(1, max_limit):
                first_index = metric_index + 1
                last_index = first_index + limit
                expected_elements = elements[first_index:last_index]

                query_parms = '?name=' + name + '&offset=' + \
                              str(metric['id']) + '&limit=' + \
                              str(limit)
                resp, response_body = self.\
                    monasca_client.list_metrics(query_parms)
                self.assertEqual(200, resp.status)
                new_elements = response_body['elements']

                self.assertEqual(limit, len(new_elements))
                for i in xrange(len(expected_elements)):
                    self.assertEqual(expected_elements[i], new_elements[i])
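Examples No. 5 and No. 13 exercise offset/limit paging of GET /v2.0/metrics: the id of the last element in a page is fed back as the offset of the next request, and a short page signals the end of the result set. A compact sketch of that loop, using a hypothetical client wrapper assumed to expose the same (resp, body) return shape as monasca_client.list_metrics:

    def list_all_metrics(client, name, page_size=4):
        # Collect every metric with the given name by walking the paged
        # listing; a page shorter than page_size ends the walk.
        collected = []
        offset = None
        while True:
            query = '?name=' + name + '&limit=' + str(page_size)
            if offset is not None:
                query += '&offset=' + str(offset)
            resp, body = client.list_metrics(query)
            elements = body['elements']
            collected.extend(elements)
            if len(elements) < page_size:
                return collected
            offset = elements[-1]['id']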
Example No. 6
    def test_list_metrics_with_offset_limit(self):
        name = data_utils.rand_name()
        key1 = data_utils.rand_name()
        key2 = data_utils.rand_name()

        metrics = [
            helpers.create_metric(name=name, dimensions={
                key1: 'value-1', key2: 'value-1'}),
            helpers.create_metric(name=name, dimensions={
                key1: 'value-2', key2: 'value-2'}),
            helpers.create_metric(name=name, dimensions={
                key1: 'value-3', key2: 'value-3'}),
            helpers.create_metric(name=name, dimensions={
                key1: 'value-4', key2: 'value-4'})
        ]
        self.monasca_client.create_metrics(metrics)
        query_param = '?name=' + name
        for i in xrange(constants.MAX_RETRIES):
            resp, response_body = self.monasca_client.list_metrics(query_param)
            elements = response_body['elements']
            if elements and len(elements) == 4:
                break
            time.sleep(constants.RETRY_WAIT_SECS)
            if i == constants.MAX_RETRIES - 1:
                error_msg = ("Failed test_list_metrics_with_offset_limit: "
                             "timeout on waiting for metrics: 4 metrics "
                             "are needed. Current number of elements = "
                             "{}").format(len(elements))
                self.fail(error_msg)

        first_element = elements[0]
        query_parms = '?name=' + name + '&limit=4'
        resp, response_body = self.monasca_client.list_metrics(query_parms)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        self.assertEqual(4, len(elements))
        self.assertEqual(first_element, elements[0])

        for metric_index in xrange(len(elements) - 1):
            metric = elements[metric_index]
            max_limit = 3 - metric_index

            for limit in xrange(1, max_limit):
                first_index = metric_index + 1
                last_index = first_index + limit
                expected_elements = elements[first_index:last_index]

                query_parms = '?name=' + name + '&offset=' + \
                              str(metric['id']) + '&limit=' + \
                              str(limit)
                resp, response_body = self.\
                    monasca_client.list_metrics(query_parms)
                self.assertEqual(200, resp.status)
                new_elements = response_body['elements']

                self.assertEqual(limit, len(new_elements))
                for i in xrange(len(expected_elements)):
                    self.assertEqual(expected_elements[i], new_elements[i])
Example No. 7
 def test_create_metrics(self):
     name = data_utils.rand_name('name')
     key = data_utils.rand_name('key')
     value = data_utils.rand_name('value')
     timestamp = int(round(time.time() * 1000))
     time_iso = helpers.timestamp_to_iso(timestamp)
     end_timestamp = int(round(timestamp + 3600 * 24 * 1000))
     end_time_iso = helpers.timestamp_to_iso(end_timestamp)
     value_meta_key1 = data_utils.rand_name('meta_key')
     value_meta_value1 = data_utils.rand_name('meta_value')
     value_meta_key2 = data_utils.rand_name('value_meta_key')
     value_meta_value2 = data_utils.rand_name('value_meta_value')
     metrics = [
         helpers.create_metric(name=name,
                               dimensions={key: value},
                               timestamp=timestamp,
                               value=1.23,
                               value_meta={
                                   value_meta_key1: value_meta_value1
                               }),
         helpers.create_metric(name=name,
                               dimensions={key: value},
                               timestamp=timestamp + 6000,
                               value=4.56,
                               value_meta={
                                   value_meta_key2: value_meta_value2
                               })
     ]
     resp, response_body = self.monasca_client.create_metrics(metrics)
     self.assertEqual(204, resp.status)
     query_param = '?name=' + name + '&start_time=' + str(time_iso) + \
                   '&end_time=' + str(end_time_iso)
     for i in xrange(constants.MAX_RETRIES):
         resp, response_body = self.monasca_client.\
             list_measurements(query_param)
         self.assertEqual(200, resp.status)
         elements = response_body['elements']
         for element in elements:
             if str(element['name']) == name \
                     and len(element['measurements']) == 2:
                 self._verify_list_measurements_element(element, key, value)
                 first_measurement = element['measurements'][0]
                 second_measurement = element['measurements'][1]
                 self._verify_list_measurements_measurement(
                     first_measurement, metrics[0], value_meta_key1,
                     value_meta_value1)
                 self._verify_list_measurements_measurement(
                     second_measurement, metrics[1], value_meta_key2,
                     value_meta_value2)
                 return
         time.sleep(constants.RETRY_WAIT_SECS)
         if i == constants.MAX_RETRIES - 1:
             error_msg = "Failed test_create_metrics: " \
                         "timeout on waiting for metrics: at least " \
                         "one metric is needed. Current number of " \
                         "metrics = 0"
             self.fail(error_msg)
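Because metric ingestion in Monasca is asynchronous, these tests poll the list endpoints until the expected data appears or the retry budget runs out (constants.MAX_RETRIES and constants.RETRY_WAIT_SECS in the examples). The same pattern, factored into a generic helper purely for illustration and not taken from the tests themselves:

    import time

    def wait_for(predicate, max_retries, wait_secs):
        # Call the predicate until it returns a truthy value or the retry
        # budget is exhausted; return whatever the predicate produced.
        for _ in range(max_retries):
            result = predicate()
            if result:
                return result
            time.sleep(wait_secs)
        raise AssertionError('timed out waiting for condition')

In Example No. 7 the predicate would be the check that list_measurements returns an element for the new metric name with two measurements attached.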
Example No. 8
 def test_create_metrics(self):
     name = data_utils.rand_name('name')
     key = data_utils.rand_name('key')
     value = data_utils.rand_name('value')
     timestamp = int(round(time.time() * 1000))
     time_iso = helpers.timestamp_to_iso(timestamp)
     end_timestamp = int(round(timestamp + 3600 * 24 * 1000))
     end_time_iso = helpers.timestamp_to_iso(end_timestamp)
     value_meta_key1 = data_utils.rand_name('meta_key')
     value_meta_value1 = data_utils.rand_name('meta_value')
     value_meta_key2 = data_utils.rand_name('value_meta_key')
     value_meta_value2 = data_utils.rand_name('value_meta_value')
     metrics = [
         helpers.create_metric(name=name,
                               dimensions={key: value},
                               timestamp=timestamp,
                               value=1.23,
                               value_meta={
                                   value_meta_key1: value_meta_value1
                               }),
         helpers.create_metric(name=name,
                               dimensions={key: value},
                               timestamp=timestamp + 6000,
                               value=4.56,
                               value_meta={
                                   value_meta_key2: value_meta_value2
                               })
     ]
     resp, response_body = self.monasca_client.create_metrics(metrics)
     self.assertEqual(204, resp.status)
     query_param = '?name=' + name + '&start_time=' + str(time_iso) + \
                   '&end_time=' + str(end_time_iso)
     for i in xrange(constants.MAX_RETRIES):
         resp, response_body = self.monasca_client.\
             list_measurements(query_param)
         self.assertEqual(200, resp.status)
         elements = response_body['elements']
         for element in elements:
             if str(element['name']) == name \
                     and len(element['measurements']) == 2:
                 self._verify_list_measurements_element(element, key, value)
                 first_measurement = element['measurements'][0]
                 second_measurement = element['measurements'][1]
                 self._verify_list_measurements_measurement(
                     first_measurement, metrics[0], value_meta_key1,
                     value_meta_value1)
                 self._verify_list_measurements_measurement(
                     second_measurement, metrics[1], value_meta_key2,
                     value_meta_value2)
                 return
         time.sleep(constants.RETRY_WAIT_SECS)
         if i == constants.MAX_RETRIES - 1:
             error_msg = "Failed test_create_metrics: " \
                         "timeout on waiting for metrics: at least " \
                         "one metric is needed. Current number of " \
                         "metrics = 0"
             self.fail(error_msg)
Example No. 9
 def test_create_metric_with_invalid_chars_in_dimensions(self):
     for invalid_char in constants.INVALID_CHARS:
         metric = helpers.create_metric('name-1', {'key-1': invalid_char})
         self.assertRaises(exceptions.UnprocessableEntity,
                           self.monasca_client.create_metrics, metric)
     for invalid_char in constants.INVALID_CHARS:
         metric = helpers.create_metric('name-1', {invalid_char: 'value-1'})
         self.assertRaises(exceptions.UnprocessableEntity,
                           self.monasca_client.create_metrics, metric)
Example No. 10
    def test_verify_deterministic_alarm(self):
        metric_name = data_utils.rand_name('log.fancy')
        metric_dimensions = {'service': 'monitoring',
                             'hostname': 'mini-mon'}

        name = data_utils.rand_name('alarm_definition')
        expression = ('count(%s{service=monitoring},deterministic) > 10'
                      % metric_name)
        match_by = ['hostname', 'device']
        description = 'deterministic'

        alarm_definition = helpers.create_alarm_definition(
            name=name, description=description,
            expression=expression, match_by=match_by)

        resp, response_body = self.monasca_client.create_alarm_definitions(
            alarm_definition)

        alarm_definition_id = response_body['id']
        query_param = '?alarm_definition_id=' + str(alarm_definition_id)

        # 1. ensure alarm was not created
        resp, response_body = self.monasca_client.list_alarms(query_param)
        self._verify_list_alarms_elements(resp, response_body, 0)

        # 2. put some metrics here to create it, should be in ok
        metrics_count = 5
        for it in range(0, metrics_count):
            metric = helpers.create_metric(name=metric_name,
                                           value=1.0,
                                           dimensions=metric_dimensions)
            self.monasca_client.create_metrics(metric)

        self._wait_for_alarms(1, alarm_definition_id)

        resp, response_body = self.monasca_client.list_alarms(query_param)
        self._verify_list_alarms_elements(resp, response_body, 1)
        element = response_body['elements'][0]

        self.assertEqual('OK', element['state'])

        # 3. exceed threshold
        metrics_count = 20
        for it in range(0, metrics_count):
            metric = helpers.create_metric(name=metric_name,
                                           value=1.0,
                                           dimensions=metric_dimensions)
            self.monasca_client.create_metrics(metric)

        self._wait_for_alarms(1, alarm_definition_id)

        resp, response_body = self.monasca_client.list_alarms(query_param)
        self._verify_list_alarms_elements(resp, response_body, 1)
        element = response_body['elements'][0]

        self.assertEqual('ALARM', element['state'])
Example No. 11
    def resource_setup(cls):
        super(TestDimensions, cls).resource_setup()
        metric_name1 = data_utils.rand_name()
        name1 = "name_1"
        name2 = "name_2"
        value1 = "value_1"
        value2 = "value_2"

        timestamp = int(round(time.time() * 1000))
        time_iso = helpers.timestamp_to_iso(timestamp)

        metric1 = helpers.create_metric(name=metric_name1,
                                        dimensions={name1: value1,
                                                    name2: value2
                                                    })
        cls.monasca_client.create_metrics(metric1)
        metric1 = helpers.create_metric(name=metric_name1,
                                        dimensions={name1: value2})
        cls.monasca_client.create_metrics(metric1)

        metric_name2 = data_utils.rand_name()
        name3 = "name_3"
        value3 = "value_3"
        metric2 = helpers.create_metric(name=metric_name2,
                                        dimensions={name3: value3})
        cls.monasca_client.create_metrics(metric2)

        metric_name3 = data_utils.rand_name()
        metric3 = helpers.create_metric(name=metric_name3,
                                        dimensions={name1: value3})

        cls.monasca_client.create_metrics(metric3)

        cls._test_metric1 = metric1
        cls._test_metric2 = metric2
        cls._test_metric_names = {metric_name1, metric_name2, metric_name3}
        cls._dim_names_metric1 = [name1, name2]
        cls._dim_names_metric2 = [name3]
        cls._dim_names = cls._dim_names_metric1 + cls._dim_names_metric2
        cls._dim_values_for_metric1 = [value1, value2]
        cls._dim_values = [value1, value2, value3]

        param = '?start_time=' + time_iso
        returned_name_set = set()
        for i in xrange(constants.MAX_RETRIES):
            resp, response_body = cls.monasca_client.list_metrics(
                param)
            elements = response_body['elements']
            for element in elements:
                returned_name_set.add(str(element['name']))
            if cls._test_metric_names.issubset(returned_name_set):
                return
            time.sleep(constants.RETRY_WAIT_SECS)

        assert False, 'Unable to initialize metrics'
Example No. 12
    def resource_setup(cls):
        super(TestDimensions, cls).resource_setup()
        metric_name1 = data_utils.rand_name()
        name1 = "name_1"
        name2 = "name_2"
        value1 = "value_1"
        value2 = "value_2"

        timestamp = int(round(time.time() * 1000))
        time_iso = helpers.timestamp_to_iso(timestamp)

        metric1 = helpers.create_metric(name=metric_name1,
                                        dimensions={name1: value1,
                                                    name2: value2
                                                    })
        cls.monasca_client.create_metrics(metric1)
        metric1 = helpers.create_metric(name=metric_name1,
                                        dimensions={name1: value2})
        cls.monasca_client.create_metrics(metric1)

        metric_name2 = data_utils.rand_name()
        name3 = "name_3"
        value3 = "value_3"
        metric2 = helpers.create_metric(name=metric_name2,
                                        dimensions={name3: value3})
        cls.monasca_client.create_metrics(metric2)

        metric_name3 = data_utils.rand_name()
        metric3 = helpers.create_metric(name=metric_name3,
                                        dimensions={name1: value3})

        cls.monasca_client.create_metrics(metric3)

        cls._test_metric1 = metric1
        cls._test_metric2 = metric2
        cls._test_metric_names = {metric_name1, metric_name2, metric_name3}
        cls._dim_names_metric1 = [name1, name2]
        cls._dim_names_metric2 = [name3]
        cls._dim_names = cls._dim_names_metric1 + cls._dim_names_metric2
        cls._dim_values_for_metric1 = [value1, value2]
        cls._dim_values = [value1, value2, value3]

        param = '?start_time=' + time_iso
        returned_name_set = set()
        for i in range(constants.MAX_RETRIES):
            resp, response_body = cls.monasca_client.list_metrics(
                param)
            elements = response_body['elements']
            for element in elements:
                returned_name_set.add(str(element['name']))
            if cls._test_metric_names.issubset(returned_name_set):
                return
            time.sleep(constants.RETRY_WAIT_SECS)

        assert False, 'Unable to initialize metrics'
Example No. 13
    def test_list_metrics_with_offset_limit(self):
        name = data_utils.rand_name()
        key1 = data_utils.rand_name()
        key2 = data_utils.rand_name()

        metrics = [
            helpers.create_metric(name=name, dimensions={
                key1: 'value-1', key2: 'value-1'}),
            helpers.create_metric(name=name, dimensions={
                key1: 'value-2', key2: 'value-2'}),
            helpers.create_metric(name=name, dimensions={
                key1: 'value-3', key2: 'value-3'}),
            helpers.create_metric(name=name, dimensions={
                key1: 'value-4', key2: 'value-4'})
        ]
        resp, body = self.monasca_client.create_metrics(metrics)
        time.sleep(WAIT_TIME)

        query_parms = '?name=' + name
        resp, response_body = self.monasca_client.list_metrics(query_parms)
        self.assertEqual(200, resp.status)

        elements = response_body['elements']
        first_element = elements[0]
        last_element = elements[3]

        query_parms = '?name=' + name + '&limit=4'
        resp, response_body = self.monasca_client.list_metrics(query_parms)
        self.assertEqual(200, resp.status)

        elements = response_body['elements']
        self.assertEqual(4, len(elements))

        self.assertEqual(first_element, elements[0])

        for limit in xrange(1, 5):
            next_element = elements[limit - 1]
            while True:
                query_parms = '?name=' + name + '&offset=' +\
                              str(next_element['id']) + '&limit=' + str(limit)
                resp, response_body = self.monasca_client.list_metrics(
                    query_parms)
                self.assertEqual(200, resp.status)
                new_elements = response_body['elements']

                if len(new_elements) > limit - 1:
                    self.assertEqual(limit, len(new_elements))
                    next_element = new_elements[limit - 1]
                elif len(new_elements) > 0 and len(new_elements) <= limit - 1:
                    self.assertEqual(last_element, new_elements[0])
                    break
                else:
                    self.assertEqual(last_element, next_element)
                    break
Example No. 14
 def test_create_metric_with_invalid_chars_in_dimensions(self):
     for invalid_char in constants.INVALID_CHARS:
         metric = helpers.create_metric('name-1', {'key-1': invalid_char})
         self.assertRaises(exceptions.UnprocessableEntity,
                           self.monasca_client.create_metrics,
                           metric)
     for invalid_char in constants.INVALID_CHARS:
         metric = helpers.create_metric('name-1', {invalid_char: 'value-1'})
         self.assertRaises(exceptions.UnprocessableEntity,
                           self.monasca_client.create_metrics,
                           metric)
Example No. 15
    def test_list_alarms_by_metric_dimensions_multi_value(self):
        metric_name = data_utils.rand_name('metric')
        match_by_key = data_utils.rand_name('key')
        dim_key = data_utils.rand_name('key')
        dim_value_1 = data_utils.rand_name('value')
        dim_value_2 = data_utils.rand_name('value')
        alarm_def = helpers.create_alarm_definition(
            name=data_utils.rand_name('definition'),
            expression=metric_name + " > 1",
            match_by=[match_by_key])
        metric_1 = helpers.create_metric(
            metric_name, {
                match_by_key: data_utils.rand_name('value'),
                dim_key: dim_value_1
            })
        metric_2 = helpers.create_metric(
            metric_name, {
                match_by_key: data_utils.rand_name('value'),
                dim_key: dim_value_2
            })
        metric_3 = helpers.create_metric(
            metric_name, {match_by_key: data_utils.rand_name('value')})
        metrics = [metric_1, metric_2, metric_3]
        resp, response_body = self.monasca_client.create_alarm_definitions(
            alarm_def)
        self.assertEqual(201, resp.status)
        for i in xrange(constants.MAX_RETRIES):
            resp, alarm_def_result = self.monasca_client.create_metrics(
                metrics)
            self.assertEqual(204, resp.status)
            resp, response_body = self.monasca_client.list_alarms(
                '?metric_name=' + metric_name)
            self.assertEqual(200, resp.status)
            if len(response_body['elements']) >= 3:
                break
            time.sleep(constants.RETRY_WAIT_SECS)
            if i >= constants.MAX_RETRIES - 1:
                self.fail(
                    "Timeout creating alarms, required 3 but found {}".format(
                        len(response_body['elements'])))

        query_parms = '?metric_dimensions=' + dim_key + ':' + dim_value_1 + '|' + dim_value_2
        resp, response_body = self.monasca_client.list_alarms(query_parms)
        self._verify_list_alarms_elements(resp,
                                          response_body,
                                          expect_num_elements=2)
        dimension_sets = []
        for element in response_body['elements']:
            self.assertEqual(metric_name, element['metrics'][0]['name'])
            dimension_sets.append(element['metrics'][0]['dimensions'])
        self.assertIn(metric_1['dimensions'], dimension_sets)
        self.assertIn(metric_2['dimensions'], dimension_sets)
        self.assertNotIn(metric_3['dimensions'], dimension_sets)
Example No. 16
 def _create_metrics_for_match_by(self, num, alarm_definition_id):
     metric1 = helpers.create_metric(
         name='cpu.idle_perc',
         dimensions={'service': 'monitoring',
                     'hostname': 'mini-mon'})
     metric2 = helpers.create_metric(
         name='cpu.idle_perc',
         dimensions={'service': 'monitoring',
                     'hostname': 'devstack'})
     self.monasca_client.create_metrics(metric1)
     self.monasca_client.create_metrics(metric2)
     self._waiting_for_alarms(num, alarm_definition_id)
Example No. 17
 def _create_metrics_with_different_dimensions(self, same_name=True):
     name1 = data_utils.rand_name('name1')
     name2 = name1 if same_name else data_utils.rand_name('name2')
     name3 = name1 if same_name else data_utils.rand_name('name3')
     key_service = data_utils.rand_name('service')
     values = [data_utils.rand_name('value1'),
               data_utils.rand_name('value2')]
     metrics = [helpers.create_metric(name1, {key_service: values[0]}),
                helpers.create_metric(name2, {key_service: values[1]}),
                helpers.create_metric(name3, {'key3': 'value3'})]
     resp, response_body = self.monasca_client.create_metrics(metrics)
     self.assertEqual(204, resp.status)
     return metrics, name1, key_service, values
Example No. 18
 def _create_metrics_with_different_dimensions(self, same_name=True):
     name1 = data_utils.rand_name('name1')
     name2 = name1 if same_name else data_utils.rand_name('name2')
     name3 = name1 if same_name else data_utils.rand_name('name3')
     key_service = data_utils.rand_name('service')
     values = [data_utils.rand_name('value1'),
               data_utils.rand_name('value2')]
     metrics = [helpers.create_metric(name1, {key_service: values[0]}),
                helpers.create_metric(name2, {key_service: values[1]}),
                helpers.create_metric(name3, {'key3': 'value3'})]
     resp, response_body = self.monasca_client.create_metrics(metrics)
     self.assertEqual(204, resp.status)
     return metrics, name1, key_service, values
Example No. 19
    def resource_setup(cls):
        super(TestStatistics, cls).resource_setup()
        name = data_utils.rand_name('name')
        key = data_utils.rand_name('key')
        value1 = data_utils.rand_name('value1')
        value2 = data_utils.rand_name('value2')
        cls._test_name = name
        cls._test_key = key
        cls._test_value1 = value1
        cls._start_timestamp = int(time.time() * 1000)
        metrics = [
            helpers.create_metric(name=name,
                                  dimensions={key: value1},
                                  timestamp=cls._start_timestamp,
                                  value=metric_value1),
            helpers.create_metric(name=name,
                                  dimensions={key: value2},
                                  timestamp=cls._start_timestamp + 1000,
                                  value=metric_value2)
        ]
        cls.monasca_client.create_metrics(metrics)
        start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
        query_param = '?name=' + str(name) + '&start_time=' + \
                      start_time_iso + '&merge_metrics=true' + '&end_time=' + \
                      helpers.timestamp_to_iso(cls._start_timestamp + 1000 * 2)
        start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
        cls._start_time_iso = start_time_iso

        num_measurements = 0
        for i in xrange(constants.MAX_RETRIES):
            resp, response_body = cls.monasca_client.\
                list_measurements(query_param)
            elements = response_body['elements']
            for element in elements:
                if str(element['name']) == name:
                    if len(element['measurements']
                           ) >= MIN_REQUIRED_MEASUREMENTS:
                        cls._end_timestamp = cls._start_timestamp + 1000 * 3
                        cls._end_time_iso = helpers.timestamp_to_iso(
                            cls._end_timestamp)
                        return
                    else:
                        num_measurements = len(element['measurements'])
                        break
            time.sleep(constants.RETRY_WAIT_SECS)

        assert False, "Required {} measurements, found {}".format(
            MIN_REQUIRED_MEASUREMENTS, num_measurements)
Example No. 20
    def resource_setup(cls):
        super(TestMetricsNames, cls).resource_setup()
        name = data_utils.rand_name()
        key = data_utils.rand_name()
        value = data_utils.rand_name()
        cls._param = key + ':' + value
        metric = helpers.create_metric(name=name,
                                       dimensions={key: value})
        cls._test_metric = metric
        cls.monasca_client.create_metrics(metric)

        start_time = str(timeutils.iso8601_from_timestamp(
                         metric['timestamp'] / 1000.0))
        query_params = '?name=' + str(cls._test_metric['name']) +\
                       '&start_time=' + start_time

        for i in xrange(constants.MAX_RETRIES):
            resp, response_body = cls.monasca_client.list_metrics(
                query_params)
            elements = response_body['elements']
            for element in elements:
                if str(element['name']) == cls._test_metric['name']:
                    return
            time.sleep(constants.RETRY_WAIT_SECS)

        assert False, 'Unable to initialize metrics'
Example No. 21
 def test_create_metric_with_empty_key_in_dimensions(self):
     name = data_utils.rand_name('name')
     metric = helpers.create_metric(name=name,
                                    dimensions={'': 'value'})
     self.assertRaises(exceptions.UnprocessableEntity,
                       self.monasca_client.create_metrics,
                       metric)
Example No. 22
 def test_create_metric_with_empty_key_in_dimensions(self):
     name = data_utils.rand_name('name')
     metric = helpers.create_metric(name=name,
                                    dimensions={'': 'value'})
     self.assertRaises(exceptions.UnprocessableEntity,
                       self.monasca_client.create_metrics,
                       metric)
Example No. 23
    def test_list_alarms_by_multiple_metric_dimensions(self):
        metric = helpers.create_metric(
            name=data_utils.rand_name("multi-dimension"),
            dimensions={data_utils.rand_name("key-1"): data_utils.rand_name("value-1"),
                        data_utils.rand_name("key-2"): data_utils.rand_name("value-2")},
            value=20
        )
        dimension_strings = [key + '=' + value for key, value in metric['dimensions'].items()]
        alarm_def = helpers.create_alarm_definition(
            name=data_utils.rand_name("multi-dimensions"),
            expression=metric['name'] + "{" + ','.join(dimension_strings) + '} > 15'
        )

        resp, response_body = self.monasca_client.create_alarm_definitions(alarm_def)
        self.assertEqual(201, resp.status)
        alarm_def_id = response_body['id']

        resp, response_body = self.monasca_client.create_metrics(metric)
        self.assertEqual(204, resp.status)
        self._wait_for_alarms(1, alarm_def_id)

        query_dimensions = [key + ':' + value for key, value in metric['dimensions'].items()]
        query_parms="?metric_dimensions=" + ','.join(query_dimensions)

        resp, response_body = self.monasca_client.list_alarms(query_parms)
        self._verify_list_alarms_elements(resp, response_body,
                                          expect_num_elements=1)
        element = response_body['elements'][0]
        # Compare the metric attached to the alarm with the one we created.
        returned_metric = element['metrics'][0]
        self._verify_metric_in_alarm(returned_metric, metric)
        self.assertEqual(alarm_def_id,
                         element['alarm_definition']['id'])
Example No. 24
    def resource_setup(cls):
        super(TestAlarmsStateHistoryOneTransition, cls).resource_setup()

        for i in xrange(MIN_HISTORY):
            alarm_definition = helpers.create_alarm_definition(
                name=data_utils.rand_name('alarm_state_history' + str(i + 1)),
                expression="min(name-" + str(i + 1) + ") < " + str(i + 1))
            cls.monasca_client.create_alarm_definitions(alarm_definition)

        num_transitions = 0
        for timer in xrange(constants.MAX_RETRIES):
            for i in xrange(MIN_HISTORY):
                # Create some metrics to prime the system and waiting for the
                # alarms to be created and then for them to change state.
                # MIN_HISTORY number of Alarms State History are needed.
                metric = helpers.create_metric(name="name-" + str(i + 1))
                cls.monasca_client.create_metrics(metric)
                # sleep 0.05 second between metrics to make sure timestamps
                # are different
                time.sleep(0.05)
            resp, response_body = cls.monasca_client.\
                list_alarms_state_history()
            elements = response_body['elements']
            if len(elements) >= MIN_HISTORY:
                return
            else:
                num_transitions = len(elements)
            time.sleep(constants.RETRY_WAIT_SECS)
        assert False, "Required {} alarm state transitions, but found {}".\
            format(MIN_HISTORY, num_transitions)
Example No. 25
 def test_create_metric_no_value(self):
     timestamp = int(round(time.time() * 1000))
     metric = helpers.create_metric(timestamp=timestamp,
                                    value=None)
     self.assertRaises(exceptions.UnprocessableEntity,
                       self.monasca_client.create_metrics,
                       metric)
Example No. 26
 def test_create_metric_with_value_meta_name_exceeds_max_length(self):
     long_value_meta_name = "x" * (constants.MAX_VALUE_META_NAME_LENGTH + 1)
     value_meta_dict = {long_value_meta_name: "value_meta_value"}
     metric = helpers.create_metric(name='name', value_meta=value_meta_dict)
     self.assertRaises(exceptions.UnprocessableEntity,
                       self.monasca_client.create_metrics,
                       metric)
Example No. 27
    def resource_setup(cls):
        super(TestMetricsNames, cls).resource_setup()
        name = data_utils.rand_name()
        key = data_utils.rand_name()
        value = data_utils.rand_name()
        cls._param = key + ':' + value
        metric = helpers.create_metric(name=name,
                                       dimensions={key: value})
        cls._test_metric = metric
        cls.monasca_client.create_metrics(metric)

        start_time = str(timeutils.iso8601_from_timestamp(
                         metric['timestamp'] / 1000.0))
        query_params = '?name=' + str(cls._test_metric['name']) +\
                       '&start_time=' + start_time

        for i in xrange(constants.MAX_RETRIES):
            resp, response_body = cls.monasca_client.list_metrics(
                query_params)
            elements = response_body['elements']
            for element in elements:
                if str(element['name']) == cls._test_metric['name']:
                    return
            time.sleep(constants.RETRY_WAIT_SECS)

        assert False, 'Unable to initialize metrics'
Example No. 28
 def test_create_metric_no_value(self):
     timestamp = int(round(time.time() * 1000))
     metric = helpers.create_metric(timestamp=timestamp,
                                    value=None)
     self.assertRaises(exceptions.UnprocessableEntity,
                       self.monasca_client.create_metrics,
                       metric)
Example No. 29
 def test_list_metrics_with_tenant(self):
     name = data_utils.rand_name('name')
     key = data_utils.rand_name('key')
     value = data_utils.rand_name('value')
     tenant = self.tenants_client.create_tenant(
         name=data_utils.rand_name('test_tenant'))['tenant']
     # Delete the tenant at the end of the test
     self.addCleanup(self.tenants_client.delete_tenant, tenant['id'])
     metric = helpers.create_metric(name=name,
                                    dimensions={key: value})
     resp, response_body = self.monasca_client.create_metrics(
         metric, tenant_id=tenant['id'])
     self.assertEqual(204, resp.status)
     query_param = '?tenant_id=' + str(tenant['id'])
     for i in xrange(constants.MAX_RETRIES):
         resp, response_body = self.monasca_client.list_metrics(query_param)
         self.assertEqual(200, resp.status)
         elements = response_body['elements']
         for element in elements:
             if str(element['name']) == name:
                 self._verify_list_metrics_element(element, test_key=key,
                                                   test_value=value)
                 return
         time.sleep(constants.RETRY_WAIT_SECS)
         if i == constants.MAX_RETRIES - 1:
             error_msg = "Failed test_list_metrics_with_tenant: " \
                         "timeout on waiting for metrics: at least " \
                         "one metric is needed. Current number of " \
                         "metrics = 0"
             self.fail(error_msg)
Example No. 30
 def test_create_metric_with_value_meta_name_exceeds_max_length(self):
     long_value_meta_name = "x" * (constants.MAX_VALUE_META_NAME_LENGTH + 1)
     value_meta_dict = {long_value_meta_name: "value_meta_value"}
     metric = helpers.create_metric(name='name', value_meta=value_meta_dict)
     self.assertRaises(exceptions.UnprocessableEntity,
                       self.monasca_client.create_metrics,
                       metric)
Example No. 31
 def test_list_metrics_with_tenant(self):
     name = data_utils.rand_name('name')
     key = data_utils.rand_name('key')
     value = data_utils.rand_name('value')
     tenant = self.tenants_client.create_tenant(
         name=data_utils.rand_name('test_tenant'))['tenant']
     # Delete the tenant at the end of the test
     self.addCleanup(self.tenants_client.delete_tenant, tenant['id'])
     metric = helpers.create_metric(name=name, dimensions={key: value})
     resp, response_body = self.monasca_client.create_metrics(
         metric, tenant_id=tenant['id'])
     self.assertEqual(204, resp.status)
     query_param = '?tenant_id=' + str(tenant['id'])
     for i in xrange(constants.MAX_RETRIES):
         resp, response_body = self.monasca_client.list_metrics(query_param)
         self.assertEqual(200, resp.status)
         elements = response_body['elements']
         for element in elements:
             if str(element['name']) == name:
                 self._verify_list_metrics_element(element,
                                                   test_key=key,
                                                   test_value=value)
                 return
         time.sleep(constants.RETRY_WAIT_SECS)
         if i == constants.MAX_RETRIES - 1:
             error_msg = "Failed test_list_metrics_with_tenant: " \
                         "timeout on waiting for metrics: at least " \
                         "one metric is needed. Current number of " \
                         "metrics = 0"
             self.fail(error_msg)
Example No. 32
    def test_list_statistics_with_no_merge_metrics(self):
        key = data_utils.rand_name('key')
        value = data_utils.rand_name('value')
        metric3 = helpers.create_metric(
            name=self._test_name,
            dimensions={key: value},
            timestamp=self._start_timestamp + 2000)
        self.monasca_client.create_metrics(metric3)
        query_param = '?name=' + str(self._test_name) + '&start_time=' + \
                      self._start_time_iso + '&end_time=' + helpers.\
            timestamp_to_iso(self._start_timestamp + 1000 * 4) + \
                      '&merge_metrics=True'

        for i in xrange(constants.MAX_RETRIES):
            resp, response_body = self.monasca_client.\
                list_measurements(query_param)
            elements = response_body['elements']
            for element in elements:
                if str(element['name']) == self._test_name and len(
                        element['measurements']) == 3:
                    end_time_iso = helpers.timestamp_to_iso(
                        self._start_timestamp + 1000 * 4)
                    query_parms = '?name=' + str(self._test_name) + \
                                  '&statistics=avg' + '&start_time=' + \
                                  str(self._start_time_iso) + '&end_time=' +\
                                  str(end_time_iso) + '&period=100000'
                    self.assertRaises(exceptions.Conflict,
                                      self.monasca_client.list_statistics,
                                      query_parms)
                    return
            time.sleep(constants.RETRY_WAIT_SECS)
        self._check_timeout(i, constants.MAX_RETRIES, elements, 3)
Example No. 33
 def _create_alarms_for_test_alarms(self, num):
     metric_name = data_utils.rand_name('name')
     key = data_utils.rand_name('key')
     value = data_utils.rand_name('value')
     alarm_definition_ids = []
     for i in xrange(num):
         # create an alarm definition
         expression = "max(" + metric_name + ") > 0"
         name = data_utils.rand_name('name-1')
         alarm_definition = helpers.create_alarm_definition(
             name=name, expression=expression)
         resp, response_body = self.monasca_client.create_alarm_definitions(
             alarm_definition)
         alarm_definition_ids.append(response_body['id'])
     expected_metric = helpers.create_metric(name=metric_name,
                                             dimensions={key: value})
     # create some metrics
     for j in xrange(num):
         for i in xrange(constants.MAX_RETRIES):
             self.monasca_client.create_metrics(expected_metric)
             time.sleep(constants.RETRY_WAIT_SECS)
             query_param = '?alarm_definition_id=' + \
                           str(alarm_definition_ids[j])
             resp, response_body = self.monasca_client.list_alarms(
                 query_param)
             elements = response_body['elements']
             if len(elements) >= 1:
                 break
     return alarm_definition_ids, expected_metric
Example No. 34
    def test_list_statistics_with_no_merge_metrics(self):
        key = data_utils.rand_name('key')
        value = data_utils.rand_name('value')
        metric3 = helpers.create_metric(
            name=self._test_name,
            dimensions={key: value},
            timestamp=self._start_timestamp + 2000)
        self.monasca_client.create_metrics(metric3)
        query_param = '?name=' + str(self._test_name) + '&start_time=' + \
                      self._start_time_iso + '&end_time=' + helpers.\
            timestamp_to_iso(self._start_timestamp + 1000 * 4) + \
                      '&merge_metrics=True'

        for i in range(constants.MAX_RETRIES):
            resp, response_body = self.monasca_client.\
                list_measurements(query_param)
            elements = response_body['elements']
            for element in elements:
                if str(element['name']) == self._test_name and len(
                        element['measurements']) == 3:
                    end_time_iso = helpers.timestamp_to_iso(
                        self._start_timestamp + 1000 * 4)
                    query_parms = '?name=' + str(self._test_name) + \
                                  '&statistics=avg' + '&start_time=' + \
                                  str(self._start_time_iso) + '&end_time=' +\
                                  str(end_time_iso) + '&period=100000'
                    self.assertRaises(exceptions.Conflict,
                                      self.monasca_client.list_statistics,
                                      query_parms)
                    return
            time.sleep(constants.RETRY_WAIT_SECS)
        self._check_timeout(i, constants.MAX_RETRIES, elements, 3)
Example No. 35
 def test_list_metrics_with_name(self):
     name = data_utils.rand_name(u'name')
     key = data_utils.rand_name(u'key')
     value = data_utils.rand_name(u'value')
     metric = helpers.create_metric(name=name, dimensions={key: value})
     resp, response_body = self.monasca_client.create_metrics(metric)
     self.assertEqual(204, resp.status)
     query_param = '?name=' + str(name)
     for i in range(constants.MAX_RETRIES):
         resp, response_body = self.monasca_client.list_metrics(query_param)
         self.assertEqual(200, resp.status)
         elements = response_body['elements']
         for element in elements:
             if element['name'] == name:
                 self._verify_list_metrics_element(element,
                                                   test_key=key,
                                                   test_value=value)
                 return
         time.sleep(constants.RETRY_WAIT_SECS)
         if i == constants.MAX_RETRIES - 1:
             error_msg = "Failed test_list_metrics_with_name: " \
                         "timeout on waiting for metrics: at least " \
                         "one metric is needed. Current number of " \
                         "metrics = 0"
             self.fail(error_msg)
Example No. 36
    def resource_setup(cls):
        super(TestAlarmsStateHistoryOneTransition, cls).resource_setup()

        for i in range(MIN_HISTORY):
            alarm_definition = helpers.create_alarm_definition(
                name=data_utils.rand_name('alarm_state_history' + str(i + 1)),
                expression="min(name-" + str(i + 1) + ") < " + str(i + 1))
            cls.monasca_client.create_alarm_definitions(alarm_definition)
        # Ensure the new Alarm Definitions get to the Threshold Engine
        time.sleep(constants.ALARM_DEFINITION_CREATION_WAIT)

        num_transitions = 0
        for timer in range(constants.MAX_RETRIES):
            for i in range(MIN_HISTORY):
                # Create some metrics to prime the system and waiting for the
                # alarms to be created and then for them to change state.
                # MIN_HISTORY number of Alarms State History are needed.
                metric = helpers.create_metric(name="name-" + str(i + 1))
                cls.monasca_client.create_metrics(metric)
                # Ensure alarms transition at different times
                time.sleep(0.1)
            resp, response_body = cls.monasca_client.\
                list_alarms_state_history()
            elements = response_body['elements']
            if len(elements) >= MIN_HISTORY:
                return
            else:
                num_transitions = len(elements)
            time.sleep(constants.RETRY_WAIT_SECS)
        assert False, "Required {} alarm state transitions, but found {}".\
            format(MIN_HISTORY, num_transitions)
Example No. 37
 def _create_alarms_for_test_alarms(self, num):
     metric_name = data_utils.rand_name('name')
     key = data_utils.rand_name('key')
     value = data_utils.rand_name('value')
     alarm_definition_ids = []
     for i in xrange(num):
         # create an alarm definition
         expression = "max(" + metric_name + ") > 0"
         name = data_utils.rand_name('name-1')
         alarm_definition = helpers.create_alarm_definition(
             name=name, expression=expression)
         resp, response_body = self.monasca_client.create_alarm_definitions(
             alarm_definition)
         alarm_definition_ids.append(response_body['id'])
     expected_metric = helpers.create_metric(name=metric_name,
                                             dimensions={key: value})
     # create some metrics
     for j in xrange(num):
         for i in xrange(constants.MAX_RETRIES):
             self.monasca_client.create_metrics(expected_metric)
             time.sleep(constants.RETRY_WAIT_SECS)
             query_param = '?alarm_definition_id=' + \
                           str(alarm_definition_ids[j])
             resp, response_body = self.monasca_client.list_alarms(
                 query_param)
             elements = response_body['elements']
             if len(elements) >= 1:
                 break
     return alarm_definition_ids, expected_metric
Example No. 38
 def test_create_metric_with_value_meta_exceeds_max_length(self):
     value_meta_name = "x"
     long_value_meta_value = "y" * constants.MAX_VALUE_META_TOTAL_LENGTH
     metric = helpers.create_metric(
         name='name', value_meta={value_meta_name: long_value_meta_value})
     self.assertRaises(exceptions.UnprocessableEntity,
                       self.monasca_client.create_metrics, metric)
Example No. 39
 def test_create_metric_with_colon_in_dimension_value(self):
     name = data_utils.rand_name('name')
     key = 'url'
     value = 'http://localhost:8070/v2.0'
     timestamp = int(round(time.time() * 1000))
     time_iso = helpers.timestamp_to_iso(timestamp)
     end_timestamp = int(round((time.time() + 3600 * 24) * 1000))
     end_time_iso = helpers.timestamp_to_iso(end_timestamp)
     metric = helpers.create_metric(name=name, dimensions={key: value})
     resp, response_body = self.monasca_client.create_metrics(metric)
     self.assertEqual(204, resp.status)
     query_param = '?name=' + name + '&start_time=' + time_iso + \
                   '&end_time=' + end_time_iso + \
                   '&dimensions=' + key + ':' + value
     for i in range(constants.MAX_RETRIES):
         resp, response_body = self.monasca_client. \
             list_measurements(query_param)
         self.assertEqual(200, resp.status)
         elements = response_body['elements']
         for element in elements:
             if str(element['name']) == name:
                 self._verify_list_measurements_element(element, key, value)
                 measurement = element['measurements'][0]
                 self._verify_list_measurements_measurement(
                     measurement, metric, None, None)
                 return
         time.sleep(constants.RETRY_WAIT_SECS)
         if i == constants.MAX_RETRIES - 1:
             error_msg = "Failed test_create_metric: " \
                         "timeout on waiting for metrics: at least " \
                         "one metric is needed. Current number of " \
                         "metrics = 0"
             self.fail(error_msg)
Example No. 40
 def test_create_metric_with_value_meta_exceeds_max_length(self):
     value_meta_name = "x"
     long_value_meta_value = "y" * constants.MAX_VALUE_META_TOTAL_LENGTH
     value_meta_dict = {value_meta_name: long_value_meta_value}
     metric = helpers.create_metric(name='name', value_meta=value_meta_dict)
     self.assertRaises(exceptions.UnprocessableEntity,
                       self.monasca_client.create_metrics,
                       metric)
Example No. 41
    def resource_setup(cls):
        super(TestStatistics, cls).resource_setup()
        name = data_utils.rand_name('name')
        key = data_utils.rand_name('key')
        value1 = data_utils.rand_name('value1')
        value2 = data_utils.rand_name('value2')
        cls._test_name = name
        cls._test_key = key
        cls._test_value1 = value1
        cls._start_timestamp = int(time.time() * 1000)
        metrics = [
            helpers.create_metric(name=name,
                                  dimensions={key: value1},
                                  timestamp=cls._start_timestamp,
                                  value=metric_value1),
            helpers.create_metric(name=name,
                                  dimensions={key: value2},
                                  timestamp=cls._start_timestamp + 1000,
                                  value=metric_value2)
        ]
        cls.monasca_client.create_metrics(metrics)
        start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
        query_param = '?name=' + str(name) + '&start_time=' + \
                      start_time_iso + '&merge_metrics=true' + '&end_time=' + \
                      helpers.timestamp_to_iso(cls._start_timestamp + 1000 * 2)
        cls._start_time_iso = start_time_iso

        num_measurements = 0
        for i in xrange(constants.MAX_RETRIES):
            resp, response_body = cls.monasca_client.\
                list_measurements(query_param)
            elements = response_body['elements']
            for element in elements:
                if str(element['name']) == name:
                    if len(element['measurements']) >= MIN_REQUIRED_MEASUREMENTS:
                        cls._end_timestamp = cls._start_timestamp + 1000 * 3
                        cls._end_time_iso = helpers.timestamp_to_iso(
                            cls._end_timestamp)
                        return
                    else:
                        num_measurements = len(element['measurements'])
                        break
            time.sleep(constants.RETRY_WAIT_SECS)

        assert False, "Required {} measurements, found {}".format(MIN_REQUIRED_MEASUREMENTS, num_measurements)
Ejemplo n.º 42
0
    def resource_setup(cls):
        super(TestAlarmStateHistoryMultipleTransitions, cls).resource_setup()
        alarm_definition = helpers.create_alarm_definition(
            name=data_utils.rand_name('alarm_state_history'),
            expression="min(name-1) < 1.0")
        cls.monasca_client.create_alarm_definitions(alarm_definition)
        for timer in xrange(constants.MAX_RETRIES):
            # create some metrics to prime the system and create
            # MIN_HISTORY alarms
            metric = helpers.create_metric(name="name-1",
                                           dimensions={'key1': 'value1'},
                                           value=0.0)
            cls.monasca_client.create_metrics(metric)
            # sleep 1 second between metrics to make sure timestamps
            # are different in the second field. Influxdb has a bug
            # where it does not sort properly by milliseconds. .014
            # is sorted as greater than .138
            time.sleep(1.0)
            resp, response_body = cls.monasca_client.\
                list_alarms_state_history()
            elements = response_body['elements']
            if len(elements) >= 1:
                break
            time.sleep(constants.RETRY_WAIT_SECS)

        time.sleep(constants.MAX_RETRIES)

        for timer in xrange(constants.MAX_RETRIES * 2):
            metric = helpers.create_metric(name="name-1",
                                           dimensions={'key2': 'value2'},
                                           value=2.0)
            cls.monasca_client.create_metrics(metric)
            # sleep 0.05 second between metrics to make sure timestamps
            # are different
            time.sleep(0.05)
            resp, response_body = \
                cls.monasca_client.list_alarms_state_history()
            elements = response_body['elements']
            if len(elements) >= 2:
                return
            else:
                num_transitions = len(elements)
            time.sleep(constants.RETRY_WAIT_SECS)
        assert False, "Required {} alarm state transitions, but found {}".\
            format(MIN_HISTORY, num_transitions)
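The alarm-based examples additionally use helpers.create_alarm_definition. Again only a hedged sketch, reconstructed from the keyword arguments seen in this listing (name, expression, match_by, severity):

def create_alarm_definition(name=None, expression=None, match_by=None,
                            severity=None):
    # Hypothetical reconstruction: build the body that
    # monasca_client.create_alarm_definitions() sends to the API.
    alarm_definition = {'name': name, 'expression': expression}
    if match_by is not None:
        alarm_definition['match_by'] = match_by
    if severity is not None:
        alarm_definition['severity'] = severity
    return alarm_definition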
Ejemplo n.º 43
0
    def test_list_alarms_by_metric_dimensions_no_value(self):
        metric_name = data_utils.rand_name('metric')
        match_by_key = data_utils.rand_name('key')
        dim_key = data_utils.rand_name('key')
        alarm_def = helpers.create_alarm_definition(
            name=data_utils.rand_name('definition'),
            expression=metric_name + " > 1",
            match_by=[match_by_key])
        metric_1 = helpers.create_metric(metric_name,
                                         {match_by_key: data_utils.rand_name('value'),
                                          dim_key: data_utils.rand_name('value')})
        metric_2 = helpers.create_metric(metric_name,
                                         {match_by_key: data_utils.rand_name('value'),
                                          dim_key: data_utils.rand_name('value')})
        metric_3 = helpers.create_metric(metric_name,
                                         {match_by_key: data_utils.rand_name('value')})
        metrics = [metric_1, metric_2, metric_3]
        resp, response_body = self.monasca_client.create_alarm_definitions(alarm_def)
        self.assertEqual(201, resp.status)

        for i in xrange(constants.MAX_RETRIES):
            resp, alarm_def_result = self.monasca_client.create_metrics(metrics)
            self.assertEqual(204, resp.status)
            resp, response_body = self.monasca_client.list_alarms('?metric_name=' + metric_name)
            self.assertEqual(200, resp.status)
            if len(response_body['elements']) >= 3:
                break
            time.sleep(constants.RETRY_WAIT_SECS)
            if i >= constants.MAX_RETRIES - 1:
                self.fail("Timeout creating alarms, required 3 but found {}".format(
                    len(response_body['elements'])))

        query_parms = '?metric_dimensions=' + dim_key
        resp, response_body = self.monasca_client.list_alarms(query_parms)
        self._verify_list_alarms_elements(resp, response_body,
                                          expect_num_elements=2)
        dimension_sets = []
        for element in response_body['elements']:
            self.assertEqual(metric_name, element['metrics'][0]['name'])
            dimension_sets.append(element['metrics'][0]['dimensions'])
        self.assertIn(metric_1['dimensions'], dimension_sets)
        self.assertIn(metric_2['dimensions'], dimension_sets)
        self.assertNotIn(metric_3['dimensions'], dimension_sets)
Ejemplo n.º 44
0
    def resource_setup(cls):
        super(TestAlarmStateHistoryMultipleTransitions, cls).resource_setup()
        alarm_definition = helpers.create_alarm_definition(
            name=data_utils.rand_name('alarm_state_history'),
            expression="min(name-1) < 1.0")
        cls.monasca_client.create_alarm_definitions(alarm_definition)
        for timer in xrange(constants.MAX_RETRIES):
            # create some metrics to prime the system and create
            # MIN_HISTORY alarms
            metric = helpers.create_metric(
                name="name-1", dimensions={'key1': 'value1'}, value=0.0)
            cls.monasca_client.create_metrics(metric)
            # sleep 1 second between metrics to make sure timestamps
            # are different in the second field. Influxdb has a bug
            # where it does not sort properly by milliseconds. .014
            # is sorted as greater than .138
            time.sleep(1.0)
            resp, response_body = cls.monasca_client.\
                list_alarms_state_history()
            elements = response_body['elements']
            if len(elements) >= 1:
                break
            time.sleep(constants.RETRY_WAIT_SECS)

        time.sleep(constants.MAX_RETRIES)

        for timer in xrange(constants.MAX_RETRIES * 2):
            metric = helpers.create_metric(
                name="name-1", dimensions={'key2': 'value2'}, value=2.0)
            cls.monasca_client.create_metrics(metric)
            # sleep 0.05 second between metrics to make sure timestamps
            # are different
            time.sleep(0.05)
            resp, response_body = \
                cls.monasca_client.list_alarms_state_history()
            elements = response_body['elements']
            if len(elements) >= 2:
                return
            else:
                num_transitions = len(elements)
            time.sleep(constants.RETRY_WAIT_SECS)
        assert False, "Required {} alarm state transitions, but found {}".\
            format(MIN_HISTORY, num_transitions)
Ejemplo n.º 45
0
    def test_list_alarms_by_severity(self):
        metric_name = data_utils.rand_name("severity-metric")
        alarm_defs = []
        alarm_defs.append(
            helpers.create_alarm_definition(
                name=data_utils.rand_name("alarm-severity"),
                expression=metric_name + " > 12",
                severity='LOW'))
        alarm_defs.append(
            helpers.create_alarm_definition(
                name=data_utils.rand_name("alarm-severity"),
                expression=metric_name + " > 12",
                severity='MEDIUM'))
        alarm_defs.append(
            helpers.create_alarm_definition(
                name=data_utils.rand_name("alarm-severity"),
                expression=metric_name + " > 12",
                severity='HIGH'))
        alarm_defs.append(
            helpers.create_alarm_definition(
                name=data_utils.rand_name("alarm-severity"),
                expression=metric_name + " > 12",
                severity='CRITICAL'))

        alarm_def_ids = []
        for definition in alarm_defs:
            resp, response_body = self.monasca_client.create_alarm_definitions(
                definition)
            self.assertEqual(201, resp.status)
            alarm_def_ids.append(response_body['id'])

        metric = helpers.create_metric(name=metric_name, value=14)
        resp, response_body = self.monasca_client.create_metrics(metric)
        self.assertEqual(204, resp.status)
        for def_id in alarm_def_ids:
            self._wait_for_alarms(1, def_id)

        query_parms = '?severity=LOW'
        resp, response_body = self.monasca_client.list_alarms(query_parms)
        self.assertEqual(200, resp.status)
        for alarm in response_body['elements']:
            self.assertEqual('LOW', alarm['alarm_definition']['severity'])

        query_parms = '?severity=HIGH'
        resp, response_body = self.monasca_client.list_alarms(query_parms)
        self.assertEqual(200, resp.status)
        for alarm in response_body['elements']:
            self.assertEqual('HIGH', alarm['alarm_definition']['severity'])

        query_parms = '?severity=CRITICAL'
        resp, response_body = self.monasca_client.list_alarms(query_parms)
        self.assertEqual(200, resp.status)
        for alarm in response_body['elements']:
            self.assertEqual('CRITICAL', alarm['alarm_definition']['severity'])
Ejemplo n.º 46
0
    def test_list_alarms_by_severity(self):
        metric_name = data_utils.rand_name("severity-metric")
        alarm_defs = []
        alarm_defs.append(helpers.create_alarm_definition(
            name=data_utils.rand_name("alarm-severity"),
            expression=metric_name + " > 12",
            severity='LOW'
        ))
        alarm_defs.append(helpers.create_alarm_definition(
            name=data_utils.rand_name("alarm-severity"),
            expression=metric_name + " > 12",
            severity='MEDIUM'
        ))
        alarm_defs.append(helpers.create_alarm_definition(
            name=data_utils.rand_name("alarm-severity"),
            expression=metric_name + " > 12",
            severity='HIGH'
        ))
        alarm_defs.append(helpers.create_alarm_definition(
            name=data_utils.rand_name("alarm-severity"),
            expression=metric_name + " > 12",
            severity='CRITICAL'
        ))

        alarm_def_ids = []
        for definition in alarm_defs:
            resp, response_body = self.monasca_client.create_alarm_definitions(definition)
            self.assertEqual(201, resp.status)
            alarm_def_ids.append(response_body['id'])

        metric = helpers.create_metric(name=metric_name,
                                       value=14)
        resp, response_body = self.monasca_client.create_metrics(metric)
        self.assertEqual(204, resp.status)
        for def_id in alarm_def_ids:
            self._wait_for_alarms(1, def_id)

        query_parms = '?severity=LOW'
        resp, response_body = self.monasca_client.list_alarms(query_parms)
        self.assertEqual(200, resp.status)
        for alarm in response_body['elements']:
            self.assertEqual('LOW', alarm['alarm_definition']['severity'])

        query_parms = '?severity=HIGH'
        resp, response_body = self.monasca_client.list_alarms(query_parms)
        self.assertEqual(200, resp.status)
        for alarm in response_body['elements']:
            self.assertEqual('HIGH', alarm['alarm_definition']['severity'])

        query_parms = '?severity=CRITICAL'
        resp, response_body = self.monasca_client.list_alarms(query_parms)
        self.assertEqual(200, resp.status)
        for alarm in response_body['elements']:
            self.assertEqual('CRITICAL', alarm['alarm_definition']['severity'])
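Both copies of test_list_alarms_by_severity repeat the same query-and-check block per severity (and neither queries MEDIUM, although a MEDIUM definition is created). Purely as an illustration, not the project's code, the checks could be driven from a list using only the calls already shown:

    def _check_alarms_by_severity(self,
                                  severities=('LOW', 'MEDIUM',
                                              'HIGH', 'CRITICAL')):
        # Illustrative only: list alarms per severity and verify that every
        # returned alarm carries that severity.
        for severity in severities:
            resp, response_body = self.monasca_client.list_alarms(
                '?severity=' + severity)
            self.assertEqual(200, resp.status)
            for alarm in response_body['elements']:
                self.assertEqual(severity,
                                 alarm['alarm_definition']['severity'])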
Ejemplo n.º 47
0
    def resource_setup(cls):
        super(TestStatistics, cls).resource_setup()
        name = data_utils.rand_name('name')
        key = data_utils.rand_name('key')
        value = data_utils.rand_name('value')
        cls._test_name = name
        cls._test_key = key
        cls._test_value = value
        cls._start_timestamp = int(time.time() * 1000)
        metrics = [
            helpers.create_metric(name=cls._test_name,
                                  dimensions={cls._test_key: cls._test_value},
                                  timestamp=cls._start_timestamp,
                                  value=metric_value1),
            helpers.create_metric(name=cls._test_name,
                                  dimensions={cls._test_key: cls._test_value},
                                  timestamp=cls._start_timestamp + 1000,
                                  value=metric_value2)
        ]
        cls.monasca_client.create_metrics(metrics)
        start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
        query_param = '?name=' + str(name) + '&start_time=' + \
                      start_time_iso + '&end_time=' + \
                      helpers.timestamp_to_iso(cls._start_timestamp + 1000 * 2)
        cls._start_time_iso = start_time_iso

        for i in xrange(constants.MAX_RETRIES):
            resp, response_body = cls.monasca_client.\
                list_measurements(query_param)
            elements = response_body['elements']
            for element in elements:
                if str(element['name']) == name and len(
                        element['measurements']) == 2:
                    cls._end_timestamp = cls._start_timestamp + 1000 * 3
                    cls._end_time_iso = helpers.timestamp_to_iso(
                        cls._end_timestamp)
                    return
            time.sleep(constants.RETRY_WAIT_SECS)

        assert False, "Failed to find enough measurements to test"
Ejemplo n.º 48
0
    def resource_setup(cls):
        super(TestStatistics, cls).resource_setup()
        name = data_utils.rand_name('name')
        key = data_utils.rand_name('key')
        value = data_utils.rand_name('value')
        cls._test_name = name
        cls._test_key = key
        cls._test_value = value
        cls._start_timestamp = int(time.time() * 1000)
        metrics = [
            helpers.create_metric(name=cls._test_name,
                                  dimensions={cls._test_key: cls._test_value},
                                  timestamp=cls._start_timestamp,
                                  value=metric_value1),
            helpers.create_metric(name=cls._test_name,
                                  dimensions={cls._test_key: cls._test_value},
                                  timestamp=cls._start_timestamp + 1000,
                                  value=metric_value2)
        ]
        cls.monasca_client.create_metrics(metrics)
        start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
        query_param = '?name=' + str(name) + '&start_time=' + \
                      start_time_iso + '&end_time=' + \
                      helpers.timestamp_to_iso(cls._start_timestamp + 1000 * 2)
        cls._start_time_iso = start_time_iso

        for i in xrange(constants.MAX_RETRIES):
            resp, response_body = cls.monasca_client.\
                list_measurements(query_param)
            elements = response_body['elements']
            for element in elements:
                if str(element['name']) == name and len(
                        element['measurements']) == 2:
                    cls._end_timestamp = cls._start_timestamp + 1000 * 3
                    cls._end_time_iso = helpers.timestamp_to_iso(
                        cls._end_timestamp)
                    return
            time.sleep(constants.RETRY_WAIT_SECS)

        assert False, "Failed to find enough measurements to test"
Ejemplo n.º 49
0
    def resource_setup(cls):
        super(TestMetricsNames, cls).resource_setup()
        name1 = data_utils.rand_name('name1')
        name2 = data_utils.rand_name('name2')
        name3 = data_utils.rand_name('name3')
        key = data_utils.rand_name()
        key1 = data_utils.rand_name()
        value = data_utils.rand_name()
        value1 = data_utils.rand_name()

        timestamp = int(round(time.time() * 1000))
        time_iso = helpers.timestamp_to_iso(timestamp)

        metric1 = helpers.create_metric(name=name1,
                                        dimensions={key: value})
        metric2 = helpers.create_metric(name=name2,
                                        dimensions={key1: value1})
        metric3 = helpers.create_metric(name=name3,
                                        dimensions={key: value})
        cls._test_metric_names = {name1, name2, name3}
        cls._expected_names_list = list(cls._test_metric_names)
        cls._expected_names_list.sort()
        cls._test_metric_names_with_same_dim = [name1, name3]
        cls._test_metrics = [metric1, metric2, metric3]
        cls._dimensions_param = key + ':' + value

        cls.monasca_client.create_metrics(cls._test_metrics)

        query_param = '?start_time=' + time_iso
        returned_name_set = set()
        for i in xrange(constants.MAX_RETRIES):
            resp, response_body = cls.monasca_client.list_metrics(query_param)
            elements = response_body['elements']
            for element in elements:
                returned_name_set.add(str(element['name']))
            if cls._test_metric_names.issubset(returned_name_set):
                return
            time.sleep(constants.RETRY_WAIT_SECS)

        assert False, 'Unable to initialize metrics'
Ejemplo n.º 50
0
    def resource_setup(cls):
        super(TestMetricsNames, cls).resource_setup()
        name1 = data_utils.rand_name('name1')
        name2 = data_utils.rand_name('name2')
        name3 = data_utils.rand_name('name3')
        key = data_utils.rand_name()
        key1 = data_utils.rand_name()
        value = data_utils.rand_name()
        value1 = data_utils.rand_name()

        timestamp = int(round(time.time() * 1000))
        time_iso = helpers.timestamp_to_iso(timestamp)

        metric1 = helpers.create_metric(name=name1,
                                        dimensions={key: value})
        metric2 = helpers.create_metric(name=name2,
                                        dimensions={key1: value1})
        metric3 = helpers.create_metric(name=name3,
                                        dimensions={key: value})
        cls._test_metric_names = {name1, name2, name3}
        cls._expected_names_list = list(cls._test_metric_names)
        cls._expected_names_list.sort()
        cls._test_metric_names_with_same_dim = [name1, name3]
        cls._test_metrics = [metric1, metric2, metric3]
        cls._dimensions_param = key + ':' + value

        cls.monasca_client.create_metrics(cls._test_metrics)

        query_param = '?start_time=' + time_iso
        returned_name_set = set()
        for i in range(constants.MAX_RETRIES):
            resp, response_body = cls.monasca_client.list_metrics(query_param)
            elements = response_body['elements']
            for element in elements:
                returned_name_set.add(str(element['name']))
            if cls._test_metric_names.issubset(returned_name_set):
                return
            time.sleep(constants.RETRY_WAIT_SECS)

        assert False, 'Unable to initialize metrics'
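Nearly every resource_setup above repeats the same poll-until-ready loop over constants.MAX_RETRIES and constants.RETRY_WAIT_SECS. As a hedged illustration of the pattern (not part of the project), it can be factored into one generic helper:

import time


def wait_for(condition, max_retries, wait_secs, failure_msg):
    # Illustrative helper: poll condition() until it returns True,
    # otherwise fail after max_retries attempts.
    for _ in range(max_retries):
        if condition():
            return
        time.sleep(wait_secs)
    raise AssertionError(failure_msg)

A setup method could then call, for example, wait_for(lambda: cls._test_metric_names.issubset(returned_names()), constants.MAX_RETRIES, constants.RETRY_WAIT_SECS, 'Unable to initialize metrics'), where returned_names() is a hypothetical callable wrapping the list_metrics poll shown above.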
Ejemplo n.º 51
0
    def resource_setup(cls):
        super(TestStatistics, cls).resource_setup()

        start_timestamp = int(time.time() * 1000)
        end_timestamp = int(time.time() * 1000) + NUM_MEASUREMENTS * 1000
        metrics = []

        for i in xrange(NUM_MEASUREMENTS):
            metric = helpers.create_metric(name="name-1", timestamp=start_timestamp + i)
            metrics.append(metric)

        resp, response_body = cls.monasca_client.create_metrics(metrics)
        cls._start_timestamp = start_timestamp
        cls._end_timestamp = end_timestamp
        cls._metrics = metrics
Ejemplo n.º 52
0
    def test_list_metrics_with_name(self):
        name_org = data_utils.rand_name('name')
        key = data_utils.rand_name('key')
        metric = helpers.create_metric(name=name_org,
                                       dimensions={key: 'value-1'})
        resp, body = self.monasca_client.create_metrics(metric)
        time.sleep(WAIT_TIME)

        query_parms = '?dimensions=' + str(key) + ':value-1'
        resp, response_body = self.monasca_client.list_metrics(query_parms)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        element = elements[0]
        name = element['name']
        self.assertEqual(name_org, str(name))
Ejemplo n.º 53
0
    def resource_setup(cls):
        super(TestAlarmsStateHistory, cls).resource_setup()

        start_timestamp = int(time.time() * 1000)
        end_timestamp = int(time.time() * 1000) + 1000

        # create an alarm definition
        expression = "avg(name-1) > 0"
        name = data_utils.rand_name('alarm_definition')
        alarm_definition = helpers.create_alarm_definition(
            name=name,
            expression=expression)
        resp, response_body = cls.monasca_client.create_alarm_definitions(
            alarm_definition)

        # create another alarm definition
        name1 = data_utils.rand_name('alarm_definition1')
        expression1 = "max(cpu.system_perc) > 0"
        alarm_definition1 = helpers.create_alarm_definition(
            name=name1,
            expression=expression1)
        resp, response_body1 = cls.monasca_client.create_alarm_definitions(
            alarm_definition1)

        # create another alarm definition
        name2 = data_utils.rand_name('alarm_definition2')
        expression1 = "avg(mysql.performance.slow_queries) > 10.0"
        alarm_definition2 = helpers.create_alarm_definition(
            name=name2,
            expression=expression1)
        resp, response_body2 = cls.monasca_client.create_alarm_definitions(
            alarm_definition2)

        # create some metrics
        for i in xrange(180):
            metric = helpers.create_metric()
            resp, body = cls.monasca_client.create_metrics(metric)
            cls._start_timestamp = start_timestamp + i
            cls._end_timestamp = end_timestamp + i
            time.sleep(1)
            resp, response_body = cls.monasca_client.\
                list_alarms_state_history()
            elements = response_body['elements']
            if len(elements) > 4:
                break

        if len(elements) < 3:
            assert False, "Required at least 3 alarm state history " \
                          "entries, found {}".format(len(elements))
Ejemplo n.º 54
0
    def resource_setup(cls):
        super(TestMeasurements, cls).resource_setup()

        start_timestamp = int(time.time() * 1000)
        end_timestamp = int(time.time() * 1000) + NUM_MEASUREMENTS * 1000
        metrics = []

        for i in xrange(NUM_MEASUREMENTS):
            metric = helpers.create_metric(name="name-1",
                                           timestamp=start_timestamp + i)
            metrics.append(metric)

        resp, response_body = cls.monasca_client.create_metrics(metrics)
        cls._start_timestamp = start_timestamp
        cls._end_timestamp = end_timestamp
        cls._metrics = metrics
Ejemplo n.º 55
0
 def create_alarms_for_test_alarms(cls):
     # create an alarm definition
     expression = "avg(name-1) > 0"
     name = data_utils.rand_name('name-1')
     alarm_definition = helpers.create_alarm_definition(
         name=name, expression=expression)
     resp, response_body = cls.monasca_client.create_alarm_definitions(
         alarm_definition)
     # create some metrics
     for i in xrange(30):
         metric = helpers.create_metric()
         resp, response_body = cls.monasca_client.create_metrics(metric)
         time.sleep(1)
         resp, response_body = cls.monasca_client.list_alarms()
         elements = response_body['elements']
         if len(elements) > 0:
             break
Ejemplo n.º 56
0
    def test_list_metrics_with_dimensions(self):
        name = data_utils.rand_name('name')
        key = data_utils.rand_name('key')
        value_org = data_utils.rand_name('value')
        metric = helpers.create_metric(name=name,
                                       dimensions={key: value_org})
        resp, body = self.monasca_client.create_metrics(metric)
        time.sleep(WAIT_TIME)

        query_parms = '?name=' + name
        resp, response_body = self.monasca_client.list_metrics(query_parms)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        element = elements[0]
        dimensions = element['dimensions']
        value = dimensions[key]
        self.assertEqual(value_org, str(value))
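For reference, the last two list_metrics tests only index into two fields of each returned element. Reconstructed from those accesses alone (the API may return more fields, such as an id, which are not shown here), an element looks roughly like:

# Shape inferred only from the accesses above; not an exhaustive schema.
example_element = {
    'name': 'name-1',                     # compared against name_org
    'dimensions': {'key-1': 'value-1'},   # looked up by the dimension key
}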