def test_create_metric_with_colon_in_dimension_value(self):
    # Regression test: a dimension value containing ':' characters
    # (here a URL) must round-trip through metric creation and the
    # '&dimensions=key:value' query-string filter.
    name = data_utils.rand_name('name')
    key = 'url'
    value = 'http://localhost:8070/v2.0'
    timestamp = int(round(time.time() * 1000))
    time_iso = helpers.timestamp_to_iso(timestamp)
    # Query window ends one day after the measurement timestamp.
    end_timestamp = int(round((time.time() + 3600 * 24) * 1000))
    end_time_iso = helpers.timestamp_to_iso(end_timestamp)
    metric = helpers.create_metric(name=name, dimensions={key: value})
    resp, response_body = self.monasca_client.create_metrics(metric)
    self.assertEqual(204, resp.status)
    query_param = '?name=' + name + '&start_time=' + time_iso + \
                  '&end_time=' + end_time_iso + \
                  '&dimensions=' + key + ':' + value
    # Poll until the measurement becomes visible (ingestion is async).
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client. \
            list_measurements(query_param)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        for element in elements:
            if str(element['name']) == name:
                self._verify_list_measurements_element(element, key, value)
                measurement = element['measurements'][0]
                # No value_meta was set on the metric, hence None/None.
                self._verify_list_measurements_measurement(
                    measurement, metric, None, None)
                return
        time.sleep(constants.RETRY_WAIT_SECS)
        if i == constants.MAX_RETRIES - 1:
            error_msg = "Failed test_create_metric: " \
                        "timeout on waiting for metrics: at least " \
                        "one metric is needed. Current number of " \
                        "metrics = 0"
            self.fail(error_msg)
def test_create_metrics(self):
    """Create two measurements under one metric definition and verify
    both come back (with their value_meta) from list_measurements.
    """
    name = data_utils.rand_name('name')
    key = data_utils.rand_name('key')
    value = data_utils.rand_name('value')
    timestamp = int(round(time.time() * 1000))
    time_iso = helpers.timestamp_to_iso(timestamp)
    # Query window extends one day past the first measurement.
    end_timestamp = int(round(timestamp + 3600 * 24 * 1000))
    end_time_iso = helpers.timestamp_to_iso(end_timestamp)
    value_meta_key1 = data_utils.rand_name('meta_key')
    value_meta_value1 = data_utils.rand_name('meta_value')
    value_meta_key2 = data_utils.rand_name('value_meta_key')
    value_meta_value2 = data_utils.rand_name('value_meta_value')
    metrics = [
        helpers.create_metric(name=name,
                              dimensions={key: value},
                              timestamp=timestamp,
                              value=1.23,
                              value_meta={
                                  value_meta_key1: value_meta_value1
                              }),
        helpers.create_metric(name=name,
                              dimensions={key: value},
                              timestamp=timestamp + 6000,
                              value=4.56,
                              value_meta={
                                  value_meta_key2: value_meta_value2
                              })
    ]
    resp, response_body = self.monasca_client.create_metrics(metrics)
    self.assertEqual(204, resp.status)
    query_param = '?name=' + name + '&start_time=' + str(time_iso) + \
                  '&end_time=' + str(end_time_iso)
    # range (not the Python 2-only xrange) keeps this Python 3
    # compatible and consistent with sibling tests in this file.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client.\
            list_measurements(query_param)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        for element in elements:
            if str(element['name']) == name \
                    and len(element['measurements']) == 2:
                self._verify_list_measurements_element(element, key, value)
                first_measurement = element['measurements'][0]
                second_measurement = element['measurements'][1]
                self._verify_list_measurements_measurement(
                    first_measurement, metrics[0], value_meta_key1,
                    value_meta_value1)
                self._verify_list_measurements_measurement(
                    second_measurement, metrics[1], value_meta_key2,
                    value_meta_value2)
                return
        time.sleep(constants.RETRY_WAIT_SECS)
        if i == constants.MAX_RETRIES - 1:
            error_msg = "Failed test_create_metrics: " \
                        "timeout on waiting for metrics: at least " \
                        "one metric is needed. Current number of " \
                        "metrics = 0"
            self.fail(error_msg)
def resource_setup(cls):
    """Seed two measurements for one metric name (differing in one
    dimension value) and wait until at least
    MIN_REQUIRED_MEASUREMENTS of them are readable via the
    measurements API.
    """
    super(TestStatistics, cls).resource_setup()
    name = data_utils.rand_name('name')
    key = data_utils.rand_name('key')
    value1 = data_utils.rand_name('value1')
    value2 = data_utils.rand_name('value2')
    cls._test_name = name
    cls._test_key = key
    cls._test_value1 = value1
    cls._start_timestamp = int(time.time() * 1000)
    metrics = [
        helpers.create_metric(name=name, dimensions={key: value1},
                              timestamp=cls._start_timestamp,
                              value=metric_value1),
        helpers.create_metric(name=name, dimensions={key: value2},
                              timestamp=cls._start_timestamp + 1000,
                              value=metric_value2)
    ]
    cls.monasca_client.create_metrics(metrics)
    start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
    # merge_metrics=true is needed because the two metrics share a
    # name but differ in dimensions.
    query_param = '?name=' + str(name) + '&start_time=' + \
        start_time_iso + '&merge_metrics=true' + '&end_time=' + \
        helpers.timestamp_to_iso(cls._start_timestamp + 1000 * 2)
    start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
    cls._start_time_iso = start_time_iso
    num_measurements = 0
    # range (not Python 2-only xrange) for Python 3 compatibility.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = cls.monasca_client.\
            list_measurements(query_param)
        elements = response_body['elements']
        for element in elements:
            if str(element['name']) == name:
                if len(element['measurements']
                       ) >= MIN_REQUIRED_MEASUREMENTS:
                    cls._end_timestamp = cls._start_timestamp + 1000 * 3
                    cls._end_time_iso = helpers.timestamp_to_iso(
                        cls._end_timestamp)
                    return
                else:
                    num_measurements = len(element['measurements'])
                    break
        time.sleep(constants.RETRY_WAIT_SECS)
    assert False, "Required {} measurements, found {}".format(
        MIN_REQUIRED_MEASUREMENTS, num_measurements)
def test_list_statistics_with_no_merge_metrics(self):
    """Listing statistics across several metric definitions without
    merging must be rejected with a Conflict error.
    """
    extra_key = data_utils.rand_name('key')
    extra_value = data_utils.rand_name('value')
    # A third measurement under a new dimension makes the name map to
    # more than one metric definition.
    extra_metric = helpers.create_metric(
        name=self._test_name,
        dimensions={extra_key: extra_value},
        timestamp=self._start_timestamp + 2000)
    self.monasca_client.create_metrics(extra_metric)
    window_end = helpers.timestamp_to_iso(self._start_timestamp + 1000 * 4)
    measurements_query = ('?name=' + str(self._test_name) +
                          '&start_time=' + self._start_time_iso +
                          '&end_time=' + window_end +
                          '&merge_metrics=True')
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client.list_measurements(
            measurements_query)
        elements = response_body['elements']
        ready = [e for e in elements
                 if str(e['name']) == self._test_name and
                 len(e['measurements']) == 3]
        if ready:
            stats_end = helpers.timestamp_to_iso(
                self._start_timestamp + 1000 * 4)
            stats_query = ('?name=' + str(self._test_name) +
                           '&statistics=avg' +
                           '&start_time=' + str(self._start_time_iso) +
                           '&end_time=' + str(stats_end) +
                           '&period=100000')
            self.assertRaises(exceptions.Conflict,
                              self.monasca_client.list_statistics,
                              stats_query)
            return
        time.sleep(constants.RETRY_WAIT_SECS)
    self._check_timeout(i, constants.MAX_RETRIES, elements, 3)
def test_list_statistics_with_no_merge_metrics(self):
    """Verify that listing statistics over multiple metric
    definitions without merging raises 409 Conflict.
    """
    key = data_utils.rand_name('key')
    value = data_utils.rand_name('value')
    # Add a third measurement under a new dimension so the name maps
    # to more than one metric definition.
    metric3 = helpers.create_metric(
        name=self._test_name,
        dimensions={key: value},
        timestamp=self._start_timestamp + 2000)
    self.monasca_client.create_metrics(metric3)
    query_param = '?name=' + str(self._test_name) + '&start_time=' + \
        self._start_time_iso + '&end_time=' + helpers.\
        timestamp_to_iso(self._start_timestamp + 1000 * 4) + \
        '&merge_metrics=True'
    # range (not Python 2-only xrange) for Python 3 compatibility and
    # consistency with the other copy of this test in the file.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client.\
            list_measurements(query_param)
        elements = response_body['elements']
        for element in elements:
            if str(element['name']) == self._test_name and len(
                    element['measurements']) == 3:
                end_time_iso = helpers.timestamp_to_iso(
                    self._start_timestamp + 1000 * 4)
                query_parms = '?name=' + str(self._test_name) + \
                              '&statistics=avg' + '&start_time=' + \
                              str(self._start_time_iso) + '&end_time=' +\
                              str(end_time_iso) + '&period=100000'
                self.assertRaises(exceptions.Conflict,
                                  self.monasca_client.list_statistics,
                                  query_parms)
                return
        time.sleep(constants.RETRY_WAIT_SECS)
    self._check_timeout(i, constants.MAX_RETRIES, elements, 3)
def resource_setup(cls):
    """Create two measurements under one metric name (with differing
    dimension values) and poll until MIN_REQUIRED_MEASUREMENTS of
    them are visible before any test runs.
    """
    super(TestStatistics, cls).resource_setup()
    name = data_utils.rand_name('name')
    key = data_utils.rand_name('key')
    value1 = data_utils.rand_name('value1')
    value2 = data_utils.rand_name('value2')
    cls._test_name = name
    cls._test_key = key
    cls._test_value1 = value1
    cls._start_timestamp = int(time.time() * 1000)
    metrics = [
        helpers.create_metric(name=name, dimensions={key: value1},
                              timestamp=cls._start_timestamp,
                              value=metric_value1),
        helpers.create_metric(name=name, dimensions={key: value2},
                              timestamp=cls._start_timestamp + 1000,
                              value=metric_value2)
    ]
    cls.monasca_client.create_metrics(metrics)
    start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
    # merge_metrics=true: same name, different dimension sets.
    query_param = '?name=' + str(name) + '&start_time=' + \
        start_time_iso + '&merge_metrics=true' + '&end_time=' + \
        helpers.timestamp_to_iso(cls._start_timestamp + 1000 * 2)
    start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
    cls._start_time_iso = start_time_iso
    num_measurements = 0
    # range (not Python 2-only xrange) for Python 3 compatibility.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = cls.monasca_client.\
            list_measurements(query_param)
        elements = response_body['elements']
        for element in elements:
            if str(element['name']) == name:
                if len(element['measurements']) >= \
                        MIN_REQUIRED_MEASUREMENTS:
                    cls._end_timestamp = cls._start_timestamp + 1000 * 3
                    cls._end_time_iso = helpers.timestamp_to_iso(
                        cls._end_timestamp)
                    return
                else:
                    num_measurements = len(element['measurements'])
                    break
        time.sleep(constants.RETRY_WAIT_SECS)
    assert False, "Required {} measurements, found {}".format(
        MIN_REQUIRED_MEASUREMENTS, num_measurements)
def resource_setup(cls):
    # Create metrics spanning three names and several dimension
    # name/value combinations, then poll until all three names are
    # visible via the metrics list API.
    super(TestDimensions, cls).resource_setup()
    metric_name1 = data_utils.rand_name()
    name1 = "name_1"
    name2 = "name_2"
    value1 = "value_1"
    value2 = "value_2"
    timestamp = int(round(time.time() * 1000))
    time_iso = helpers.timestamp_to_iso(timestamp)
    metric1 = helpers.create_metric(name=metric_name1,
                                    dimensions={name1: value1,
                                                name2: value2})
    cls.monasca_client.create_metrics(metric1)
    # Re-use metric_name1 with a different dimension set.
    metric1 = helpers.create_metric(name=metric_name1,
                                    dimensions={name1: value2})
    cls.monasca_client.create_metrics(metric1)
    metric_name2 = data_utils.rand_name()
    name3 = "name_3"
    value3 = "value_3"
    metric2 = helpers.create_metric(name=metric_name2,
                                    dimensions={name3: value3})
    cls.monasca_client.create_metrics(metric2)
    metric_name3 = data_utils.rand_name()
    metric3 = helpers.create_metric(name=metric_name3,
                                    dimensions={name1: value3})
    cls.monasca_client.create_metrics(metric3)
    # NOTE: _test_metric1 holds the SECOND metric created under
    # metric_name1 -- the local variable was reassigned above.
    cls._test_metric1 = metric1
    cls._test_metric2 = metric2
    cls._test_metric_names = {metric_name1, metric_name2, metric_name3}
    cls._dim_names_metric1 = [name1, name2]
    cls._dim_names_metric2 = [name3]
    cls._dim_names = cls._dim_names_metric1 + cls._dim_names_metric2
    cls._dim_values_for_metric1 = [value1, value2]
    cls._dim_values = [value1, value2, value3]
    param = '?start_time=' + time_iso
    returned_name_set = set()
    # Poll until every test metric name appears in list_metrics
    # (ingestion is asynchronous).
    for i in range(constants.MAX_RETRIES):
        resp, response_body = cls.monasca_client.list_metrics(
            param)
        elements = response_body['elements']
        for element in elements:
            returned_name_set.add(str(element['name']))
        if cls._test_metric_names.issubset(returned_name_set):
            return
        time.sleep(constants.RETRY_WAIT_SECS)
    assert False, 'Unable to initialize metrics'
def resource_setup(cls):
    """Create metrics across three names and multiple dimension
    combinations, then poll until all names are listable.
    """
    super(TestDimensions, cls).resource_setup()
    metric_name1 = data_utils.rand_name()
    name1 = "name_1"
    name2 = "name_2"
    value1 = "value_1"
    value2 = "value_2"
    timestamp = int(round(time.time() * 1000))
    time_iso = helpers.timestamp_to_iso(timestamp)
    metric1 = helpers.create_metric(name=metric_name1,
                                    dimensions={name1: value1,
                                                name2: value2})
    cls.monasca_client.create_metrics(metric1)
    # Re-use metric_name1 with a different dimension set.
    metric1 = helpers.create_metric(name=metric_name1,
                                    dimensions={name1: value2})
    cls.monasca_client.create_metrics(metric1)
    metric_name2 = data_utils.rand_name()
    name3 = "name_3"
    value3 = "value_3"
    metric2 = helpers.create_metric(name=metric_name2,
                                    dimensions={name3: value3})
    cls.monasca_client.create_metrics(metric2)
    metric_name3 = data_utils.rand_name()
    metric3 = helpers.create_metric(name=metric_name3,
                                    dimensions={name1: value3})
    cls.monasca_client.create_metrics(metric3)
    # NOTE: _test_metric1 holds the SECOND metric created under
    # metric_name1 (the local variable was reassigned above).
    cls._test_metric1 = metric1
    cls._test_metric2 = metric2
    cls._test_metric_names = {metric_name1, metric_name2, metric_name3}
    cls._dim_names_metric1 = [name1, name2]
    cls._dim_names_metric2 = [name3]
    cls._dim_names = cls._dim_names_metric1 + cls._dim_names_metric2
    cls._dim_values_for_metric1 = [value1, value2]
    cls._dim_values = [value1, value2, value3]
    param = '?start_time=' + time_iso
    returned_name_set = set()
    # range (not Python 2-only xrange) for Python 3 compatibility.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = cls.monasca_client.list_metrics(
            param)
        elements = response_body['elements']
        for element in elements:
            returned_name_set.add(str(element['name']))
        if cls._test_metric_names.issubset(returned_name_set):
            return
        time.sleep(constants.RETRY_WAIT_SECS)
    assert False, 'Unable to initialize metrics'
def test_create_metric(self):
    # End-to-end check: create one metric with value_meta and verify
    # it via the measurements list API.
    name = data_utils.rand_name('name')
    key = data_utils.rand_name('key')
    value = data_utils.rand_name('value')
    timestamp = int(round(time.time() * 1000))
    time_iso = helpers.timestamp_to_iso(timestamp)
    # Query window ends one day after now.
    end_timestamp = int(round((time.time() + 3600 * 24) * 1000))
    end_time_iso = helpers.timestamp_to_iso(end_timestamp)
    value_meta_key = data_utils.rand_name('value_meta_key')
    value_meta_value = data_utils.rand_name('value_meta_value')
    metric = helpers.create_metric(
        name=name, dimensions={key: value},
        timestamp=timestamp, value=1.23,
        value_meta={value_meta_key: value_meta_value})
    resp, response_body = self.monasca_client.create_metrics(metric)
    self.assertEqual(204, resp.status)
    query_param = '?name=' + name + '&start_time=' + time_iso + \
                  '&end_time=' + end_time_iso
    # Poll: measurement ingestion is asynchronous.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client.\
            list_measurements(query_param)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        for element in elements:
            # check if metric is there and dimension info already available
            if (str(element['name']) == name and
                    len(element['dimensions']) > 0):
                self._verify_list_measurements_element(element, key, value)
                measurement = element['measurements'][0]
                self._verify_list_measurements_measurement(
                    measurement, metric, value_meta_key, value_meta_value)
                return
        time.sleep(constants.RETRY_WAIT_SECS)
        if i == constants.MAX_RETRIES - 1:
            error_msg = "Failed test_create_metric: " \
                        "timeout on waiting for metrics: at least " \
                        "one metric is needed. Current number of " \
                        "metrics = 0"
            self.fail(error_msg)
def test_create_metric_with_no_dimensions(self):
    """A metric created with dimensions=None must still be accepted
    and retrievable, with its value_meta intact.
    """
    name = data_utils.rand_name('name')
    timestamp = int(round(time.time() * 1000))
    time_iso = helpers.timestamp_to_iso(timestamp)
    end_timestamp = int(round(timestamp + 3600 * 24 * 1000))
    end_time_iso = helpers.timestamp_to_iso(end_timestamp)
    value_meta_key = data_utils.rand_name('value_meta_key')
    value_meta_value = data_utils.rand_name('value_meta_value')
    metric = helpers.create_metric(
        name=name, dimensions=None, timestamp=timestamp, value=1.23,
        value_meta={value_meta_key: value_meta_value})
    resp, response_body = self.monasca_client.create_metrics(metric)
    self.assertEqual(204, resp.status)
    query_param = '?name=' + str(name) + '&start_time=' + str(time_iso) \
        + '&end_time=' + str(end_time_iso)
    # range (not Python 2-only xrange) for Python 3 compatibility.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client.\
            list_measurements(query_param)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        for element in elements:
            if str(element['name']) == name:
                self._verify_list_measurements_element(element,
                                                       test_key=None,
                                                       test_value=None)
                if len(element['measurements']) > 0:
                    measurement = element['measurements'][0]
                    self._verify_list_measurements_measurement(
                        measurement, metric, value_meta_key,
                        value_meta_value)
                    return
        time.sleep(constants.RETRY_WAIT_SECS)
        if i == constants.MAX_RETRIES - 1:
            error_msg = "Failed test_create_metric_with_no_dimensions: " \
                        "timeout on waiting for metrics: at least " \
                        "one metric is needed. Current number of " \
                        "metrics = 0"
            self.fail(error_msg)
def resource_setup(cls):
    """Create two measurements under a single metric definition and
    wait until both are visible before statistics tests run.
    """
    super(TestStatistics, cls).resource_setup()
    name = data_utils.rand_name('name')
    key = data_utils.rand_name('key')
    value = data_utils.rand_name('value')
    cls._test_name = name
    cls._test_key = key
    cls._test_value = value
    cls._start_timestamp = int(time.time() * 1000)
    metrics = [
        helpers.create_metric(name=cls._test_name,
                              dimensions={cls._test_key: cls._test_value},
                              timestamp=cls._start_timestamp,
                              value=metric_value1),
        helpers.create_metric(name=cls._test_name,
                              dimensions={cls._test_key: cls._test_value},
                              timestamp=cls._start_timestamp + 1000,
                              value=metric_value2)
    ]
    cls.monasca_client.create_metrics(metrics)
    start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
    query_param = '?name=' + str(name) + '&start_time=' + \
        start_time_iso + '&end_time=' + \
        helpers.timestamp_to_iso(cls._start_timestamp + 1000 * 2)
    start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
    cls._start_time_iso = start_time_iso
    # range (not Python 2-only xrange) for Python 3 compatibility.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = cls.monasca_client.\
            list_measurements(query_param)
        elements = response_body['elements']
        for element in elements:
            if str(element['name']) == name and len(
                    element['measurements']) == 2:
                cls._end_timestamp = cls._start_timestamp + 1000 * 3
                cls._end_time_iso = helpers.timestamp_to_iso(
                    cls._end_timestamp)
                return
        time.sleep(constants.RETRY_WAIT_SECS)
    assert False, "Failed to find enough measurements to test"
def test_list_measurements_success(self):
    """A measurements query for an unknown metric name succeeds,
    returning an empty element list and a self link.
    """
    now_ms = int(time.time() * 1000)
    iso_start = str(helpers.timestamp_to_iso(now_ms))
    query = '?name=foo&start_time=' + iso_start
    resp, body = self.monasca_client.list_measurements(query)
    # No metrics named 'foo' were created, so the result set must be
    # empty while the call itself succeeds.
    self.assertEqual(200, resp.status)
    self.assertEqual(0, len(body['elements']))
    self.assertTrue(
        '/v2.0/metrics/measurements' in body['links'][0]['href'])
def test_create_metric_with_no_dimensions(self):
    """Creating a metric without dimensions succeeds and the
    measurement (with its value_meta) is retrievable.
    """
    name = data_utils.rand_name('name')
    timestamp = int(round(time.time() * 1000))
    time_iso = helpers.timestamp_to_iso(timestamp)
    end_timestamp = int(round(timestamp + 3600 * 24 * 1000))
    end_time_iso = helpers.timestamp_to_iso(end_timestamp)
    value_meta_key = data_utils.rand_name('value_meta_key')
    value_meta_value = data_utils.rand_name('value_meta_value')
    metric = helpers.create_metric(name=name,
                                   dimensions=None,
                                   timestamp=timestamp,
                                   value=1.23,
                                   value_meta={
                                       value_meta_key: value_meta_value})
    resp, response_body = self.monasca_client.create_metrics(metric)
    self.assertEqual(204, resp.status)
    query_param = '?name=' + str(name) + '&start_time=' + str(time_iso) \
        + '&end_time=' + str(end_time_iso)
    # range (not Python 2-only xrange) for Python 3 compatibility.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client.\
            list_measurements(query_param)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        for element in elements:
            if str(element['name']) == name:
                self._verify_list_measurements_element(
                    element, test_key=None, test_value=None)
                if len(element['measurements']) > 0:
                    measurement = element['measurements'][0]
                    self._verify_list_measurements_measurement(
                        measurement, metric, value_meta_key,
                        value_meta_value)
                    return
        time.sleep(constants.RETRY_WAIT_SECS)
        if i == constants.MAX_RETRIES - 1:
            error_msg = "Failed test_create_metric_with_no_dimensions: " \
                        "timeout on waiting for metrics: at least " \
                        "one metric is needed. Current number of " \
                        "metrics = 0"
            self.fail(error_msg)
def test_create_metric_with_multibyte_character(self):
    # Verify metrics whose name/dimensions/value_meta contain
    # non-ASCII text round-trip through create and list_measurements.
    # NOTE(review): the .decode('utf8') calls assume Python 2 str
    # objects (str has no .decode on Python 3) -- confirm the target
    # interpreter before porting this test.
    name = data_utils.rand_name('name').decode('utf8')
    key = data_utils.rand_name('key').decode('utf8')
    value = data_utils.rand_name('value').decode('utf8')
    timestamp = int(round(time.time() * 1000))
    time_iso = helpers.timestamp_to_iso(timestamp)
    # Query window ends one day after now.
    end_timestamp = int(round((time.time() + 3600 * 24) * 1000))
    end_time_iso = helpers.timestamp_to_iso(end_timestamp)
    value_meta_key = data_utils.rand_name('value_meta_key').decode('utf8')
    value_meta_value = data_utils.rand_name('value_meta_value').decode(
        'utf8')
    metric = helpers.create_metric(
        name=name, dimensions={key: value},
        timestamp=timestamp, value=1.23,
        value_meta={value_meta_key: value_meta_value})
    resp, response_body = self.monasca_client.create_metrics(metric)
    self.assertEqual(204, resp.status)
    # Percent-encode the (possibly non-ASCII) name for the query
    # string.
    query_param = '?name=' + urlparse.quote(name.encode('utf8')) + \
                  '&start_time=' + time_iso + '&end_time=' + end_time_iso
    # Poll until the measurement becomes visible (ingestion is async).
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client.\
            list_measurements(query_param)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        for element in elements:
            # Compare without str() coercion: name may be non-ASCII.
            if element['name'] == name:
                self._verify_list_measurements_element(element, key, value)
                measurement = element['measurements'][0]
                self._verify_list_measurements_measurement(
                    measurement, metric, value_meta_key, value_meta_value)
                return
        time.sleep(constants.RETRY_WAIT_SECS)
        if i == constants.MAX_RETRIES - 1:
            error_msg = "Failed test_create_metric: " \
                        "timeout on waiting for metrics: at least " \
                        "one metric is needed. Current number of " \
                        "metrics = 0"
            self.fail(error_msg)
def test_list_statistics_success(self):
    """Statistics for a nonexistent metric name return 200 with an
    empty element list and a self link.
    """
    now_ms = int(time.time() * 1000)
    iso_start = str(helpers.timestamp_to_iso(now_ms))
    query = '?name=foo&statistics=avg&start_time=' + iso_start
    resp, body = self.monasca_client.list_statistics(query)
    # Nothing named 'foo' was created, so an empty result is expected
    # while the call itself succeeds.
    self.assertEqual(200, resp.status)
    self.assertEqual(0, len(body['elements']))
    self.assertTrue(
        '/v2.0/metrics/statistics' in body['links'][0]['href'])
def test_list_metrics_with_time_args(self):
    """List a just-created metric constrained by start_time/end_time
    and verify its dimension value round-trips.
    """
    name = data_utils.rand_name('name')
    key = data_utils.rand_name('key')
    value_org = data_utils.rand_name('value')
    now = int(round(time.time() * 1000))
    #
    # Built start and end time args before and after the measurement.
    #
    start_iso = helpers.timestamp_to_iso(now - 1000)
    end_timestamp = int(round(now + 1000))
    end_iso = helpers.timestamp_to_iso(end_timestamp)
    metric = helpers.create_metric(name=name,
                                   dimensions={key: value_org},
                                   timestamp=now)
    self.monasca_client.create_metrics(metric)
    # range (not Python 2-only xrange) for Python 3 compatibility.
    for timer in range(constants.MAX_RETRIES):
        query_parms = '?name=' + name + '&start_time=' + start_iso + \
            '&end_time=' + end_iso
        resp, response_body = self.monasca_client.list_metrics(query_parms)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        if elements:
            dimensions = elements[0]
            dimension = dimensions['dimensions']
            # Plain indexing replaces the Python 2-only unicode(key)
            # lookup; ASCII str and unicode keys hash/compare equal on
            # Python 2, and unicode does not exist on Python 3.
            value = dimension[key]
            self.assertEqual(value_org, str(value))
            break
        else:
            time.sleep(constants.RETRY_WAIT_SECS)
            if timer == constants.MAX_RETRIES - 1:
                skip_msg = "Skipped test_list_metrics_with_time_args: " \
                           "timeout on waiting for metrics: at least one " \
                           "metric is needed. Current number of metrics " \
                           "= 0"
                raise self.skipException(skip_msg)
def test_list_measurements_with_endtime(self):
    """Bounding the query with end_time still returns the expected
    number of measurements for the first test metric.
    """
    window_end = helpers.timestamp_to_iso(
        self._start_timestamp + ONE_SECOND * 2)
    query = ('?name=' + str(self._names_list[0]) +
             '&merge_metrics=true' +
             '&start_time=' + str(self._start_time) +
             '&end_time=' + str(window_end))
    resp, body = self.monasca_client.list_measurements(query)
    self._verify_list_measurements(resp, body)
    returned_elements = body['elements']
    self._verify_list_measurements_elements(elements=returned_elements,
                                            test_key=None,
                                            test_value=None)
    first_measurements = returned_elements[0]['measurements']
    self._verify_list_measurements_meas_len(
        measurements=first_measurements, test_len=NUM_MEASUREMENTS)
def resource_setup(cls):
    """Create three metrics (two sharing a dimension pair) and wait
    until all of their names are listable via the metrics API.
    """
    super(TestMetricsNames, cls).resource_setup()
    name1 = data_utils.rand_name('name1')
    name2 = data_utils.rand_name('name2')
    name3 = data_utils.rand_name('name3')
    key = data_utils.rand_name()
    key1 = data_utils.rand_name()
    value = data_utils.rand_name()
    value1 = data_utils.rand_name()
    timestamp = int(round(time.time() * 1000))
    time_iso = helpers.timestamp_to_iso(timestamp)
    metric1 = helpers.create_metric(name=name1, dimensions={key: value})
    metric2 = helpers.create_metric(name=name2, dimensions={key1: value1})
    metric3 = helpers.create_metric(name=name3, dimensions={key: value})
    cls._test_metric_names = {name1, name2, name3}
    cls._expected_names_list = list(cls._test_metric_names)
    cls._expected_names_list.sort()
    cls._test_metric_names_with_same_dim = [name1, name3]
    cls._test_metrics = [metric1, metric2, metric3]
    cls._dimensions_param = key + ':' + value
    cls.monasca_client.create_metrics(cls._test_metrics)
    query_param = '?start_time=' + time_iso
    returned_name_set = set()
    # range (not Python 2-only xrange) for Python 3 compatibility and
    # consistency with the rest of the file.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = cls.monasca_client.list_metrics(query_param)
        elements = response_body['elements']
        for element in elements:
            returned_name_set.add(str(element['name']))
        if cls._test_metric_names.issubset(returned_name_set):
            return
        time.sleep(constants.RETRY_WAIT_SECS)
    assert False, 'Unable to initialize metrics'
def resource_setup(cls):
    """Seed three named metrics (the first and third share a
    dimension pair) and poll list_metrics until every name shows up.
    """
    super(TestMetricsNames, cls).resource_setup()
    first_name = data_utils.rand_name('name1')
    second_name = data_utils.rand_name('name2')
    third_name = data_utils.rand_name('name3')
    shared_key = data_utils.rand_name()
    other_key = data_utils.rand_name()
    shared_value = data_utils.rand_name()
    other_value = data_utils.rand_name()
    created_at = int(round(time.time() * 1000))
    created_at_iso = helpers.timestamp_to_iso(created_at)
    first_metric = helpers.create_metric(
        name=first_name, dimensions={shared_key: shared_value})
    second_metric = helpers.create_metric(
        name=second_name, dimensions={other_key: other_value})
    third_metric = helpers.create_metric(
        name=third_name, dimensions={shared_key: shared_value})
    cls._test_metric_names = {first_name, second_name, third_name}
    cls._expected_names_list = sorted(cls._test_metric_names)
    cls._test_metric_names_with_same_dim = [first_name, third_name]
    cls._test_metrics = [first_metric, second_metric, third_metric]
    cls._dimensions_param = shared_key + ':' + shared_value
    cls.monasca_client.create_metrics(cls._test_metrics)
    list_query = '?start_time=' + created_at_iso
    seen_names = set()
    for _attempt in range(constants.MAX_RETRIES):
        resp, response_body = cls.monasca_client.list_metrics(list_query)
        for element in response_body['elements']:
            seen_names.add(str(element['name']))
        if cls._test_metric_names.issubset(seen_names):
            return
        time.sleep(constants.RETRY_WAIT_SECS)
    assert False, 'Unable to initialize metrics'
def test_list_statistics_with_offset_limit(self):
    """Verify statistics paging: each offset/limit window must return
    the same slice as the corresponding slice of one large query.
    """
    start_timestamp = int(time.time() * 1000)
    name = data_utils.rand_name()
    metric = [
        helpers.create_metric(name=name, timestamp=start_timestamp + 0,
                              dimensions={'key1': 'value-1',
                                          'key2': 'value-1'},
                              value=1),
        helpers.create_metric(name=name, timestamp=start_timestamp + 500,
                              dimensions={'key1': 'value-2',
                                          'key2': 'value-2'},
                              value=2),
        helpers.create_metric(name=name, timestamp=start_timestamp + 1000,
                              dimensions={'key1': 'value-3',
                                          'key2': 'value-3'},
                              value=3),
        helpers.create_metric(name=name, timestamp=start_timestamp + 1500,
                              dimensions={'key1': 'value-4',
                                          'key2': 'value-4'},
                              value=4)
    ]
    self.monasca_client.create_metrics(metric)
    query_parms = '?name=' + name
    # range (not Python 2-only xrange) for Python 3 compatibility.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client.list_metrics(query_parms)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        if elements:
            break
        else:
            time.sleep(constants.RETRY_WAIT_SECS)
    self._check_timeout(i, constants.MAX_RETRIES, elements, 4)
    start_time = helpers.timestamp_to_iso(start_timestamp)
    end_timestamp = start_timestamp + 4000
    end_time = helpers.timestamp_to_iso(end_timestamp)
    query_parms = '?name=' + name + '&merge_metrics=true&statistics=avg' \
                  + '&start_time=' + str(start_time) + '&end_time=' + \
                  str(end_time) + '&period=1'
    resp, body = self.monasca_client.list_statistics(query_parms)
    self.assertEqual(200, resp.status)
    elements = body['elements'][0]['statistics']
    first_element = elements[0]
    query_parms = '?name=' + name + '&merge_metrics=true&statistics=avg'\
                  + '&start_time=' + str(start_time) + '&end_time=' + \
                  str(end_time) + '&period=1' + '&limit=4'
    resp, response_body = self.monasca_client.list_statistics(
        query_parms)
    self.assertEqual(200, resp.status)
    elements = response_body['elements'][0]['statistics']
    self.assertEqual(4, len(elements))
    self.assertEqual(first_element, elements[0])
    for index in range(1, 5):
        max_limit = 5 - index
        for limit in range(1, max_limit):
            offset_timestamp = start_timestamp + (1000 * index)
            # NOTE(review): '/ 1000' is true (float) division on
            # Python 3 but integer division on Python 2 -- confirm
            # timeutils.iso8601_from_timestamp accepts both.
            offset = timeutils.iso8601_from_timestamp(
                offset_timestamp / 1000)
            last_index = index + limit
            expected_elements = elements[index:last_index]
            query_parms = '?name=' + name + '&merge_metrics=true' + \
                          '&statistics=avg' + '&start_time=' + \
                          str(start_time) + '&end_time=' + \
                          str(end_time) + '&period=1' + '&limit=' + \
                          str(limit) + '&offset=' + str(offset)
            resp, response_body = self.monasca_client.list_statistics(
                query_parms)
            self.assertEqual(200, resp.status)
            if not response_body['elements']:
                self.fail("No metrics returned")
            if not response_body['elements'][0]['statistics']:
                self.fail("No statistics returned")
            new_elements = response_body['elements'][0]['statistics']
            self.assertEqual(limit, len(new_elements))
            # bug in the python API causes limit 1 to not have
            # matching timestamps
            if limit > 1:
                self.assertEqual(expected_elements, new_elements)
def test_list_statistics_with_offset_limit(self):
    """Verify statistics paging using the API-provided 'next' link
    offset (rounded to whole seconds) instead of a locally computed
    timestamp.
    """
    start_timestamp = int(time.time() * 1000)
    name = data_utils.rand_name()
    metric = [
        helpers.create_metric(name=name, timestamp=start_timestamp + 0,
                              dimensions={'key1': 'value-1',
                                          'key2': 'value-1'},
                              value=1),
        helpers.create_metric(name=name, timestamp=start_timestamp + 1000,
                              dimensions={'key1': 'value-2',
                                          'key2': 'value-2'},
                              value=2),
        helpers.create_metric(name=name, timestamp=start_timestamp + 2000,
                              dimensions={'key1': 'value-3',
                                          'key2': 'value-3'},
                              value=3),
        helpers.create_metric(name=name, timestamp=start_timestamp + 3000,
                              dimensions={'key1': 'value-4',
                                          'key2': 'value-4'},
                              value=4)
    ]
    self.monasca_client.create_metrics(metric)
    query_parms = '?name=' + name
    # range (not Python 2-only xrange) for Python 3 compatibility.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client.list_metrics(query_parms)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        if elements:
            break
        else:
            time.sleep(constants.RETRY_WAIT_SECS)
    self._check_timeout(i, constants.MAX_RETRIES, elements, 4)
    start_time = helpers.timestamp_to_iso(start_timestamp)
    end_timestamp = start_timestamp + 4000
    end_time = helpers.timestamp_to_iso(end_timestamp)
    query_parms = '?name=' + name + '&merge_metrics=true&statistics=avg' \
                  + '&start_time=' + str(start_time) + '&end_time=' + \
                  str(end_time) + '&period=1'
    resp, body = self.monasca_client.list_statistics(query_parms)
    self.assertEqual(200, resp.status)
    elements = body['elements'][0]['statistics']
    first_element = elements[0]
    query_parms = '?name=' + name + '&merge_metrics=true&statistics=avg'\
                  + '&start_time=' + str(start_time) + '&end_time=' + \
                  str(end_time) + '&period=1' + '&limit=4'
    resp, response_body = self.monasca_client.list_statistics(
        query_parms)
    self.assertEqual(200, resp.status)
    elements = response_body['elements'][0]['statistics']
    self.assertEqual(4, len(elements))
    self.assertEqual(first_element, elements[0])
    for index in range(1, 4):
        max_limit = 4 - index
        # Get first offset from api
        query_parms = '?name=' + str(name) + \
                      '&merge_metrics=true&start_time=' + \
                      elements[index - 1][0] + \
                      '&end_time=' + end_time + \
                      '&limit=1'
        resp, response_body = self.monasca_client.list_measurements(
            query_parms)
        # Initialize before the scan so a missing 'next' link fails
        # the test instead of raising NameError.
        next_link = None
        for link in response_body['links']:
            if link['rel'] == 'next':
                next_link = link['href']
        if not next_link:
            # Fixed typo: was '.formet(...)', which raised
            # AttributeError instead of reporting the real failure.
            self.fail("No next link returned with query "
                      "parameters: {}".format(query_parms))
        offset = helpers.get_query_param(next_link, "offset")
        # python api returns exact timestamp, but the test needs a
        # rounded number
        offset_period_index = offset.find('.')
        offset = offset[:offset_period_index] + 'Z'
        for limit in range(1, max_limit):
            expected_elements = [elem for elem in elements
                                 if elem[0] > offset]
            expected_elements = expected_elements[:limit]
            query_parms = '?name=' + name + '&merge_metrics=true' + \
                          '&statistics=avg' + '&start_time=' + \
                          str(start_time) + '&end_time=' + \
                          str(end_time) + '&period=1' + '&limit=' + \
                          str(limit) + '&offset=' + str(offset)
            resp, response_body = self.monasca_client.list_statistics(
                query_parms)
            self.assertEqual(200, resp.status)
            if not response_body['elements']:
                self.fail("No metrics returned")
            if not response_body['elements'][0]['statistics']:
                self.fail("No statistics returned")
            new_elements = response_body['elements'][0]['statistics']
            self.assertEqual(limit, len(new_elements))
            # bug in the python API causes limit 1 to not have
            # matching timestamps
            if limit > 1:
                self.assertEqual(expected_elements, new_elements)
def resource_setup(cls):
    """Create and await the metrics shared by the statistics tests.

    Publishes two measurements under one random metric name (name/key/
    value stored on the class for the tests), waits until the API
    reports at least MIN_REQUIRED_MEASUREMENTS of them, then publishes
    four "group-by" metrics with distinct dimensions and waits for all
    of those as well. Setup fails if either set never becomes visible.
    """
    super(TestStatistics, cls).resource_setup()
    name = data_utils.rand_name('name')
    key = data_utils.rand_name('key')
    value1 = data_utils.rand_name('value1')
    value2 = data_utils.rand_name('value2')
    cls._test_name = name
    cls._test_key = key
    cls._test_value1 = value1
    cls._start_timestamp = int(time.time() * 1000)
    metrics = [
        helpers.create_metric(name=name, dimensions={key: value1},
                              timestamp=cls._start_timestamp,
                              value=1.23),
        helpers.create_metric(name=name, dimensions={key: value2},
                              timestamp=cls._start_timestamp + 1000,
                              value=4.56)
    ]
    cls.metric_values = [m['value'] for m in metrics]
    cls.monasca_client.create_metrics(metrics)
    start_time_iso = helpers.timestamp_to_iso(cls._start_timestamp)
    query_param = '?name=' + str(name) + '&start_time=' + \
        start_time_iso + '&merge_metrics=true' + '&end_time=' + \
        helpers.timestamp_to_iso(cls._start_timestamp + 1000 * 2)
    cls._start_time_iso = start_time_iso
    num_measurements = 0
    # BUG FIX: xrange() is Python 2 only; use range() like the sibling
    # resource_setup in this file.
    for i in range(constants.MAX_RETRIES):
        resp, response_body = cls.monasca_client.\
            list_measurements(query_param)
        elements = response_body['elements']
        if len(elements) > 0:
            num_measurements = len(elements[0]['measurements'])
            if num_measurements >= MIN_REQUIRED_MEASUREMENTS:
                break
        time.sleep(constants.RETRY_WAIT_SECS)
    if num_measurements < MIN_REQUIRED_MEASUREMENTS:
        assert False, "Required {} measurements, found {}".format(
            MIN_REQUIRED_MEASUREMENTS, num_measurements)
    cls._end_timestamp = cls._start_timestamp + 3000
    cls._end_time_iso = helpers.timestamp_to_iso(cls._end_timestamp)

    name2 = data_utils.rand_name("group-by")
    cls._group_by_metric_name = name2
    cls._group_by_end_time_iso = helpers.timestamp_to_iso(
        cls._start_timestamp + 4000)
    group_by_metrics = [
        helpers.create_metric(
            name=name2,
            dimensions={'key1': 'value1', 'key2': 'value5',
                        'key3': 'value7'},
            timestamp=cls._start_timestamp + 1, value=2),
        helpers.create_metric(
            name=name2,
            dimensions={'key1': 'value2', 'key2': 'value5',
                        'key3': 'value7'},
            timestamp=cls._start_timestamp + 1001, value=3),
        helpers.create_metric(
            name=name2,
            dimensions={'key1': 'value3', 'key2': 'value6',
                        'key3': 'value7'},
            timestamp=cls._start_timestamp + 2001, value=5),
        helpers.create_metric(
            name=name2,
            dimensions={'key1': 'value4', 'key2': 'value6',
                        'key3': 'value8'},
            timestamp=cls._start_timestamp + 3001, value=7),
    ]
    cls.monasca_client.create_metrics(group_by_metrics)
    query_param = '?name=' + str(name2) + \
        '&start_time=' + start_time_iso + \
        '&merge_metrics=true' + \
        '&end_time=' + cls._group_by_end_time_iso
    num_measurements = 0
    for i in range(constants.MAX_RETRIES):
        resp, response_body = cls.monasca_client. \
            list_measurements(query_param)
        elements = response_body['elements']
        if len(elements) > 0:
            num_measurements = len(elements[0]['measurements'])
            if num_measurements >= len(group_by_metrics):
                break
        time.sleep(constants.RETRY_WAIT_SECS)
    if num_measurements < len(group_by_metrics):
        assert False, "Required {} measurements, found {}".format(
            len(group_by_metrics), response_body)
def test_list_statistics_with_offset_limit(self):
    """Page through merged avg statistics with every limit/offset combo.

    Creates four measurements, waits for the metric to be listable,
    fetches the full statistics result, then for each limit walks the
    pages using the offset returned by the previous call and checks
    each page against the expected slice of the full result.
    """
    start_timestamp = int(time.time() * 1000)
    name = data_utils.rand_name()
    metric = [
        helpers.create_metric(name=name, timestamp=start_timestamp + 1,
                              dimensions={'key1': 'value-1',
                                          'key2': 'value-1'},
                              value=1),
        helpers.create_metric(name=name, timestamp=start_timestamp + 1001,
                              dimensions={'key1': 'value-2',
                                          'key2': 'value-2'},
                              value=2),
        helpers.create_metric(name=name, timestamp=start_timestamp + 2001,
                              dimensions={'key1': 'value-3',
                                          'key2': 'value-3'},
                              value=3),
        helpers.create_metric(name=name, timestamp=start_timestamp + 3001,
                              dimensions={'key1': 'value-4',
                                          'key2': 'value-4'},
                              value=4)
    ]
    num_metrics = len(metric)
    self.monasca_client.create_metrics(metric)
    query_parms = '?name=' + name
    # BUG FIX: xrange() is Python 2 only; use range().
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client.list_metrics(query_parms)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        if elements:
            break
        else:
            time.sleep(constants.RETRY_WAIT_SECS)
    self._check_timeout(i, constants.MAX_RETRIES, elements, num_metrics)

    start_time = helpers.timestamp_to_iso(start_timestamp)
    end_timestamp = start_timestamp + 4001
    end_time = helpers.timestamp_to_iso(end_timestamp)
    query_parms = '?name=' + name + '&merge_metrics=true&statistics=avg' \
                  + '&start_time=' + str(start_time) + '&end_time=' + \
                  str(end_time) + '&period=1'
    resp, body = self.monasca_client.list_statistics(query_parms)
    self.assertEqual(200, resp.status)
    elements = body['elements'][0]['statistics']
    first_element = elements[0]

    query_parms = '?name=' + name + '&merge_metrics=true&statistics=avg'\
                  + '&start_time=' + str(start_time) + '&end_time=' + \
                  str(end_time) + '&period=1' + '&limit=' + \
                  str(num_metrics)
    resp, response_body = self.monasca_client.list_statistics(
        query_parms)
    self.assertEqual(200, resp.status)
    elements = response_body['elements'][0]['statistics']
    self.assertEqual(num_metrics, len(elements))
    self.assertEqual(first_element, elements[0])

    for limit in range(1, num_metrics):
        start_index = 0
        params = [('name', name),
                  ('merge_metrics', 'true'),
                  ('statistics', 'avg'),
                  ('start_time', str(start_time)),
                  ('end_time', str(end_time)),
                  ('period', 1),
                  ('limit', limit)]
        offset = None
        while True:
            # The last page may be shorter than the limit.
            num_expected_elements = limit
            if (num_expected_elements + start_index) > num_metrics:
                num_expected_elements = num_metrics - start_index
            these_params = list(params)
            # If not the first call, use the offset returned by the
            # last call.
            if offset:
                these_params.extend([('offset', str(offset))])
            query_parms = '?' + urlencode(these_params)
            resp, response_body = self.monasca_client.list_statistics(
                query_parms)
            self.assertEqual(200, resp.status)
            if not response_body['elements']:
                self.fail("No metrics returned")
            if not response_body['elements'][0]['statistics']:
                self.fail("No statistics returned")
            new_elements = response_body['elements'][0]['statistics']
            self.assertEqual(num_expected_elements, len(new_elements))
            expected_elements = elements[start_index:start_index + limit]
            self.assertEqual(expected_elements, new_elements)
            start_index += num_expected_elements
            if start_index >= num_metrics:
                break
            # Get the next page's offset from this response.
            offset = self._get_offset(response_body)
def resource_setup(cls):
    """Create the dimension-test metrics and wait for them to appear.

    Publishes three metric names with various dimensions, some with
    timestamps outside the [start, end] query window, then polls
    list_metrics until all names are visible. Expected dimension
    names/values (overall and within the time range) are stored on the
    class for the tests.
    """
    super(TestDimensions, cls).resource_setup()
    start_timestamp = int(round(time.time() * 1000))
    start_time_iso = helpers.timestamp_to_iso(start_timestamp)
    # NOTE (brtknr): use interval of a day because the tag based queries
    # appear to only support smallest granularity of a day, and disregard
    # time of day, which is fine for most use cases.
    day = 60 * 60 * 24 * 1000
    end_timestamp = start_timestamp + 10 * day
    end_time_iso = helpers.timestamp_to_iso(end_timestamp)
    metric_name1 = data_utils.rand_name()
    name1 = "name_1"
    name2 = "name_2"
    value1 = "value_1"
    value2 = "value_2"
    # timestamp1 is before the query window and timestamp4 is after it,
    # so the metrics using them fall outside the "in_timerange" sets
    # recorded below.
    timestamp1 = start_timestamp - day
    timestamp2 = start_timestamp + day
    timestamp3 = start_timestamp + day + day
    timestamp4 = end_timestamp + day
    metric1 = helpers.create_metric(name=metric_name1,
                                    timestamp=timestamp1,
                                    dimensions={
                                        name1: value1,
                                        name2: value2,
                                    })
    cls.monasca_client.create_metrics(metric1)
    # NOTE: metric1 is deliberately reassigned; cls._test_metric1 below
    # refers to this second (in-window) metric, not the first one.
    metric1 = helpers.create_metric(name=metric_name1,
                                    timestamp=timestamp2,
                                    dimensions={name1: value2})
    cls.monasca_client.create_metrics(metric1)
    metric_name2 = data_utils.rand_name()
    name3 = "name_3"
    value3 = "value_3"
    value4 = "value_4"
    metric2 = helpers.create_metric(name=metric_name2,
                                    timestamp=timestamp3,
                                    dimensions={
                                        name1: value3,
                                        name3: value4,
                                    })
    cls.monasca_client.create_metrics(metric2)
    metric_name3 = data_utils.rand_name()
    metric3 = helpers.create_metric(name=metric_name3,
                                    timestamp=timestamp4,
                                    dimensions={name2: value3})
    cls.monasca_client.create_metrics(metric3)
    cls._test_metric1 = metric1
    cls._test_metric2 = metric2
    cls._test_metric_names = {metric_name1, metric_name2, metric_name3}
    # Expected dimension names per metric, overall and restricted to
    # the [start, end] time range.
    cls._dim_names_metric1 = [name1, name2]
    cls._dim_names_metric1_in_timerange = [name1]
    cls._dim_names_metric2 = [name1, name3]
    cls._dim_names_metric2_in_timerange = [name1, name3]
    cls._dim_names = sorted(
        set(cls._dim_names_metric1 + cls._dim_names_metric2))
    cls._dim_names_in_timerange = sorted(
        set(cls._dim_names_metric1_in_timerange +
            cls._dim_names_metric2_in_timerange))
    cls._dim_name1 = name1
    cls._dim_name1_values_for_metric1 = [value1, value2]
    cls._dim_name1_values_for_metric1_in_timerange = [value2]
    cls._dim_name1_values = [value1, value2, value3]
    cls._dim_name1_values_in_timerange = [value2, value3]
    cls._start_time = start_time_iso
    cls._end_time = end_time_iso
    param = '?start_time=' + start_time_iso
    returned_name_set = set()
    # Poll until all three metric names are visible (and ideally both
    # metric_name1 series).
    for i in range(constants.MAX_RETRIES):
        resp, response_body = cls.monasca_client.list_metrics(param)
        elements = response_body['elements']
        metric_name1_count = 0
        for element in elements:
            returned_name_set.add(str(element['name']))
            if (str(element['name']) == metric_name1):
                metric_name1_count += 1
        # Java version of influxdb never returns both metric1 entries in
        # the list but Python does, so on the final retry accept a
        # single metric_name1 entry.
        if cls._test_metric_names.issubset(returned_name_set) \
                and (metric_name1_count == 2 or
                     i == constants.MAX_RETRIES - 1):
            return
        time.sleep(constants.RETRY_WAIT_SECS)
    assert False, 'Unable to initialize metrics'
def resource_setup(cls):
    """Publish and await the metrics the statistics tests rely on.

    Two measurements go in under a random name (recorded on the class),
    then four "group-by" measurements under a second name; each batch is
    polled until the API reports the expected measurement count, and
    setup aborts if either batch never shows up.
    """
    super(TestStatistics, cls).resource_setup()
    name = data_utils.rand_name('name')
    key = data_utils.rand_name('key')
    value1 = data_utils.rand_name('value1')
    value2 = data_utils.rand_name('value2')
    cls._test_name = name
    cls._test_key = key
    cls._test_value1 = value1
    cls._start_timestamp = int(time.time() * 1000)
    base_ts = cls._start_timestamp
    metrics = [
        helpers.create_metric(name=name, dimensions={key: value1},
                              timestamp=base_ts, value=1.23),
        helpers.create_metric(name=name, dimensions={key: value2},
                              timestamp=base_ts + 1000, value=4.56),
    ]
    cls.metric_values = [entry['value'] for entry in metrics]
    cls.monasca_client.create_metrics(metrics)
    start_time_iso = helpers.timestamp_to_iso(base_ts)
    cls._start_time_iso = start_time_iso
    query_param = ('?name=' + str(name) +
                   '&start_time=' + start_time_iso +
                   '&merge_metrics=true' +
                   '&end_time=' + helpers.timestamp_to_iso(
                       base_ts + 1000 * 2))
    found = 0
    for attempt in range(constants.MAX_RETRIES):
        resp, body = cls.monasca_client.\
            list_measurements(query_param)
        result = body['elements']
        if result:
            found = len(result[0]['measurements'])
            if found >= MIN_REQUIRED_MEASUREMENTS:
                break
        time.sleep(constants.RETRY_WAIT_SECS)
    if found < MIN_REQUIRED_MEASUREMENTS:
        assert False, "Required {} measurements, found {}".format(
            MIN_REQUIRED_MEASUREMENTS, found)
    cls._end_timestamp = base_ts + 3000
    cls._end_time_iso = helpers.timestamp_to_iso(cls._end_timestamp)

    # Second batch: four metrics with varied dimensions for the
    # group_by statistics tests.
    group_name = data_utils.rand_name("group-by")
    cls._group_by_metric_name = group_name
    cls._group_by_end_time_iso = helpers.timestamp_to_iso(base_ts + 4000)
    dim_sets = [
        ({'key1': 'value1', 'key2': 'value5', 'key3': 'value7'}, 1, 2),
        ({'key1': 'value2', 'key2': 'value5', 'key3': 'value7'}, 1001, 3),
        ({'key1': 'value3', 'key2': 'value6', 'key3': 'value7'}, 2001, 5),
        ({'key1': 'value4', 'key2': 'value6', 'key3': 'value8'}, 3001, 7),
    ]
    group_by_metrics = [
        helpers.create_metric(name=group_name, dimensions=dims,
                              timestamp=base_ts + offset, value=val)
        for dims, offset, val in dim_sets
    ]
    cls.monasca_client.create_metrics(group_by_metrics)
    query_param = ('?name=' + str(group_name) +
                   '&start_time=' + start_time_iso +
                   '&merge_metrics=true' +
                   '&end_time=' + cls._group_by_end_time_iso)
    found = 0
    for attempt in range(constants.MAX_RETRIES):
        resp, body = cls.monasca_client. \
            list_measurements(query_param)
        result = body['elements']
        if result:
            found = len(result[0]['measurements'])
            if found >= len(group_by_metrics):
                break
        time.sleep(constants.RETRY_WAIT_SECS)
    if found < len(group_by_metrics):
        assert False, "Required {} measurements, found {}".format(
            len(group_by_metrics), body)
def test_list_statistics_with_offset_limit(self):
    """Verify statistics paging for every page size.

    After publishing four measurements and waiting for all four metric
    series to be listable, the full merged avg statistics are fetched
    once; then, for each page size, the test walks every page via the
    offset returned by the previous response and compares it to the
    matching slice of the full result.
    """
    base_ms = int(time.time() * 1000)
    name = data_utils.rand_name()
    dim_specs = [
        (1, {'key1': 'value-1', 'key2': 'value-1'}, 1),
        (1001, {'key1': 'value-2', 'key2': 'value-2'}, 2),
        (2001, {'key1': 'value-3', 'key2': 'value-3'}, 3),
        (3001, {'key1': 'value-4', 'key2': 'value-4'}, 4),
    ]
    posted = [
        helpers.create_metric(name=name, timestamp=base_ms + delta,
                              dimensions=dims, value=val)
        for delta, dims, val in dim_specs
    ]
    num_metrics = len(posted)
    self.monasca_client.create_metrics(posted)

    # Wait until all series for this name are listable.
    listing_query = '?name=' + name
    for attempt in range(constants.MAX_RETRIES):
        resp, body = self.monasca_client.list_metrics(listing_query)
        self.assertEqual(200, resp.status)
        found = body['elements']
        if found and len(found) == num_metrics:
            break
        else:
            time.sleep(constants.RETRY_WAIT_SECS)
    self._check_timeout(attempt, constants.MAX_RETRIES, found,
                        num_metrics)

    start_time = helpers.timestamp_to_iso(base_ms)
    end_time = helpers.timestamp_to_iso(base_ms + 4001)

    # Reference result: all statistics in one unlimited call.
    stats_query = ('?name=' + name + '&merge_metrics=true&statistics=avg'
                   + '&start_time=' + str(start_time) + '&end_time=' +
                   str(end_time) + '&period=1')
    resp, body = self.monasca_client.list_statistics(stats_query)
    self.assertEqual(200, resp.status)
    first_element = body['elements'][0]['statistics'][0]

    limited_query = stats_query + '&limit=' + str(num_metrics)
    resp, body = self.monasca_client.list_statistics(limited_query)
    self.assertEqual(200, resp.status)
    all_stats = body['elements'][0]['statistics']
    self.assertEqual(num_metrics, len(all_stats))
    self.assertEqual(first_element, all_stats[0])

    for page_size in range(1, num_metrics):
        cursor = 0
        base_params = [('name', name),
                       ('merge_metrics', 'true'),
                       ('statistics', 'avg'),
                       ('start_time', str(start_time)),
                       ('end_time', str(end_time)),
                       ('period', 1),
                       ('limit', page_size)]
        offset = None
        while True:
            # Final page may hold fewer than page_size entries.
            expected_count = min(page_size, num_metrics - cursor)
            page_params = list(base_params)
            if offset:
                # Subsequent pages carry the offset from the prior call.
                page_params.extend([('offset', str(offset))])
            resp, body = self.monasca_client.list_statistics(
                '?' + urlencode(page_params))
            self.assertEqual(200, resp.status)
            if not body['elements']:
                self.fail("No metrics returned")
            if not body['elements'][0]['statistics']:
                self.fail("No statistics returned")
            page = body['elements'][0]['statistics']
            self.assertEqual(expected_count, len(page))
            self.assertEqual(all_stats[cursor:cursor + page_size], page)
            cursor += expected_count
            if cursor >= num_metrics:
                break
            offset = self._get_offset(body)
def test_list_statistics_with_offset_limit(self):
    """Page through merged avg statistics using computed ISO offsets.

    Creates four measurements half a second apart, waits for the metric
    to become listable, fetches the full statistics result, then for
    each index/limit pair builds an offset timestamp and checks the
    limited query against the expected slice of the full result.
    """
    start_timestamp = int(time.time() * 1000)
    name = data_utils.rand_name()
    metric = [
        helpers.create_metric(name=name, timestamp=start_timestamp + 0,
                              dimensions={
                                  'key1': 'value-1',
                                  'key2': 'value-1'
                              },
                              value=1),
        helpers.create_metric(name=name, timestamp=start_timestamp + 500,
                              dimensions={
                                  'key1': 'value-2',
                                  'key2': 'value-2'
                              },
                              value=2),
        helpers.create_metric(name=name, timestamp=start_timestamp + 1000,
                              dimensions={
                                  'key1': 'value-3',
                                  'key2': 'value-3'
                              },
                              value=3),
        helpers.create_metric(name=name, timestamp=start_timestamp + 1500,
                              dimensions={
                                  'key1': 'value-4',
                                  'key2': 'value-4'
                              },
                              value=4)
    ]
    self.monasca_client.create_metrics(metric)
    query_parms = '?name=' + name
    # BUG FIX: xrange() is Python 2 only; use range().
    for i in range(constants.MAX_RETRIES):
        resp, response_body = self.monasca_client.list_metrics(query_parms)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        if elements:
            break
        else:
            time.sleep(constants.RETRY_WAIT_SECS)
    self._check_timeout(i, constants.MAX_RETRIES, elements, 4)

    start_time = helpers.timestamp_to_iso(start_timestamp)
    end_timestamp = start_timestamp + 4000
    end_time = helpers.timestamp_to_iso(end_timestamp)
    query_parms = '?name=' + name + '&merge_metrics=true&statistics=avg' \
                  + '&start_time=' + str(start_time) + '&end_time=' + \
                  str(end_time) + '&period=1'
    resp, body = self.monasca_client.list_statistics(query_parms)
    self.assertEqual(200, resp.status)
    elements = body['elements'][0]['statistics']
    first_element = elements[0]

    query_parms = '?name=' + name + '&merge_metrics=true&statistics=avg'\
                  + '&start_time=' + str(start_time) + '&end_time=' + \
                  str(end_time) + '&period=1' + '&limit=4'
    resp, response_body = self.monasca_client.list_statistics(query_parms)
    self.assertEqual(200, resp.status)
    elements = response_body['elements'][0]['statistics']
    self.assertEqual(4, len(elements))
    self.assertEqual(first_element, elements[0])

    for index in range(1, 5):
        max_limit = 5 - index
        for limit in range(1, max_limit):
            offset_timestamp = start_timestamp + (1000 * index)
            # BUG FIX: use floor division; "/" is true division on
            # Python 3 and would produce fractional seconds, changing
            # the generated ISO offset the API is queried with.
            offset = timeutils.iso8601_from_timestamp(
                offset_timestamp // 1000)
            last_index = index + limit
            expected_elements = elements[index:last_index]
            query_parms = '?name=' + name + '&merge_metrics=true' + \
                          '&statistics=avg' + '&start_time=' + \
                          str(start_time) + '&end_time=' + \
                          str(end_time) + '&period=1' + '&limit=' + \
                          str(limit) + '&offset=' + str(offset)
            resp, response_body = self.monasca_client.list_statistics(
                query_parms)
            self.assertEqual(200, resp.status)
            if not response_body['elements']:
                self.fail("No metrics returned")
            if not response_body['elements'][0]['statistics']:
                self.fail("No statistics returned")
            new_elements = response_body['elements'][0]['statistics']
            self.assertEqual(limit, len(new_elements))
            # A bug in the python API causes limit 1 to not have
            # matching timestamps, so only compare for limit > 1.
            if limit > 1:
                self.assertEqual(expected_elements, new_elements)
def resource_setup(cls):
    """Create the measurement fixtures shared by the measurement tests.

    Publishes NUM_MEASUREMENTS base measurements under name1, one
    dimensioned measurement (for the dimensions test), four measurements
    under name2 (for the offset/limit test), and one more under name1
    with a different dimension (for the no-merge test), then polls until
    both names report the expected measurement counts. Note: if the
    retries are exhausted, setup proceeds anyway (best-effort wait).
    """
    super(TestMeasurements, cls).resource_setup()
    start_timestamp = int(time.time() * 1000)
    start_time = str(helpers.timestamp_to_iso(start_timestamp))
    metrics = []
    name1 = data_utils.rand_name()
    name2 = data_utils.rand_name()
    cls._names_list = [name1, name2]
    key = data_utils.rand_name('key')
    value = data_utils.rand_name('value')
    cls._key = key
    cls._value = value
    cls._start_timestamp = start_timestamp
    # BUG FIX: xrange() is Python 2 only; use range().
    for i in range(NUM_MEASUREMENTS):
        metric = helpers.create_metric(
            name=name1, timestamp=start_timestamp + (i * 10), value=i)
        metrics.append(metric)
    cls.monasca_client.create_metrics(metrics)

    # Create metric2 for test_list_measurements_with_dimensions
    metric2 = helpers.create_metric(
        name=name1, timestamp=start_timestamp + ONE_SECOND * 2,
        dimensions={key: value}, value=NUM_MEASUREMENTS)
    cls.monasca_client.create_metrics(metric2)

    # Create metric3 for test_list_measurements_with_offset_limit
    metric3 = [
        helpers.create_metric(
            name=name2, timestamp=start_timestamp + ONE_SECOND * 3,
            dimensions={'key1': 'value1', 'key2': 'value5',
                        'key3': 'value7'}),
        helpers.create_metric(
            name=name2, timestamp=start_timestamp + ONE_SECOND * 3 + 10,
            dimensions={'key1': 'value2', 'key2': 'value5',
                        'key3': 'value7'}),
        helpers.create_metric(
            name=name2, timestamp=start_timestamp + ONE_SECOND * 3 + 20,
            dimensions={'key1': 'value3', 'key2': 'value6',
                        'key3': 'value7'}),
        helpers.create_metric(
            name=name2, timestamp=start_timestamp + ONE_SECOND * 3 + 30,
            dimensions={'key1': 'value4', 'key2': 'value6',
                        'key3': 'value8'})
    ]
    cls.monasca_client.create_metrics(metric3)

    # Create metric4 for test_list_measurements_with_no_merge_metrics
    metric4 = helpers.create_metric(
        name=name1, timestamp=start_timestamp + ONE_SECOND * 4,
        dimensions={'key-1': 'value-1'}, value=NUM_MEASUREMENTS + 1)
    cls.monasca_client.create_metrics(metric4)

    end_time = str(helpers.timestamp_to_iso(
        start_timestamp + NUM_MEASUREMENTS + ONE_SECOND * 5))
    queries = []
    queries.append(
        '?name={}&start_time={}&end_time={}&merge_metrics=true'.
        format(name1, start_time, end_time))
    queries.append(
        '?name={}&start_time={}&end_time={}&merge_metrics=true'.
        format(name2, start_time, end_time))
    for timer in range(constants.MAX_RETRIES):
        # BUG FIX: map() returns a lazy, non-subscriptable iterator on
        # Python 3; materialize the responses as a list before indexing.
        responses = [cls.monasca_client.list_measurements(query)
                     for query in queries]
        resp_first = responses[0][0]
        response_body_first = responses[0][1]
        resp_second = responses[1][0]
        response_body_second = responses[1][1]
        if resp_first.status == 200 and resp_second.status == 200 \
                and len(response_body_first['elements']) == 1 \
                and len(response_body_second['elements']) == 1:
            len_meas_first = len(
                response_body_first['elements'][0]['measurements'])
            len_meas_second = len(
                response_body_second['elements'][0]['measurements'])
            # name1 has NUM_MEASUREMENTS base + metric2 + metric4;
            # name2 has the four metric3 measurements.
            if len_meas_first == NUM_MEASUREMENTS + 2 \
                    and len_meas_second == 4:
                break
            else:
                time.sleep(constants.RETRY_WAIT_SECS)
        else:
            time.sleep(constants.RETRY_WAIT_SECS)
    cls._start_time = start_time
    cls._end_time = end_time