def test_get_total_filtering_on_one_period_and_one_tenant(self):
    """Total over the first period, restricted to a single tenant."""
    self.insert_data()
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
    total = self.storage.get_total(begin=begin, end=end,
                                   tenant_id=self._tenant_id)
    self.assertEqual(0.5537, total)
def test_get_total_filtering_on_service(self):
    """Total over the first period, restricted to the compute service."""
    self.insert_data()
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
    total = self.storage.get_total(begin=begin, end=end, service='compute')
    self.assertEqual(0.84, total)
def test_get_total_without_filter_but_timestamp(self):
    """Unfiltered total spanning both sample periods."""
    self.insert_data()
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
    total = self.storage.get_total(begin=begin, end=end)
    # FIXME(sheeprine): floating point error (transition to decimal)
    self.assertEqual(1.9473999999999998, total)
def _get_result(metric):
    """Collect one metric, mapping 'no data' and errors to outcomes.

    Returns the collected data for *metric*, or None when the collector
    raises NoDataCollected. Any other exception logs a warning and
    terminates the worker process.

    NOTE(review): nested closure — relies on ``self``, ``timestamp``
    and module names (``collector``, ``LOG``, ``ck_utils``, ``sys``)
    from the enclosing scope, which is not fully visible here.
    """
    try:
        return self._collect(metric, timestamp)
    except collector.NoDataCollected:
        # Expected condition: the metric simply has nothing for this
        # timestamp; report it at info level and skip.
        LOG.info(
            '[scope: {scope}, worker: {worker}] No data collected '
            'for metric {metric} at timestamp {ts}'.format(
                scope=self._tenant_id, worker=self._worker_id,
                metric=metric, ts=ck_utils.ts2dt(timestamp))
        )
        return None
    except Exception as e:
        LOG.warning(
            '[scope: {scope}, worker: {worker}] Error while collecting'
            ' metric {metric} at timestamp {ts}: {e}. Exiting.'.format(
                scope=self._tenant_id, worker=self._worker_id,
                metric=metric, ts=ck_utils.ts2dt(timestamp), e=e)
        )
        # FIXME(peschk_l): here we just exit, and the
        # collection will be retried during the next collect
        # cycle. In the future, we should implement a retrying
        # system in workers
        sys.exit(1)
def test_get_total_groupby_tenant_and_restype(self):
    """total() grouped by project and resource type over both periods."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
    self.insert_data()
    total = self.storage.total(
        begin=begin, end=end, groupby=['project_id', 'type'])['results']
    self.assertEqual(4, len(total))
    # Rows come back ordered by rate then tenant.
    expected = [
        (0.1337, self._other_tenant_id, 'image.size'),
        (0.1337, self._tenant_id, 'image.size'),
        (0.84, self._other_tenant_id, 'instance'),
        (0.84, self._tenant_id, 'instance'),
    ]
    for entry, (rate, tenant_id, res_type) in zip(total, expected):
        self.assertEqual(rate, entry["rate"])
        self.assertEqual(tenant_id, entry["tenant_id"])
        self.assertEqual(res_type, entry["res_type"])
        self.assertEqual(begin, entry["begin"])
        self.assertEqual(end, entry["end"])
def generate_v2_storage_data(min_length=10, nb_projects=2,
                             project_ids=None,
                             start=datetime(2018, 1, 1),
                             end=datetime(2018, 1, 1, 1)):
    """Build a fake v2-storage payload for tests.

    Generates, for every metric in ``samples.V2_STORAGE_SAMPLE``, a list
    of randomized dataframe elements per project.

    :param min_length: Minimum number of elements per project per metric.
    :param nb_projects: Number of projects to generate when no explicit
        ``project_ids`` are given.
    :param project_ids: Optional project id or list of project ids.
    :param start: Period begin (datetime or integer timestamp).
    :param end: Period end (datetime or integer timestamp).
    """
    if isinstance(start, int):
        start = ck_utils.ts2dt(start)
    if isinstance(end, int):
        end = ck_utils.ts2dt(end)

    if not project_ids:
        project_ids = [uuidutils.generate_uuid()
                       for _ in range(nb_projects)]
    elif not isinstance(project_ids, list):
        project_ids = [project_ids]

    usage = {}
    for metric_name, sample in samples.V2_STORAGE_SAMPLE.items():
        dataframes = []
        for project_id in project_ids:
            # Random extra length so generated sets differ per project.
            nb_elems = min_length + random.randint(1, 10)
            for _ in range(nb_elems):
                elem = copy.deepcopy(sample)
                elem['groupby']['id'] = uuidutils.generate_uuid()
                elem['groupby']['project_id'] = project_id
                dataframes.append(elem)
        usage[metric_name] = dataframes

    return {'usage': usage, 'period': {'begin': start, 'end': end}}
def test_get_total_groupby_tenant_and_restype(self):
    """get_total() grouped by tenant and resource type, both periods."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
    self.insert_data()
    total = self.storage.get_total(begin=begin, end=end,
                                   groupby="tenant_id,res_type")
    self.assertEqual(4, len(total))
    expected = [
        (0.1337, self._other_tenant_id, 'image'),
        (0.1337, self._tenant_id, 'image'),
        (0.84, self._other_tenant_id, 'compute'),
        (0.84, self._tenant_id, 'compute'),
    ]
    for entry, (rate, tenant_id, res_type) in zip(total, expected):
        self.assertEqual(rate, entry["rate"])
        self.assertEqual(tenant_id, entry["tenant_id"])
        self.assertEqual(res_type, entry["res_type"])
        self.assertEqual(begin, entry["begin"])
        self.assertEqual(end, entry["end"])
def _fetch_metrics(self, metric_name, start, end,
                   project_id=None, q_filter=None):
    """List active metrics during the timeframe.

    Returns a dict mapping each metric's resource-key dimension value
    to the metric's full dimensions dict.

    :param metric_name: metric name to filter on.
    :type metric_name: str
    :param start: Start of the timeframe.
    :param end: End of the timeframe if needed.
    :param project_id: Filter on a specific tenant/project.
    :type project_id: str
    :param q_filter: Append a custom filter.
    :type q_filter: list
    """
    dims = self._get_dimensions(metric_name, project_id, q_filter)
    found = self._conn.metrics.list(
        name=metric_name,
        dimensions=dims,
        start_time=ck_utils.ts2dt(start),
        end_time=ck_utils.ts2dt(end),
    )
    key = self.conf[metric_name]['extra_args']['resource_key']
    result = {}
    for metric in found:
        result[metric['dimensions'][key]] = metric['dimensions']
    return result
def _fetch_measures(self, metric_name, start, end,
                    project_id=None, q_filter=None):
    """Get measures for given metric during the timeframe.

    :param metric_name: metric name to filter on.
    :type metric_name: str
    :param start: Start of the timeframe.
    :param end: End of the timeframe if needed.
    :param project_id: Filter on a specific tenant/project.
    :type project_id: str
    :param q_filter: Append a custom filter.
    :type q_filter: list
    """
    dims = self._get_dimensions(metric_name, project_id, q_filter)
    metric_conf = self.conf[metric_name]
    # NOTE(lpeschke): One aggregated measure per collect period
    return self._conn.metrics.list_statistics(
        name=metric_name,
        merge_metrics=True,
        dimensions=dims,
        start_time=ck_utils.ts2dt(start),
        end_time=ck_utils.ts2dt(end),
        period=end - start,
        statistics=metric_conf['extra_args']['aggregation_method'],
        group_by=metric_conf['groupby'])
def _fetch_metrics(self, metric_name, start, end,
                   project_id=None, q_filter=None):
    """List active metrics during the timeframe.

    Returns a mapping from each metric's resource-key dimension value
    to that metric's dimensions dict.

    :param metric_name: metric name to filter on.
    :type metric_name: str
    :param start: Start of the timeframe.
    :param end: End of the timeframe if needed.
    :param project_id: Filter on a specific tenant/project.
    :type project_id: str
    :param q_filter: Append a custom filter.
    :type q_filter: list
    """
    dimensions = self._get_dimensions(metric_name, project_id, q_filter)
    listed = self._conn.metrics.list(
        name=metric_name,
        dimensions=dimensions,
        start_time=ck_utils.ts2dt(start),
        end_time=ck_utils.ts2dt(end),
    )
    rkey = self.conf[metric_name]['extra_args']['resource_key']
    return dict(
        (m['dimensions'][rkey], m['dimensions']) for m in listed)
def test_update_period_on_append(self):
    """Appending dataframes must move the tenant's usage period along."""
    def check_period(first_ts, last_ts):
        # Both the raw timestamps and their datetime mirrors must match.
        self.assertEqual(self.storage.usage_start[self._tenant_id],
                         first_ts)
        self.assertEqual(self.storage.usage_start_dt[self._tenant_id],
                         ck_utils.ts2dt(first_ts))
        self.assertEqual(self.storage.usage_end[self._tenant_id],
                         last_ts)
        self.assertEqual(self.storage.usage_end_dt[self._tenant_id],
                         ck_utils.ts2dt(last_ts))

    for attr in ('usage_start', 'usage_start_dt',
                 'usage_end', 'usage_end_dt'):
        self.assertNotIn(self._tenant_id, getattr(self.storage, attr))
    working_data = copy.deepcopy(samples.RATED_DATA)
    self.storage.append([working_data[0]], self._tenant_id)
    check_period(samples.FIRST_PERIOD_BEGIN, samples.FIRST_PERIOD_END)
    self.storage.append([working_data[1]], self._tenant_id)
    check_period(samples.SECOND_PERIOD_BEGIN, samples.SECOND_PERIOD_END)
def _get_state_manager_timeframe(self):
    """Load the current usage timeframe and total from the state manager."""
    start = self._sm.get_state()
    end = start + self._period
    self.usage_start = start
    self.usage_start_dt = ck_utils.ts2dt(start)
    self.usage_end = end
    self.usage_end_dt = ck_utils.ts2dt(end)
    # 'total' defaults to 0 when the metadata has never been written.
    self.total = self._sm.get_metadata().get('total', 0)
def test_get_total_filtering_on_one_period(self):
    """get_total() restricted to the first period only."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
    self.insert_data()
    total = self.storage.get_total(begin=begin, end=end)
    self.assertEqual(1, len(total))
    entry = total[0]
    self.assertEqual(1.1074, entry["rate"])
    self.assertEqual(begin, entry["begin"])
    self.assertEqual(end, entry["end"])
def get_network_bw_total(self, start, end=None, project_id=None,
                         storage=None, q_filter=None):
    """Build total (in + out) network bandwidth usage per tap.

    Merges the per-tap 'in' and 'out' bandwidth dicts into a combined
    total, resolves each tap's resource details (cached), and formats
    one usage item per tap for the timeframe.

    :param start: Begin timestamp of the timeframe.
    :param end: End timestamp of the timeframe.
    :param project_id: Optional tenant/project filter.
    :param storage: Unused here (kept for interface compatibility).
    :param q_filter: Optional custom filter.
    :raises collector.NoDataCollected: when no bandwidth data exists in
        either direction.
    """
    #start = 1438164000
    #end = ck_utils.utcnow_ts()
    #end = start+3600
    bw_data = []
    period_taps_in_bw = self._get_network_bw('in', start, end,
                                             project_id, q_filter)
    period_taps_out_bw = self._get_network_bw('out', start, end,
                                              project_id, q_filter)
    period_taps_total_bw = {}
    # Merge: taps present in both directions are summed; matched 'out'
    # entries are removed so only out-only taps remain afterwards.
    for tap_id in period_taps_in_bw:
        if tap_id in period_taps_out_bw:
            period_taps_total_bw[tap_id] = {
                'total': period_taps_in_bw[tap_id]
                + period_taps_out_bw[tap_id],
                'in': period_taps_in_bw[tap_id],
                'out': period_taps_out_bw[tap_id]}
            del period_taps_out_bw[tap_id]
        else:
            period_taps_total_bw[tap_id] = {
                'total': period_taps_in_bw[tap_id],
                'in': period_taps_in_bw[tap_id],
                'out': 0}
    # Taps that only reported outbound traffic.
    if period_taps_out_bw:
        for tap_id in period_taps_out_bw:
            period_taps_total_bw[tap_id] = {
                'total': period_taps_out_bw[tap_id],
                'in': 0,
                'out': period_taps_out_bw[tap_id]}
    for tap_id in period_taps_total_bw:
        # Resolve tap details once and cache them.
        if not self._cacher.has_resource_detail('network.tap', tap_id):
            raw_resource = self._conn.resources.get(tap_id)
            tap = self.t_ceilometer.strip_resource_data(
                'network.tap', raw_resource)
            self._cacher.add_resource_detail('network.tap', tap_id, tap)
        tap = self._cacher.get_resource_detail('network.tap', tap_id)
        period_usage = self.t_cloudkitty.format_item(tap,
                                                     'network.bw.total')
        duration = end - start
        # NOTE(review): 'begining_event_type' is an existing (misspelled)
        # runtime key used elsewhere in this codebase — do not rename.
        period_usage['period'] = {'begin': ts2dt(start),
                                  'end': ts2dt(end),
                                  'period_duration': duration,
                                  'begining_event_type':
                                      'period_cutting_event',
                                  'ending_event_type':
                                      'period_cutting_event',
                                  'chargeable': True}
        # qty is formatted as a 6-decimal string, not a number.
        qty = "%0.6f" % period_taps_total_bw[tap_id]['total']
        period_usage['vol'] = {'unit': 'MB',
                               'qty': qty,
                               'charging_key': 'network.bw.total'}
        period_usage['desc']['bw_stat'] = period_taps_total_bw[tap_id]
        bw_data.append(period_usage)
    # NOTE(review): matched 'out' entries were deleted above, so this
    # only raises when no tap reported traffic in either direction.
    if not (period_taps_in_bw or period_taps_out_bw):
        raise collector.NoDataCollected(self.collector_name,
                                        'network.bw.total')
    return [{'usage': self.t_cloudkitty.format_service('network.bw.total',
                                                       bw_data)}]
def test_get_empty_total(self):
    """A timeframe before any data must yield one entry with no rate."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN - 3600)
    end = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    self.insert_data()
    total = self.storage.get_total(begin=begin, end=end)
    self.assertEqual(1, len(total))
    entry = total[0]
    self.assertIsNone(entry["rate"])
    self.assertEqual(begin, entry["begin"])
    self.assertEqual(end, entry["end"])
def _fetch_metric(self, metric_name, start, end,
                  project_id=None, q_filter=None):
    """Get metric during the timeframe.

    Returns the aggregated measures for the metric, or an empty list
    when gnocchi reports that the metric does not exist.

    :param metric_name: metric name to filter on.
    :type metric_name: str
    :param start: Start of the timeframe.
    :param end: End of the timeframe if needed.
    :param project_id: Filter on a specific tenant/project.
    :type project_id: str
    :param q_filter: Append a custom filter.
    :type q_filter: list
    """
    # Get gnocchi specific conf
    extra_args = self.conf[metric_name]['extra_args']
    # get resource type
    resource_type = extra_args['resource_type']
    scope_key = CONF.collect.scope_key

    # build search query using resource type and project_id if provided
    query_parameters = list()
    query_parameters.append(
        self.gen_filter(cop="=", type=resource_type))
    if project_id:
        kwargs = {scope_key: project_id}
        query_parameters.append(self.gen_filter(**kwargs))
    if q_filter:
        query_parameters.append(q_filter)

    # build aggregation operation
    op = ["aggregate", extra_args['aggregation_method'],
          ["metric", metric_name, extra_args['aggregation_method']]]

    # get groupby
    groupby = self.conf[metric_name]['groupby']

    try:
        return self._conn.aggregates.fetch(
            op,
            resource_type=resource_type,
            start=ck_utils.ts2dt(start),
            stop=ck_utils.ts2dt(end),
            groupby=groupby,
            search=self.extend_filter(*query_parameters))
    except (gexceptions.MetricNotFound, gexceptions.BadRequest) as e:
        # FIXME(peschk_l): gnocchiclient seems to be raising a BadRequest
        # when it should be raising MetricNotFound
        if isinstance(e, gexceptions.BadRequest):
            if 'Metrics not found' not in six.text_type(e):
                raise
        # BUGFIX: the original passed err=e to format() with no {err}
        # placeholder (str.format silently ignores extra kwargs), so the
        # reason for skipping was never logged. Include it explicitly.
        LOG.warning('[{scope}] Error: {err}. Skipping this metric for '
                    'the current cycle.'.format(scope=project_id, err=e))
        return []
def test_get_total_filtering_on_service(self):
    """get_total() filtered on compute over the first period."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
    self.insert_data()
    total = self.storage.get_total(begin=begin, end=end, service='compute')
    self.assertEqual(1, len(total))
    entry = total[0]
    self.assertEqual(0.84, entry["rate"])
    self.assertEqual('compute', entry["res_type"])
    self.assertEqual(begin, entry["begin"])
    self.assertEqual(end, entry["end"])
def _is_resource_active(self, meter, resource_id, start, end):
    """Return True when the resource has measurements in the timeframe."""
    found = self._conn.metrics.list_measurements(
        name=meter,
        start_time=ck_utils.ts2dt(start),
        end_time=ck_utils.ts2dt(end),
        group_by='resource_id',
        merge_metrics=True,
        dimensions={'resource_id': resource_id},
    )
    return len(found) > 0
def test_get_total_without_filter_but_timestamp(self):
    """Unfiltered total() over the whole two-period timeframe."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
    self.insert_data()
    results = self.storage.total(begin=begin, end=end)['results']
    self.assertEqual(1, len(results))
    entry = results[0]
    # FIXME(sheeprine): floating point error (transition to decimal)
    self.assertEqual(1.9473999999999998, entry["rate"])
    self.assertEqual(begin, entry["begin"])
    self.assertEqual(end, entry["end"])
def test_get_empty_total(self):
    """An empty timeframe must yield a single entry with a zero rate."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN - 3600)
    end = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    self.insert_data()
    total = self.storage.total(
        begin=begin, end=end)['results']
    self.assertEqual(1, len(total))
    # CONSISTENCY FIX: assertEqual(expected, actual) like every sibling
    # test — the original reversed the arguments for "rate", which makes
    # failure messages report expected/actual backwards.
    self.assertEqual(0, total[0]["rate"])
    self.assertEqual(begin, total[0]["begin"])
    self.assertEqual(end, total[0]["end"])
def test_get_total_filtering_on_one_period(self):
    """total() restricted to the first period only."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
    self.insert_data()
    results = self.storage.total(begin=begin, end=end)['results']
    self.assertEqual(1, len(results))
    entry = results[0]
    self.assertEqual(1.1074, entry["rate"])
    self.assertEqual(begin, entry["begin"])
    self.assertEqual(end, entry["end"])
def append(self, data, start, end):
    """Accumulate rated data into the current usage period.

    :param data: Rated dataframes to accumulate.
    :param start: Beginning timestamp of the appended data.
    :param end: End timestamp of the appended data (currently unused —
        the period end is derived from ``start + self._period``).
    """
    # FIXME we should use the real time values
    # A frame starting at or after the current period's end opens a new
    # period: reset usage_start so the block below re-initializes the
    # boundaries from this frame's start.
    if self.usage_end is not None and start >= self.usage_end:
        self.usage_start = None

    if self.usage_start is None:
        self.usage_start = start
        self.usage_end = start + self._period
        self.usage_start_dt = ck_utils.ts2dt(self.usage_start)
        self.usage_end_dt = ck_utils.ts2dt(self.usage_end)

    self._update(data)
def test_get_total_without_filter_but_timestamp(self):
    """Unfiltered total across the full two-period window."""
    self.insert_data()
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
    results = self.storage.total(begin=begin, end=end)['results']
    # FIXME(sheeprine): floating point error (transition to decimal)
    self.assertEqual(1, len(results))
    self.assertEqual(1.9473999999999998, results[0]["rate"])
    self.assertEqual(begin, results[0]["begin"])
    self.assertEqual(end, results[0]["end"])
def test_get_total_filtering_on_one_period_and_one_tenant(self):
    """total() over the first period, scoped to a single project."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
    self.insert_data()
    results = self.storage.total(
        begin=begin, end=end,
        filters={'project_id': self._tenant_id})['results']
    self.assertEqual(1, len(results))
    entry = results[0]
    self.assertEqual(0.5537, entry["rate"])
    self.assertEqual(self._tenant_id, entry["tenant_id"])
    self.assertEqual(begin, entry["begin"])
    self.assertEqual(end, entry["end"])
def test_get_total_filtering_on_service(self):
    """total() filtered on the 'instance' metric type."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
    self.insert_data()
    results = self.storage.total(
        begin=begin, end=end, metric_types='instance')['results']
    self.assertEqual(1, len(results))
    entry = results[0]
    self.assertEqual(0.84, entry["rate"])
    self.assertEqual('instance', entry["res_type"])
    self.assertEqual(begin, entry["begin"])
    self.assertEqual(end, entry["end"])
def _fetch_metric(self, metric_name, start, end,
                  project_id=None, q_filter=None):
    """Get metric during the timeframe.

    :param metric_name: metric name to filter on.
    :type metric_name: str
    :param start: Start of the timeframe.
    :param end: End of the timeframe if needed.
    :param project_id: Filter on a specific tenant/project.
    :type project_id: str
    :param q_filter: Append a custom filter.
    :type q_filter: list
    """
    # Gnocchi-specific configuration for this metric.
    metric_conf = self.conf[metric_name]
    extra_args = metric_conf['extra_args']
    resource_type = extra_args['resource_type']
    aggregation = extra_args['aggregation_method']
    scope_key = CONF.collect.scope_key

    # Search query: resource type, then optional scope and custom filter.
    filters = [self.gen_filter(cop="=", type=resource_type)]
    if project_id:
        filters.append(self.gen_filter(**{scope_key: project_id}))
    if q_filter:
        filters.append(q_filter)

    # Aggregation operation over the metric.
    op = [
        "aggregate", aggregation,
        ["metric", metric_name, aggregation]
    ]

    return self._conn.aggregates.fetch(
        op,
        resource_type=resource_type,
        start=ck_utils.ts2dt(start),
        stop=ck_utils.ts2dt(end),
        groupby=metric_conf['groupby'],
        search=self.extend_filter(*filters))
def test_get_total_filtering_on_one_period_and_one_tenant(self):
    """Scoped total over the first period for one project."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.FIRST_PERIOD_END)
    self.insert_data()
    filters = {'project_id': self._tenant_id}
    results = self.storage.total(begin=begin, end=end,
                                 filters=filters)['results']
    self.assertEqual(1, len(results))
    self.assertEqual(0.5537, results[0]["rate"])
    self.assertEqual(self._tenant_id, results[0]["tenant_id"])
    self.assertEqual(begin, results[0]["begin"])
    self.assertEqual(end, results[0]["end"])
def _expand(self, metrics, resource, name, aggregate, start, end):
    """Fill resource[name] with the metric's first measure value.

    Defaults to 0 when the metric is missing or has no measures.
    """
    try:
        measures = self._conn.metric.get_measures(
            metric=metrics[name],
            start=ck_utils.ts2dt(start),
            stop=ck_utils.ts2dt(end),
            aggregation=aggregate)
        # NOTE(sheeprine): Each measure is
        # [point_date, granularity, value], e.g.
        # ["2015-11-24T00:00:00+00:00", 86400.0, 64.0];
        # keep the first result's value.
        resource[name] = measures[0][2]
    except (IndexError, KeyError):
        # Unknown metric name or empty measure list.
        resource[name] = 0
def test_get_total_groupby_restype(self):
    """total() grouped by resource type over both periods."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
    self.insert_data()
    # NOTE: this storage version returns the rows directly (no
    # ['results'] wrapper).
    total = self.storage.total(begin=begin, end=end, groupby=['type'])
    self.assertEqual(2, len(total))
    expected = [(0.2674, 'image.size'), (1.68, 'instance')]
    for entry, (rate, res_type) in zip(total, expected):
        self.assertEqual(rate, entry["rate"])
        self.assertEqual(res_type, entry["res_type"])
        self.assertEqual(begin, entry["begin"])
        self.assertEqual(end, entry["end"])
def create_fake_data(self, begin, end, project_id):
    """Build two fake rated dataframes ('cpu' and 'image.size').

    ``begin``/``end`` may be datetimes or integer timestamps; integers
    are converted with ``ck_utils.ts2dt``.

    :param begin: Period begin.
    :param end: Period end.
    :param project_id: Project id stored in each element's description.
    """
    if isinstance(begin, int):
        begin = ck_utils.ts2dt(begin)
    if isinstance(end, int):
        end = ck_utils.ts2dt(end)

    def make_frame(metric, price):
        # One dataframe with a single element for the given metric.
        return {
            "period": {"begin": begin, "end": end},
            "usage": {
                metric: [{
                    "desc": {
                        "dummy": True,
                        "fake_meta": 1.0,
                        "project_id": project_id,
                    },
                    "vol": {"qty": 1, "unit": "nothing"},
                    "rating": {"price": decimal.Decimal(price)},
                }],
            },
        }

    return [make_frame("cpu", '1.337'), make_frame("image.size", '0.121')]
def _get_expected_total(self, begin=None, end=None,
                        filters=None, group_filters=None):
    """Recompute the expected rated total from ``self.data``.

    Sums ``rating.price`` over every dataframe element whose enclosing
    period overlaps [begin, end) and which passes
    ``self._validate_filters``.

    NOTE(review): ``begin`` and ``end`` default to None but are compared
    against datetimes below, which would raise a TypeError — callers
    appear to always pass both; confirm before relying on the defaults.
    """
    total = decimal.Decimal(0)
    for dataframes in self.data:
        # Skip frames entirely outside of the requested timeframe.
        if (ck_utils.ts2dt(dataframes['period']['begin']) >= end
                or ck_utils.ts2dt(dataframes['period']['end']) <= begin):
            continue
        for df in dataframes['usage'].values():
            for elem in df:
                if self._validate_filters(elem, filters, group_filters):
                    total += elem['rating']['price']
    return total
def _check_begin_end(begin, end):
    """Normalize begin/end to datetimes, defaulting to the current month.

    Accepts datetimes, ISO8601 strings or integer timestamps; falsy
    values default to the start of the current month (begin) and the
    start of the next month (end).
    """
    def normalize(value, fallback):
        if not value:
            return fallback()
        if isinstance(value, six.text_type):
            return utils.iso2dt(value)
        if isinstance(value, int):
            return utils.ts2dt(value)
        return value

    return (normalize(begin, utils.get_month_start),
            normalize(end, utils.get_next_month))
def test_get_total_groupby_tenant(self):
    """get_total() grouped by tenant over both periods."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
    self.insert_data()
    total = self.storage.get_total(begin=begin, end=end,
                                   groupby="tenant_id")
    self.assertEqual(2, len(total))
    # Both tenants carry the same total over the full timeframe.
    for entry, tenant_id in zip(
            total, [self._other_tenant_id, self._tenant_id]):
        self.assertEqual(0.9737, entry["rate"])
        self.assertEqual(tenant_id, entry["tenant_id"])
        self.assertEqual(begin, entry["begin"])
        self.assertEqual(end, entry["end"])
def append_point(self, metric_type, timestamp, qty, price, unit,
                 fields, tags):
    """Queue one point for the 'dataframes' measurement.

    The point is flushed to InfluxDB once ``self._chunk_size`` points
    have accumulated and autocommit is enabled.

    NOTE(review): the original docstring said "two points", but exactly
    one point is appended per call.

    :param metric_type: Metric type stored as the 'type' tag.
    :param timestamp: Point timestamp (converted with utils.ts2dt).
    :param qty: Quantity (stored as float).
    :param price: Price (stored as float).
    :param unit: Unit string.
    :param fields: Metadata fields for the point.
    :param tags: Groupby tags for the point.
    """
    measurement_fields = copy.deepcopy(fields)
    measurement_fields['qty'] = float(qty)
    measurement_fields['price'] = float(price)
    measurement_fields['unit'] = unit
    # Unfortunately, this seems to be the fastest way: Having several
    # measurements would imply a high client-side workload, and this allows
    # us to filter out unrequired keys
    measurement_fields['groupby'] = '|'.join(tags.keys())
    measurement_fields['metadata'] = '|'.join(fields.keys())

    measurement_tags = copy.deepcopy(tags)
    measurement_tags['type'] = metric_type

    self._points.append({
        'measurement': 'dataframes',
        'tags': measurement_tags,
        'fields': measurement_fields,
        'time': utils.ts2dt(timestamp),
    })
    if self._autocommit and len(self._points) >= self._chunk_size:
        self.commit()
def set_state(self, identifier, state, fetcher=None, collector=None,
              scope_key=None):
    """Create or update the stored state for a scope.

    :param identifier: Identifier of the scope.
    :param state: New state, as a datetime or an integer timestamp.
    :param fetcher: Optional fetcher name.
    :param collector: Optional collector name.
    :param scope_key: Optional scope key.
    """
    if isinstance(state, int):
        state = ck_utils.ts2dt(state)
    session = db.get_session()
    session.begin()
    r = self._get_db_item(session, identifier,
                          fetcher, collector, scope_key)
    if r:
        # BUGFIX: the original fell through to the creation branch when
        # the item already existed with an identical state, inserting a
        # duplicate row. Only update (and commit) on an actual change.
        if r.state != state:
            r.state = state
            session.commit()
    else:
        state_object = self.model(
            identifier=identifier,
            state=state,
            fetcher=fetcher,
            collector=collector,
            scope_key=scope_key,
        )
        session.add(state_object)
        session.commit()
    session.close()
def get_time_frame(self, begin, end, **filters):
    """Return rated frames within [begin, end] matching the filters.

    :raises storage.NoTimeFrame: when no frame matches.
    """
    model = models.RatedDataFrame
    session = db.get_session()
    q = utils.model_query(model, session).filter(
        model.begin >= ck_utils.ts2dt(begin),
        model.end <= ck_utils.ts2dt(end))
    for name, value in filters.items():
        if value:
            q = q.filter(getattr(model, name) == value)
    if not filters.get('res_type'):
        # Hide placeholder frames unless a resource type was requested.
        q = q.filter(model.res_type != '_NO_DATA_')
    if not q.count():
        raise storage.NoTimeFrame()
    return [entry.to_cloudkitty() for entry in q.all()]
def fetch_all(self, metric_name, start, end,
              project_id=None, q_filter=None):
    """Collect and format every datapoint for *metric_name*.

    Fetches aggregated measures (and, when metadata is configured for
    the metric, the matching resources) and converts each aggregate
    into a CloudKitty item. Datapoints whose associated resource cannot
    be found are logged and skipped.

    :param metric_name: Name of the metric to collect.
    :param start: Start of the timeframe (timestamp).
    :param end: End of the timeframe (timestamp).
    :param project_id: Optional scope/project filter.
    :param q_filter: Optional custom filter.
    """
    met = self.conf[metric_name]
    data = self._fetch_metric(
        metric_name,
        start,
        end,
        project_id=project_id,
        q_filter=q_filter,
    )
    resources_info = None
    if met['metadata']:
        resources_info = self._fetch_resources(metric_name, start, end,
                                               project_id=project_id,
                                               q_filter=q_filter)
    formated_resources = list()
    # NOTE(review): the loop rebinds 'data' below; this is safe because
    # the iterator over the original list is already bound, but the
    # shadowing is confusing.
    for d in data:
        # Only if aggregates have been found
        if d['measures']['measures']['aggregated']:
            try:
                metadata, groupby, qty = self._format_data(
                    met, d, resources_info)
            except AssociatedResourceNotFound as e:
                # Skip this datapoint rather than failing the whole
                # collection cycle.
                LOG.warning(
                    '[{}] An error occured during data collection '
                    'between {} and {}: {}'.format(project_id,
                                                   ck_utils.ts2dt(start),
                                                   ck_utils.ts2dt(end),
                                                   e),
                )
                continue
            data = self.t_cloudkitty.format_item(
                groupby,
                metadata,
                met['unit'],
                qty=qty,
            )
            formated_resources.append(data)
    return formated_resources
def fetch_all(self, metric_name, start, end,
              project_id=None, q_filter=None):
    """Collect and format every datapoint for *metric_name*.

    Fetches aggregated measures (plus the matching resources when
    metadata is configured) and converts each aggregate into a
    CloudKitty item. Datapoints whose associated resource cannot be
    found are logged and skipped.

    :param metric_name: Name of the metric to collect.
    :param start: Start of the timeframe (timestamp).
    :param end: End of the timeframe (timestamp).
    :param project_id: Optional scope/project filter.
    :param q_filter: Optional custom filter.
    """
    met = self.conf[metric_name]
    data = self._fetch_metric(
        metric_name,
        start,
        end,
        project_id=project_id,
        q_filter=q_filter,
    )
    resources_info = None
    if met['metadata']:
        resources_info = self._fetch_resources(
            metric_name,
            start,
            end,
            project_id=project_id,
            q_filter=q_filter
        )
    formated_resources = list()
    # NOTE(review): 'data' is rebound inside the loop; safe because the
    # iterator is already bound, but the shadowing is confusing.
    for d in data:
        # Only if aggregates have been found
        if d['measures']['measures']['aggregated']:
            try:
                metadata, groupby, qty = self._format_data(
                    met, d, resources_info)
            except AssociatedResourceNotFound as e:
                # Skip this datapoint rather than failing the whole
                # collection cycle.
                LOG.warning(
                    '[{}] An error occured during data collection '
                    'between {} and {}: {}'.format(
                        project_id,
                        ck_utils.ts2dt(start),
                        ck_utils.ts2dt(end),
                        e),
                )
                continue
            data = self.t_cloudkitty.format_item(
                groupby,
                metadata,
                met['unit'],
                qty=qty,
            )
            formated_resources.append(data)
    return formated_resources
def test_get_total_groupby_restype(self):
    """total() grouped by resource type across both periods."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
    self.insert_data()
    results = self.storage.total(
        begin=begin, end=end, groupby=['type'])['results']
    self.assertEqual(2, len(results))
    expected = [(0.2674, 'image.size'), (1.68, 'instance')]
    for entry, (rate, res_type) in zip(results, expected):
        self.assertEqual(rate, entry["rate"])
        self.assertEqual(res_type, entry["res_type"])
        self.assertEqual(begin, entry["begin"])
        self.assertEqual(end, entry["end"])
def _update_end(self, end, tenant_id):
    """Record a new usage end for *tenant_id*.

    Stores both the raw timestamp and its datetime form.

    :param end: New usage end timestamp.
    :param tenant_id: tenant_id to update.
    """
    end_dt = ck_utils.ts2dt(end)
    self.usage_end[tenant_id] = end
    self.usage_end_dt[tenant_id] = end_dt
def _update_start(self, begin, tenant_id):
    """Record a new usage start for *tenant_id*.

    Stores both the raw timestamp and its datetime form.

    :param begin: New usage beginning timestamp.
    :param tenant_id: tenant_id to update.
    """
    begin_dt = ck_utils.ts2dt(begin)
    self.usage_start[tenant_id] = begin
    self.usage_start_dt[tenant_id] = begin_dt
def _update_end(self, end, tenant_id):
    """Store the tenant's new usage end in raw and datetime form.

    :param end: New usage end timestamp.
    :param tenant_id: Tenant ID to update.
    """
    self.usage_end_dt[tenant_id] = ck_utils.ts2dt(end)
    self.usage_end[tenant_id] = end
def test_get_total_groupby_tenant(self):
    """total() grouped by project over both periods."""
    begin = ck_utils.ts2dt(samples.FIRST_PERIOD_BEGIN)
    end = ck_utils.ts2dt(samples.SECOND_PERIOD_END)
    self.insert_data()
    results = self.storage.total(
        begin=begin, end=end, groupby=['project_id'])['results']
    self.assertEqual(2, len(results))
    # Both projects accumulate the same total over the full window.
    for entry, tenant_id in zip(
            results, [self._other_tenant_id, self._tenant_id]):
        self.assertEqual(0.9737, entry["rate"])
        self.assertEqual(tenant_id, entry["tenant_id"])
        self.assertEqual(begin, entry["begin"])
        self.assertEqual(end, entry["end"])
def _update_start(self, begin, tenant_id):
    """Store the tenant's new usage start in raw and datetime form.

    :param begin: New usage beginning timestamp.
    :param tenant_id: Tenant ID to update.
    """
    self.usage_start_dt[tenant_id] = ck_utils.ts2dt(begin)
    self.usage_start[tenant_id] = begin
def get_time_frame(self, begin, end, **filters):
    """Return stored frames within [begin, end] matching the filters.

    :raises storage.NoTimeFrame: when no frame matches.
    """
    model = self.frame_model
    session = db.get_session()
    q = utils.model_query(model, session)
    q = q.filter(
        model.begin >= ck_utils.ts2dt(begin),
        model.end <= ck_utils.ts2dt(end))
    for name, value in filters.items():
        if value:
            q = q.filter(getattr(model, name) == value)
    if not filters.get('res_type'):
        # Placeholder frames carry no real data; hide them unless a
        # resource type was explicitly requested.
        q = q.filter(model.res_type != '_NO_DATA_')
    if not q.count():
        raise storage.NoTimeFrame()
    return [entry.to_cloudkitty(self._collector) for entry in q.all()]
def _load_pricing_infos(self, timestamp):
    """Load pricing entries active at *timestamp* into ``self._entries``.

    Resolves the pricing version current at the (localized) timestamp
    and indexes each pricing info by its charging key id.

    :param timestamp: Integer timestamp used to select the pricing
        version.
    """
    db_api = common_db_api.get_instance()
    # NOTE(review): strptime() requires a string — this assumes
    # ck_utils.utc2local() returns a "%Y-%m-%d %H:%M:%S" formatted
    # string; confirm, otherwise this raises TypeError.
    dt = datetime.datetime.strptime(
        ck_utils.utc2local(ck_utils.ts2dt(timestamp)),
        "%Y-%m-%d %H:%M:%S")
    pricing_version = db_api.get_current_pricing_version(dt)
    # CLEANUP: the original initialized pricing_infos_list to [] and
    # immediately overwrote it; the dead assignment was removed.
    pricing_infos_list = db_api.get_pricing_infos(
        pricing_version=pricing_version)
    for pricing_info in pricing_infos_list:
        self._entries[pricing_info['charging_key_id']] = {
            'charging_key': pricing_info['charging_key'],
            'res_type': pricing_info['res_type'],
            'unit_price': pricing_info['unit_price'],
            'time_based': pricing_info['time_based'],
        }
def parse_events(self, start, end=None, instance_events=None,
                 resource_id=None, service=None):
    """Turn a chronological event list into per-period usage items.

    Each consecutive pair of events delimits one billable period; the
    final event additionally opens an open-ended period unless it is a
    deletion event or coincides with the end of the timeframe.

    NOTE(review): ``deletion_event`` is only bound for the 'compute'
    and 'volume' services — any other value would raise
    UnboundLocalError below; confirm callers never pass anything else.
    NOTE(review): ``ts2dt(end)`` with the default ``end=None`` would
    fail; callers appear to always provide ``end``.
    """
    if service == 'compute':
        deletion_event = u'compute.instance.delete.end'
    elif service == 'volume':
        deletion_event = u'volume.delete.end'
    self.start = ts2dt(start)
    self.end = ts2dt(end)
    items = []
    if len(instance_events) > 1:
        # Pair each event with its successor to close the period.
        i = 0
        while len(instance_events) - 1 > i:
            items.append(self.build_period_usage(instance_events[i],
                                                 instance_events[i + 1],
                                                 service=service))
            i = i + 1
    if type(instance_events[-1]) is dict:
        # Already-built usage dicts get an open-ended trailing period.
        items.append(self.build_period_usage(instance_events[-1],
                                             ending_event=None,
                                             service=service))
    else:
        # Raw event object: only open a trailing period when the
        # resource was neither deleted nor cut exactly at the end.
        if instance_events[-1].event_type != deletion_event and \
                ck_utils.iso2dt(instance_events[-1].generated) != self.end:
            items.append(self.build_period_usage(instance_events[-1],
                                                 ending_event=None,
                                                 service=service))
    return items
def set_state(self, identifier, state, fetcher=None, collector=None,
              scope_key=None):
    """Create or update the stored state for a scope.

    :param identifier: Identifier of the scope.
    :param state: New state, as a datetime or an integer timestamp.
    :param fetcher: Optional fetcher name.
    :param collector: Optional collector name.
    :param scope_key: Optional scope key.
    """
    if isinstance(state, int):
        state = ck_utils.ts2dt(state)
    session = db.get_session()
    session.begin()
    r = self._get_db_item(
        session, identifier, fetcher, collector, scope_key)
    if r:
        # BUGFIX: the original only guarded the update with
        # `r and r.state != state`, so an existing item with an
        # identical state fell through to the creation branch and a
        # duplicate row was inserted. Update only on real change.
        if r.state != state:
            r.state = state
            session.commit()
    else:
        state_object = self.model(
            identifier=identifier,
            state=state,
            fetcher=fetcher,
            collector=collector,
            scope_key=scope_key,
        )
        session.add(state_object)
        session.commit()
    session.close()
def test_get_tenants_filtering_on_period(self):
    """get_tenants must only report tenants active in the timeframe."""
    self.insert_different_data_two_tenants()
    cases = [
        (samples.FIRST_PERIOD_BEGIN, samples.SECOND_PERIOD_END,
         [self._tenant_id, self._other_tenant_id]),
        (samples.FIRST_PERIOD_BEGIN, samples.FIRST_PERIOD_END,
         [self._tenant_id]),
        (samples.SECOND_PERIOD_BEGIN, samples.SECOND_PERIOD_END,
         [self._other_tenant_id]),
    ]
    for begin, end, expected in cases:
        tenants = self.storage.get_tenants(
            begin=ck_utils.ts2dt(begin), end=ck_utils.ts2dt(end))
        self.assertListEqual(expected, tenants)
def construct_previous_event(self, start, resource_id, storage, service):
    """Rebuild the event in effect just before *start* for a resource.

    Looks up the last sample for the resource in the half hour before
    ``start``. When one exists it is converted back into an event — a
    deleted volume instead yields the sentinel ``{'usage': 'ignore me'}``.
    When no sample is found, a placeholder event with unknown fields and
    zero quantity is synthesized. All non-sentinel results get a period
    beginning at ``start`` with a 'period_cutting_event' marker.

    NOTE(review): the 30-minute lookback (start - 1800) is an assumption
    about the sampling interval — confirm against the collector config.
    NOTE(review): ``res_type`` is only bound for 'compute'/'volume';
    other services would raise UnboundLocalError below.
    NOTE(review): 'begining_event_type' is an existing (misspelled)
    runtime key used elsewhere in this codebase — do not rename.
    """
    if service == 'compute':
        res_type = 'instance'
    elif service == 'volume':
        #event_type_filter = ['volume.create.end','volume.resize.end', 'volume.delete.end', 'volume.exists']
        res_type = 'volume'
    #ck_dict = storage.get_previous_period(start, resource_id)
    last_sample = self._get_last_sample(res_type, start - 1800, start,
                                        resource_id)
    previous_event = {}
    if last_sample:
        #sample_details = self._conn.samples.get(last_sample.message_id)
        if service == 'volume':
            # A deleted volume has no previous usage worth rating.
            if last_sample.resource_metadata['event_type'] == \
                    'volume.delete.end':
                previous_event = {'usage': 'ignore me'}
                return previous_event
        previous_event = self.build_event_from_sample(service, last_sample)
    else:
        # Placeholder when no sample exists in the lookback window.
        previous_event = {'usage': {service: [{'user_id': 'unknown',
                                               'resource_id': resource_id,
                                               'vol': {'unit': 'unknown',
                                                       'qty': 0,
                                                       'charging_key': ''},
                                               'tenant_id': 'unknown',
                                               'desc': {u'state':
                                                        u'unknown'}}]}}
    previous_event['usage'][service][0]['period'] = {
        'begining_event_type': u'period_cutting_event',
        'begin': ck_utils.ts2dt(start)}
    return previous_event