def _get_dimensions(self, tenant_id, region, name, dimensions):
    """Return the dimensions of the single metric matching the query.

    Looks up at most two matching metrics so that ambiguity can be
    detected cheaply.

    :returns: the ``dimensions`` dict of the unique match, or ``{}``
        when no metric matches.
    :raises exceptions.MultipleMetricsException: when more than one
        metric matches the query.
    """
    # Limit of 2 is enough to distinguish "unique" from "ambiguous".
    matches = self.list_metrics(tenant_id, region, name, dimensions,
                                None, 2)
    if not matches:
        return {}
    if len(matches) > 1:
        raise exceptions.MultipleMetricsException(
            self.MULTIPLE_METRICS_MESSAGE)
    return matches[0]['dimensions']
def _get_measurements(self, tenant_id, region, name, dimensions,
                      start_timestamp, end_timestamp, offset, limit,
                      merge_metrics_flag):
    """Query raw measurement rows for the metrics matching the filter.

    Builds a single CQL statement over the ``measurements`` table keyed
    by the metric hashes of every matching metric.

    :returns: the iterable of result rows, or ``None`` when no metric
        matches the filter.
    :raises exceptions.MultipleMetricsException: when several metrics
        match and ``merge_metrics_flag`` is falsy.
    """
    matched = self.list_metrics(tenant_id, region, name, dimensions,
                                None, None, start_timestamp,
                                end_timestamp, include_metric_hash=True)
    if not matched:
        return None
    if len(matched) > 1 and not merge_metrics_flag:
        raise exceptions.MultipleMetricsException(
            self.MULTIPLE_METRICS_MESSAGE)

    base_stmt = (' select time_stamp, value, value_meta from measurements'
                 ' where tenant_id = %s and region = %s ')
    params = [tenant_id.encode('utf8'), region.encode('utf8')]

    # One placeholder per matching metric hash for the IN clause.
    hashes = [bytearray(m['metric_hash']) for m in matched]
    clauses = [' and metric_hash in ({}) '.format(
        ",".join(["%s"] * len(hashes)))]
    params.extend(hashes)

    # An explicit offset (exclusive) takes precedence over the
    # inclusive start timestamp.
    if offset:
        clauses.append(' and time_stamp > %s ')
        params.append(offset)
    elif start_timestamp:
        clauses.append(' and time_stamp >= %s ')
        params.append(int(start_timestamp * 1000))
    if end_timestamp:
        clauses.append(' and time_stamp <= %s ')
        params.append(int(end_timestamp * 1000))

    clauses.append(' order by time_stamp ')
    if limit:
        # Fetch one extra row so the caller can tell whether another
        # page exists.
        clauses.append(' limit %s ')
        params.append(limit + 1)

    select_stmt = base_stmt + ''.join(clauses)
    # Effectively disables driver-side paging (max 32-bit fetch size).
    stmt = SimpleStatement(select_stmt, fetch_size=2147483647)
    return self.cassandra_session.execute(stmt, params)
def measurement_list(self, tenant_id, region, name, dimensions,
                     start_timestamp, end_timestamp, offset, limit,
                     merge_metrics_flag, group_by):
    """Return measurement series for the metrics matching the filter.

    Three paths, chosen by how many metrics match and whether grouping
    is requested:

    1. multiple metrics, no ``group_by`` -> merge all matches into a
       single series (requires ``merge_metrics_flag``);
    2. single metric, or ``group_by`` starting with ``'*'`` -> one
       series per metric;
    3. ``group_by`` on specific dimension names -> one merged series
       per group of metrics sharing those dimension values.

    :param offset: pagination cursor; either ``"<id>_<timestamp>"`` or a
        bare timestamp (no underscore), as produced by earlier pages.
    :returns: a list of series dicts, or ``None`` when nothing matches.
    :raises exceptions.MultipleMetricsException: several metrics match
        but neither merging nor grouping was requested.
    :raises exceptions.RepositoryException: any other failure while
        querying.
    """
    metrics = self.list_metrics(tenant_id, region, name, dimensions,
                                None, None)
    # Decompose the pagination cursor: "<id>_<timestamp>" carries both a
    # metric id and a timestamp; a bare value is a timestamp only.
    # NOTE(review): split("_") silently drops anything after a second
    # underscore — assumes ids/timestamps never contain '_'; confirm
    # against the code that builds the offset string.
    if offset:
        tmp = offset.split("_")
        if len(tmp) > 1:
            offset_id = tmp[0]
            offset_timestamp = tmp[1]
        else:
            offset_id = None
            offset_timestamp = offset
    else:
        offset_timestamp = None
        offset_id = None
    if not metrics:
        return None
    elif len(metrics) > 1:
        # Ambiguous match is only acceptable when the caller asked to
        # merge or to group.
        if not merge_metrics_flag and not group_by:
            raise exceptions.MultipleMetricsException(
                self.MULTIPLE_METRICS_MESSAGE)
    try:
        if len(metrics) > 1 and not group_by:
            # offset is controlled only by offset_timestamp when the
            # group by option is not enabled
            count, series_list = self._query_merge_measurements(
                metrics, dimensions, start_timestamp, end_timestamp,
                offset_timestamp, limit)
            return series_list
        # Normalize group_by to a list of dimension names, accepting
        # either a comma-separated string or a one-element list holding
        # such a string.
        if group_by:
            if not isinstance(group_by, list):
                group_by = group_by.split(',')
            elif len(group_by) == 1:
                group_by = group_by[0].split(',')
        # '*' groups by every dimension, i.e. one series per metric —
        # same shape as the single-metric case. (When group_by is None
        # here, len(metrics) == 1, so the short-circuit protects the
        # group_by[0] access.)
        if len(metrics) == 1 or group_by[0].startswith('*'):
            # Resume pagination: drop metrics that precede the one the
            # cursor points at.
            if offset_id:
                for index, metric in enumerate(metrics):
                    if metric['id'] == offset_id:
                        if index > 0:
                            metrics[0:index] = []
                        break
            count, series_list = self._query_measurements(
                metrics, start_timestamp, end_timestamp,
                offset_timestamp, limit)
            return series_list
        grouped_metrics = self._group_metrics(metrics, group_by,
                                              dimensions)
        if not grouped_metrics or len(grouped_metrics) == 0:
            return None
        # Resume pagination across groups: skip whole groups before the
        # cursor's metric, and earlier metrics inside its own group.
        if offset_id:
            found_offset = False
            for outer_index, sublist in enumerate(grouped_metrics):
                for inner_index, metric in enumerate(sublist):
                    if metric['id'] == offset_id:
                        found_offset = True
                        if inner_index > 0:
                            sublist[0:inner_index] = []
                        break
                if found_offset:
                    if outer_index > 0:
                        grouped_metrics[0:outer_index] = []
                    break
        # Query each group in turn, decrementing the remaining row
        # budget (remaining stays None when no limit was given).
        remaining = limit
        series_list = []
        for sublist in grouped_metrics:
            sub_count, results = self._query_merge_measurements(
                sublist, sublist[0]['dimensions'], start_timestamp,
                end_timestamp, offset_timestamp, remaining)
            series_list.extend(results)
            if remaining:
                remaining -= sub_count
                if remaining <= 0:
                    break
            # offset_timestamp is used only in the first group, reset
            # to None for subsequent groups
            if offset_timestamp:
                offset_timestamp = None
        return series_list
    except Exception as ex:
        # Wrap anything unexpected in the repository's own exception
        # type after logging the original traceback.
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)