def _set_metrics_for_resource(session, r, metrics):
    for name, value in six.iteritems(metrics):
        if isinstance(value, uuid.UUID):
            # An existing metric: re-link it to the resource under the
            # requested name, but only if it is active and owned by the
            # same user/project as the resource.
            try:
                update = session.query(Metric).filter(
                    Metric.id == value,
                    Metric.status == 'active',
                    (Metric.created_by_user_id
                     == r.created_by_user_id),
                    (Metric.created_by_project_id
                     == r.created_by_project_id),
                ).update({"resource_id": r.id, "name": name})
            except exception.DBDuplicateEntry:
                raise indexer.NamedMetricAlreadyExists(name)
            if update == 0:
                raise indexer.NoSuchMetric(value)
        else:
            # A new metric: create it under the requested archive policy
            # and attach it to the resource.
            ap_name = value['archive_policy_name']
            m = Metric(id=uuid.uuid4(),
                       created_by_user_id=r.created_by_user_id,
                       created_by_project_id=r.created_by_project_id,
                       archive_policy_name=ap_name,
                       name=name,
                       resource_id=r.id)
            session.add(m)
            try:
                session.flush()
            except exception.DBDuplicateEntry:
                raise indexer.NamedMetricAlreadyExists(name)
            except exception.DBReferenceError as e:
                if (e.constraint ==
                        'fk_metric_archive_policy_name_archive_policy_name'):
                    raise indexer.NoSuchArchivePolicy(ap_name)
                raise
    session.expire(r, ['metrics'])
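# A minimal usage sketch of the two mapping shapes the helper above accepts,
# inferred from its isinstance() branch; `session`, `resource`, and
# `existing_metric_id` are assumed to come from the surrounding indexer code
# and are not defined here.
#
#   _set_metrics_for_resource(session, resource, {
#       # uuid.UUID value: re-link an existing active metric, provided it
#       # belongs to the same user/project as the resource.
#       "cpu.util": existing_metric_id,
#       # mapping value: create a new metric under the named archive policy.
#       "mem.usage": {"archive_policy_name": "low"},
#   })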
def _get_measures_by_name(resources, metric_wildcards, operations,
                          start, stop, granularity, needed_overlap, fill,
                          details):
    references = []
    for r in resources:
        references.extend([
            processor.MetricReference(m, agg, r, wildcard)
            for wildcard, agg in metric_wildcards
            for m in r.metrics if fnmatch.fnmatch(m.name, wildcard)
        ])

    if not references:
        raise indexer.NoSuchMetric(set((m for (m, a) in metric_wildcards)))

    response = {
        "measures": get_measures_or_abort(
            references, operations, start, stop, granularity,
            needed_overlap, fill)
    }
    if details:
        response["references"] = set((r.resource for r in references))
    return response
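# A self-contained sketch of the wildcard expansion performed above, using
# only the stdlib fnmatch module; the metric names and aggregations here are
# invented for illustration.
import fnmatch

metric_wildcards = [("cpu*", "mean"), ("disk.io", "max")]
metric_names = ["cpu.util", "cpu.freq", "mem.usage", "disk.io"]

# Each (wildcard, aggregation) pair fans out to every matching metric name,
# mirroring the nested comprehension that builds MetricReference objects.
matched = [(name, agg)
           for wildcard, agg in metric_wildcards
           for name in metric_names
           if fnmatch.fnmatch(name, wildcard)]
assert matched == [("cpu.util", "mean"), ("cpu.freq", "mean"),
                   ("disk.io", "max")]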
def delete_metric(self, id):
    with self.facade.writer() as session:
        if session.query(Metric).filter(
                Metric.id == id).update({"status": "delete"}) == 0:
            raise indexer.NoSuchMetric(id)
def expunge_metric(self, id):
    with self.facade.writer() as session:
        if session.query(Metric).filter(Metric.id == id).delete() == 0:
            raise indexer.NoSuchMetric(id)
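# A hedged sketch of how the two methods above differ: delete_metric()
# soft-deletes by flipping the status column to "delete" so the row can be
# cleaned up later, while expunge_metric() removes the index row outright.
# `index` is assumed to be an already-configured indexer instance.
#
#   index.delete_metric(metric_id)    # mark for deletion; row remains
#   index.expunge_metric(metric_id)   # remove the row itself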
def post(self, start=None, stop=None, granularity=None,
         needed_overlap=None, fill=None, groupby=None, **kwargs):
    details = api.get_bool_param('details', kwargs)

    if fill is None and needed_overlap is None:
        fill = "dropna"
    start, stop, granularity, needed_overlap, fill = api.validate_qs(
        start, stop, granularity, needed_overlap, fill)

    body = api.deserialize_and_validate(self.FetchSchema)

    references = extract_references(body["operations"])
    if not references:
        api.abort(400, {"cause": "Operations is invalid",
                        "reason": "At least one 'metric' is required",
                        "detail": body["operations"]})

    if "resource_type" in body:
        attr_filter = body["search"]
        policy_filter = (
            pecan.request.auth_helper.get_resource_policy_filter(
                pecan.request, "search resource", body["resource_type"]))
        if policy_filter:
            if attr_filter:
                attr_filter = {"and": [policy_filter, attr_filter]}
            else:
                attr_filter = policy_filter

        groupby = sorted(set(api.arg_to_list(groupby)))
        sorts = groupby if groupby else api.RESOURCE_DEFAULT_PAGINATION
        try:
            resources = pecan.request.indexer.list_resources(
                body["resource_type"],
                attribute_filter=attr_filter,
                sorts=sorts)
        except indexer.IndexerException as e:
            api.abort(400, six.text_type(e))

        if not groupby:
            try:
                return self._get_measures_by_name(
                    resources, references, body["operations"], start,
                    stop, granularity, needed_overlap, fill,
                    details=details)
            except indexer.NoSuchMetric as e:
                api.abort(400, e)

        def groupper(r):
            return tuple((attr, r[attr]) for attr in groupby)

        results = []
        for key, resources in itertools.groupby(resources, groupper):
            try:
                results.append({
                    "group": dict(key),
                    "measures": self._get_measures_by_name(
                        resources, references, body["operations"],
                        start, stop, granularity, needed_overlap, fill,
                        details=details)
                })
            except indexer.NoSuchMetric:
                pass

        if not results:
            api.abort(
                400,
                indexer.NoSuchMetric(set((m for (m, a) in references))))
        return results
    else:
        try:
            metric_ids = set(six.text_type(utils.UUID(m))
                             for (m, a) in references)
        except ValueError as e:
            api.abort(400, {"cause": "Invalid metric references",
                            "reason": six.text_type(e),
                            "detail": references})

        metrics = pecan.request.indexer.list_metrics(
            attribute_filter={"in": {"id": metric_ids}})
        missing_metric_ids = (set(metric_ids)
                              - set(six.text_type(m.id) for m in metrics))
        if missing_metric_ids:
            api.abort(404, {"cause": "Unknown metrics",
                            "reason": "Provided metrics don't exist",
                            "detail": missing_metric_ids})

        if not metrics:
            return []

        for metric in metrics:
            api.enforce("get metric", metric)

        metrics_by_ids = dict((six.text_type(m.id), m) for m in metrics)
        references = [processor.MetricReference(metrics_by_ids[m], a)
                      for (m, a) in references]

        response = {
            "measures": get_measures_or_abort(
                references, body["operations"], start, stop, granularity,
                needed_overlap, fill)
        }
        if details:
            response["references"] = metrics
        return response
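# A hedged sketch of the two request shapes this handler distinguishes; the
# resource type, search filter, and operations expression below are
# assumptions for illustration, following Gnocchi's
# "(aggregate <method> (metric <name> <aggregation>))" grammar.
#
# With "resource_type": metric references are name wildcards resolved
# against each matched resource's metrics, optionally grouped:
#
#   {"resource_type": "instance",
#    "search": {"=": {"flavor_id": "small"}},
#    "operations": "(aggregate mean (metric cpu* mean))"}
#
# Without "resource_type": metric references must be metric UUIDs, which
# are looked up and access-checked individually:
#
#   {"operations":
#    "(aggregate mean (metric 00000000-0000-0000-0000-000000000001 mean))"}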