def _get_measures_by_name(resources, metric_wildcards, operations,
                          start, stop, granularity, needed_overlap, fill,
                          details):
    references = []
    for r in resources:
        references.extend([
            processor.MetricReference(m, agg, r, wildcard)
            for wildcard, agg in metric_wildcards
            for m in r.metrics if fnmatch.fnmatch(m.name, wildcard)
        ])

    if not references:
        api.abort(400, {"cause": "Metrics not found",
                        "detail": set((m for (m, a) in metric_wildcards))})

    response = {
        "measures": get_measures_or_abort(
            references, operations, start, stop, granularity,
            needed_overlap, fill)
    }
    if details:
        response["references"] = set((r.resource for r in references))
    return response

def _get_measures_by_name(self, resources, metric_names, operations,
                          start, stop, granularity, needed_overlap, fill,
                          details):
    references = [
        processor.MetricReference(r.get_metric(metric_name), agg, r)
        for (metric_name, agg) in metric_names
        for r in resources
        if r.get_metric(metric_name) is not None
    ]

    if not references:
        api.abort(400, {"cause": "Metrics not found",
                        "detail": set((m for (m, a) in metric_names))})

    response = {
        "measures": get_measures_or_abort(
            references, operations, start, stop, granularity,
            needed_overlap, fill)
    }
    if details:
        response["references"] = references
    return response

def get_current_user(request):
    user = request.remote_user
    if user is None:
        api.abort(401)
    try:
        # remote_user may be bytes depending on the WSGI server
        return user.decode('iso-8859-1')
    except AttributeError:
        # Already a text string
        return user

def get_current_user(request):
    hdr = request.headers.get("Authorization")
    auth_hdr = (hdr.decode('utf-8')
                if isinstance(hdr, bytes)
                else hdr)
    auth = werkzeug.http.parse_authorization_header(auth_hdr)
    if auth is None:
        api.abort(401)
    return auth.username

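# A minimal sketch (not part of the original module) of what the
# werkzeug call above does: parse_authorization_header turns a Basic
# auth header into an Authorization object exposing .username and
# .password, returning None when the header is absent or malformed.
# The function exists in the werkzeug releases this code targets,
# though newer releases deprecate it. The credentials are made up.
import base64
import werkzeug.http

header = "Basic " + base64.b64encode(b"alice:secret").decode()
auth = werkzeug.http.parse_authorization_header(header)
assert auth.username == "alice" and auth.password == "secret"
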
def OperationsSchema(v):
    if isinstance(v, six.text_type):
        try:
            v = pyparsing.OneOrMore(
                pyparsing.nestedExpr()).parseString(v).asList()[0]
        except pyparsing.ParseException as e:
            api.abort(400, {"cause": "Invalid operations",
                            "reason": "Failed to parse the operations "
                                      "string",
                            "detail": six.text_type(e)})
    return voluptuous.Schema(voluptuous.Any(*OperationsSchemaBase),
                             required=True)(v)

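# A quick illustration (not part of the original module) of the
# pyparsing expression above: nestedExpr() parses a lisp-like string of
# nested parentheses into nested Python lists, which the voluptuous
# schema then validates. The operations string here is a hypothetical
# example.
import pyparsing

parsed = pyparsing.OneOrMore(
    pyparsing.nestedExpr()).parseString(
        "(aggregate mean (metric cpu.util mean))").asList()[0]
assert parsed == ['aggregate', 'mean', ['metric', 'cpu.util', 'mean']]
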
def _write_get_lines():
    encoding = pecan.request.headers.get('Transfer-Encoding', "").lower()
    if encoding == "chunked":
        # TODO(sileht): Support reading chunks without uwsgi when
        # pecan.request.environ['wsgi.input_terminated'] is set.
        # https://github.com/unbit/uwsgi/issues/1428
        if uwsgi is None:
            api.abort(501, {"cause": "Not implemented error",
                            "reason": "This server is not running with "
                                      "uwsgi"})
        return encoding, uwsgi.chunked_read()
    return None, pecan.request.body

def get_resource_policy_filter(request, rule, resource_type, prefix=None):
    try:
        # Check if the policy allows the user to list any resource
        api.enforce(rule, {"resource_type": resource_type})
    except webob.exc.HTTPForbidden:
        policy_filter = []
        project_id = request.headers.get("X-Project-Id")
        target = {}
        if prefix:
            resource = target[prefix] = {}
        else:
            resource = target
        resource["resource_type"] = resource_type

        resource["project_id"] = project_id
        try:
            # Check if the policy allows the user to list resources
            # linked to their project
            api.enforce(rule, target)
        except webob.exc.HTTPForbidden:
            pass
        else:
            policy_filter.append({"=": {"project_id": project_id}})

        del resource["project_id"]
        resource["created_by_project_id"] = project_id
        try:
            # Check if the policy allows the user to list resources
            # linked to their created_by_project
            api.enforce(rule, target)
        except webob.exc.HTTPForbidden:
            pass
        else:
            if project_id:
                policy_filter.append(
                    {"like": {"creator": "%:" + project_id}})
            else:
                policy_filter.append({"=": {"creator": None}})

        if not policy_filter:
            # We need to have at least one policy filter in place
            api.abort(403, "Insufficient privileges")

        return {"or": policy_filter}

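# For reference (not in the original module): a sketch of the filter
# shape get_resource_policy_filter can return when the caller may only
# list resources tied to their own project. The project id is made up.
example_policy_filter = {
    "or": [
        {"=": {"project_id": "abc123"}},     # resources of the project
        {"like": {"creator": "%:abc123"}},   # resources it created
    ]
}
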
def post_query(self, q=None):
    if q is not None:
        try:
            query = query_parser.parseString(q)
        except pyparsing.ParseException:
            api.abort(501, {"cause": "Not implemented error",
                            "detail": "q",
                            "reason": "Query not implemented"})
        resource_type = query[0]
        api.enforce("create resource type", {"name": resource_type})
        schema = pecan.request.indexer.get_resource_type_schema()
        rt = schema.resource_type_from_dict(resource_type, {}, 'creating')
        try:
            pecan.request.indexer.create_resource_type(rt)
        except indexer.ResourceTypeAlreadyExists:
            pass
    pecan.response.status = 204

def get_measures_or_abort(metrics_and_aggregations, operations, start,
                          stop, granularity, needed_overlap, fill,
                          ref_identifier):
    try:
        return processor.get_measures(
            pecan.request.storage,
            metrics_and_aggregations,
            operations,
            start, stop,
            granularity, needed_overlap, fill,
            ref_identifier=ref_identifier)
    except exceptions.UnAggregableTimeseries as e:
        api.abort(400, e)
    # TODO(sileht): We currently get only one metric for these
    # exceptions, but we could improve the processor to return all
    # missing metrics at once; we already return a list here for that
    # future.
    except storage.MetricDoesNotExist as e:
        api.abort(404, {"cause": "Unknown metrics",
                        "detail": [str(e.metric.id)]})
    except storage.AggregationDoesNotExist as e:
        api.abort(404, {"cause": "Metrics with unknown aggregation",
                        "detail": [(str(e.metric.id), e.method)]})

def _get_measures_by_name(self, resources, metric_names, operations,
                          start, stop, granularity, needed_overlap, fill):
    metrics_and_aggregations = list(filter(
        lambda x: x[0] is not None,
        ([r.get_metric(metric_name), agg]
         for (metric_name, agg) in metric_names
         for r in resources)))

    if not metrics_and_aggregations:
        api.abort(400, {"cause": "Metrics not found",
                        "detail": set((m for (m, a) in metric_names))})

    return get_measures_or_abort(
        metrics_and_aggregations, operations, start, stop,
        granularity, needed_overlap, fill, ref_identifier="name")

def get_metric_policy_filter(request, rule):
    try:
        # Check if the policy allows the user to list any metric
        api.enforce(rule, {})
    except webob.exc.HTTPForbidden:
        policy_filter = []
        project_id = request.headers.get("X-Project-Id")
        try:
            # Check if the policy allows the user to list metrics linked
            # to their created_by_project
            api.enforce(rule, {"created_by_project_id": project_id})
        except webob.exc.HTTPForbidden:
            pass
        else:
            policy_filter.append({"like": {"creator": "%:" + project_id}})

        if not policy_filter:
            # We need to have at least one policy filter in place
            api.abort(403, "Insufficient privileges")

        return {"or": policy_filter}

def get_measures_or_abort(references, operations, start,
                          stop, granularity, needed_overlap, fill):
    try:
        return processor.get_measures(
            pecan.request.storage,
            references,
            operations,
            start, stop,
            granularity, needed_overlap, fill)
    except exceptions.UnAggregableTimeseries as e:
        api.abort(400, e)
    # TODO(sileht): We currently get only one metric for these
    # exceptions, but we could improve the processor to return all
    # missing metrics at once; we already return a list here for that
    # future.
    except storage.MetricDoesNotExist as e:
        api.abort(404, {"cause": "Unknown metrics",
                        "detail": [str(e.metric.id)]})
    except storage.AggregationDoesNotExist as e:
        api.abort(404, {"cause": "Metrics with unknown aggregation",
                        "detail": [(str(e.metric.id), e.method)]})

def ResourceTypeSchema(resource_type):
    try:
        pecan.request.indexer.get_resource_type(resource_type)
    except indexer.NoSuchResourceType as e:
        api.abort(400, e)
    return resource_type

def post(self, start=None, stop=None, granularity=None,
         needed_overlap=None, fill=None, groupby=None, **kwargs):
    details = api.get_bool_param('details', kwargs)

    if fill is None and needed_overlap is None:
        fill = "dropna"
    start, stop, granularity, needed_overlap, fill = api.validate_qs(
        start, stop, granularity, needed_overlap, fill)

    body = api.deserialize_and_validate(self.FetchSchema)

    references = extract_references(body["operations"])
    if not references:
        api.abort(400, {"cause": "Operations is invalid",
                        "reason": "At least one 'metric' is required",
                        "detail": body["operations"]})

    if "resource_type" in body:
        attr_filter = body["search"]
        policy_filter = (
            pecan.request.auth_helper.get_resource_policy_filter(
                pecan.request, "search resource", body["resource_type"]))
        if policy_filter:
            if attr_filter:
                attr_filter = {"and": [policy_filter, attr_filter]}
            else:
                attr_filter = policy_filter

        groupby = sorted(set(api.arg_to_list(groupby)))
        sorts = groupby if groupby else api.RESOURCE_DEFAULT_PAGINATION
        try:
            resources = pecan.request.indexer.list_resources(
                body["resource_type"],
                attribute_filter=attr_filter,
                sorts=sorts)
        except indexer.IndexerException as e:
            api.abort(400, six.text_type(e))

        if not groupby:
            return self._get_measures_by_name(
                resources, references, body["operations"], start, stop,
                granularity, needed_overlap, fill, details=details)

        def groupper(r):
            return tuple((attr, r[attr]) for attr in groupby)

        results = []
        for key, resources in itertools.groupby(resources, groupper):
            results.append({
                "group": dict(key),
                "measures": self._get_measures_by_name(
                    resources, references, body["operations"], start,
                    stop, granularity, needed_overlap, fill,
                    details=details)
            })
        return results

    else:
        try:
            metric_ids = set(six.text_type(utils.UUID(m))
                             for (m, a) in references)
        except ValueError as e:
            api.abort(400, {"cause": "Invalid metric references",
                            "reason": six.text_type(e),
                            "detail": references})

        metrics = pecan.request.indexer.list_metrics(
            attribute_filter={"in": {"id": metric_ids}})
        missing_metric_ids = (set(metric_ids)
                              - set(six.text_type(m.id) for m in metrics))
        if missing_metric_ids:
            api.abort(404, {"cause": "Unknown metrics",
                            "reason": "Provided metrics don't exist",
                            "detail": missing_metric_ids})

        number_of_metrics = len(metrics)
        if number_of_metrics == 0:
            return []

        for metric in metrics:
            api.enforce("get metric", metric)

        metrics_by_ids = dict((six.text_type(m.id), m) for m in metrics)
        references = [processor.MetricReference(metrics_by_ids[m], a)
                      for (m, a) in references]

        response = {
            "measures": get_measures_or_abort(
                references, body["operations"], start, stop, granularity,
                needed_overlap, fill)
        }
        if details:
            response["references"] = metrics

        return response

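# A standalone sketch (hypothetical data, not in the original module)
# of the grouping step above: itertools.groupby only merges *adjacent*
# items, which is why list_resources is called with sorts=groupby
# before grouping.
import itertools

groupby = ["flavor"]
resources = [{"flavor": "m1"}, {"flavor": "m1"}, {"flavor": "m2"}]

def groupper(r):
    return tuple((attr, r[attr]) for attr in groupby)

grouped = [(dict(key), len(list(group)))
           for key, group in itertools.groupby(resources, groupper)]
assert grouped == [({"flavor": "m1"}, 2), ({"flavor": "m2"}, 1)]
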
def get_current_user(request):
    user = request.remote_user
    if user is None:
        api.abort(401)
    return user.decode('iso-8859-1')

def get_current_user(request):
    auth = werkzeug.http.parse_authorization_header(
        request.headers.get("Authorization"))
    if auth is None:
        api.abort(401)
    return auth.username

def post_write(self, db="influxdb"): creator = pecan.request.auth_helper.get_current_user(pecan.request) tag_to_rid = pecan.request.headers.get( "X-Gnocchi-InfluxDB-Tag-Resource-ID", self.DEFAULT_TAG_RESOURCE_ID) while True: encoding, chunk = self._write_get_lines() # If chunk is empty then this is over. if not chunk: break # Compute now on a per-chunk basis now = numpy.datetime64(int(time.time() * 10e8), 'ns') # resources = { resource_id: { # metric_name: [ incoming.Measure(t, v), …], … # }, … # } resources = collections.defaultdict( lambda: collections.defaultdict(list)) for line_number, line in enumerate(chunk.split(b"\n")): # Ignore empty lines if not line: continue try: measurement, tags, fields, timestamp = ( line_protocol.parseString(line.decode()) ) except (UnicodeDecodeError, SyntaxError, pyparsing.ParseException): api.abort(400, { "cause": "Value error", "detail": "line", "reason": "Unable to parse line %d" % ( line_number + 1), }) if timestamp is None: timestamp = now try: resource_id = tags.pop(tag_to_rid) except KeyError: api.abort(400, { "cause": "Value error", "detail": "key", "reason": "Unable to find key `%s' in tags" % ( tag_to_rid), }) tags_str = (("@" if tags else "") + ",".join(("%s=%s" % (k, tags[k])) for k in sorted(tags))) for field_name, field_value in six.iteritems(fields): if isinstance(field_value, str): # We do not support field value that are not numerical continue # Metric name is the: # <measurement>.<field_key>@<tag_key>=<tag_value>,… # with tag ordered # Replace "/" with "_" because Gnocchi does not support / # in metric names metric_name = ( measurement + "." + field_name + tags_str ).replace("/", "_") resources[resource_id][metric_name].append( incoming.Measure(timestamp, field_value)) measures_to_batch = {} for resource_name, metrics_and_measures in six.iteritems( resources): resource_name = resource_name resource_id = utils.ResourceUUID( resource_name, creator=creator) LOG.debug("Getting metrics from resource `%s'", resource_name) timeout = pecan.request.conf.api.operation_timeout metrics = ( api.get_or_create_resource_and_metrics.retry_with( stop=tenacity.stop_after_delay(timeout))( creator, resource_id, resource_name, metrics_and_measures.keys(), {}, db) ) for metric in metrics: api.enforce("post measures", metric) measures_to_batch.update( dict((metric.id, metrics_and_measures[metric.name]) for metric in metrics if metric.name in metrics_and_measures)) LOG.debug("Add measures batch for %d metrics", len(measures_to_batch)) pecan.request.incoming.add_measures_batch(measures_to_batch) pecan.response.status = 204 if encoding != "chunked": return
def post_write(self, db="influxdb"): creator = pecan.request.auth_helper.get_current_user(pecan.request) tag_to_rid = pecan.request.headers.get( "X-Gnocchi-InfluxDB-Tag-Resource-ID", self.DEFAULT_TAG_RESOURCE_ID) while True: encoding, chunk = self._write_get_lines() # If chunk is empty then this is over. if not chunk: break # Compute now on a per-chunk basis now = numpy.datetime64(int(time.time() * 10e8), 'ns') # resources = { resource_id: { # metric_name: [ incoming.Measure(t, v), …], … # }, … # } resources = collections.defaultdict( lambda: collections.defaultdict(list)) for line_number, line in enumerate(chunk.split(b"\n")): # Ignore empty lines if not line: continue try: measurement, tags, fields, timestamp = ( line_protocol.parseString(line.decode())) except (UnicodeDecodeError, SyntaxError, pyparsing.ParseException): api.abort( 400, { "cause": "Value error", "detail": "line", "reason": "Unable to parse line %d" % (line_number + 1), }) if timestamp is None: timestamp = now try: resource_id = tags.pop(tag_to_rid) except KeyError: api.abort( 400, { "cause": "Value error", "detail": "key", "reason": "Unable to find key `%s' in tags" % (tag_to_rid), }) tags_str = (("@" if tags else "") + ",".join( ("%s=%s" % (k, tags[k])) for k in sorted(tags))) for field_name, field_value in six.iteritems(fields): if isinstance(field_value, str): # We do not support field value that are not numerical continue # Metric name is the: # <measurement>.<field_key>@<tag_key>=<tag_value>,… # with tag ordered # Replace "/" with "_" because Gnocchi does not support / # in metric names metric_name = (measurement + "." + field_name + tags_str).replace("/", "_") resources[resource_id][metric_name].append( incoming.Measure(timestamp, field_value)) measures_to_batch = {} for resource_name, metrics_and_measures in six.iteritems( resources): resource_name = resource_name resource_id = utils.ResourceUUID(resource_name, creator=creator) LOG.debug("Getting metrics from resource `%s'", resource_name) timeout = pecan.request.conf.api.operation_timeout metrics = (api.get_or_create_resource_and_metrics.retry_with( stop=tenacity.stop_after_delay(timeout))( creator, resource_id, resource_name, metrics_and_measures.keys(), {}, db)) for metric in metrics: api.enforce("post measures", metric) measures_to_batch.update( dict((metric.id, metrics_and_measures[metric.name]) for metric in metrics if metric.name in metrics_and_measures)) LOG.debug("Add measures batch for %d metrics", len(measures_to_batch)) pecan.request.incoming.add_measures_batch(measures_to_batch) pecan.response.status = 204 if encoding != "chunked": return