def post_query(self, q=None):
    """Minimal InfluxDB-compatibility query endpoint.

    Parses *q* with ``query_parser`` and creates a resource type named
    after the first parsed token (presumably the database name of a
    ``CREATE DATABASE`` query — the only supported form).  Unparsable
    queries are rejected with HTTP 501; otherwise the endpoint always
    answers 204 with no body.
    """
    if q is None:
        # Nothing to do, but clients still expect an empty 204.
        pecan.response.status = 204
        return
    try:
        parsed = query_parser.parseString(q)
    except pyparsing.ParseException:
        api.abort(501, {"cause": "Not implemented error",
                        "detail": "q",
                        "reason": "Query not implemented"})
    type_name = parsed[0]
    api.enforce("create resource type", {"name": type_name})
    type_schema = pecan.request.indexer.get_resource_type_schema()
    new_type = type_schema.resource_type_from_dict(type_name, {}, 'creating')
    try:
        pecan.request.indexer.create_resource_type(new_type)
    except indexer.ResourceTypeAlreadyExists:
        pass  # already present — creation is idempotent
    pecan.response.status = 204
def post_query(self, q=None):
    """Handle an InfluxDB-style ``POST /query`` request.

    Only queries understood by ``query_parser`` are supported; the first
    parsed token is used as a resource type name to create (presumably a
    ``CREATE DATABASE`` statement — TODO confirm against query_parser).
    Any other query is rejected with HTTP 501.  Responds 204 on success.
    """
    if q is not None:
        try:
            query = query_parser.parseString(q)
        except pyparsing.ParseException:
            api.abort(
                501, {
                    "cause": "Not implemented error",
                    "detail": "q",
                    "reason": "Query not implemented"
                })
        # First parsed token names the resource type to create.
        resource_type = query[0]
        api.enforce("create resource type", {"name": resource_type})
        schema = pecan.request.indexer.get_resource_type_schema()
        rt = schema.resource_type_from_dict(resource_type, {}, 'creating')
        try:
            pecan.request.indexer.create_resource_type(rt)
        except indexer.ResourceTypeAlreadyExists:
            # Re-creating an existing type is not an error (idempotent).
            pass
    # Empty body + 204 is what InfluxDB clients expect.
    pecan.response.status = 204
def get_metric_policy_filter(request, rule):
    """Build the metric-listing filter the policy *rule* allows.

    Returns ``None`` when the caller may list every metric, an
    ``{"or": [...]}`` attribute filter when the listing must be
    restricted, and aborts with HTTP 403 when nothing is permitted.
    """
    try:
        # Fast path: unrestricted listing is allowed.
        api.enforce(rule, {})
        return None
    except webob.exc.HTTPForbidden:
        pass

    allowed_filters = []
    project_id = request.headers.get("X-Project-Id")
    try:
        # May the caller list metrics created by their own project?
        api.enforce(rule, {"created_by_project_id": project_id})
    except webob.exc.HTTPForbidden:
        pass
    else:
        # Creator values look like "<user>:<project>"; match the
        # project half.
        allowed_filters.append({"like": {"creator": "%:" + project_id}})

    if not allowed_filters:
        # No rule matched at all — the caller can see nothing.
        api.abort(403, "Insufficient privileges")
    return {"or": allowed_filters}
def get_metric_policy_filter(request, rule):
    """Return a metric listing filter enforcing *rule* for the caller.

    :param request: the WSGI/pecan request the policy is evaluated for.
    :param rule: name of the policy rule to enforce.
    :return: ``None`` when the caller may list any metric, otherwise an
        ``{"or": [...]}`` attribute filter restricting the listing.
    :raises: aborts with HTTP 403 when the caller may list nothing.
    """
    try:
        # Check if the policy allows the user to list any metric
        api.enforce(rule, {})
    except webob.exc.HTTPForbidden:
        policy_filter = []
        project_id = request.headers.get("X-Project-Id")
        try:
            # Check if the policy allows the user to list metrics linked
            # to their created_by_project
            api.enforce(rule, {
                "created_by_project_id": project_id,
            })
        except webob.exc.HTTPForbidden:
            pass
        else:
            # Creator is stored as "<user>:<project>"; match the
            # project part.
            policy_filter.append(
                {"like": {"creator": "%:" + project_id}})

        if not policy_filter:
            # We need to have at least one policy filter in place
            api.abort(403, "Insufficient privileges")

        return {"or": policy_filter}
    # Unrestricted access: fall through and implicitly return None.
def get_resource_policy_filter(request, rule, resource_type, prefix=None):
    """Build the resource-listing filter the policy *rule* allows.

    Returns ``None`` when the caller may list every resource of
    *resource_type*, an ``{"or": [...]}`` attribute filter when the
    listing must be restricted, and aborts with HTTP 403 when nothing is
    permitted.  *prefix*, when given, nests the resource attributes
    under that key in the policy target.
    """
    try:
        # Fast path: unrestricted listing is allowed.
        api.enforce(rule, {"resource_type": resource_type})
        return None
    except webob.exc.HTTPForbidden:
        pass

    allowed_filters = []
    project_id = request.headers.get("X-Project-Id")
    target = {}
    resource = target.setdefault(prefix, {}) if prefix else target
    resource["resource_type"] = resource_type

    # May the caller list resources owned by their project?
    resource["project_id"] = project_id
    try:
        api.enforce(rule, target)
    except webob.exc.HTTPForbidden:
        pass
    else:
        allowed_filters.append({"=": {"project_id": project_id}})

    # May the caller list resources created by their project?
    del resource["project_id"]
    resource["created_by_project_id"] = project_id
    try:
        api.enforce(rule, target)
    except webob.exc.HTTPForbidden:
        pass
    else:
        if project_id:
            # Creator values look like "<user>:<project>".
            allowed_filters.append(
                {"like": {"creator": "%:" + project_id}})
        else:
            allowed_filters.append({"=": {"creator": None}})

    if not allowed_filters:
        # No rule matched at all — the caller can see nothing.
        api.abort(403, "Insufficient privileges")
    return {"or": allowed_filters}
def get_resource_policy_filter(request, rule, resource_type, prefix=None):
    """Return a resource listing filter enforcing *rule* for the caller.

    :param request: the WSGI/pecan request carrying the caller's headers.
    :param rule: name of the policy rule to enforce.
    :param resource_type: type of the resources being listed.
    :param prefix: optional key under which the resource attributes are
        nested inside the policy target.
    :return: ``None`` when the caller may list any resource, otherwise
        an ``{"or": [...]}`` attribute filter.
    :raises: aborts with HTTP 403 when the caller may list nothing.
    """
    try:
        # Check if the policy allows the user to list any resource
        api.enforce(rule, {
            "resource_type": resource_type,
        })
    except webob.exc.HTTPForbidden:
        policy_filter = []
        project_id = request.headers.get("X-Project-Id")
        target = {}
        if prefix:
            # Nest the resource attributes under the given prefix.
            resource = target[prefix] = {}
        else:
            resource = target
        resource["resource_type"] = resource_type

        resource["project_id"] = project_id
        try:
            # Check if the policy allows the user to list resources linked
            # to their project
            api.enforce(rule, target)
        except webob.exc.HTTPForbidden:
            pass
        else:
            policy_filter.append({"=": {"project_id": project_id}})

        del resource["project_id"]
        resource["created_by_project_id"] = project_id
        try:
            # Check if the policy allows the user to list resources linked
            # to their created_by_project
            api.enforce(rule, target)
        except webob.exc.HTTPForbidden:
            pass
        else:
            if project_id:
                # Creator is stored as "<user>:<project>".
                policy_filter.append(
                    {"like": {"creator": "%:" + project_id}})
            else:
                policy_filter.append({"=": {"creator": None}})

        if not policy_filter:
            # We need to have at least one policy filter in place
            api.abort(403, "Insufficient privileges")

        return {"or": policy_filter}
    # Unrestricted access: fall through and implicitly return None.
def post(self, start=None, stop=None, granularity=None,
         needed_overlap=None, fill=None, groupby=None, **kwargs):
    """Compute aggregated measures for the operations in the request body.

    Two modes, selected by the validated body:

    * ``resource_type`` present — metrics are looked up by name on the
      resources matching ``search`` (restricted by the caller's policy),
      optionally grouped by the ``groupby`` attributes.
    * otherwise — the references extracted from ``operations`` are raw
      metric UUIDs fetched directly from the indexer.

    Aborts with 400 on invalid operations/references or indexer errors,
    404 when a referenced metric does not exist.
    """
    details = api.get_bool_param('details', kwargs)

    # With neither given, default to dropping unaligned points rather
    # than requiring an overlap percentage.
    if fill is None and needed_overlap is None:
        fill = "dropna"
    start, stop, granularity, needed_overlap, fill = api.validate_qs(
        start, stop, granularity, needed_overlap, fill)

    body = api.deserialize_and_validate(self.FetchSchema)

    references = extract_references(body["operations"])
    if not references:
        api.abort(
            400, {
                "cause": "Operations is invalid",
                "reason": "At least one 'metric' is required",
                "detail": body["operations"]
            })

    if "resource_type" in body:
        # Resource mode: search resources, then aggregate their metrics
        # by name.
        attr_filter = body["search"]
        # Restrict the search to what policy allows; a falsy filter
        # means unrestricted access.
        policy_filter = (
            pecan.request.auth_helper.get_resource_policy_filter(
                pecan.request, "search resource", body["resource_type"]))
        if policy_filter:
            if attr_filter:
                attr_filter = {"and": [policy_filter, attr_filter]}
            else:
                attr_filter = policy_filter

        groupby = sorted(set(api.arg_to_list(groupby)))
        # Sorting by the groupby attributes makes equal keys contiguous,
        # which itertools.groupby below requires.
        sorts = groupby if groupby else api.RESOURCE_DEFAULT_PAGINATION
        try:
            resources = pecan.request.indexer.list_resources(
                body["resource_type"],
                attribute_filter=attr_filter,
                sorts=sorts)
        except indexer.IndexerException as e:
            api.abort(400, six.text_type(e))
        if not groupby:
            return self._get_measures_by_name(resources, references,
                                              body["operations"], start,
                                              stop, granularity,
                                              needed_overlap, fill,
                                              details=details)

        def groupper(r):
            # Group key: the resource's (attribute, value) pairs.
            return tuple((attr, r[attr]) for attr in groupby)

        results = []
        for key, resources in itertools.groupby(resources, groupper):
            results.append({
                "group": dict(key),
                "measures": self._get_measures_by_name(
                    resources, references, body["operations"], start,
                    stop, granularity, needed_overlap, fill,
                    details=details)
            })
        return results
    else:
        # Metric mode: every reference must be a valid metric UUID.
        try:
            metric_ids = set(
                six.text_type(utils.UUID(m)) for (m, a) in references)
        except ValueError as e:
            api.abort(
                400, {
                    "cause": "Invalid metric references",
                    "reason": six.text_type(e),
                    "detail": references
                })

        metrics = pecan.request.indexer.list_metrics(
            attribute_filter={"in": {"id": metric_ids}})
        missing_metric_ids = (set(metric_ids) -
                              set(six.text_type(m.id) for m in metrics))
        if missing_metric_ids:
            api.abort(
                404, {
                    "cause": "Unknown metrics",
                    "reason": "Provided metrics don't exists",
                    "detail": missing_metric_ids
                })

        number_of_metrics = len(metrics)
        if number_of_metrics == 0:
            return []
        for metric in metrics:
            api.enforce("get metric", metric)

        # Re-map the (uuid, aggregation) references onto indexed metric
        # objects.
        metrics_by_ids = dict((six.text_type(m.id), m) for m in metrics)
        references = [
            processor.MetricReference(metrics_by_ids[m], a)
            for (m, a) in references
        ]

        response = {
            "measures": get_measures_or_abort(references,
                                              body["operations"], start,
                                              stop, granularity,
                                              needed_overlap, fill)
        }
        if details:
            response["references"] = metrics
        return response
def post_write(self, db="influxdb"):
    """Ingest measures posted via the InfluxDB line protocol.

    Reads the (possibly chunked) request body chunk by chunk, parses
    each line-protocol line, maps every
    ``<measurement>.<field>@<tags>`` of each resource to a Gnocchi
    metric, and batches the resulting measures into incoming storage.

    :param db: logical database name, used when creating resources and
        metrics (defaults to "influxdb").
    """
    creator = pecan.request.auth_helper.get_current_user(pecan.request)
    # A header may override which tag carries the Gnocchi resource id.
    tag_to_rid = pecan.request.headers.get(
        "X-Gnocchi-InfluxDB-Tag-Resource-ID",
        self.DEFAULT_TAG_RESOURCE_ID)

    while True:
        encoding, chunk = self._write_get_lines()

        # If chunk is empty then this is over.
        if not chunk:
            break

        # Compute now on a per-chunk basis
        # (10e8 == 1e9, i.e. seconds -> nanoseconds since epoch).
        now = numpy.datetime64(int(time.time() * 10e8), 'ns')

        # resources = { resource_id: {
        #     metric_name: [ incoming.Measure(t, v), …], …
        # }, … }
        resources = collections.defaultdict(
            lambda: collections.defaultdict(list))
        for line_number, line in enumerate(chunk.split(b"\n")):
            # Ignore empty lines
            if not line:
                continue

            try:
                measurement, tags, fields, timestamp = (
                    line_protocol.parseString(line.decode()))
            except (UnicodeDecodeError, SyntaxError,
                    pyparsing.ParseException):
                api.abort(
                    400, {
                        "cause": "Value error",
                        "detail": "line",
                        "reason": "Unable to parse line %d" %
                        (line_number + 1),
                    })

            # Lines without an explicit timestamp use the chunk time.
            if timestamp is None:
                timestamp = now

            try:
                resource_id = tags.pop(tag_to_rid)
            except KeyError:
                api.abort(
                    400, {
                        "cause": "Value error",
                        "detail": "key",
                        "reason": "Unable to find key `%s' in tags" %
                        (tag_to_rid),
                    })

            # Sort tags by key so an identical tag set always yields the
            # same metric-name suffix.
            tags_str = (("@" if tags else "") + ",".join(
                ("%s=%s" % (k, tags[k])) for k in sorted(tags)))

            for field_name, field_value in six.iteritems(fields):
                if isinstance(field_value, str):
                    # We do not support field value that are not numerical
                    continue

                # Metric name is the:
                # <measurement>.<field_key>@<tag_key>=<tag_value>,…
                # with tag ordered
                # Replace "/" with "_" because Gnocchi does not support /
                # in metric names
                metric_name = (measurement + "."
                               + field_name + tags_str).replace("/", "_")
                resources[resource_id][metric_name].append(
                    incoming.Measure(timestamp, field_value))

        measures_to_batch = {}
        for resource_name, metrics_and_measures in six.iteritems(
                resources):
            # NOTE(review): no-op self-assignment, likely leftover code.
            resource_name = resource_name
            resource_id = utils.ResourceUUID(resource_name,
                                             creator=creator)
            LOG.debug("Getting metrics from resource `%s'", resource_name)
            timeout = pecan.request.conf.api.operation_timeout
            # Create the resource and any missing metrics, retrying until
            # the configured operation timeout elapses.
            metrics = (api.get_or_create_resource_and_metrics.retry_with(
                stop=tenacity.stop_after_delay(timeout))(
                    creator, resource_id, resource_name,
                    metrics_and_measures.keys(), {}, db))

            for metric in metrics:
                api.enforce("post measures", metric)

            measures_to_batch.update(
                dict((metric.id, metrics_and_measures[metric.name])
                     for metric in metrics
                     if metric.name in metrics_and_measures))

        LOG.debug("Add measures batch for %d metrics",
                  len(measures_to_batch))
        pecan.request.incoming.add_measures_batch(measures_to_batch)

        pecan.response.status = 204

        # A non-chunked body arrives as a single chunk; we are done.
        if encoding != "chunked":
            return
def post_write(self, db="influxdb"):
    """Ingest measures posted via the InfluxDB line protocol.

    Reads the (possibly chunked) request body chunk by chunk, parses
    each line-protocol line, maps every
    ``<measurement>.<field>@<tags>`` of each resource to a Gnocchi
    metric, and batches the resulting measures into incoming storage.

    Fix: removed the dead self-assignment ``resource_name =
    resource_name`` that the original carried in the batching loop.

    :param db: logical database name, used when creating resources and
        metrics (defaults to "influxdb").
    """
    creator = pecan.request.auth_helper.get_current_user(pecan.request)
    # A header may override which tag carries the Gnocchi resource id.
    tag_to_rid = pecan.request.headers.get(
        "X-Gnocchi-InfluxDB-Tag-Resource-ID",
        self.DEFAULT_TAG_RESOURCE_ID)

    while True:
        encoding, chunk = self._write_get_lines()

        # If chunk is empty then this is over.
        if not chunk:
            break

        # Compute now on a per-chunk basis
        # (10e8 == 1e9, i.e. seconds -> nanoseconds since epoch).
        now = numpy.datetime64(int(time.time() * 10e8), 'ns')

        # resources = { resource_id: {
        #     metric_name: [ incoming.Measure(t, v), …], …
        # }, … }
        resources = collections.defaultdict(
            lambda: collections.defaultdict(list))
        for line_number, line in enumerate(chunk.split(b"\n")):
            # Ignore empty lines
            if not line:
                continue

            try:
                measurement, tags, fields, timestamp = (
                    line_protocol.parseString(line.decode()))
            except (UnicodeDecodeError, SyntaxError,
                    pyparsing.ParseException):
                api.abort(400, {
                    "cause": "Value error",
                    "detail": "line",
                    "reason": "Unable to parse line %d" % (
                        line_number + 1),
                })

            # Lines without an explicit timestamp use the chunk time.
            if timestamp is None:
                timestamp = now

            try:
                resource_id = tags.pop(tag_to_rid)
            except KeyError:
                api.abort(400, {
                    "cause": "Value error",
                    "detail": "key",
                    "reason": "Unable to find key `%s' in tags" % (
                        tag_to_rid),
                })

            # Sort tags by key so an identical tag set always yields the
            # same metric-name suffix.
            tags_str = (("@" if tags else "")
                        + ",".join(("%s=%s" % (k, tags[k]))
                                   for k in sorted(tags)))

            for field_name, field_value in six.iteritems(fields):
                if isinstance(field_value, str):
                    # We do not support field value that are not numerical
                    continue

                # Metric name is the:
                # <measurement>.<field_key>@<tag_key>=<tag_value>,…
                # with tag ordered
                # Replace "/" with "_" because Gnocchi does not support /
                # in metric names
                metric_name = (
                    measurement + "." + field_name + tags_str
                ).replace("/", "_")
                resources[resource_id][metric_name].append(
                    incoming.Measure(timestamp, field_value))

        measures_to_batch = {}
        for resource_name, metrics_and_measures in six.iteritems(
                resources):
            resource_id = utils.ResourceUUID(
                resource_name, creator=creator)
            LOG.debug("Getting metrics from resource `%s'", resource_name)
            timeout = pecan.request.conf.api.operation_timeout
            # Create the resource and any missing metrics, retrying until
            # the configured operation timeout elapses.
            metrics = (
                api.get_or_create_resource_and_metrics.retry_with(
                    stop=tenacity.stop_after_delay(timeout))(
                        creator, resource_id, resource_name,
                        metrics_and_measures.keys(), {}, db)
            )

            for metric in metrics:
                api.enforce("post measures", metric)

            measures_to_batch.update(
                {metric.id: metrics_and_measures[metric.name]
                 for metric in metrics
                 if metric.name in metrics_and_measures})

        LOG.debug("Add measures batch for %d metrics",
                  len(measures_to_batch))
        pecan.request.incoming.add_measures_batch(measures_to_batch)

        pecan.response.status = 204

        # A non-chunked body arrives as a single chunk; we are done.
        if encoding != "chunked":
            return
def post(self, start=None, stop=None, granularity=None,
         needed_overlap=None, fill=None, groupby=None, **kwargs):
    """Return aggregated measures for the operations given in the body.

    When the body carries ``resource_type``, the referenced metrics are
    resolved by name on the resources matching ``search`` (restricted by
    the caller's policy) and may be grouped by ``groupby`` attributes;
    otherwise the references are treated as raw metric UUIDs.

    Aborts 400 on invalid operations/references or indexer failures and
    404 on unknown metric ids.
    """
    details = api.get_bool_param('details', kwargs)

    # Default to dropping unaligned points when neither knob is set.
    if fill is None and needed_overlap is None:
        fill = "dropna"
    start, stop, granularity, needed_overlap, fill = api.validate_qs(
        start, stop, granularity, needed_overlap, fill)

    body = api.deserialize_and_validate(self.FetchSchema)

    references = extract_references(body["operations"])
    if not references:
        api.abort(400, {"cause": "Operations is invalid",
                        "reason": "At least one 'metric' is required",
                        "detail": body["operations"]})

    if "resource_type" in body:
        # Resource mode: combine the caller's search filter with the
        # policy-derived one (a falsy policy filter means no restriction).
        attr_filter = body["search"]
        policy_filter = (
            pecan.request.auth_helper.get_resource_policy_filter(
                pecan.request, "search resource", body["resource_type"]))
        if policy_filter:
            if attr_filter:
                attr_filter = {"and": [
                    policy_filter,
                    attr_filter
                ]}
            else:
                attr_filter = policy_filter

        groupby = sorted(set(api.arg_to_list(groupby)))
        # Sorting by the group keys keeps equal keys contiguous for
        # itertools.groupby below.
        sorts = groupby if groupby else api.RESOURCE_DEFAULT_PAGINATION
        try:
            resources = pecan.request.indexer.list_resources(
                body["resource_type"],
                attribute_filter=attr_filter,
                sorts=sorts)
        except indexer.IndexerException as e:
            api.abort(400, six.text_type(e))
        if not groupby:
            return self._get_measures_by_name(
                resources, references, body["operations"], start, stop,
                granularity, needed_overlap, fill, details=details)

        def groupper(r):
            # Group key: the resource's (attribute, value) pairs.
            return tuple((attr, r[attr]) for attr in groupby)

        results = []
        for key, resources in itertools.groupby(resources, groupper):
            results.append({
                "group": dict(key),
                "measures": self._get_measures_by_name(
                    resources, references, body["operations"],
                    start, stop, granularity, needed_overlap, fill,
                    details=details)
            })
        return results
    else:
        # Metric mode: every reference must be a valid metric UUID.
        try:
            metric_ids = set(six.text_type(utils.UUID(m))
                             for (m, a) in references)
        except ValueError as e:
            api.abort(400, {"cause": "Invalid metric references",
                            "reason": six.text_type(e),
                            "detail": references})

        metrics = pecan.request.indexer.list_metrics(
            attribute_filter={"in": {"id": metric_ids}})
        missing_metric_ids = (set(metric_ids)
                              - set(six.text_type(m.id) for m in metrics))
        if missing_metric_ids:
            api.abort(404, {"cause": "Unknown metrics",
                            "reason": "Provided metrics don't exists",
                            "detail": missing_metric_ids})

        number_of_metrics = len(metrics)
        if number_of_metrics == 0:
            return []
        for metric in metrics:
            api.enforce("get metric", metric)

        # Swap the (uuid, aggregation) pairs for indexed metric objects.
        metrics_by_ids = dict((six.text_type(m.id), m) for m in metrics)
        references = [processor.MetricReference(metrics_by_ids[m], a)
                      for (m, a) in references]

        response = {
            "measures": get_measures_or_abort(
                references, body["operations"], start, stop, granularity,
                needed_overlap, fill)
        }
        if details:
            response["references"] = metrics

        return response