def add_metrics(resource_id, request_params, dataset, proto_version):
    # Bucket all counters from this batch into the current minute.
    current_time = datetime.utcnow().replace(second=0, microsecond=0)
    try:
        resource = ApplicationService.by_id_cached()(resource_id)
        # Re-attach the cached instance to the current session without
        # reloading it from the database.
        resource = DBSession.merge(resource, load=False)
        es_docs = []
        rows = []
        for metric in dataset:
            tags = dict(metric["tags"])
            # Prefer an explicit server_name tag, fall back to the metric's
            # own server_name, and never store an empty value.
            server_n = tags.get("server_name", metric["server_name"]).lower()
            tags["server_name"] = server_n or "unknown"
            new_metric = Metric(
                timestamp=metric["timestamp"],
                resource_id=resource.resource_id,
                namespace=metric["namespace"],
                tags=tags,
            )
            rows.append(new_metric)
            es_docs.append(new_metric.es_doc())
        session = DBSession()
        session.bulk_save_objects(rows)
        session.flush()

        action = "METRICS"
        metrics_msg = "%s: %s, metrics: %s, proto:%s" % (
            action,
            str(resource),
            len(dataset),
            proto_version,
        )
        log.info(metrics_msg)

        # bulk_save_objects() bypasses normal session bookkeeping, so tell
        # the transaction machinery the session holds pending writes.
        mark_changed(session)

        # Update rate counters in a single round trip; transaction=False
        # because these are independent INCR/EXPIRE/SADD commands that need
        # batching, not atomicity.
        redis_pipeline = Datastores.redis.pipeline(transaction=False)
        key = REDIS_KEYS["counters"]["metrics_per_minute"].format(current_time)
        redis_pipeline.incr(key, len(rows))
        redis_pipeline.expire(key, 3600 * 24)
        key = REDIS_KEYS["counters"]["events_per_minute_per_user"].format(
            resource.owner_user_id, current_time
        )
        redis_pipeline.incr(key, len(rows))
        redis_pipeline.expire(key, 3600)
        key = REDIS_KEYS["counters"]["metrics_per_hour_per_app"].format(
            resource_id, current_time.replace(minute=0)
        )
        redis_pipeline.incr(key, len(rows))
        redis_pipeline.expire(key, 3600 * 24 * 7)
        # Record that this app received data in the current hour bucket.
        redis_pipeline.sadd(
            REDIS_KEYS["apps_that_got_new_data_per_hour"].format(
                current_time.replace(minute=0)
            ),
            resource_id,
        )
        redis_pipeline.execute()
        add_metrics_es(es_docs)
        return True
    except Exception as exc:
        print_traceback(log)
        # In eager mode (tests) propagate immediately instead of retrying.
        if celery.conf["CELERY_EAGER_PROPAGATES_EXCEPTIONS"]:
            raise
        add_metrics.retry(exc=exc)
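# --- usage sketch (illustrative, not part of the original module) -----------
# Assumes add_metrics is registered as a Celery task (it calls .retry() above,
# which only exists on task objects) and that a worker consumes its queue.
# The resource_id, namespace, host, and tag values below are hypothetical;
# each dataset entry must carry "timestamp", "namespace", "server_name", and
# "tags" (pairs accepted by dict()), matching the fields the task reads.
def _example_enqueue_metrics():
    """Sketch: enqueue one CPU datapoint for a hypothetical application."""
    add_metrics.delay(
        1,                                   # hypothetical resource_id
        {},                                  # request_params: unused by the task body
        [
            {
                "timestamp": datetime.utcnow(),
                "namespace": "process.cpu",     # hypothetical namespace
                "server_name": "web01",         # hypothetical host
                "tags": [("cpu_usage", 72.5)],  # dict()-able key/value pairs
            }
        ],
        1,                                   # proto_version
    )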