Example #1
def after_execute(conn, elt, multiparams, params, result):
    duration = 1000 * (time.time() - conn.info['query_start_time'].pop(-1))
    action = elt.__class__.__name__

    if action == 'Select':
        name = 'unknown'
        try:
            name = _table_name_from_select_element(elt)
        except Exception:
            logging.exception('Failed finding table name.')
    elif action in ['Update', 'Insert', 'Delete']:
        name = elt.table.name
    else:
        # create/drop tables, sqlalchemy internal schema queries, etc
        return

    action = action.lower()

    statsd_client.timing('db.{}.{}'.format(name, action), duration)
    metrics_logger.debug("table=%s query=%s duration=%.2f", name, action,
                         duration)

    if has_request_context():
        g.setdefault('queries_count', 0)
        g.setdefault('queries_duration', 0)
        g.queries_count += 1
        g.queries_duration += duration

    return result
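Both `after_execute` variants in this collection pop a start timestamp off `conn.info['query_start_time']`, which implies a matching `before_execute` hook that pushes one. A minimal sketch of that wiring with SQLAlchemy's event API (the stack-push shape is inferred from the `pop(-1)` above; treat it as an assumption, not Redash's exact code):

import time

from sqlalchemy import event
from sqlalchemy.engine import Engine


@event.listens_for(Engine, 'before_execute')
def before_execute(conn, elt, multiparams, params):
    # Push a start time for each statement; after_execute pops it off,
    # so nested executions pair up stack-style.
    conn.info.setdefault('query_start_time', []).append(time.time())


event.listen(Engine, 'after_execute', after_execute)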
Example #2
def after_execute(conn, elt, multiparams, params, result):
    duration = 1000 * (time.time() - conn.info['query_start_time'].pop(-1))
    action = elt.__class__.__name__

    if action == 'Select':
        t = elt.froms[0]
        while isinstance(t, _ORMJoin):
            t = t.left
        name = t.name
    elif action in ['Update', 'Insert', 'Delete']:
        name = elt.table.name
    else:
        # create/drop tables, sqlalchemy internal schema queries, etc
        return

    action = action.lower()

    statsd_client.timing('db.{}.{}'.format(name, action), duration)
    metrics_logger.debug("table=%s query=%s duration=%.2f", name, action,
                         duration)

    if has_request_context():
        g.setdefault('queries_count', 0)
        g.setdefault('queries_duration', 0)
        g.queries_count += 1
        g.queries_duration += duration

    return result
Example #3
def task_postrun_handler(signal, sender, task_id, task, args, kwargs, retval,
                         state, **kw):
    try:
        run_time = 1000 * (time.time() - tasks_start_time.pop(task_id))

        state = (state or "unknown").lower()
        tags = {"state": state, "hostname": socket.gethostname()}
        if task.name == "redash.tasks.execute_query":
            if isinstance(retval, Exception):
                tags["state"] = "exception"
                state = "exception"

            tags["data_source_id"] = args[1]

        normalized_task_name = task.name.replace("redash.tasks.",
                                                 "").replace(".", "_")
        metric = "celery.task_runtime.{}".format(normalized_task_name)
        logging.debug(
            "metric=%s",
            json_dumps({
                "metric": metric,
                "tags": tags,
                "value": run_time
            }))
        statsd_client.timing(metric_name(metric, tags), run_time)
        statsd_client.incr(
            metric_name(
                "celery.task.{}.{}".format(normalized_task_name, state), tags))
    except Exception:
        logging.exception("Exception during task_postrun handler.")
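This handler presumes a module-level `tasks_start_time` dict filled in when each task starts; `pop(task_id)` both reads and clears the entry. A hedged sketch of the companion wiring using Celery's signal API (the dict name comes from the code above; the prerun handler body is an assumption):

import time

from celery.signals import task_prerun, task_postrun

tasks_start_time = {}


@task_prerun.connect
def task_prerun_handler(signal, sender, task_id, task, args, kwargs, **kw):
    # Remember when the task started, keyed by its id; the postrun
    # handler pops this entry to compute the runtime.
    tasks_start_time[task_id] = time.time()


task_postrun.connect(task_postrun_handler)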
Example #5
def calculate_metrics(response):
    if "start_time" not in g:
        return response

    request_duration = (time.time() - g.start_time) * 1000
    queries_duration = g.get("queries_duration", 0.0)
    queries_count = g.get("queries_count", 0.0)
    endpoint = (request.endpoint or "unknown").replace(".", "_")

    metrics_logger.info(
        "method=%s path=%s endpoint=%s status=%d content_type=%s content_length=%d duration=%.2f query_count=%d query_duration=%.2f",
        request.method,
        request.path,
        endpoint,
        response.status_code,
        response.content_type,
        response.content_length or -1,
        request_duration,
        queries_count,
        queries_duration,
    )

    statsd_client.timing(
        "requests.{}.{}".format(endpoint, request.method.lower()),
        request_duration)

    return response
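`calculate_metrics` reads `g.start_time`, so something must set it when the request begins. A minimal sketch of the Flask wiring, assuming a standard app object (the hook names are Flask's; the pairing itself is inferred from the code above):

import time

from flask import Flask, g

app = Flask(__name__)


@app.before_request
def record_start_time():
    # calculate_metrics reads this back once the response is ready.
    g.start_time = time.time()


# after_request hooks run on every response, so every request gets timed.
app.after_request(calculate_metrics)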
Example #6
def task_postrun_handler(signal, sender, task_id, task, args, kwargs, retval,
                         state, **kw):
    try:
        run_time = 1000 * (time.time() - tasks_start_time.pop(task_id))

        state = (state or 'unknown').lower()
        tags = {'state': state, 'hostname': socket.gethostname()}
        if task.name == 'redash.tasks.execute_query':
            if isinstance(retval, Exception):
                tags['state'] = 'exception'
                state = 'exception'

            tags['data_source_id'] = args[1]

        normalized_task_name = task.name.replace('redash.tasks.',
                                                 '').replace('.', '_')
        metric = "celery.task_runtime.{}".format(normalized_task_name)
        logging.debug(
            "metric=%s",
            json_dumps({
                'metric': metric,
                'tags': tags,
                'value': run_time
            }))
        statsd_client.timing(metric_name(metric, tags), run_time)
        statsd_client.incr(
            metric_name(
                'celery.task.{}.{}'.format(normalized_task_name, state), tags))
    except Exception:
        logging.exception("Exception during task_postrun handler.")
Example #7
def task_postrun_handler(signal, sender, task_id, task, args, kwargs, retval,
                         state):
    try:
        run_time = 1000 * (time.time() - tasks_start_time.pop(task_id))

        tags = {
            'name': task.name,
            'state': (state or 'unknown').lower(),
            'hostname': socket.gethostname()
        }
        if task.name == 'redash.tasks.execute_query':
            if isinstance(retval, Exception):
                tags['state'] = 'exception'

            tags['data_source_id'] = args[1]

        metric = "celery.task.runtime"
        logging.debug(
            "metric=%s",
            json.dumps({
                'metric': metric,
                'tags': tags,
                'value': run_time
            }))
        statsd_client.timing(metric_name(metric, tags), run_time)
        statsd_client.incr(metric_name('celery.task.count', tags))
    except Exception:
        logging.exception("Exception during task_postrun handler.")
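The `metric_name` helper is referenced but never shown in these examples. One plausible implementation, under the assumption that tags are folded into the statsd key (this shape is illustrative, not necessarily Redash's actual helper):

def metric_name(name, tags):
    # Hypothetical: encode tags into the key, InfluxDB/statsd-tag style.
    if not tags:
        return name
    encoded = ','.join('{}={}'.format(k, v) for k, v in sorted(tags.items()))
    return '{},{}'.format(name, encoded)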
Example #8
def _execute_and_measure(cls, action, args, kwargs):
    name = cls.__name__
    start_time = time.time()
    try:
        result = getattr(super(MeteredModel, cls), action)(*args, **kwargs)
        return result
    finally:
        duration = (time.time() - start_time) * 1000
        statsd_client.timing('db.{}.{}'.format(name, action), duration)
        metrics_logger.debug("model=%s query=%s duration=%.2f", name, action, duration)
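`_execute_and_measure` delegates to `super(MeteredModel, cls)`, which suggests `MeteredModel` is a mixin whose classmethods route through the timing wrapper. A hedged sketch of that usage (`BaseModel` and the `get` method are illustrative stand-ins, not names from the source):

class BaseModel(object):
    # Stand-in for the real ORM base class.
    @classmethod
    def get(cls, *args, **kwargs):
        return None


class MeteredModel(BaseModel):
    @classmethod
    def _execute_and_measure(cls, action, args, kwargs):
        ...  # timing logic exactly as in Example #8

    @classmethod
    def get(cls, *args, **kwargs):
        # Route the lookup through the timing wrapper.
        return cls._execute_and_measure('get', args, kwargs)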
Example #9
    def metered_execute(self, *args, **kwargs):
        name = self.model_class.__name__

        action = getattr(self, 'model_action', 'unknown')

        start_time = time.time()
        try:
            result = real_execute(self, *args, **kwargs)
            return result
        finally:
            duration = (time.time() - start_time) * 1000
            statsd_client.timing('db.{}.{}'.format(name, action), duration)
            metrics_logger.debug("model=%s query=%s duration=%.2f", name, action, duration)
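`metered_execute` calls a captured `real_execute`, which points at the original, unpatched method; installing the wrapper is a monkey-patch. A minimal sketch of that installation, assuming the target is a query class with an `execute` method (`query_class` is a placeholder, not a name from the source):

def install_metered_execute(query_class):
    # Keep a reference to the original so the wrapper can delegate to it.
    real_execute = query_class.execute

    def metered_execute(self, *args, **kwargs):
        ...  # timing logic as in Example #9, delegating to real_execute

    query_class.execute = metered_execute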
Example #11
def calculate_metrics(response):
    request_duration = (time.time() - g.start_time) * 1000

    metrics_logger.info("method=%s path=%s endpoint=%s status=%d content_type=%s content_length=%d duration=%.2f query_count=%d query_duration=%.2f",
                        request.method,
                        request.path,
                        request.endpoint,
                        response.status_code,
                        response.content_type,
                        response.content_length,
                        request_duration,
                        db.database.query_count,
                        db.database.query_duration)

    statsd_client.timing('requests.{}.{}'.format(request.endpoint, request.method.lower()), request_duration)

    return response
Example #12
def task_postrun_handler(signal, sender, task_id, task, args, kwargs, retval, state):
    try:
        run_time = 1000 * (time.time() - tasks_start_time.pop(task_id))

        tags = {"name": task.name, "state": (state or "unknown").lower(), "hostname": socket.gethostname()}
        if task.name == "redash.tasks.execute_query":
            if isinstance(retval, Exception):
                tags["state"] = "exception"

            tags["data_source_id"] = args[1]

        metric = "celery.task.runtime"
        logging.debug("metric=%s", json.dumps({"metric": metric, "tags": tags, "value": run_time}))
        statsd_client.timing(metric_name(metric, tags), run_time)
        statsd_client.incr(metric_name("celery.task.count", tags))
    except Exception:
        logging.exception("Exception during task_postrun handler.")
Example #13
def after_execute(conn, elt, multiparams, params, result):
    # list.pop removes (and returns) the element at the given index;
    # list.remove removes the first matching element.
    duration = 1000 * (time.time() - conn.info['query_start_time'].pop(-1))
    action = elt.__class__.__name__

    if action == 'Select':
        name = 'unknown'
        try:
            name = _table_name_from_select_element(elt)
        except Exception:
            logging.exception('Failed finding table name.')
    elif action in ['Update', 'Insert', 'Delete']:
        name = elt.table.name
    else:
        # create/drop tables, sqlalchemy internal schema queries, etc
        return

    action = action.lower()

    # time how long each operation (select, update, ...) took against each table
    statsd_client.timing('db.{}.{}'.format(name, action), duration)
    # and log it
    metrics_logger.debug("table=%s query=%s duration=%.2f", name, action,
                         duration)

    if has_request_context():
        # current_app and g live on the application context (per-thread
        # globals); request and session live on the request context.
        # request wraps the HTTP request the client sent, while session is
        # a dict of values remembered between a user's requests. Reading
        # the source shows session is also request-context data, but it is
        # persisted in a cookie sent to the client, which the client sends
        # back on subsequent requests.
        g.setdefault('queries_count', 0)
        g.setdefault('queries_duration', 0)
        g.queries_count += 1
        g.queries_duration += duration

    return result
Example #14
def calculate_metrics(response):
    if 'start_time' not in g:
        return response

    request_duration = (time.time() - g.start_time) * 1000
    queries_duration = g.get('queries_duration', 0.0)
    queries_count = g.get('queries_count', 0.0)

    metrics_logger.info(
        "method=%s path=%s endpoint=%s status=%d content_type=%s content_length=%d duration=%.2f query_count=%d query_duration=%.2f",
        request.method, request.path, request.endpoint, response.status_code,
        response.content_type, response.content_length, request_duration,
        queries_count, queries_duration)

    statsd_client.timing(
        'requests.{}.{}'.format(request.endpoint, request.method.lower()),
        request_duration)

    return response
Example #16
def calculate_metrics(response):
    if 'start_time' not in g:
        # nothing was recorded for this request; return it to the client as-is
        return response

    # total time spent handling this request
    request_duration = (time.time() - g.start_time) * 1000

    # database time and query count accumulated during this request,
    # populated in redash/metrics/database.py:
    #
    #     if has_request_context():
    #         g.setdefault('queries_count', 0)
    #         g.setdefault('queries_duration', 0)
    #         g.queries_count += 1
    #         g.queries_duration += duration

    queries_duration = g.get('queries_duration', 0.0)
    queries_count = g.get('queries_count', 0.0)

    # normalize the endpoint name for use as a metric key
    endpoint = (request.endpoint or 'unknown').replace('.', '_')

    # write the measurements to the metrics log
    metrics_logger.info(
        "method=%s path=%s endpoint=%s status=%d content_type=%s content_length=%d duration=%.2f query_count=%d query_duration=%.2f",
        request.method, request.path, endpoint, response.status_code,
        response.content_type, response.content_length or -1, request_duration,
        queries_count, queries_duration)

    # statsd collects metrics (values measured over time), so recording the
    # request duration here makes it easy to graph request timings. The same
    # data could be dug out of the logs with timestamps and grep, but that
    # would be far more cumbersome.
    statsd_client.timing(
        'requests.{}.{}'.format(endpoint, request.method.lower()),
        request_duration)

    # hand the response back to the client
    return response