Example #1
def add_metrics(resource_id, request_params, dataset, proto_version):
    current_time = datetime.utcnow().replace(second=0, microsecond=0)
    try:
        # by_id_cached() returns a memoized lookup callable; merge the cached
        # (detached) instance into the session without re-querying the DB
        resource = ApplicationService.by_id_cached()(resource_id)
        resource = DBSession.merge(resource, load=False)
        es_docs = []
        rows = []
        for metric in dataset:
            tags = dict(metric["tags"])
            server_n = tags.get("server_name", metric["server_name"]).lower()
            tags["server_name"] = server_n or "unknown"
            new_metric = Metric(
                timestamp=metric["timestamp"],
                resource_id=resource.resource_id,
                namespace=metric["namespace"],
                tags=tags,
            )
            rows.append(new_metric)
            es_docs.append(new_metric.es_doc())
        session = DBSession()
        session.bulk_save_objects(rows)
        session.flush()

        action = "METRICS"
        metrics_msg = "%s: %s, metrics: %s, proto:%s" % (
            action,
            str(resource),
            len(dataset),
            proto_version,
        )
        log.info(metrics_msg)

        # bulk_save_objects() bypasses normal dirty tracking, so explicitly
        # tell the zope.sqlalchemy transaction machinery there are changes
        mark_changed(session)
        redis_pipeline = Datastores.redis.pipeline(transaction=False)
        key = REDIS_KEYS["counters"]["metrics_per_minute"].format(current_time)
        redis_pipeline.incr(key, len(rows))
        redis_pipeline.expire(key, 3600 * 24)
        key = REDIS_KEYS["counters"]["events_per_minute_per_user"].format(
            resource.owner_user_id, current_time)
        redis_pipeline.incr(key, len(rows))
        redis_pipeline.expire(key, 3600)
        key = REDIS_KEYS["counters"]["metrics_per_hour_per_app"].format(
            resource_id, current_time.replace(minute=0))
        redis_pipeline.incr(key, len(rows))
        redis_pipeline.expire(key, 3600 * 24 * 7)
        redis_pipeline.sadd(
            REDIS_KEYS["apps_that_got_new_data_per_hour"].format(
                current_time.replace(minute=0)),
            resource_id,
        )
        redis_pipeline.execute()
        add_metrics_es(es_docs)
        return True
    except Exception as exc:
        print_traceback(log)
        if celery.conf["CELERY_EAGER_PROPAGATES_EXCEPTIONS"]:
            raise
        add_metrics.retry(exc=exc)
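The minute-bucketed Redis counters above are a self-contained pattern worth isolating. Below is a minimal sketch under stated assumptions: a reachable local Redis, redis-py installed, and illustrative key templates standing in for the REDIS_KEYS registry used in the example.

from datetime import datetime

import redis

# hypothetical templates mirroring REDIS_KEYS["counters"] above
KEY_TEMPLATES = {
    "metrics_per_minute": "counters:metrics_per_minute:{}",
    "metrics_per_hour_per_app": "counters:metrics_per_hour_per_app:{}:{}",
}


def bump_counters(client, resource_id, row_count):
    # truncate to the minute so all writes in the same minute share one key
    now = datetime.utcnow().replace(second=0, microsecond=0)
    pipe = client.pipeline(transaction=False)

    key = KEY_TEMPLATES["metrics_per_minute"].format(now)
    pipe.incr(key, row_count)
    pipe.expire(key, 3600 * 24)  # keep per-minute buckets for a day

    key = KEY_TEMPLATES["metrics_per_hour_per_app"].format(
        resource_id, now.replace(minute=0))
    pipe.incr(key, row_count)
    pipe.expire(key, 3600 * 24 * 7)  # per-app hourly buckets kept for a week

    pipe.execute()


bump_counters(redis.StrictRedis(), resource_id=1, row_count=10)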
Example #2
def clean_tables():
    tables = Base.metadata.tables.keys()
    transaction.begin()
    for t in tables:
        if not t.startswith('alembic_'):
            # PostgreSQL TRUNCATE ... CASCADE also empties referencing tables
            DBSession.execute('truncate %s cascade' % t)
    session = DBSession()
    mark_changed(session)
    transaction.commit()
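One plausible way to use clean_tables() is as an autouse pytest fixture, so every test starts from empty tables. A sketch, assuming the helper above is importable; the module path is an assumption.

import pytest

from tests.utils import clean_tables  # hypothetical module path


@pytest.fixture(autouse=True)
def clean_database():
    yield           # run the test body first
    clean_tables()  # then truncate everything except the alembic_* tables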
Example #3
def default_application(default_user):
    from appenlight.models import DBSession
    from appenlight.models.application import Application

    transaction.begin()
    session = DBSession()
    application = Application(resource_id=1,
                              resource_name="testapp",
                              api_key="xxxx")
    session.add(application)
    default_user.resources.append(application)
    session.execute("SELECT nextval('resources_resource_id_seq')")
    transaction.commit()
    return application
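The SELECT nextval('resources_resource_id_seq') call compensates for the hard-coded resource_id=1: inserting an explicit primary key does not consume the PostgreSQL sequence, so it is advanced manually to keep later autogenerated ids from colliding. A minimal sketch of the same trick with plain SQLAlchemy; the DSN and table are illustrative.

import sqlalchemy as sa

engine = sa.create_engine("postgresql:///testdb")  # illustrative DSN

with engine.begin() as conn:
    # explicit primary key: the sequence is NOT consumed by this insert
    conn.execute(sa.text(
        "INSERT INTO resources (resource_id, resource_name) "
        "VALUES (1, 'testapp')"))
    # advance the sequence so the next autogenerated id is 2, not 1
    conn.execute(sa.text("SELECT nextval('resources_resource_id_seq')"))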
Example #4
def default_user():
    from appenlight.models import DBSession
    from appenlight.models.user import User
    from appenlight.models.auth_token import AuthToken

    transaction.begin()
    session = DBSession()
    user = User(id=1, user_name="testuser", status=1, email="*****@*****.**")
    session.add(user)
    token = AuthToken(token="1234")
    user.auth_tokens.append(token)
    session.execute("SELECT nextval('users_id_seq')")
    transaction.commit()
    return user
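Both helpers follow pytest's fixture-dependency pattern: default_application receives default_user as a parameter, so pytest creates the user first. A hedged conftest.py skeleton, assuming the bodies from Examples #3 and #4:

import pytest


@pytest.fixture
def default_user():
    ...  # create and return the user, as in Example #4


@pytest.fixture
def default_application(default_user):
    ...  # pytest injects the user above; create the app, as in Example #3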
Example #5
def charts_PATCH(request):
    dashboard = request.context.resource

    json_body = copy.deepcopy(request.unsafe_json_body)
    chart_config = json_body["config"]
    # for now, just raise an error if anything unexpected is found

    applications = UserService.resources_with_perms(
        request.user, ["view"], resource_types=["application"])

    # CRITICAL - this ensures our resultset is limited to only the ones
    # user has view permissions
    all_possible_app_ids = set([app.resource_id for app in applications])

    schema = ChartConfigSchema().bind(resources=all_possible_app_ids)
    schema.deserialize(chart_config)

    # some processing/normalizing for new/missing variables
    if "timeRange" not in chart_config:
        chart_config["timeRange"] = "1M"
    if "startMoment" not in chart_config:
        chart_config["startMoment"] = "now"
    if "startMomentUnit" not in chart_config:
        chart_config["startMomentUnit"] = "days"
    if "startMomentValue" not in chart_config:
        chart_config["startMomentValue"] = 0
    # ensure we don't have any leftover chart definitions present from
    # removed layout columns
    chart_ids = []
    for row in dashboard.layout_config:
        for col in row["columns"]:
            chart_ids.append(col["chartId"])
    for chart in dashboard.charts:
        if chart.uuid not in chart_ids:
            actions = AlertChannelActionService.by_other_id(chart.uuid)
            for action in actions:
                DBSession.delete(action)
            dashboard.charts.remove(chart)

    # pin the config version to the chart being patched (don't rely on the
    # leftover loop variable, which is unset when the dashboard has no charts)
    chart_config["json_config_version"] = request.context.chart.json_config_version
    # make sure we set model field as dirty
    request.context.chart.name = json_body["name"]
    request.context.chart.config = None
    request.context.chart.config = chart_config
    session = DBSession()
    mark_changed(session)
    request.session.flash("Chart saved")
    return True
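The config = None / reassign two-step at the end exists because SQLAlchemy does not notice in-place mutation of a JSON column; rebinding the attribute marks it dirty. SQLAlchemy's explicit alternative is flag_modified. A short sketch, with chart standing in for request.context.chart:

from sqlalchemy.orm.attributes import flag_modified


def save_chart_config(chart, new_config):
    # rebinding the attribute is one way to mark a JSON column dirty...
    chart.config = dict(new_config)
    # ...or mutate in place and flag the attribute explicitly
    chart.config["timeRange"] = new_config.get("timeRange", "1M")
    flag_modified(chart, "config")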
Example #6
def add_metrics(resource_id, request_params, dataset, proto_version):
    current_time = datetime.utcnow().replace(second=0, microsecond=0)
    try:
        resource = ApplicationService.by_id_cached()(resource_id)
        resource = DBSession.merge(resource, load=False)
        es_docs = []
        rows = []
        for metric in dataset:
            tags = dict(metric['tags'])
            server_n = tags.get('server_name', metric['server_name']).lower()
            tags['server_name'] = server_n or 'unknown'
            new_metric = Metric(timestamp=metric['timestamp'],
                                resource_id=resource.resource_id,
                                namespace=metric['namespace'],
                                tags=tags)
            rows.append(new_metric)
            es_docs.append(new_metric.es_doc())
        session = DBSession()
        session.bulk_save_objects(rows)
        session.flush()

        action = 'METRICS'
        metrics_msg = '%s: %s, metrics: %s, proto:%s' % (
            action, str(resource), len(dataset), proto_version)
        log.info(metrics_msg)

        mark_changed(session)
        redis_pipeline = Datastores.redis.pipeline(transaction=False)
        key = REDIS_KEYS['counters']['metrics_per_minute'].format(current_time)
        redis_pipeline.incr(key, len(rows))
        redis_pipeline.expire(key, 3600 * 24)
        key = REDIS_KEYS['counters']['events_per_minute_per_user'].format(
            resource.owner_user_id, current_time)
        redis_pipeline.incr(key, len(rows))
        redis_pipeline.expire(key, 3600)
        key = REDIS_KEYS['counters']['metrics_per_hour_per_app'].format(
            resource_id, current_time.replace(minute=0))
        redis_pipeline.incr(key, len(rows))
        redis_pipeline.expire(key, 3600 * 24 * 7)
        redis_pipeline.sadd(
            REDIS_KEYS['apps_that_got_new_data_per_hour'].format(
                current_time.replace(minute=0)), resource_id)
        redis_pipeline.execute()
        add_metrics_es(es_docs)
        return True
    except Exception as exc:
        print_traceback(log)
        add_metrics.retry(exc=exc)
Example #7
def add_uptime_stats(params, metric):
    proto_version = parse_proto(params.get("protocol_version"))
    try:
        application = ApplicationService.by_id_cached()(metric["resource_id"])
        application = DBSession.merge(application, load=False)
        if not application:
            return
        start_interval = convert_date(metric["timestamp"])
        start_interval = start_interval.replace(second=0, microsecond=0)
        new_metric = UptimeMetric(
            start_interval=start_interval,
            response_time=metric["response_time"],
            status_code=metric["status_code"],
            is_ok=metric["is_ok"],
            location=metric.get("location", 1),
            tries=metric["tries"],
            resource_id=application.resource_id,
            owner_user_id=application.owner_user_id,
        )
        DBSession.add(new_metric)
        DBSession.flush()
        add_metrics_uptime([new_metric.es_doc()])
        if metric["is_ok"]:
            event_types = [Event.types["uptime_alert"]]
            statuses = [Event.statuses["active"]]
            # get alert events opened more than 6 minutes ago
            events = EventService.by_type_and_status(
                event_types,
                statuses,
                older_than=(datetime.utcnow() - timedelta(minutes=6)),
                app_ids=[application.resource_id],
            )
            for event in events:
                event.close()
        else:
            UptimeMetricService.check_for_alert(application, metric=metric)
        action = "METRICS UPTIME"
        metrics_msg = "%s: %s, proto:%s" % (action, str(application),
                                            proto_version)
        log.info(metrics_msg)
        session = DBSession()
        mark_changed(session)
        return True
    except Exception as exc:
        print_traceback(log)
        add_uptime_stats.retry(exc=exc)
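The is_ok branch encodes a small self-healing state machine: a successful probe closes any uptime alert that has been open longer than the six-minute grace window, while a failed probe delegates to check_for_alert. A minimal standalone sketch with a stand-in Event type:

from datetime import datetime, timedelta


class Event:
    """Stand-in for appenlight's Event model."""

    def __init__(self, opened_at):
        self.opened_at = opened_at
        self.closed = False

    def close(self):
        self.closed = True


def process_probe(is_ok, open_events, now=None):
    now = now or datetime.utcnow()
    if is_ok:
        # success: close alerts opened more than 6 minutes ago
        for event in open_events:
            if event.opened_at < now - timedelta(minutes=6):
                event.close()
    else:
        # failure: this is where check_for_alert() would open a new event
        pass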
Example #8
def update_tag_counter(tag_name, tag_value, count):
    try:
        query = DBSession.query(Tag).filter(Tag.name == tag_name).filter(
            sa.cast(Tag.value, sa.types.TEXT) == sa.cast(
                json.dumps(tag_value), sa.types.TEXT))
        query.update(
            {
                'times_seen': Tag.times_seen + count,
                'last_timestamp': datetime.utcnow()
            },
            synchronize_session=False)
        session = DBSession()
        mark_changed(session)
        return True
    except Exception as exc:
        print_traceback(log)
        update_tag_counter.retry(exc=exc)
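The update is expressed server-side (Tag.times_seen + count), so the increment happens atomically in a single UPDATE without loading any rows, and synchronize_session=False skips reconciling in-memory objects. A runnable sketch of the same pattern against SQLite, with a hypothetical Tag model:

from datetime import datetime

import sqlalchemy as sa
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Tag(Base):  # hypothetical stand-in for appenlight's Tag model
    __tablename__ = "tags"
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String)
    times_seen = sa.Column(sa.Integer, default=0)
    last_timestamp = sa.Column(sa.DateTime)


engine = sa.create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Tag(name="browser", times_seen=1))
    session.commit()
    # one atomic UPDATE; no Tag row is loaded into Python first
    session.query(Tag).filter(Tag.name == "browser").update(
        {"times_seen": Tag.times_seen + 5, "last_timestamp": datetime.utcnow()},
        synchronize_session=False,
    )
    session.commit()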
Example #9
def update_tag_counter(tag_name, tag_value, count):
    try:
        query = (DBSession.query(Tag).filter(Tag.name == tag_name).filter(
            sa.cast(Tag.value, sa.types.TEXT) == sa.cast(
                json.dumps(tag_value), sa.types.TEXT)))
        query.update(
            {
                "times_seen": Tag.times_seen + count,
                "last_timestamp": datetime.utcnow()
            },
            synchronize_session=False,
        )
        session = DBSession()
        mark_changed(session)
        return True
    except Exception as exc:
        print_traceback(log)
        if celery.conf["CELERY_EAGER_PROPAGATES_EXCEPTIONS"]:
            raise
        update_tag_counter.retry(exc=exc)
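The only difference from Example #8 is the eager-mode guard: when Celery runs tasks inline (typically under tests), retry() would swallow the exception, so it is re-raised instead. A minimal sketch using the modern setting names (task_always_eager, task_eager_propagates), which replaced CELERY_EAGER_PROPAGATES_EXCEPTIONS:

from celery import Celery

app = Celery("sketch")
app.conf.task_always_eager = True      # run tasks inline, as tests do
app.conf.task_eager_propagates = True  # modern name for the old
                                       # CELERY_EAGER_PROPAGATES_EXCEPTIONS


@app.task(bind=True)
def flaky(self):
    try:
        raise RuntimeError("boom")     # stand-in for the real work
    except Exception as exc:
        if app.conf.task_eager_propagates:
            raise                      # let the caller/test see the failure
        raise self.retry(exc=exc)      # real worker: schedule a retry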
Example #10
def partitions_remove(request):
    permanent_partitions, daily_partitions = get_partition_stats()
    pg_partitions = []
    es_partitions = []
    for item in list(permanent_partitions.values()) + list(
            daily_partitions.values()):
        es_partitions.extend(item["elasticsearch"])
        pg_partitions.extend(item["pg"])
    FormCls = get_partition_deletion_form(es_partitions, pg_partitions)
    form = FormCls(
        es_index=request.unsafe_json_body["es_indices"],
        pg_index=request.unsafe_json_body["pg_indices"],
        confirm=request.unsafe_json_body["confirm"],
        csrf_context=request,
    )
    if form.validate():
        for ix in form.data["es_index"]:
            log.warning("deleting ES partition: {}".format(ix))
            Datastores.es.indices.delete(ix)
        for ix in form.data["pg_index"]:
            log.warning("deleting PG partition: {}".format(ix))
            # identifiers can't be bound parameters; ix was validated by the
            # form against the known partition lists above
            stmt = sa.text("DROP TABLE %s CASCADE" % ix)
            session = DBSession()
            session.connection().execute(stmt)
            mark_changed(session)

    for field, error in form.errors.items():
        msg = "%s: %s" % (field, error[0])
        request.session.flash(msg, "error")

    permanent_partitions, daily_partitions = get_partition_stats()
    return {
        "permanent_partitions": sorted(
            permanent_partitions.items(), key=lambda x: x[0], reverse=True
        ),
        "daily_partitions": sorted(
            daily_partitions.items(), key=lambda x: x[0], reverse=True
        ),
    }
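Note the DDL at the heart of the deletion loop: table names cannot be bound parameters, so the string interpolation is only safe because the form restricts pg_index to the known partition list. The same whitelist idea in isolation, with illustrative names:

import sqlalchemy as sa

KNOWN_PARTITIONS = {"logs_p_2017_01_01", "metrics_p_2017_01_01"}  # illustrative


def drop_partition(connection, table_name):
    if table_name not in KNOWN_PARTITIONS:
        raise ValueError("refusing to drop unknown table %r" % table_name)
    # identifiers can't be parameterized; safe only after the whitelist check
    connection.execute(sa.text('DROP TABLE "%s" CASCADE' % table_name))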
Example #11
def application_create(request):
    """
    Creates new application instances
    """
    user = request.user
    form = forms.ApplicationCreateForm(MultiDict(request.unsafe_json_body),
                                       csrf_context=request)
    if form.validate():
        session = DBSession()
        resource = Application()
        DBSession.add(resource)
        form.populate_obj(resource)
        resource.api_key = resource.generate_api_key()
        user.resources.append(resource)
        request.session.flash(_('Application created'))
        DBSession.flush()
        mark_changed(session)
    else:
        return HTTPUnprocessableEntity(body=form.errors_json)

    return resource.get_dict()
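Stripped of AppEnlight specifics, the view follows a common validate-or-422 shape: build a WTForms form from the JSON body, return the errors with a 422 on failure, and flush so generated fields (here the api_key) exist before serializing. A generic sketch with stand-in names:

import json

from pyramid.httpexceptions import HTTPUnprocessableEntity
from webob.multidict import MultiDict


def create_view(request, form_cls, model_cls, session):
    form = form_cls(MultiDict(request.json_body), csrf_context=request)
    if not form.validate():
        return HTTPUnprocessableEntity(body=json.dumps(form.errors))
    obj = model_cls()
    form.populate_obj(obj)  # copy validated fields onto the model
    session.add(obj)
    session.flush()         # populate defaults/keys before returning
    return obj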