Example #1
def get_content_timeseries(user, org, content_item_id):
    """
    Query an individual content timeseries.
    """
    c = ContentItem.query\
        .filter_by(id=content_item_id)\
        .filter_by(org_id=org.id)\
        .first()

    if not c:
        raise NotFoundError(
            'A ContentItem with ID {} does not exist'
            .format(content_item_id))

    # select / exclude
    select, exclude = arg_list('select', typ=str, exclusions=True, default=['*'])
    if '*' in select:
        exclude = []
        select = "*"

    kw = dict(
        unit=arg_str('unit', default='hour'),
        sparse=arg_bool('sparse', default=True),
        sig_digits=arg_int('sig_digits', default=2),
        select=select,
        exclude=exclude,
        rm_nulls=arg_bool('rm_nulls', default=False),
        time_since_start=arg_bool('time_since_start', default=False),
        transform=arg_str('transform', default=None),
        before=arg_date('before', default=None),
        after=arg_date('after', default=None)
    )

    q = QueryContentMetricTimeseries(org, [content_item_id], **kw)
    return jsonify(list(q.execute()))
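Everything the handler exposes is read from the query string by the arg_* helpers, so a client only has to pass the matching parameters. A rough illustration of a call (the host, route, and apikey parameter are assumptions for illustration only; the parameter names mirror the arg_* calls above):

# Hypothetical client call. The base URL, route, and 'apikey' parameter are
# assumed; the query parameters correspond to the arg_* calls in the handler.
import requests

resp = requests.get(
    'http://localhost:5000/api/v1/content/1234/timeseries',
    params={
        'unit': 'day',            # arg_str('unit', default='hour')
        'sparse': 'false',        # arg_bool('sparse', default=True)
        'sig_digits': 3,          # arg_int('sig_digits', default=2)
        'select': 'pageviews',    # arg_list('select', ...); '*' keeps everything
        'rm_nulls': 'true',       # arg_bool('rm_nulls', default=False)
        'apikey': 'YOUR-API-KEY'  # assumed auth parameter
    })
print(resp.json())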
Example #2
def get_content_timeseries(user, org, content_item_id):
    """
    Query an individual content timeseries.
    """
    c = ContentItem.query\
        .filter_by(id=content_item_id)\
        .filter_by(org_id=org.id)\
        .first()

    if not c:
        raise NotFoundError(
            'A ContentItem with ID {} does not exist'.format(content_item_id))
    kw = request_ts(unit='hour')
    q = QueryContentMetricTimeseries(org, [content_item_id], **kw)
    return jsonify(list(q.execute()))
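This version collapses the argument parsing from Example #1 into a single request_ts helper. Its implementation is not shown in these examples; a minimal sketch, assuming it simply wraps the same arg_* calls and treats keyword arguments as per-endpoint defaults, might look like:

def request_ts(**defaults):
    """
    Sketch of a request_ts-style helper (assumed, not from the source):
    gather the standard timeseries query-string arguments, treating any
    keyword arguments as per-endpoint defaults.
    """
    select, exclude = arg_list('select', typ=str, exclusions=True, default=['*'])
    if '*' in select:
        exclude = []
        select = "*"
    kw = dict(
        unit=arg_str('unit', default=defaults.get('unit', 'hour')),
        sparse=arg_bool('sparse', default=defaults.get('sparse', True)),
        sig_digits=arg_int('sig_digits', default=defaults.get('sig_digits', 2)),
        select=select,
        exclude=exclude,
        rm_nulls=arg_bool('rm_nulls', default=defaults.get('rm_nulls', False)),
        time_since_start=arg_bool('time_since_start', default=False),
        transform=arg_str('transform', default=None),
        before=arg_date('before', default=None),
        after=arg_date('after', default=None)
    )
    # pass any remaining per-endpoint options (e.g. group_by_id) through untouched.
    for k, v in defaults.items():
        kw.setdefault(k, v)
    return kw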
Example #4
def content_summary_from_content_timeseries(org,
                                            content_item_ids=[],
                                            num_hours=24):
    """
    Roll up content-timeseries metrics into summaries.
    Optimize this query by only updating content items
    which have had updates to their metrics in the last X hours.
    """

    # just use this to generate a giant timeseries select with computed
    # metrics.
    ts = QueryContentMetricTimeseries(org, content_item_ids, unit=None)
    ts.compute = False
    metrics, ss = _summary_select(org.content_timeseries_metric_rollups)

    qkw = {
        'select_statements': ss,
        'metrics': metrics,
        'org_id': org.id,
        'last_updated': (dates.now() - timedelta(hours=num_hours)).isoformat(),
        'ts_query': ts.query,
    }

    q = """SELECT upsert_content_metric_summary({org_id}, content_item_id, metrics::text)
           FROM  (
              SELECT
                content_item_id,
                (SELECT row_to_json(_) from (SELECT {metrics}) as _) as metrics
              FROM (
                 SELECT
                    content_item_id,
                    {select_statements}
                FROM ({ts_query}) zzzz
                WHERE zzzz.content_item_id in (
                    SELECT
                        distinct(content_item_id)
                    FROM content_metric_timeseries
                    WHERE updated > '{last_updated}'
                    )
                GROUP BY content_item_id
                ) t1
            ) t2
        """.format(**qkw)
    db.session.execute(q)
    db.session.commit()
    return True
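_summary_select is not defined in these examples, but Example #7 further down builds the same two values inline, so it presumably formats each rollup config into an aggregation expression and returns the joined metric names alongside the joined select statements. A sketch along those lines (assumed, reconstructed from Example #7):

def _summary_select(metric_rollups):
    """
    Sketch of _summary_select, reconstructed from the inline version in
    Example #7: turn a dict of rollup configs into a comma-joined list of
    metric names plus the matching aggregation select statements.
    """
    summary_pattern = "{agg}({name}) AS {name}"
    select_statements = []
    metrics = []
    for name, m in metric_rollups.items():
        select_statements.append(summary_pattern.format(**m))
        metrics.append(name)
    return ", ".join(metrics), ",\n".join(select_statements)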
Example #6
def org_timeseries_from_content_timeseries(org,
                                           content_item_ids=[],
                                           num_hours=24):
    """
    Roll up content timeseries => org timeseries.
    """
    # summarize the content timeseries table
    content_ts = QueryContentMetricTimeseries(org,
                                              org.content_item_ids,
                                              unit='hour',
                                              group_by_id=False)
    content_ts.compute = False
    # select statements.
    metrics, ss = _summary_select(org.timeseries_metric_rollups)

    qkw = {
        'org_id': org.id,
        'metrics': metrics,
        'select_statements': ss,
        'ts_query': content_ts.query
    }

    # generate the query
    q = \
        """SELECT upsert_org_metric_timeseries({org_id}, datetime, metrics::text)
           FROM  (
              SELECT
                datetime,
                (SELECT row_to_json(_) from (SELECT {metrics}) as _) as metrics
              FROM (
                 SELECT
                    datetime,
                    {select_statements}
                FROM ({ts_query}) zzzz
                GROUP BY datetime
                ) t1
            ) t2
    """.format(**qkw)
    db.session.execute(q)
    db.session.commit()
    return True
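Each row handed to upsert_org_metric_timeseries therefore carries a time bucket plus a single JSON object holding every aggregated metric for that bucket (that is what the row_to_json sub-select produces). A rough picture of one such row, using made-up metric names and numbers:

import json

# Illustrative only: roughly what one (datetime, metrics) row produced by the
# query above looks like before it reaches
# upsert_org_metric_timeseries(org_id, datetime, metrics::text).
row = {
    'datetime': '2015-06-01T05:00:00+00:00',
    'metrics': json.dumps({'pageviews': 1250, 'twitter_shares': 42}),
}
print(row['metrics'])  # {"pageviews": 1250, "twitter_shares": 42}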
Example #7
def content_timeseries_to_summary(org, num_hours=24):
    """
    Roll up content-timeseries metrics into summaries.
    Optimize this query by only updating content items whose
    timeseries have been updated in the last X hours.
    """

    # just use this to generate a giant timeseries select with computed
    # metrics.
    ts = QueryContentMetricTimeseries(org, org.content_item_ids)

    # generate aggregation statements + list of metric names.
    summary_pattern = "{agg}({name}) AS {name}"
    select_statements = []
    metrics = []
    for n, m in org.content_timeseries_metric_rollups.items():
        ss = summary_pattern.format(**m)
        select_statements.append(ss)
        metrics.append(n)

    qkw = {
        'select_statements': ",\n".join(select_statements),
        'metrics': ", ".join(metrics),
        'org_id': org.id,
        'last_updated': (dates.now() - timedelta(hours=num_hours)).isoformat(),
        'ts_query': ts.query
    }

    q = """SELECT upsert_content_metric_summary({org_id}, content_item_id, metrics::text)
           FROM  (
              SELECT
                content_item_id,
                (SELECT row_to_json(_) from (SELECT {metrics}) as _) as metrics
              FROM (
                 SELECT
                    content_item_id,
                    {select_statements}
                FROM ({ts_query}) zzzz
                WHERE content_item_id in (
                    SELECT
                        distinct(content_item_id)
                    FROM content_metric_timeseries
                    WHERE updated > '{last_updated}'
                    )
                GROUP BY content_item_id
                ) t1
            ) t2
        """.format(**qkw)
    db.session.execute(q)
    db.session.commit()
    return True
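The aggregation statements come straight from org.content_timeseries_metric_rollups, and each entry only needs agg and name keys to satisfy summary_pattern. With a hypothetical rollup config (the real metric names are not shown in these examples), the loop produces:

# Hypothetical rollup config; the real one lives on
# org.content_timeseries_metric_rollups.
rollups = {
    'pageviews': {'agg': 'sum', 'name': 'pageviews'},
    'avg_time_on_page': {'agg': 'avg', 'name': 'avg_time_on_page'},
}

summary_pattern = "{agg}({name}) AS {name}"
select_statements = [summary_pattern.format(**m) for m in rollups.values()]
print(",\n".join(select_statements))
# sum(pageviews) AS pageviews,
# avg(avg_time_on_page) AS avg_time_on_page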
Example #9
def list_content_timeseries(user, org):
    """
    Query the content timeseries for an entire org.
    """

    # query content by:
    incl_cids, excl_cids = \
        arg_list('ids', typ=int, exclusions=True, default=['all'])
    incl_author_ids, excl_author_ids = \
        arg_list('author_ids', typ=int, exclusions=True, default=[])
    incl_st_ids, excl_st_ids = \
        arg_list('subject_tag_ids', typ=int, exclusions=True, default=[])
    incl_im_ids, excl_im_ids = \
        arg_list('impact_tag_ids', typ=int, exclusions=True, default=[])
    incl_event_ids, excl_event_ids = \
        arg_list('event_ids', typ=int, exclusions=True, default=[])
    # track whether any filter has been applied.
    has_filter = False

    # get all cids
    all_cids = copy.copy(org.content_item_ids)

    # add in cids
    cids = []

    # include authors
    if len(incl_author_ids):
        has_filter = True
        res = db.session.query(content_items_authors.c.content_item_id)\
            .filter(content_items_authors.c.author_id.in_(incl_author_ids))\
            .all()
        for r in res:
            if r[0] not in cids:
                cids.append(r[0])

    # exclude authors
    if len(excl_author_ids):
        has_filter = True
        res = db.session.query(content_items_authors.c.content_item_id)\
            .filter(~content_items_authors.c.author_id.in_(excl_author_ids))\
            .all()
        for r in res:
            if r[0] in all_cids:
                all_cids.remove(r[0])

    # include subject tags
    if len(incl_st_ids):
        has_filter = True
        res = db.session.query(content_items_tags.c.content_item_id)\
            .filter(content_items_tags.c.tag_id.in_(incl_st_ids))\
            .all()
        for r in res:
            if r[0] not in cids:
                cids.append(r[0])

    # exclude subject tags
    if len(excl_st_ids):
        has_filter = True
        res = db.session.query(content_items_tags.c.content_item_id)\
            .filter(~content_items_tags.c.tag_id.in_(excl_st_ids))\
            .all()
        for r in res:
            if r[0] in all_cids:
                all_cids.remove(r[0])

    # include events
    if len(incl_event_ids):
        has_filter = True
        res = db.session.query(content_items_events.c.content_item_id)\
            .filter(content_items_events.c.event_id.in_(incl_event_ids))\
            .all()
        for r in res:
            if r[0] not in cids:
                cids.append(r[0])

    # exclude events
    if len(excl_event_ids):
        has_filter = True
        res = db.session.query(content_items_events.c.content_item_id)\
            .filter(~content_items_events.c.event_id.in_(excl_event_ids))\
            .all()
        for r in res:
            if r[0] in all_cids:
                all_cids.remove(r[0])

    # include impact tags
    if len(incl_im_ids):
        has_filter = True
        res = db.session\
            .query(distinct(content_items_events.c.content_item_id))\
            .outerjoin(events_tags, events_tags.c.event_id == content_items_events.c.event_id)\
            .filter(events_tags.c.tag_id.in_(incl_im_ids))\
            .all()
        for r in res:
            if r[0] not in cids:
                cids.append(r[0])

    # exclude impact tags
    if len(excl_im_ids):
        has_filter = True
        res = db.session\
            .query(distinct(content_items_events.c.content_item_id))\
            .outerjoin(events_tags, events_tags.c.event_id == content_items_events.c.event_id)\
            .filter(~events_tags.c.tag_id.in_(excl_im_ids))\
            .all()
        for r in res:
            if r[0] in all_cids:
                all_cids.remove(r[0])

    # drop any included cids that were removed by an exclusion filter:
    cids = [c for c in cids if c in all_cids]

    if has_filter and not len(cids):
        raise NotFoundError(
            'Could not find Content Item Ids that matched the input parameters'
        )

    elif not has_filter and not len(cids):
        if 'all' in incl_cids:
            cids.extend(all_cids)
        else:
            has_filter = True
            cids.extend(incl_cids)

        # remove explicitly excluded cids
        if 'all' not in excl_cids:
            has_filter = True
            for c in excl_cids:
                if c in cids:
                    cids.remove(c)

    # execute the query.
    kw = request_ts(unit='day', group_by_id=True)
    q = QueryContentMetricTimeseries(org, cids, **kw)
    return jsonify(list(q.execute()))
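All of the include/exclude plumbing above reduces to two collections: all_cids starts as every content item id in the org and shrinks whenever an exclusion filter matches, while cids grows whenever an inclusion filter matches and is finally intersected with all_cids. The same flow on plain lists, with made-up ids and no database:

# Toy reproduction of the include/exclude flow above, using made-up ids.
all_cids = [1, 2, 3, 4, 5]   # every content item id in the org
cids = []

# an inclusion filter (say, an author_ids match) returned items 2 and 4
for cid in [2, 4]:
    if cid not in cids:
        cids.append(cid)

# an exclusion filter knocked item 4 out of the org-wide pool
all_cids.remove(4)

# keep only the included ids that survived every exclusion
cids = [c for c in cids if c in all_cids]
print(cids)  # [2]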