Example #1
def api_explorer_sentences_list():
    around_word = 'word' in request.form
    if only_queries_reddit(request.form):
        start_date, end_date = parse_query_dates(request.form)
        provider = RedditPushshiftProvider()
        results = provider.samples(query=request.form['q'],
                                   start_date=start_date,
                                   end_date=end_date,
                                   subreddits=NEWS_SUBREDDITS)
        results = [{
            'sentence': r['title'],
            'publish_date': r['publish_date'],
            'story': r,
        } for r in results]
    else:
        solr_q, solr_fq = parse_query_with_keywords(request.form)
        # so we can support large samples or just a few to show
        rows = int(request.form['rows']) if 'rows' in request.form else 10
        results = apicache.sentence_list(solr_q,
                                         solr_fq,
                                         rows=rows,
                                         include_stories=(not around_word))
    if around_word:
        word = request.form['word']
        results = [
            _sentence_fragment_around(word, s['sentence']) for s in results
            if s['sentence'] is not None
        ]
        results = [s for s in results if s is not None]
    return jsonify({'results': results})
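The _sentence_fragment_around helper is used above but not shown. A minimal sketch of what such a helper might do, assuming it returns a short window of words around the first match and None when the word is absent (which is why the results are filtered for None afterwards); only the name comes from the example, the body is hypothetical:

import re

def _sentence_fragment_around(word, sentence, window=5):
    # Hypothetical sketch: return roughly `window` words on either side of the
    # first case-insensitive occurrence of `word`, or None if it never appears.
    tokens = sentence.split()
    for idx, token in enumerate(tokens):
        if re.sub(r'\W', '', token).lower() == word.lower():
            start = max(0, idx - window)
            return ' '.join(tokens[start:idx + window + 1])
    return None

# Mirrors the list comprehensions above, which drop None results.
sentences = [{'sentence': 'Climate change dominated the news cycle this week'},
             {'sentence': 'Sports scores and nothing else'}]
fragments = [_sentence_fragment_around('news', s['sentence']) for s in sentences]
print([f for f in fragments if f is not None])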
Example #2
def explorer_story_count_csv():
    filename = 'total-story-count'
    data = request.form
    if 'searchId' in data:
        queries = SAMPLE_SEARCHES[data['searchId']]['queries']
    else:
        queries = json.loads(data['queries'])
    label = " ".join([q['label'] for q in queries])
    filename = file_name_for_download(label, filename)
    # now compute total attention for all results
    story_count_results = []
    for q in queries:
        if (len(q['collections']) == 0) and only_queries_reddit(q['sources']):
            start_date, end_date = parse_query_dates(q)
            story_counts = pushshift.reddit_submission_normalized_and_split_story_count(query=q['q'],
                                                                                        start_date=start_date,
                                                                                        end_date=end_date,
                                                                                        subreddits=pushshift.NEWS_SUBREDDITS)
        else:
            solr_q, solr_fq = parse_query_with_keywords(q)
            solr_open_query = concatenate_query_for_solr(solr_seed_query='*', media_ids=q['sources'],
                                                         tags_ids=q['collections'])
            story_counts = apicache.normalized_and_story_count(solr_q, solr_fq, solr_open_query)
        story_count_results.append({
            'query': q['label'],
            'matching_stories': story_counts['total'],
            'total_stories': story_counts['normalized_total'],
            'ratio': float(story_counts['total']) / float(story_counts['normalized_total'])
        })
    props = ['query', 'matching_stories', 'total_stories', 'ratio']
    return csv.stream_response(story_count_results, props, filename)
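The view reads its query list from a JSON-encoded 'queries' form field. A hedged sketch of what that payload could look like; the key names inside each query dict (label, q, sources, collections, plus whatever date fields parse_query_dates expects) and the collection id are placeholders, not values taken from the real API:

import json

# Hypothetical form payload for the view above; startDate/endDate and the
# collection id 12345 are assumptions for illustration only.
queries = [
    {'label': 'solar', 'q': 'solar energy', 'sources': [], 'collections': [12345],
     'startDate': '2021-01-01', 'endDate': '2021-02-01'},
    {'label': 'wind', 'q': 'wind energy', 'sources': [], 'collections': [12345],
     'startDate': '2021-01-01', 'endDate': '2021-02-01'},
]
form_data = {'queries': json.dumps(queries)}
print(form_data['queries'][:80])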
Example #3
def api_explorer_story_sample():
    if only_queries_reddit(request.form):
        start_date, end_date = parse_query_dates(request.form)
        provider = RedditPushshiftProvider()
        results = provider.sample(query=request.form['q'],
                                  start_date=start_date, end_date=end_date,
                                  subreddits=NEWS_SUBREDDITS)
    else:
        solr_q, solr_fq = parse_query_with_keywords(request.form)
        results = base_cache.story_list(None, solr_q, solr_fq, rows=SAMPLE_STORY_COUNT,
                                        sort=MediaCloud.SORT_RANDOM)
        for story in results:  # add in media info so we can show it to user if they click into the drill-down
            story["media"] = base_cache.media(story["media_id"])
    return jsonify({"results": results})
Example #4
def api_explorer_story_split_count():
    start_date, end_date = parse_query_dates(request.form)
    if only_queries_reddit(request.form):
        provider = RedditPushshiftProvider()
        results = provider.normalized_count_over_time(query=request.form['q'],
                                                      start_date=start_date, end_date=end_date,
                                                      subreddits=NEWS_SUBREDDITS)
    else:
        # get specific stories by keyword
        solr_q, _solr_fq = parse_query_with_keywords(request.form)
        # get all the stories (no keyword) so we can support normalization
        solr_open_query = concatenate_query_for_solr(solr_seed_query='*',
                                                     media_ids=request.form['sources'],
                                                     tags_ids=request.form['collections'],
                                                     custom_ids=request.form['searches'])
        results = apicache.normalized_and_story_split_count(solr_q, solr_open_query, start_date, end_date)
    return jsonify({'results': results})
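normalized_and_story_split_count pairs the keyword query with the open (match-everything) query so each date's count can be expressed as a share of all stories. A rough standalone sketch of that normalization, using the count/total_count/ratio row shape seen in the CSV examples below; the real apicache helper may differ:

def normalize_split_counts(matching, totals):
    # matching / totals: dicts mapping a date string to a story count.
    rows = []
    for date in sorted(totals):
        total = totals[date]
        count = matching.get(date, 0)
        rows.append({'date': date, 'count': count, 'total_count': total,
                     'ratio': (count / total) if total else 0})
    return {'counts': rows,
            'total': sum(matching.values()),
            'normalized_total': sum(totals.values())}

print(normalize_split_counts({'2021-01-01': 5}, {'2021-01-01': 50, '2021-01-02': 40}))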
Example #5
def api_explorer_combined_story_split_count_csv():
    filename = 'stories-over-time'
    data = request.form
    queries = json.loads(data['queries'])
    label = " ".join([q['label'] for q in queries])
    filename = file_name_for_download(label, filename)
    # now compute total attention for all results
    story_count_results = []
    for q in queries:
        start_date, end_date = parse_query_dates(q)
        if (len(q['collections']) == 0) and only_queries_reddit(q['sources']):
            provider = RedditPushshiftProvider()
            story_counts = provider.normalized_count_over_time(
                query=q['q'],
                start_date=start_date,
                end_date=end_date,
                subreddits=NEWS_SUBREDDITS)
        else:
            solr_q, solr_fq = parse_query_with_keywords(q)
            solr_open_query = concatenate_query_for_solr(
                solr_seed_query='*',
                media_ids=q['sources'],
                tags_ids=q['collections'],
                custom_ids=q['searches'])
            story_counts = apicache.normalized_and_story_split_count(
                solr_q, solr_open_query, start_date, end_date)
        story_count_results.append({
            'label': q['label'],
            'by_date': story_counts['counts'],
        })
    # now combine them by date
    data = []
    dates = [d['date'] for d in story_count_results[0]['by_date']]
    for idx in range(len(dates)):
        row = {'date': dates[idx]}
        for q in story_count_results:
            row[q['label'] + '-count'] = q['by_date'][idx]['count']
            row[q['label'] + '-total_count'] = q['by_date'][idx]['total_count']
            row[q['label'] + '-ratio'] = q['by_date'][idx]['ratio']
        data.append(row)
    props = ['date'] + [q['label'] + '-count' for q in queries] + [
        q['label'] + '-total_count' for q in queries
    ] + [q['label'] + '-ratio' for q in queries]
    return csv.stream_response(data, props, filename)
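The combine-by-date step above builds one wide row per date with label-prefixed columns, and it relies on every query returning the same dates in the same order (the join is by list index). The same logic run standalone on toy data:

story_count_results = [
    {'label': 'solar', 'by_date': [
        {'date': '2021-01-01', 'count': 3, 'total_count': 30, 'ratio': 0.10},
        {'date': '2021-01-02', 'count': 6, 'total_count': 40, 'ratio': 0.15}]},
    {'label': 'wind', 'by_date': [
        {'date': '2021-01-01', 'count': 1, 'total_count': 30, 'ratio': 0.03},
        {'date': '2021-01-02', 'count': 2, 'total_count': 40, 'ratio': 0.05}]},
]
dates = [d['date'] for d in story_count_results[0]['by_date']]
data = []
for idx in range(len(dates)):
    row = {'date': dates[idx]}
    for q in story_count_results:
        row[q['label'] + '-count'] = q['by_date'][idx]['count']
        row[q['label'] + '-total_count'] = q['by_date'][idx]['total_count']
        row[q['label'] + '-ratio'] = q['by_date'][idx]['ratio']
    data.append(row)
print(data[0])
# {'date': '2021-01-01', 'solar-count': 3, 'solar-total_count': 30,
#  'solar-ratio': 0.1, 'wind-count': 1, 'wind-total_count': 30, 'wind-ratio': 0.03}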
Example #6
def api_explorer_story_split_count():
    search_id = int(request.args['search_id']) if 'search_id' in request.args else None
    start_date, end_date = parse_query_dates(request.args)
    if only_queries_reddit(request.args):
        results = pushshift.reddit_submission_normalized_and_split_story_count(query=request.args['q'],
                                                                               start_date=start_date, end_date=end_date,
                                                                               subreddits=pushshift.NEWS_SUBREDDITS)
    else:
        # get specific stories by keyword
        if isinstance(search_id, int) and search_id not in [None, -1]:
            solr_q, solr_fq = parse_as_sample(search_id, request.args['index'])
        else:
            solr_q, solr_fq = parse_query_with_keywords(request.args)
        # get all the stories (no keyword) so we can support normalization
        solr_open_query = concatenate_query_for_solr(solr_seed_query='*',
                                                     media_ids=request.args['sources'],
                                                     tags_ids=request.args['collections'])
        results = apicache.normalized_and_story_split_count(solr_q, solr_open_query, start_date, end_date)
    return jsonify({'results': results})
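The search_id handling is a small pattern worth calling out: an optional numeric query-string parameter, with None and -1 treated as "no saved search selected". The same guard extracted as a standalone helper, with a plain dict standing in for request.args:

def optional_int(args, key, sentinels=(None, -1)):
    # Return an int for key if it is present and not a sentinel, else None.
    if key not in args:
        return None
    value = int(args[key])
    return None if value in sentinels else value

print(optional_int({'search_id': '2'}, 'search_id'))   # 2
print(optional_int({'search_id': '-1'}, 'search_id'))  # None (sentinel)
print(optional_int({}, 'search_id'))                    # None (absent)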
Example #7
def explorer_stories_csv():
    logger.info(flask_login.current_user.name)
    filename = 'all-story-urls'
    data = request.form
    q = json.loads(data['q'])
    filename = file_name_for_download(q['label'], filename)
    # now compute total attention for all results
    if (len(q['collections']) == 0) and only_queries_reddit(q['sources']):
        start_date, end_date = parse_query_dates(q)
        provider = RedditPushshiftProvider()
        stories = provider.sample(query=q['q'], limit=2000,
                                  start_date=start_date, end_date=end_date,
                                  subreddits=NEWS_SUBREDDITS)
        props = ['stories_id', 'subreddit', 'publish_date', 'score', 'last_updated', 'title', 'url', 'full_link',
                 'author']
        return csv.stream_response(stories, props, filename)
    else:
        solr_q, solr_fq = parse_query_with_keywords(q)
        # now page through all the stories and download them
        return _stream_story_list_csv(filename, solr_q, solr_fq)
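_stream_story_list_csv is referenced but not shown; large result sets are typically paged rather than fetched in one call. A hypothetical sketch of that kind of pager, assuming pages are keyed by a processed_stories_id cursor (an assumption, not the real helper):

def iter_all_stories(fetch_page, page_size=1000):
    # Repeatedly call fetch_page(last_id, page_size) until a page comes back empty.
    last_id = 0
    while True:
        page = fetch_page(last_id, page_size)
        if not page:
            break
        for story in page:
            yield story
        last_id = page[-1]['processed_stories_id']

# Toy fetcher over an in-memory list, keyed by processed_stories_id.
STORIES = [{'processed_stories_id': i, 'url': 'https://example.org/%d' % i} for i in range(1, 6)]
def fake_fetch(last_id, limit):
    return [s for s in STORIES if s['processed_stories_id'] > last_id][:limit]

print(sum(1 for _ in iter_all_stories(fake_fetch, page_size=2)))  # 5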
Example #8
def api_explorer_story_split_count_csv():
    filename = 'stories-over-time'
    data = request.form
    q = json.loads(data['q'])
    filename = file_name_for_download(q['label'], filename)
    # now compute total attention for all results
    start_date, end_date = parse_query_dates(q)
    if (len(q['collections']) == 0) and only_queries_reddit(q['sources']):
        provider = RedditPushshiftProvider()
        story_counts = provider.normalized_count_over_time(query=q['q'],
                                                           start_date=start_date,
                                                           end_date=end_date,
                                                           subreddits=NEWS_SUBREDDITS)
    else:
        solr_q, _solr_fq = parse_query_with_keywords(q)
        solr_open_query = concatenate_query_for_solr(solr_seed_query='*',
                                                     media_ids=q['sources'],
                                                     tags_ids=q['collections'],
                                                     custom_ids=q['searches'])
        story_counts = apicache.normalized_and_story_split_count(solr_q, solr_open_query, start_date, end_date)
    props = ['date', 'count', 'total_count', 'ratio']
    return csv.stream_response(story_counts['counts'], props, filename)
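The csv used here appears to be the project's own streaming helper rather than the standard-library module: stream_response takes a list of dicts, a props list (presumably the column order), and a download filename. A rough standard-library equivalent of what gets written, using the same rows and props:

import csv as stdlib_csv
import io

story_counts = {'counts': [
    {'date': '2021-01-01', 'count': 3, 'total_count': 30, 'ratio': 0.10},
    {'date': '2021-01-02', 'count': 6, 'total_count': 40, 'ratio': 0.15},
]}
props = ['date', 'count', 'total_count', 'ratio']

buffer = io.StringIO()
writer = stdlib_csv.DictWriter(buffer, fieldnames=props)
writer.writeheader()
writer.writerows(story_counts['counts'])
print(buffer.getvalue())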
Example #9
def api_explorer_story_split_count_csv():
    filename = 'stories-over-time'
    data = request.form
    if 'searchId' in data:
        filename = filename  # don't have this info + current_query['q']
        q = SAMPLE_SEARCHES[data['index']]
    else:
        q = json.loads(data['q'])
    filename = file_name_for_download(q['label'], filename)
    # now compute total attention for all results
    start_date, end_date = parse_query_dates(q)
    if (len(q['collections']) == 0) and only_queries_reddit(q['sources']):
        story_counts = pushshift.reddit_submission_normalized_and_split_story_count(query=q['q'],
                                                                                    start_date=start_date,
                                                                                    end_date=end_date,
                                                                                    subreddits=pushshift.NEWS_SUBREDDITS)
    else:
        solr_q, solr_fq = parse_query_with_keywords(q)
        solr_open_query = concatenate_query_for_solr(solr_seed_query='*', media_ids=q['sources'],
                                                     tags_ids=q['collections'])
        story_counts = apicache.normalized_and_story_split_count(solr_q, solr_open_query, start_date, end_date)
    props = ['date', 'count', 'total_count', 'ratio']
    return csv.stream_response(story_counts['counts'], props, filename)
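This variant and Example #2 both fall back to SAMPLE_SEARCHES when the client sends a searchId, but they index it differently (Example #2 reads a 'queries' list from the entry, while here the entry looked up by data['index'] is treated as a single query dict). A hypothetical sketch of a layout that would satisfy the Example #2 lookup; the field values are placeholders:

# Inferred shape only; the real SAMPLE_SEARCHES constant is not shown in these examples.
SAMPLE_SEARCHES = [
    {
        'id': 0,
        'queries': [
            {'label': 'election', 'q': 'election', 'sources': [], 'collections': [12345],
             'searches': [], 'startDate': '2021-01-01', 'endDate': '2021-02-01'},
        ],
    },
]

print(SAMPLE_SEARCHES[0]['queries'][0]['label'])  # election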