Code example #1
0
def collection_source_split_stories(collection_id):
    """Return JSON story-count time series for a collection.

    Produces two series: one over all matching stories, and one that
    excludes spidered stories when the request carries a
    'separate_spidered' arg. Without that arg both series are computed
    from the same query and are therefore identical.
    """
    base_query = "tags_id_media:{}".format(collection_id)
    if 'separate_spidered' in request.args:
        filtered_query = " tags_id_media:{} AND NOT tags_id_stories:{}".format(
            str(collection_id), TAG_SPIDERED_STORY)
    else:
        filtered_query = base_query
    interval = 'day'  # default, and not currently passed to the calls above

    full_counts = apicache.split_story_count(user_mediacloud_key(), base_query)
    # identical to full_counts when the caller did not ask to exclude spidered
    filtered_counts = apicache.split_story_count(user_mediacloud_key(),
                                                 filtered_query)

    return jsonify({
        'results': {
            'all_stories': {
                'total_story_count': full_counts['total_story_count'],
                'list': full_counts['counts'],
            },
            'partial_stories': {
                'total_story_count': filtered_counts['total_story_count'],
                'list': filtered_counts['counts'],
            },
            'interval': interval
        }
    })
Code example #2
0
def stream_split_stories_csv(user_mc_key, filename, q):
    """Stream a CSV (date, numStories) of per-day story counts for query *q*,
    sorted chronologically by date."""
    splits = apicache.split_story_count(user_mc_key, q)['counts']
    rows = [
        {'date': trim_solr_date(item['date']), 'numStories': item['count']}
        for item in splits
    ]
    rows.sort(key=itemgetter('date'))
    columns = ['date', 'numStories']
    return csv.stream_response(rows, columns, filename)
Code example #3
0
def _source_story_split_count_job(info):
    """Worker job: summarize one media source as a dict of id/name/url plus
    its total story count and per-interval story counts over time."""
    source = info['media']
    query = "media_id:{}".format(source['media_id'])
    counts = apicache.split_story_count(user_mediacloud_key(), query, 360)
    return {
        'media_id': source['media_id'],
        'media_name': source['name'],
        'media_url': source['url'],
        'total_story_count': counts['total_story_count'],
        'splits_over_time': counts['counts'],
    }