def api_collection_sources(collection_id):
    """Return all media sources tagged with the given collection id."""
    # int() acts as a numeric sanity check on the id (raises ValueError if not
    # numeric); its return value is intentionally discarded.
    int(collection_id)
    media_in_collection = media_with_tag(user_mediacloud_key(), collection_id)
    add_user_favorite_flag_to_sources(media_in_collection)
    return jsonify({'tags_id': collection_id, 'sources': media_in_collection})
def api_media_sources_by_ids():
    """Look up details for each media source id in the comma-separated 'src[]' arg."""
    requested_ids = request.args['src[]'].split(',')
    source_list = [_media_source_details(media_id) for media_id in requested_ids]
    add_user_favorite_flag_to_sources(source_list)
    return jsonify({'results': source_list})
def api_media_sources_by_ids():
    """Fetch media-source details for every id listed in the 'src[]' query arg."""
    id_strings = request.args['src[]'].split(',')
    details = list(map(_media_source_details, id_strings))
    add_user_favorite_flag_to_sources(details)
    return jsonify({'results': details})
def api_collection_sources(collection_id):
    """List the media sources that belong to this collection."""
    int(collection_id)  # numeric sanity check — raises ValueError on bad input
    sources = media_with_tag(user_mediacloud_key(), collection_id)
    add_user_favorite_flag_to_sources(sources)
    results = {'tags_id': collection_id}
    results['sources'] = sources
    return jsonify(results)
def api_collections_by_ids():
    """Return a trimmed source listing for every collection id in the 'coll[]' arg."""
    # only these fields are sent back to the client, in this order
    wanted_keys = ('media_id', 'name', 'url', 'public_notes')
    sources_list = []
    for tags_id in request.args['coll[]'].split(','):
        trimmed = [{k: m[k] for k in wanted_keys}
                   for m in media_with_tag(user_mediacloud_key(), tags_id)]
        add_user_favorite_flag_to_sources(trimmed)
        sources_list.extend(trimmed)
    return jsonify({'results': sources_list})
def api_collections_by_ids():
    """Collect a reduced view of the sources in each requested collection."""
    requested_tags = request.args['coll[]'].split(',')
    sources_list = []
    for tags_id in requested_tags:
        media_for_tag = media_with_tag(user_mediacloud_key(), tags_id)
        # trim each source down to the handful of fields the client needs
        info = [{'media_id': m['media_id'],
                 'name': m['name'],
                 'url': m['url'],
                 'public_notes': m['public_notes']} for m in media_for_tag]
        add_user_favorite_flag_to_sources(info)
        sources_list += info
    return jsonify({'results': sources_list})
def api_media_search(search_str):
    """Search media sources by name; '*' means no name filter.

    Only the first tag in the optional 'tags[]' query arg is used as a filter.
    """
    query = None if search_str == '*' else search_str
    tag_filter = request.args['tags[]'].split(',') if 'tags[]' in request.args else None
    if tag_filter is None:
        matches = media_search(query)
    else:
        matches = media_search(query, tags_id=tag_filter[0])
    source_list = matches[:MAX_SOURCES]
    add_user_favorite_flag_to_sources(source_list)
    return jsonify({'list': source_list})
def api_media_search(search_str):
    """Search for media sources matching search_str ('*' disables the name filter)."""
    cleaned = search_str if search_str != '*' else None
    if 'tags[]' in request.args:
        # only the first of any comma-separated tags is used
        first_tag = request.args['tags[]'].split(',')[0]
        source_list = media_search(cleaned, tags_id=first_tag)[:MAX_SOURCES]
    else:
        source_list = media_search(cleaned)[:MAX_SOURCES]
    add_user_favorite_flag_to_sources(source_list)
    return jsonify({'list': source_list})
def api_media_source_details(media_id):
    """Return full details for one media source, including health and scrape status."""
    health = _cached_media_source_health(user_mediacloud_key(), media_id)
    info = _media_source_details(media_id)
    info['health'] = health
    user_mc = user_admin_mediacloud_client()
    # only media editors get to see whether a feed scrape is currently running
    if user_has_auth_role(ROLE_MEDIA_EDIT):
        info['scrape_status'] = user_mc.feedsScrapeStatus(media_id)
    else:
        info['scrape_status'] = None
    add_user_favorite_flag_to_sources([info])
    add_user_favorite_flag_to_collections(info['media_source_tags'])
    return jsonify(info)
def api_collection_sources(collection_id):
    """List a collection's sources, optionally augmented with feed info for editors."""
    want_details = request.args.get('details') == 'true'
    media_in_collection = media_with_tag(user_mediacloud_key(), collection_id)
    add_user_favorite_flag_to_sources(media_in_collection)
    # feed-level details are fetched only on request, and only for media editors
    if want_details and user_has_auth_role(ROLE_MEDIA_EDIT):
        media_in_collection = fetch_collection_source_feed_info(media_in_collection)
    return jsonify({'tags_id': collection_id, 'sources': media_in_collection})
def api_media_source_details(media_id):
    """Return one media source's details (health, scrape status) and log the view."""
    health = _cached_media_source_health(user_mediacloud_key(), media_id)
    info = _media_source_details(media_id)
    info['health'] = health
    user_mc = user_admin_mediacloud_client()
    # scrape status (is a scrape running?) is visible to media editors only
    info['scrape_status'] = (user_mc.feedsScrapeStatus(media_id)
                             if user_has_auth_role(ROLE_MEDIA_EDIT) else None)
    add_user_favorite_flag_to_sources([info])
    add_user_favorite_flag_to_collections(info['media_source_tags'])
    # record that this source was viewed in the source manager
    analytics_db.increment_count(analytics_db.TYPE_MEDIA, media_id,
                                 analytics_db.ACTION_SOURCE_MGR_VIEW)
    return jsonify(info)
def api_collection_sources(collection_id):
    """List a collection's sources; for editing users, optionally add scrape/feed info.

    When the 'details' query arg is 'true' and the user holds the media-edit role,
    each source is augmented with its latest scrape job and active feed count,
    computed in parallel by a process pool.
    """
    add_in_details = ('details' in request.args) and (request.args['details'] == 'true')
    results = {'tags_id': collection_id}
    media_in_collection = media_with_tag(user_mediacloud_key(), collection_id)
    add_user_favorite_flag_to_sources(media_in_collection)
    if add_in_details and user_has_auth_role(ROLE_MEDIA_EDIT):
        # for editing users, add in last scrape and active feed count (if requested)
        pool = Pool(processes=FEED_SCRAPE_JOB_POOL_SIZE)
        try:
            jobs = [m['media_id'] for m in media_in_collection]
            job_results = pool.map(_media_list_edit_worker, jobs)  # blocks until all done
        finally:
            # BUG FIX: previously the pool was only terminated on the success path,
            # leaking worker processes whenever a worker raised; terminate it always
            pool.terminate()
        job_by_media_id = {j['media_id']: j for j in job_results}
        for m in media_in_collection:
            m['latest_scrape_job'] = job_by_media_id[m['media_id']]['latest_scrape_job']
            m['active_feed_count'] = job_by_media_id[m['media_id']]['active_feed_count']
    results['sources'] = media_in_collection
    return jsonify(results)