def browse_release_groups(artist_id=None, release_types=None, limit=None, offset=None):
    """Get all release groups linked to an artist.

    You need to provide artist's MusicBrainz ID.

    Returns:
        Tuple of (release group count, list of release groups), or None when
        the artist does not exist (404 from the web service).
    """
    if release_types is None:
        release_types = []
    cache_key = cache.gen_key(artist_id, limit, offset, *release_types)
    result = cache.get(cache_key)
    if result:
        return result
    try:
        response = musicbrainzngs.browse_release_groups(
            artist=artist_id,
            release_type=release_types,
            limit=limit,
            offset=offset,
        )
    except ResponseError as e:
        if e.cause.code == 404:
            return None
        raise InternalServerError(e.cause.msg)
    result = (response.get('release-group-count'), response.get('release-group-list'))
    cache.set(key=cache_key, val=result, time=DEFAULT_CACHE_EXPIRATION)
    return result
def browse_release_groups(*, artist_id, release_types=None, limit=None, offset=None):
    """Get all release groups linked to an artist.

    Args:
        artist_id (uuid): MBID of the artist.
        release_types (list): List of types of release groups to be fetched.
        limit (int): Max number of release groups to return.
        offset (int): Offset that can be used in conjunction with the limit.

    Returns:
        Tuple containing the list of dictionaries of release groups ordered
        by release year and the total count of the release groups.
    """
    artist_id = str(artist_id)
    if release_types is None:
        release_types = []
    # The database stores capitalized type names (e.g. "Album").
    release_types = [release_type.capitalize() for release_type in release_types]

    cache_key = cache.gen_key(artist_id, limit, offset, *release_types)
    result = cache.get(cache_key)
    if result:
        return result

    result = db.get_release_groups_for_artist(
        artist_id=artist_id,
        release_types=release_types,
        limit=limit,
        offset=offset,
    )
    cache.set(key=cache_key, val=result, time=DEFAULT_CACHE_EXPIRATION)
    return result
def _fetch_access_token(refresh=False) -> str:
    """Return a Spotify API access token, caching it until it expires.

    Uses the OAuth client-credentials flow:
    https://developer.spotify.com/web-api/authorization-guide/#client-credentials-flow

    Args:
        refresh: When True, bypass the cached token and request a new one.
    """
    key = cache.gen_key("spotify_oauth_access_token")
    token = cache.get(key)
    if token and not refresh:
        return token

    client_id = app.config.get("SPOTIFY_CLIENT_ID")
    client_secret = app.config.get("SPOTIFY_CLIENT_SECRET")
    credentials = f"{client_id}:{client_secret}".encode("utf-8")
    auth_value = b64encode(credentials).decode("utf-8")
    response = requests.post(
        "https://accounts.spotify.com/api/token",
        data={"grant_type": "client_credentials"},
        headers={"Authorization": f"Basic {auth_value}"},
    ).json()

    token = response.get("access_token")
    if not token:
        raise SpotifyException("Could not fetch access token for Spotify API")

    # Making the token stored in cache expire at the same time as the actual token
    cache.set(key=key, val=token, time=response.get("expires_in", 10))
    return token
def browse_release_groups(*, artist_id, release_types=None, limit=None, offset=None):
    """Get all release groups linked to an artist.

    Args:
        artist_id (uuid): MBID of the artist.
        release_types (list): List of types of release groups to be fetched.
        limit (int): Max number of release groups to return.
        offset (int): Offset that can be used in conjunction with the limit.

    Returns:
        Tuple containing the list of dictionaries of release groups ordered by release year
        and the total count of the release groups.
    """
    artist_id = str(artist_id)
    # Per-release-group extra data passed to the serializer below.
    includes_data = defaultdict(dict)
    if release_types is None:
        release_types = []
    # The database stores capitalized type names (e.g. "Album").
    release_types = [release_type.capitalize() for release_type in release_types]
    key = cache.gen_key(artist_id, limit, offset, *release_types)
    release_groups = cache.get(key)
    if not release_groups:
        with mb_session() as db:
            release_groups_query = _browse_release_groups_query(db, artist_id, release_types)
            # Total count is taken before LIMIT/OFFSET are applied.
            count = release_groups_query.count()
            # Sort: rows with an unknown release year last, otherwise newest first.
            release_groups = release_groups_query.order_by(
                case([(models.ReleaseGroupMeta.first_release_date_year.is_(None), 1)], else_=0),
                models.ReleaseGroupMeta.first_release_date_year.desc()
            ).limit(limit).offset(offset).all()
            for release_group in release_groups:
                includes_data[release_group.id]['meta'] = release_group.meta
            # Result shape: ([release group dicts], total count).
            release_groups = ([to_dict_release_groups(release_group, includes_data[release_group.id])
                               for release_group in release_groups], count)
        cache.set(key=key, val=release_groups, time=DEFAULT_CACHE_EXPIRATION)
    return release_groups
def browse_releases(artist_id=None, release_group=None, release_types=None,
                    limit=None, offset=None, includes=None):
    """Get all the releases by a certain artist and/or a release group.

    You need to provide an artist's MusicBrainz ID or the Release Group's
    MusicBrainz ID.

    Args:
        artist_id: MBID of the artist (optional).
        release_group: MBID of the release group (optional).
        release_types (list): Types of releases to fetch.
        limit (int): Max number of releases to return.
        offset (int): Offset used in conjunction with the limit.
        includes (list): Extra data to include in the response.

    Returns:
        List of releases, or None when nothing was found (404).
    """
    if release_types is None:
        release_types = []
    if includes is None:
        # BUG FIX: `*includes` below raised TypeError whenever `includes`
        # was left at its default of None.
        includes = []
    key = cache.gen_key(artist_id, release_group, limit, offset,
                        *release_types, *includes)
    releases = cache.get(key)
    if not releases:
        try:
            api_resp = musicbrainzngs.browse_releases(
                artist=artist_id, release_type=release_types, limit=limit,
                offset=offset, release_group=release_group, includes=includes)
            releases = api_resp.get('release-list')
        except ResponseError as e:
            if e.cause.code == 404:
                return None
            raise InternalServerError(e.cause.msg)
        cache.set(key=key, val=releases, time=DEFAULT_CACHE_EXPIRATION)
    return releases
def get_release_group_by_id(mbid):
    """Get release group using the MusicBrainz ID.

    Args:
        mbid (uuid): MBID(gid) of the release group.

    Returns:
        Processed release group information.
    """
    # Prefix the cache key with the entity type so a release-group entry can
    # never collide with another entity type cached under the same MBID
    # (matches the sibling getters that already prefix their keys).
    key = cache.gen_key('release-group', mbid)
    release_group = cache.get(key)
    if not release_group:
        release_group = _get_release_group_by_id(mbid)
        cache.set(key=key, val=release_group, time=DEFAULT_CACHE_EXPIRATION)
    return release_group_rel.process(release_group)
def get_release_group_by_id(mbid):
    """Fetch a single release group by its MusicBrainz ID, using the cache."""
    cache_key = cache.gen_key('release-group', mbid)
    cached = cache.get(cache_key)
    if cached:
        return release_group_rel.process(cached)
    # unknown_entities_for_missing=True returns a placeholder entity instead
    # of raising when the MBID is unknown.
    fetched = db.fetch_multiple_release_groups(
        [mbid],
        includes=['artists', 'releases', 'release-group-rels', 'url-rels', 'tags'],
        unknown_entities_for_missing=True,
    )[mbid]
    cache.set(key=cache_key, val=fetched, time=DEFAULT_CACHE_EXPIRATION)
    return release_group_rel.process(fetched)
def search(query: str, *, item_types="", limit=20, offset=0) -> dict:
    """Search for items (artists, albums, or tracks) by a query string.

    More information is available at
    https://developer.spotify.com/web-api/search-item/.
    """
    cache_key = cache.gen_key(query, item_types, limit, offset)
    cache_namespace = "spotify_search"
    cached = cache.get(cache_key, cache_namespace)
    if cached:
        return cached
    encoded = urllib.parse.quote(query.encode('utf8'))
    result = _get(f"search?q={encoded}&type={item_types}&limit={limit}&offset={offset}")
    cache.set(key=cache_key, namespace=cache_namespace, val=result,
              time=_DEFAULT_CACHE_EXPIRATION)
    return result
def get_artist_by_id(mbid): """Get artist with MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the artist. Returns: Dictionary containing the artist information """ key = cache.gen_key(mbid) artist = cache.get(key) if not artist: artist = _get_artist_by_id(mbid) cache.set(key=key, val=artist, time=DEFAULT_CACHE_EXPIRATION) return artist_rel.process(artist)
def get_event_by_id(mbid): """Get event with the MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the event. Returns: Dictionary containing the event information. """ key = cache.gen_key(mbid) event = cache.get(key) if not event: event = _get_event_by_id(mbid) cache.set(key=key, val=event, time=DEFAULT_CACHE_EXPIRATION) return event
def get_release_by_id(mbid): """Get release with the MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the release. Returns: Dictionary containing the release information. """ key = cache.gen_key(mbid) release = cache.get(key) if not release: release = _get_release_by_id(mbid) cache.set(key=key, val=release, time=DEFAULT_CACHE_EXPIRATION) return release
def get_place_by_id(mbid): """Get place with the MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the place. Returns: Dictionary containing the place information. """ key = cache.gen_key(mbid) place = cache.get(key) if not place: place = _get_place_by_id(mbid) cache.set(key=key, val=place, time=DEFAULT_CACHE_EXPIRATION) return place_rel.process(place)
def get_label_by_id(mbid): """Get label with MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the label. Returns: Dictionary containing the label information """ key = cache.gen_key('label', mbid) label = cache.get(key) if not label: label = db.get_label_by_id(mbid, includes=['artist-rels', 'url-rels'], unknown_entities_for_missing=True) cache.set(key=key, val=label, time=DEFAULT_CACHE_EXPIRATION) return label_rel.process(label)
def get_artist_by_id(mbid): """Get artist with MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the artist. Returns: Dictionary containing the artist information """ key = cache.gen_key(mbid) artist = cache.get(key) if not artist: artist = fetch_multiple_artists( [mbid], includes=['artist-rels', 'url-rels'], ).get(mbid) cache.set(key=key, val=artist, time=DEFAULT_CACHE_EXPIRATION) return artist_rel.process(artist)
def get_release_by_id(mbid): """Get release with MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the release. Returns: Dictionary containing the release information """ key = cache.gen_key('release', mbid) release = cache.get(key) if not release: release = db.fetch_multiple_releases( [mbid], includes=['media', 'release-groups'], ).get(mbid) cache.set(key=key, val=release, time=DEFAULT_CACHE_EXPIRATION) return release
def get_work_by_id(mbid): """Get work with MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the work. Returns: Dictionary containing the work information """ key = cache.gen_key('work', mbid) work = cache.get(key) if not work: work = db.get_work_by_id( mbid, includes=['artist-rels', 'recording-rels'], ) cache.set(key=key, val=work, time=DEFAULT_CACHE_EXPIRATION) return work
def get_label_by_id(mbid): """Get label with MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the label. Returns: Dictionary containing the label information """ key = cache.gen_key(mbid) label = cache.get(key) if not label: label = fetch_multiple_labels( [mbid], includes=['artist-rels', 'url-rels'], ).get(mbid) cache.set(key=key, val=label, time=DEFAULT_CACHE_EXPIRATION) return label_rel.process(label)
def get_artist_by_id(mbid): """Get artist with MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the artist. Returns: Dictionary containing the artist information """ key = cache.gen_key('artist', mbid) artist = cache.get(key) if not artist: artist = db.get_artist_by_id( mbid, includes=['artist-rels', 'url-rels'], unknown_entities_for_missing=True, ) cache.set(key=key, val=artist, time=DEFAULT_CACHE_EXPIRATION) return artist_rel.process(artist)
def get_release_by_id(mbid): """Get release with MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the release. Returns: Dictionary containing the release information """ key = cache.gen_key('release', mbid) release = cache.get(key) if not release: release = db.get_release_by_id( mbid, includes=['media', 'release-groups'], unknown_entities_for_missing=True, ) cache.set(key=key, val=release, time=DEFAULT_CACHE_EXPIRATION) return release
def get_release_by_id(id): """Get release with the MusicBrainz ID. Returns: Release object with the following includes: recordings, media. """ key = cache.gen_key(id) release = cache.get(key) if not release: try: release = musicbrainzngs.get_release_by_id( id, ['recordings', 'media', 'release-groups']).get('release') except ResponseError as e: if e.cause.code == 404: return None else: raise InternalServerError(e.cause.msg) cache.set(key=key, val=release, time=DEFAULT_CACHE_EXPIRATION) return release
def get_place_by_id(mbid): """Get place with the MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the place. Returns: Dictionary containing the place information. """ key = cache.gen_key('place', mbid) place = cache.get(key) if not place: place = db.fetch_multiple_places( [mbid], includes=[ 'artist-rels', 'place-rels', 'release-group-rels', 'url-rels' ], ).get(mbid) cache.set(key=key, val=place, time=DEFAULT_CACHE_EXPIRATION) return place_rel.process(place)
def get_artist_by_id(id): """Get artist with the MusicBrainz ID. Returns: Artist object with the following includes: url-rels, artist-rels. """ key = cache.gen_key(id) artist = cache.get(key) if not artist: try: artist = musicbrainzngs.get_artist_by_id( id, ['url-rels', 'artist-rels']).get('artist') except ResponseError as e: if e.cause.code == 404: return None else: raise InternalServerError(e.cause.msg) cache.set(key=key, val=artist, time=DEFAULT_CACHE_EXPIRATION) return artist_rel.process(artist)
def search(query, type, limit=20, offset=0):
    """Get Spotify catalog information about artists, albums, or tracks that
    match a keyword string.

    More information is available at
    https://developer.spotify.com/web-api/search-item/.
    """
    # NOTE: the parameter name `type` shadows the builtin, but it is part of
    # the public interface and cannot be renamed here.
    key = cache.gen_key(query, type, limit, offset)
    namespace = "spotify_search"
    result = cache.get(key, namespace)
    if result:
        return result
    encoded_query = urllib.parse.quote(query.encode('utf8'))
    url = "%s/search?q=%s&type=%s&limit=%s&offset=%s" % (
        BASE_URL, encoded_query, type, str(limit), str(offset))
    result = requests.get(url).json()
    cache.set(key=key, namespace=namespace, val=result,
              time=DEFAULT_CACHE_EXPIRATION)
    return result
def get_place_by_id(id): """Get event with the MusicBrainz ID. Returns: Event object with the following includes: artist-rels, place-rels, series-rels, url-rels. """ key = cache.gen_key(id) place = cache.get(key) if not place: try: place = musicbrainzngs.get_place_by_id(id, [ 'artist-rels', 'place-rels', 'release-group-rels', 'url-rels' ]).get('place') except ResponseError as e: if e.cause.code == 404: return None else: raise InternalServerError(e.cause.msg) cache.set(key=key, val=place, time=DEFAULT_CACHE_EXPIRATION) return place
def get_event_by_id(mbid): """Get event with the MusicBrainz ID. Args: mbid (uuid): MBID(gid) of the event. Returns: Dictionary containing the event information. """ key = cache.gen_key('event', mbid) event = cache.get(key) if not event: event = db.fetch_multiple_events( [mbid], includes=[ 'artist-rels', 'place-rels', 'series-rels', 'url-rels', 'release-group-rels' ], ).get(mbid) cache.set(key=key, val=event, time=DEFAULT_CACHE_EXPIRATION) return event
def get_top_users_overall(): """ Gets top contributors since the beginning Returns: Returns: List of dictionaries where each dictionary has the following structure: { "id": (str), "display_name": (str), "review_count": (int), "comment_count": (int), "vote_count": (int), "score": (int), } """ key = cache.gen_key("top_users_overall", _CACHE_NAMESPACE) top_users = cache.get(key, _CACHE_NAMESPACE) # if could not fetch results from cache, or fetched results have to be updated if not top_users: try: results = get_top_users( review_weight=5, comment_weight=2, vote_weight=1, ) top_users = { "users": results, } cache.set(key=key, val=top_users, namespace=_CACHE_NAMESPACE, time=_DEFAULT_CACHE_EXPIRATION) except db_exceptions.NoDataFoundException: return None return top_users["users"]
def get_release_group_by_id(id):
    """Get release group with the MusicBrainz ID.

    Returns:
        Release group object with the following includes: artists, releases,
        release-group-rels, url-rels, work-rels; None when the release group
        does not exist (404 from the web service).
    """
    cache_key = cache.gen_key(id)
    release_group = cache.get(cache_key)
    if not release_group:
        try:
            response = musicbrainzngs.get_release_group_by_id(id, [
                'artists', 'releases', 'release-group-rels', 'url-rels',
                'work-rels', 'tags'
            ])
        except ResponseError as e:
            if e.cause.code == 404:
                return None
            raise InternalServerError(e.cause.msg)
        release_group = response.get('release-group')
        cache.set(key=cache_key, val=release_group, time=DEFAULT_CACHE_EXPIRATION)
    return release_group_rel.process(release_group)
def get_popular(limit=None): """Get a list of popular reviews. Popularity is determined by 'popularity' of a particular review. popularity is a difference between positive votes and negative. In this case only votes from the last month are used to calculate popularity to make results more varied. Args: limit (int): Maximum number of reviews to return. Returns: Randomized list of popular reviews which are converted into dictionaries using to_dict method. """ cache_key = cache.gen_key("popular_reviews", limit) reviews = cache.get(cache_key, REVIEW_CACHE_NAMESPACE) defined_limit = 4 * limit if limit else None if not reviews: with db.engine.connect() as connection: results = connection.execute( sqlalchemy.text(""" SELECT review.id, review.entity_id, review.entity_type, review.user_id, review.edits, review.is_draft, review.is_hidden, review.license_id, review.language, review.source, review.source_url, SUM( CASE WHEN vote = 't' THEN 1 WHEN vote = 'f' THEN -1 WHEN vote IS NULL THEN 0 END ) AS popularity, latest_revision.id AS latest_revision_id, latest_revision.timestamp AS latest_revision_timestamp, latest_revision.text AS text, latest_revision.rating AS rating FROM review JOIN revision ON revision.review_id = review.id LEFT JOIN ( SELECT revision_id, vote FROM vote WHERE rated_at > :last_month ) AS votes_last_month ON votes_last_month.revision_id = revision.id JOIN ( revision JOIN ( SELECT review.id AS review_uuid, MAX(timestamp) AS latest_timestamp FROM review JOIN revision ON review.id = review_id GROUP BY review.id ) AS latest ON latest.review_uuid = revision.review_id AND latest.latest_timestamp = revision.timestamp ) AS latest_revision ON review.id = latest_revision.review_id WHERE entity_id IN ( SELECT DISTINCT entity_id FROM ( SELECT entity_id FROM review ORDER BY RANDOM() ) AS randomized_entity_ids ) AND latest_revision.text IS NOT NULL GROUP BY review.id, latest_revision.id ORDER BY popularity LIMIT :limit """), { "limit": defined_limit, "last_month": 
datetime.now() - timedelta(weeks=4) }) reviews = results.fetchall() reviews = [dict(review) for review in reviews] if reviews: for review in reviews: review["rating"] = RATING_SCALE_1_5.get(review["rating"]) review["last_revision"] = { "id": review.pop("latest_revision_id"), "timestamp": review.pop("latest_revision_timestamp"), "text": review["text"], "rating": review["rating"], "review_id": review["id"], } reviews = [ to_dict(review, confidential=True) for review in reviews ] cache.set(cache_key, reviews, 1 * 60 * 60, REVIEW_CACHE_NAMESPACE) # 1 hour shuffle(reviews) return reviews[:limit]
def get_popular(limit=None): """Get a list of popular reviews. Popularity is determined by 'popularity' of a particular review. popularity is a difference between positive votes and negative. In this case only votes from the last month are used to calculate popularity to make results more varied. Args: limit (int): Maximum number of reviews to return. Returns: Randomized list of popular reviews which are converted into dictionaries using to_dict method. """ cache_key = cache.gen_key("popular_reviews", limit) reviews = cache.get(cache_key, REVIEW_CACHE_NAMESPACE) defined_limit = 4 * limit if limit else None if not reviews: with db.engine.connect() as connection: results = connection.execute(sqlalchemy.text(""" SELECT review.id, review.entity_id, review.entity_type, review.user_id, review.edits, review.is_draft, review.is_hidden, review.license_id, review.language, review.source, review.source_url, SUM( CASE WHEN vote = 't' THEN 1 WHEN vote = 'f' THEN -1 WHEN vote IS NULL THEN 0 END ) AS popularity, latest_revision.id AS latest_revision_id, latest_revision.timestamp AS latest_revision_timestamp, latest_revision.text AS text, latest_revision.rating AS rating FROM review JOIN revision ON revision.review_id = review.id LEFT JOIN ( SELECT revision_id, vote FROM vote WHERE rated_at > :last_month ) AS votes_last_month ON votes_last_month.revision_id = revision.id JOIN ( revision JOIN ( SELECT review.id AS review_uuid, MAX(timestamp) AS latest_timestamp FROM review JOIN revision ON review.id = review_id GROUP BY review.id ) AS latest ON latest.review_uuid = revision.review_id AND latest.latest_timestamp = revision.timestamp ) AS latest_revision ON review.id = latest_revision.review_id WHERE entity_id IN ( SELECT DISTINCT entity_id FROM ( SELECT entity_id FROM review ORDER BY RANDOM() ) AS randomized_entity_ids ) AND latest_revision.text IS NOT NULL AND review.is_hidden = 'f' AND review.is_draft = 'f' GROUP BY review.id, latest_revision.id ORDER BY popularity LIMIT :limit 
"""), { "limit": defined_limit, "last_month": datetime.now() - timedelta(weeks=4) }) reviews = results.fetchall() reviews = [dict(review) for review in reviews] if reviews: for review in reviews: review["rating"] = RATING_SCALE_1_5.get(review["rating"]) review["last_revision"] = { "id": review.pop("latest_revision_id"), "timestamp": review.pop("latest_revision_timestamp"), "text": review["text"], "rating": review["rating"], "review_id": review["id"], } reviews = [to_dict(review, confidential=True) for review in reviews] cache.set(cache_key, reviews, 1 * 60 * 60, REVIEW_CACHE_NAMESPACE) # 1 hour shuffle(reviews) return reviews[:limit]
def review_list_handler(): """Get list of reviews. **Request Example:** .. code-block:: bash $ curl "https://critiquebrainz.org/ws/1/review/?limit=1&offset=50" \\ -X GET **Response Example:** .. code-block:: json { "count": 9197, "limit": 1, "offset": 50, "reviews": [ { "created": "Fri, 16 May 2008 00:00:00 GMT", "edits": 0, "entity_id": "09259937-6477-3959-8b10-af1cbaea8e6e", "entity_type": "release_group", "id": "c807d0b4-0dd0-43fe-a7c4-d29bb61f389e", "language": "en", "last_updated": "Fri, 16 May 2008 00:00:00 GMT", "license": { "full_name": "Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported", "id": "CC BY-NC-SA 3.0", "info_url": "https://creativecommons.org/licenses/by-nc-sa/3.0/" }, "popularity": 0, "source": "BBC", "source_url": "http://www.bbc.co.uk/music/reviews/vh54", "text": "TEXT CONTENT OF REVIEW", "rating": 5, "user": { "created": "Wed, 07 May 2014 16:20:47 GMT", "display_name": "Jenny Nelson", "id": "3bf3fe0c-6db2-4746-bcf1-f39912113852", "karma": 0, "user_type": "Noob" }, "votes": { "positive": 0, "negative": 0 } } ] } :json uuid entity_id: UUID of the release group that is being reviewed :json string entity_type: One of the supported reviewable entities. 'release_group' or 'event' etc. **(optional)** :query user_id: user's UUID **(optional)** :query sort: ``popularity`` or ``published_on`` **(optional)** :query limit: results limit, min is 0, max is 50, default is 50 **(optional)** :query offset: result offset, default is 0 **(optional)** :query language: language code (ISO 639-1) **(optional)** :resheader Content-Type: *application/json* """ # TODO: This checking is added to keep old clients working and needs to be removed. 
release_group = Parser.uuid('uri', 'release_group', optional=True) if release_group: entity_id = release_group entity_type = 'release_group' else: entity_id = Parser.uuid('uri', 'entity_id', optional=True) entity_type = Parser.string('uri', 'entity_type', valid_values=ENTITY_TYPES, optional=True) user_id = Parser.uuid('uri', 'user_id', optional=True) # TODO: "rating" sort value is deprecated and needs to be removed. sort = Parser.string('uri', 'sort', valid_values=['popularity', 'published_on', 'rating'], optional=True) if sort == 'rating': sort = 'popularity' limit = Parser.int('uri', 'limit', min=1, max=50, optional=True) or 50 offset = Parser.int('uri', 'offset', optional=True) or 0 language = Parser.string('uri', 'language', min=2, max=3, optional=True) if language and language not in supported_languages: raise InvalidRequest(desc='Unsupported language') # TODO(roman): Ideally caching logic should live inside the model. Otherwise it # becomes hard to track all this stuff. cache_key = cache.gen_key('list', entity_id, user_id, sort, limit, offset, language) cached_result = cache.get(cache_key, REVIEW_CACHE_NAMESPACE) if cached_result: reviews = cached_result['reviews'] count = cached_result['count'] else: reviews, count = db_review.list_reviews( entity_id=entity_id, entity_type=entity_type, user_id=user_id, sort=sort, limit=limit, offset=offset, language=language, ) reviews = [db_review.to_dict(p) for p in reviews] cache.set(cache_key, { 'reviews': reviews, 'count': count, }, namespace=REVIEW_CACHE_NAMESPACE) return jsonify(limit=limit, offset=offset, count=count, reviews=reviews)
def review_list_handler(): """Get list of reviews. **Request Example:** .. code-block:: bash $ curl "https://critiquebrainz.org/ws/1/review/?limit=1&offset=50" \\ -X GET **Response Example:** .. code-block:: json { "count": 9197, "limit": 1, "offset": 50, "reviews": [ { "created": "Fri, 16 May 2008 00:00:00 GMT", "edits": 0, "entity_id": "09259937-6477-3959-8b10-af1cbaea8e6e", "entity_type": "release_group", "id": "c807d0b4-0dd0-43fe-a7c4-d29bb61f389e", "language": "en", "last_updated": "Fri, 16 May 2008 00:00:00 GMT", "license": { "full_name": "Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported", "id": "CC BY-NC-SA 3.0", "info_url": "https://creativecommons.org/licenses/by-nc-sa/3.0/" }, "popularity": 0, "source": "BBC", "source_url": "http://www.bbc.co.uk/music/reviews/vh54", "text": "TEXT CONTENT OF REVIEW", "rating": 5, "user": { "created": "Wed, 07 May 2014 16:20:47 GMT", "display_name": "Jenny Nelson", "id": "3bf3fe0c-6db2-4746-bcf1-f39912113852", "karma": 0, "user_type": "Noob" }, "votes": { "positive": 0, "negative": 0 } } ] } :json uuid entity_id: UUID of the release group that is being reviewed :json string entity_type: One of the supported reviewable entities. 'release_group' or 'event' etc. **(optional)** :query user_id: user's UUID **(optional)** :query sort: ``popularity`` or ``published_on`` **(optional)** :query limit: results limit, min is 0, max is 50, default is 50 **(optional)** :query offset: result offset, default is 0 **(optional)** :query language: language code (ISO 639-1) **(optional)** :resheader Content-Type: *application/json* """ # TODO: This checking is added to keep old clients working and needs to be removed. 
release_group = Parser.uuid('uri', 'release_group', optional=True) if release_group: entity_id = release_group entity_type = 'release_group' else: entity_id = Parser.uuid('uri', 'entity_id', optional=True) entity_type = Parser.string('uri', 'entity_type', valid_values=ENTITY_TYPES, optional=True) user_id = Parser.uuid('uri', 'user_id', optional=True) sort = Parser.string( 'uri', 'sort', valid_values=['popularity', 'published_on', 'rating', 'created'], optional=True) # "rating" and "created" sort values are deprecated and but allowed here for backward compatibility if sort == 'created': sort = 'published_on' if sort == 'rating': sort = 'popularity' limit = Parser.int('uri', 'limit', min=1, max=50, optional=True) or 50 offset = Parser.int('uri', 'offset', optional=True) or 0 language = Parser.string('uri', 'language', min=2, max=3, optional=True) if language and language not in supported_languages: raise InvalidRequest(desc='Unsupported language') # TODO(roman): Ideally caching logic should live inside the model. Otherwise it # becomes hard to track all this stuff. cache_key = cache.gen_key('list', entity_id, user_id, sort, limit, offset, language) cached_result = cache.get(cache_key, REVIEW_CACHE_NAMESPACE) if cached_result: reviews = cached_result['reviews'] count = cached_result['count'] else: reviews, count = db_review.list_reviews( entity_id=entity_id, entity_type=entity_type, user_id=user_id, sort=sort, limit=limit, offset=offset, language=language, ) reviews = [db_review.to_dict(p) for p in reviews] cache.set(cache_key, { 'reviews': reviews, 'count': count, }, namespace=REVIEW_CACHE_NAMESPACE) return jsonify(limit=limit, offset=offset, count=count, reviews=reviews)
def get_popular(cls, limit=None):
    """Get list of popular reviews.

    Popularity is determined by rating of a particular review. Rating is a
    difference between positive votes and negative. In this case only votes
    from the last month are used to calculate rating.

    Results are cached for 12 hours.

    Args:
        limit: Maximum number of reviews to return.

    Returns:
        Randomized list of popular reviews which are converted into
        dictionaries using to_dict method.
    """
    cache_key = cache.gen_key('popular_reviews', limit)
    reviews = cache.get(cache_key, Review.CACHE_NAMESPACE)
    if not reviews:
        # Selecting reviews for distinct release groups
        # TODO(roman): The is a problem with selecting popular reviews like
        # this: if there are multiple reviews for a release group we don't
        # choose the most popular.
        distinct_subquery = db.session.query(Review) \
            .filter(Review.is_draft == False) \
            .distinct(Review.entity_id).subquery()

        # Randomizing results to get some variety
        rand_subquery = db.session.query(aliased(Review, distinct_subquery)) \
            .order_by(func.random()).subquery()

        # Sorting reviews by rating
        query = db.session.query(aliased(Review, rand_subquery))

        # Preparing base query for getting votes
        # (vote counts grouped per revision and vote value, last 4 weeks only)
        vote_query_base = db.session.query(
            Vote.revision_id, Vote.vote, func.count().label('c')) \
            .group_by(Vote.revision_id, Vote.vote) \
            .filter(Vote.rated_at > datetime.now() - timedelta(weeks=4))

        # Getting positive votes
        votes_pos = vote_query_base.subquery('votes_pos')
        query = query.outerjoin(Revision).outerjoin(
            votes_pos,
            and_(votes_pos.c.revision_id == Revision.id,
                 votes_pos.c.vote == True))

        # Getting negative votes
        # NOTE(review): Revision is outer-joined a second time here —
        # presumably SQLAlchemy deduplicates the join; confirm no duplicate
        # rows are produced.
        votes_neg = vote_query_base.subquery('votes_neg')
        query = query.outerjoin(Revision).outerjoin(
            votes_neg,
            and_(votes_neg.c.revision_id == Revision.id,
                 votes_neg.c.vote == False))

        # Order by net score (positive minus negative), highest first.
        query = query.order_by(
            desc(func.coalesce(votes_pos.c.c, 0) - func.coalesce(votes_neg.c.c, 0)))

        if limit is not None:
            # Selecting more reviews there so we'll have something
            # different to show (shuffling is done below).
            query = query.limit(limit * 4)
        reviews = query.all()
        reviews = [review.to_dict(confidential=True) for review in reviews]
        cache.set(cache_key, reviews, 1 * 60 * 60, Review.CACHE_NAMESPACE)  # 1 hour
    shuffle(reviews)  # a bit more variety
    return reviews[:limit]