Example #1
def get_basic_release(mbid):
    """
    Make sure the release and its dependencies are present and contain the required data.

    @param mbid: a string containing the MusicBrainz ID of a release

    @return: a CachedReleaseGroup object containing the required minimal data set
    """
    release_group   = CachedReleaseGroup.view('artists/releases',include_docs=True, key=mbid).one()
    if not release_group:
        # TODO: optimize? it's just one additional request on rare occasions, though
        try:
            t = mmda_logger('mb','request','artist mbid of release',mbid)
            mb_release  = mb_query.getReleaseById(mbid, MB_RELEASE_ARTIST)
            artist_mbid = extractUuid(mb_release.artist.id)
            mmda_logger('mb','result','artist mbid',artist_mbid,t)
        except WebServiceError, e:
            # TODO: add error handling here
            mmda_logger('mb-release','ERROR',e)
            raise e
        else:
            get_basic_artist(artist_mbid)
            release_group = CachedReleaseGroup.view('artists/releases',include_docs=True, key=mbid).one()
    return release_group
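A minimal usage sketch for the helper above, assuming the surrounding mmda setup (CouchDB views, mb_query and the cache models) is already in place; the MBID is a dummy placeholder and the printed fields mirror those built in Example #2:

# illustrative only: placeholder MBID, not a real MusicBrainz release ID
release_mbid  = '00000000-0000-0000-0000-000000000000'
release_group = get_basic_release(release_mbid)
if release_group:
    # assumes CachedReleaseGroup is a couchdbkit document, so fields are available as attributes
    print release_group.title, release_group.artist_name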
Example #2
File: artist.py Project: lidel/mmda
def _create_shallow_releases_mb(mb_artist):
    """
    Create CachedReleaseGroup documents from the basic MusicBrainz release data fetched together with the artist.

    @param mb_artist: a musicbrainz2.model.Artist object
    """
    mb_releases = mb_artist.getReleases()
    artist_mbid = extractUuid(mb_artist.id)

    # magical place where all data is cached/processed before database commit
    there_will_be_dragons = {}

    for mb_release in mb_releases:
        group_mbid      = extractUuid(mb_release.releaseGroup.id)
        release_mbid    = extractUuid(mb_release.id)

        # it's ugly, but we fill this only once (room for future improvement)
        if group_mbid not in there_will_be_dragons:
            release_group                       = {}
            release_group['_id']                = group_mbid
            release_group['artist_mbid']        = artist_mbid
            release_group['artist_name']        = mb_artist.name
            release_group['title']              = mb_release.releaseGroup.title
            # in some rare cases a ReleaseGroup at MusicBrainz has no 'type' property, hence the fallback
            release_group['release_type']       = decruft_mb(mb_release.releaseGroup.type) if mb_release.releaseGroup.type else 'Other'
            release_group['releases']           = {}
            there_will_be_dragons[group_mbid]   = release_group
        else:
            release_group = there_will_be_dragons[group_mbid]

        # store only basic information about each release event
        mb_release_events = []
        for mb_event in mb_release.getReleaseEvents():
            event = {}
            if mb_event.date:
                event['date']    = mb_event.date
            if mb_event.format:
                event['format']  = decruft_mb(mb_event.format)
            if mb_event.country:
                event['country'] = mb_event.country
            if event:
                mb_release_events.append(event)

        release_group['releases'][release_mbid] = {
                'title':mb_release.title,
                'tracks_count':mb_release.tracksCount,
                'release_events':mb_release_events,
                'cache_state':{'mb':[1,datetime.utcnow()]}
                }

        # the primary release is the one with the earliest release date (room for future improvement)
        mb_earliest_release_event = mb_release.getEarliestReleaseEvent()
        mb_earliest_release_date  = mb_earliest_release_event.getDate() if mb_earliest_release_event else None
        # replace the current primary only when it is missing, has no date, or the new date is earlier
        if 'primary' not in release_group or release_group['primary'][1] is None or \
           (mb_earliest_release_date is not None and mb_earliest_release_date < release_group['primary'][1]):
            release_group['primary'] = [release_mbid, mb_earliest_release_date]

    # make sure no stale release group documents are left behind
    old_cached_release_groups = get_db('artists').view('artists/release_groups', key=artist_mbid)
    for group in old_cached_release_groups:
        del get_db('artists')[group['id']]

    for release_group in there_will_be_dragons.itervalues():
        cached_release_group = CachedReleaseGroup.wrap(release_group) # TODO: consider whether wrap() is the best way to handle this
        cached_release_group.cache_state['mb'] = [1,datetime.utcnow()]
        cached_release_group.save() # TODO: wrap in a try/except for ResourceConflict?
        mmda_logger('db','store', cached_release_group)
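A hedged sketch of how this helper might be driven end to end; refresh_artist_releases is an invented wrapper for illustration, the Query/ArtistIncludes calls follow the classic python-musicbrainz2 API, and the logging call only mirrors Example #1:

from musicbrainz2.webservice import Query, ArtistIncludes, WebServiceError
from musicbrainz2.model import Release

def refresh_artist_releases(artist_mbid):
    # assumption: fetch the artist together with its official albums in one request
    includes = ArtistIncludes(releases=(Release.TYPE_OFFICIAL, Release.TYPE_ALBUM))
    try:
        mb_artist = Query().getArtistById(artist_mbid, includes)
    except WebServiceError, e:
        mmda_logger('mb-artist','ERROR',e)
        raise
    # rebuild the shallow CachedReleaseGroup documents for this artist
    _create_shallow_releases_mb(mb_artist)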