def retrieve_cover_links(album_id, extra=None):
    """Fetch the Cover Art Archive listing for a MusicBrainz release.

    Returns the parsed JSON listing, or raw data when *extra* names a
    sub-resource. Raises RetrievalError for the known HTTP error codes.
    """
    if extra is None:
        url = "http://coverartarchive.org/release/" + album_id
    else:
        url = "http://coverartarchive.org/release/%s/%s" % (album_id, extra)
    write_log(translate("MusicBrainz", "Retrieving cover: %s") % url)
    try:
        data, code = urlopen(url, code=True)
    except RetrievalError as err:
        if err.code == 404:
            # Friendlier message for the common "no art" case.
            raise RetrievalError(
                translate("MusicBrainz", "No images exist for this album."),
                404)
        raise err
    if code == 200:
        # The listing endpoint returns JSON; sub-resources are raw data.
        return json.loads(data) if extra is None else data
    if code == 400:
        raise RetrievalError(translate("MusicBrainz", "Invalid UUID"))
    if code in (405, 406):
        raise RetrievalError(translate("MusicBrainz", "Invalid query sent."))
    if code == 503:
        raise RetrievalError(
            translate("MusicBrainz", "You have exceeded your rate limit."))
    if code == 404:
        raise RetrievalError(translate("MusicBrainz", "Image does not exist."))
def parse_album_xml(text, album=None):
    """Parse Amazon's album XML and extract the track listing.

    Returns a list of track dicts, or None when the listing has no
    Tracks element. When *album* is given and the listing spans more
    than one disc, each track's album field gets a "(Disc n)" suffix.
    """
    doc = minidom.parseString(text)
    item = doc.getElementsByTagName('Item')[0]
    tracklists = item.getElementsByTagName('Tracks')
    if not tracklists:
        write_log(translate('Amazon', 'No tracks found in listing.'))
        write_log(text)
        return None
    tracklist = tracklists[0]
    discs = [node for node in tracklist.childNodes
             if node.nodeType != node.TEXT_NODE]
    # Only label tracks per-disc when there really are multiple discs.
    if len(discs) <= 1 or not album:
        album = None
    tracks = []
    for discnum, disc in enumerate(discs):
        for node in disc.childNodes:
            if node.nodeType == node.TEXT_NODE:
                continue
            title = get_text(node)
            number = node.attributes['Number'].value
            entry = {'track': number, 'title': title}
            if album:
                entry['album'] = '%s (Disc %s)' % (album, discnum + 1)
            tracks.append(entry)
    return tracks
def keyword_search(keywords):
    """Search Discogs for *keywords* and return the parsed results.

    Whitespace runs in the query are collapsed to '+' for the URL.
    """
    write_log(translate("Discogs",
                        'Retrieving search results for keywords: %s')
              % keywords)
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python).
    keywords = re.sub(r'\s+', u'+', keywords)
    url = search_url % keywords
    text = urlopen(url)
    return parse_search_json(json.loads(text))
def parse_album_xml(text, album=None):
    """Parse Amazon's album XML and extract the track listing.

    Raises RetrievalError when no Tracks element is present. When
    *album* is given and the listing spans multiple discs, each track's
    album field gets a "(Disc n)" suffix.
    """
    doc = minidom.parseString(text)
    item = doc.getElementsByTagName('Item')[0]
    try:
        tracklist = item.getElementsByTagName('Tracks')[0]
    except IndexError:
        write_log(translate('Amazon',
                            'Invalid XML returned. No tracks listed.'))
        write_log(text)
        raise RetrievalError(
            translate('Amazon', 'Invalid XML returned. No tracks listed.'))
    discs = [node for node in tracklist.childNodes
             if node.nodeType != node.TEXT_NODE]
    if not (len(discs) > 1 and album):
        album = None
    tracks = []
    for discnum, disc in enumerate(discs):
        for node in disc.childNodes:
            if node.nodeType == node.TEXT_NODE:
                continue
            title = get_text(node)
            number = node.attributes['Number'].value
            if album:
                tracks.append({'track': number, 'title': title,
                               'album': u'%s (Disc %s)' % (album,
                                                           discnum + 1)})
            else:
                tracks.append({'track': number, 'title': title})
    return tracks
def keyword_search(self, s):
    """Dispatch a MusicBrainz search string.

    ':a<id>' lists an artist's releases, ':b<id>' retrieves a single
    release; anything else is treated as 'artist;album' keywords or,
    failing that, a plain album search.
    """
    if s.startswith(':a'):
        artist_id = s[len(':a'):].strip()
        try:
            url = search_album('arid:' + solr_escape(artist_id),
                               limit=100, own=True)
            return parse_album_search(urlopen(url))
        except RetrievalError as err:
            msg = translate("MusicBrainz",
                            '<b>Error:</b> While retrieving %1: %2')
            write_log(msg.arg(artist_id).arg(escape(err)))
            raise
    if s.startswith(':b'):
        r_id = s[len(':b'):].strip()
        try:
            return [self.retrieve(r_id)]
        except RetrievalError as err:
            msg = translate(
                "MusicBrainz",
                "<b>Error:</b> While retrieving Album ID %1 (%2)")
            write_log(msg.arg(r_id).arg(escape(err)))
            raise
    try:
        params = parse_searchstring(s)
    except RetrievalError:
        # Not in 'artist;album' form: fall back to a plain album search.
        return parse_album_search(urlopen(search_album(s, limit=100)))
    if not params:
        return
    artist, album = params[0][0], params[0][1]
    return self.search(album, [artist], 100)
def retrieve_covers(cover_links, size=LARGE):
    """Download every image listed in a Cover Art Archive response.

    *size* selects the rendition (SMALL/LARGE thumbnail or the original
    image) and applies to all covers. Returns a list of image dicts.
    """
    ret = []
    for cover in cover_links['images']:
        desc = cover.get('comment', u"")
        cover_type = cover['types'][0]
        if cover_type in mb_imagetypes:
            cover_type = imagetypes[mb_imagetypes[cover_type]]
        else:
            cover_type = imagetypes[u"Other"]
        # Bug fix: the original compared the cover dict itself to
        # SMALL/LARGE, so the `size` argument was silently ignored and
        # the full-size image was always fetched.
        if size == SMALL:
            image_url = cover['thumbnails']['small']
        elif size == LARGE:
            image_url = cover['thumbnails']['large']
        else:
            image_url = cover['image']
        write_log(translate("MusicBrainz", "Retrieving image %s") % image_url)
        image_data = urlopen(image_url)
        ret.append({
            'desc': desc,
            'mime': get_mime(image_data),
            "imagetype": cover_type,
            "data": image_data
        })
    return ret
def search(self, artist, files=None):
    """Run a search against the configured Mp3tag source.

    Keywords come from formatting the first file with `searchby` when
    files are supplied, otherwise from *artist*. Returns a list of
    (info, tracks) pairs.
    """
    if files is not None and self.searchby:
        keywords = format_value(files[0], self.searchby)
    else:
        keywords = artist
    keywords = re.sub(r'\s+', self._separator, keywords)
    if self.search_source is None:
        # No search page defined: go straight to the album page.
        album = self.retrieve(keywords)
        return [album] if album else []
    url = self._search_base.replace('%s', keywords)
    write_log(translate('Mp3tag', 'Retrieving search page: %s') % url)
    set_status(translate('Mp3tag', 'Retrieving search page...'))
    raw = urlopen(url) if self.html is None else self.html
    page = get_encoding(raw, True, 'utf8')[1]
    write_log(translate('Mp3tag', 'Parsing search page.'))
    set_status(translate('Mp3tag', 'Parsing search page...'))
    infos = parse_search_page(self.indexformat, page, self.search_source,
                              url)
    return [(info, []) for info in infos]
def keyword_search(keywords):
    """Search Discogs for *keywords* and return the parsed results.

    Whitespace runs in the query are collapsed to '+' for the URL.
    """
    write_log(
        translate("Discogs",
                  'Retrieving search results for keywords: %s') % keywords)
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python).
    keywords = re.sub(r'\s+', u'+', keywords)
    url = SEARCH_URL % keywords
    text = urlopen(url)
    return parse_search_json(json.loads(text))
def keyword_search(self, s):
    """Handle the ':a<artist-id>' MusicBrainz search shortcut.

    Retrieves up to 100 releases for the given artist id.
    """
    if s.startswith(u':a'):
        artist_id = s[len(':a'):].strip()
        try:
            # Python 3: 'except X, e' is a syntax error, and solr_escape
            # takes the text id directly (no .encode round-trip), matching
            # the py3 sibling of this method elsewhere in the file.
            url = search_album('arid:' + solr_escape(artist_id),
                               limit=100, own=True)
            return parse_album_search(urlopen(url))
        except RetrievalError as e:
            msg = translate("MusicBrainz",
                            '<b>Error:</b> While retrieving %1: %2')
            write_log(msg.arg(artist_id).arg(escape(e)))
            raise
def retrieve_cover_links(album_id, extra=None):
    """Fetch the Cover Art Archive listing URL for a release.

    Raises RetrievalError (code 404) with a friendlier message when the
    release has no images.
    """
    if extra is None:
        url = "http://coverartarchive.org/release/" + album_id
    else:
        url = "http://coverartarchive.org/release/%s/%s" % (album_id, extra)
    write_log(translate("MusicBrainz", "Retrieving cover: %s") % url)
    try:
        data, code = urlopen(url, code=True)
    except RetrievalError as e:  # Python 3: 'except X, e' no longer parses
        if e.code == 404:
            raise RetrievalError(translate("MusicBrainz",
                                           "No images exist for this album."),
                                 404)
        raise e
def keyword_search(self, s):
    """Handle the ':a<artist-id>' MusicBrainz search shortcut.

    Retrieves up to 100 releases for the given artist id.
    """
    if s.startswith(u':a'):
        artist_id = s[len(':a'):].strip()
        try:
            # Python 3: 'except X, e' is a syntax error, and solr_escape
            # takes the text id directly (no .encode round-trip), matching
            # the py3 sibling of this method elsewhere in the file.
            url = search_album('arid:' + solr_escape(artist_id),
                               limit=100, own=True)
            return parse_album_search(urlopen(url))
        except RetrievalError as e:
            msg = translate("MusicBrainz",
                            '<b>Error:</b> While retrieving %1: %2')
            write_log(msg.arg(artist_id).arg(escape(e)))
            raise
def retrieve(self, albuminfo):
    """Retrieve a full album listing from its stored AllMusic URL.

    Returns (albuminfo, tracks), with albuminfo merged with the parsed
    page info (and cover data when one was fetched).
    """
    try:
        artist = albuminfo['artist']
        album = albuminfo['album']
        set_status('Retrieving %s - %s' % (artist, album))
        write_log('Retrieving %s - %s' % (artist, album))
    except KeyError:
        set_status('Retrieving album.')
        write_log('Retrieving album.')
    write_log('Album URL - %s' % albuminfo['#albumurl'])
    url = albuminfo['#albumurl']
    try:
        # The original branched on self._useid but both branches were
        # identical; the call is the same either way.
        info, tracks, cover = retrieve_album(url, self._getcover)
    except urllib.error.URLError as e:
        write_log('Error: While retrieving album URL %s - %s'
                  % (url, str(e)))
        raise RetrievalError(str(e))
    if cover:
        info.update(cover)
    albuminfo = albuminfo.copy()
    albuminfo.update(info)
    return albuminfo, tracks
def retrieve_album(url, coverurl=None, id_field=ALBUM_ID):
    """Open an AllMusic album page and parse info, tracks and cover.

    Raises OldURLError when the page looks like the legacy AMG layout.
    Returns (info, tracks, cover); cover is None when unavailable or
    not requested.
    """
    write_log('Opening Album Page - %s' % url)
    album_page, code = urlopen(url, False, True)
    if b"featured new releases" in album_page:
        raise OldURLError("Old AMG URL used.")
    info, tracks = parse_albumpage(album_page)
    info['#albumurl'] = url
    info['amg_url'] = url
    if 'album' in info:
        info['#extrainfo'] = [info['album'] + ' at AllMusic.com',
                              info['#albumurl']]
    cover = None
    if coverurl:
        try:
            write_log('Retrieving Cover - %s' % info['#cover-url'])
            cover = retrieve_cover(info['#cover-url'])
        except KeyError:
            # Page carried no cover URL.
            write_log('No cover found.')
        except urllib.error.URLError as err:
            write_log('Error: While retrieving cover %s - %s'
                      % (info['#cover-url'], str(err)))
    return info, tracks, cover
def search(self, album, artists):
    """Search AllMusic for an album, preferring stored AMG album ids.

    When the supplied tracks carry a known AMG id field, retrieval by
    id is tried first; otherwise a normal album-name search is done.
    """
    ret = []
    if len(artists) > 1:
        artist = u'Various Artists'
    else:
        if hasattr(artists, 'items'):
            # Python 3: keys() is a view and cannot be indexed directly.
            artist = list(artists.keys())[0]
        else:
            artist = artists[0]
    album_id = None  # fix: was unbound when the id lookup was skipped
    if self._useid and hasattr(artists, 'values'):
        tracks = []
        for z in artists.values():
            tracks.extend(z)
        for field in ('amg_rovi_id', 'amg_pop_id', 'amgsqlid',
                      'amg_album_id',):
            album_id = find_id(tracks, field)
            if album_id:
                break
    if not isempty(album_id):
        write_log(u'Found Album ID %s' % album_id)
        try:
            return self.keyword_search(u':id %s' % album_id)
        except OldURLError:
            write_log("Invalid URL used. Doing normal search.")
    if not album:
        raise RetrievalError('Album name required.')
    write_log(u'Searching for %s' % album)
    try:
        searchpage = search(album)
    except urllib.error.URLError as e:
        # Python 3: urllib2 and 'except X, e' no longer exist.
        write_log(u'Error: While retrieving search page %s' % str(e))
        raise RetrievalError(str(e))
def retrieve_cover_links(album_id, extra=None):
    """Fetch the Cover Art Archive listing URL for a release.

    Raises RetrievalError (code 404) with a friendlier message when the
    release has no images.
    """
    if extra is None:
        url = "http://coverartarchive.org/release/" + album_id
    else:
        url = "http://coverartarchive.org/release/%s/%s" % (album_id, extra)
    write_log(translate("MusicBrainz", "Retrieving cover: %s") % url)
    try:
        data, code = urlopen(url, code=True)
    except RetrievalError as e:  # Python 3: 'except X, e' no longer parses
        if e.code == 404:
            raise RetrievalError(
                translate("MusicBrainz", "No images exist for this album."),
                404)
        raise e
def retrieve_album(url, coverurl=None, id_field=ALBUM_ID):
    """Open an AllMusic album page and parse info, tracks and cover.

    Raises OldURLError when the page looks like the legacy AMG layout.
    """
    write_log('Opening Album Page - %s' % url)
    album_page, code = urlopen(url, False, True)
    if album_page.find("featured new releases") >= 0:
        raise OldURLError("Old AMG URL used.")
    album_page = get_encoding(album_page, True, 'utf8')[1]
    info, tracks = parse_albumpage(album_page)
    info['#albumurl'] = url
    info['amg_url'] = url
    if 'album' in info:
        info['#extrainfo'] = [
            info['album'] + u' at AllMusic.com', info['#albumurl']]
    if coverurl:
        try:
            write_log('Retrieving Cover - %s' % info['#cover-url'])
            cover = retrieve_cover(info['#cover-url'])
        except KeyError:
            write_log('No cover found.')
            cover = None
        except urllib.error.URLError as e:
            # Python 3: urllib2 and 'except X, e' no longer exist.
            write_log(u'Error: While retrieving cover %s - %s'
                      % (info['#cover-url'], str(e)))
            cover = None
def keyword_search(keywords):
    """Query Amazon's ItemSearch API for music matching *keywords*."""
    write_log(translate('Amazon',
                        'Retrieving search results for keywords: %s')
              % keywords)
    # Fixed ItemSearch parameters; only Keywords varies per call.
    query_pairs = {
        'Operation': u'ItemSearch',
        'SearchIndex': u'Music',
        'ResponseGroup': u'ItemAttributes,Images',
        'Service': u'AWSECommerceService',
        'ItemPage': u'1',
        'Keywords': keywords,
        'AssociateTag': u'puddletag-20',
    }
    url = create_aws_url(access_key, secret_key, query_pairs)
    return parse_search_xml(urlopen(url))
def keyword_search(keywords):
    """Query Amazon's ItemSearch API for music matching *keywords*."""
    write_log(translate('Amazon',
                        'Retrieving search results for keywords: %s')
              % keywords)
    # Fixed ItemSearch parameters; only Keywords varies per call.
    query_pairs = {
        'Operation': "ItemSearch",
        'SearchIndex': 'Music',
        'ResponseGroup': "ItemAttributes,Images",
        'Service': "AWSECommerceService",
        'ItemPage': '1',
        'Keywords': keywords,
        'AssociateTag': 'puddletag-20',
    }
    url = create_aws_url(access_key, secret_key, query_pairs)
    return parse_search_xml(urlopen(url))
def retrieve_covers(self, album_id):
    """Fetch cover images for a release, honouring the image settings.

    Returns [] when images are disabled or retrieval fails; returns a
    single-image list when only the front cover is wanted.
    """
    if not self.__get_images:
        return []
    if self.__num_images == 0:
        try:
            image = retrieve_front_cover(album_id)
            if image:
                return [image]
        except RetrievalError as e:
            # Python 3: except-as and print() replace the 2.x forms.
            import traceback
            traceback.print_exc()
            print()
            write_log(translate("MusicBrainz",
                                "Error retrieving image: %s") % str(e))
        return []
def retrieve_covers(self, album_id):
    """Fetch cover images for a release, honouring the image settings.

    Returns [] when images are disabled or retrieval fails; returns a
    single-image list when only the front cover is wanted.
    """
    if not self.__get_images:
        return []
    if self.__num_images == 0:
        try:
            image = retrieve_front_cover(album_id)
            if image:
                return [image]
        except RetrievalError as e:
            # Python 3: except-as and print() replace the 2.x forms.
            import traceback
            traceback.print_exc()
            print()
            write_log(
                translate("MusicBrainz",
                          "Error retrieving image: %s") % str(e))
        return []
def search(self, album, artists='', limit=40):
    """Search MusicBrainz for an album, by stored id when available.

    Rate-limits successive calls, tries any mbrainz_album_id found in
    the supplied tracks first, then falls back to a text search.
    """
    # NOTE(review): the threshold looks like it was meant to be 1
    # second (time.time() is in seconds) -- confirm before changing.
    if time.time() - self.__lasttime < 1000:
        time.sleep(1)
    # Removed unused locals `ret` and `check_matches` from the original.
    if isempty(artists):
        artist = None
    if len(artists) > 1:
        artist = 'Various Artists'
    elif artists:
        if hasattr(artists, 'items'):
            artist = list(artists.keys())[0]
        else:
            artist = artists[0]
    if not album and not artist:
        raise RetrievalError('Album or Artist required.')
    write_log('Searching for %s' % album)
    if hasattr(artists, "items"):
        album_id = find_id(chain(*list(artists.values())),
                           "mbrainz_album_id")
        if album_id:
            try:
                write_log(
                    translate("MusicBrainz",
                              "Found album id %s in tracks. Retrieving")
                    % album_id)
                return [retrieve_album(album_id)]
            except RetrievalError as e:
                msg = translate(
                    "MusicBrainz",
                    "<b>Error:</b> While retrieving Album ID %1 (%2)")
                write_log(msg.arg(album_id).arg(escape(e)))
    try:
        xml = urlopen(search_album(album, artist, limit))
    except urllib.error.URLError as e:
        write_log('Error: While retrieving search page %s' % str(e))
        raise RetrievalError(str(e))
    write_log('Retrieved search results.')
    self.__lasttime = time.time()
    return parse_album_search(xml)
class MusicBrainz(object):
    """Tag source backed by the MusicBrainz web service."""
    name = u'MusicBrainz'
    group_by = [u'album', 'artist']

    def __init__(self):
        super(MusicBrainz, self).__init__()
        # Timestamp of the last request, used for rate limiting.
        self.__lasttime = time.time()
        self.__image_size = LARGE
        self.__num_images = 0
        self.__get_images = True
        self.preferences = [
            [translate('MusicBrainz', 'Retrieve Cover'), CHECKBOX, True],
            [
                translate('MusicBrainz', 'Cover size to retrieve:'),
                COMBO,
                [[
                    translate('Amazon', 'Small'),
                    translate('Amazon', 'Large'),
                    translate('Amazon', 'Original Size')
                ], 1]
            ],
            [
                translate('MusicBrainz', 'Amount of images to retrieve:'),
                COMBO,
                [[
                    translate('MusicBrainz', 'Just the front cover'),
                    translate('MusicBrainz', 'All (can take a while)')
                ], 0]
            ],
        ]

    def keyword_search(self, s):
        """':a<id>' lists an artist's releases; ':b<id>' fetches one."""
        if s.startswith(u':a'):
            artist_id = s[len(':a'):].strip()
            try:
                # Python 3: 'except X, e' is a syntax error, and
                # solr_escape takes the text id directly (no .encode).
                url = search_album('arid:' + solr_escape(artist_id),
                                   limit=100, own=True)
                return parse_album_search(urlopen(url))
            except RetrievalError as e:
                msg = translate("MusicBrainz",
                                '<b>Error:</b> While retrieving %1: %2')
                write_log(msg.arg(artist_id).arg(escape(e)))
                raise
        elif s.startswith(u':b'):
            r_id = s[len(u':b'):].strip()
            try:
                return [self.retrieve(r_id)]
            except RetrievalError as e:
                msg = translate(
                    "MusicBrainz",
                    "<b>Error:</b> While retrieving Album ID %1 (%2)")
                write_log(msg.arg(r_id).arg(escape(e)))
                raise
def retrieve_album_info(album, tracks):
    """Merge retrieved MusicBrainz album data into existing tracks.

    Returns (info, new_tracks). Each existing track is laid over the
    matching retrieved track: matched via its 'track' field used as a
    list index, or by identical title when 'track' is absent.
    """
    if not album:
        return album, tracks
    msg = u'<b>%s - %s</b>' % tuple(
        map(escape_html, (album['artist'], album['album'])))
    msg = RETRIEVE_MB_MSG.arg(msg)
    write_log(msg)
    set_status(msg)
    info, new_tracks = retrieve_album(album['mbrainz_album_id'])
    for track in tracks:
        try:
            # NOTE(review): the track number is used directly as a list
            # index (not number - 1) -- confirm that is intended.
            index = int(track['track'])
        except KeyError:
            for index, candidate in enumerate(new_tracks):
                if candidate['title'] == track['title']:
                    break
        track.update(new_tracks[index])
        new_tracks[index] = track
    return info, new_tracks
def retrieve_album_info(album, tracks):
    """Merge retrieved MusicBrainz album data into existing tracks.

    Returns (info, new_tracks). Each existing track is laid over the
    matching retrieved track: matched via its 'track' field used as a
    list index, or by identical title when 'track' is absent.
    """
    if not album:
        return album, tracks
    header = tuple(map(escape_html, (album['artist'], album['album'])))
    msg = RETRIEVE_MB_MSG.arg(u'<b>%s - %s</b>' % header)
    write_log(msg)
    set_status(msg)
    info, new_tracks = retrieve_album(album['mbrainz_album_id'])
    for track in tracks:
        try:
            # NOTE(review): the track number is used directly as a list
            # index (not number - 1) -- confirm that is intended.
            index = int(track['track'])
        except KeyError:
            for index, candidate in enumerate(new_tracks):
                if candidate['title'] == track['title']:
                    break
        track.update(new_tracks[index])
        new_tracks[index] = track
    return info, new_tracks
def search(self, album, artists=u'', limit=40):
    """Search MusicBrainz for an album, by stored id when available."""
    if time.time() - self.__lasttime < 1000:
        time.sleep(1)
    ret = []
    check_matches = False
    if isempty(artists):
        artist = None
    if len(artists) > 1:
        artist = u'Various Artists'
    elif artists:
        if hasattr(artists, 'items'):
            # Python 3: keys() is a view and cannot be indexed directly.
            artist = list(artists.keys())[0]
        else:
            artist = artists[0]
    if not album and not artist:
        raise RetrievalError('Album or Artist required.')
    write_log(u'Searching for %s' % album)
    if hasattr(artists, "items"):
        # Python 3: values() is a view; materialize before unpacking.
        album_id = find_id(chain(*list(artists.values())),
                           "mbrainz_album_id")
        if album_id:
            try:
                write_log(
                    translate("MusicBrainz",
                              "Found album id %s in tracks. Retrieving")
                    % album_id)
                return [retrieve_album(album_id)]
            except RetrievalError as e:
                msg = translate(
                    "MusicBrainz",
                    "<b>Error:</b> While retrieving Album ID %1 (%2)")
                write_log(msg.arg(album_id).arg(escape(e)))
def search(self, album, artists=u'', limit=40):
    """Search MusicBrainz for an album, by stored id when available."""
    if time.time() - self.__lasttime < 1000:
        time.sleep(1)
    ret = []
    check_matches = False
    if isempty(artists):
        artist = None
    if len(artists) > 1:
        artist = u'Various Artists'
    elif artists:
        if hasattr(artists, 'items'):
            # Python 3: keys() is a view and cannot be indexed directly.
            artist = list(artists.keys())[0]
        else:
            artist = artists[0]
    if not album and not artist:
        raise RetrievalError('Album or Artist required.')
    write_log(u'Searching for %s' % album)
    if hasattr(artists, "items"):
        # Python 3: values() is a view; materialize before unpacking.
        album_id = find_id(chain(*list(artists.values())),
                           "mbrainz_album_id")
        if album_id:
            try:
                write_log(translate(
                    "MusicBrainz",
                    "Found album id %s in tracks. Retrieving") % album_id)
                return [retrieve_album(album_id)]
            except RetrievalError as e:
                msg = translate(
                    "MusicBrainz",
                    "<b>Error:</b> While retrieving Album ID %1 (%2)")
                write_log(msg.arg(album_id).arg(escape(e)))
def retrieve_covers(cover_links, size=LARGE):
    """Download every image listed in a Cover Art Archive response.

    *size* selects the rendition (SMALL/LARGE thumbnail or the original
    image) and applies to all covers. Returns a list of image dicts.
    """
    ret = []
    for cover in cover_links['images']:
        desc = cover.get('comment', u"")
        cover_type = cover['types'][0]
        if cover_type in mb_imagetypes:
            cover_type = imagetypes[mb_imagetypes[cover_type]]
        else:
            cover_type = imagetypes[u"Other"]
        # Bug fix: the original compared the cover dict itself to
        # SMALL/LARGE, so the `size` argument was silently ignored and
        # the full-size image was always fetched.
        if size == SMALL:
            image_url = cover['thumbnails']['small']
        elif size == LARGE:
            image_url = cover['thumbnails']['large']
        else:
            image_url = cover['image']
        write_log(translate("MusicBrainz", "Retrieving image %s") % image_url)
        image_data = urlopen(image_url)
        ret.append({'desc': desc, 'mime': get_mime(image_data),
                    "imagetype": cover_type, "data": image_data})
    return ret
def tag_source_search(ts, group, files):
    """Helper method for tag source searches.

    For grouped sources, searches once per primary key; when a search
    comes back empty and the regexp substitutions change the album
    name, the search is retried with the rewritten name.
    """
    if not ts.group_by:
        return ts.search(files), files
    ret = []
    for primary in group:
        albums = ts.search(primary, group[primary])
        if albums:
            ret.extend(albums)
            continue
        changed, audio = apply_regexps({'album': primary})
        if changed:
            audio['album'] = audio['album'].strip()
            write_log(translate('WebDB', 'Retrying search with %s')
                      % audio['album'])
            ret.extend(ts.search(audio['album'], group[primary]))
    return ret, files
def tag_source_search(ts, group, files):
    """Helper method for tag source searches.

    For grouped sources, searches once per primary key; when a search
    comes back empty and the regexp substitutions change the album
    name, the search is retried with the rewritten name.
    """
    if not ts.group_by:
        return ts.search(files), files
    ret = []
    for primary in group:
        found = ts.search(primary, group[primary])
        if found:
            ret.extend(found)
            continue
        changed, audio = apply_regexps({'album': primary})
        if changed:
            retry_name = audio['album'].strip()
            audio['album'] = retry_name
            write_log(translate('WebDB', 'Retrying search with %s')
                      % retry_name)
            ret.extend(ts.search(retry_name, group[primary]))
    return ret, files
def retrieve(self, info):
    """Retrieve and parse an album page for *info*.

    *info* is either a keyword string or a dict carrying '#url'.
    Returns (info, tracks); tracks is None when none were parsed.
    """
    if isinstance(info, str):
        # Python 3: basestring is gone; a keyword string was passed.
        text = info.replace(u' ', self._separator)
        info = {}
    else:
        info = deepcopy(info)
        text = info['#url']
    try:
        url = self.album_url % text
    except TypeError:
        url = self.album_url + text
    info['#url'] = url
    try:
        write_log(translate('Mp3tag', u'Retrieving album page: %s') % url)
        set_status(translate('Mp3tag', u'Retrieving album page...'))
        page = get_encoding(urlopen(url), True, 'utf8')[1]
    except:
        page = u''
    write_log(translate('Mp3tag', u'Parsing album page.'))
    set_status(translate('Mp3tag', u'Parsing album page...'))
    new_info, tracks = parse_album_page(page, self.album_source, url)
    # Python 3: iteritems() was removed; items() is the view equivalent.
    info.update(dict((k, v) for k, v in new_info.items() if v))
    if self._get_cover and COVER in info:
        cover_url = new_info[COVER]
        if isinstance(cover_url, str):
            info.update(retrieve_cover(cover_url))
        else:
            info.update(list(map(retrieve_cover, cover_url)))
    if not tracks:
        tracks = None
    return info, tracks
def retrieve(self, info):
    """Retrieve and parse an album page for *info*.

    *info* is either a keyword string or a dict carrying '#url'.
    Returns (info, tracks); tracks is None when none were parsed.
    """
    if isinstance(info, str):
        text = info.replace(' ', self._separator)
        info = {}
    else:
        info = deepcopy(info)
        text = info['#url']
    try:
        url = self.album_url % text
    except TypeError:
        url = self.album_url + text
    info['#url'] = url
    try:
        write_log(translate('Mp3tag', 'Retrieving album page: %s') % url)
        set_status(translate('Mp3tag', 'Retrieving album page...'))
        page = get_encoding(urlopen(url), True, 'utf8')[1]
    except Exception:
        # Was a bare except: keep the best-effort fallback but stop
        # swallowing SystemExit/KeyboardInterrupt.
        page = ''
    write_log(translate('Mp3tag', 'Parsing album page.'))
    set_status(translate('Mp3tag', 'Parsing album page...'))
    new_info, tracks = parse_album_page(page, self.album_source, url)
    info.update(dict((k, v) for k, v in new_info.items() if v))
    if self._get_cover and COVER in info:
        cover_url = new_info[COVER]
        if isinstance(cover_url, str):
            info.update(retrieve_cover(cover_url))
        else:
            # NOTE(review): dict.update() with a list of cover dicts only
            # works if retrieve_cover yields key/value pairs -- confirm;
            # merging each returned dict individually may be intended.
            info.update(list(map(retrieve_cover, cover_url)))
    if not tracks:
        tracks = None
    return info, tracks
def search(self, artist, files=None):
    """Run a search against the configured Mp3tag source."""
    if files is not None and self.searchby:
        keywords = format_value(files[0], self.searchby)
    else:
        keywords = artist
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python).
    keywords = re.sub(r'\s+', self._separator, keywords)
    if self.search_source is None:
        album = self.retrieve(keywords)
        return [album] if album else []
    url = self._search_base.replace(u'%s', keywords)
    write_log(translate('Mp3tag', u'Retrieving search page: %s') % url)
    set_status(translate('Mp3tag', u'Retrieving search page...'))
    if self.html is None:
        page = get_encoding(urlopen(url), True, 'utf8')[1]
    else:
        page = get_encoding(self.html, True, 'utf8')[1]
    write_log(translate('Mp3tag', u'Parsing search page.'))
    set_status(translate('Mp3tag', u'Parsing search page...'))
    infos = parse_search_page(self.indexformat, page, self.search_source,
                              url)
    return [(info, []) for info in infos]
def retrieve(self, albuminfo):
    """Retrieve a full album listing from its stored AllMusic URL."""
    try:
        artist = albuminfo['artist']
        album = albuminfo['album']
        set_status('Retrieving %s - %s' % (artist, album))
        write_log('Retrieving %s - %s' % (artist, album))
    except KeyError:
        set_status('Retrieving album.')
        write_log('Retrieving album.')
    write_log('Album URL - %s' % albuminfo['#albumurl'])
    url = albuminfo['#albumurl']
    try:
        # Both original branches on self._useid made the same call.
        info, tracks, cover = retrieve_album(url, self._getcover)
    except urllib.error.URLError as e:
        # Python 3: urllib2 and 'except X, e' no longer exist.
        write_log(u'Error: While retrieving album URL %s - %s'
                  % (url, str(e)))
        raise RetrievalError(str(e))
def retrieve(self, albuminfo):
    """Retrieve a full album listing from its stored AllMusic URL."""
    try:
        artist = albuminfo['artist']
        album = albuminfo['album']
        set_status('Retrieving %s - %s' % (artist, album))
        write_log('Retrieving %s - %s' % (artist, album))
    except KeyError:
        set_status('Retrieving album.')
        write_log('Retrieving album.')
    write_log('Album URL - %s' % albuminfo['#albumurl'])
    url = albuminfo['#albumurl']
    try:
        # Both original branches on self._useid made the same call.
        info, tracks, cover = retrieve_album(url, self._getcover)
    except urllib.error.URLError as e:
        # Python 3: urllib2 and 'except X, e' no longer exist.
        write_log(u'Error: While retrieving album URL %s - %s'
                  % (url, str(e)))
        raise RetrievalError(str(e))
def search(self, album, artists):
    """Search AllMusic for an album, preferring stored AMG album ids.

    When the supplied tracks carry a known AMG id field, retrieval by
    id is tried first; otherwise a normal album-name search is done.
    """
    ret = []
    if len(artists) > 1:
        artist = u'Various Artists'
    else:
        if hasattr(artists, 'items'):
            # Python 3: keys() is a view and cannot be indexed directly.
            artist = list(artists.keys())[0]
        else:
            artist = artists[0]
    album_id = None  # fix: was unbound when the id lookup was skipped
    if self._useid and hasattr(artists, 'values'):
        tracks = []
        for z in artists.values():
            tracks.extend(z)
        for field in ('amg_rovi_id', 'amg_pop_id', 'amgsqlid',
                      'amg_album_id',):
            album_id = find_id(tracks, field)
            if album_id:
                break
    if not isempty(album_id):
        write_log(u'Found Album ID %s' % album_id)
        try:
            return self.keyword_search(u':id %s' % album_id)
        except OldURLError:
            write_log("Invalid URL used. Doing normal search.")
    if not album:
        raise RetrievalError('Album name required.')
    write_log(u'Searching for %s' % album)
    try:
        searchpage = search(album)
    except urllib.error.URLError as e:
        # Python 3: urllib2 and 'except X, e' no longer exist.
        write_log(u'Error: While retrieving search page %s' % str(e))
        raise RetrievalError(str(e))
def search(self, album, artists):
    """Search Discogs by looking for a stored Discogs release id.

    Only id-based retrieval is supported here; returns [] when no id
    can be found in the supplied tracks.
    """
    if len(artists) > 1:
        artist = u'Various Artists'
    else:
        artist = [z for z in artists][0]
    if hasattr(artists, 'values'):
        write_log(translate("Discogs",
                            'Checking tracks for Discogs Album ID.'))
        tracks = []
        for grouped in artists.values():
            tracks.extend(grouped)
        album_id = find_id(tracks, R_ID)
        if album_id:
            write_log(translate("Discogs", 'Found Discogs ID: %s')
                      % album_id)
            return [self.retrieve(album_id)]
        write_log(translate("Discogs", 'No Discogs ID found in tracks.'))
    return []
def search(self, album, artists):
    """Search Discogs by looking for a stored Discogs release id.

    Only id-based retrieval is supported here; returns [] when no id
    can be found in the supplied tracks.
    """
    if len(artists) > 1:
        artist = u'Various Artists'
    else:
        artist = [z for z in artists][0]
    if hasattr(artists, 'values'):
        write_log(translate("Discogs",
                            'Checking tracks for Discogs Album ID.'))
        all_tracks = []
        for grouped in artists.values():
            all_tracks.extend(grouped)
        album_id = find_id(all_tracks, R_ID)
        if not album_id:
            write_log(translate("Discogs",
                                'No Discogs ID found in tracks.'))
        else:
            write_log(translate("Discogs", 'Found Discogs ID: %s')
                      % album_id)
            return [self.retrieve(album_id)]
    return []
def retrieve_album(info, image=MEDIUMIMAGE):
    """Retrieves album from the information in info.

    image must be either one of image_types or None. If None, no image
    is retrieved."""
    if isinstance(info, str):
        # Python 3: basestring is gone; str covers the ASIN-string case.
        asin = info
    else:
        asin = info['#asin']
    query_pairs = {
        "Operation": u"ItemLookup",
        "Service": u"AWSECommerceService",
        'ItemId': asin,
        'ResponseGroup': u'Tracks',
        'AssociateTag': u'puddletag-20'}
    url = create_aws_url(access_key, secret_key, query_pairs)
    if isinstance(info, str):
        write_log(translate('Amazon', 'Retrieving using ASIN: %s') % asin)
    else:
        write_log(translate('Amazon', 'Retrieving XML: %1 - %2').arg(
            info.get('artist', u'')).arg(info.get('album', u'')))
    xml = urlopen(url)
    if isinstance(info, str):
        tracks = parse_album_xml(xml)
    else:
        tracks = parse_album_xml(xml, info['album'])
    if image in image_types:
        url = info[image]
        write_log(translate("Amazon", 'Retrieving cover: %s') % url)
        info.update({'__image': retrieve_cover(url)})
    return tracks
def retrieve_album(info, image=MEDIUMIMAGE):
    """Retrieve an Amazon album listing (and optionally its cover).

    *info* is either an ASIN string or a dict holding '#asin'. *image*
    should be one of image_types or None; when valid, the cover is
    stored under '__image' in *info*. Returns the parsed track list.
    """
    is_asin = isinstance(info, str)
    asin = info if is_asin else info['#asin']
    query_pairs = {
        "Operation": "ItemLookup",
        "Service": "AWSECommerceService",
        'ItemId': asin,
        'ResponseGroup': 'Tracks',
        'AssociateTag': 'puddletag-20',
    }
    url = create_aws_url(access_key, secret_key, query_pairs)
    if is_asin:
        write_log(translate('Amazon', 'Retrieving using ASIN: %s') % asin)
    else:
        write_log(translate('Amazon', 'Retrieving XML: %1 - %2').arg(
            info.get('artist', '')).arg(info.get('album', '')))
    xml = urlopen(url)
    if is_asin:
        tracks = parse_album_xml(xml)
    else:
        tracks = parse_album_xml(xml, info['album'])
    if image in image_types:
        # NOTE(review): this path assumes *info* is the dict form; an
        # ASIN string with a valid *image* would fail -- confirm callers.
        url = info[image]
        write_log(translate("Amazon", 'Retrieving cover: %s') % url)
        info.update({'__image': retrieve_cover(url)})
    return tracks
class AllMusic(object):
    """Tag source backed by AllMusic.com."""
    name = 'AllMusic.com'
    tooltip = "Enter search parameters here. If empty, the selected files are used. <ul><li><b>artist;album</b> searches for a specific album/artist combination.</li> <li>To list the albums by an artist leave off the album part, but keep the semicolon (eg. <b>Ratatat;</b>). For a album only leave the artist part as in <b>;Resurrection.</li><li>By prefacing the search text with <b>:id</b> you can search for an albums using it's AllMusic sql id eg. <b>:id 10:nstlgr7nth</b> (extraneous spaces are discarded.)<li></ul>"
    group_by = [u'album', 'artist']

    def __init__(self):
        super(AllMusic, self).__init__()
        self._getcover = True
        self._useid = True
        self.preferences = [
            ['Retrieve Covers', CHECKBOX, True],
            [
                'Use AllMusic Album ID to retrieve albums:', CHECKBOX,
                self._useid
            ],
        ]

    def keyword_search(self, text):
        """Handle ':id <sqlid>' lookups or 'artist;album' keywords."""
        if text.startswith(u':id'):
            sql = text[len(':id'):].strip().replace(u' ', u'').lower()
            if sql.startswith('mr'):
                url = album_url + 'release/' + sql
            else:
                url = album_url + sql
            info, tracks, cover = retrieve_album(url, self._getcover)
            if cover:
                info.update(cover)
            return [(info, tracks)]
        else:
            try:
                params = parse_searchstring(text)
            except RetrievalError:
                return self.search(text, [u''])
            artists = [params[0][0]]
            album = params[0][1]
            return self.search(album, artists)

    def search(self, album, artists):
        """Search for an album, preferring stored AMG album ids."""
        ret = []
        if len(artists) > 1:
            artist = u'Various Artists'
        else:
            if hasattr(artists, 'items'):
                # Python 3: keys() is a view and cannot be indexed.
                artist = list(artists.keys())[0]
            else:
                artist = artists[0]
        album_id = None  # fix: was unbound when the id lookup was skipped
        if self._useid and hasattr(artists, 'values'):
            tracks = []
            for z in artists.values():
                tracks.extend(z)
            for field in ('amg_rovi_id', 'amg_pop_id', 'amgsqlid',
                          'amg_album_id',):
                album_id = find_id(tracks, field)
                if album_id:
                    break
        if not isempty(album_id):
            write_log(u'Found Album ID %s' % album_id)
            try:
                return self.keyword_search(u':id %s' % album_id)
            except OldURLError:
                write_log("Invalid URL used. Doing normal search.")
        if not album:
            raise RetrievalError('Album name required.')
        write_log(u'Searching for %s' % album)
        try:
            searchpage = search(album)
        except urllib.error.URLError as e:
            # Python 3: urllib2 and 'except X, e' no longer exist.
            write_log(u'Error: While retrieving search page %s' % str(e))
            raise RetrievalError(str(e))
        write_log(u'Retrieved search results.')
        search_results = parse_searchpage(searchpage, artist, album)
        if search_results:
            matched, matches = search_results
        else:
            return []
        if matched and len(matches) == 1:
            ret = [(matches[0], [])]
        elif matched:
            write_log(u'Ambiguous matches found for: %s - %s'
                      % (artist, album))
            ret.extend([(z, []) for z in matches])
        else:
            write_log(u'No exact matches found for: %s - %s'
                      % (artist, album))
            ret.extend([(z, []) for z in matches])
        return ret
def submit(self, fns):
    """Fingerprint files and submit them to AcoustID in batches of 10.

    Raises SubmissionError when no user API key is configured. Skips
    files whose fingerprint cannot be generated; stops entirely on
    web-service errors.
    """
    if not self.__user_key:
        raise SubmissionError(translate(
            "AcoustID", "Please enter AcoustID user key in settings."))
    fns_len = len(fns)
    data = []
    for i, fn in enumerate(fns):
        try:
            disp_fn = audioinfo.decode_fn(fn.filepath)
        except AttributeError:
            disp_fn = fn['__path']
        write_log(FILE_MSG.arg(i + 1).arg(disp_fn))
        try:
            fp = id_in_tag(fn)
            if fp:
                write_log(FOUND_ID_MSG)
                dur, fp = fp
            else:
                write_log(CALCULATE_MSG)
                dur, fp = fingerprint_file(fn.filepath)
            info = {
                # Python 3: unicode() is spelled str().
                'duration': str(dur),
                'fingerprint': str(fp),
            }
            info.update(convert_for_submit(fn))
            data.append(info)
            # Flush a batch every 10 files, and on the final file.
            if len(data) > 9 or i == fns_len - 1:
                msg = SUBMIT_MSG.arg(i - len(data) + 2)
                msg = msg.arg(i + 1).arg(fns_len)
                write_log(msg)
                set_status(msg)
                acoustid.submit(API_KEY, self.__user_key, data)
                data = []
        except acoustid.FingerprintGenerationError as e:
            traceback.print_exc()
            write_log(FP_ERROR_MSG.arg(str(e)))
            continue
        except acoustid.WebServiceError as e:
            traceback.print_exc()
            set_status(SUBMIT_ERROR_MSG.arg(str(e)))
            write_log(SUBMIT_ERROR_MSG.arg(str(e)))
            break
def search(self, artist, fns=None):
    """Look up each file on AcoustID by stored fingerprint id or file.

    Skips files whose fingerprint cannot be generated; stops on
    web-service errors.
    """
    tracks = []
    albums = []
    fns_len = len(fns)
    for i, fn in enumerate(fns):
        try:
            disp_fn = audioinfo.decode_fn(fn.filepath)
        except AttributeError:
            disp_fn = fn['__path']
        write_log(disp_fn)
        try:
            fp = id_in_tag(fn)
            if fp:
                write_log(FOUND_ID_MSG)
                dur, fp = fp
            else:
                write_log(CALCULATE_MSG)
                dur, fp = (None, None)
            write_log(RETRIEVE_MSG.arg(i + 1).arg(fns_len))
            set_status(RETRIEVE_MSG.arg(i + 1).arg(fns_len))
            # NOTE(review): hard-coded client key -- consider the
            # module's API_KEY constant used by submit().
            data, fp = match("gT8GJxhO", fn.filepath, fp, dur)
            write_log(translate('AcoustID', "Parsing Data"))
            info = parse_lookup_result(data, fp=fp)
        except acoustid.FingerprintGenerationError as e:
            # Python 3: 'except X, e' is a syntax error.
            write_log(FP_ERROR_MSG.arg(str(e)))
            continue
        except acoustid.WebServiceError as e:
            set_status(WEB_ERROR_MSG.arg(str(e)))
            write_log(WEB_ERROR_MSG.arg(str(e)))
            break
def search(album):
    """Fetch the AllMusic search-results page for *album*.

    Slashes in the name would break the URL path, so they are replaced
    with spaces first.
    """
    query = album.replace(u'/', u' ')
    url = create_search(query)
    write_log(u'Search URL - %s' % url)
    return urlopen(iri_to_uri(url))
def search(self, artist, fns=None):
    """Look up each file on AcoustID by stored fingerprint id or file.

    Skips files whose fingerprint cannot be generated; stops on
    web-service errors.
    """
    tracks = []
    albums = []
    fns_len = len(fns)
    for i, fn in enumerate(fns):
        try:
            disp_fn = audioinfo.decode_fn(fn.filepath)
        except AttributeError:
            disp_fn = fn['__path']
        write_log(disp_fn)
        try:
            fp = id_in_tag(fn)
            if fp:
                write_log(FOUND_ID_MSG)
                dur, fp = fp
            else:
                write_log(CALCULATE_MSG)
                dur, fp = (None, None)
            write_log(RETRIEVE_MSG.arg(i + 1).arg(fns_len))
            set_status(RETRIEVE_MSG.arg(i + 1).arg(fns_len))
            # NOTE(review): hard-coded client key -- consider the
            # module's API_KEY constant used by submit().
            data, fp = match("gT8GJxhO", fn.filepath, fp, dur)
            write_log(translate('AcoustID', "Parsing Data"))
            info = parse_lookup_result(data, fp=fp)
        except acoustid.FingerprintGenerationError as e:
            # Python 3: 'except X, e' is a syntax error.
            write_log(FP_ERROR_MSG.arg(str(e)))
            continue
        except acoustid.WebServiceError as e:
            set_status(WEB_ERROR_MSG.arg(str(e)))
            write_log(WEB_ERROR_MSG.arg(str(e)))
            break
def submit(self, fns):
    """Submit AcoustID fingerprints for the given audio files.

    fns: sequence of audioinfo tag objects (each wraps one audio file).
    Raises SubmissionError when no AcoustID user key is configured.
    Fingerprints are accumulated and submitted in batches of 10 (and a
    final partial batch for the last files).

    Fix: replaced the Python-2-only ``except X, e`` comma syntax
    (removed in Python 3) with ``except X as e`` — valid since 2.6.
    """
    if not self.__user_key:
        raise SubmissionError(
            translate("AcoustID",
                      "Please enter AcoustID user key in settings."))
    fns_len = len(fns)
    data = []
    for i, fn in enumerate(fns):
        # Prefer a decoded display name; fall back to the raw tag path.
        try:
            disp_fn = audioinfo.decode_fn(fn.filepath)
        except AttributeError:
            disp_fn = fn['__path']
        write_log(FILE_MSG.arg(i + 1).arg(disp_fn))
        try:
            # Reuse a fingerprint stored in the tag when available;
            # otherwise compute one from the audio data.
            fp = id_in_tag(fn)
            if fp:
                write_log(FOUND_ID_MSG)
                dur, fp = fp
            else:
                write_log(CALCULATE_MSG)
                dur, fp = fingerprint_file(fn.filepath)
            info = {
                'duration': unicode(dur),
                'fingerprint': unicode(fp),
            }
            info.update(convert_for_submit(fn))
            data.append(info)
            # Flush the batch every 10 files, or when the last file
            # has been processed.  i - len(data) + 2 is the 1-based
            # index of the first file in the current batch.
            if len(data) > 9 or i == fns_len - 1:
                msg = SUBMIT_MSG.arg(i - len(data) + 2)
                msg = msg.arg(i + 1).arg(fns_len)
                write_log(msg)
                set_status(msg)
                acoustid.submit(API_KEY, self.__user_key, data)
                data = []
        except acoustid.FingerprintGenerationError as e:
            # A bad file should not stop the whole submission run.
            traceback.print_exc()
            write_log(FP_ERROR_MSG.arg(unicode(e)))
            continue
        except acoustid.WebServiceError as e:
            # Service-side failure: further submissions would also fail.
            traceback.print_exc()
            set_status(SUBMIT_ERROR_MSG.arg(unicode(e)))
            write_log(SUBMIT_ERROR_MSG.arg(unicode(e)))
            break
# NOTE(review): fragment — these statements are the tail of a MusicBrainz
# search method whose `def` line is not visible in this chunk; the
# indentation below is reconstructed from syntax and should be confirmed
# against the original file.
if hasattr(artists, "items"):
    # artists maps artist name -> tracks; scan every track for an
    # embedded MusicBrainz album id so a direct lookup can be done.
    album_id = find_id(chain(*artists.values()), "mbrainz_album_id")
    if album_id:
        try:
            write_log(translate("MusicBrainz",
                "Found album id %s in tracks. Retrieving") % album_id)
            return [retrieve_album(album_id)]
        except RetrievalError, e:
            # Direct retrieval failed; log and fall through to a
            # normal text search below.
            msg = translate("MusicBrainz",
                "<b>Error:</b> While retrieving Album ID %1 (%2)")
            write_log(msg.arg(album_id).arg(escape(e)))
# Fall back to a text search when no usable album id was found.
try:
    xml = urlopen(search_album(album, artist, limit))
except urllib2.URLError, e:
    write_log(u'Error: While retrieving search page %s' % unicode(e))
    raise RetrievalError(unicode(e))
write_log(u'Retrieved search results.')
# Remember when the service was last hit, for rate limiting below.
self.__lasttime = time.time()
return parse_album_search(xml)

def retrieve(self, albuminfo):
    # albuminfo is either a dict containing '#album_id' (indexing a
    # string raises TypeError) or a bare album-id string.
    try:
        album_id = albuminfo['#album_id']
    except TypeError:
        album_id = albuminfo
    # Throttle requests to the service.  NOTE(review): this compares
    # seconds to 1000, so the sleep effectively always fires — probably
    # meant `< 1` (one second); confirm intent.
    if time.time() - self.__lasttime < 1000:
        time.sleep(1)
    ret = retrieve_album(album_id)
    self.__lasttime = time.time()
    image = self.retrieve_covers(album_id)
    # NOTE(review): the chunk ends here; the remainder of retrieve()
    # (use of `ret` and `image`) is not visible in this view.
def retrieve_album(info, image=LARGEIMAGE, rls_type=None):
    """Retrieves album from the information in info.

    info: a Discogs release id (int or string), or an info dict
        containing '#r_id' (and optionally '#release_type' and
        '#cover-url').
    image: one of IMAGE_TYPES, or None — if None (or no cover URLs
        are present) no image is retrieved.
    rls_type: MASTER or release; inferred from info when possible.

    Returns a (info, tracks) pair where info is the merged album dict.

    Fixes: merged the duplicated int/basestring id branches, and
    deduplicated the near-identical large/small cover download code.
    """
    if isinstance(info, (int, long, basestring)):
        # A bare release id was passed instead of an info dict.
        r_id = info if isinstance(info, basestring) else unicode(info)
        info = {}
        write_log(
            translate("Discogs", 'Retrieving using Release ID: %s') % r_id)
        rls_type = u'release'
    else:
        if rls_type is None and '#release_type' in info:
            rls_type = info['#release_type']
        r_id = info['#r_id']
        write_log(
            translate("Discogs", 'Retrieving album %s') % (info['album']))

    # Human-facing site URL and the API URL differ for masters/releases.
    site_url = SITE_MASTER_URL if rls_type == MASTER else SITE_RELEASE_URL
    site_url += r_id.encode('utf8')
    url = MASTER_URL % r_id if rls_type == MASTER else RELEASE_URL % r_id

    ret = parse_album_json(json.loads(urlopen(url)))
    # Merge the parsed data into a copy so the caller's dict is untouched.
    info = deepcopy(info)
    info.update(ret[0])

    if image in IMAGE_TYPES and '#cover-url' in info:
        data = []
        for large, small in info['#cover-url']:
            # Use the large image only when requested and available;
            # otherwise fall back to the small one.
            cover_url = large if (image == LARGEIMAGE and large) else small
            write_log(
                translate("Discogs", 'Retrieving cover: %s') % cover_url)
            try:
                data.append({DATA: urlopen(cover_url)})
            except RetrievalError as e:
                # A failed cover download is non-fatal; keep going.
                write_log(translate(
                    'Discogs', u'Error retrieving image:') + unicode(e))
        if data:
            info.update({'__image': data})

    try:
        # Tuple value: (display text, link) shown as extra info.
        info['#extrainfo'] = translate(
            'Discogs', '%s at Discogs.com') % info['album'], site_url
    except KeyError:
        pass
    return info, ret[1]
# NOTE(review): fragment — these statements are the tail of a MusicBrainz
# search method whose `def` line (and the `if album_id:` guard this first
# try-block presumably sits under) is not visible in this chunk; the
# indentation below is reconstructed from syntax and should be confirmed.
try:
    write_log(
        translate("MusicBrainz",
                  "Found album id %s in tracks. Retrieving") % album_id)
    return [retrieve_album(album_id)]
except RetrievalError, e:
    # Direct retrieval failed; log and fall through to a text search.
    msg = translate(
        "MusicBrainz", "<b>Error:</b> While retrieving Album ID %1 (%2)")
    write_log(msg.arg(album_id).arg(escape(e)))
# Fall back to a text search when the album-id lookup failed.
try:
    xml = urlopen(search_album(album, artist, limit))
except urllib2.URLError, e:
    write_log(u'Error: While retrieving search page %s' % unicode(e))
    raise RetrievalError(unicode(e))
write_log(u'Retrieved search results.')
# Remember when the service was last hit, for rate limiting below.
self.__lasttime = time.time()
return parse_album_search(xml)

def retrieve(self, albuminfo):
    # albuminfo is either a dict containing '#album_id' (indexing a
    # string raises TypeError) or a bare album-id string.
    try:
        album_id = albuminfo['#album_id']
    except TypeError:
        album_id = albuminfo
    # Throttle requests to the service.  NOTE(review): this compares
    # seconds to 1000, so the sleep effectively always fires — probably
    # meant `< 1` (one second); confirm intent.
    if time.time() - self.__lasttime < 1000:
        time.sleep(1)
    ret = retrieve_album(album_id)
    self.__lasttime = time.time()
    image = self.retrieve_covers(album_id)
    # NOTE(review): the chunk ends here; the remainder of retrieve()
    # (use of `ret` and `image`) is not visible in this view.
def retrieve_album(info, image=LARGEIMAGE, rls_type=None):
    """Retrieves album from the information in info.

    info: a Discogs release id (int or string), or an info dict
        containing '#r_id' (and optionally '#release_type' and
        '#cover-url').
    image: one of IMAGE_TYPES, or None — if None (or no cover URLs
        are present) no image is retrieved.
    rls_type: MASTER or release; inferred from info when possible.

    Returns a (info, tracks) pair where info is the merged album dict.

    Fixes: merged the duplicated int/basestring id branches, and
    deduplicated the near-identical large/small cover download code.
    """
    if isinstance(info, (int, long, basestring)):
        # A bare release id was passed instead of an info dict.
        r_id = info if isinstance(info, basestring) else unicode(info)
        info = {}
        write_log(
            translate("Discogs", 'Retrieving using Release ID: %s') % r_id)
        rls_type = u'release'
    else:
        if rls_type is None and '#release_type' in info:
            rls_type = info['#release_type']
        r_id = info['#r_id']
        write_log(
            translate("Discogs", 'Retrieving album %s') % (info['album']))

    # Human-facing site URL and the API URL differ for masters/releases.
    site_url = SITE_MASTER_URL if rls_type == MASTER else SITE_RELEASE_URL
    site_url += r_id.encode('utf8')
    url = MASTER_URL % r_id if rls_type == MASTER else RELEASE_URL % r_id

    ret = parse_album_json(json.loads(urlopen(url)))
    # Merge the parsed data into a copy so the caller's dict is untouched.
    info = deepcopy(info)
    info.update(ret[0])

    if image in IMAGE_TYPES and '#cover-url' in info:
        data = []
        for large, small in info['#cover-url']:
            # Use the large image only when requested and available;
            # otherwise fall back to the small one.
            cover_url = large if (image == LARGEIMAGE and large) else small
            write_log(
                translate("Discogs", 'Retrieving cover: %s') % cover_url)
            try:
                data.append({DATA: urlopen(cover_url)})
            except RetrievalError as e:
                # A failed cover download is non-fatal; keep going.
                write_log(translate(
                    'Discogs', u'Error retrieving image:') + unicode(e))
        if data:
            info.update({'__image': data})

    try:
        # Tuple value: (display text, link) shown as extra info.
        info['#extrainfo'] = translate(
            'Discogs', '%s at Discogs.com') % info['album'], site_url
    except KeyError:
        pass
    return info, ret[1]
def search(album):
    """Run an album search and return the raw response for the results page."""
    # '/' would be treated as a path separator in the generated URL,
    # so replace it with a space before building the query.
    query = album.replace('/', ' ')
    target = create_search(query)
    write_log('Search URL - %s' % target)
    return urlopen(iri_to_uri(target))
def search(self, album, artists):
    """Search the service for *album* by *artists*.

    artists: mapping of artist name -> tracks, or a sequence of artist
        names.  More than one artist is treated as 'Various Artists'.
    Returns a list of (albuminfo, tracks) tuples; raises RetrievalError
    when no album name is given or the search page cannot be fetched.

    Fix: the side-effect-only list comprehension
    ``[tracks.extend(z) for z in artists.values()]`` (built and threw
    away a list of Nones) is now a plain loop.
    """
    ret = []
    if len(artists) > 1:
        artist = 'Various Artists'
    else:
        if hasattr(artists, 'items'):
            artist = list(artists.keys())[0]
        else:
            artist = artists[0]
    if self._useid and hasattr(artists, 'values'):
        # Flatten all tracks and look for a previously stored AMG
        # album id in any of the known id fields.
        tracks = []
        for track_list in artists.values():
            tracks.extend(track_list)
        for field in ('amg_rovi_id', 'amg_pop_id', 'amgsqlid',
                      'amg_album_id',):
            album_id = find_id(tracks, field)
            if album_id:
                break
        if not isempty(album_id):
            write_log('Found Album ID %s' % album_id)
            try:
                return self.keyword_search(':id %s' % album_id)
            except OldURLError:
                # Stored id uses a stale URL scheme; fall through to a
                # normal text search.
                write_log("Invalid URL used. Doing normal search.")
    if not album:
        raise RetrievalError('Album name required.')
    write_log('Searching for %s' % album)
    try:
        searchpage = search(album)
    except urllib.error.URLError as e:
        write_log('Error: While retrieving search page %s' % str(e))
        raise RetrievalError(str(e))
    write_log('Retrieved search results.')
    search_results = parse_searchpage(searchpage, artist, album)
    if search_results:
        matched, matches = search_results
    else:
        return []
    if matched and len(matches) == 1:
        # Unambiguous exact match.
        ret = [(matches[0], [])]
    elif matched:
        write_log('Ambiguous matches found for: %s - %s' % (artist, album))
        ret.extend([(z, []) for z in matches])
    else:
        write_log('No exact matches found for: %s - %s' % (artist, album))
        ret.extend([(z, []) for z in matches])
    return ret
def search(self, artist, fns=None):
    """Find AcoustID matches for each file in fns.

    For every file, a fingerprint stored in the tag is used when
    present; otherwise the AcoustID ``match`` call works from the file
    path alone (dur/fp stay None).  Matches scoring below
    self.min_score are dropped.

    Returns an iterator of retrieve_album_info results over the
    best-matching (album, track) pairs.

    Fix: the declared default ``fns=None`` previously crashed at
    ``len(fns)`` with TypeError — None is now treated as an empty list.
    """
    if fns is None:
        fns = []
    tracks = []
    albums = []
    fns_len = len(fns)
    for i, fn in enumerate(fns):
        try:
            disp_fn = audioinfo.decode_fn(fn.filepath)
        except AttributeError:
            disp_fn = fn['__path']
        write_log(disp_fn)
        try:
            fp = id_in_tag(fn)
            if fp:
                write_log(FOUND_ID_MSG)
                dur, fp = fp
            else:
                write_log(CALCULATE_MSG)
                dur, fp = (None, None)
            write_log(RETRIEVE_MSG.arg(i + 1).arg(fns_len))
            set_status(RETRIEVE_MSG.arg(i + 1).arg(fns_len))
            # NOTE(review): "gT8GJxhO" is a hard-coded application API
            # key baked into the source.
            data, fp = match("gT8GJxhO", fn.filepath, fp, dur)
            write_log(translate('AcoustID', "Parsing Data"))
            info = parse_lookup_result(data, fp=fp)
        except acoustid.FingerprintGenerationError as e:
            # Skip unreadable files; keep processing the rest.
            write_log(FP_ERROR_MSG.arg(str(e)))
            continue
        except acoustid.WebServiceError as e:
            # Service failure — further lookups would fail too.
            set_status(WEB_ERROR_MSG.arg(str(e)))
            write_log(WEB_ERROR_MSG.arg(str(e)))
            break
        if hasattr(info, 'items'):
            # A single track dict was returned with no album info.
            albums.append([{}])
            info['#exact'] = fn
            tracks.append(info)
        elif info is not None:
            for album, track in info:
                if track and track['#score'] >= self.min_score:
                    track['#exact'] = fn
                    tracks.append(track)
                    # Keep albums/tracks index-aligned for best_match.
                    albums.append(album if album else [{}])
    return starmap(retrieve_album_info, best_match(albums, tracks))