def keyword_search(self, s):
    """Dispatch a raw search string.

    ':a<mbid>' searches releases by artist MBID, ':b<mbid>' fetches one
    release directly, and anything else is treated as keyword text
    (split into artist/album when possible).
    """
    if s.startswith(':a'):
        arid = s[len(':a'):].strip()
        try:
            query_url = search_album(
                'arid:' + solr_escape(arid), limit=100, own=True)
            return parse_album_search(urlopen(query_url))
        except RetrievalError as e:
            msg = translate("MusicBrainz", '<b>Error:</b> While retrieving %1: %2')
            write_log(msg.arg(arid).arg(escape(e)))
            raise
    if s.startswith(':b'):
        release_id = s[len(':b'):].strip()
        try:
            return [self.retrieve(release_id)]
        except RetrievalError as e:
            msg = translate(
                "MusicBrainz",
                "<b>Error:</b> While retrieving Album ID %1 (%2)")
            write_log(msg.arg(release_id).arg(escape(e)))
            raise
    # Plain keyword search: try to split into (artist, album) pairs first;
    # on failure fall back to a raw full-text album search.
    try:
        params = parse_searchstring(s)
    except RetrievalError:
        return parse_album_search(urlopen(search_album(s, limit=100)))
    if not params:
        return
    artist, album = params[0][0], params[0][1]
    return self.search(album, [artist], 100)
# NOTE(review): Python 2 variant of the AllMusic retrieve_album (py2-only
# `except urllib2.URLError, e` syntax and `unicode`).  This copy appears
# truncated: the trailing `else: cover = None` / `return info, tracks, cover`
# present in the Python 3 copy elsewhere in this file is missing here, so the
# function as written falls off the end without returning.  Left byte-identical
# pending confirmation against version control.
def retrieve_album(url, coverurl=None, id_field=ALBUM_ID): write_log('Opening Album Page - %s' % url) album_page, code = urlopen(url, False, True) if album_page.find("featured new releases") >= 0: raise OldURLError("Old AMG URL used.") album_page = get_encoding(album_page, True, 'utf8')[1] info, tracks = parse_albumpage(album_page) info['#albumurl'] = url info['amg_url'] = url if 'album' in info: info['#extrainfo'] = [ info['album'] + u' at AllMusic.com', info['#albumurl']] if coverurl: try: write_log('Retrieving Cover - %s' % info['#cover-url']) cover = retrieve_cover(info['#cover-url']) except KeyError: write_log('No cover found.') cover = None except urllib2.URLError, e: write_log(u'Error: While retrieving cover %s - %s' % (info['#cover-url'], unicode(e))) cover = None
def retrieve_cover_links(album_id, extra=None):
    """Fetch cover-art metadata for a release from the Cover Art Archive.

    album_id: MusicBrainz release UUID.
    extra: optional extra path component; when given, the raw response body
        is returned instead of the parsed JSON image index.

    Returns the parsed JSON index (extra is None) or the raw payload for a
    200 response; implicitly returns None for any undocumented status code
    (unchanged behaviour).
    Raises RetrievalError for the documented Cover Art Archive error codes.
    """
    if extra is None:
        url = "http://coverartarchive.org/release/" + album_id
    else:
        url = "http://coverartarchive.org/release/%s/%s" % (album_id, extra)
    write_log(translate("MusicBrainz", "Retrieving cover: %s") % url)
    try:
        data, code = urlopen(url, code=True)
    except RetrievalError as e:
        if e.code == 404:
            raise RetrievalError(
                translate("MusicBrainz", "No images exist for this album."),
                404)
        # Bare raise re-raises with the original traceback; `raise e`
        # would append an extra frame to it.
        raise
    if code == 200:
        if extra is None:
            return json.loads(data)
        else:
            return data
    elif code == 400:
        raise RetrievalError(translate("MusicBrainz", "Invalid UUID"))
    elif code in (405, 406):
        raise RetrievalError(translate("MusicBrainz", "Invalid query sent."))
    elif code == 503:
        raise RetrievalError(
            translate("MusicBrainz", "You have exceeded your rate limit."))
    elif code == 404:
        raise RetrievalError(translate("MusicBrainz", "Image does not exist."))
def search(self, artist, files=None):
    """Run a search against this Mp3tag-style source.

    Keywords are built by formatting files[0] with self.searchby when both
    are available, otherwise *artist* is used verbatim; whitespace runs are
    collapsed to the source's separator.  Sources with no search section
    fall back to direct retrieval.
    """
    if files is not None and self.searchby:
        keywords = format_value(files[0], self.searchby)
    else:
        keywords = artist
    keywords = re.sub(r'\s+', self._separator, keywords)

    if self.search_source is None:
        # No search page defined: treat the keywords as an album locator.
        album = self.retrieve(keywords)
        return [album] if album else []

    url = self._search_base.replace('%s', keywords)
    write_log(translate('Mp3tag', 'Retrieving search page: %s') % url)
    set_status(translate('Mp3tag', 'Retrieving search page...'))
    raw = self.html if self.html is not None else urlopen(url)
    page = get_encoding(raw, True, 'utf8')[1]
    write_log(translate('Mp3tag', 'Parsing search page.'))
    set_status(translate('Mp3tag', 'Parsing search page...'))
    found = parse_search_page(self.indexformat, page, self.search_source, url)
    return [(info, []) for info in found]
def retrieve_covers(cover_links, size=LARGE):
    """Download every image listed in a Cover Art Archive index.

    cover_links: parsed JSON index (as from retrieve_cover_links()).
    size: SMALL or LARGE to download the matching thumbnail; any other
        value downloads the full-resolution image.

    Returns a list of dicts with 'desc', 'mime', 'imagetype' and 'data'.
    """
    ret = []
    for cover in cover_links['images']:
        desc = cover.get('comment', u"")
        cover_type = cover['types'][0]
        if cover_type in mb_imagetypes:
            cover_type = imagetypes[mb_imagetypes[cover_type]]
        else:
            cover_type = imagetypes[u"Other"]
        # BUG FIX: the original compared the *cover dict* to SMALL/LARGE
        # (always false), so the size argument was ignored and the
        # full-resolution image was always downloaded.
        if size == SMALL:
            image_url = cover['thumbnails']['small']
        elif size == LARGE:
            image_url = cover['thumbnails']['large']
        else:
            image_url = cover['image']
        write_log(translate("MusicBrainz", "Retrieving image %s") % image_url)
        image_data = urlopen(image_url)
        ret.append({
            'desc': desc,
            'mime': get_mime(image_data),
            "imagetype": cover_type,
            "data": image_data
        })
    return ret
def retrieve_album(url, coverurl=None, id_field=ALBUM_ID):
    """Fetch and parse an AllMusic album page, optionally with its cover.

    Returns (info, tracks, cover); cover is None when not requested or
    when retrieval fails.
    Raises OldURLError when the URL redirects to the AllMusic front page.
    """
    write_log('Opening Album Page - %s' % url)
    album_page, code = urlopen(url, False, True)
    if b"featured new releases" in album_page:
        raise OldURLError("Old AMG URL used.")
    info, tracks = parse_albumpage(album_page)
    info['#albumurl'] = url
    info['amg_url'] = url
    if 'album' in info:
        info['#extrainfo'] = [
            info['album'] + ' at AllMusic.com', info['#albumurl']]
    cover = None
    if coverurl:
        try:
            write_log('Retrieving Cover - %s' % info['#cover-url'])
            cover = retrieve_cover(info['#cover-url'])
        except KeyError:
            # No '#cover-url' key was parsed from the page.
            write_log('No cover found.')
        except urllib.error.URLError as e:
            write_log('Error: While retrieving cover %s - %s' %
                      (info['#cover-url'], str(e)))
    return info, tracks, cover
def retrieve_album(album_id):
    """Fetch a single MusicBrainz release — recordings, artist credits,
    tags, ratings and all relationship data — and return the parsed album."""
    inc = ('?inc=recordings+artist-credits+puids+isrcs+tags+ratings'
           '+artist-rels+recording-rels+release-rels+release-group-rels'
           '+url-rels+work-rels+recording-level-rels+work-level-rels')
    url = SERVER + 'release/' + album_id + inc
    return parse_album(urlopen(url))
# NOTE(review): Python 2 variant of keyword_search (py2-only
# `except RetrievalError, e` syntax, explicit .encode('utf8')).  Truncated:
# only the ':a' (artist-MBID) branch is present; the ':b' and plain-keyword
# branches seen in the Python 3 copy are missing.  Left byte-identical.
def keyword_search(self, s): if s.startswith(u':a'): artist_id = s[len(':a'):].strip() try: url = search_album('arid:' + solr_escape(artist_id.encode('utf8')), limit=100, own=True) return parse_album_search(urlopen(url)) except RetrievalError, e: msg = translate("MusicBrainz", '<b>Error:</b> While retrieving %1: %2') write_log(msg.arg(artist_id).arg(escape(e))) raise
# NOTE(review): Python 2 variant of retrieve_cover_links (py2-only
# `except RetrievalError, e`).  Truncated after the try/except: the status
# code dispatch (200/400/405/406/503/404) found in the Python 3 copy is
# missing, so `data` and `code` are never used.  Left byte-identical.
def retrieve_cover_links(album_id, extra=None): if extra is None: url = "http://coverartarchive.org/release/" + album_id else: url = "http://coverartarchive.org/release/%s/%s" % (album_id, extra) write_log(translate("MusicBrainz", "Retrieving cover: %s") % url) try: data, code = urlopen(url, code=True) except RetrievalError, e: if e.code == 404: raise RetrievalError(translate("MusicBrainz", "No images exist for this album."), 404) raise e
# NOTE(review): duplicate of the Python 2 retrieve_cover_links fragment above
# with different wrapping; also truncated before the status-code dispatch
# present in the Python 3 copy.  Left byte-identical pending deduplication.
def retrieve_cover_links(album_id, extra=None): if extra is None: url = "http://coverartarchive.org/release/" + album_id else: url = "http://coverartarchive.org/release/%s/%s" % (album_id, extra) write_log(translate("MusicBrainz", "Retrieving cover: %s") % url) try: data, code = urlopen(url, code=True) except RetrievalError, e: if e.code == 404: raise RetrievalError( translate("MusicBrainz", "No images exist for this album."), 404) raise e
def keyword_search(keywords):
    """Query Amazon's AWSECommerceService ItemSearch (Music index) for
    *keywords* and return the parsed search results."""
    write_log(translate('Amazon',
                        'Retrieving search results for keywords: %s') % keywords)
    # Insertion order is preserved deliberately — the signed URL is built
    # from these pairs.
    query_pairs = {
        "Operation": "ItemSearch",
        'SearchIndex': 'Music',
        "ResponseGroup": "ItemAttributes,Images",
        "Service": "AWSECommerceService",
        'ItemPage': '1',
        'Keywords': keywords,
        'AssociateTag': 'puddletag-20'}
    signed_url = create_aws_url(access_key, secret_key, query_pairs)
    return parse_search_xml(urlopen(signed_url))
def keyword_search(keywords):
    """Search Amazon's product API (ItemSearch, Music index) for *keywords*
    and return the parsed XML results."""
    write_log(translate('Amazon',
                        'Retrieving search results for keywords: %s') % keywords)
    # u-prefixed literals kept: under Python 2 these are unicode objects.
    query_pairs = {
        "Operation": u"ItemSearch",
        'SearchIndex': u'Music',
        "ResponseGroup": u"ItemAttributes,Images",
        "Service": u"AWSECommerceService",
        'ItemPage': u'1',
        'Keywords': keywords,
        'AssociateTag': u'puddletag-20'}
    request_url = create_aws_url(access_key, secret_key, query_pairs)
    return parse_search_xml(urlopen(request_url))
def search(self, album, artists='', limit=40):
    """Search MusicBrainz for *album*, optionally constrained by artist.

    album: album title (may be empty if an artist is given).
    artists: '' / a sequence of artist names / a mapping of
        artist -> tracks; more than one artist means 'Various Artists'.
    limit: maximum number of results requested.

    Returns parsed search results; a release id found in the supplied
    tracks short-circuits to a direct retrieval.
    Raises RetrievalError when neither album nor artist is available or
    when the search page cannot be fetched.
    """
    # Crude MusicBrainz rate limiting: pause if the previous request
    # was recent (threshold is in seconds since time.time() is used).
    if time.time() - self.__lasttime < 1000:
        time.sleep(1)
    # FIX: removed unused locals `ret` and `check_matches`.
    if isempty(artists):
        artist = None
    if len(artists) > 1:
        artist = 'Various Artists'
    elif artists:
        if hasattr(artists, 'items'):
            artist = list(artists.keys())[0]
        else:
            artist = artists[0]
    if not album and not artist:
        raise RetrievalError('Album or Artist required.')
    write_log('Searching for %s' % album)
    if hasattr(artists, "items"):
        # A mapping carries track objects: look for a stored release id.
        album_id = find_id(chain(*list(artists.values())), "mbrainz_album_id")
        if album_id:
            try:
                write_log(
                    translate("MusicBrainz",
                              "Found album id %s in tracks. Retrieving") % album_id)
                return [retrieve_album(album_id)]
            except RetrievalError as e:
                msg = translate(
                    "MusicBrainz",
                    "<b>Error:</b> While retrieving Album ID %1 (%2)")
                write_log(msg.arg(album_id).arg(escape(e)))
    try:
        xml = urlopen(search_album(album, artist, limit))
    except urllib.error.URLError as e:
        write_log('Error: While retrieving search page %s' % str(e))
        raise RetrievalError(str(e))
    write_log('Retrieved search results.')
    self.__lasttime = time.time()
    return parse_album_search(xml)
def retrieve_covers(cover_links, size=LARGE):
    """Download all images in a Cover Art Archive index.

    cover_links: parsed JSON index with an 'images' list.
    size: SMALL or LARGE selects the corresponding thumbnail; any other
        value fetches the full-size image.

    Returns a list of cover dicts ('desc', 'mime', 'imagetype', 'data').
    """
    ret = []
    for cover in cover_links['images']:
        desc = cover.get('comment', u"")
        cover_type = cover['types'][0]
        if cover_type in mb_imagetypes:
            cover_type = imagetypes[mb_imagetypes[cover_type]]
        else:
            cover_type = imagetypes[u"Other"]
        # BUG FIX: original tested `cover == SMALL/LARGE` (the dict, always
        # false) instead of the `size` parameter, so the requested size was
        # never honoured.
        if size == SMALL:
            image_url = cover['thumbnails']['small']
        elif size == LARGE:
            image_url = cover['thumbnails']['large']
        else:
            image_url = cover['image']
        write_log(translate("MusicBrainz", "Retrieving image %s") % image_url)
        image_data = urlopen(image_url)
        ret.append({'desc': desc, 'mime': get_mime(image_data),
                    "imagetype": cover_type, "data": image_data})
    return ret
def retrieve(self, info):
    """Retrieve and parse the album page for *info*.

    info: either an identifier/URL string or an info dict containing '#url'.

    Returns (info, tracks); tracks is None when the page yields none.
    Page retrieval is best-effort: on failure an empty page is parsed.
    """
    if isinstance(info, str):
        text = info.replace(' ', self._separator)
        info = {}
    else:
        info = deepcopy(info)
        text = info['#url']
    # self.album_url is either a %s-style format string or a plain prefix.
    try:
        url = self.album_url % text
    except TypeError:
        url = self.album_url + text
    info['#url'] = url
    try:
        write_log(translate('Mp3tag', 'Retrieving album page: %s') % url)
        set_status(translate('Mp3tag', 'Retrieving album page...'))
        page = get_encoding(urlopen(url), True, 'utf8')[1]
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Retrieval stays best-effort.
        page = ''
    write_log(translate('Mp3tag', 'Parsing album page.'))
    set_status(translate('Mp3tag', 'Parsing album page...'))
    new_info, tracks = parse_album_page(page, self.album_source, url)
    info.update(dict((k, v) for k, v in new_info.items() if v))
    if self._get_cover and COVER in info:
        cover_url = new_info[COVER]
        if isinstance(cover_url, str):
            info.update(retrieve_cover(cover_url))
        else:
            # NOTE(review): dict.update with a list of retrieve_cover results
            # only works if each result is a mapping/pair sequence — confirm
            # against this module's retrieve_cover.
            info.update(list(map(retrieve_cover, cover_url)))
    if not tracks:
        tracks = None
    return info, tracks
def retrieve_album(info, image=MEDIUMIMAGE):
    """Retrieves album from the information in info.

    info: an ASIN string, or an info dict containing '#asin' (and, for a
        dict, 'album' plus the image-URL fields named by image_types).
    image: one of image_types to also download that cover size, or None
        (or any non-member) to skip cover retrieval.

    Returns the parsed track list; a retrieved cover is stored under
    info['__image'] (dict input only).
    """
    if isinstance(info, basestring):
        asin = info
    else:
        asin = info['#asin']
    query_pairs = {
        "Operation": u"ItemLookup",
        "Service": u"AWSECommerceService",
        'ItemId': asin,
        'ResponseGroup': u'Tracks',
        'AssociateTag': u'puddletag-20'}
    url = create_aws_url(access_key, secret_key, query_pairs)
    if isinstance(info, basestring):
        write_log(translate('Amazon', 'Retrieving using ASIN: %s') % asin)
    else:
        write_log(translate('Amazon', 'Retrieving XML: %1 - %2').arg(
            info.get('artist', u'')).arg(info.get('album', u'')))
    xml = urlopen(url)
    if isinstance(info, basestring):
        tracks = parse_album_xml(xml)
    else:
        tracks = parse_album_xml(xml, info['album'])
    # BUG FIX: only a dict carries image URLs; the original evaluated
    # info[image] even when info was a plain ASIN string, raising TypeError.
    if image in image_types and not isinstance(info, basestring):
        url = info[image]
        write_log(translate("Amazon", 'Retrieving cover: %s') % url)
        info.update({'__image': retrieve_cover(url)})
    return tracks
def search(self, artist, files=None):
    """Run a search against this source.

    Keywords come from formatting files[0] with self.searchby when both
    are given, otherwise from *artist*; whitespace runs are collapsed to
    the source's separator.  Sources without a search section fall back
    to direct retrieval.

    Returns a list of (info, tracks) pairs.
    """
    if files is not None and self.searchby:
        keywords = format_value(files[0], self.searchby)
    else:
        keywords = artist
    # FIX: regex pattern made a raw string — '\s' in a plain literal is an
    # invalid escape sequence in modern Python.
    keywords = re.sub(r'\s+', self._separator, keywords)
    if self.search_source is None:
        album = self.retrieve(keywords)
        return [album] if album else []
    url = self._search_base.replace(u'%s', keywords)
    write_log(translate('Mp3tag', u'Retrieving search page: %s') % url)
    set_status(translate('Mp3tag', u'Retrieving search page...'))
    if self.html is None:
        page = get_encoding(urlopen(url), True, 'utf8')[1]
    else:
        page = get_encoding(self.html, True, 'utf8')[1]
    write_log(translate('Mp3tag', u'Parsing search page.'))
    set_status(translate('Mp3tag', u'Parsing search page...'))
    infos = parse_search_page(self.indexformat, page, self.search_source, url)
    return [(info, []) for info in infos]
def retrieve(self, info):
    """Retrieve and parse the album page for *info* (Python 2 variant).

    info: either an identifier/URL string or an info dict with '#url'.

    Returns (info, tracks); tracks is None when the page yields none.
    """
    if isinstance(info, basestring):
        text = info.replace(u' ', self._separator)
        info = {}
    else:
        info = deepcopy(info)
        text = info['#url']
    # self.album_url is either a %s-style format string or a plain prefix.
    try:
        url = self.album_url % text
    except TypeError:
        url = self.album_url + text
    info['#url'] = url
    try:
        write_log(translate('Mp3tag', u'Retrieving album page: %s') % url)
        set_status(translate('Mp3tag', u'Retrieving album page...'))
        page = get_encoding(urlopen(url), True, 'utf8')[1]
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Retrieval stays best-effort.
        page = u''
    write_log(translate('Mp3tag', u'Parsing album page.'))
    set_status(translate('Mp3tag', u'Parsing album page...'))
    new_info, tracks = parse_album_page(page, self.album_source, url)
    info.update(dict((k, v) for k, v in new_info.iteritems() if v))
    if self._get_cover and COVER in info:
        cover_url = new_info[COVER]
        if isinstance(cover_url, basestring):
            info.update(retrieve_cover(cover_url))
        else:
            info.update(map(retrieve_cover, cover_url))
    if not tracks:
        tracks = None
    return info, tracks
def retrieve_album(info, image=MEDIUMIMAGE):
    """Retrieves album from the information in info.

    info: an ASIN string, or an info dict containing '#asin' (and, for a
        dict, 'album' plus the image-URL fields named by image_types).
    image: one of image_types to also download that cover size, or None
        (or any non-member) to skip cover retrieval.

    Returns the parsed track list; a retrieved cover is stored under
    info['__image'] (dict input only).
    """
    if isinstance(info, str):
        asin = info
    else:
        asin = info['#asin']
    query_pairs = {
        "Operation": "ItemLookup",
        "Service": "AWSECommerceService",
        'ItemId': asin,
        'ResponseGroup': 'Tracks',
        'AssociateTag': 'puddletag-20'}
    url = create_aws_url(access_key, secret_key, query_pairs)
    if isinstance(info, str):
        write_log(translate('Amazon', 'Retrieving using ASIN: %s') % asin)
    else:
        write_log(translate('Amazon', 'Retrieving XML: %1 - %2').arg(
            info.get('artist', '')).arg(info.get('album', '')))
    xml = urlopen(url)
    if isinstance(info, str):
        tracks = parse_album_xml(xml)
    else:
        tracks = parse_album_xml(xml, info['album'])
    # BUG FIX: only a dict carries image URLs; the original evaluated
    # info[image] even when info was a plain ASIN string, raising TypeError.
    if image in image_types and not isinstance(info, str):
        url = info[image]
        write_log(translate("Amazon", 'Retrieving cover: %s') % url)
        info.update({'__image': retrieve_cover(url)})
    return tracks
def retrieve_cover(url):
    """Download the image at *url* and wrap it as a single-cover list."""
    return [{DATA: urlopen(url)}]
# NOTE(review): truncated fragment, not a complete definition.  It begins
# mid-method (the tail of a Python 2 keyword_search, starting at a dangling
# `raise`) and ends mid-way through the following `search` method — the rest
# of that method is missing here.  Uses py2-only `except RetrievalError, e`
# syntax.  Left byte-identical pending reconstruction from version control.
raise elif s.startswith(u':b'): r_id = s[len(u':b'):].strip() try: return [self.retrieve(r_id)] except RetrievalError, e: msg = translate( "MusicBrainz", "<b>Error:</b> While retrieving Album ID %1 (%2)") write_log(msg.arg(r_id).arg(escape(e))) raise else: try: params = parse_searchstring(s) except RetrievalError, e: return parse_album_search(urlopen(search_album(s, limit=100))) if not params: return artist = params[0][0] album = params[0][1] return self.search(album, [artist], 100) def search(self, album, artists=u'', limit=40): if time.time() - self.__lasttime < 1000: time.sleep(1) ret = [] check_matches = False if isempty(artists): artist = None if len(artists) > 1:
# NOTE(review): another truncated Python 2 fragment, near-duplicate of the
# one above: starts mid-except (a write_log/raise pair from keyword_search)
# and cuts off inside the following `search` method.  Left byte-identical
# pending reconstruction from version control.
write_log(msg.arg(artist_id).arg(escape(e))) raise elif s.startswith(u':b'): r_id = s[len(u':b'):].strip() try: return [self.retrieve(r_id)] except RetrievalError, e: msg = translate("MusicBrainz", "<b>Error:</b> While retrieving Album ID %1 (%2)") write_log(msg.arg(r_id).arg(escape(e))) raise else: try: params = parse_searchstring(s) except RetrievalError, e: return parse_album_search(urlopen(search_album(s, limit=100))) if not params: return artist = params[0][0] album = params[0][1] return self.search(album, [artist], 100) def search(self, album, artists=u'', limit=40): if time.time() - self.__lasttime < 1000: time.sleep(1) ret = [] check_matches = False if isempty(artists): artist = None if len(artists) > 1:
def search(album):
    """Build the AllMusic search URL for *album* and fetch the results page."""
    query = album.replace('/', ' ')
    search_url = create_search(query)
    write_log('Search URL - %s' % search_url)
    return urlopen(iri_to_uri(search_url))
def search(album):
    """Fetch the AllMusic search results page for *album*.

    u-prefixed literals kept: under Python 2 these are unicode objects.
    """
    url = create_search(album.replace(u'/', u' '))
    write_log(u'Search URL - %s' % url)
    return urlopen(iri_to_uri(url))