def update(self, signal=True):
    """Refresh similarity and CHANGED/NORMAL state after metadata edits.

    Compares the editable metadata against the original tags; the first
    differing savable tag marks the file as changed. When *signal* is
    true the associated UI item is updated too.
    """
    new_metadata = self.new_metadata
    names = set(new_metadata.keys())
    names.update(self.orig_metadata.keys())
    clear_existing_tags = config.setting["clear_existing_tags"]
    for name in names:
        # Internal (~) tags and tags this format cannot store never
        # count as changes.
        if not name.startswith('~') and self.supports_tag(name):
            new_values = new_metadata.getall(name)
            if not (new_values or clear_existing_tags):
                # Tag absent in the new metadata and existing tags are
                # kept on save: nothing would be written, so skip it.
                continue
            orig_values = self.orig_metadata.getall(name)
            if orig_values != new_values:
                self.similarity = self.orig_metadata.compare(new_metadata)
                if self.state in (File.CHANGED, File.NORMAL):
                    self.state = File.CHANGED
                break
    else:
        # No savable tag differed; images may still have changed.
        if self.orig_metadata.images != self.metadata.images:
            self.state = File.CHANGED
        else:
            self.similarity = 1.0
            if self.state in (File.CHANGED, File.NORMAL):
                self.state = File.NORMAL
    if signal:
        log.debug("Updating file %r", self)
        if self.item:
            self.item.update()
def _load(self, filename):
    """Load metadata from a WMA/ASF file.

    Translates ASF tag names via __RTRANS, extracts embedded WM/Picture
    cover art and normalizes the WM/SharedUserRating value.
    """
    log.debug("Loading file %r", filename)
    file = ASF(encode_filename(filename))
    metadata = Metadata()
    for name, values in file.tags.items():
        if name == 'WM/Picture':
            for image in values:
                (mime, data, type, description) = unpack_image(image.value)
                extras = {
                    'desc': description,
                    'type': image_type_from_id3_num(type)
                }
                metadata.add_image(mime, data, extras=extras)
            continue
        elif name not in self.__RTRANS:
            # ASF tag with no Picard equivalent.
            continue
        elif name == 'WM/SharedUserRating':
            # Rating in WMA ranges from 0 to 99, normalize this to the range 0 to 5
            values[0] = int(round(int(unicode(values[0])) / 99.0 * (config.setting['rating_steps'] - 1)))
        name = self.__RTRANS[name]
        values = filter(bool, map(unicode, values))
        if values:
            metadata[name] = values
    self._info(metadata, file)
    return metadata
def _move_additional_files(self, old_filename, new_filename):
    """Move extra files, like playlists...

    Files in the old directory matching any configured glob pattern are
    moved next to the renamed audio file, unless they are themselves
    loaded in the tagger.
    """
    old_path = encode_filename(os.path.dirname(old_filename))
    new_path = encode_filename(os.path.dirname(new_filename))
    patterns = encode_filename(config.setting["move_additional_files_pattern"])
    patterns = filter(bool, [p.strip() for p in patterns.split()])
    try:
        names = os.listdir(old_path)
    except os.error:
        log.error("Error: {} directory not found".format(old_path))
        return
    # NOTE(review): under Python 3 filter() returns a one-shot iterator,
    # so reusing filtered_names across several patterns would only work
    # for the first pattern — confirm this module targets Python 2.
    filtered_names = filter(lambda x: x[0] != '.', names)
    for pattern in patterns:
        pattern_regex = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
        file_names = names
        if pattern[0] != '.':
            # Patterns not starting with a dot never match hidden files.
            file_names = filtered_names
        for old_file in file_names:
            if pattern_regex.match(old_file):
                new_file = os.path.join(new_path, old_file)
                old_file = os.path.join(old_path, old_file)
                # FIXME we shouldn't do this from a thread!
                if self.tagger.files.get(decode_filename(old_file)):
                    log.debug("File loaded in the tagger, not moving %r", old_file)
                    continue
                log.debug("Moving %r to %r", old_file, new_file)
                shutil.move(old_file, new_file)
def drop_urls(urls, target):
    """Handle URLs dropped onto *target* in the tree view.

    Local file/directory URLs are added to the tagger (or moved if the
    file is already loaded); MusicBrainz release/recording URLs trigger
    album / standalone-recording loads.
    """
    files = []
    new_files = []
    for url in urls:
        log.debug("Dropped the URL: %r", url.toString(QtCore.QUrl.RemoveUserInfo))
        if url.scheme() == "file" or not url.scheme():
            # Strip a trailing NUL (seen from some drag sources) and
            # canonicalize the path before looking it up.
            filename = os.path.normpath(os.path.realpath(url.toLocalFile().rstrip("\0")))
            file = BaseTreeView.tagger.files.get(filename)
            if file:
                files.append(file)
            elif os.path.isdir(encode_filename(filename)):
                BaseTreeView.tagger.add_directory(filename)
            else:
                new_files.append(filename)
        elif url.scheme() in ("http", "https"):
            # Extract entity type and MBID from a MusicBrainz URL.
            path = url.path()
            match = re.search(r"/(release|recording)/([0-9a-z\-]{36})", path)
            if match:
                entity = match.group(1)
                mbid = match.group(2)
                if entity == "release":
                    BaseTreeView.tagger.load_album(mbid)
                elif entity == "recording":
                    BaseTreeView.tagger.load_nat(mbid)
    if files:
        BaseTreeView.tagger.move_files(files, target)
    if new_files:
        BaseTreeView.tagger.add_files(new_files, target=target)
def _move(self, parent):
    """Reparent this file and refresh its AcoustID association."""
    if parent != self.parent:
        log.debug("Moving %r from %r to %r", self, self.parent, parent)
        old_parent = self.parent
        if old_parent:
            old_parent.remove_file(self)
        self.parent = parent
        recording_id = self.metadata['musicbrainz_recordingid']
        self.tagger.acoustidmanager.update(self, recording_id)
def _move(self, parent):
    """Attach this file to *parent*, detaching it from its old parent."""
    if parent != self.parent:
        log.debug("Moving %r from %r to %r", self, self.parent, parent)
        previous = self.parent
        if previous:
            previous.remove_file(self)
        self.parent = parent
        # Re-associate the file's fingerprint with its new context.
        self._acoustid_update()
def restore_header_state(self):
    """Restore the release list's header layout from persisted config."""
    if self.ui.release_list:
        header = self.ui.release_list.header()
        state = config.persist[self.dialog_header_state]
        if state:
            header.restoreState(state)
        # Lazy %-args: only formatted when debug logging is enabled
        # (was eager "%" string formatting).
        log.debug("restore_state: %s", self.dialog_header_state)
def _set_metadata(self, coverartimage, data):
    """Attach downloaded image *data* to the album and track metadata.

    Images that cannot be saved to metadata (thumbnails) are only
    logged. IO errors abort album loading and re-raise; identification
    errors are recorded but non-fatal.
    """
    try:
        coverartimage.set_data(data)
        if coverartimage.can_be_saved_to_metadata:
            log.debug("Cover art image stored to metadata: %r [%s]" % (
                coverartimage,
                coverartimage.imageinfo_as_string())
            )
            self.metadata.append_image(coverartimage)
            for track in self.album._new_tracks:
                track.metadata.append_image(coverartimage)
            # If the image already was a front image,
            # there might still be some other non-CAA front
            # images in the queue - ignore them.
            if not self.front_image_found:
                self.front_image_found = coverartimage.is_front_image()
        else:
            log.debug("Thumbnail for cover art image: %r [%s]" % (
                coverartimage,
                coverartimage.imageinfo_as_string())
            )
    except CoverArtImageIOError as e:
        self.album.error_append(unicode(e))
        self.album._finalize_loading(error=True)
        raise e
    except CoverArtImageIdentificationError as e:
        self.album.error_append(unicode(e))
def add_files(self, filenames, target=None):
    """Add files to the tagger.

    Filters out hidden files (when configured) and names matching the
    user's ignore regex, opens the remaining new files and starts
    loading them, moving them to *target* when loading completes.
    """
    ignoreregex = None
    pattern = config.setting['ignore_regex']
    if pattern:
        ignoreregex = re.compile(pattern)
    ignore_hidden = not config.persist["show_hidden_files"]
    new_files = []
    for filename in filenames:
        filename = os.path.normpath(os.path.realpath(filename))
        if ignore_hidden and is_hidden_path(filename):
            # Lazy log args instead of eager "%" formatting.
            log.debug("File ignored (hidden): %s", filename)
            continue
        if ignoreregex is not None and ignoreregex.search(filename):
            log.info("File ignored (matching %s): %s", pattern, filename)
            continue
        if filename not in self.files:
            file = open_file(filename)
            if file:
                self.files[filename] = file
                new_files.append(file)
    if new_files:
        log.debug("Adding files %r", new_files)
        new_files.sort(key=lambda x: x.filename)
        if target is None or target is self.unmatched_files:
            # Files land in "unmatched" first; a None target tells the
            # load callback not to move them again.
            self.unmatched_files.add_files(new_files)
            target = None
        for file in new_files:
            file.load(partial(self._file_loaded, target=target))
def __fingerprint_submission_finished(self, fingerprints, document, http, error):
    """Handle completion of an AcoustID fingerprint submission request."""
    if error:
        try:
            error = load_json(document)
            message = error["error"]["message"]
        except (ValueError, KeyError, TypeError):
            # Body was not JSON or lacked the expected error structure;
            # fall back to the transport-level error string only.
            # (Was a bare "except:", which also swallowed SystemExit /
            # KeyboardInterrupt.)
            message = ""
        mparms = {
            'error': http.errorString(),
            'message': message
        }
        log.error(
            "AcoustID: submission failed with error '%(error)s': %(message)s" %
            mparms)
        self.tagger.window.set_statusbar_message(
            N_("AcoustID submission failed with error '%(error)s': %(message)s"),
            mparms,
            echo=None,
            timeout=3000
        )
    else:
        log.debug('AcoustID: successfully submitted')
        self.tagger.window.set_statusbar_message(
            N_('AcoustIDs successfully submitted.'),
            echo=None,
            timeout=3000
        )
        # Remember what was submitted so unchanged files are not
        # resubmitted later.
        for submission in fingerprints:
            submission.orig_recordingid = submission.recordingid
        self._check_unsubmitted()
def coverart(album, metadata, release):
    """Gets all cover art URLs from the metadata and then attempts
    to download the album art.
    """
    downloader = CoverArt(album, metadata, release)
    log.debug("New %r", downloader)
    downloader.retrieve()
def _handle_redirect(self, reply, request, redirect):
    """Follow an HTTP redirect, carrying rate limits over to the new host.

    Loops are detected by comparing the resolved redirect target with
    the URL of the request that produced it.
    """
    url = request.url()
    error = int(reply.error())
    # merge with base url (to cover the possibility of the URL being relative)
    redirect = url.resolved(redirect)
    if not WebService.urls_equivalent(redirect, reply.request().url()):
        log.debug("Redirect to %s requested", redirect.toString(QUrl.RemoveUserInfo))
        redirect_host = string_(redirect.host())
        redirect_port = self.url_port(redirect)
        redirect_query = dict(QUrlQuery(redirect).queryItems(QUrl.FullyEncoded))
        redirect_path = redirect.path()
        original_host = string_(url.host())
        original_port = self.url_port(url)
        original_host_key = (original_host, original_port)
        redirect_host_key = (redirect_host, redirect_port)
        # Inherit the origin's minimum request delay so the redirect
        # target is not hit faster than the original host allowed.
        if (original_host_key in REQUEST_DELAY_MINIMUM
                and redirect_host_key not in REQUEST_DELAY_MINIMUM):
            log.debug("Setting the minimum rate limit for %s to %i" %
                      (redirect_host_key, REQUEST_DELAY_MINIMUM[original_host_key]))
            REQUEST_DELAY_MINIMUM[redirect_host_key] = REQUEST_DELAY_MINIMUM[original_host_key]
        self.get(redirect_host, redirect_port, redirect_path,
                 request.handler, request.parse_response_type, priority=True,
                 important=True, refresh=request.refresh,
                 queryargs=redirect_query,
                 cacheloadcontrol=request.attribute(QtNetwork.QNetworkRequest.CacheLoadControlAttribute))
    else:
        log.error("Redirect loop: %s",
                  reply.request().url().toString(QUrl.RemoveUserInfo)
                  )
        request.handler(reply.readAll(), reply, error)
def dropMimeData(self, parent, index, data, action):
    """Accept dropped URLs or album lists; return True when handled."""
    target = None
    if parent:
        # Dropping past the last child means dropping on the parent itself.
        if index == parent.childCount():
            item = parent
        else:
            item = parent.child(index)
        if item is not None:
            target = item.obj
    log.debug("Drop target = %r", target)
    handled = False
    # text/uri-list
    urls = data.urls()
    if urls:
        if target is None:
            target = self.tagger.unmatched_files
        self.drop_urls(urls, target)
        handled = True
    # application/picard.album-list
    albums = data.data("application/picard.album-list")
    if albums:
        if isinstance(self, FileTreeView) and target is None:
            target = self.tagger.unmatched_files
        # NOTE(review): albums is the raw mime payload; str() is assumed
        # to yield newline-separated album ids — verify under Python 3,
        # where str() of a QByteArray gives its repr.
        albums = [self.tagger.load_album(id) for id in str(albums).split("\n")]
        self.tagger.move_files(self.tagger.get_files_from_objects(albums), target)
        handled = True
    return handled
def request_finished(document, reply, error):
    """Handle the collection-list web service reply.

    Rebuilds the set of user collections from the response and drops
    collections no longer present on the server. Relies on closure
    variables tagger, user_collections and callback.
    """
    if error:
        tagger.window.set_statusbar_message(
            N_("Error loading collections: %(error)s"),
            {'error': reply.errorString()},
            echo=log.error
        )
        return
    if document and "collections" in document:
        collection_list = document['collections']
        new_collections = set()
        for node in collection_list:
            # Only release collections are handled here.
            if node["entity-type"] != "release":
                continue
            col_id = node['id']
            col_name = node['name']
            col_size = node['release-count']
            new_collections.add(col_id)
            collection = get_user_collection(col_id, col_name, col_size, refresh=True)
        # remove collections which aren't returned by the web service anymore
        old_collections = set(user_collections) - new_collections
        for collection_id in old_collections:
            del user_collections[collection_id]
        log.debug("User collections: %r",
                  [(k, v.name) for k, v in user_collections.items()])
    if callback:
        callback()
def _finished(self, kind, ids, callback, document, reply, error):
    """Update local collection state after an add/remove request completes.

    *kind* selects between COLLECTION_ADD and removal handling; *ids* is
    the set of release ids that were submitted in the request.
    """
    self.pending -= ids
    statusbar = self.tagger.window.set_statusbar_message
    if not error:
        count = len(ids)
        if kind == self.COLLECTION_ADD:
            self.releases |= ids
            self.size += count
            status_msg = ngettext(
                'Added %(count)i release to collection "%(name)s"',
                'Added %(count)i releases to collection "%(name)s"',
                count)
            # Debug message is deliberately untranslated.
            debug_msg = 'Added %(count)i releases to collection "%(name)s"'
        else:
            self.releases -= ids
            self.size -= count
            status_msg = ngettext(
                'Removed %(count)i release from collection "%(name)s"',
                'Removed %(count)i releases from collection "%(name)s"',
                count)
            debug_msg = 'Removed %(count)i releases from collection "%(name)s"'
        callback()
        mparms = {'count': count, 'name': self.name}
        log.debug(debug_msg % mparms)
        statusbar(status_msg, mparms, translate=None, echo=None)
    else:
        statusbar(
            N_("Error while modifying collections: %(error)s"),
            {'error': reply.errorString()},
            echo=log.error
        )
def main(localedir=None, autoupdate=True):
    """Application entry point: parse args, create the Tagger and run it."""
    # Some libs (ie. Phonon) require those to be set
    QtWidgets.QApplication.setApplicationName(PICARD_APP_NAME)
    QtWidgets.QApplication.setOrganizationName(PICARD_ORG_NAME)
    # Restore default Ctrl+C behaviour so the process can be interrupted.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    picard_args, unparsed_args = process_picard_args()
    if picard_args.version:
        return version()
    if picard_args.long_version:
        return longversion()
    tagger = Tagger(picard_args, unparsed_args, localedir, autoupdate)
    # Initialize Qt default translations
    translator = QtCore.QTranslator()
    locale = QtCore.QLocale()
    translation_path = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath)
    log.debug("Looking for Qt locale %s in %s", locale.name(), translation_path)
    if translator.load(locale, "qtbase_", directory=translation_path):
        tagger.installTranslator(translator)
    else:
        log.debug('Qt locale %s not available', locale.name())
    tagger.startTimer(1000)
    sys.exit(tagger.run())
def _caa_json_downloaded(album, metadata, release, try_list, data, http, error):
    """Parse the CAA index JSON and queue matching image types for download.

    Falls back to other cover art sources when the request failed or no
    front image was found on the Cover Art Archive.
    """
    album._requests -= 1
    caa_front_found = False
    if error:
        log.error(str(http.errorString()))
    else:
        try:
            caa_data = json.loads(data)
        except ValueError:
            log.debug("Invalid JSON: %s", http.url().toString())
        else:
            caa_types = config.setting["caa_image_types"].split()
            caa_types = map(unicode.lower, caa_types)
            for image in caa_data["images"]:
                if config.setting["caa_approved_only"] and not image["approved"]:
                    continue
                # CAA images with no type set are matched against the
                # pseudo type "unknown".
                if not image["types"] and "unknown" in caa_types:
                    image["types"] = [u"Unknown"]
                imagetypes = map(unicode.lower, image["types"])
                for imagetype in imagetypes:
                    if imagetype == "front":
                        caa_front_found = True
                    if imagetype in caa_types:
                        _caa_append_image_to_trylist(try_list, image)
                        break
    if error or not caa_front_found:
        _fill_try_list(album, release, try_list)
    _walk_try_list(album, metadata, release, try_list)
def get_files():
    """Return a generator of the next directory's file paths from the walk.

    Returns None when the walk is exhausted. Relies on closure variables
    walk, ignore_hidden and self from the enclosing scope.
    """
    try:
        root, dirs, files = next(walk)
        if ignore_hidden:
            # Prune hidden directories in place so os.walk skips them.
            dirs[:] = [d for d in dirs if not is_hidden(os.path.join(root, d))]
    except StopIteration:
        return None
    else:
        number_of_files = len(files)
        if number_of_files:
            mparms = {
                'count': number_of_files,
                'directory': root,
            }
            log.debug("Adding %(count)d files from '%(directory)r'" %
                      mparms)
            self.window.set_statusbar_message(
                ngettext(
                    "Adding %(count)d file from '%(directory)s' ...",
                    "Adding %(count)d files from '%(directory)s' ...",
                    number_of_files),
                mparms, translate=None, echo=None
            )
        return (os.path.join(root, f) for f in files)
def _add_directory_non_recursive(self, path):
    """Add only the files directly inside *path* (no subdirectories)."""
    files = []
    for f in os.listdir(path):
        listing = os.path.join(path, f)
        if os.path.isfile(listing):
            files.append(listing)
    number_of_files = len(files)
    if number_of_files:
        mparms = {
            'count': number_of_files,
            'directory': path,
        }
        log.debug("Adding %(count)d files from '%(directory)r'" %
                  mparms)
        self.window.set_statusbar_message(
            ngettext(
                "Adding %(count)d file from '%(directory)s' ...",
                "Adding %(count)d files from '%(directory)s' ...",
                number_of_files),
            mparms, translate=None, echo=None
        )
        # Function call only if files exist
        self.add_files(files)
def _start_request(self, method, host, port, path, data, handler, xml,
                   mblogin=False, cacheloadcontrol=None, refresh=None):
    """Build and send a QNetworkRequest; register it as in flight.

    Login requests to MusicBrainz servers on port 80 are forced to HTTPS
    and carry basic-auth credentials; caching is bypassed for logins and
    explicit refreshes.
    """
    if mblogin and host in MUSICBRAINZ_SERVERS and port == 80:
        urlstring = "https://%s%s" % (host, path)
    else:
        urlstring = "http://%s:%d%s" % (host, port, path)
    log.debug("%s %s", method, urlstring)
    url = QUrl.fromEncoded(urlstring)
    if mblogin:
        url.setUserName(config.setting["username"])
        url.setPassword(config.setting["password"])
    request = QtNetwork.QNetworkRequest(url)
    if mblogin or (method == "GET" and refresh):
        # Logins and refreshes must never be served from cache.
        request.setPriority(QtNetwork.QNetworkRequest.HighPriority)
        request.setAttribute(QtNetwork.QNetworkRequest.CacheLoadControlAttribute,
                             QtNetwork.QNetworkRequest.AlwaysNetwork)
    elif method == "PUT" or method == "DELETE":
        request.setPriority(QtNetwork.QNetworkRequest.HighPriority)
    elif cacheloadcontrol is not None:
        request.setAttribute(QtNetwork.QNetworkRequest.CacheLoadControlAttribute,
                             cacheloadcontrol)
    request.setRawHeader("User-Agent", USER_AGENT_STRING)
    if xml:
        request.setRawHeader("Accept", "application/xml")
    if data is not None:
        if method == "POST" and host == config.setting["server_host"]:
            request.setHeader(QtNetwork.QNetworkRequest.ContentTypeHeader, "application/xml; charset=utf-8")
        else:
            request.setHeader(QtNetwork.QNetworkRequest.ContentTypeHeader, "application/x-www-form-urlencoded")
    send = self._request_methods[method]
    reply = send(request, data) if data is not None else send(request)
    key = (host, port)
    # Record the send time for rate limiting and track the in-flight reply.
    self._last_request_times[key] = time.time()
    self._active_requests[reply] = (request, handler, xml, refresh)
    return True
def read(self, device=None):
    """Read the disc in *device* (default drive when None) and store its
    disc id and submission URL.
    """
    if device is None:
        device = discid.get_default_device()
    log.debug(u"Reading CD using device: %r", device)
    disc_info = discid.read(device)
    self.id = disc_info.id
    self.submission_url = disc_info.submission_url
def _out_of_backoff(hostkey): REQUEST_DELAY_EXPONENT[hostkey] = 0 # Coming out of backoff, so reset. # Shrink the delay between requests with each successive reply to # converge on maximum throughput. delay = max(int(REQUEST_DELAY[hostkey] / 2), REQUEST_DELAY_MINIMUM[hostkey]) cws = CONGESTION_WINDOW_SIZE[hostkey] sst = CONGESTION_SSTHRESH[hostkey] if sst and cws >= sst: # Analogous to TCP's congestion avoidance phase. Window growth is linear. phase = 'congestion avoidance' cws = cws + (1.0 / cws) else: # Analogous to TCP's slow start phase. Window growth is exponential. phase = 'slow start' cws += 1 if (REQUEST_DELAY[hostkey] != delay or CONGESTION_WINDOW_SIZE[hostkey] != cws): log.debug( '%s: oobackoff; delay: %dms -> %dms; %s; window size %.3f -> %.3f', hostkey, REQUEST_DELAY[hostkey], delay, phase, CONGESTION_WINDOW_SIZE[hostkey], cws ) CONGESTION_WINDOW_SIZE[hostkey] = cws REQUEST_DELAY[hostkey] = delay
def _save(self, filename, metadata):
    """Save metadata to a WMA/ASF file (tag names mapped via __TRANS)."""
    log.debug("Saving file %r", filename)
    file = ASF(encode_filename(filename))
    if config.setting['clear_existing_tags']:
        file.tags.clear()
    cover = []
    for image in metadata.images_to_be_saved_to_tags:
        tag_data = pack_image(image.mimetype, image.data,
                              image_type_as_id3_num(image.maintype),
                              image.comment)
        cover.append(ASFByteArrayAttribute(tag_data))
    if cover:
        file.tags['WM/Picture'] = cover
    for name, values in metadata.rawitems():
        if name.startswith('lyrics:'):
            name = 'lyrics'
        elif name == '~rating':
            # Scale Picard's 0..rating_steps-1 rating to WMA's 0..99 range.
            values[0] = int(values[0]) * 99 / (config.setting['rating_steps'] - 1)
        elif name == 'discnumber' and 'totaldiscs' in metadata:
            # WMA stores disc number as "n/total".
            values[0] = '%s/%s' % (metadata['discnumber'], metadata['totaldiscs'])
        if name not in self.__TRANS:
            continue
        name = self.__TRANS[name]
        file.tags[name] = map(unicode, values)
    file.save()
def load_plugindir(self, plugindir):
    """Load all plugins found in *plugindir*, applying pending updates first.

    A "<plugin>.update" file is a freshly downloaded replacement: the old
    plugin is unregistered and the update is renamed into place before the
    directory is scanned for plugins to load.
    """
    plugindir = os.path.normpath(plugindir)
    if not os.path.isdir(plugindir):
        log.info("Plugin directory %r doesn't exist", plugindir)
        return
    # first, handle eventual plugin updates
    for updatepath in [os.path.join(plugindir, file) for file in
                       os.listdir(plugindir) if file.endswith('.update')]:
        path = os.path.splitext(updatepath)[0]
        name = is_zip(path)
        if not name:
            name = _plugin_name_from_path(path)
        if name:
            self._remove_plugin(name)
            os.rename(updatepath, path)
            # Fixed unbalanced parenthesis in the log message (was "(%r))").
            log.debug('Updating plugin %r (%r)', name, path)
        else:
            log.error('Cannot get plugin name from %r', updatepath)
    # now load found plugins
    names = set()
    for path in [os.path.join(plugindir, file) for file in os.listdir(plugindir)]:
        name = is_zip(path)
        if not name:
            name = _plugin_name_from_path(path)
        if name:
            names.add(name)
    log.debug("Looking for plugins in directory %r, %d names found",
              plugindir, len(names))
    for name in sorted(names):
        try:
            self.load_plugin(name, plugindir)
        except Exception as e:
            # Broad catch is deliberate: one broken plugin must not stop
            # the others from loading. (Typo fix: "occured" -> "occurred".)
            log.error('Unable to load plugin: %s.\nError occurred: %s', name, e)
def _save(self, filename, metadata):
    """Save metadata to a WMA/ASF file, optionally embedding cover art."""
    log.debug("Saving file %r", filename)
    file = ASF(encode_filename(filename))
    if config.setting["clear_existing_tags"]:
        file.tags.clear()
    if config.setting["save_images_to_tags"]:
        cover = []
        for image in metadata.images:
            if not save_this_image_to_tags(image):
                continue
            tag_data = pack_image(image["mime"], image["data"],
                                  image_type_as_id3_num(image["type"]),
                                  image["desc"])
            cover.append(ASFByteArrayAttribute(tag_data))
        if cover:
            file.tags["WM/Picture"] = cover
    for name, values in metadata.rawitems():
        if name.startswith("lyrics:"):
            name = "lyrics"
        elif name == "~rating":
            # Scale rating from 0..rating_steps-1 to WMA's 0..99 range.
            values[0] = int(values[0]) * 99 / (config.setting["rating_steps"] - 1)
        if name not in self.__TRANS:
            continue
        name = self.__TRANS[name]
        file.tags[name] = map(unicode, values)
    file.save()
def restore_table_header_state(self):
    """Restore the table header layout from persisted config."""
    header = self.table.horizontalHeader()
    state = config.persist[self.dialog_header_state]
    if state:
        header.restoreState(state)
    header.setSectionResizeMode(QtWidgets.QHeaderView.Interactive)
    # Lazy %-args: only formatted when debug logging is enabled
    # (was eager "%" string formatting).
    log.debug("restore_state: %s", self.dialog_header_state)
def _load(self, filename):
    """Load metadata from a WMA/ASF file, including embedded cover art."""
    log.debug("Loading file %r", filename)
    file = ASF(encode_filename(filename))
    metadata = Metadata()
    for name, values in file.tags.items():
        if name == 'WM/Picture':
            for image in values:
                (mime, data, type, description) = unpack_image(image.value)
                try:
                    coverartimage = TagCoverArtImage(
                        file=filename,
                        tag=name,
                        types=types_from_id3(type),
                        comment=description,
                        support_types=True,
                        data=data,
                    )
                except CoverArtImageError as e:
                    log.error('Cannot load image from %r: %s' % (filename, e))
                else:
                    metadata.append_image(coverartimage)
            continue
        elif name not in self.__RTRANS:
            # ASF tag with no Picard equivalent.
            continue
        elif name == 'WM/SharedUserRating':
            # Rating in WMA ranges from 0 to 99, normalize this to the range 0 to 5
            values[0] = int(round(int(unicode(values[0])) / 99.0 * (config.setting['rating_steps'] - 1)))
        name = self.__RTRANS[name]
        values = filter(bool, map(unicode, values))
        if values:
            metadata[name] = values
    self._info(metadata, file)
    return metadata
def _save(self, filename, metadata):
    """Save metadata to a WMA/ASF file, optionally embedding cover art."""
    log.debug("Saving file %r", filename)
    file = ASF(encode_filename(filename))
    if config.setting['clear_existing_tags']:
        file.tags.clear()
    if config.setting['save_images_to_tags']:
        cover = []
        for image in metadata.images:
            if not save_this_image_to_tags(image):
                continue
            tag_data = pack_image(image.mimetype, image.data,
                                  image_type_as_id3_num(image.maintype()),
                                  image.description)
            cover.append(ASFByteArrayAttribute(tag_data))
        if cover:
            file.tags['WM/Picture'] = cover
    for name, values in metadata.rawitems():
        if name.startswith('lyrics:'):
            name = 'lyrics'
        elif name == '~rating':
            # Scale rating from 0..rating_steps-1 to WMA's 0..99 range.
            values[0] = int(values[0]) * 99 / (config.setting['rating_steps'] - 1)
        if name not in self.__TRANS:
            continue
        name = self.__TRANS[name]
        file.tags[name] = map(unicode, values)
    file.save()
def fetch_username(self, callback):
    """Request the MusicBrainz username of the authenticated user and
    pass the reply to *callback* via on_fetch_username_finished.
    """
    log.debug("OAuth: fetching username")
    host = config.setting['server_host']
    port = config.setting['server_port']
    path = "/oauth2/userinfo"
    handler = partial(self.on_fetch_username_finished, callback)
    self.webservice.get(host, port, path, handler,
                        parse_response_type=None, mblogin=True,
                        priority=True, important=True)
def fetch_username(self, callback):
    """Request the MusicBrainz username of the authenticated user and
    pass the reply to *callback* via on_fetch_username_finished.
    """
    log.debug("OAuth: fetching username")
    host = config.setting['server_host']
    port = config.setting['server_port']
    path = "/oauth2/userinfo"
    handler = partial(self.on_fetch_username_finished, callback)
    self.xmlws.get(host, port, path, handler,
                   xml=False, mblogin=True,
                   priority=True, important=True)
def set_refresh_token(self, refresh_token, scopes):
    """Store the OAuth refresh token together with its granted scopes."""
    self.refresh_token = refresh_token
    self.refresh_token_scopes = scopes
    log.debug("OAuth: got refresh_token %s with scopes %s",
              refresh_token, scopes)
def _load(self, filename):
    """Load metadata from a Vorbis-comment based file (FLAC/Ogg).

    Translates tag names, parses performer/comment role suffixes and
    ratings, and collects embedded cover art from FLAC picture blocks,
    metadata_block_picture comments and legacy COVERART comments.
    """
    log.debug("Loading file %r", filename)
    file = self._File(encode_filename(filename))
    file.tags = file.tags or {}
    metadata = Metadata()
    for origname, values in file.tags.items():
        for value in values:
            name = origname
            if name == "date" or name == "originaldate":
                # YYYY-00-00 => YYYY
                value = sanitize_date(value)
            elif name == 'performer' or name == 'comment':
                # transform "performer=Joe Barr (Piano)" to "performer:Piano=Joe Barr"
                name += ':'
                if value.endswith(')'):
                    # Scan backwards for the '(' matching the trailing ')'.
                    start = len(value) - 2
                    count = 1
                    while count > 0 and start > 0:
                        if value[start] == ')':
                            count += 1
                        elif value[start] == '(':
                            count -= 1
                        start -= 1
                    if start > 0:
                        name += value[start + 2:-1]
                        value = value[:start]
            elif name.startswith('rating'):
                try:
                    name, email = name.split(':', 1)
                except ValueError:
                    email = ''
                # Only import the rating stored for the configured user.
                if email != config.setting['rating_user_email']:
                    continue
                name = '~rating'
                value = string_(
                    int(
                        round((float(value) * (config.setting['rating_steps'] - 1)))))
            elif name == "fingerprint" and value.startswith(
                    "MusicMagic Fingerprint"):
                name = "musicip_fingerprint"
                value = value[22:]
            elif name == "tracktotal":
                # Prefer an explicit totaltracks tag when both exist.
                if "totaltracks" in file.tags:
                    continue
                name = "totaltracks"
            elif name == "disctotal":
                if "totaldiscs" in file.tags:
                    continue
                name = "totaldiscs"
            elif name == "metadata_block_picture":
                image = mutagen.flac.Picture(
                    base64.standard_b64decode(value))
                try:
                    coverartimage = TagCoverArtImage(
                        file=filename,
                        tag=name,
                        types=types_from_id3(image.type),
                        comment=image.desc,
                        support_types=True,
                        data=image.data,
                    )
                except CoverArtImageError as e:
                    log.error('Cannot load image from %r: %s' % (filename, e))
                else:
                    metadata.append_image(coverartimage)
                continue
            elif name in self.__translate:
                name = self.__translate[name]
            metadata.add(name, value)
    if self._File == mutagen.flac.FLAC:
        # Native FLAC picture blocks live outside the Vorbis comments.
        for image in file.pictures:
            try:
                coverartimage = TagCoverArtImage(
                    file=filename,
                    tag='FLAC/PICTURE',
                    types=types_from_id3(image.type),
                    comment=image.desc,
                    support_types=True,
                    data=image.data,
                )
            except CoverArtImageError as e:
                log.error('Cannot load image from %r: %s' % (filename, e))
            else:
                metadata.append_image(coverartimage)
    # Read the unofficial COVERART tags, for backward compatibility only
    if "metadata_block_picture" not in file.tags:
        try:
            for data in file["COVERART"]:
                try:
                    coverartimage = TagCoverArtImage(
                        file=filename,
                        tag='COVERART',
                        data=base64.standard_b64decode(data))
                except CoverArtImageError as e:
                    log.error('Cannot load image from %r: %s' % (filename, e))
                else:
                    metadata.append_image(coverartimage)
        except KeyError:
            pass
    self._info(metadata, file)
    return metadata
def _save(self, filename, metadata):
    """Save metadata to the file."""
    log.debug("Saving file %r", filename)
    is_flac = self._File == mutagen.flac.FLAC
    file = self._File(encode_filename(filename))
    if file.tags is None:
        file.add_tags()
    if config.setting["clear_existing_tags"]:
        file.tags.clear()
    # FLAC keeps pictures outside the Vorbis comments; clear them when
    # tags are cleared or new images are about to be written.
    if (is_flac and (config.setting["clear_existing_tags"]
                     or metadata.images_to_be_saved_to_tags)):
        file.clear_pictures()
    tags = {}
    for name, value in metadata.items():
        if name == '~rating':
            # Save rating according to http://code.google.com/p/quodlibet/wiki/Specs_VorbisComments
            if config.setting['rating_user_email']:
                name = 'rating:%s' % config.setting['rating_user_email']
            else:
                name = 'rating'
            value = string_(
                float(value) / (config.setting['rating_steps'] - 1))
        # don't save private tags
        elif name.startswith("~"):
            continue
        elif name.startswith('lyrics:'):
            name = 'lyrics'
        elif name == "date" or name == "originaldate":
            # YYYY-00-00 => YYYY
            value = sanitize_date(value)
        elif name.startswith('performer:') or name.startswith('comment:'):
            # transform "performer:Piano=Joe Barr" to "performer=Joe Barr (Piano)"
            name, desc = name.split(':', 1)
            if desc:
                value += ' (%s)' % desc
        elif name == "musicip_fingerprint":
            name = "fingerprint"
            value = "MusicMagic Fingerprint%s" % value
        elif name in self.__rtranslate:
            name = self.__rtranslate[name]
        tags.setdefault(name.upper(), []).append(value)
    if "totaltracks" in metadata:
        tags.setdefault("TRACKTOTAL", []).append(metadata["totaltracks"])
    if "totaldiscs" in metadata:
        tags.setdefault("DISCTOTAL", []).append(metadata["totaldiscs"])
    for image in metadata.images_to_be_saved_to_tags:
        picture = mutagen.flac.Picture()
        picture.data = image.data
        picture.mime = image.mimetype
        picture.desc = image.comment
        picture.type = image_type_as_id3_num(image.maintype)
        if self._File == mutagen.flac.FLAC:
            file.add_picture(picture)
        else:
            # Non-FLAC Vorbis files store pictures base64-encoded in a
            # METADATA_BLOCK_PICTURE comment.
            tags.setdefault("METADATA_BLOCK_PICTURE", []).append(
                base64.b64encode(picture.write()).decode('ascii'))
    file.tags.update(tags)
    self._remove_deleted_tags(metadata, file.tags)
    kwargs = {}
    if is_flac and config.setting["remove_id3_from_flac"]:
        kwargs["deleteid3"] = True
    try:
        file.save(**kwargs)
    except TypeError:
        # save() may not accept the deleteid3 kwarg; retry without it.
        file.save()
def signal(self, signum, frame):
    """POSIX signal handler: forward the received signal to Qt."""
    log.debug("signal %i received", signum)
    # Send a notification about a received signal from the signal handler
    # to Qt. Writing to a socket is safe inside a signal handler; the
    # QSocketNotifier on the other end wakes the Qt event loop.
    wakeup_socket = self.signalfd[0]
    wakeup_socket.sendall(b"a")
def __init__(self, picard_args, unparsed_args, localedir, autoupdate):
    """Create the Picard application object and all its subsystems.

    Sets up config, logging, thread pools, POSIX signal forwarding,
    gettext, web services, fingerprinting, plugins and the main window.
    """
    super().__init__(sys.argv)
    self.__class__.__instance = self
    config._setup(self, picard_args.config_file)
    theme.setup(self)
    self._cmdline_files = picard_args.FILE
    self.autoupdate_enabled = autoupdate
    self._no_restore = picard_args.no_restore
    self._no_plugins = picard_args.no_plugins
    self.set_log_level(config.setting['log_verbosity'])
    if picard_args.debug or "PICARD_DEBUG" in os.environ:
        self.set_log_level(logging.DEBUG)
    # FIXME: Figure out what's wrong with QThreadPool.globalInstance().
    # It's a valid reference, but its start() method doesn't work.
    self.thread_pool = QtCore.QThreadPool(self)
    # Provide a separate thread pool for operations that should not be
    # delayed by longer background processing tasks, e.g. because the user
    # expects instant feedback instead of waiting for a long list of
    # operations to finish.
    self.priority_thread_pool = QtCore.QThreadPool(self)
    self.priority_thread_pool.setMaxThreadCount(1)
    # Use a separate thread pool for file saving, with a thread count of 1,
    # to avoid race conditions in File._save_and_rename.
    self.save_thread_pool = QtCore.QThreadPool(self)
    self.save_thread_pool.setMaxThreadCount(1)
    if not IS_WIN:
        # Set up signal handling
        # It's not possible to call all available functions from signal
        # handlers, therefore we need to set up a QSocketNotifier to listen
        # on a socket. Sending data through a socket can be done in a
        # signal handler, so we use the socket to notify the application of
        # the signal.
        # This code is adopted from
        # https://qt-project.org/doc/qt-4.8/unix-signals.html
        # To not make the socket module a requirement for the Windows
        # installer, import it here and not globally
        import socket
        self.signalfd = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM, 0)
        self.signalnotifier = QtCore.QSocketNotifier(
            self.signalfd[1].fileno(), QtCore.QSocketNotifier.Read, self)
        self.signalnotifier.activated.connect(self.sighandler)
        signal.signal(signal.SIGHUP, self.signal)
        signal.signal(signal.SIGINT, self.signal)
        signal.signal(signal.SIGTERM, self.signal)
    if IS_MACOS:
        # On macOS it is not common that the global menu shows icons
        self.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus)
    # Setup logging
    log.debug("Starting Picard from %r", os.path.abspath(__file__))
    log.debug("Platform: %s %s %s", platform.platform(),
              platform.python_implementation(), platform.python_version())
    log.debug("Versions: %s", versions.as_string())
    log.debug("Configuration file path: %r", config.config.fileName())
    log.debug("User directory: %r", os.path.abspath(USER_DIR))
    # for compatibility with pre-1.3 plugins
    QtCore.QObject.tagger = self
    QtCore.QObject.config = config
    QtCore.QObject.log = log
    check_io_encoding()
    # Must be before config upgrade because upgrade dialogs need to be
    # translated
    setup_gettext(localedir, config.setting["ui_language"], log.debug)
    upgrade_config(config.config)
    self.webservice = WebService()
    self.mb_api = MBAPIHelper(self.webservice)
    load_user_collections()
    # Initialize fingerprinting
    acoustid_api = AcoustIdAPIHelper(self.webservice)
    self._acoustid = acoustid.AcoustIDClient(acoustid_api)
    self._acoustid.init()
    self.acoustidmanager = AcoustIDManager(acoustid_api)
    # Load plugins
    self.pluginmanager = PluginManager()
    if not self._no_plugins:
        if IS_FROZEN:
            self.pluginmanager.load_plugins_from_directory(
                os.path.join(os.path.dirname(sys.argv[0]), "plugins"))
        else:
            mydir = os.path.dirname(os.path.abspath(__file__))
            self.pluginmanager.load_plugins_from_directory(
                os.path.join(mydir, "plugins"))
        if not os.path.exists(USER_PLUGIN_DIR):
            os.makedirs(USER_PLUGIN_DIR)
        self.pluginmanager.load_plugins_from_directory(USER_PLUGIN_DIR)
    self.browser_integration = BrowserIntegration()
    self._pending_files_count = 0
    self.files = {}
    self.clusters = ClusterList()
    self.albums = {}
    self.release_groups = {}
    self.mbid_redirects = {}
    self.unclustered_files = UnclusteredFiles()
    self.nats = None
    self.window = MainWindow()
    self.exit_cleanup = []
    self.stopping = False
    # Load release version information
    if self.autoupdate_enabled:
        self.updatecheckmanager = UpdateCheckManager(parent=self.window)
def _is_write_needed(self, filename): if (os.path.exists(filename) and os.path.getsize(filename) == self.datalength): log.debug("Identical file size, not saving %r", filename) return False return True
def set_access_token(self, access_token, expires_in):
    """Store the OAuth access token and compute its expiry timestamp.

    A 60-second margin is subtracted from *expires_in* so the token is
    refreshed slightly before it actually expires.
    """
    log.debug("OAuth: got access_token %s that expires in %s seconds",
              access_token, expires_in)
    self.access_token = access_token
    expiry = time.time() + expires_in - 60
    self.access_token_expires = int(expiry)
def _handle_reply(self, reply, request):
    """Process a finished network reply.

    Decrements the per-host in-flight counter, then either retries the
    request (throttling errors), follows a redirect, or dispatches the
    payload to the request's handler. Finally feeds the outcome back
    into the rate controller.
    """
    hostkey = request.get_host_key()
    ratecontrol.decrement_requests(hostkey)
    # Kick the task queue so the next pending request is started.
    self._timer_run_next_task.start(0)
    slow_down = False
    error = int(reply.error())
    handler = request.handler
    response_code = self.http_response_code(reply)
    url = self.http_response_safe_url(reply)
    if error:
        errstr = reply.errorString()
        log.error("Network request error for %s: %s (QT code %d, HTTP code %d)",
                  url, errstr, error, response_code)
        if (not request.max_retries_reached()
            and (response_code == 503
                 or response_code == 429
                 # Sometimes QT returns a http status code of 200 even when there
                 # is a service unavailable error. But it returns a QT error code
                 # of 403 when this happens
                 or error == 403
                 )):
            # Throttling-style failure: back off and requeue the request.
            slow_down = True
            retries = request.mark_for_retry()
            log.debug("Retrying %s (#%d)", url, retries)
            self.add_request(request)
        elif handler is not None:
            # No retry budget left (or non-retryable error):
            # surface the error to the caller's handler.
            handler(reply.readAll(), reply, error)
        # Any 5xx also counts as a reason to slow down this host.
        slow_down = (slow_down or response_code >= 500)
    else:
        redirect = reply.attribute(QNetworkRequest.RedirectionTargetAttribute)
        from_cache = reply.attribute(QNetworkRequest.SourceIsFromCacheAttribute)
        cached = ' (CACHED)' if from_cache else ''
        log.debug("Received reply for %s: HTTP %d (%s) %s",
                  url,
                  response_code,
                  self.http_response_phrase(reply),
                  cached
                  )
        if handler is not None:
            # Redirect if found and not infinite
            if redirect:
                self._handle_redirect(reply, request, redirect)
            elif request.response_parser:
                try:
                    document = request.response_parser(reply)
                    log.debug("Response received: %s", document)
                except Exception as e:
                    # Parsing failed: hand the raw payload and the
                    # exception object to the handler instead.
                    log.error("Unable to parse the response for %s: %s", url, e)
                    document = reply.readAll()
                    error = e
                finally:
                    handler(document, reply, error)
            else:
                handler(reply.readAll(), reply, error)
    # Update the per-host delay/window from this reply's outcome.
    ratecontrol.adjust(hostkey, slow_down)
def _queue_from_relationship(self, url):
    """Queue a cover art image discovered through a URL relationship."""
    log.debug("Found cover art link in URL relationship")
    image = CoverArtImage(url)
    self.queue_put(image)
def launch(self, url):
    """Open *url* in the user's web browser.

    Returns:
        True, so callers can treat the launch as handled.
    """
    # Fix: use lazy %-style logging arguments instead of eager string
    # interpolation, so the message is only formatted when debug
    # logging is actually enabled.
    log.debug("webbrowser2: %s", url)
    webbrowser2.open(url)
    return True
def _caa_json_downloaded(self, data, http, error):
    """Parse CAA JSON file and queue CAA cover art images for download.

    Runs as the network callback for the Cover Art Archive release JSON;
    filters images by approval status, PDF-ness and (optionally) enabled
    types, then queues matching images. Always advances the queue at the
    end. (Legacy Python 2 code: uses ``unicode`` and list-returning ``map``.)
    """
    self.album._requests -= 1
    if error:
        # A 404 is expected when falling back through release-group JSON;
        # only report other errors.
        if not (error == QNetworkReply.ContentNotFoundError and self.ignore_json_not_found_error):
            self.error(u'CAA JSON error: %s' % (unicode(http.errorString())))
    else:
        try:
            caa_data = json.loads(data)
        except ValueError:
            self.error("Invalid JSON: %s" % (http.url().toString()))
        else:
            # Map the configured image size to a CAA thumbnail key
            # (None means "use the full-size image").
            imagesize = config.setting["caa_image_size"]
            thumbsize = _CAA_THUMBNAIL_SIZE_MAP.get(imagesize, None)
            for image in caa_data["images"]:
                if config.setting["caa_approved_only"] and not image["approved"]:
                    continue
                is_pdf = image["image"].endswith('.pdf')
                if is_pdf and not config.setting["save_images_to_files"]:
                    # PDFs can only be saved to files, never embedded.
                    log.debug("Skipping pdf cover art : %s" % image["image"])
                    continue
                # if image has no type set, we still want it to match
                # pseudo type 'unknown'
                if not image["types"]:
                    image["types"] = [u"unknown"]
                else:
                    image["types"] = map(unicode.lower, image["types"])
                if self.restrict_types:
                    # only keep enabled caa types
                    types = set(image["types"]).intersection(
                        set(self.caa_types))
                else:
                    types = True
                if types:
                    # Use a thumbnail URL when a size mapping exists,
                    # except for PDFs which have no image thumbnails here.
                    if thumbsize is None or is_pdf:
                        url = image["image"]
                    else:
                        url = image["thumbnails"][thumbsize]
                    coverartimage = self.coverartimage_class(
                        url,
                        types=image["types"],
                        is_front=image['front'],
                        comment=image["comment"],
                    )
                    if is_pdf:
                        # thumbnail will be used to "display" PDF in info
                        # dialog
                        thumbnail = self.coverartimage_thumbnail_class(
                            url=image["thumbnails"]['small'],
                            types=image["types"],
                            is_front=image['front'],
                            comment=image["comment"],
                        )
                        self.queue_put(thumbnail)
                        coverartimage.thumbnail = thumbnail
                        # PDFs cannot be saved to tags (as 2014/05/29)
                        coverartimage.can_be_saved_to_tags = False
                    self.queue_put(coverartimage)
                    # Stop after the first front image when the user only
                    # wants a single saved front image.
                    if config.setting["caa_save_single_front_image"] and \
                            config.setting["save_images_to_files"] and \
                            image["front"]:
                        break
    self.next_in_queue()
def save_table_header_state(self):
    """Persist the table's horizontal header layout to the config.

    The saved state is keyed by ``self.dialog_header_state`` so each
    dialog keeps its own header configuration.
    """
    state = self.table.horizontalHeader().saveState()
    config.persist[self.dialog_header_state] = state
    # Fix: lazy %-style logging arguments instead of eager `%` formatting.
    log.debug("save_state: %s", self.dialog_header_state)
def _caa_json_downloaded(self, data, http, error):
    """Parse CAA JSON file and queue CAA cover art images for download.

    Network callback for the Cover Art Archive release JSON. Filters
    images by approval, PDF-ness and the enabled/excluded type sets,
    then queues matching images. Malformed JSON structures are reported
    via ``self.error``. Always advances the queue at the end.
    """
    self.album._requests -= 1
    if error:
        # A 404 is expected when falling back through release-group JSON;
        # only report other errors.
        if not (error == QNetworkReply.ContentNotFoundError and self.ignore_json_not_found_error):
            self.error('CAA JSON error: %s' % (http.errorString()))
    else:
        if self.restrict_types:
            log.debug('CAA types: included: %s, excluded: %s' % (self.caa_types, self.caa_types_to_omit,))
        try:
            config = get_config()
            for image in data["images"]:
                if config.setting["caa_approved_only"] and not image["approved"]:
                    continue
                is_pdf = image["image"].endswith('.pdf')
                if is_pdf and not config.setting["save_images_to_files"]:
                    # PDFs can only be saved to files, never embedded.
                    log.debug("Skipping pdf cover art : %s" % image["image"])
                    continue
                # if image has no type set, we still want it to match
                # pseudo type 'unknown'
                if not image["types"]:
                    image["types"] = ["unknown"]
                else:
                    image["types"] = list(map(str.lower, image["types"]))
                if self.restrict_types:
                    # only keep enabled caa types
                    types = set(image["types"]).intersection(
                        set(self.caa_types))
                    # An image matching any excluded type is rejected
                    # even if it also matches an included type.
                    if types and self.caa_types_to_omit:
                        types = not set(image["types"]).intersection(
                            set(self.caa_types_to_omit))
                    log.debug('CAA image {status}: {image_name} {image_types}'.format(
                        status=('accepted' if types else 'rejected'),
                        image_name=image['image'],
                        image_types=image['types'],)
                    )
                else:
                    types = True
                if types:
                    # Preferred-size thumbnail URLs, largest first;
                    # fall back to the original image for PDFs or when
                    # no thumbnails are available.
                    urls = caa_url_fallback_list(config.setting["caa_image_size"], image["thumbnails"])
                    if not urls or is_pdf:
                        url = image["image"]
                    else:
                        # FIXME: try other urls in case of 404
                        url = urls[0]
                    coverartimage = self.coverartimage_class(
                        url,
                        types=image["types"],
                        is_front=image['front'],
                        comment=image["comment"],
                    )
                    if urls and is_pdf:
                        # thumbnail will be used to "display" PDF in info
                        # dialog
                        thumbnail = self.coverartimage_thumbnail_class(
                            url=urls[0],
                            types=image["types"],
                            is_front=image['front'],
                            comment=image["comment"],
                        )
                        self.queue_put(thumbnail)
                        coverartimage.thumbnail = thumbnail
                        # PDFs cannot be saved to tags (as 2014/05/29)
                        coverartimage.can_be_saved_to_tags = False
                    self.queue_put(coverartimage)
                    # Stop after the first front image when the user only
                    # wants a single saved front image.
                    if config.setting["save_only_one_front_image"] and \
                            config.setting["save_images_to_files"] and \
                            image["front"]:
                        break
        except (AttributeError, KeyError, TypeError) as e:
            # The JSON document did not have the expected structure.
            self.error('CAA JSON error: %s' % e)
    self.next_in_queue()
def __init__(self, picard_args, unparsed_args, localedir, autoupdate):
    """Initialize the Picard application singleton.

    Sets up the Qt application, thread pools, Unix signal handling,
    logging, configuration, gettext, web service, fingerprinting,
    plugins and the main window. The statements are order-dependent
    (e.g. gettext must be set up before the config upgrade).
    """
    # Set the WM_CLASS to 'MusicBrainz-Picard' so desktop environments
    # can use it to look up the app
    QtGui.QApplication.__init__(self, ['MusicBrainz-Picard'] + unparsed_args)
    self.__class__.__instance = self
    config._setup(self)
    self._cmdline_files = picard_args.FILE
    self._autoupdate = autoupdate
    self._debug = False
    # FIXME: Figure out what's wrong with QThreadPool.globalInstance().
    # It's a valid reference, but its start() method doesn't work.
    self.thread_pool = QtCore.QThreadPool(self)
    # Use a separate thread pool for file saving, with a thread count of 1,
    # to avoid race conditions in File._save_and_rename.
    self.save_thread_pool = QtCore.QThreadPool(self)
    self.save_thread_pool.setMaxThreadCount(1)
    if not sys.platform == "win32":
        # Set up signal handling
        # It's not possible to call all available functions from signal
        # handlers, therefore we need to set up a QSocketNotifier to listen
        # on a socket. Sending data through a socket can be done in a
        # signal handler, so we use the socket to notify the application of
        # the signal.
        # This code is adopted from
        # https://qt-project.org/doc/qt-4.8/unix-signals.html
        # To not make the socket module a requirement for the Windows
        # installer, import it here and not globally
        import socket
        self.signalfd = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM, 0)
        self.signalnotifier = QtCore.QSocketNotifier(self.signalfd[1].fileno(),
                                                     QtCore.QSocketNotifier.Read, self)
        self.signalnotifier.activated.connect(self.sighandler)
        signal.signal(signal.SIGHUP, self.signal)
        signal.signal(signal.SIGINT, self.signal)
        signal.signal(signal.SIGTERM, self.signal)
    # Setup logging
    self.debug(picard_args.debug or "PICARD_DEBUG" in os.environ)
    log.debug("Starting Picard from %r", os.path.abspath(__file__))
    log.debug("Platform: %s %s %s", platform.platform(),
              platform.python_implementation(), platform.python_version())
    log.debug("Versions: %s", versions.as_string())
    log.debug("Configuration file path: %r", config.config.fileName())
    # TODO remove this before the final release
    # Migrate the pre-1.x user directory to the new location.
    if sys.platform == "win32":
        olduserdir = "~\\Local Settings\\Application Data\\MusicBrainz Picard"
    else:
        olduserdir = "~/.picard"
    olduserdir = os.path.expanduser(olduserdir)
    if os.path.isdir(olduserdir):
        log.info("Moving %r to %r", olduserdir, USER_DIR)
        try:
            shutil.move(olduserdir, USER_DIR)
        except:
            # Best-effort migration: keep starting up even if the move fails.
            pass
    log.debug("User directory: %r", os.path.abspath(USER_DIR))
    # for compatibility with pre-1.3 plugins
    QtCore.QObject.tagger = self
    QtCore.QObject.config = config
    QtCore.QObject.log = log
    check_io_encoding()
    # Must be before config upgrade because upgrade dialogs need to be
    # translated
    setup_gettext(localedir, config.setting["ui_language"], log.debug)
    upgrade_config()
    self.xmlws = XmlWebService()
    load_user_collections()
    # Initialize fingerprinting
    self._acoustid = acoustid.AcoustIDClient()
    self._acoustid.init()
    # Load plugins
    self.pluginmanager = PluginManager()
    if hasattr(sys, "frozen"):
        # Frozen (installer) build: plugins live next to the executable.
        self.pluginmanager.load_plugindir(os.path.join(os.path.dirname(sys.argv[0]), "plugins"))
    else:
        mydir = os.path.dirname(os.path.abspath(__file__))
        self.pluginmanager.load_plugindir(os.path.join(mydir, "plugins"))
        self.pluginmanager.load_plugindir(os.path.join(mydir, os.pardir, "contrib", "plugins"))
    if not os.path.exists(USER_PLUGIN_DIR):
        os.makedirs(USER_PLUGIN_DIR)
    self.pluginmanager.load_plugindir(USER_PLUGIN_DIR)
    self.pluginmanager.query_available_plugins()
    self.acoustidmanager = AcoustIDManager()
    self.browser_integration = BrowserIntegration()
    # Core application state containers.
    self.files = {}
    self.clusters = ClusterList()
    self.albums = {}
    self.release_groups = {}
    self.mbid_redirects = {}
    self.unmatched_files = UnmatchedFiles()
    self.nats = None
    self.window = MainWindow()
    self.exit_cleanup = []
    self.stopping = False
def next_in_queue(self):
    """Download the next queued cover art item.

    If the queue is empty, falls through to the next enabled provider,
    and when no providers remain finalizes loading of the album. Local
    file images are read directly; remote images are downloaded via the
    web service.
    """
    if self.album.id not in self.album.tagger.albums:
        # album removed
        return

    if (self.front_image_found
            and config.setting["save_images_to_tags"]
            and not config.setting["save_images_to_files"]
            and config.setting["embed_only_one_front_image"]):
        # no need to continue
        self.album._finalize_loading(None)
        return

    if self._queue_empty():
        if self.providers:
            # requeue from next provider
            provider = self.providers.pop(0)
            ret = CoverArtProvider._STARTED
            try:
                p = provider(self)
                if p.enabled():
                    # Fix: lazy %-style logging arguments.
                    log.debug("Trying cover art provider %s ...", provider.NAME)
                    ret = p.queue_images()
                else:
                    log.debug("Skipping cover art provider %s ...", provider.NAME)
            # Fix: narrow the bare `except:` so BaseException
            # (KeyboardInterrupt, SystemExit) is not intercepted.
            except Exception:
                log.error(traceback.format_exc())
                raise
            finally:
                if ret != CoverArtProvider.WAIT:
                    self.next_in_queue()
            return
        else:
            # nothing more to do
            self.album._finalize_loading(None)
            return

    # We still have some items to try!
    coverartimage = self._queue_get()
    if not coverartimage.support_types and self.front_image_found:
        # we already have one front image, no need to try other type-less
        # sources
        log.debug("Skipping %r, one front image is already available",
                  coverartimage)
        self.next_in_queue()
        return

    # local files
    if hasattr(coverartimage, 'filepath'):
        try:
            with open(coverartimage.filepath, 'rb') as file:
                self._set_metadata(coverartimage, file.read())
        except IOError as ioexcept:
            (errnum, errmsg) = ioexcept.args
            # Fix: log the path that was actually opened (`filepath`);
            # the original referenced a non-existent `from_file`
            # attribute, which would raise AttributeError inside the
            # except block. Also use lazy logging arguments.
            log.error("Failed to read %r: %s (%d)",
                      coverartimage.filepath, errmsg, errnum)
        except CoverArtImageIOError:
            # It doesn't make sense to store/download more images if we can't
            # save them in the temporary folder, abort.
            return
        self.next_in_queue()
        return

    # on the web
    self._message(
        N_("Downloading cover art of type '%(type)s' for %(albumid)s from %(host)s ..."),
        {
            'type': coverartimage.types_as_string(),
            'albumid': self.album.id,
            'host': coverartimage.host
        },
        echo=None
    )
    log.debug("Downloading %r", coverartimage)
    self.album.tagger.webservice.download(
        coverartimage.host, coverartimage.port, coverartimage.path,
        partial(self._coverart_downloaded, coverartimage),
        priority=True, important=False)
    self.album._requests += 1
def _parse_release(self, document):
    """Populate album metadata from a loaded MusicBrainz release document.

    Handles MBID redirects (merging into an already-loaded album when
    possible), copies release-group metadata, applies user settings
    (VA name, punctuation conversion) and runs album metadata plugins.

    Returns:
        False if this album was merged into an existing one and removed,
        True otherwise.
    """
    log.debug("Loading release %r ...", self.id)
    self._tracks_loaded = False
    release_node = document.metadata[0].release[0]
    if release_node.id != self.id:
        # The requested MBID was redirected to another release.
        self.tagger.mbid_redirects[self.id] = release_node.id
        album = self.tagger.albums.get(release_node.id)
        if album:
            # Target already loaded: move our files there and drop self.
            log.debug("Release %r already loaded", release_node.id)
            album.match_files(self.unmatched_files.files)
            album.update()
            self.tagger.remove_album(self)
            return False
        else:
            # Re-key this album object under the redirected id.
            del self.tagger.albums[self.id]
            self.tagger.albums[release_node.id] = self
            self.id = release_node.id
    # Get release metadata
    m = self._new_metadata
    m.length = 0
    rg_node = release_node.release_group[0]
    rg = self.release_group = self.tagger.get_release_group_by_id(
        rg_node.id)
    rg.loaded_albums.add(self.id)
    rg.refcount += 1
    release_group_to_metadata(rg_node, rg.metadata, rg)
    m.copy(rg.metadata)
    release_to_metadata(release_node, m, album=self)
    if self._discid:
        m['musicbrainz_discid'] = self._discid
    # Custom VA name
    if m['musicbrainz_albumartistid'] == VARIOUS_ARTISTS_ID:
        m['albumartistsort'] = m['albumartist'] = config.setting['va_name']
    # Convert Unicode punctuation
    if config.setting['convert_punctuation']:
        m.apply_func(asciipunct)
    m['totaldiscs'] = release_node.medium_list[0].count
    # Add album to collections
    if "collection_list" in release_node.children:
        for node in release_node.collection_list[0].collection:
            # Only consider collections owned by the logged-in user.
            if node.editor[0].text.lower() == config.persist["oauth_username"].lower():
                if node.id not in user_collections:
                    user_collections[node.id] = \
                        Collection(node.id, node.name[0].text,
                                   node.release_list[0].count)
                user_collections[node.id].releases.add(self.id)
    # Run album metadata plugins
    try:
        run_album_metadata_processors(self, m, release_node)
    except:
        # Plugin failures are reported on the album, not fatal.
        self.error_append(traceback.format_exc())
    self._release_node = release_node
    return True
def _queue_from_whitelist(self, url):
    """Queue a cover art image discovered through the whitelist."""
    log.debug("Found cover art link in whitelist")
    image = CoverArtImage(url)
    self.queue_put(image)
def queue_put(self, coverartimage):
    """Append a cover art image to the download queue."""
    log.debug("Queuing cover art image %r", coverartimage)
    self.__queue.append(coverartimage)
def _handle_reply(self, reply, request):
    """Process a finished network reply (older webservice variant).

    Decrements the per-host outstanding-request counter, then either
    retries the request (throttling errors), follows a redirect, or
    dispatches the payload to the handler. Feeds the outcome into the
    throttle adjustment at the end.
    """
    hostkey = request.get_host_key()
    CONGESTION_UNACK[hostkey] -= 1
    log.debug("WebService: %s: outstanding reqs: %d", hostkey,
              CONGESTION_UNACK[hostkey])
    # Kick the task queue so the next pending request is started.
    self._timer_run_next_task.start(0)
    slow_down = False
    error = int(reply.error())
    handler = request.handler
    if error:
        code = reply.attribute(
            QtNetwork.QNetworkRequest.HttpStatusCodeAttribute)
        code = int(code) if code else 0
        errstr = reply.errorString()
        url = reply.request().url().toString(QUrl.RemoveUserInfo)
        log.error(
            "Network request error for %s: %s (QT code %d, HTTP code %d)",
            url, errstr, error, code)
        if (not request.max_retries_reached()
            and (code == 503
                 or code == 429
                 # Sometimes QT returns a http status code of 200 even when there
                 # is a service unavailable error. But it returns a QT error code
                 # of 403 when this happens
                 or error == 403)):
            # Throttling-style failure: requeue the request for retry.
            retries = request.mark_for_retry()
            log.debug("Retrying %s (#%d)", url, retries)
            self.add_task(partial(self._start_request, request), request)
        elif handler is not None:
            handler(reply.readAll(), reply, error)
        # Any error on this host is a reason to slow down.
        slow_down = True
    else:
        redirect = reply.attribute(
            QtNetwork.QNetworkRequest.RedirectionTargetAttribute)
        fromCache = reply.attribute(
            QtNetwork.QNetworkRequest.SourceIsFromCacheAttribute)
        cached = ' (CACHED)' if fromCache else ''
        log.debug(
            "Received reply for %s: HTTP %d (%s) %s",
            reply.request().url().toString(QUrl.RemoveUserInfo),
            reply.attribute(
                QtNetwork.QNetworkRequest.HttpStatusCodeAttribute),
            reply.attribute(
                QtNetwork.QNetworkRequest.HttpReasonPhraseAttribute),
            cached)
        if handler is not None:
            # Redirect if found and not infinite
            if redirect:
                self._handle_redirect(reply, request, redirect)
            elif request.response_parser:
                try:
                    document = request.response_parser(reply)
                except Exception as e:
                    # Parsing failed: fall back to the raw payload.
                    log.error("Unable to parse the response. %s", e)
                    document = reply.readAll()
                finally:
                    handler(document, reply, error)
            else:
                handler(reply.readAll(), reply, error)
    # Update this host's delay/window from the reply's outcome.
    self._adjust_throttle(hostkey, slow_down)
def _process_reply(self, reply):
    """Process a finished reply from the XML web service.

    Looks up the originating request, dispatches errors, follows
    non-looping redirects (propagating the source host's rate limit to
    the redirect target), parses XML when requested, and finally closes
    the reply and updates the pending-request counter.
    """
    try:
        request, handler, xml, refresh = self._active_requests.pop(reply)
    except KeyError:
        # Reply does not belong to any request we issued.
        log.error("Request not found for %s" % reply.request().url().toString(QUrl.RemoveUserInfo))
        return
    error = int(reply.error())
    if error:
        log.error(
            "Network request error for %s: %s (QT code %d, HTTP code %s)",
            reply.request().url().toString(QUrl.RemoveUserInfo),
            reply.errorString(), error,
            repr(
                reply.attribute(
                    QtNetwork.QNetworkRequest.HttpStatusCodeAttribute)))
        if handler is not None:
            handler(str(reply.readAll()), reply, error)
    else:
        redirect = reply.attribute(
            QtNetwork.QNetworkRequest.RedirectionTargetAttribute)
        fromCache = reply.attribute(
            QtNetwork.QNetworkRequest.SourceIsFromCacheAttribute)
        cached = ' (CACHED)' if fromCache else ''
        log.debug(
            "Received reply for %s: HTTP %d (%s) %s",
            reply.request().url().toString(QUrl.RemoveUserInfo),
            reply.attribute(
                QtNetwork.QNetworkRequest.HttpStatusCodeAttribute),
            reply.attribute(
                QtNetwork.QNetworkRequest.HttpReasonPhraseAttribute),
            cached)
        if handler is not None:
            # Redirect if found and not infinite
            if redirect and not XmlWebService.urls_equivalent(
                    redirect, reply.request().url()):
                log.debug("Redirect to %s requested",
                          redirect.toString(QUrl.RemoveUserInfo))
                redirect_host = str(redirect.host())
                redirect_port = redirect.port(80)
                url = request.url()
                original_host = str(url.host())
                original_port = url.port(80)
                # Carry the original host's rate limit over to the
                # redirect target if it has none of its own yet.
                if ((original_host, original_port) in REQUEST_DELAY
                        and (redirect_host, redirect_port) not in REQUEST_DELAY):
                    log.debug(
                        "Setting rate limit for %s:%i to %i" %
                        (redirect_host, redirect_port,
                         REQUEST_DELAY[(original_host, original_port)]))
                    REQUEST_DELAY[(redirect_host, redirect_port)] =\
                        REQUEST_DELAY[(original_host, original_port)]
                self.get(
                    redirect_host,
                    redirect_port,
                    # retain path, query string and anchors from redirect URL
                    redirect.toString(QUrl.RemoveAuthority | QUrl.RemoveScheme),
                    handler, xml, priority=True, important=True,
                    refresh=refresh,
                    cacheloadcontrol=request.attribute(
                        QtNetwork.QNetworkRequest.CacheLoadControlAttribute
                    ))
            elif redirect:
                # Redirect back to the same URL: treat as a loop.
                log.error(
                    "Redirect loop: %s",
                    reply.request().url().toString(QUrl.RemoveUserInfo))
                handler(str(reply.readAll()), reply, error)
            elif xml:
                document = _read_xml(QXmlStreamReader(reply))
                handler(document, reply, error)
            else:
                handler(str(reply.readAll()), reply, error)
    reply.close()
    self.num_pending_web_requests -= 1
    self.tagger.tagger_stats_changed.emit()
def _save(self, filename, metadata):
    """Save metadata to the file.

    Writes Vorbis comments (and FLAC pictures when the underlying
    format is FLAC), translating Picard tag names to the format's
    conventions. Honors the clear-existing-tags, preserve-images and
    rating settings.
    """
    log.debug("Saving file %r", filename)
    config = get_config()
    is_flac = self._File == mutagen.flac.FLAC
    file = self._File(encode_filename(filename))
    if file.tags is None:
        file.add_tags()
    if config.setting["clear_existing_tags"]:
        # Keep technical/cover tags that must survive a tag wipe.
        preserve_tags = ['waveformatextensible_channel_mask']
        if not is_flac and config.setting["preserve_images"]:
            preserve_tags.append('metadata_block_picture')
            preserve_tags.append('coverart')
        preserved_values = {}
        for name in preserve_tags:
            if name in file.tags and file.tags[name]:
                preserved_values[name] = file.tags[name]
        file.tags.clear()
        for name, value in preserved_values.items():
            file.tags[name] = value
    images_to_save = list(metadata.images.to_be_saved_to_tags())
    # FLAC stores pictures in dedicated blocks; clear them when new
    # images will be written or the user wants existing ones removed.
    if is_flac and (images_to_save
                    or (config.setting["clear_existing_tags"]
                        and not config.setting["preserve_images"])):
        file.clear_pictures()
    tags = {}
    for name, value in metadata.items():
        if name == '~rating':
            # Save rating according to http://code.google.com/p/quodlibet/wiki/Specs_VorbisComments
            user_email = sanitize_key(config.setting['rating_user_email'])
            if user_email:
                name = 'rating:%s' % user_email
            else:
                name = 'rating'
            # Normalize the rating to the 0..1 range.
            value = str(
                float(value) / (config.setting['rating_steps'] - 1))
        # don't save private tags
        elif name.startswith("~") or not self.supports_tag(name):
            continue
        elif name.startswith('lyrics:'):
            name = 'lyrics'
        elif name == "date" or name == "originaldate":
            # YYYY-00-00 => YYYY
            value = sanitize_date(value)
        elif name.startswith('performer:') or name.startswith('comment:'):
            # transform "performer:Piano=Joe Barr" to "performer=Joe Barr (Piano)"
            name, desc = name.split(':', 1)
            if desc:
                value += ' (%s)' % desc
        elif name == "musicip_fingerprint":
            name = "fingerprint"
            value = "MusicMagic Fingerprint%s" % value
        elif name in self.__rtranslate:
            name = self.__rtranslate[name]
        tags.setdefault(name.upper(), []).append(value)
    if "totaltracks" in metadata:
        tags.setdefault("TRACKTOTAL", []).append(metadata["totaltracks"])
    if "totaldiscs" in metadata:
        tags.setdefault("DISCTOTAL", []).append(metadata["totaldiscs"])
    for image in images_to_save:
        picture = mutagen.flac.Picture()
        picture.data = image.data
        picture.mime = image.mimetype
        picture.desc = image.comment
        picture.width = image.width
        picture.height = image.height
        picture.type = image_type_as_id3_num(image.maintype)
        if is_flac:
            # See https://xiph.org/flac/format.html#metadata_block_picture
            # A picture block larger than the FLAC maximum cannot be
            # written; skip it rather than corrupt the file.
            expected_block_size = (8 * 4 + len(picture.data)
                                   + len(picture.mime)
                                   + len(picture.desc.encode('UTF-8')))
            if expected_block_size > FLAC_MAX_BLOCK_SIZE:
                log.error(
                    'Failed saving image to %r: Image size of %d bytes exceeds maximum FLAC block size of %d bytes',
                    filename, expected_block_size, FLAC_MAX_BLOCK_SIZE)
                continue
            file.add_picture(picture)
        else:
            # Non-FLAC formats embed the picture as a base64-encoded
            # METADATA_BLOCK_PICTURE comment.
            tags.setdefault("METADATA_BLOCK_PICTURE", []).append(
                base64.b64encode(picture.write()).decode('ascii'))
    file.tags.update(tags)
    self._remove_deleted_tags(metadata, file.tags)
    if is_flac:
        flac_sort_pics_after_tags(file.metadata_blocks)
    kwargs = {}
    if is_flac and config.setting["remove_id3_from_flac"]:
        kwargs["deleteid3"] = True
    try:
        file.save(**kwargs)
    except TypeError:
        # Older mutagen without the deleteid3 keyword.
        file.save()
def website_process(self, barcode, zeros, response, reply, error):
    """Handle a downloaded tango.info page for *barcode*.

    Caches the scraped album data, writes genre/date/vocal fields into
    the queued tracks and files, and on a miss retries once or twice
    with a zero-prepended barcode (``zeros`` counts the retries so far).
    """
    if error:
        log.error("%s: Error retrieving info for barcode %s",
                  PLUGIN_NAME, barcode)
        tuples = self.albumpage_queue.remove(barcode)
        for track, album, tint in tuples:
            self.album_remove_request(album)
        return
    html = bytes(response).decode()
    tangoinfo_album_data = self.barcode_process_metadata(barcode, html)
    self.albumpage_cache[barcode] = tangoinfo_album_data
    tuples = self.albumpage_queue.remove(barcode)
    if tangoinfo_album_data:
        if zeros > 0:
            log.debug(
                "%s: "
                "tango.info does not seem to have data for barcode %s. However, "
                "retrying with barcode %s (i.e. the same with 0 prepended) was "
                "successful. This most likely means either MusicBrainz or "
                "tango.info has stored a wrong barcode for this release. You might "
                "want to investigate this discrepancy and report it.",
                PLUGIN_NAME, barcode[zeros:], barcode)
        for track, album, tint in tuples:
            tm = track.metadata
            for field in ['genre', 'date', 'vocal']:
                # Write track metadata
                if self.albumpage_cache[barcode][tint].get(field):
                    tm[field] = self.albumpage_cache[barcode][tint][field]
            for file in track.iterfiles(True):
                # from track.py: def iterfiles(self, save=False)...
                fm = file.metadata
                for field in ['genre', 'date', 'vocal']:
                    # Write file metadata
                    # NOTE(review): this uses direct indexing while the
                    # track loop above uses .get() — may raise KeyError
                    # if a field is absent from the cache; confirm.
                    if self.albumpage_cache[barcode][tint][field]:
                        fm[field] = self.albumpage_cache[barcode][tint][
                            field]
            self.album_remove_request(album)
    else:
        if zeros >= 2:
            # Already retried twice with prepended zeros: give up.
            log.debug(
                "%s: "
                "Could not load album with barcode %s even with zero "
                "prepended(%s). This most likely means tango.info does "
                "not have a release for this barcode (or MusicBrainz has a "
                "wrong barcode)", PLUGIN_NAME, barcode[1:], barcode)
            for track, album, tint in tuples:
                self.album_remove_request(album)
            return
        log.debug("%s: Retrying with 0-padded barcode for barcode %s",
                  PLUGIN_NAME, barcode)
        for track, album, tint in tuples:
            retry_barcode = "0" + str(barcode)
            retry_tint = "0" + tint
            # Try again with new barcode, but at most two times(param zero)
            self.website_add_track(album, track, retry_barcode, retry_tint,
                                   zeros=(zeros + 1))
            self.album_remove_request(album)
def _select_and_add_cover_art(self, url, types):
    """Queue a TheAudioDB artwork image of the given types for download."""
    # Fix: lazy %-style logging arguments instead of eager `%` formatting.
    log.debug("TheAudioDB: Found artwork %s", url)
    self.queue_put(TheAudioDbCoverArtImage(url, types=types))
def _adjust_throttle(hostkey, slow_down):
    """Adjust `REQUEST` and `CONGESTION` metrics when a HTTP request
    completes.

    Args:
        hostkey: `(host, port)`.
        slow_down: `True` if we encountered intermittent server trouble
        and need to slow down.
    """
    def in_backoff_phase(hostkey):
        # More unacknowledged requests in flight than the window allows.
        return CONGESTION_UNACK[hostkey] > CONGESTION_WINDOW_SIZE[hostkey]

    if slow_down:
        # Backoff exponentially until ~30 seconds between requests.
        delay = max(
            pow(2, REQUEST_DELAY_EXPONENT[hostkey]) * 1000,
            REQUEST_DELAY_MINIMUM[hostkey])
        log.debug('WebService: %s: delay: %dms -> %dms.', hostkey,
                  REQUEST_DELAY[hostkey], delay)
        REQUEST_DELAY[hostkey] = delay
        # Cap the exponent at 5 (2**5 * 1000ms = 32s max delay).
        REQUEST_DELAY_EXPONENT[hostkey] = min(
            REQUEST_DELAY_EXPONENT[hostkey] + 1, 5)

        # Slow start threshold is ~1/2 of the window size up until we saw
        # trouble. Shrink the new window size back to 1.
        CONGESTION_SSTHRESH[hostkey] = int(
            CONGESTION_WINDOW_SIZE[hostkey] / 2.0)
        log.debug('WebService: %s: ssthresh: %d.', hostkey,
                  CONGESTION_SSTHRESH[hostkey])

        CONGESTION_WINDOW_SIZE[hostkey] = 1.0
        log.debug('WebService: %s: cws: %.3f.', hostkey,
                  CONGESTION_WINDOW_SIZE[hostkey])

    elif not in_backoff_phase(hostkey):
        REQUEST_DELAY_EXPONENT[hostkey] = 0  # Coming out of backoff, so reset.

        # Shrink the delay between requests with each successive reply to
        # converge on maximum throughput.
        delay = max(int(REQUEST_DELAY[hostkey] / 2),
                    REQUEST_DELAY_MINIMUM[hostkey])
        if delay != REQUEST_DELAY[hostkey]:
            log.debug('WebService: %s: delay: %dms -> %dms.', hostkey,
                      REQUEST_DELAY[hostkey], delay)
            REQUEST_DELAY[hostkey] = delay

        cws = CONGESTION_WINDOW_SIZE[hostkey]
        sst = CONGESTION_SSTHRESH[hostkey]

        if sst and cws >= sst:
            # Analogous to TCP's congestion avoidance phase. Window growth is linear.
            phase = 'congestion avoidance'
            cws = cws + (1.0 / cws)
        else:
            # Analogous to TCP's slow start phase. Window growth is exponential.
            phase = 'slow start'
            cws += 1

        if CONGESTION_WINDOW_SIZE[hostkey] != cws:
            log.debug('WebService: %s: %s: window size %.3f -> %.3f',
                      hostkey, phase, CONGESTION_WINDOW_SIZE[hostkey], cws)
            CONGESTION_WINDOW_SIZE[hostkey] = cws
def callback(self, objs):
    """Prompt for a playlist filename and write an M3U8 playlist.

    Builds a default filename from the first album's artist/title
    (artist omitted for Various Artists releases), asks the user where
    to save, then writes one EXTINF + path entry per linked file.
    Paths are made relative when the playlist sits in the same
    directory as the audio files.
    """
    current_directory = (self.config.persist["current_directory"]
                         or QtCore.QDir.homePath())
    current_directory = find_existing_path(string_(current_directory))
    # Default playlist filename set as "%albumartist% - %album%.m3u8",
    # except where "Various Artists" is suppressed
    if _debug_level > 1:
        log.debug(
            "{}: VARIOUS_ARTISTS_ID is {}, musicbrainz_albumartistid is {}"
            .format(PLUGIN_NAME, VARIOUS_ARTISTS_ID,
                    objs[0].metadata["musicbrainz_albumartistid"]))
    if objs[0].metadata["musicbrainz_albumartistid"] != VARIOUS_ARTISTS_ID:
        default_filename = get_safe_filename(
            objs[0].metadata["albumartist"] + " - " +
            objs[0].metadata["album"] + ".m3u8")
    else:
        default_filename = get_safe_filename(objs[0].metadata["album"] +
                                             ".m3u8")
    if _debug_level > 1:
        log.debug("{}: default playlist filename sanitized to {}".format(
            PLUGIN_NAME, default_filename))
    b_filename, b_selected_format = QtWidgets.QFileDialog.getSaveFileName(
        None, "Save new playlist",
        os.path.join(current_directory, default_filename),
        "Playlist (*.m3u8 *.m3u)")
    if b_filename:
        filename = string_(b_filename)
        playlist = Playlist(filename)
        playlist.add_header("#EXTM3U")
        for album in objs:
            for track in album.tracks:
                if track.linked_files:
                    entry = PlaylistEntry(playlist, len(playlist.entries))
                    playlist.entries.append(entry)
                    # M3U EXTINF row
                    track_length_seconds = int(
                        round(track.metadata.length / 1000.0))
                    # EXTINF format assumed to be fixed as follows:
                    entry.add(
                        "#EXTINF:{duration:d},{artist} - {title}".format(
                            duration=track_length_seconds,
                            artist=track.metadata["artist"],
                            title=track.metadata["title"]))
                    # M3U URL row - assumes only one file per track
                    audio_filename = track.linked_files[0].filename
                    if _debug_level > 1:
                        for i, file in enumerate(track.linked_files):
                            log.debug("{}: linked_file {}: {}".format(
                                PLUGIN_NAME, i, str(file)))
                    # If playlist is in same directory as audio files, then use
                    # local (relative) pathname, otherwise use absolute pathname
                    if _debug_level > 1:
                        log.debug(
                            "{}: audio_filename: {}, selected dir: {}".
                            format(PLUGIN_NAME, audio_filename,
                                   os.path.dirname(filename)))
                    if os.path.dirname(filename) == os.path.dirname(
                            audio_filename):
                        audio_filename = os.path.basename(audio_filename)
                    entry.add(string_(audio_filename))
        playlist.write()
def _parse_release(self, release_node):
    """Populate album metadata from a MusicBrainz release JSON node.

    Handles MBID redirects (merging into an already-loaded album when
    possible), shares release-level artist nodes with matching artist
    references, copies release-group metadata, applies user settings
    (VA name, punctuation conversion) and runs album metadata plugins.

    Returns:
        False if this album was merged into an existing one and removed,
        True otherwise.
    """
    log.debug("Loading release %r ...", self.id)
    self._tracks_loaded = False
    release_id = release_node['id']
    if release_id != self.id:
        # The requested MBID was redirected to another release.
        self.tagger.mbid_redirects[self.id] = release_id
        album = self.tagger.albums.get(release_id)
        if album:
            # Target already loaded: move our files there and drop self.
            log.debug("Release %r already loaded", release_id)
            album.match_files(self.unmatched_files.files)
            album.update()
            self.tagger.remove_album(self)
            return False
        else:
            # Re-key this album object under the redirected id.
            del self.tagger.albums[self.id]
            self.tagger.albums[release_id] = self
            self.id = release_id

    # Make the release artist nodes available, since they may
    # contain supplementary data (aliases, tags, genres, ratings)
    # which aren't present in the release group, track, or
    # recording artist nodes. We can copy them into those places
    # wherever the IDs match, so that the data is shared and
    # available for use in mbjson.py and external plugins.
    self._release_artist_nodes = _create_artist_node_dict(release_node)

    # Get release metadata
    m = self._new_metadata
    m.length = 0

    rg_node = release_node['release-group']
    rg = self.release_group = self.tagger.get_release_group_by_id(
        rg_node['id'])
    rg.loaded_albums.add(self.id)
    rg.refcount += 1

    _copy_artist_nodes(self._release_artist_nodes, rg_node)
    release_group_to_metadata(rg_node, rg.metadata, rg)
    m.copy(rg.metadata)
    release_to_metadata(release_node, m, album=self)

    # Custom VA name
    if m['musicbrainz_albumartistid'] == VARIOUS_ARTISTS_ID:
        m['albumartistsort'] = m['albumartist'] = config.setting['va_name']

    # Convert Unicode punctuation
    if config.setting['convert_punctuation']:
        m.apply_func(asciipunct)

    m['totaldiscs'] = len(release_node['media'])

    # Add album to collections
    add_release_to_user_collections(release_node)

    # Run album metadata plugins
    try:
        run_album_metadata_processors(self, m, release_node)
    except BaseException:
        # Plugin failures are reported on the album, not fatal.
        self.error_append(traceback.format_exc())

    self._release_node = release_node
    return True
def fetch_cleanup(self):
    """Cancel every outstanding cover art fetch task for the grid cells."""
    for cell in self.cover_cells:
        task = cell.fetch_task
        if task is None:
            continue
        log.debug("Removing cover art fetch task for %s",
                  cell.release['musicbrainz_albumid'])
        self.tagger.webservice.remove_task(task)
def remove(self, from_parent=True):
    """Remove this file from the tagger and mark it as removed.

    When *from_parent* is true and a parent exists, the file is also
    detached from that parent container first.
    """
    parent = self.parent
    if from_parent and parent:
        log.debug("Removing %r from %r", self, parent)
        parent.remove_file(self)
    self.tagger.acoustidmanager.remove(self)
    self.state = File.REMOVED
def _load(self, filename):
    """Read ID3 tags from *filename* and translate them into a Metadata object.

    Dispatches on each frame's FrameID, mapping frames to Picard tag names
    via the class translation tables; unknown-but-harmless data ends up
    under '~id3:...' names or is skipped.
    """
    log.debug("Loading file %r", filename)
    file = self._get_file(encode_filename(filename))
    tags = file.tags or {}
    # upgrade custom 2.3 frames to 2.4
    for old, new in self.__upgrade.items():
        if old in tags and new not in tags:
            f = tags.pop(old)
            tags.add(getattr(id3, new)(encoding=f.encoding, text=f.text))
    metadata = Metadata()
    for frame in tags.values():
        frameid = frame.FrameID
        if frameid in self.__translate:
            name = self.__translate[frameid]
            if frameid.startswith('T'):
                # Text frames can hold multiple values; skip empty ones.
                for text in frame.text:
                    if text:
                        metadata.add(name, unicode(text))
            elif frameid == 'COMM':
                # Comment frames are keyed by their description.
                for text in frame.text:
                    if text:
                        metadata.add('%s:%s' % (name, frame.desc),
                                     unicode(text))
            else:
                metadata.add(name, unicode(frame))
        elif frameid == "TMCL":
            # Musician credits list: role/name pairs.
            for role, name in frame.people:
                if role or name:
                    metadata.add('performer:%s' % role, name)
        elif frameid == "TIPL":
            # If file is ID3v2.3, TIPL tag could contain TMCL
            # so we will test for TMCL values and add to TIPL if not TMCL
            for role, name in frame.people:
                if role in self._tipl_roles and name:
                    metadata.add(self._tipl_roles[role], name)
                else:
                    metadata.add('performer:%s' % role, name)
        elif frameid == 'TXXX':
            name = frame.desc
            if name in self.__translate_freetext:
                name = self.__translate_freetext[name]
            elif ((name in self.__rtranslate) !=
                  (name in self.__rtranslate_freetext)):
                # If the desc of a TXXX frame conflicts with the name of a
                # Picard tag, load it into ~id3:TXXX:desc rather than desc.
                #
                # This basically performs an XOR, making sure that 'name'
                # is in __rtranslate or __rtranslate_freetext, but not
                # both. (Being in both implies we support reading it both
                # ways.) Currently, the only tag in both is license.
                name = '~id3:TXXX:' + name
            for text in frame.text:
                metadata.add(name, unicode(text))
        elif frameid == 'USLT':
            # Unsynchronised lyrics; description becomes a tag suffix.
            name = 'lyrics'
            if frame.desc:
                name += ':%s' % frame.desc
            metadata.add(name, unicode(frame.text))
        elif frameid == 'UFID' and frame.owner == 'http://musicbrainz.org':
            # MusicBrainz stores the recording MBID in a UFID frame.
            metadata['musicbrainz_recordingid'] = frame.data.decode(
                'ascii', 'ignore')
        elif frameid in self.__tag_re_parse.keys():
            # Frames whose single text value packs several Picard tags,
            # extracted with a per-frame regular expression.
            m = self.__tag_re_parse[frameid].search(frame.text[0])
            if m:
                for name, value in m.groupdict().iteritems():
                    if value is not None:
                        metadata[name] = value
            else:
                log.error("Invalid %s value '%s' dropped in %r",
                          frameid, frame.text[0], filename)
        elif frameid == 'APIC':
            # Embedded cover art; a broken image is logged and skipped.
            try:
                coverartimage = TagCoverArtImage(
                    file=filename,
                    tag=frameid,
                    types=types_from_id3(frame.type),
                    comment=frame.desc,
                    support_types=True,
                    data=frame.data,
                )
            except CoverArtImageError as e:
                log.error('Cannot load image from %r: %s' % (filename, e))
            else:
                metadata.append_image(coverartimage)
        elif frameid == 'POPM':
            # Rating in ID3 ranges from 0 to 255, normalize this to the range 0 to 5
            if frame.email == config.setting['rating_user_email']:
                rating = unicode(
                    int(
                        round(frame.rating / 255.0 *
                              (config.setting['rating_steps'] - 1))))
                metadata.add('~rating', rating)
    if 'date' in metadata:
        # Normalize the first date value to a sane YYYY-MM-DD-ish form.
        sanitized = sanitize_date(metadata.getall('date')[0])
        if sanitized:
            metadata['date'] = sanitized
    self._info(metadata, file)
    return metadata
def _save(self, filename, metadata):
    """Save metadata to the file.

    Translates Picard tag names back into ID3 frames and writes them with
    mutagen, honoring the user's encoding, ID3v2.3-compatibility and
    iTunes-compatibility settings.
    """
    log.debug("Saving file %r", filename)
    tags = self._get_tags(filename)
    config = get_config()
    if config.setting['clear_existing_tags']:
        tags.clear()
    images_to_save = list(metadata.images.to_be_saved_to_tags())
    if images_to_save:
        # Replace, rather than append to, any existing embedded images.
        tags.delall('APIC')

    # ID3 text-encoding byte: 3 = UTF-8, 1 = UTF-16, 0 = ISO-8859-1
    # (per the ID3v2 spec) as the fallback.
    encoding = {
        'utf-8': 3,
        'utf-16': 1
    }.get(config.setting['id3v2_encoding'], 0)

    # Track/disc/movement numbers are written as "n/total" when a total
    # is known, always with encoding 0 (plain numbers are Latin-1 safe).
    if 'tracknumber' in metadata:
        if 'totaltracks' in metadata:
            text = '%s/%s' % (metadata['tracknumber'],
                              metadata['totaltracks'])
        else:
            text = metadata['tracknumber']
        tags.add(id3.TRCK(encoding=0, text=id3text(text, 0)))

    if 'discnumber' in metadata:
        if 'totaldiscs' in metadata:
            text = '%s/%s' % (metadata['discnumber'],
                              metadata['totaldiscs'])
        else:
            text = metadata['discnumber']
        tags.add(id3.TPOS(encoding=0, text=id3text(text, 0)))

    if 'movementnumber' in metadata:
        if 'movementtotal' in metadata:
            text = '%s/%s' % (metadata['movementnumber'],
                              metadata['movementtotal'])
        else:
            text = metadata['movementnumber']
        tags.add(id3.MVIN(encoding=0, text=id3text(text, 0)))

    # This is necessary because mutagens HashKey for APIC frames only
    # includes the FrameID (APIC) and description - it's basically
    # impossible to save two images, even of different types, without
    # any description.
    counters = defaultdict(lambda: 0)
    for image in images_to_save:
        desc = desctag = image.comment
        if counters[desc] > 0:
            # Disambiguate duplicate descriptions with a " (N)" suffix.
            if desc:
                desctag = "%s (%i)" % (desc, counters[desc])
            else:
                desctag = "(%i)" % counters[desc]
        counters[desc] += 1
        tags.add(
            id3.APIC(encoding=0,
                     mime=image.mimetype,
                     type=image_type_as_id3_num(image.maintype),
                     desc=id3text(desctag, 0),
                     data=image.data))

    # People lists are accumulated across all tags and added at the end.
    tmcl = mutagen.id3.TMCL(encoding=encoding, people=[])
    tipl = mutagen.id3.TIPL(encoding=encoding, people=[])

    for name, values in metadata.rawitems():
        values = [id3text(v, encoding) for v in values]
        name = id3text(name, encoding)
        name_lower = name.lower()

        if not self.supports_tag(name):
            continue
        elif name.startswith('performer:'):
            role = name.split(':', 1)[1]
            for value in values:
                if config.setting['write_id3v23']:
                    # TIPL will be upgraded to IPLS
                    tipl.people.append([role, value])
                else:
                    tmcl.people.append([role, value])
        elif name == 'comment' or name.startswith('comment:'):
            (lang, desc) = parse_comment_tag(name)
            if desc.lower()[:4] == 'itun':
                # iTunes normalization comments: fixed 'eng' language and
                # a trailing NUL on each value, replacing any existing frame.
                tags.delall('COMM:' + desc)
                tags.add(
                    id3.COMM(encoding=0,
                             desc=desc,
                             lang='eng',
                             text=[v + '\x00' for v in values]))
            else:
                tags.add(
                    id3.COMM(encoding=encoding,
                             desc=desc,
                             lang=lang,
                             text=values))
        elif name.startswith('lyrics:') or name == 'lyrics':
            if ':' in name:
                desc = name.split(':', 1)[1]
            else:
                desc = ''
            for value in values:
                tags.add(id3.USLT(encoding=encoding, desc=desc, text=value))
        elif name in self._rtipl_roles:
            for value in values:
                tipl.people.append([self._rtipl_roles[name], value])
        elif name == 'musicbrainz_recordingid':
            tags.add(
                id3.UFID(owner='http://musicbrainz.org',
                         data=bytes(values[0], 'ascii')))
        elif name == '~rating':
            # Search for an existing POPM frame to get the current playcount
            for frame in tags.values():
                if frame.FrameID == 'POPM' and frame.email == config.setting[
                        'rating_user_email']:
                    count = getattr(frame, 'count', 0)
                    break
            else:
                count = 0

            # Convert rating to range between 0 and 255
            rating = int(
                round(
                    float(values[0]) * 255 /
                    (config.setting['rating_steps'] - 1)))
            tags.add(
                id3.POPM(email=config.setting['rating_user_email'],
                         rating=rating,
                         count=count))
        elif name == 'grouping':
            # iTunes reads grouping from GRP1; the standard frame is TIT1.
            if config.setting['itunes_compatible_grouping']:
                tags.add(id3.GRP1(encoding=encoding, text=values))
            else:
                tags.add(id3.TIT1(encoding=encoding, text=values))
        elif name == 'work' and config.setting[
                'itunes_compatible_grouping']:
            # In iTunes-compatible mode the work goes into TIT1 and any
            # free-text work tags are removed.
            tags.add(id3.TIT1(encoding=encoding, text=values))
            tags.delall('TXXX:Work')
            tags.delall('TXXX:WORK')
        elif name in self.__rtranslate:
            frameid = self.__rtranslate[name]
            if frameid.startswith('W'):
                # URL frames: only write them when every value parses as a
                # URL with both scheme and netloc.
                valid_urls = all([all(urlparse(v)[:2]) for v in values])
                if frameid == 'WCOP':
                    # Only add WCOP if there is only one license URL, otherwise use TXXX:LICENSE
                    if len(values) > 1 or not valid_urls:
                        tags.add(
                            self.build_TXXX(
                                encoding,
                                self.__rtranslate_freetext[name],
                                values))
                    else:
                        tags.add(id3.WCOP(url=values[0]))
                elif frameid == 'WOAR' and valid_urls:
                    for url in values:
                        tags.add(id3.WOAR(url=url))
            elif frameid.startswith('T') or frameid == 'MVNM':
                if config.setting['write_id3v23']:
                    if frameid == 'TMOO':
                        tags.add(self.build_TXXX(encoding, 'mood', values))
                # No need to care about the TMOO tag being added again as it is
                # automatically deleted by Mutagen if id2v23 is selected
                tags.add(
                    getattr(id3, frameid)(encoding=encoding, text=values))
                # Drop the obsolete non-standard sort frames superseded by
                # the standard ones we just wrote.
                if frameid == 'TSOA':
                    tags.delall('XSOA')
                elif frameid == 'TSOP':
                    tags.delall('XSOP')
                elif frameid == 'TSO2':
                    tags.delall('TXXX:ALBUMARTISTSORT')
        elif name_lower in self.__rtranslate_freetext_ci:
            # Case-insensitive free-text tags: reuse the original casing
            # seen in the file when one was recorded.
            if name_lower in self.__casemap:
                description = self.__casemap[name_lower]
            else:
                description = self.__rtranslate_freetext_ci[name_lower]
            delall_ci(tags, 'TXXX:' + description)
            tags.add(self.build_TXXX(encoding, description, values))
        elif name in self.__rtranslate_freetext:
            description = self.__rtranslate_freetext[name]
            if description in self.__rrename_freetext:
                tags.delall('TXXX:' + self.__rrename_freetext[description])
            tags.add(self.build_TXXX(encoding, description, values))
        elif name.startswith('~id3:'):
            # Raw pass-through of frames loaded under ~id3: names.
            name = name[5:]
            if name.startswith('TXXX:'):
                tags.add(self.build_TXXX(encoding, name[5:], values))
            else:
                frameclass = getattr(id3, name[:4], None)
                if frameclass:
                    tags.add(frameclass(encoding=encoding, text=values))
        # don't save private / already stored tags
        elif not name.startswith(
                "~") and name not in self.__other_supported_tags:
            tags.add(self.build_TXXX(encoding, name, values))

    if tmcl.people:
        tags.add(tmcl)
    if tipl.people:
        tags.add(tipl)

    self._remove_deleted_tags(metadata, tags)

    self._save_tags(tags, encode_filename(filename))

    if self._IsMP3 and config.setting["remove_ape_from_mp3"]:
        # Best-effort removal of stray APEv2 tags from MP3 files;
        # failures are deliberately ignored.
        try:
            mutagen.apev2.delete(encode_filename(filename))
        except BaseException:
            pass