def query(resource, mbid, includes=()):
    """Query MusicBrainz' web service for *resource* with *mbid* and the given
    iterable of *includes*.

    Returns an LXML ElementTree root node with all namespaces removed from the
    result. Query results are cached in the <prefix>musicbrainzqueries table;
    on a cache miss the web service is contacted and the raw XML is stored.

    Raises urllib.error.HTTPError if the resource does not exist (404) and
    ConnectionError on any other HTTP failure.
    """
    # FIX: default was a mutable list ([]); an immutable tuple avoids the
    # shared-mutable-default pitfall while remaining backward-compatible.
    url = '{}/{}/{}'.format(wsURL, resource, mbid)
    if queryCallback:
        queryCallback(url)
    if includes:
        url += '?inc={}'.format('+'.join(includes))
    logging.debug(__name__, 'querying {}'.format(url))
    ans = db.query("SELECT xml FROM {}musicbrainzqueries WHERE url=?".format(db.prefix), url)
    try:
        data = ans.getSingle()
    except db.EmptyResultException:
        # Not cached yet -> fetch from the web service and store the result.
        try:
            request = urllib.request.Request(url)
            request.add_header('User-Agent',
                               'Maestro/0.4.0 (https://github.com/maestromusic/maestro)')
            with urllib.request.urlopen(request) as response:
                data = response.read()
        except urllib.error.HTTPError as e:
            if e.code == 404:
                raise e
            else:
                raise ConnectionError(e.msg)
        db.query("INSERT INTO {}musicbrainzqueries (url, xml) VALUES (?,?)"
                 .format(db.prefix), url, data)
    root = etree.fromstring(data)
    # Strip namespace prefixes ('{uri}tag' -> 'tag') so callers can use plain tag names.
    for node in root.iter():
        if node.tag.startswith('{'):
            node.tag = node.tag.rsplit('}', 1)[-1]
    return root
def checkHashes(self):
    """Called periodically while hashes are being computed.

    Drains the hash thread's result queue, writes any newly computed hashes
    to the database and, once the hash thread reports that all jobs are done,
    advances to the next stage of the scan (or restarts it if it was
    interrupted).
    """
    # Read the completion flag *before* draining the queue: results that
    # arrive in between are then picked up by the next timer tick instead of
    # being lost.
    allJobsDone = self.hashThread.lastJobDone.is_set()
    updated = []
    while True:
        try:
            path, newHash = self.hashThread.resultQueue.get(False)
        except queue.Empty:
            break
        trackFile = self.files.get(path)
        if trackFile is None:
            continue
        trackFile.hash = newHash
        updated.append(trackFile)
    if updated:
        logging.debug(__name__, 'Adding {} new file hashes to the database'.format(len(updated)))
        self.updateHashesAndVerified(updated)
    if allJobsDone:
        if self.scanInterrupted:
            self.scan()  # re-initialize scan after all hashes are complete
        else:
            self.scanTimer.stop()
            if self.scanState == ScanState.computingHashes:
                self.scanCheckModified()
            else:
                self.scanState = ScanState.notScanning
def fillReleaseForDisc(MBrelease, discid):
    """Given a stub Release object *MBrelease* (as created by
    findReleasesForDiscid) and a disc id, creates recordings, works etc. for
    the given disc.
    """
    # Re-query the release including its recordings.
    release = query("release", MBrelease.mbid, ("recordings",)).find("release")
    pos, MBmedium = MBrelease.mediumForDiscid(discid)
    MBmedium.currentDiscid = discid
    from .elements import Recording, Medium
    # find the medium in the xml tree
    # NOTE(review): if no <medium> matches *pos*, the leaked loop variable
    # silently refers to the last medium — presumably pos always matches
    # because it came from mediumForDiscid; confirm.
    for medium in release.iterfind('medium-list/medium'):
        if int(medium.findtext('position')) == pos:
            break
    # Create a Recording child on the medium for every track of that medium.
    for track in medium.iterfind('track-list/track'):
        recording = track.find('recording')
        tracknr = int(track.findtext('number'))
        MBrec = Recording(recording.get("id"), int(track.findtext("position")), MBmedium, tracknr)
        MBrec.tags.add("title", recording.findtext("title"))
        MBrec.backendUrl = urls.URL("audiocd://{0}.{1}{2}/{0}/{1}.flac"
                                    .format(discid, tracknr,
                                            os.path.abspath(config.options.audiocd.rippath)))
    # Fetch detailed information (artists, works, ...) per recording.
    for _, MBrec in sorted(MBmedium.children.items()):
        MBrec.lookupInfo()
    MBmedium.insertWorks()
    if len(MBrelease.children) == 1:
        # Only one medium -> flatten: hoist its children into the release.
        logging.debug(__name__, 'single child release -> removing release container')
        del MBrelease.children[pos]
        for p, child in MBmedium.children.items():
            MBrelease.insertChild(p, child)
        MBrelease.passTags(excludes=['title'])
    # Media of the release that do not belong to this disc are ignored.
    for p in list(MBrelease.children.keys()):
        if isinstance(MBrelease.children[p], Medium) and MBrelease.children[p] != MBmedium:
            MBrelease.children[p].ignore = True
def handleMissingFiles(self):
    """Called after all missing hashes have been computed and all modified
    files have been examined for tag changes.

    Files that disappeared from their database location (self.missingDB) are
    matched by hash against untracked files to detect moves performed outside
    Maestro; detected moves are applied to the database. Any files still
    missing afterwards are presented to the user in a dialog. Finally the
    scan is marked as finished.
    """
    self.handleTagAndHashChanges()
    if len(self.missingDB) > 0:
        # some files have been (re)moved outside Maestro
        missingHashes = {}  # hashes of missing files mapped to Track objects
        for file in self.missingDB:
            if file.hash is not None:
                missingHashes[file.hash] = file
        if len(missingHashes) > 0:
            # search newfiles for the missing hashes in order to detect moves
            detectedMoves = []
            for file in self.files.values():
                # file.id is None -> file not in the database yet (candidate target of a move)
                if file.id is None and file.hash in missingHashes:
                    oldFile = missingHashes[file.hash]
                    detectedMoves.append((oldFile, file.url))
                    self.missingDB.remove(oldFile)
                    del missingHashes[file.hash]
            for file, newURL in detectedMoves:
                # NOTE(review): '{p}' looks like a placeholder that db.query
                # expands itself (other call sites use .format(db.prefix)
                # explicitly) — confirm against the db module.
                db.query('UPDATE {p}files SET url=? WHERE element_id=?', str(newURL), file.id)
                logging.info(__name__, 'renamed outside maestro: {}->{}'.format(file.url, newURL))
                self.moveFile(file, newURL)
        if len(self.missingDB) > 0:
            # --> some files are lost. Show a dialog and let the user fix this
            from maestro.filesystem import dialogs
            dialog = dialogs.MissingFilesDialog([file.id for file in self.missingDB])
            dialog.exec_()
            stack.clear()
    self.scanState = ScanState.notScanning
    self.scanTimer.stop()
    logging.debug(__name__, 'scan of source {} finished'.format(self.name))
def scanCheckModified(self):
    """Scan stage: find database files whose on-disk modification timestamp is
    newer than their last verification and check them for tag/audio changes in
    a background thread. If nothing was modified, proceed directly to the
    missing-files handling.
    """
    self.scanState = ScanState.checkModified
    modified = [self.files[path]
                for path, stamp in self.fsFiles.items()
                if self.files[path].id and stamp > self.files[path].verified]
    if modified:
        logging.debug(__name__, '{} files modified since last check'.format(len(modified)))
        self.fsThread = threading.Thread(target=checkFiles, args=(modified, self), daemon=True)
        self.fsThread.start()
        # Poll the checker thread once per second via the scan timer.
        self.scanTimer.start(1000)
    else:
        self.handleMissingFiles()
def enable(self):
    """Activate this filesystem source: reset internal state, load stored
    files and folders, refresh all folder states and schedule an initial scan.
    """
    self.enabled = True
    self.files, self.folders = {}, {}
    self.hashThread = HashThread()
    self.scanInterrupted = False
    self.scanState = ScanState.notScanning
    logging.debug(__name__, 'loading filesystem source {}'.format(self.name))
    self.load()
    # Deepest paths first: updating children before parents avoids recursive
    # state updates.
    byDepth = sorted(self.folders.values(), key=lambda folder: folder.path, reverse=True)
    for folder in byDepth:
        folder.updateState(False)
    # Delay the first full scan so application startup stays responsive.
    QtCore.QTimer.singleShot(5000, self.scan)
    levels.real.filesystemDispatcher.connect(self.handleRealFileEvent)
def disconnectClient(self, skipSocket=False):
    """Disconnect from MPD.

    If *skipSocket* is True, nothing is done on the socket (useful after
    connection failures).
    """
    logging.debug(__name__, "calling MPD disconnect host {}".format(self.host))
    if self.idleTimer.isActive():
        self.idleTimer.stop()
    if not skipSocket:
        self.checkIdle(False)
        self.client.close()
        self.client.disconnect()
    self.client = None
    # BUG FIX: the stored state must match the state emitted below; it was
    # erroneously set to ConnectionState.Connected, leaving the backend
    # believing it was still connected after a disconnect.
    self.connectionState = player.ConnectionState.Disconnected
    self.connectionStateChanged.emit(player.ConnectionState.Disconnected)
    stack.resetSubstack(self.stack)
def query(resource, mbid, includes=()):
    """Query MusicBrainz' web service for *resource* with *mbid* and the given
    iterable of *includes*.

    Returns an LXML ElementTree root node with all namespaces removed from the
    result. Query results are cached in the <prefix>musicbrainzqueries table;
    on a cache miss the web service is contacted and the raw XML is stored.

    Raises urllib.error.HTTPError if the resource does not exist (404) and
    ConnectionError on any other HTTP failure.
    """
    # FIX: default was a mutable list ([]); an immutable tuple avoids the
    # shared-mutable-default pitfall while remaining backward-compatible.
    url = '{}/{}/{}'.format(wsURL, resource, mbid)
    if queryCallback:
        queryCallback(url)
    if includes:
        url += '?inc={}'.format('+'.join(includes))
    logging.debug(__name__, 'querying {}'.format(url))
    ans = db.query(
        "SELECT xml FROM {}musicbrainzqueries WHERE url=?".format(db.prefix), url)
    try:
        data = ans.getSingle()
    except db.EmptyResultException:
        # Not cached yet -> fetch from the web service and store the result.
        try:
            request = urllib.request.Request(url)
            request.add_header(
                'User-Agent',
                'Maestro/0.4.0 (https://github.com/maestromusic/maestro)')
            with urllib.request.urlopen(request) as response:
                data = response.read()
        except urllib.error.HTTPError as e:
            if e.code == 404:
                raise e
            else:
                raise ConnectionError(e.msg)
        db.query(
            "INSERT INTO {}musicbrainzqueries (url, xml) VALUES (?,?)".format(
                db.prefix), url, data)
    root = etree.fromstring(data)
    # Strip namespace prefixes ('{uri}tag' -> 'tag') so callers can use plain tag names.
    for node in root.iter():
        if node.tag.startswith('{'):
            node.tag = node.tag.rsplit('}', 1)[-1]
    return root
def scan(self):
    """Initiates a filesystem scan in order to synchronize Maestro's database
    with the real filesystem layout.

    The filesystem scan consists of multiple stages:
    1. Walk through the filesystem, storing existing files / directories and
       modification timestamps of all files. This is performed in a different
       thread by readFilesystem().
    2. Compare the results of 1) with the Source's internal structures
       (handleInitialScan())
    3. Compute missing hashes of files (class HashThread). checkHashes()
       inserts them into DB.
    4. For files that were modified since last verification, check if tags
       and/or audio data have changed. This is done in a separate thread by
       checkFiles().
    5. Finally, analyzeScanResults() analyzes the results and, if necessary,
       displays dialogs.
    """
    # Fresh per-scan state: filesystem snapshot, result queues, missing list.
    self.fsFiles = {}
    self.modifiedTags = queue.Queue()
    self.changedHash = queue.Queue()
    self.missingDB = []
    self.fsThread = threading.Thread(target=readFilesystem, args=(self.path, self), daemon=True)
    self.fsThread.start()
    self.scanInterrupted = False
    self.scanState = ScanState.initialScan
    # Poll the walker thread every 200 ms; the timer's handler advances the scan.
    self.scanTimer.start(200)
    logging.debug(__name__, 'source {} scanning path {}'.format(self.name, self.path))
def fillReleaseForDisc(MBrelease, discid):
    """Given a stub Release object *MBrelease* (as created by
    findReleasesForDiscid) and a disc id, creates recordings, works etc. for
    the given disc.
    """
    # Query the release again, this time including its recordings.
    release = query("release", MBrelease.mbid, ("recordings", )).find("release")
    pos, MBmedium = MBrelease.mediumForDiscid(discid)
    MBmedium.currentDiscid = discid
    from .elements import Recording, Medium
    # find the medium in the xml tree
    # NOTE(review): the leaked loop variable is used below; assumes a medium
    # with position == pos always exists in the response — confirm.
    for medium in release.iterfind('medium-list/medium'):
        if int(medium.findtext('position')) == pos:
            break
    # Build a Recording child for each track of the matched medium.
    for track in medium.iterfind('track-list/track'):
        recording = track.find('recording')
        tracknr = int(track.findtext('number'))
        MBrec = Recording(recording.get("id"), int(track.findtext("position")), MBmedium, tracknr)
        MBrec.tags.add("title", recording.findtext("title"))
        MBrec.backendUrl = urls.URL("audiocd://{0}.{1}{2}/{0}/{1}.flac".format(
            discid, tracknr, os.path.abspath(config.options.audiocd.rippath)))
    # Fetch per-recording details (artists, works, ...).
    for _, MBrec in sorted(MBmedium.children.items()):
        MBrec.lookupInfo()
    MBmedium.insertWorks()
    if len(MBrelease.children) == 1:
        # Single-medium release -> flatten by hoisting the medium's children.
        logging.debug(__name__, 'single child release -> removing release container')
        del MBrelease.children[pos]
        for p, child in MBmedium.children.items():
            MBrelease.insertChild(p, child)
        MBrelease.passTags(excludes=['title'])
    # Mark all other media of the release as ignored.
    for p in list(MBrelease.children.keys()):
        if isinstance(MBrelease.children[p], Medium) and MBrelease.children[p] != MBmedium:
            MBrelease.children[p].ignore = True
def checkFiles(files: list, source: Source):
    """Compare database and filesystem state of *files* (runs in a worker thread).

    For each file the audio hash is recomputed. If the stored tags differ from
    the tags on disk, a ``(file, hash, dbTags, fileTags)`` tuple is put on
    ``source.modifiedTags``. Otherwise ``(file, hash)`` is put on
    ``source.changedHash`` so that the hash and/or verification timestamp get
    updated.
    """
    identifier = AudioFileIdentifier()
    for file in files:
        hash = identifier(file.url.path)
        # Prefer the in-memory element if it is loaded on the real level;
        # otherwise read the tag storage directly from the database.
        if file.id in levels.real:
            dbTags = levels.real.collect(file.id).tags
        else:
            dbTags = db.tags.getStorage(file.id)
        backendFile = file.url.backendFile()
        backendFile.readTags()
        if dbTags.withoutPrivateTags() != backendFile.tags:
            logging.debug(__name__, 'Detected modification on file "{}": tags differ'.format(file.url))
            source.modifiedTags.put((file, hash, dbTags, backendFile.tags))
        else:
            if hash != file.hash:
                logging.debug(__name__, "audio data of {} modified!".format(file.url.path))
            else:
                logging.debug(__name__, 'updating verification timestamp of {}'.format(file.url.path))
            source.changedHash.put((file, hash))
def updatePlaylist(self):
    """Update the playlist if it has changed on the server.

    Currently, two special cases are detected: Insertion of consecutive
    songs, and removal of consecutive songs. In any other case, a complete
    playlist change is issued.
    """
    newVersion = int(self.mpdStatus["playlist"])
    if newVersion == self.playlistVersion:
        return
    logging.debug(__name__, "detected new plVersion: {}-->{}".format(self.playlistVersion, newVersion))
    if self.playlistVersion is None:
        # this happens only on initialization.
        self.mpdPlaylist = [x["file"] for x in self.client.playlistinfo()]
        self.playlistVersion = newVersion
        self.playlist.initFromUrls(self.makeUrls(self.mpdPlaylist))
        return
    # plchanges returns the entries changed since our last known version.
    plChanges = self.client.plchanges(self.playlistVersion)
    changedFiles = [ a["file"] for a in plChanges ]
    self.playlistVersion = newVersion
    newLength = int(self.mpdStatus["playlistlength"])
    # first special case: find out if only consecutive songs were removed
    if newLength < len(self.mpdPlaylist):
        numRemoved = len(self.mpdPlaylist) - newLength
        # The entries reported as changed should equal our tail if the removal
        # was a single consecutive chunk.
        oldSongsThere = self.mpdPlaylist[-len(plChanges):] if len(plChanges) > 0 else []
        if changedFiles == oldSongsThere:
            firstRemoved = newLength - len(plChanges)
            del self.mpdPlaylist[firstRemoved:firstRemoved+numRemoved]
            self.playlist.removeByOffset(firstRemoved, numRemoved, updateBackend='onundoredo')
            return
    # second special case: find out if a number of consecutive songs were inserted
    elif newLength > len(self.mpdPlaylist):
        numInserted = newLength - len(self.mpdPlaylist)
        numShifted = len(plChanges) - numInserted
        if numShifted == 0:
            newSongsThere = []
            oldSongsThere = []
        else:
            # NOTE(review): plChanges holds dicts while mpdPlaylist holds file
            # strings, so this comparison can only succeed when both lists are
            # empty (numShifted == 0, i.e. a pure append) — possibly intended
            # to be changedFiles[numInserted:]; confirm before changing.
            newSongsThere = plChanges
            oldSongsThere = self.mpdPlaylist[-numShifted:]
        if newSongsThere == oldSongsThere:
            firstInserted = len(self.mpdPlaylist) - numShifted
            paths = changedFiles[:numInserted]
            self.mpdPlaylist[firstInserted:firstInserted] = paths
            urls = self.makeUrls(paths)
            pos = int(plChanges[0]["pos"])
            self.playlist.insertUrlsAtOffset(pos, urls, updateBackend='onundoredo')
            return
    if len(plChanges) == 0:
        logging.warning(__name__, 'no changes???')
        return
    # other cases: update self.mpdPlaylist and perform a general playlist change
    reallyChange = False
    for pos, file in sorted((int(a["pos"]),a["file"]) for a in plChanges):
        if pos < len(self.mpdPlaylist):
            if self.mpdPlaylist[pos] != file:
                reallyChange = True
                self.mpdPlaylist[pos] = file
        else:
            reallyChange = True
            self.mpdPlaylist.append(file)
    if reallyChange:
        # this might not happen e.g. when a stream is updated
        self.playlist.resetFromUrls(self.makeUrls(self.mpdPlaylist), updateBackend='onundoredo')