def download(self, songs=None, artists=None, albums=None, query=None):
    """Serve a zip archive of the requested songs (download group only).

    Songs may be selected either by a comma-separated list of song ids
    (``songs``) or by artist/album/query criteria. Requests matching more
    than 100 songs are refused with a plain-text message.
    """
    logger.debug("%s (%s)\tdownload(songs=%s, artists=%s, albums=%s, query=%s)\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, songs, artists, albums, query, cherrypy.request.headers))
    # Only members of the 'download' group may request archives.
    if cfg['REQUIRE_LOGIN'] and cherrypy.request.login not in cfg['GROUPS']['download']:
        logger.warn("%(user)s (%(ip)s) requested a download, but was denied because %(user)s is not a member of the download group." % {'user': cherrypy.request.login, 'ip': utils.find_originating_host(cherrypy.request.headers)})
        raise cherrypy.HTTPError(401,'Not Authorized')
    file_list = []
    if not songs and not artists and not albums and not query:
        raise cherrypy.HTTPError(501)
    elif songs:
        songs = songs.split(',')
        if len(songs) > 100:
            return "Too many songs! Please narrow your criteria."
        for song in songs:
            try:
                file_list.append(self.library.db[song])
            # Fixed: was a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit. An unknown id is a 404.
            except Exception:
                raise cherrypy.HTTPError(404,'Not Found')
    else:
        if artists:
            artists = artists.split(',')
        if albums:
            albums = albums.split(',')
        files = self.library.songs(artists, albums, query)
        if len(files) > 100:
            return "Too many songs! Please narrow your criteria."
        for song in files:
            file_list.append(self.library.db[song['id']])
    archive = create_archive(file_list)
    try:
        return serve_file(archive, 'application/zip', 'download.zip')
    # Fixed: the failure was logged with no detail; include the exception
    # so send failures can actually be diagnosed.
    except Exception as e:
        logger.debug("Something went wrong while sending the archive: %s" % str(e))
def config(self, set_option=None, get_option=None, value=None):
    """Get or set server configuration options (admin only).

    With ``set_option``/``value``: coerce ``value`` to the current option's
    type, store it, persist the config, and return the new value as JSON.
    With ``get_option``: return that single option as JSON.
    With neither: return the entire configuration as JSON.
    """
    logger.debug("%s (%s)\tconfig()\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, cherrypy.request.headers))
    # Configuration access is restricted to the admin group.
    if cfg['REQUIRE_LOGIN'] and cherrypy.request.login not in cfg['GROUPS']['admin']:
        logger.warn("%(user)s (%(ip)s) requested configuration data, but was denied because %(user)s is not a member of the admin group." % {'user': cherrypy.request.login, 'ip': utils.find_originating_host(cherrypy.request.headers)})
        raise cherrypy.HTTPError(401,'Not Authorized')
    if set_option:
        if not value:
            raise cherrypy.HTTPError(501,'No value provided for the requested option')
        if set_option not in cfg.keys():
            raise cherrypy.HTTPError(501,'The requested option does not exist')
        try:
            # Coerce the incoming string to the same type as the value it
            # replaces: comma-split for lists, JSON-parse for booleans.
            if type(cfg[set_option]) is types.ListType:
                value = value.split(',')
            if type(cfg[set_option]) is types.BooleanType:
                value = json.loads(value)
            if type(cfg[set_option]) is types.StringType:
                value = str(value)
        except:
            raise cherrypy.HTTPError(501, 'The value provided was the wrong type. Expected a %s' % type(cfg[set_option]))
        try:
            cfg[set_option] = value
            cfg.save_config()
            cherrypy.response.headers['Content-Type'] = 'application/json'
            return json.dumps({'config': {set_option: cfg[set_option]}})
        except Exception as x:
            # NOTE(review): on a save failure we log and fall through, so
            # the caller receives the full config dump below rather than
            # an error response — confirm that is intended.
            logger.error("Could not save configuration. The error was: %s" % str(x))
    if get_option:
        if get_option not in cfg.keys():
            raise cherrypy.HTTPError(501,'The requested option does not exist')
        cherrypy.response.headers['Content-Type'] = 'application/json'
        return json.dumps({'config': {get_option: cfg[get_option]}})
    # No arguments: dump the whole configuration.
    cherrypy.response.headers['Content-Type'] = 'application/json'
    return json.dumps({'config': cfg})
def song(self, songid, bitrate=None):
    """Serve a song, optionally transcoded to the format implied by the
    id's extension (e.g. ``<id>.mp3``) and/or the requested bitrate.
    """
    logger.debug("%s (%s)\tsong(songid=%s, bitrate=%s)\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, songid, bitrate, cherrypy.request.headers))
    # Fixed: split on the LAST dot so an id that itself contains dots
    # still parses; plain split('.') raised for multi-dot ids and left
    # the extension attached.
    try:
        songid, format = songid.rsplit('.', 1)
    except ValueError:
        # No extension supplied; get_song will fall back to the file's
        # native format.
        format = None
    return self.get_song(songid, format, bitrate)
def transcode(self, path, format='mp3', bitrate=False):
    """Transcode the file at ``path``, yielding encoded data as produced.

    The encoding runs in a child process (``transcode_process``) so a slow
    client or a cancelled request can't block the server; chunks come back
    through a pipe and are yielded one at a time.

    Args:
        path: location of the source file on disk.
        format: target format handed to the transcoder (default 'mp3').
        bitrate: target bitrate, or False for the transcoder's default.
    """
    if self.stopping.is_set():
        return
    try:
        # 'stop' lets us tell the child process to quit early.
        stop = Event()
        start_time = time.time()
        parent_conn, child_conn = Pipe()
        process = Process(target=transcode_process, args=(child_conn, path, stop, format, bitrate))
        process.start()
        while not (self.stopping.is_set() or stop.is_set()):
            data = parent_conn.recv()
            if not data:
                # An empty chunk signals the end of the stream.
                break
            yield data
        logger.debug("Transcoded %s in %0.2f seconds." % (path.encode(cfg['ENCODING']), time.time() - start_time))
    except GeneratorExit:
        # The client closed the connection mid-stream.
        stop.set()
        logger.debug("User canceled the request during transcoding.")
    except:
        # NOTE(review): bare except also hides programming errors here;
        # consider narrowing. Left as-is to preserve behavior.
        stop.set()
        logger.warn("Some type of error occured during transcoding.")
    finally:
        # NOTE(review): if Pipe() or Process() raised above, parent_conn /
        # process are unbound here and this raises NameError — verify.
        parent_conn.close()
        process.join()
def resize_cover(song, cover, size):
    """Resizes the cover image for a specific song to a given size and
    caches the resized image for any subsequent requests.

    Args:
        song: song document; its artist/album hashes name the cache file.
        cover: path to the original cover image on disk.
        size: maximum width in pixels.

    Returns the path of the image to serve (cached resize, or the original
    when it is already small enough).
    """
    logger.debug("resize_cover(song=%s, cover=%s, size=%s)" % (song, cover, size))
    # This is the path to the resized image in the cache
    img_path = os.path.join(cfg['CACHE_DIR'], str(size), hashlib.sha1(song['artist_hash'] + song['album_hash']).hexdigest() + '.jpg')
    # Make sure our cache directory exists
    cache_dir = os.path.split(img_path)[0]
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Fixed: the cache check used to open() the file under a bare except;
    # a plain existence test is clearer and can't mask unrelated errors.
    if os.path.isfile(img_path):
        return img_path
    # Load the source image file with PIL
    image = Image.open(cover)
    # Check if the image is larger than what the client asked for. If it
    # is, we'll resize it. Otherwise we'll just send the original.
    if image.size[0] > size or image.size[1] > size:
        # Figure out the scale factor from the width so we can maintain
        # the aspect ratio.
        wpercent = (size / float(image.size[0]))
        hsize = int((float(image.size[1]) * float(wpercent)))
        # Resize the image
        image = image.resize((size, hsize), Image.ANTIALIAS)
        # Save it to the cache so we won't have to do this again.
        image.save(img_path)
        return img_path
    return cover
def _process_read_queue(self):
    """Spawns a pool of worker processes to handle reading the metadata
    from all of the files in the read_queue.
    """
    self.reading.set()
    status = 'Importing songs to the library'
    self.jobs[self.current_job]['status'] = status
    # Create a pool of processes to handle the actual reading of tags.
    # Using processes instead of threads lets us take full advantage of
    # multi-core CPUs so this operation doesn't take as long.
    self.pool = Pool()
    queue_size = self.read_queue.qsize()
    while self.read_queue.qsize() > 0 and not self.stopping.is_set():
        args_list = []
        # Get 100 songs from the queue
        for x in range(100):
            try:
                args_list.append(self.read_queue.get(timeout=1))
            except:
                # The queue ran dry before we collected a full batch.
                break
        # Read the tags from the 100 songs and then stick the results in
        # the database queue.
        try:
            self.db_queue.put(self.pool.map(read_metadata, args_list))
            n_items = len(args_list)
            self.jobs[self.current_job]['processed_items'] += n_items
        except:
            logger.error("Error processing read queue.")
        # Progress log for this batch (counts everything done so far).
        logger.debug("Processed %d items in %0.2f seconds" % (queue_size - self.read_queue.qsize(), time() - self.start_time))
    self.pool.close()
    self.pool.join()
    self.reading.clear()
def list_artists(self, query=None, output='json'):
    """Return the artist list, rendered as JSON or as an HTML fragment."""
    logger.debug("%s (%s)\tlist_artists(query=%s, output=%s)\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, query, output, cherrypy.request.headers))
    result = self.library.artists(query)
    if output == 'html':
        # Render through the theme's template.
        page = Template(file=os.path.join(cfg['THEME_DIR'], 'list_artists.tmpl'))
        page.artists = result
        return page.respond()
    if output == 'json':
        cherrypy.response.headers['Content-Type'] = 'application/json'
        return json.dumps({'artists': result})
    # Any other output format is unsupported.
    raise cherrypy.HTTPError(501,'Not Implemented')
def flush_db(self):
    """Flush the song database to disk (admin only).

    Returns a JSON acknowledgement; the flush itself runs in the
    ``finally`` clause, i.e. after the response body has been built.
    """
    # NOTE(review): the debug line below says "shutdown()" — presumably
    # copied from the shutdown handler; verify the intended label.
    logger.debug("%s (%s)\tshutdown()\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, cherrypy.request.headers))
    if cfg['REQUIRE_LOGIN'] and cherrypy.request.login not in cfg['GROUPS']['admin']:
        logger.warn("%(user)s (%(ip)s) requested that the database be flushed, but was denied because %(user)s is not a member of the admin group." % {'user': cherrypy.request.login, 'ip': utils.find_originating_host(cherrypy.request.headers)})
        raise cherrypy.HTTPError(401,'Not Authorized')
    try:
        cherrypy.response.headers['Content-Type'] = 'application/json'
        logger.info("Received flush database request, complying.")
        return json.dumps({'flush_db': True})
    except:
        # Best-effort: even if building the response fails, flush anyway.
        pass
    finally:
        self.library.db.flush()
def get_playlist(self, artists=None, albums=None, query=None, format=None, list_all=False, bitrate=None, output='xspf'):
    """Build and return a playlist document for the selected songs."""
    logger.debug("%s (%s)\tget_playlist(artists=%s, albums=%s, query=%s, format=%s, list_all=%s, bitrate=%s, output=%s)\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, artists, albums, query, format, list_all, bitrate, output, cherrypy.request.headers))
    # With no selection criteria at all, the playlist is empty.
    songs = []
    if list_all or artists or albums or query:
        artist_list = artists.split(',') if artists else artists
        album_list = albums.split(',') if albums else albums
        songs = self.library.songs(artist_list, album_list, query)
    playlist, content_type = json_to_playlist(cherrypy.request.base, songs, output, format, bitrate)
    cherrypy.response.headers['Content-Type'] = content_type
    return playlist
def shutdown(self):
    """Shut the whole server down (admin only).

    Returns a JSON acknowledgement; the CherryPy engine exit happens in
    the ``finally`` clause, after the response body has been produced.
    """
    logger.debug("%s (%s)\tshutdown()\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, cherrypy.request.headers))
    if cfg['REQUIRE_LOGIN'] and cherrypy.request.login not in cfg['GROUPS']['admin']:
        logger.warn("%(user)s (%(ip)s) requested that the server shut down, but was denied because %(user)s is not a member of the admin group." % {'user': cherrypy.request.login, 'ip': utils.find_originating_host(cherrypy.request.headers)})
        raise cherrypy.HTTPError(401,'Not Authorized')
    try:
        cherrypy.response.headers['Content-Type'] = 'application/json'
        logger.info("Received shutdown request, complying.")
        # Stop the background workers first so they can wind down cleanly.
        self.transcoder.stop()
        self.library.scanner.stop()
        return json.dumps({'shutdown': True})
    except:
        # Best-effort: even if stopping a worker fails, still shut down.
        pass
    finally:
        shutting_down.set()
        logger.debug("Stopping CherryPy.")
        cherrypy.engine.exit()
def get_tags(self, songid=None):
    """Return the stored tag data for one song as JSON.

    Raises 501 when no id is given or the document isn't a song, and 404
    when the id is unknown.
    """
    logger.debug("%s (%s)\tget_tags(songid=%s)\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, songid, cherrypy.request.headers))
    if not songid:
        raise cherrypy.HTTPError(501, "You must supply a song id.")
    if not self.library.db.doc_exist(songid):
        raise cherrypy.HTTPError(404, "That song doesn't exist in the database.")
    song = self.library.db[songid]
    if song['type'] != 'song':
        raise cherrypy.HTTPError(501, "The specified document is not a song.")
    # Expose the document id under 'id' rather than CouchDB's '_id'.
    song['id'] = song.pop('_id')
    # Fixed: plain `del` raised KeyError (-> 500) whenever one of these
    # internal fields was absent; pop with a default tolerates that.
    for key in ('location', '_rev', 'type'):
        song.pop(key, None)
    cherrypy.response.headers['Content-Type'] = 'application/json'
    return json.dumps({"song": song})
def view(self, view_name, obj=None, wrapper=None, **params):
    """Return a (possibly cached) CouchDB view result.

    The cache key is the full set of view arguments serialized to JSON.
    Entries are tagged with the database update sequence number; when the
    database has changed since an entry was stored, the view is re-fetched
    before being returned.
    """
    key = anyjson.serialize({
        'view_name': view_name,
        'obj': obj,
        'wrapper': wrapper,
        'params': params
    })
    if not self.has_key(key):
        # First request for this exact view/arguments combination: store
        # it at version 0 so the freshness check below fetches it.
        self[key] = {
            'version': 0,
            'view': self.db.view(view_name, obj, wrapper, **params)
        }
    # Current database sequence number; anything cached earlier is stale.
    self.version = self.db.info()['update_seq']
    if self[key]['version'] < self.version:
        self[key]['version'] = self.version
        self[key]['view'].fetch()
    else:
        logger.debug("Successful cache hit for %s" % key)
    return self[key]['view']
def artists(self, query=None):
    '''Returns a list of artists as dictionary objects.'''
    logger.debug("Generating artist list.")
    start_time = time()
    # Collected results to hand back to the caller.
    result = []
    if query:
        # Normalize the search text into individual terms.
        terms = utils.clean_text(query).split(' ')
        # The search view is keyed on [artist, album, title], so rows come
        # back alphabetized by artist and all three fields are searchable.
        for row in self.cache.view('artists/search'):
            candidate = {
                'id': row['value'],
                'name': row['key'][0]
            }
            if candidate in result:
                continue
            haystack = utils.clean_text(';'.join(row['key']))
            if all(term in haystack for term in terms):
                result.append(candidate)
    else:
        # No filter: every artist in the database.
        for row in self.cache.view('artists/all', group="true"):
            result.append({'id': row['value'], 'name': row['key']})
    elapsed = time() - start_time
    logger.debug("Generated list of %d artists in %0.2f seconds." % (len(result), elapsed))
    return result
def random(self, songs=None, artists=None, albums=None, query=None, limit=None):
    """Return up to ``limit`` songs, shuffled, as a JSON list.

    The pool is restricted by artist/album/query criteria and optionally
    by a comma-separated list of song ids.
    """
    logger.debug("%s (%s)\trandom(songs=%s, artists=%s, albums=%s, query=%s, limit=%s)\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, songs, artists, albums, query, limit, cherrypy.request.headers))
    song_list = []
    if artists:
        artists = artists.split(',')
    if albums:
        albums = albums.split(',')
    files = self.library.songs(artists, albums, query)
    if songs:
        songs = songs.split(',')
        for song in files:
            if song['id'] in songs:
                # Fixed: was `songs_list.append(song)` — an undefined name
                # that raised NameError whenever the songs filter was used.
                song_list.append(song)
    else:
        song_list = files
    shuffle(song_list)
    try:
        limit = int(limit)
    except (TypeError, ValueError):
        # No usable limit given: return everything.
        limit = len(song_list)
    cherrypy.response.headers['Content-Type'] = 'application/json'
    return json.dumps(song_list[:limit])
def list_songs(self, artists=None, albums=None, start=None, length=None, query=None, list_all=False, archive=False, output='json'):
    """Return songs matching the given criteria, sorted for display.

    ``start``/``length`` page through the result; ``output`` selects JSON
    or the HTML template. With no criteria the list is empty.
    """
    logger.debug("%s (%s)\tlist_songs(artists=%s, albums=%s, start=%s, length=%s, query=%s, list_all=%s, archive=%s, output=%s)\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, artists, albums, start, length, query, list_all, archive, output, cherrypy.request.headers))
    if not list_all and not artists and not albums and not query and not archive:
        songs = []
    else:
        if artists:
            artists = artists.split(',')
        if albums:
            albums = albums.split(',')
        songs = self.library.songs(artists, albums, query)
    # Optional paging.
    if start and length:
        start = int(start)
        end = int(length) + start
        # Fixed: the old clamp set end = -1 when the requested page ran
        # past the end of the list, and songs[start:-1] silently dropped
        # the final song. Clamp to the list length instead.
        if end > len(songs):
            end = len(songs)
        songs = songs[start:end]
    songs.sort(key=itemgetter('albumartist', 'album', 'date', 'discnumber', 'tracknumber'))
    if output == 'json':
        cherrypy.response.headers['Content-Type'] = 'application/json'
        return json.dumps({'songs': songs})
    elif output == 'html':
        template = Template(file=os.path.join(cfg['THEME_DIR'], 'list_songs.tmpl'))
        template.songs = songs
        return template.respond()
    else:
        raise cherrypy.HTTPError(501,'Not Implemented')
def get_cover(self, songid=None, size='original', download=False):
    """Serve a song's cover art, optionally resized or as an attachment."""
    logger.debug("%s (%s)\tget_cover(songid=%s, size=%s, download=%s)\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, songid, size, download, cherrypy.request.headers))
    try:
        song = self.library.db[songid]
    except:
        raise cherrypy.HTTPError(404)
    # A non-numeric size means "serve the original".
    try:
        size = int(size)
    except:
        size = 'original'
    cover = find_cover(song)
    if cover is None:
        raise cherrypy.HTTPError(404,'Not Found')
    cover_mime = mimetypes.guess_type(cover)[0]
    if download:
        # Send the original file as an attachment so the browser saves it.
        return serve_file(cover, cover_mime, "attachment", os.path.basename(cover))
    artwork = cover if size == 'original' else resize_cover(song, cover, size)
    cherrypy.response.headers['Content-Type'] = cover_mime
    return serve_file(artwork, mimetypes.guess_type(artwork)[0], "inline", os.path.basename(artwork))
def _scan(self):
    """Scans the music_path to find songs that need to be added to the
    database and adds those files to the read_queue.

    Returns True when at least one file was queued for reading, False
    when nothing new was found.
    """
    if self.stopping.is_set():
        return
    logger.debug("Scanning for new files.")
    self.scanning.set()
    self.jobs[self.current_job]['status'] = 'Scanning for new files'
    self.jobs[self.current_job]['unchanged_items'] = 0
    # Iterate through all the folders and files in music_path
    for root, dirs, files in os.walk(self.music_path):
        if self.stopping.is_set():
            break
        for item in files:
            # Skip AppleDouble resource-fork directories.
            # NOTE(review): os.path.split(root) yields a (head, tail)
            # pair, so this only matches when head or tail is exactly
            # '.AppleDouble' — verify vs. root.split(os.sep).
            if '.AppleDouble' in os.path.split(root):
                continue
            # Get the file extension, e.g. 'mp3' or 'flac', and see if
            # it's in the list of extensions we're supposed to look for.
            extension = os.path.splitext(item)[1].lower()[1:]
            if extension in cfg['MUSIC_EXTENSIONS']:
                # Get the full decoded path of the file. The decoding
                # part is important if the filename includes non-ASCII
                # characters.
                location = os.path.join(root, item)
                if isinstance(location, unicode):
                    location = location.encode('utf-8')
                filename = os.path.split(location)[1]
                self.jobs[self.current_job]['current_item'] = filename
                # Generate a unique ID for this song by making a SHA-1
                # hash of its location.
                id = hashlib.sha1(location).hexdigest()
                # Get the time that this file was last modified
                mtime = os.stat(os.path.join(root, item))[8]
                # Find out if this song is already in the database and
                # if so whether it has been modified since the last time
                # we scanned it.
                try:
                    record_mtime = self.records[id]['mtime']
                except:
                    record_mtime = None
                if mtime != record_mtime:
                    try:
                        revision = self.records[id]['_rev']
                    except:
                        revision = None
                    # Add the song to the queue to be read
                    self.read_queue.put((location, id, mtime, revision))
                else:
                    self.jobs[self.current_job]['unchanged_items'] += 1
    self.jobs[self.current_job]['queued_items'] = self.read_queue.qsize()
    self.scanning.clear()
    if self.read_queue.qsize() < 1:
        logger.debug("No new files found.")
        return False
    logger.debug("Queued %d songs to be read."
                 % self.read_queue.qsize())
    return True
def _update(self): self.start_time = time() # Clean the database of files that no longer exist and get a list # of the remaining songs in the database. self._clean() # Create a queue of files from which we need to read metadata. self._scan() if self.read_queue.qsize() > 0: # Spawn a new thread to handle the queue of files that need read. self.read_thread = Thread(target=self._process_read_queue) self.read_thread.start() # Give the read_thread a chance to get going and then spawn a # new thread to handle inserting metadata into the database. sleep(5) self.db_thread = Thread(target=self._add_items_to_db) self.db_thread.start() # Block while we wait for everything to finish self.db_working.wait(None) # Join our threads back self.read_thread.join() self.db_thread.join() self.jobs[self.current_job]['current_item'] = 'None' # Compact the database so it doesn't get unreasonably large. We do # this in a separate thread so we can go ahead and return since the # we've added everything we need to the database already and we # don't want to wait for this to finish. self.compact_thread = Thread(target=self._compact) self.compact_thread.start() self._compact() self.updating.clear() self.finished.set() finish_time = time() - self.start_time self.jobs[self.current_job]['status'] = 'Finished' self.jobs[self.current_job]['total_time'] = finish_time logger.debug("Added all new songs in %0.2f seconds." % finish_time) logger.info("Updated library in %0.2f seconds." % finish_time)
def _add_items_to_db(self):
    """Watches the db_queue for metadata from songs and inserts that data
    into the database.

    Runs until the reader has finished AND the queue is drained, or until
    a stop is requested.
    """
    self.db_working.set()
    # Fixed: the loop condition was
    #   reading.is_set() or db_queue.qsize() > 0 and not stopping.is_set()
    # and since 'and' binds tighter than 'or', the stop flag was ignored
    # for as long as the reader was still running. Parenthesize so a stop
    # request always ends the loop.
    while (self.reading.is_set() or self.db_queue.qsize() > 0) \
            and not self.stopping.is_set():
        if self.db_queue.qsize() > 0:
            songs = []
            while self.db_queue.qsize() > 0:
                # Grab all the items out of the database queue
                songs.extend(self.db_queue.get())
            updated = 0
            new = []
            for song in songs:
                if not song:
                    # There must've been a problem reading this file so
                    # we'll skip it.
                    continue
                # Get the metadata ready to send to the database
                new.append(song)
                if "_rev" in song:
                    # An existing revision means an update, not an insert.
                    updated += 1
                filename = os.path.split(song['location'])[1]
                self.jobs[self.current_job]['current_item'] = filename
            # Make our changes to the database
            self.couchdb.bulk_save(new)
            new_items = len(new) - updated
            self.jobs[self.current_job]['new_items'] += new_items
            self.jobs[self.current_job]['changed_items'] += updated
            logger.debug("Added %d songs to the database, %d of which already existed." % (len(new), updated))
        sleep(5)
    self.db_working.clear()
def _clean(self):
    """Searches music_path to find if any of the songs in records have
    been removed, and then remove them from couchdb.

    Leaves self.records holding the surviving songs keyed by document id.
    """
    if self.stopping.is_set():
        return
    self.cleaning.set()
    status = 'Searching for changed/removed files'
    self.jobs[self.current_job]['status'] = status
    logger.debug(status)
    start_time = time()
    records = self.couchdb.view('songs/mtime')
    # Create a list of files that are missing and need to be removed
    # and also a dict that is going to hold all of the songs from the
    # database whose corresponding files still exist.
    remove = []
    songs = {}
    removed = 0
    for song in records:
        if self.stopping.is_set():
            break
        path = song['key']
        if isinstance(path, unicode):
            path = path.encode('utf-8')
        filename = os.path.split(path)[1]
        self.jobs[self.current_job]['current_item'] = filename
        # Check if the file this database record points to is still
        # there, and add it to the list to be removed if it's not.
        if not (os.path.isfile(path) and path.startswith(self.music_path)):
            remove.append(self.couchdb[song['id']])
            removed += 1
            # Once our list of songs to be removed hits 100, delete
            # them all in a batch. This is much quicker than doing them
            # one at a time.
            if removed % 100 == 0:
                self.couchdb.bulk_delete(remove)
                self.jobs[self.current_job]['removed_items'] = removed
                remove = []
                logger.debug("Removed %d songs in %0.2f seconds." % (removed, time() - start_time))
        else:
            # Add the song to the dict we're going to return
            songs[song['id']] = song['value']
    self.jobs[self.current_job]['current_item'] = 'None'
    # We ran out of songs without hitting the magic number 100 to
    # trigger a batch delete, so let's get any stragglers now.
    self.couchdb.bulk_delete(remove)
    self.jobs[self.current_job]['removed_items'] = removed
    logger.debug("Removed %d songs in %0.2f seconds." % (removed, time() - start_time))
    self.records = songs
    self.cleaning.clear()
def stop(self):
    """Ask the scanner to stop and wait up to ten seconds for it to do so."""
    if not self.stopping.is_set():
        logger.debug("Stopping the file scanner.")
        self.stopping.set()
        # Poll once per second, giving the update thread at most ten
        # seconds to notice the flag and wind down.
        remaining = 10
        while remaining > 0 and self.updating.is_set():
            sleep(1)
            remaining -= 1
        if self.updating.is_set():
            logger.debug("Timed out waiting for file scanner to stop.")
        else:
            # The scanner finished cleanly; reset the flag for next time.
            self.stopping.clear()
    if not self.stopping.is_set():
        logger.debug("File scanner has stopped.")
def __init__(self, db_url=cfg['COUCHDB_URL'], db_username=cfg['COUCHDB_USER'], db_password=cfg['COUCHDB_PASSWORD']): """Sets up the database connection and starts loading songs.""" # Initiate a connection to the database server self.shutting_down = threading.Event() self.building_cache = threading.Event() logger.debug("Initiating the database connection.") filters = [] if db_username or db_password: filters.append(BasicAuth(db_username, db_password)) self._server = Server(db_url, filters=filters) connect_retries = 0 while True: try: # Get a reference to our database self.db = self._server.get_or_create_db(cfg['DATABASE_NAME']) break except Exception as e: logger.error(str(e)) if connect_retries < 3: logger.error("Error connecting to CouchDB.") connect_retries += 1 sleep(3) else: logger.error("Could not connect to CouchDB. Quitting.") sys.exit(1) logger.debug("Loading database design documents.") # Load our database views from the filesystem loader = FileSystemDocsLoader(os.path.join(cfg['ASSETS_DIR'], 'views/_design')) try: loader.sync(self.db, verbose=True) except: pass logger.debug("Initializing the database cache.") self.cache = BlofeldCache(self.db) self.scanner = Scanner(cfg['MUSIC_PATH'], self.db) self.update()
def create_archive(songs):
    """Create a zip archive containing the given songs (plus album art
    when available) and return its path, or None on failure.

    The archive name is derived from a hash of its contents, so repeated
    requests for the same set of songs reuse the same cache file.
    """
    try:
        files = []
        for song in songs:
            location = song["location"].encode("utf8")
            # Fixed: removed a leftover `print type(location)` debug
            # statement that wrote to stdout on every song.
            zip_path = song["location"].replace(cfg["MUSIC_PATH"], "").encode("utf8")
            files.append((location, zip_path))
            cover_source = find_cover(song)
            # Fixed: only include album art when some was actually found;
            # a None path used to reach archive.write() and abort the
            # whole archive.
            if cover_source:
                zip_cover_path = os.path.join(os.path.dirname(zip_path), "Cover.jpg")
                cover = (cover_source, zip_cover_path)
                if cover not in files:
                    files.append(cover)
        path = os.path.join(cfg["CACHE_DIR"], "%s.zip" % hashlib.sha1(str(files)).hexdigest())
        logger.debug("Creating archive at %s" % path)
        logger.debug(files)
        archive = zipfile.ZipFile(path, "w", zipfile.ZIP_STORED)
        try:
            for item in files:
                logger.debug('Added "%s" to "%s"' % item)
                archive.write(*item)
        finally:
            # Fixed: close the archive even when a write fails so the
            # handle (and a partial file) isn't leaked.
            archive.close()
        return path
    except Exception as e:
        logger.exception(e)
def _build_cache(self):
    """Warm the database cache by issuing the common queries up front.

    Waits for a running scan to finish first; each warm-up query is
    skipped if a shutdown or a new scan has started in the meantime.
    """
    if self.scanner.updating.is_set():
        self.scanner.finished.wait(None)
        self.scanner.finished.clear()
    logger.debug("Building the database cache.")
    self.building_cache.set()
    start_time = time()
    if not (self.shutting_down.is_set() or self.scanner.updating.is_set()):
        self.artists()
    if not (self.shutting_down.is_set() or self.scanner.updating.is_set()):
        self.albums()
    if not (self.shutting_down.is_set() or self.scanner.updating.is_set()):
        self.songs(suggest="The")
    if not (self.shutting_down.is_set() or self.scanner.updating.is_set()):
        self.artists(query="The")
    if not (self.shutting_down.is_set() or self.scanner.updating.is_set()):
        self.albums(query="The")
    if not (self.shutting_down.is_set() or self.scanner.updating.is_set()):
        self.songs(query="The")
    self.building_cache.clear()
    # Fixed: the elapsed time was formatted with %0.2d, which truncated
    # it to whole seconds; %0.2f shows the fractional part as intended.
    if not (self.shutting_down.is_set() or self.scanner.updating.is_set()):
        logger.debug("Finished building the database cache in %0.2f seconds." % (time() - start_time))
    else:
        logger.debug("Database cache building was interrupted after %0.2f seconds." % (time() - start_time))
def stop():
    """Shut down the whole application: transcoder, library scanner, then
    the CherryPy engine.
    """
    transcoder.stop()
    library.stop()
    logger.debug("Stopping web server.")
    # Let any in-flight request handlers know we're going away.
    shutting_down.set()
    cherrypy.engine.exit()
def index(self):
    """Render and return the main page of the web interface."""
    logger.debug("%s (%s)\tindex()\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, cherrypy.request.headers))
    # The index page lives in the active theme's template directory.
    page = Template(file=os.path.join(cfg['THEME_DIR'], 'index.tmpl'))
    return page.respond()
def query(self, object_type, query=None, include=None, offset=0, limit=0, sort=None):
    """Query the library for artists, albums or songs, returning JSON.

    ``query`` is a space-separated list of terms (shlex-split, so quoted
    phrases work); a term of the form ``field:pattern`` fnmatch-matches
    against that song field, while a bare term is substring-matched
    against the song's precomputed 'query' string. ``include`` requests
    nested data ('albums', 'songs', 'artists'); ``sort`` names the sort
    keys; ``offset``/``limit`` page through the result.
    """
    logger.debug("%s (%s)\tquery(object_type=%s, query=%s, include=%s, offset=%s, limit=%s, sort=%s)\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, object_type, query, include, offset, limit, sort, cherrypy.request.headers))
    cherrypy.response.headers['Content-Type'] = 'application/json'
    if object_type not in ('artists', 'albums', 'songs'):
        raise cherrypy.HTTPError(501,'The requested option does not exist')
    retval = {'offset': offset, object_type: []}
    offset = int(offset)
    limit = int(limit)
    if query:
        query = shlex.split(query.lower())
    if include:
        include = include.lower().split(' ')
    if sort:
        sort = shlex.split(sort.lower())
    else:
        # Default ordering for all object types.
        sort = ('albumartist', 'album', 'year', 'discnumber', 'tracknumber')
    # tree maps artist -> album -> [songs] for the non-song object types.
    tree = {}
    for item in self.library.cache.view('query/songs'):
        song = item['value']
        if query:
            match = True
            for term in query:
                field = term.split(':', 1)
                if len(field) == 1:
                    # Bare term: substring match on the search blob.
                    if term not in song['query']:
                        match = False
                        break
                else:
                    field, word = field
                    if field not in song.keys():
                        match = False
                        break
                    else:
                        data = song[field]
                        if type(data) is not list:
                            data = [data]
                        if not data:
                            match = False
                            break
                        # fnmatch allows * and ? wildcards in the pattern.
                        for entry in data:
                            if not fnmatch.fnmatch(unicode(entry).lower(), word):
                                match = False
                                break
            if not match:
                continue
        # Drop the internal search blob before returning the song.
        try:
            del song['query']
        except:
            pass
        if object_type == 'songs':
            retval[object_type].append(song)
            continue
        # Group by album artist when available, else track artist.
        if song.has_key('albumartist'):# and not (song['albumartist'].lower() in ('various', 'various artists', 'va')):
            artist = song['albumartist']
        else:
            artist = song['artist']
        if not tree.has_key(artist):
            tree[artist] = {}
        if not tree[artist].has_key(song['album']):
            tree[artist][song['album']] = []
        tree[artist][song['album']].append(song)
    if object_type in ('albums', 'artists'):
        artists = []
        for artist, albums in tree.iteritems():
            item = {'artist': artist}
            if (include and 'albums' in include) or object_type == 'albums':
                item['albums'] = []
                for album, songs in albums.iteritems():
                    album_item = {'album': album}
                    # fields = ('date', 'year', 'genre', 'totaltracks',
                    #           'tracktotal', 'totaldiscs', 'disctotal',
                    #           'compilation', 'albumartist')
                    fields = ('date', 'year')
                    # Copy album-level fields from the first song that
                    # has them.
                    for key in fields:
                        try:
                            album_item[key] = songs[0][key]
                        except:
                            continue
                    if include and ('songs' in include):
                        # NOTE(review): album_item has no 'songs' key at
                        # this point — this sort looks like it would raise
                        # KeyError; verify the songs are meant to be
                        # attached to album_item first.
                        utils.complex_sort(album_item['songs'], *sort)
                    if album_item not in item['albums']:
                        item['albums'].append(album_item)
                utils.complex_sort(item['albums'], *sort)
            elif include and ('songs' in include):
                item['songs'] = []
                for album, songs in albums.iteritems():
                    item['songs'] += songs
                utils.complex_sort(item['songs'], *sort)
            artists.append(item)
        if object_type == 'albums':
            # Flatten the per-artist album lists into one list.
            for artist in artists:
                retval[object_type] += artist['albums']
            if include and ('artists' in include):
                retval['artists'] = sorted(tree.keys())
        else:
            retval['artists'] = artists
    utils.complex_sort(retval[object_type], *sort)
    retval['total'] = len(retval[object_type])
    # Apply paging after sorting so pages are stable.
    if limit:
        retval[object_type] = retval[object_type][offset:offset+limit]
    else:
        retval[object_type] = retval[object_type][offset:]
    return json.dumps(retval, indent=4)
def suggest(self, term=None):
    """Return search suggestions for ``term`` as a JSON list."""
    logger.debug("%s (%s)\tsuggest(term=%s)\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, term, cherrypy.request.headers))
    result = self.library.songs(suggest=True, query=term)
    # Consistency fix: every other JSON handler in this class sets the
    # Content-Type header; this one was missing it.
    cherrypy.response.headers['Content-Type'] = 'application/json'
    return json.dumps(result)
def albums(self, artists=None, query=None):
    '''Returns a list of albums as dictionary objects.

    artists -- optional list of artist ids restricting the results.
    query -- optional free-text search term; all terms must match.
    '''
    logger.debug("Generating album list.")
    start_time = time()
    # Create a list to hold the albums we're going to return to the client
    result = []
    # If the client didn't give any arguments, get all the artists from the
    # database and append them to our results list.
    if not artists and not query:
        for album in self.cache.view('albums/all', group="true"):
            result.append({'id': album['value'], 'title': album['key']})
    if query and artists:
        # Clean up the search term
        query = utils.clean_text(query).split(' ')
        for artist in artists:
            # Get all the albums from the database using the search view
            # NOTE(review): 'artist' appears to be an artist id/hash used
            # as the view key — confirm against the view definition.
            for album in self.cache.view('albums/search', key=artist):
                # Create an object that we can append to the results list
                entry = {
                    'id': album['value']['album_hash'],
                    'title': album['value']['album']
                }
                if entry in result:
                    continue
                # Every search term must appear in the album's search string.
                match = True
                for term in query:
                    if term not in utils.clean_text(album['value']['search_string']):
                        match = False
                if match:
                    result.append(entry)
    if query and not artists:
        # Clean up the search term
        query = utils.clean_text(query).split(' ')
        # Get all the albums from the database using the search view
        for album in self.cache.view('albums/search'):
            # Create an object that we can append to the results list
            entry = {
                'id': album['value']['album_hash'],
                'title': album['value']['album']
            }
            if entry in result:
                continue
            # Clean up the search field and see if our search term is
            # in it. If it is, make sure it's not a duplicate result
            # and then append it to the results list.
            match = True
            for term in query:
                if term not in utils.clean_text(album['value']['search_string']):
                    match = False
            if match:
                result.append(entry)
    if artists and not query:
        # Client asked for albums by a specific artist(s), so get only
        # those albums from the database.
        for artist_hash in artists:
            for album in self.cache.view('albums/by_artist_id', key=artist_hash):
                # Create an object to append to our results list
                entry = {
                    'id': album['value']['album_hash'],
                    'title': album['value']['album']
                }
                # Get the artist ID
                artist = album['key']
                # Make sure this isn't a duplicate result
                if entry not in result:
                    result.append(entry)
    finish_time = time() - start_time
    logger.debug("Generated list of %d albums in %0.2f seconds." % (len(result), finish_time))
    return sorted(result, key=itemgetter('title'))
def get_song(self, songid=None, format=False, bitrate=False):
    '''Serve a single song, transcoding when the client asks for a format
    or bitrate the file does not already have.

    songid  -- library/database key of the song to serve.
    format  -- optional comma-separated list of acceptable formats
               (e.g. "mp3,ogg"); False means "whatever the file is".
    bitrate -- optional target bitrate in kbps; forces a transcode when
               the file's own bitrate is higher.

    Raises cherrypy.HTTPError 404 (unknown songid), 416 (range request on
    a transcoded stream) or 501 (unsupported transcode target).
    '''
    logger.debug("%s (%s)\tget_song(songid=%s, format=%s, bitrate=%s)\tHeaders: %s" % (utils.find_originating_host(cherrypy.request.headers), cherrypy.request.login, songid, format, bitrate, cherrypy.request.headers))
    log_message = "%s (%s) is listening to " % (cherrypy.request.login, utils.find_originating_host(cherrypy.request.headers))

    # Throttle "is listening to" log lines: log a given song at most once
    # every 30 seconds so range-requesting clients don't flood the log.
    last = self.multi_requests.get(songid, None)
    show_log = False
    if not last or (last and time.time() > (last + 30)):
        show_log = True
        self.multi_requests[songid] = time.time()

    try:
        range_request = cherrypy.request.headers['Range']
    except KeyError:
        # No Range header: treat as a request for the whole file.
        range_request = "bytes=0-"

    try:
        song = self.library.db[songid]
        path = song['location']
    except Exception:
        log_message += "a song ID which could not be found: %s" % str(songid)
        logger.error(log_message)
        raise cherrypy.HTTPError(404)

    log_message += '"%s" by %s from %s ' % (song['title'].encode(cfg['ENCODING']), song['artist'].encode(cfg['ENCODING']), song['album'].encode(cfg['ENCODING']))

    # Best-effort client identification, purely for nicer log lines.
    try:
        client_os, client_browser = httpagentparser.simple_detect(cherrypy.request.headers['User-Agent'])
    except Exception:
        client_os = 'an OS'
        client_browser = 'a browser'

    # Decide whether the requested bitrate alone forces a transcode: only
    # for recognized MP3 bitrates, and only when downsampling.
    try:
        if bitrate:
            bitrate = str(bitrate)
        force_transcode = False
        if bitrate and \
                (int(bitrate) in [8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320]) and \
                (song['bitrate'] / 1024 > int(bitrate)):
            # NOTE(review): threshold divides by 1024 but the log lines below
            # divide by 1000 -- confirm which unit song['bitrate'] uses.
            force_transcode = True
    except Exception:
        # Unparseable bitrate: ignore it and serve as-is.
        pass

    # Work out what the file actually is from its MIME type and extension,
    # then normalize to one family of equivalent format names.
    try:
        song_format = [song['mimetype'].split('/')[1], os.path.splitext(path)[1].lower()[1:]]
    except Exception:
        song_format = [os.path.splitext(path)[1].lower()[1:]]
    if any(x in ['mp3'] for x in song_format):
        song_mime = 'audio/mpeg'
        song_format = ['mp3']
    elif any(x in ['ogg', 'vorbis', 'oga'] for x in song_format):
        song_mime = 'audio/ogg'
        song_format = ['ogg', 'vorbis', 'oga']
    elif any(x in ['m4a', 'aac', 'mp4'] for x in song_format):
        song_mime = 'audio/x-m4a'
        song_format = ['m4a', 'aac', 'mp4']
    else:
        song_mime = 'application/octet-stream'

    if not (format or bitrate):
        # No specific format or bitrate requested: send the file as-is.
        # FIX: log kbps like the other branches (was raw song['bitrate'])
        # and honour the show_log throttle (was logged unconditionally).
        log_message += "(%skbps %s)" % (str(song['bitrate'] / 1000), song_format[0])
        if client_os and client_browser:
            log_message += " using %s on %s." % (client_browser, client_os)
        else:
            log_message += "."
        if show_log:
            logger.info(log_message)
        if not os.name == 'nt':
            path = path.encode(cfg['ENCODING'])
        return serve_file(path, song_mime, "inline", os.path.split(path)[1])

    if format:
        format = str(format).split(',')
    else:
        format = song_format
    logger.debug("The client wants %s and the file is %s" % (format, song_format))

    if any(x in song_format for x in format) and not force_transcode:
        # The file already satisfies the request: send it as-is.
        log_message += "(%skbps %s)" % (str(song['bitrate'] / 1000), song_format[0])
        if client_os and client_browser:
            log_message += " using %s on %s." % (client_browser, client_os)
        else:
            log_message += "."
        if show_log:
            logger.info(log_message)
        if not os.name == 'nt':
            path = path.encode(cfg['ENCODING'])
        return serve_file(path, song_mime, "inline", os.path.split(path)[1])

    # Transcode path.
    log_message += "(transcoded from %skbps %s to %skbps %s)" % (str(song['bitrate'] / 1000), song_format[0], str(bitrate), format[0])
    if client_os and client_browser:
        log_message += " using %s on %s." % (client_browser, client_os)
    else:
        log_message += "."
    if show_log:
        logger.info(log_message)

    # Map the requested format family to a transcode target and MIME type.
    if any(x in ['mp3'] for x in format):
        target, target_mime = 'mp3', 'audio/mpeg'
    elif any(x in ['ogg', 'vorbis', 'oga'] for x in format):
        target, target_mime = 'ogg', 'audio/ogg'
    elif any(x in ['m4a', 'aac', 'mp4'] for x in format):
        target, target_mime = 'm4a', 'audio/x-m4a'
    else:
        raise cherrypy.HTTPError(501)

    # A transcoded stream has no known length, so range requests cannot be
    # honoured; 416 breaks <audio> in some WebKit browsers but stops them
    # spawning one transcoder thread per range request.
    if range_request != 'bytes=0-':
        logger.debug("Got a range request for a file that needs transcoded: %s" % range_request)
        raise cherrypy.HTTPError(416)

    cherrypy.response.headers['Content-Type'] = target_mime
    if target == 'mp3':
        # jPlayer's Flash fallback insists on the non-standard audio/mp3.
        try:
            if cherrypy.request.headers['Referer'].lower().endswith('jplayer.swf'):
                cherrypy.response.headers['Content-Type'] = 'audio/mp3'
        except Exception:
            pass
    return self.transcoder.transcode(path, target, bitrate)