def show_file_list(path='.'):
    """Render a file listing (or search results) for ``path``.

    Query parameters:
        p -- optional search term; when present, search results are shown
             instead of the directory listing
        f -- response format; 'json' returns a JSON payload, anything else
             returns a template context dict
        filename -- when the path resolves to a file, forces download with
             the given filename

    Returns either a JSON string (f=json), a static file response (when
    ``path`` points at a file), or a dict for the HTML template.
    """
    search = request.params.get('p')
    resp_format = request.params.get('f', '')
    conf = request.app.config
    is_missing = False
    is_search = False
    files = init_filemanager()
    if search:
        # Search mode: there is no real directory context, so relative
        # path and "up" link are neutralized.
        relpath = '.'
        up = ''
        dirs, file_list = files.get_search_results(search)
        is_search = True
        # Exactly one matching directory and no files: jump straight to it.
        # Backslashes are normalized so Windows-style paths form valid URLs.
        if not len(file_list) and len(dirs) == 1:
            redirect(i18n_url('files:path',
                              path=dirs[0].path.replace('\\', '/')))
        if not dirs and not file_list:
            is_missing = True
            readme = _('The files you were looking for could not be found')
        else:
            readme = _('This list represents the search results')
    else:
        is_search = False
        try:
            dir_contents = files.get_dir_contents(path)
            (path, relpath, dirs, file_list, readme) = dir_contents
        except files.DoesNotExist:
            # Missing folder: fall through to an empty listing with a
            # translated notice instead of erroring out.
            is_missing = True
            relpath = '.'
            dirs = []
            file_list = []
            readme = _('This folder does not exist')
        except files.IsFileError as err:
            # ``path`` is a file, not a folder: either describe it as JSON
            # or serve its contents directly.
            if resp_format == 'json':
                fstat = os.stat(path)
                response.content_type = 'application/json'
                return json.dumps(dict(
                    name=os.path.basename(path),
                    size=fstat[stat.ST_SIZE],
                ))
            options = {'download': request.params.get('filename', False)}
            return static_file(err.path, root=files.filedir, **options)
        # Parent-directory link, relative to the configured content root.
        up = os.path.normpath(os.path.join(path, '..'))
        up = os.path.relpath(up, conf['content.filedir'])
    if resp_format == 'json':
        response.content_type = 'application/json'
        return json.dumps(dict(
            dirs=dirs,
            files=dictify_file_list(file_list),
            readme=to_unicode(readme),
            is_missing=is_missing,
            is_search=is_search,
        ))
    return dict(path=relpath, dirs=dirs, files=file_list, up=up,
                readme=readme, is_missing=is_missing, is_search=is_search)
def import_content(srcdir, destdir, meta_filenames, fsal, notifications,
                   notifications_db):
    """Discover content directories under ``srcdir`` using the first
    generation folder structure and copy them into ``destdir``, while
    dropping the old nested structure and putting them into a single
    folder which name is generated from the slugified title of the
    content.

    :param srcdir: root of the legacy content tree
    :param destdir: library root receiving the imported folders
    :param meta_filenames: candidate metadata file names; the first entry
        is the canonical name the upgraded metadata is written to
    :param fsal: file-system abstraction used for existence checks and
        the actual transfer
    :param notifications: notification facility used to report the result
    :param notifications_db: database handle passed through to
        ``notifications.send``
    """
    srcdir = os.path.abspath(srcdir)
    if not os.path.exists(srcdir):
        logging.info(u"Content directory: {0} does not exist.".format(srcdir))
        return
    logging.info(u"Starting content import of {0}".format(srcdir))
    added = 0
    for src_path in find_content_dirs(srcdir, meta_filenames):
        meta = read_meta(src_path, meta_filenames)
        if not meta:
            logging.error(u"Content import of {0} skipped. No valid metadata "
                          "was found.".format(src_path))
            continue  # metadata couldn't be found or read, skip this item
        # process and save the found metadata under the canonical name
        upgrade_meta(meta)
        meta_path = os.path.join(src_path, meta_filenames[0])
        with open(meta_path, 'w') as meta_file:
            json.dump(meta, meta_file)
        # delete any other meta files
        delete_old_meta(src_path, meta_filenames)
        # move content folder into library; title falls back from metadata
        # title to URL to a random one, truncated at the byte level
        title = to_unicode(
            to_bytes(
                safe_title(meta['title']) or
                safe_title(meta['url']) or
                get_random_title())[:MAX_TITLE_LENGTH])
        match = FIRST_CHAR.search(title)
        # bucket folders by first alphanumeric character when one exists
        first_letter = (match.group() if match else None) or title[0]
        dest_path = os.path.join(destdir, first_letter.upper(), title)
        if not fsal.exists(dest_path, unindexed=True):
            (success, error) = fsal.transfer(src_path, dest_path)
            if not success:
                logging.error(u"Content import of {0} failed with "
                              "{1}".format(src_path, error))
                continue
        # adding to database will happen when we're notified by fsal about
        # the event
        added += 1
    success_msg = "{0} content items imported from {1}.".format(added, srcdir)
    logging.info(success_msg)
    notifications.send(success_msg, db=notifications_db)
def import_content(srcdir, destdir, meta_filenames, fsal, notifications,
                   notifications_db):
    """Discover content directories under ``srcdir`` using the first
    generation folder structure and copy them into ``destdir``, while
    dropping the old nested structure and putting them into a single
    folder which name is generated from the slugified title of the
    content."""
    def library_destination(meta):
        # Derive the destination folder: slugified title (with URL and
        # random-title fallbacks), byte-truncated, bucketed under the
        # uppercased first alphanumeric character.
        raw = (safe_title(meta['title']) or safe_title(meta['url']) or
               get_random_title())
        title = to_unicode(to_bytes(raw)[:MAX_TITLE_LENGTH])
        found = FIRST_CHAR.search(title)
        letter = (found.group() if found else None) or title[0]
        return os.path.join(destdir, letter.upper(), title)

    srcdir = os.path.abspath(srcdir)
    if not os.path.exists(srcdir):
        logging.info(u"Content directory: {0} does not exist.".format(srcdir))
        return
    logging.info(u"Starting content import of {0}".format(srcdir))
    added = 0
    for src_path in find_content_dirs(srcdir, meta_filenames):
        meta = read_meta(src_path, meta_filenames)
        if not meta:
            # Metadata couldn't be found or read; skip this item.
            logging.error(u"Content import of {0} skipped. No valid metadata "
                          "was found.".format(src_path))
            continue
        # Upgrade the metadata and persist it under the canonical name.
        upgrade_meta(meta)
        canonical = os.path.join(src_path, meta_filenames[0])
        with open(canonical, 'w') as fobj:
            json.dump(meta, fobj)
        # Remove any leftover metadata files under the other names.
        delete_old_meta(src_path, meta_filenames)
        target = library_destination(meta)
        if not fsal.exists(target, unindexed=True):
            (moved, failure) = fsal.transfer(src_path, target)
            if not moved:
                logging.error(u"Content import of {0} failed with "
                              "{1}".format(src_path, failure))
                continue
        # Adding to the database will happen when fsal notifies us about
        # the transfer event.
        added += 1
    success_msg = "{0} content items imported from {1}.".format(added, srcdir)
    logging.info(success_msg)
    notifications.send(success_msg, db=notifications_db)
def extract(self):
    """Parse the dirinfo file at ``self.path`` into a per-language dict.

    Returns a mapping ``{language: {key: value}}``; keys without a
    language suffix land under the module-level ``NO_LANGUAGE`` bucket.
    A default cover image is injected when the language-less section
    declares none.
    """
    data = dict()
    # read and convert to unicode all lines
    with self.fsal.open(self.path, 'r') as dirinfo_file:
        raw = [to_unicode(line) for line in dirinfo_file.readlines()]
    # unpack each line
    for line in raw:
        # Split only on the FIRST separator so values that themselves
        # contain the splitter no longer raise ValueError.
        (key, value) = line.split(self.SPLITTER, 1)
        match = self.ENTRY_REGEX.match(key)
        if match:
            (key, language) = match.groups()
        else:
            language = NO_LANGUAGE
        data.setdefault(language, {})
        data[language][key] = value.strip()
    # if there is no folder cover image under the language-less version of
    # the metadata, check for the default cover and add if available.
    # ``get`` avoids the KeyError the old code raised when the file had
    # no language-less entries at all.
    if not data.get(NO_LANGUAGE, {}).get('cover'):
        self._add_default_cover(data)
    return data
def read_file(self):
    """Read dirinfo file from disk.

    Populates ``self._data`` as ``{language: {key: value}}``; keys with
    no language suffix go under ``self.NO_LANGUAGE``. Any failure resets
    ``self._data`` to an empty dict and logs the exception.
    """
    info_file_path = os.path.join(self.path, self.FILENAME)
    fsal = self.supervisor.exts.fsal
    if fsal.exists(info_file_path):
        try:
            with fsal.open(info_file_path, 'r') as info_file:
                info = [to_unicode(line) for line in info_file.readlines()]
                for line in info:
                    # Split only on the FIRST '=' so a value containing
                    # '=' no longer raises ValueError (which the broad
                    # except below would turn into a full data wipe).
                    key, value = line.split('=', 1)
                    match = self.ENTRY_REGEX.match(key)
                    if match:
                        (key, language) = match.groups()
                    else:
                        language = self.NO_LANGUAGE
                    self._data.setdefault(language, {})
                    self._data[language][key] = value.strip()
        except Exception:
            # Best-effort reader: never propagate, just reset and log.
            self._data = dict()
            msg = ".dirinfo reading of {0} failed.".format(self.path)
            logging.exception(msg)
def _extend_file(self, fs_obj):
    """Annotate ``fs_obj`` with a guessed mimetype and its parent
    directory's name, then return it."""
    guessed, _encoding = mimetypes.guess_type(fs_obj.rel_path)
    fs_obj.mimetype = guessed
    parent_dir = os.path.dirname(fs_obj.rel_path)
    fs_obj.parent_name = to_unicode(os.path.basename(parent_dir))
    return fs_obj
def test_to_unicode():
    """to_unicode coerces ints, bytes and text to a unicode string."""
    assert mod.to_unicode(1) == '1'
    assert mod.to_unicode(b'foobar') == 'foobar'
    assert mod.to_unicode('foo') == 'foo'
def __init__(self, path, fsal):
    """Keep the fsal interface and the unicode-coerced path."""
    self.fsal = fsal
    self.path = to_unicode(path)
def test_to_unicode():
    """to_unicode coerces ints, bytes and text to a unicode string."""
    cases = [
        (1, "1"),
        (b"foobar", "foobar"),
        ("foo", "foo"),
    ]
    for value, expected in cases:
        assert mod.to_unicode(value) == expected