def run(self, args, config):
    """Print aggregate statistics about the movie tree.

    Reports movie count, total runtime and size, and top genres,
    directors, qualities and containers.
    """
    mdb = self.get_metadata_db(args.tree)
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)
    total_runtime = 0
    total_size = 0
    # defaultdict(int) is the idiomatic counter (was: defaultdict(lambda: 0)).
    count_by_genre = defaultdict(int)
    count_by_director = defaultdict(int)
    count_by_quality = defaultdict(int)
    count_by_container = defaultdict(int)
    for movie_hash, movie in mdb.itermovies():
        movie_fullpath = os.path.join(args.tree, '.kolekto', 'movies', movie_hash)
        movie = mds.attach(movie_hash, movie)
        total_runtime += movie.get('runtime', 0)
        total_size += os.path.getsize(movie_fullpath)
        for genre in movie.get('genres', []):
            count_by_genre[genre] += 1
        for director in movie.get('directors', []):
            count_by_director[director] += 1
        count_by_quality[movie.get('quality', 'n/a')] += 1
        count_by_container[movie.get('container', 'n/a')] += 1
    printer.p(bold('Number of movies:'), mdb.count())
    # 'runtime' is accumulated as minutes; timedelta takes seconds.
    printer.p(bold('Total runtime:'), timedelta(seconds=total_runtime * 60))
    printer.p(bold('Total size:'), humanize_filesize(total_size))
    printer.p(bold('Genres top3:'), format_top(count_by_genre))
    printer.p(bold('Director top3:'), format_top(count_by_director))
    printer.p(bold('Quality:'), format_top(count_by_quality, None))
    printer.p(bold('Container:'), format_top(count_by_container, None))
def run(self, args, config):
    """Print the formatted listing of all movies in the tree.

    Movies are sorted by the keys named in the listing's 'order'
    option and rendered with the listing's 'pattern'.
    """
    mdb = self.get_metadata_db(args.tree)
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)
    # Get the current used listing:
    listing = self._config(args, config)

    def _sorter(item):
        # Tuple-unpacking parameters (def _sorter((h, m))) were removed in
        # Python 3 (PEP 3113); unpack inside the body instead.
        movie_hash, movie = item
        return tuple(movie.get(x) for x in listing['order'])

    movies = sorted(mdb.itermovies(), key=_sorter)
    for movie_hash, movie in movies:
        movie = mds.attach(movie_hash, movie)
        prepared_env = parse_pattern(listing['pattern'], movie, ListingFormatWrapper)
        printer.p(u'<inv><b> {hash} </b></inv> ' + listing['pattern'],
                  hash=movie_hash, **prepared_env)
def run(self, args, config):
    """Report movies that share the same (title, year) pair.

    Each duplicate group is printed with the hash, quality and
    extension of every copy.
    """
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)
    mdb = self.get_metadata_db(args.tree)
    # Group hash descriptions by (title, year); defaultdict(list) is the
    # idiomatic grouping container (was: defaultdict(lambda: [])).
    hash_by_title = defaultdict(list)
    for movie_hash, movie in mdb.itermovies():
        movie = mds.attach(movie_hash, movie)
        hash_info = '<inv> %s </inv> (%s/%s)' % (movie_hash,
                                                 movie.get('quality'),
                                                 movie.get('ext'))
        hash_by_title[movie.get('title', None), movie.get('year', None)].append(hash_info)
    # items() is portable across Python 2/3 (iteritems() is Python 2 only):
    for (title, year), hashs in hash_by_title.items():
        if len(hashs) > 1:
            printer.p('<b>{title}</b> ({year}): {hashs}',
                      title=title, year=year, hashs=' '.join(hashs))
def run(self, args, config):
    """Display the full metadata of one movie selected by its hash."""
    mdb = self.get_metadata_db(args.tree)
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)
    movie_hash = get_hash(args.input)
    try:
        movie = mdb.get(movie_hash)
    except KeyError:
        # Hash not present in the metadata database.
        printer.p('Unknown movie hash.')
    else:
        show(mds.attach(movie_hash, movie))
def run(self, args, config):
    """Render the movie collection to a static HTML listing page."""
    mdb = self.get_metadata_db(args.tree)
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)
    webexport = self._config(args, config)
    columns = [(col.args, col) for col in webexport['columns']]

    movies = []
    by_title = lambda pair: pair[1].get('title')
    for movie_hash, movie in sorted(mdb.itermovies(), key=by_title):
        filename = os.path.join(args.pool_path, movie_hash)
        movie = mds.attach(movie_hash, movie)
        rendered_cells = []
        for col in webexport['columns']:
            pattern = col.get('pattern')
            env = parse_pattern(pattern, movie, WebExportFormatWrapper)
            rendered_cells.append(pattern.format(**env))
        movies.append((filename, movie, rendered_cells))

    template = jinja2.Template(LISTING_TEMPLATE)
    rendered = template.render(columns=columns, movies=movies,
                               title=webexport['page_title'],
                               credits=webexport['page_credits'])
    with open(args.destination, 'w') as fdest:
        fdest.write(rendered.encode('utf8'))
def run(self, args, config):
    """Synchronize the on-disk view symlinks with the metadata database.

    Computes the set of links that should exist (from the db) and the
    set that does exist (from the fs), deletes stale links, then prunes
    the directories emptied by those deletions. Honors --dry-run.
    """
    import errno  # function-scope: keeps this fix self-contained

    mdb = self.get_metadata_db(args.tree)
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)
    if args.dry_run:
        printer.p('Dry run: I will not create or delete any link')
    # Create the list of links that must exists on the fs:
    db_links = {}
    with printer.progress(mdb.count(), task=True) as update:
        for movie_hash, movie in mdb.itermovies():
            movie = mds.attach(movie_hash, movie)
            for view in config.subsections('view'):
                for pattern in view.get('pattern'):
                    for result in format_all(pattern, movie):
                        filename = os.path.join(view.args, result)
                        if filename in db_links:
                            printer.p('Warning: duplicate link {link}', link=filename)
                        else:
                            db_links[filename] = movie_hash
            update(1)
    # Create the list of links already existing on the fs:
    fs_links = {}
    for view in config.subsections('view'):
        view_links = walk_links(os.path.join(args.tree, view.args),
                                prefix=view.args,
                                linkbase=os.path.join(args.tree, '.kolekto', 'movies'))
        fs_links.update(view_links)
    # items() is portable across Python 2/3 (iteritems() is Python 2 only):
    db_links = set(db_links.items())
    fs_links = set(fs_links.items())
    links_to_delete = fs_links - db_links
    # NOTE(review): links_to_create is only counted/reported here; no
    # creation loop is visible in this chunk -- confirm against full file.
    links_to_create = db_links - fs_links
    printer.p('Found {rem} links to delete, {add} links to create',
              rem=len(links_to_delete), add=len(links_to_create))
    dirs_to_cleanup = set()
    # Delete the old links:
    for filename, link in links_to_delete:
        printer.verbose('Deleting {file}', file=filename)
        if not args.dry_run:
            os.remove(os.path.join(args.tree, filename))
        # Remember every ancestor directory so it can be pruned if emptied:
        while filename:
            filename = os.path.split(filename)[0]
            dirs_to_cleanup.add(filename)
        dirs_to_cleanup.discard('')  # Avoid to delete view roots
    # Delete empty directories:
    for directory in dirs_to_cleanup:
        if not args.dry_run:
            try:
                os.rmdir(os.path.join(args.tree, directory))
            except OSError as err:  # 'as' form works on Python 2.6+ and 3
                # Use errno.ENOTEMPTY instead of the hard-coded 39: the
                # numeric value is platform-dependent (39 on Linux, 66 on
                # BSD/macOS). Ignore "Directory not empty", re-raise others.
                if err.errno != errno.ENOTEMPTY:
                    raise
            else:
                printer.verbose('Deleted directory {dir}', dir=directory)