def run(self, args, config):
    """Import one or more movie files into the Kolekto tree.

    For each file given on the command line: gather its metadata,
    optionally show/edit it, hardlink/symlink/copy the movie into the
    tree, save it in the metadata database, optionally delete the
    original and import its attachments.
    """
    # Check the args:
    if args.symlink and args.delete:
        raise KolektoRuntimeError('--delete can\'t be used with --symlink')
    elif args.symlink and args.hardlink:
        raise KolektoRuntimeError('--symlink and --hardlink are mutually exclusive')

    # Load the metadata database:
    mdb = self.get_metadata_db(args.tree)

    # Load informations from db:
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)

    attachment_store = AttachmentStore(os.path.join(args.tree, '.kolekto', 'attachments'))

    for filename in args.file:
        filename = filename.decode('utf8')
        movie = self._import(mdb, mds, args, config, filename)

        # Refresh the full data for the choosen movie:
        movie = mds.refresh(movie)

        # Append the import date
        movie['import_date'] = datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S')

        if args.show:
            show(movie)
            printer.p('')

        # Edit available data:
        if not args.auto and printer.ask('Do you want to edit the movie metadata',
                                         default=False):
            movie = self.profile.object_class(
                json.loads(printer.edit(json.dumps(movie, indent=True))))

        # Hardlink/symlink or copy the movie in the tree:
        if args.hardlink or args.symlink:
            printer.p('\nComputing movie sha1sum...')
            movie_hash = link(args.tree, filename, args.symlink)
        else:
            printer.p('\nCopying movie in kolekto tree...')
            movie_hash = copy(args.tree, filename)
        printer.p('')

        mdb.save(movie_hash, movie)
        printer.debug('Movie {hash} saved to the database', hash=movie_hash)

        if args.delete:
            os.unlink(filename)
            # Fixed: the message had no placeholder for the filename
            # keyword argument that is passed to the printer.
            printer.debug('Deleted original file {filename}', filename=filename)

        # Import the attachments:
        if movie_hash is not None and args.import_attachments:
            attachments = list_attachments(filename)
            if attachments:
                printer.p('Found {nb} attachment(s) for this movie:',
                          nb=len(attachments))
                for attach in attachments:
                    # Fixed: the message had no placeholder for the
                    # filename keyword argument.
                    printer.p(' - {filename}', filename=attach)
                if not args.auto and printer.ask('Import them?', default=True):
                    for attach in attachments:
                        _, ext = os.path.splitext(attach)
                        # NOTE(review): the file object is handed over to the
                        # store without an explicit close — presumably the
                        # store consumes and closes it; confirm.
                        attachment_store.store(movie_hash, ext.lstrip('.'),
                                               open(attach))
def run(self, args, config):
    """Print global statistics about the movies stored in the tree."""
    mdb = self.get_metadata_db(args.tree)
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)

    # Accumulators for the various statistics:
    total_runtime = 0
    total_size = 0
    by_genre = defaultdict(int)
    by_director = defaultdict(int)
    by_quality = defaultdict(int)
    by_container = defaultdict(int)

    for movie_hash, movie in mdb.itermovies():
        fullpath = os.path.join(args.tree, '.kolekto', 'movies', movie_hash)
        movie = mds.attach(movie_hash, movie)

        total_runtime += movie.get('runtime', 0)
        total_size += os.path.getsize(fullpath)

        for genre in movie.get('genres', []):
            by_genre[genre] += 1
        for director in movie.get('directors', []):
            by_director[director] += 1
        by_quality[movie.get('quality', 'n/a')] += 1
        by_container[movie.get('container', 'n/a')] += 1

    # Runtime is stored in minutes, timedelta takes seconds:
    printer.p(bold('Number of movies:'), mdb.count())
    printer.p(bold('Total runtime:'), timedelta(seconds=total_runtime * 60))
    printer.p(bold('Total size:'), humanize_filesize(total_size))
    printer.p(bold('Genres top3:'), format_top(by_genre))
    printer.p(bold('Director top3:'), format_top(by_director))
    printer.p(bold('Quality:'), format_top(by_quality, None))
    printer.p(bold('Container:'), format_top(by_container, None))
def run(self, args, config):
    """Refresh the metadata of a single movie, or of the whole database."""
    mdb = self.get_metadata_db(args.tree)
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)

    if args.input is not None:
        # A movie hash was provided: refresh only that movie.
        movie_hash = get_hash(args.input)
        try:
            movie = mdb.get(movie_hash)
        except KeyError:
            printer.p('Unknown movie hash.')
            return
        movie = mds.refresh(movie)
        show(movie)
        if printer.ask('Would you like to save the movie?', default=True):
            mdb.save(movie_hash, movie)
            printer.p('Saved.')
        return

    # No hash provided: offer to refresh every movie in the database.
    if printer.ask('Would you like to refresh all movies?', default=True):
        with printer.progress(mdb.count(), task=True) as advance:
            # Snapshot the iterator (presumably so that saving while
            # looping over the database is safe -- the original did this too):
            for movie_hash, movie in list(mdb.itermovies()):
                mdb.save(movie_hash, mds.refresh(movie))
                printer.verbose('Saved {hash}', hash=movie_hash)
                advance(1)
def run(self, args, config):
    """List the movies, sorted and formatted according to the listing config."""
    mdb = self.get_metadata_db(args.tree)
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)

    # Get the current used listing:
    listing = self._config(args, config)

    def _sorter(item):
        # Fixed: tuple parameter unpacking (``def _sorter((hash, movie))``)
        # is Python-2-only syntax (removed by PEP 3113); unpack explicitly
        # so the code also parses on Python 3.
        movie_hash, movie = item
        return tuple(movie.get(x) for x in listing['order'])

    movies = sorted(mdb.itermovies(), key=_sorter)

    for movie_hash, movie in movies:
        movie = mds.attach(movie_hash, movie)
        prepared_env = parse_pattern(listing['pattern'], movie, ListingFormatWrapper)
        printer.p(u'<inv><b> {hash} </b></inv> ' + listing['pattern'],
                  hash=movie_hash, **prepared_env)
def run(self, args, config):
    """Find and display movies sharing the same title and year."""
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)
    mdb = self.get_metadata_db(args.tree)

    # Group a short description of each movie by its (title, year) couple:
    hash_by_title = defaultdict(list)
    for movie_hash, movie in mdb.itermovies():
        movie = mds.attach(movie_hash, movie)
        description = '<inv> %s </inv> (%s/%s)' % (movie_hash,
                                                   movie.get('quality'),
                                                   movie.get('ext'))
        key = (movie.get('title', None), movie.get('year', None))
        hash_by_title[key].append(description)

    # Couples with more than one entry are duplicates:
    for (title, year), hashs in hash_by_title.iteritems():
        if len(hashs) > 1:
            printer.p('<b>{title}</b> ({year}): {hashs}',
                      title=title, year=year, hashs=' '.join(hashs))
def run(self, args, config):
    """Display the details of the movie designated by ``args.input``."""
    mdb = self.get_metadata_db(args.tree)
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)
    movie_hash = get_hash(args.input)

    try:
        movie = mdb.get(movie_hash)
    except KeyError:
        printer.p('Unknown movie hash.')
    else:
        show(mds.attach(movie_hash, movie))
def run(self, args, config):
    """Export the movie listing as an HTML page rendered from a template."""
    mdb = self.get_metadata_db(args.tree)
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)
    webexport = self._config(args, config)
    columns = [(x.args, x) for x in webexport['columns']]

    def _render_cell(column, movie):
        # Format one column of one movie using its configured pattern.
        env = parse_pattern(column.get('pattern'), movie, WebExportFormatWrapper)
        return column.get('pattern').format(**env)

    movies = []
    ordered = sorted(mdb.itermovies(), key=lambda item: item[1].get('title'))
    for movie_hash, movie in ordered:
        filename = os.path.join(args.pool_path, movie_hash)
        movie = mds.attach(movie_hash, movie)
        cells = [_render_cell(column, movie) for column in webexport['columns']]
        movies.append((filename, movie, cells))

    template = jinja2.Template(LISTING_TEMPLATE)
    page = template.render(columns=columns, movies=movies,
                           title=webexport['page_title'],
                           credits=webexport['page_credits'])
    with open(args.destination, 'w') as fdest:
        fdest.write(page.encode('utf8'))
def run(self, args, config):
    """Synchronize the view links on the filesystem with the database.

    Builds the set of links that should exist (one per view pattern and
    movie), compares it with the links actually present on the
    filesystem, deletes the stale ones and cleans up directories left
    empty.
    """
    import errno  # local import: only needed for the cleanup below

    mdb = self.get_metadata_db(args.tree)
    mds = MovieDatasource(config.subsections('datasource'), args.tree,
                          self.profile.object_class)

    if args.dry_run:
        printer.p('Dry run: I will not create or delete any link')

    # Create the list of links that must exists on the fs:
    db_links = {}
    with printer.progress(mdb.count(), task=True) as update:
        for movie_hash, movie in mdb.itermovies():
            movie = mds.attach(movie_hash, movie)
            for view in config.subsections('view'):
                for pattern in view.get('pattern'):
                    for result in format_all(pattern, movie):
                        filename = os.path.join(view.args, result)
                        if filename in db_links:
                            printer.p('Warning: duplicate link {link}', link=filename)
                        else:
                            db_links[filename] = movie_hash
            update(1)

    # Create the list of links already existing on the fs:
    fs_links = {}
    for view in config.subsections('view'):
        view_links = walk_links(os.path.join(args.tree, view.args),
                                prefix=view.args,
                                linkbase=os.path.join(args.tree, '.kolekto', 'movies'))
        fs_links.update(view_links)

    db_links = set(db_links.iteritems())
    fs_links = set(fs_links.iteritems())

    links_to_delete = fs_links - db_links
    links_to_create = db_links - fs_links

    # NOTE(review): links_to_create is computed and reported but never
    # acted on in this method -- confirm the creation happens elsewhere.
    printer.p('Found {rem} links to delete, {add} links to create',
              rem=len(links_to_delete), add=len(links_to_create))

    dirs_to_cleanup = set()

    # Delete the old links, remembering every ancestor directory so that
    # it can be removed afterwards if it ended up empty:
    for filename, link in links_to_delete:
        printer.verbose('Deleting {file}', file=filename)
        if not args.dry_run:
            os.remove(os.path.join(args.tree, filename))
        while filename:
            filename = os.path.split(filename)[0]
            dirs_to_cleanup.add(filename)
    dirs_to_cleanup.discard('')  # Avoid to delete view roots

    # Delete empty directories, deepest first: a set is unordered, so
    # without sorting a parent could be tried (and skipped as non-empty)
    # before its now-empty child was removed.
    for directory in sorted(dirs_to_cleanup,
                            key=lambda d: d.count(os.sep), reverse=True):
        if not args.dry_run:
            try:
                os.rmdir(os.path.join(args.tree, directory))
            except OSError as err:  # "as" syntax: valid on Python 2.6+ and 3
                # Ignore "Directory not empty"; the magic number 39 was
                # Linux-specific, errno.ENOTEMPTY is portable.
                if err.errno != errno.ENOTEMPTY:
                    raise
            else:
                printer.verbose('Deleted directory {dir}', dir=directory)