def process_movie_file(movie_file: Path, dst_folder: Optional[Path], delete_source: bool = False) -> Path:
    assert movie_file.suffix in MOVIE_SUFFIXES

    # Determine the destination filename as '<movie> (<year>)'
    movie = query_movie_data_google(movie_file.stem)
    dst_file = (dst_folder or movie_file.parent) / (sanitize_name(str(movie)) + movie_file.suffix)

    # File operations: copy to the new name and, if requested, remove the source.
    # The source is only deleted when it is not the destination itself.
    if movie_file != dst_file:
        shutil.copy(movie_file, dst_file)
        if delete_source:
            movie_file.unlink()
    return dst_file
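# A minimal usage sketch for process_movie_file (not part of the pipeline itself).
# The paths below are hypothetical, and MOVIE_SUFFIXES, query_movie_data_google,
# sanitize_name and the pathlib/shutil imports are assumed to come from this module.
def _example_rename_single_file() -> Path:
    # Looks up 'Some.Movie.2019.1080p' and copies the file to something like
    # '/media/movies/Some Movie (2019).mkv', deleting the original afterwards.
    return process_movie_file(
        Path('/downloads/Some.Movie.2019.1080p.mkv'),
        dst_folder=Path('/media/movies'),
        delete_source=True,
    )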
def create_project_item(path, gen):
    def render_md(name, asset_filter=None):
        # Render a markdown file in the project folder to HTML; missing files
        # simply produce an empty string.
        p = os.path.join(path, name)
        if not os.path.exists(p):
            return ""
        md = open(p).read()
        html = markdown.markdown(md.decode('utf-8'))
        return link_assets(html, path, gen, asset_filter=asset_filter)

    # Optional per-project metadata.
    data_path = os.path.join(path, 'data.json')
    data = json.load(open(data_path)) if os.path.exists(data_path) else {}

    preview_html = render_md('preview.markdown')
    d = {
        "preview_html": preview_html,
        "preview_html_for_tile": kill_links(render_md('preview.markdown')),
        "content_html": render_md('content.markdown'),
        "name": path.split('/')[-1],
        "classes": u" ".join(data.get('classes', [])),
        # The project title is the text of the first <h1> in the preview.
        "title": u"".join(map(unicode, BeautifulSoup(preview_html, 'lxml').find('h1').contents)),
        "link": data.get("link"),
    }

    from crop import SquareCropAssetFilter
    tile_filter = SquareCropAssetFilter(size=500)

    # If a tile image exists, derive the header colors and tile background from it.
    tile_path = os.path.join(path, 'tile.png')
    if os.path.exists(tile_path):
        colors = ui_colors(tile_path)
        d['project_css'] = (
            "<style>.header{ background-color: BG; color: COLOR; } "
            ".project_content a:link, .project_content a:visited { color: COLOR } </style>"
        ).replace('COLOR', colors['text']).replace('BG', colors['background'])
        d['style'] = u"background-image: url({0})".format(
            gen.include_asset(tile_path, filter=tile_filter))

    # External projects link out directly; everything else gets a generated detail page.
    if d['link']:
        d['url'] = d['link']
    else:
        d['url'] = '/projects/' + sanitize_name(d['name']) + '.html'
    return d
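# create_project_item expects a per-project folder roughly like the (hypothetical)
# layout below; preview.markdown with an <h1> is required for the title, the rest
# is optional. 'gen' is assumed to be the surrounding generator object.
#
#   my-project/
#       preview.markdown    # short teaser, first <h1> becomes the title
#       content.markdown    # full project description (optional)
#       data.json           # e.g. {"classes": ["wide"], "link": "https://example.com"}
#       tile.png            # drives header colors and the tile background (optional)
#
# A minimal usage sketch:
def _example_single_project_item(gen):
    return create_project_item('projects/my-project', gen)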
def process_movie_folder(movie_folder: Path, dst_folder: Optional[Path], delete_source: bool = False) -> Path:
    dst_folder = dst_folder or movie_folder

    # Determine the movie and subtitle files
    movie_files = [
        file for file in recursive_iterdir(movie_folder)
        if file.suffix in MOVIE_SUFFIXES
    ]
    subtitle_files = [
        file for file in recursive_iterdir(movie_folder)
        if file.suffix in ['.srt']
    ]

    # Select the largest movie file (smaller files are probably samples)
    movie_file = max(movie_files, key=lambda file: file.stat().st_size)

    # If the movie already has embedded subtitles,
    # or there are no .srt files,
    # or the file type can't be used to embed subtitles,
    # process the movie file without embedding subtitles.
    if get_embedded_subtitles(movie_file) \
            or not subtitle_files \
            or movie_file.suffix not in VALID_FFMPEG_SUFFIXES:
        dst_file = process_movie_file(movie_file, dst_folder, delete_source)
    else:
        # Otherwise merge the subtitles into a freshly named destination file.
        movie = query_movie_data_google(movie_folder.stem)
        dst_file = (dst_folder or movie_file.parent) / (
            sanitize_name(str(movie)) + movie_file.suffix)
        merge_all(movie_file, subtitle_files, dst_file)

    if delete_source and movie_folder != dst_folder:
        shutil.rmtree(str(movie_folder))
    return dst_file
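# A minimal driver sketch: run process_movie_folder over every sub-folder of a
# (hypothetical) downloads directory. The error handling is only illustrative;
# max() raises ValueError when a folder contains no movie files at all.
def _example_process_downloads(downloads: Path, library: Path) -> list:
    results = []
    for folder in downloads.iterdir():
        if folder.is_dir():
            try:
                results.append(process_movie_folder(folder, library, delete_source=False))
            except ValueError as exc:
                print(f'Skipping {folder.name}: {exc}')
    return results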
def create_detail_page(path, gen):
    # Render the project's detail page into the generated site.
    data = create_project_item(path, gen)
    page_path = os.path.join(gen.site_path, 'projects', sanitize_name(data['name']) + '.html')
    with open(page_path, 'w') as f:
        f.write(template("project.html", data).encode('utf-8'))
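# A minimal sketch tying the helpers above together: generate one detail page per
# project directory. 'projects_root' and 'gen' are assumed to be provided by the
# surrounding site generator, and the 'projects' output folder must already exist.
def _example_build_all_detail_pages(projects_root, gen):
    for name in sorted(os.listdir(projects_root)):
        project_path = os.path.join(projects_root, name)
        if os.path.isdir(project_path):
            create_detail_page(project_path, gen)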