def export_gif(concat_file, filter_string, export_directory):
    """Export a GIF from the input frames"""
    meta.write({"processing": "Exporting GIF"})
    benchmark = time()

    # GIF encoding technique taken from
    # http://blog.pkh.me/p/21-high-quality-gif-with-ffmpeg.html

    # TODO: Should we force-scale GIF for any reason (decision)
    # filter_string += ", scale=720:-1:flags=lanczos"

    palette_file = path.join(export_directory, "palette.png")

    # First pass: generate an optimized palette for the whole sequence
    subprocess.run([
        FFMPEG_EXECUTABLE,
        "-y",
        "-f", "concat",
        "-i", concat_file,
        "-filter_complex", filter_string + ", palettegen",
        palette_file
    ])

    export_file = path.join(export_directory, "exported.gif")

    # Second pass: encode the GIF using the generated palette
    subprocess.run([
        FFMPEG_EXECUTABLE,
        "-y",
        "-f", "concat",
        "-i", concat_file,
        "-i", palette_file,
        "-filter_complex", filter_string + " [comp]; [comp][1:v] paletteuse",
        export_file
    ])

    filesize = path.getsize(export_file)

    meta.write({
        "processing": None,
        "gif": {
            "filePath": "exported.gif",
            "exported": datetime.now().isoformat(),
            "processingTime": time() - benchmark,
            "fileSize": filesize
        }
    })

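# export_gif above and the video exports below all read their frames through
# ffmpeg's concat demuxer ("-f concat" + concat_file). For orientation, a
# minimal sketch of a helper that writes such a list file - the name
# write_concat_file and the fixed per-frame duration are illustrative
# assumptions, not part of this module:
def write_concat_file(frame_paths, concat_path, frame_duration=1 / 25):
    """Sketch: write an ffmpeg concat demuxer list for a set of frames"""
    with open(concat_path, "w") as f:
        for frame_path in frame_paths:
            # Each entry names a file and how long it should be shown
            f.write("file '{0}'\n".format(frame_path))
            f.write("duration {0}\n".format(frame_duration))
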
def update_models():
    """Update all objects that carry an elmyra-url property, return whether anything changed"""
    meta.write({'processing': 'Updating models'})

    updates_occurred = False

    # Update existing models
    for obj in bpy.data.objects:
        if obj.get('elmyra-url') is not None:
            if update_object(obj):
                updates_occurred = True

    meta.write({'processing': None})

    return updates_occurred

def export_png(ffmpeg_input_options, export_directory):
    """Export a PNG using the externally prepared ffmpeg input options"""
    meta.write({"processing": "Exporting PNG"})
    benchmark = time()

    export_file = path.join(export_directory, "exported.png")

    ffmpeg_call = ffmpeg_input_options + [export_file]
    subprocess.run(ffmpeg_call)

    filesize = path.getsize(export_file)

    meta.write({
        "processing": None,
        "png": {
            "filePath": "exported.png",
            "exported": datetime.now().isoformat(),
            "processingTime": time() - benchmark,
            "fileSize": filesize
        }
    })

def export_svg(image_directory, export_directory):
    """Export the first rendered SVG frame (if any) as a standalone SVG"""
    vector_input_files = glob(path.join(image_directory, "*.svg"))

    if len(vector_input_files) > 0:
        meta.write({"processing": "Exporting SVG"})
        benchmark = time()

        export_file = path.join(export_directory, "exported.svg")

        copy(vector_input_files[0], export_file)

        filesize = path.getsize(export_file)

        meta.write({
            "processing": None,
            "svg": {
                "filePath": "exported.svg",
                "exported": datetime.now().isoformat(),
                "processingTime": time() - benchmark,
                "fileSize": filesize
            }
        })

def export_webm(concat_file, filter_string, export_directory):
    """Export a WEBM (VP9) video from the input frames"""
    meta.write({"processing": "Exporting WEBM"})
    benchmark = time()

    export_file = path.join(export_directory, "exported.webm")

    subprocess.run([
        FFMPEG_EXECUTABLE,
        "-y",
        "-f", "concat",
        "-i", concat_file,
        "-filter_complex", filter_string,
        "-c:v", "libvpx-vp9",
        "-crf", "4",
        "-speed", "1",
        "-b:v", "32M",
        export_file
    ])

    filesize = path.getsize(export_file)

    meta.write({
        "processing": None,
        "webm": {
            "filePath": "exported.webm",
            "exported": datetime.now().isoformat(),
            "processingTime": time() - benchmark,
            "fileSize": filesize
        }
    })

def export_ogv(concat_file, filter_string, export_directory):
    """Export an OGV (Theora) video from the input frames"""
    meta.write({"processing": "Exporting OGV"})
    benchmark = time()

    export_file = path.join(export_directory, "exported.ogv")

    subprocess.run([
        FFMPEG_EXECUTABLE,
        "-y",
        "-f", "concat",
        "-i", concat_file,
        "-filter_complex", filter_string,
        "-c:v", "libtheora",
        "-qscale:v", "10",
        export_file
    ])

    filesize = path.getsize(export_file)

    meta.write({
        "processing": None,
        "ogv": {
            "filePath": "exported.ogv",
            "exported": datetime.now().isoformat(),
            "processingTime": time() - benchmark,
            "fileSize": filesize
        }
    })

def export_mp4(concat_file, filter_string, export_directory):
    """Export an MP4 (H.264) video from the input frames"""
    meta.write({"processing": "Exporting MP4"})
    benchmark = time()

    export_file = path.join(export_directory, "exported.mp4")

    subprocess.run([
        FFMPEG_EXECUTABLE,
        "-y",
        "-f", "concat",
        "-i", concat_file,
        "-filter_complex", filter_string,
        "-c:v", "libx264",
        "-preset", "slow",
        "-crf", "4",
        export_file
    ])

    filesize = path.getsize(export_file)

    meta.write({
        "processing": None,
        "mp4": {
            "filePath": "exported.mp4",
            "exported": datetime.now().isoformat(),
            "processingTime": time() - benchmark,
            "fileSize": filesize
        }
    })

def export_svg_sequence(image_directory, export_directory):
    """Export all rendered SVG frames inside a ZIP"""
    vector_input_files = glob(path.join(image_directory, "*.svg"))

    if len(vector_input_files) > 0:
        meta.write({"processing": "Exporting SVG Sequence"})
        benchmark = time()

        export_file = path.join(export_directory, "exported.svg.zip")

        # Close the archive before reading its size, otherwise the
        # central directory has not been written yet
        with ZipFile(export_file, 'w') as zip_file:
            for frame in vector_input_files:
                zip_file.write(frame, path.basename(frame))

        filesize = path.getsize(export_file)

        meta.write({
            "processing": None,
            "svg.zip": {
                "filePath": "exported.svg.zip",
                "exported": datetime.now().isoformat(),
                "processingTime": time() - benchmark,
                "fileSize": filesize
            }
        })

def export_png_sequence(image_directory, export_directory):
    """Export all input frames as PNGs inside a ZIP"""
    meta.write({"processing": "Exporting PNG Sequence"})
    benchmark = time()

    raster_input_files = glob(path.join(image_directory, "*.png"))

    export_file = path.join(export_directory, "exported.png.zip")

    # Close the archive before reading its size, otherwise the
    # central directory has not been written yet
    with ZipFile(export_file, 'w') as zip_file:
        for frame in raster_input_files:
            zip_file.write(frame, path.basename(frame))

    filesize = path.getsize(export_file)

    meta.write({
        "processing": None,
        "png.zip": {
            "filePath": "exported.png.zip",
            "exported": datetime.now().isoformat(),
            "processingTime": time() - benchmark,
            "fileSize": filesize
        }
    })

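# All exporters above share the same calling pattern: a prepared concat file
# and filter string (or an image directory) plus an export directory. A hedged
# sketch of how a caller might drive them together - export_all and its
# parameter names are assumptions, not part of this module, and export_png is
# left out because it takes pre-built ffmpeg input options rather than a
# concat file:
def export_all(concat_file, filter_string, image_directory, export_directory):
    """Sketch: run every exporter defined above against one set of frames"""
    export_gif(concat_file, filter_string, export_directory)
    export_webm(concat_file, filter_string, export_directory)
    export_ogv(concat_file, filter_string, export_directory)
    export_mp4(concat_file, filter_string, export_directory)
    export_svg(image_directory, export_directory)
    export_svg_sequence(image_directory, export_directory)
    export_png_sequence(image_directory, export_directory)
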
args = parse_custom_args()

common.ensure_addons()

if args.upload_id:
    common.open_upload(args.upload_id)
    version.save_new(args.id)
    meta.write_media_info()
else:
    run_updates = True

    if args.min_interval:
        # Use a separate name so the meta module itself is not shadowed
        meta_data = meta.get()

        if 'lastUpdate' in meta_data:
            # Only update if at least min_interval seconds have passed
            run_updates = time() - meta_data['lastUpdate'] >= args.min_interval

    if run_updates:
        version.open_latest(args.id)

        # TODO: Find problem: Why does it update although hash stayed the same?
        # (Happened on update from external sources manually)
        # (Note 06/03/2016 - not sure if still applies)
        if update.update_models():
            version.save_new(args.id)
            meta.write_media_info()
        else:
            meta.write({'lastUpdate': time()})

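# parse_custom_args() is defined elsewhere; for orientation, a hedged sketch
# of what it plausibly does, based only on the flags referenced above
# (--id, --upload-id, --min-interval). Blender passes script arguments after a
# literal "--" separator, so parsing conventionally starts behind it.
# All names below are illustrative assumptions, not the actual implementation:
def parse_custom_args_sketch():
    """Sketch: parse the script's own flags from the argv tail behind '--'"""
    import argparse
    import sys

    parser = argparse.ArgumentParser()
    parser.add_argument("--id")
    parser.add_argument("--upload-id", dest="upload_id")
    parser.add_argument("--min-interval", dest="min_interval", type=float)

    # Only consider arguments after the "--" separator, if present
    argv = sys.argv
    custom_argv = argv[argv.index("--") + 1:] if "--" in argv else []

    return parser.parse_args(custom_argv)
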
def render(target_time, device):
    """Render missing frames or additional samples until target_time seconds have elapsed"""
    begin_time = time()

    # Image stacking random seed
    bpy.context.scene.cycles.seed = int(begin_time)
    bpy.context.scene.cycles.device = device

    render_directory = path.join(bpy.path.abspath("//"), "rendered_frames")

    if not path.exists(render_directory):
        makedirs(render_directory)

    first = bpy.context.scene.frame_start
    last = bpy.context.scene.frame_end
    total_frames = last - first + 1

    rendered_frames = sorted(glob(path.join(render_directory, "*.png")))
    requested_frames = []

    if len(rendered_frames) < total_frames:
        meta.write({"processing": "Rendering missing frames"})

        # Build the initial list of frames based on a binary split pattern:
        #
        # | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |  (10 example frame numbers)
        # -----------------------------------------
        # |   |   |   |   | 1 |   |   |   |   |   |
        # |   |   | 2 |   |   |   |   | 3 |   |   |  (split pattern and
        # | 4 |   |   | 5 |   | 6 |   |   | 7 |   |   render order)
        # |   | 8 |   |   |   |   | 9 |   |   |10 |
        all_frames = range(first, last + 1)
        numbers = [
            int(path.basename(r).split(".")[0]) for r in rendered_frames
        ]

        # Collect contiguous ranges of frames that have not been rendered yet
        all_ranges = []
        buffer_range = []

        for frame in all_frames:
            if frame not in numbers:
                buffer_range.append(frame)
            elif len(buffer_range) > 0:
                all_ranges.append(buffer_range)
                buffer_range = []

        if len(buffer_range) > 0:
            all_ranges.append(buffer_range)
            buffer_range = []

        # Repeatedly split the largest remaining gap at its midpoint
        while len(all_ranges) > 0:
            largest_range = max(all_ranges, key=lambda r: [len(r), -min(r)])
            all_ranges.remove(largest_range)

            split_index = len(largest_range) // 2

            for index, frame in enumerate(largest_range):
                if index == split_index:
                    requested_frames.append({
                        "number": frame,
                        "available_samples": 0,
                        "requested_samples": SAMPLES_INITIAL,
                        "available_frame": None
                    })

                    if len(buffer_range) > 0:
                        all_ranges.append(buffer_range)
                        buffer_range = []
                else:
                    buffer_range.append(frame)

            if len(buffer_range) > 0:
                all_ranges.append(buffer_range)
                buffer_range = []
    else:
        samples = [
            int(path.basename(r).split(".")[1]) for r in rendered_frames
        ]

        min_samples = min(samples)
        max_samples = max(samples)

        # Classify the current minimum sample count into a quality tier
        if min_samples >= QUALITY_PRODUCTION:
            render_quality = "production"
        elif min_samples >= QUALITY_PREVIEW:
            render_quality = "preview"
        else:
            render_quality = "draft"

        meta.write({
            "processing": "Rendering more samples",
            "minimumSamples": min_samples,
            "renderQuality": render_quality
        })

        if min_samples != max_samples:
            # Bring all frames up to the current maximum sample count first
            for frame in rendered_frames:
                frame_info = path.basename(frame).split(".")
                frame_number = int(frame_info[0])
                frame_samples = int(frame_info[1])

                if frame_samples < max_samples:
                    requested_frames.append({
                        "number": frame_number,
                        "available_samples": frame_samples,
                        "requested_samples": max_samples - frame_samples,
                        "available_frame": frame
                    })
        elif min_samples < SAMPLES_CAP:
            # All frames are even - increase the sample count across the board
            for frame in rendered_frames:
                frame_info = path.basename(frame).split(".")
                frame_number = int(frame_info[0])

                requested_frames.append({
                    "number": frame_number,
                    "available_samples": min_samples,
                    "requested_samples": int(min_samples * SAMPLES_MULTIPLIER),
                    "available_frame": frame
                })

    for frame in requested_frames:
        render_frame(render_directory,
                     frame["number"],
                     frame["available_samples"],
                     frame["requested_samples"],
                     frame["available_frame"])

        if time() - begin_time > target_time:
            break

    meta.write({"processing": False})

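# The binary split scheduling inside render() is independent of Blender, so it
# can be illustrated in isolation. A minimal sketch, assuming a contiguous gap
# of missing frames; it returns the order in which frames would be requested
# (the exact order depends on the tie-breaking in the max() key above):
def binary_split_order(missing_frames):
    """Sketch: reproduce the request order of the gap-splitting loop above"""
    requested = []
    all_ranges = [list(missing_frames)]
    buffer_range = []

    while len(all_ranges) > 0:
        largest_range = max(all_ranges, key=lambda r: [len(r), -min(r)])
        all_ranges.remove(largest_range)

        split_index = len(largest_range) // 2

        for index, frame in enumerate(largest_range):
            if index == split_index:
                requested.append(frame)

                if len(buffer_range) > 0:
                    all_ranges.append(buffer_range)
                    buffer_range = []
            else:
                buffer_range.append(frame)

        if len(buffer_range) > 0:
            all_ranges.append(buffer_range)
            buffer_range = []

    return requested

# Example: ten missing frames are requested midpoint-first, then the midpoints
# of the remaining halves, and so on:
# print(binary_split_order(range(10)))  # -> [5, 2, 8, 1, 4, 7, 0, 3, 6, 9]
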
def render_frame(render_directory,
                 frame,
                 existing_samples,
                 additional_samples,
                 existing_frame=None):
    """Render additional samples for one frame and stack them onto the existing result"""
    begin_time = time()

    bpy.context.scene.frame_current = frame
    bpy.context.scene.cycles.samples = additional_samples

    # Enable SVG export when using Freestyle
    if bpy.context.scene.render.use_freestyle:
        bpy.context.scene.svg_export.use_svg_export = True

    cache_filename = ".render-cache.png"
    cache_filepath = path.join(render_directory, cache_filename)

    bpy.context.scene.render.filepath = cache_filepath
    bpy.ops.render.render(write_still=True)

    if existing_frame:
        # Weight the existing frame by its share of the total sample count
        alpha = float(existing_samples) / float(existing_samples + additional_samples)

        result_samples = existing_samples + additional_samples
        result_filename = "{0:06}.{1}.png".format(frame, result_samples)
        result_filepath = path.join(render_directory, result_filename)

        # In ffmpeg's blend filter A is the first input and B the second, so the
        # existing frame (input 0) comes first and is weighted with alpha
        ffmpeg_call = [
            FFMPEG_EXECUTABLE,
            "-y",
            "-i", existing_frame,
            "-i", cache_filepath,
            "-filter_complex",
            f"[0:v][1:v]blend=all_expr='A*{alpha}+B*{1 - alpha}'",
            result_filepath
        ]

        subprocess.run(ffmpeg_call)

        remove(existing_frame)
        remove(cache_filepath)
    else:
        result_filename = "{0:06}.{1}.png".format(frame, additional_samples)
        result_filepath = path.join(render_directory, result_filename)

        rename(cache_filepath, result_filepath)

    if bpy.context.scene.render.use_freestyle:
        svg_old_filepath = path.join(
            render_directory,
            "{0}{1:04}.svg".format(cache_filename, frame))
        svg_new_filepath = path.join(render_directory,
                                     "{0:06}.svg".format(frame))

        if existing_frame:
            remove(svg_new_filepath)

        rename(svg_old_filepath, svg_new_filepath)

    # Thumbnail creation
    thumbnail_filepath = path.join(render_directory, "..", "thumbnail.png")

    subprocess.run([
        FFMPEG_EXECUTABLE,
        "-y",
        "-f", "image2",
        "-i", result_filepath,
        "-vf", "scale=480:270:force_original_aspect_ratio=decrease",
        thumbnail_filepath
    ])

    meta.write({
        "renderDevice": bpy.context.scene.cycles.device,
        "lastRenderedFrame": frame,
        "lastRenderDuration": time() - begin_time,
        "lastRender": datetime.now().isoformat(),
        "lastRenderedSamples": additional_samples
    })

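# The blend call in render_frame() is an incremental weighted average: stacking
# a new render of `additional` samples onto an image accumulated from
# `existing` samples with weight alpha = existing / (existing + additional)
# keeps the result equal to the plain mean over all samples. A minimal numeric
# sketch of that arithmetic (pure Python, no ffmpeg, names are illustrative):
def stack(accumulated, existing, new_value, additional):
    """Sketch: the incremental weighted average performed by the blend filter"""
    alpha = existing / (existing + additional)
    return accumulated * alpha + new_value * (1 - alpha)

# Example: batches of 10 and 30 samples averaging 0.2 and 0.6 respectively
# combine to the overall mean (0.2 * 10 + 0.6 * 30) / 40 = 0.5
# print(stack(0.2, 10, 0.6, 30))  # -> 0.5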