def render_podcast(args, audio_segments, video_segments, output, duration):
    """Stitch together the various input components into the final podcast.

    args           -- parsed command-line options (codecs, processing flags,
                      quiet/debug, normalise, preview fps)
    audio_segments -- list of audio segments to concatenate, in order
    video_segments -- list of video segments to concatenate, in order
    output         -- path of the rendered podcast file
    duration       -- expected total duration, used for progress reporting

    Logs an error (without raising) if the underlying ffmpeg command
    exits with a non-zero status.
    """
    fn = "render_podcast"
    globals.log.info("Rendering final podcast...")
    command = FFmpegConcatCommand(has_audio=len(audio_segments) > 0,
                                  has_video=len(video_segments) > 0,
                                  max_progress=duration,
                                  quiet=args.quiet and not args.debug,
                                  process_audio=args.process_audio,
                                  process_video=args.process_video,
                                  audio_codec=args.audio_codec,
                                  video_codec=args.video_codec)
    input_files = Segment.input_files()
    for f in input_files:
        # Any per-file input options must appear before the "-i" option
        # they apply to.
        if input_files[f]:
            command.append_input_options(input_files[f])
        command.append_input_options(["-i", f])
    for s in audio_segments + video_segments:
        command.append_filter(s.trim_filter())
    # Concatenate the audio and video streams separately; no need to
    # copy the segment lists before handing them over.
    command.append_concat_filter("a", audio_segments)
    if args.normalise:
        command.append_normalisation_filter()
    command.append_concat_filter("v", video_segments)
    if args.preview:
        globals.log.info("PREVIEW MODE: {fps} fps".format(fps=args.preview))
        command.append_output_options(["-r", args.preview])
    command.append_output_options([output])
    globals.log.debug("{fn}(): {c}".format(fn=fn, c=command))
    if command.run() != 0:
        globals.log.error("Failed to render final podcast")
def main():
    """Program entry point.

    Parses the command line, reads the configuration, processes the
    input streams into audio/video segments, and renders the final
    podcast.  Intermediate files are cleaned up on exit unless the user
    asked to keep them.
    """
    fn = "main"
    logging.basicConfig(
        level=logging.INFO,
        format="%(levelname)s: {p}: %(message)s".format(p=globals.PROGRAM))
    segments = None

    try:
        args = parse_command_line()
        check_arguments(args)

        config = get_configuration(args)

        segments = process_input_streams(args, config)

        # Partition the segments once and reuse the lists for both the
        # debug logging and the duration arithmetic below.
        audio_segments = [s for s in segments if isinstance(s, AudioSegment)]
        video_segments = [s for s in segments if isinstance(s, VideoSegment)]
        globals.log.debug("{fn}(): audio segments = {a}".format(
            fn=fn, a=audio_segments))
        globals.log.debug("{fn}(): video segments = {v}".format(
            fn=fn, v=video_segments))

        audio_duration = sum(s.get_duration() for s in audio_segments)
        video_duration = sum(s.get_duration() for s in video_segments)
        globals.log.debug("{fn}(): audio duration = "
                          "{a}".format(fn=fn, a=audio_duration))
        globals.log.debug("{fn}(): video duration = "
                          "{v}".format(fn=fn, v=video_duration))

        # A duration mismatch is worth warning about, but only when both
        # stream types are actually present.
        if audio_segments and video_segments:
            if audio_duration != video_duration:
                globals.log.warning("total video duration ({v}s) doesn't match "
                            "total audio duration "
                            "({a}s)".format(v=video_duration, a=audio_duration))

        width, height = smallest_video_dimensions(args, video_segments)
        globals.log.debug("{fn}(): width = {w}, height = "
                          "{h}".format(fn=fn, w=width, h=height))

        process_frame_segments(args, segments, width, height)

        globals.log.debug("{fn}(): input files = "
                          "{i}".format(fn=fn, i=Segment.input_files()))

        render_podcast(args, audio_segments, video_segments, args.output,
                       max(audio_duration, video_duration))

    except KeyboardInterrupt:
        # Ctrl-C: exit quietly; cleanup still runs in the finally block.
        pass
    finally:
        # segments is only non-None once args has been assigned, so the
        # short-circuit makes referencing args here safe.
        if segments and not args.keep:
            cleanup(segments)