def makeVideoFile(ffmpeg, ffprobe, input_, output, args, chunks, speeds, fps,
        tracks, effects, TEMP, log):
    from videoUtils import handleAudioTracks, muxVideo

    continueVid = handleAudioTracks(ffmpeg, output, args, tracks, chunks,
        speeds, fps, TEMP, log)
    if (continueVid):
        if (args.render == 'auto'):
            if (args.zoom != [] or args.rectangle != []):
                args.render = 'opencv'
            else:
                try:
                    import av
                    args.render = 'av'
                except ImportError:
                    args.render = 'opencv'

        log.debug(f'Using {args.render} method')

        if (args.render == 'av'):
            if (args.zoom != []):
                log.error('Zoom effect is not supported on the av render method.')
            if (args.rectangle != []):
                log.error('Rectangle effect is not supported on the av render method.')

            from renderVideo import renderAv
            renderAv(ffmpeg, ffprobe, input_, args, chunks, speeds, fps, TEMP, log)

        if (args.render == 'opencv'):
            from renderVideo import renderOpencv
            renderOpencv(ffmpeg, ffprobe, input_, args, chunks, speeds, fps,
                effects, TEMP, log)

        # Now mix new audio(s) and the new video.
        muxVideo(ffmpeg, output, args, tracks, TEMP, log)

        if (output is not None and not os.path.isfile(output)):
            log.bug(f'The file {output} was not created.')
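
# main() parses the command line and dispatches on the first positional
# argument: 'generate_test' creates test media (generateTestMedia),
# 'test' runs the test suite (testAutoEditor), 'info' prints media
# information (getInfo), and anything else runs the normal editing
# pipeline on the given input file(s).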
def main():
    dirPath = os.path.dirname(os.path.realpath(__file__))

    # Fixes pip not being able to find other included modules.
    sys.path.append(os.path.abspath(dirPath))

    # Print the version if only the -v option is added.
    if (sys.argv[1:] == ['-v'] or sys.argv[1:] == ['-V']):
        print(f'Auto-Editor version {version}\nPlease use --version instead.')
        sys.exit()

    # If the user just runs: $ auto-editor
    if (sys.argv[1:] == []):
        print('\nAuto-Editor is an automatic video/audio creator and editor.\n')
        print('By default, it will detect silence and create a new video with')
        print('those sections cut out. By changing some of the options, you can')
        print('export to a traditional editor like Premiere Pro and adjust the')
        print('edits there, adjust the pacing of the cuts, and change the method')
        print('of editing like using audio loudness and video motion to judge')
        print('making cuts.')
        print('\nRun:\n    auto-editor --help\n\nTo get the list of options.\n')
        sys.exit()

    from vanparse import ParseOptions
    from usefulFunctions import Log, Timer

    if (len(sys.argv) > 1 and sys.argv[1] == 'generate_test'):
        option_data = generate_options()
        args = ParseOptions(sys.argv[2:], Log(), 'generate_test', option_data)

        if (args.help):
            genHelp(option_data)
            sys.exit()

        from generateTestMedia import generateTestMedia
        from usefulFunctions import FFmpeg

        ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, Log())
        generateTestMedia(ffmpeg, args.output_file, args.fps, args.duration,
            args.width, args.height)
        sys.exit()

    elif (len(sys.argv) > 1 and sys.argv[1] == 'test'):
        from testAutoEditor import testAutoEditor
        testAutoEditor()
        sys.exit()

    elif (len(sys.argv) > 1 and sys.argv[1] == 'info'):
        option_data = info_options()
        args = ParseOptions(sys.argv[2:], Log(), 'info', option_data)

        if (args.help):
            genHelp(option_data)
            sys.exit()

        from info import getInfo
        from usefulFunctions import FFmpeg, FFprobe

        log = Log()
        ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, log)
        ffprobe = FFprobe(dirPath, args.my_ffmpeg, log)

        getInfo(args.input, ffmpeg, ffprobe, log)
        sys.exit()
    else:
        option_data = main_options()
        args = ParseOptions(sys.argv[1:], Log(True), 'auto-editor', option_data)

    timer = Timer(args.quiet)

    # Print the help screen for the entire program.
    if (args.help):
        print('\n Have an issue? Make an issue. '
            'Visit https://github.com/wyattblue/auto-editor/issues\n')
        print('  The help option can also be used on a specific option:')
        print('      auto-editor --frame_margin --help\n')
        genHelp(option_data)
        sys.exit()

    del option_data

    from usefulFunctions import FFmpeg, FFprobe, sep

    ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, Log())
    ffprobe = FFprobe(dirPath, args.my_ffmpeg, Log())

    makingDataFile = (args.export_to_premiere or args.export_to_resolve or
        args.export_to_final_cut_pro or args.export_as_json)

    is64bit = '64-bit' if sys.maxsize > 2**32 else '32-bit'

    if (args.debug and args.input == []):
        import platform
        print('Python Version:', platform.python_version(), is64bit)
        print('Platform:', platform.system(), platform.release())
        print('Config File path:', dirPath + sep() + 'config.txt')
        print('FFmpeg path:', ffmpeg.getPath())
        ffmpegVersion = ffmpeg.pipe(['-version']).split('\n')[0]
        ffmpegVersion = ffmpegVersion.replace('ffmpeg version', '').strip()
        ffmpegVersion = ffmpegVersion.split(' ')[0]
        print('FFmpeg version:', ffmpegVersion)
        print('Auto-Editor version', version)
        sys.exit()

    if (is64bit == '32-bit'):
        Log().warning('You have the 32-bit version of Python, which may lead to '
            'memory crashes.')

    if (args.version):
        print('Auto-Editor version', version)
        sys.exit()

    TEMP = tempfile.mkdtemp()
    log = Log(args.debug, args.show_ffmpeg_debug, args.quiet, temp=TEMP)
    log.debug(f'\n - Temp Directory: {TEMP}')

    ffmpeg.updateLog(log)
    ffprobe.updateLog(log)

    from wavfile import read
    from usefulFunctions import isLatestVersion

    if (not args.quiet and not isLatestVersion(version, log)):
        log.print('\nAuto-Editor is out of date. Run:\n')
        log.print('    pip3 install -U auto-editor')
        log.print('\nto upgrade to the latest version.\n')

    from argsCheck import hardArgsCheck, softArgsCheck
    hardArgsCheck(args, log)
    args = softArgsCheck(args, log)

    from validateInput import validInput
    inputList = validInput(args.input, ffmpeg, args, log)

    # Figure out the output file names.
    def newOutputName(oldFile: str, exa=False, data=False, exc=False) -> str:
        dotIndex = oldFile.rfind('.')
        if (exc):
            return oldFile[:dotIndex] + '.json'
        elif (data):
            return oldFile[:dotIndex] + '.xml'
        ext = oldFile[dotIndex:]
        if (exa):
            ext = '.wav'
        return oldFile[:dotIndex] + '_ALTERED' + ext

    if (len(args.output_file) < len(inputList)):
        for i in range(len(args.output_file), len(inputList)):
            args.output_file.append(newOutputName(inputList[i],
                args.export_as_audio, makingDataFile, args.export_as_json))

    if (args.combine_files):
        # Combine video files, then set input to 'combined.mp4'.
        cmd = []
        for fileref in inputList:
            cmd.extend(['-i', fileref])
        cmd.extend([
            '-filter_complex', f'[0:v]concat=n={len(inputList)}:v=1:a=1',
            '-codec:v', 'h264',
            '-pix_fmt', 'yuv420p',
            '-strict', '-2',
            f'{TEMP}{sep()}combined.mp4',
        ])
        ffmpeg.run(cmd)
        del cmd
        inputList = [f'{TEMP}{sep()}combined.mp4']

    speeds = [args.silent_speed, args.video_speed]
    log.debug(f' - Speeds: {speeds}')

    audioExtensions = ['.wav', '.mp3', '.m4a', '.aiff', '.flac', '.ogg', '.oga',
        '.acc', '.nfa', '.mka']
    # videoExtensions = ['.mp4', '.mkv', '.mov', '.webm', '.ogv']

    for i, INPUT_FILE in enumerate(inputList):
        fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]

        chunks = None
        if (fileFormat == '.json'):
            log.debug('Reading .json file')
            from makeCutList import readCutList
            INPUT_FILE, chunks, speeds = readCutList(INPUT_FILE, version, log)

            newOutput = newOutputName(INPUT_FILE, args.export_as_audio,
                makingDataFile, False)
            fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]
        else:
            newOutput = args.output_file[i]

        log.debug(f' - INPUT_FILE: {INPUT_FILE}')
        log.debug(f' - newOutput: {newOutput}')

        if (os.path.isfile(newOutput) and INPUT_FILE != newOutput):
            log.debug(f' Removing already existing file: {newOutput}')
            os.remove(newOutput)

        if (args.sample_rate is None):
            sampleRate = ffprobe.getSampleRate(INPUT_FILE)
            if (sampleRate == 'N/A'):
                sampleRate = '48000'
                log.warning(f"Samplerate couldn't be detected, using {sampleRate}.")
        else:
            sampleRate = str(args.sample_rate)
        log.debug(f' - sampleRate: {sampleRate}')

        if (args.audio_bitrate is None):
            if (INPUT_FILE.endswith('.mkv')):
                # Setting the audio bitrate is not supported in the mkv container.
                audioBitrate = None
            else:
                audioBitrate = ffprobe.getPrettyABitrate(INPUT_FILE)
                if (audioBitrate == 'N/A'):
                    log.warning("Couldn't automatically detect audio bitrate.")
                    audioBitrate = None
        else:
            audioBitrate = args.audio_bitrate
        log.debug(f' - audioBitrate: {audioBitrate}')

        audioFile = fileFormat in audioExtensions
        if (audioFile):
            if (args.force_fps_to is None):
                # Audio files don't have frames, so give fps a dummy value.
                fps = 30
            else:
                fps = args.force_fps_to

            if (args.force_tracks_to is None):
                tracks = 1
            else:
                tracks = args.force_tracks_to

            cmd = ['-i', INPUT_FILE]
            if (audioBitrate is not None):
                cmd.extend(['-b:a', audioBitrate])
            cmd.extend(['-ac', '2', '-ar', sampleRate, '-vn',
                f'{TEMP}{sep()}fastAud.wav'])
            ffmpeg.run(cmd)
            del cmd

            sampleRate, audioData = read(f'{TEMP}{sep()}fastAud.wav')
        else:
            if (args.force_fps_to is not None):
                fps = args.force_fps_to
            elif (args.export_to_premiere or args.export_to_final_cut_pro or
                    args.export_to_resolve):
                # Based on timebase.
                fps = int(ffprobe.getFrameRate(INPUT_FILE))
            else:
                fps = ffprobe.getFrameRate(INPUT_FILE)
            log.debug(f'Frame rate: {fps}')

            tracks = args.force_tracks_to
            if (tracks is None):
                tracks = ffprobe.getAudioTracks(INPUT_FILE)

            if (args.cut_by_this_track >= tracks):
                allTracks = ''
                for trackNum in range(tracks):
                    allTracks += f'Track {trackNum}\n'

                if (tracks == 1):
                    message = f'is only {tracks} track'
                else:
                    message = f'are only {tracks} tracks'
                log.error("You chose a track that doesn't exist.\n"
                    f'There {message}.\n {allTracks}')

            # Split audio tracks into: 0.wav, 1.wav, etc.
            for trackNum in range(tracks):
                cmd = ['-i', INPUT_FILE]
                if (audioBitrate is not None):
                    cmd.extend(['-ab', audioBitrate])
                cmd.extend(['-ac', '2', '-ar', sampleRate, '-map',
                    f'0:a:{trackNum}', f'{TEMP}{sep()}{trackNum}.wav'])
                ffmpeg.run(cmd)
                del cmd

            # Check if the `--cut_by_all_tracks` flag has been set or not.
            if (args.cut_by_all_tracks):
                # Combine all audio tracks into one audio file, then read.
                cmd = ['-i', INPUT_FILE, '-filter_complex',
                    f'[0:a]amix=inputs={tracks}:duration=longest', '-ar',
                    sampleRate, '-ac', '2', '-f', 'wav',
                    f'{TEMP}{sep()}combined.wav']
                ffmpeg.run(cmd)
                sampleRate, audioData = read(f'{TEMP}{sep()}combined.wav')
                del cmd
            else:
                # Read only one audio file.
                if (os.path.isfile(f'{TEMP}{sep()}{args.cut_by_this_track}.wav')):
                    sampleRate, audioData = read(
                        f'{TEMP}{sep()}{args.cut_by_this_track}.wav')
                else:
                    log.bug('Audio track not found!')

        log.debug(f' - Frame Rate: {fps}')

        if (chunks is None):
            from cutting import audioToHasLoud, motionDetection

            audioList = None
            motionList = None
            if ('audio' in args.edit_based_on):
                log.debug('Analyzing audio volume.')
                audioList = audioToHasLoud(audioData, sampleRate,
                    args.silent_threshold, fps, log)

            if ('motion' in args.edit_based_on):
                log.debug('Analyzing video motion.')
                motionList = motionDetection(INPUT_FILE, ffprobe,
                    args.motion_threshold, log, width=args.width,
                    dilates=args.dilates, blur=args.blur)

                if (audioList is not None):
                    if (len(audioList) != len(motionList)):
                        log.debug(f'audioList Length: {len(audioList)}')
                        log.debug(f'motionList Length: {len(motionList)}')

                    if (len(audioList) > len(motionList)):
                        log.debug('Reducing the size of audioList to match motionList.')
                        audioList = audioList[:len(motionList)]
                    elif (len(motionList) > len(audioList)):
                        log.debug('Reducing the size of motionList to match audioList.')
                        motionList = motionList[:len(audioList)]

            from cutting import combineArrs, applySpacingRules

            hasLoud = combineArrs(audioList, motionList, args.edit_based_on, log)
            del audioList, motionList

            chunks = applySpacingRules(hasLoud, fps, args.frame_margin,
                args.min_clip_length, args.min_cut_length, args.ignore,
                args.cut_out, log)
            del hasLoud

        clips = []
        numCuts = len(chunks)
        for chunk in chunks:
            if (speeds[chunk[2]] != 99999):
                clips.append([chunk[0], chunk[1], speeds[chunk[2]] * 100])

        if (fps is None and not audioFile):
            if (makingDataFile):
                dotIndex = INPUT_FILE.rfind('.')
                end = '_constantFPS' + INPUT_FILE[dotIndex:]
                constantLoc = INPUT_FILE[:dotIndex] + end
            else:
                constantLoc = f'{TEMP}{sep()}constantVid{fileFormat}'
            ffmpeg.run(['-i', INPUT_FILE, '-filter:v', 'fps=fps=30', constantLoc])
            INPUT_FILE = constantLoc

        if (args.export_as_json):
            from makeCutList import makeCutList
            makeCutList(INPUT_FILE, newOutput, version, chunks, speeds, log)
            continue

        if (args.preview):
            newOutput = None
            from preview import preview
            preview(INPUT_FILE, chunks, speeds, fps, audioFile, log)
            continue

        if (args.export_to_premiere or args.export_to_resolve):
            from editor import editorXML
            editorXML(INPUT_FILE, TEMP, newOutput, clips, chunks, tracks,
                sampleRate, audioFile, args.export_to_resolve, fps, log)
            continue

        if (audioFile):
            from fastAudio import fastAudio, handleAudio, convertAudio

            theFile = handleAudio(ffmpeg, INPUT_FILE, audioBitrate,
                str(sampleRate), TEMP, log)

            fastAudio(theFile, f'{TEMP}{sep()}convert.wav', chunks, speeds, log,
                fps, args.machine_readable_progress, args.no_progress)
            convertAudio(ffmpeg, ffprobe, f'{TEMP}{sep()}convert.wav',
                INPUT_FILE, newOutput, args, log)
            continue

        from videoUtils import handleAudioTracks, muxVideo

        continueVid = handleAudioTracks(ffmpeg, newOutput, args, tracks, chunks,
            speeds, fps, TEMP, log)
        if (continueVid):
            if (args.render == 'auto'):
                try:
                    import av
                    args.render = 'av'
                except ImportError:
                    args.render = 'opencv'

            log.debug(f'Using {args.render} method')

            if (args.render == 'av'):
                from renderVideo import renderAv
                renderAv(ffmpeg, INPUT_FILE, args, chunks, speeds, TEMP, log)

            if (args.render == 'opencv'):
                from renderVideo import renderOpencv
                renderOpencv(ffmpeg, INPUT_FILE, args, chunks, speeds, fps,
                    TEMP, log)

            # Now mix new audio(s) and the new video.
            muxVideo(ffmpeg, newOutput, args, tracks, TEMP, log)

            if (newOutput is not None and not os.path.isfile(newOutput)):
                log.bug(f'The file {newOutput} was not created.')

    if (not args.preview and not makingDataFile):
        timer.stop()

    if (not args.preview and makingDataFile):
        from usefulFunctions import humanReadableTime

        # Assume making each cut takes about 30 seconds.
        timeSave = humanReadableTime(numCuts * 30)

        s = 's' if numCuts != 1 else ''
        log.print(f'Auto-Editor made {numCuts} cut{s}', end='')
        log.print(f', which would have taken about {timeSave} if edited manually.')

    if (not args.no_open):
        from usefulFunctions import smartOpen
        smartOpen(newOutput, log)

    log.debug('Deleting temp dir')
    rmtree(TEMP)
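

# Example invocations, for reference only. The option spellings are assumed to
# mirror the args.* attribute names used above; the built-in help text uses the
# same underscore style (e.g. `auto-editor --frame_margin --help`), but check
# `auto-editor --help` for the authoritative list. `example.mp4` is a
# placeholder file name.
#
#   auto-editor example.mp4                        # cut out the silent sections
#   auto-editor example.mp4 --export_to_premiere   # write an XML for Premiere Pro
#   auto-editor example.mp4 --edit_based_on motion # cut by video motion instead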