Example no. 1
def writeWaveAtPitch(snd, noteOffset, samplePath):
    fileName = samplePath + '/' + str(noteOffset + (12 * 6)) + ".wav"
    factor = 2.0**(1.0 * noteOffset / 12.0)
    new_sample_rate = (int(snd.frame_rate * factor) // 2) * 2

    shifted_sound = snd._spawn(snd.raw_data,
                               overrides={'frame_rate': new_sample_rate})
    shifted_sound = shifted_sound.set_frame_rate(44100)
    shifted_sound = shifted_sound.set_channels(2)
    shifted_sound = shifted_sound.set_sample_width(2)
    shifted_sound.export(fileName, format="wav")

    # filthy hack for looping :(
    thing = wavfile.read(fileName)

    wavfile.write(fileName,
                  thing[0],
                  thing[1],
                  loops=[{
                      'cuepointid': 0,
                      'datatype': 0,
                      'start': 0,
                      'end': len(thing[1]),
                      'fraction': 0,
                      'playcount': 0
                  }])
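A minimal usage sketch for writeWaveAtPitch above (not part of the original source): it assumes pydub is installed, that the wavfile module in scope is the loop-aware fork used by the example, and that 'sample.wav' and the 'out' directory are placeholder names.

from pydub import AudioSegment

snd = AudioSegment.from_wav('sample.wav')     # source sample to repitch
for noteOffset in range(-12, 13):             # one octave down to one octave up
    writeWaveAtPitch(snd, noteOffset, 'out')  # writes out/<noteOffset + 72>.wav with a full-length loop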
Example no. 2
    def plot_current_call(self):
        self.fig.clear()

        filename = self.filenames[self.file_index]

        try:
            self.fs, self.data = wavfile.read(filename)
        except ValueError:
            print('File %s cannot be read as a wav file.' % filename)
            return

        if len(self.data.shape) == 1:
            self.data = self.data.reshape(self.data.shape[0], 1)

        for ch in range(self.data.shape[1]):
            ax = self.fig.add_subplot(self.data.shape[1], 1, ch + 1)
            ax.set_title('%s, channel %i' % (filename, ch + 1))
            # TODO: edit spectrogram parameter
            # SEE: http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.specgram
            Sxx_, f_, t_, im = ax.specgram(self.data[:, ch], Fs=self.fs, vmin=-43)
            # f, t, Sxx = spectrogram(self.data[:, ch], fs=self.fs,
            #                         detrend=False,
            #                         window='hanning',
            #                         noverlap=128)
            # Sxx = 10. * np.log10(Sxx)
            # # import pdb; pdb.set_trace()
            # extent = min(t), max(t), f[0], f[-1]
            # plt.imshow(Sxx, extent=extent)
            # plt.axis('auto')
            # # ax.pcolormesh(t, f, Sxx)

        self.fig.canvas.draw()
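A rough, self-contained sketch of the scipy.signal.spectrogram alternative hinted at in the commented-out lines above (not part of the original source; the 'hann' window name and the +1e-12 offset to avoid log(0) are assumptions).

import numpy as np
from scipy.signal import spectrogram

def plot_scipy_spectrogram(data, fs, ax):
    # data: 1-D array of samples for one channel, fs: sample rate in Hz
    f, t, Sxx = spectrogram(data, fs=fs, window='hann', noverlap=128, detrend=False)
    Sxx = 10.0 * np.log10(Sxx + 1e-12)              # power to dB
    extent = (t.min(), t.max(), f[0], f[-1])
    ax.imshow(Sxx, extent=extent, origin='lower', aspect='auto')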
Example no. 3
def writeWaveAtPitch(noteOffset, samplePath):
    fileName = samplePath + '/' + str(noteOffset + (12 * 6)) + ".wav"
    factor = 2.0**(1.0 * noteOffset / 12.0)

    snd = AudioSegment(data=pitchShift(getWave(samples, x['a'], x['b']),
                                       factor),
                       sample_width=1,
                       frame_rate=44100,
                       channels=1)

    snd = snd.set_channels(2)
    snd = snd.set_sample_width(2)
    snd.export(fileName, format="wav")

    # filthy hack for looping :(
    thing = wavfile.read(fileName)

    wavfile.write(fileName,
                  thing[0],
                  thing[1],
                  loops=[{
                      'cuepointid': 0,
                      'datatype': 0,
                      'start': 0,
                      'end': len(thing[1]),
                      'fraction': 0,
                      'playcount': 0
                  }])
Example no. 4
def convolve(args):
    sample_res = wavfile.read(args['input'], normalized=True, forcestereo=True)
    impulse_res = wavfile.read(args['impulse'],
                               normalized=True,
                               forcestereo=True)

    debug = args['debug']
    stereo = args['channels'] == 'stereo'

    if debug:
        logger.debug('sample data: \n{data}', data=sample_res[1])
        logger.debug('impulse_res: \n{data}', data=impulse_res[1])

    sr = sample_res[1]
    ir = impulse_res[1]

    if debug:
        logger.debug('sample data as float: \n{data}', data=sr)

    if args['output'] == 'convolve':
        # use numpy convolve
        logger.info('Using numpy.convolve')
        out_0 = numpy.convolve(sr[:, 0], ir[:, 0])
        if stereo:
            out_1 = numpy.convolve(sr[:, 1], ir[:, 1])
    else:
        # use scipy fftconvolve
        logger.info('Using scipy.signal.fftconvolve')
        out_0 = signal.fftconvolve(sr[:, 0], ir[:, 0])
        if stereo:
            out_1 = signal.fftconvolve(sr[:, 1], ir[:, 1])

    if stereo:
        # merge channels
        out = numpy.vstack((out_0, out_1)).T
    else:
        out = out_0.T

    # save output
    wavfile.write(args['output'], sample_res[0], out, normalized=True)

    if args['play']:
        playsound(args['output'])
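A hypothetical invocation of convolve() above; the file names are placeholders, and the wavfile in scope must be the fork that accepts the normalized/forcestereo keywords. Note that args['output'] does double duty in the original code: it selects numpy.convolve when it equals the literal string 'convolve', and it is also the path handed to wavfile.write.

args = {
    'input': 'dry_guitar.wav',    # placeholder source file
    'impulse': 'hall_ir.wav',     # placeholder impulse response
    'output': 'wet_guitar.wav',   # anything other than 'convolve' selects fftconvolve
    'channels': 'stereo',
    'debug': False,
    'play': False,
}
convolve(args)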
Example no. 5
def load_wav(filename):
    try:
        wavedata=wavfile.read(filename)
        samplerate=int(wavedata[0])
        smp=wavedata[1]*(1.0/32768.0)
        if len(smp.shape)>1: #convert to mono
            smp=(smp[:,0]+smp[:,1])*0.5
        return (samplerate,smp)
    except Exception:
        print("Error loading wav: " + filename)
        return None
Example no. 6
def load_wav(filename):
    try:
        wavedata=wavfile.read(filename)
        samplerate=int(wavedata[0])
        smp=wavedata[1]*(1.0/32768.0)
        smp=smp.transpose()
        if len(smp.shape)==1: #convert to stereo
            smp=tile(smp,(2,1))
        return (samplerate,smp)
    except Exception:
        print("Error loading wav: " + filename)
        return None
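A small usage sketch for the stereo variant above, which returns a 2 x N channels-first array; 'loop.wav' is a placeholder name.

result = load_wav('loop.wav')
if result is not None:
    samplerate, smp = result
    print(samplerate, smp.shape)  # e.g. 44100 (2, N)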
Example no. 7
def levels(inputs: list, track, outfile, ffmpeg, ffprobe, temp, log):

    file = inputs[0]

    tracks = ffprobe.getAudioTracks(file)
    fps = ffprobe.getFrameRate(file)

    # Split audio tracks into: 0.wav, 1.wav, etc.
    for trackNum in range(tracks):
        ffmpeg.run([
            '-i', file, '-ac', '2', '-map', f'0:a:{trackNum}',
            f'{temp}{sep()}{trackNum}.wav'
        ])

    track = 0

    # Read only one audio file.
    if (os.path.isfile(f'{temp}{sep()}{track}.wav')):
        sampleRate, audioData = read(f'{temp}{sep()}{track}.wav')
    else:
        log.error('Audio track not found!')

    audioSampleCount = audioData.shape[0]

    def getMaxVolume(s: np.ndarray) -> float:
        maxv = float(np.max(s))
        minv = float(np.min(s))
        return max(maxv, -minv)

    maxAudioVolume = getMaxVolume(audioData)

    samplesPerFrame = sampleRate / fps
    audioFrameCount = int(math.ceil(audioSampleCount / samplesPerFrame))

    with open(outfile, 'w') as out:
        for i in range(audioFrameCount):
            start = int(i * samplesPerFrame)
            end = min(int((i + 1) * samplesPerFrame), audioSampleCount)
            audiochunks = audioData[start:end]
            out.write(f'{getMaxVolume(audiochunks) / maxAudioVolume}\n')

    log.debug('Deleting temp dir')

    from shutil import rmtree
    try:
        rmtree(temp)
    except PermissionError:
        from time import sleep
        sleep(1)
        try:
            rmtree(temp)
        except PermissionError:
            log.debug('Failed to delete temp dir.')
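The loop above writes one normalized max-volume value per video frame, one per line. A one-line sketch for reading that file back ('levels.txt' stands in for whatever outfile was used):

import numpy as np
frame_levels = np.loadtxt('levels.txt')  # shape: (audioFrameCount,)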
Example no. 8
def load_wav(filename):
    try:
        wavedata = wavfile.read(filename)
        samplerate = int(wavedata[0])
        smp = wavedata[1] * (1.0 / 32768.0)
        smp = smp.transpose()
        if len(smp.shape) == 1:  #convert to 2D-array
            smp = array([smp])
        return (samplerate, smp)
    except Exception as e:
        print("error: " + str(e))
        print("Error loading wav: " + filename)
        return None
Example no. 9
def open_wav(file_name, make_plot=False):

    # TODO add option for other file types -  maybe change the name from open_wav?
    # TODO trim the vector to make all the length uniform
    # TODO normalize the vector so all the values are between 0 and 1
    # TODO add option to play the sound
    # TODO Check to make the inputs are the right variable types

    # use read to return the sample rate and data from the .wav file
    samp_rate, data, bits = read(file_name, normalized=True)

    # create a time vector that corresponds to the real time of each data point
    endtime = ((1 / samp_rate) * data.shape[0])
    time = np.linspace(0, endtime, num=data.shape[0])

    if make_plot:
        # plot the file to just to prove we did it!
        plt.plot(time, data, '.')
        plt.show()

    return samp_rate, time, data
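A brief usage sketch, assuming the wavfile fork used here (read with normalized=True returns rate, data, and bit depth) and a placeholder file name.

samp_rate, time, data = open_wav('tone.wav', make_plot=False)
print(samp_rate, data.shape, time[-1])  # sample rate, data shape, clip length in seconds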
Example no. 10
def parse_wav(input_filename,
              output_filename,
              loop_start=None,
              loop_end=None,
              channels=2,
              rate=48000):
    input_filename = audio.get_processed_wav(input_filename,
                                             channels=channels,
                                             rate=rate,
                                             bits=16)

    if not input_filename:
        return

    rate, data, bits, loops = wavfile.read(input_filename, readloops=True)
    channels = 1 if len(data.shape) == 1 else data.shape[1]

    if len(loops) > 0:
        if len(loops) > 1:
            print("Found %d loops, only reading first loop" % len(loops))
        loop_start, loop_end = loops[0]
    else:
        loop_start = 0
        loop_end = 0

    output = adpcmwave.encode_data(data, channels)

    with open(output_filename, "wb") as outfile:
        outfile.write("BMP\0".encode('ascii'))
        outfile.write(struct.pack(">I", len(output)))
        outfile.write(struct.pack(">I", loop_start))
        outfile.write(struct.pack(">I", loop_end))
        outfile.write(struct.pack("<H", channels))
        outfile.write(struct.pack("<H", bits))
        outfile.write(struct.pack(">I", rate))
        outfile.write(bytearray([0] * 8))
        outfile.write(output)
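A complementary sketch that reads the header back; the field layout is taken directly from the writes above, but the function itself is not part of the original source.

import struct

def read_bmp_header(filename):
    with open(filename, 'rb') as infile:
        assert infile.read(4) == b'BMP\0'
        data_len, loop_start, loop_end = struct.unpack('>III', infile.read(12))
        channels, bits = struct.unpack('<HH', infile.read(4))
        rate, = struct.unpack('>I', infile.read(4))
        infile.read(8)  # reserved / zero padding
        return data_len, loop_start, loop_end, channels, bits, rate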
Example no. 11
def load_audio(filename, convert_to_mono = True, resampled_rate = 22050):
    """
    Loads audio from wav file and does preliminary processing.

    """
    # load wavfile
    rate, audio = wavfile.read(filename)
    # scale to [-1, 1). assumes dtype is correctly set for input array
    # (scipy sets dtype correctly).
    audio = audio / (-np.iinfo(audio.dtype).min)

    # make at least 2-d with shape (samples, channels).
    if audio.ndim == 1:
        audio = audio[:, np.newaxis]

    # sum energy in each channel to convert to mono.
    if convert_to_mono and audio.shape[1] > 1:
        audio = audio.sum(axis = 1) / audio.shape[1]

    # resample to target rate if requested (don't if empty).
    if resampled_rate is not None and resampled_rate != rate and audio.shape[0]:
        audio = resample(audio, resampled_rate, rate)
        rate  = resampled_rate

    return rate, audio
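A minimal usage sketch; 'speech.wav' is a placeholder, and resample() is assumed to be the helper this module imports elsewhere.

rate, audio = load_audio('speech.wav', convert_to_mono=True, resampled_rate=22050)
print(rate, audio.shape)  # resampled rate and the waveform's shape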
Example no. 12
def originalMethod(ffmpeg, vidFile, outFile, frameMargin, silentT,
                   LOUD_THRESHOLD, SAMPLE_RATE, audioBit, SILENT_SPEED,
                   VIDEO_SPEED, KEEP_SEP, BACK_MUS, BACK_VOL, NEW_TRAC,
                   BASE_TRAC, COMBINE_TRAC, verbose, HWACCEL, CACHE):
    """
    This function takes in the path to the input file (and a bunch of other options)
    and outputs a new output file. This is both the safest and slowest of all methods.

    Safest in the sense that if a feature isn't supported here, like handling certain
    commands or supporting obscure file types, it's not supported anywhere.
    """

    print('Running from originalMethod.py')

    speeds = [SILENT_SPEED, VIDEO_SPEED]
    TEMP = tempfile.mkdtemp()

    dotIndex = vidFile.rfind('.')
    extension = vidFile[dotIndex:]
    if (outFile == ''):
        outFile = vidFile[:dotIndex] + '_ALTERED' + extension

    if (not os.path.isfile(vidFile)):
        print('Could not find file:', vidFile)
        sys.exit(1)

    fileSize = os.stat(vidFile).st_size

    try:
        frameRate = getFrameRate(ffmpeg, vidFile)
    except AttributeError:
        print('Warning! frame rate detection has failed, defaulting to 30.')
        frameRate = 30

    SKIP = False
    print(CACHE)
    try:
        os.mkdir(CACHE)
    except OSError:
        # There must be a cache already; check if it's usable.
        if (os.path.isfile(f'{CACHE}/cache.txt')):
            file = open(f'{CACHE}/cache.txt', 'r')
            x = file.read().splitlines()
            baseFile = os.path.basename(vidFile)
            if (x[:3] == [baseFile, str(frameRate),
                          str(fileSize)] and x[4] == str(COMBINE_TRAC)):
                print('Using cache.')
                SKIP = True
                tracks = int(x[3])
            file.close()
        if (not SKIP):
            print('Removing cache')
            rmtree(CACHE)
            os.mkdir(CACHE)

    if (not SKIP):
        # Videos can have more than one audio track, so we need to extract them all.
        tracks = vidTracks(vidFile, ffmpeg)

        if (BASE_TRAC >= tracks):
            print("Error! You choose a track that doesn't exist.")
            print(f'There are only {tracks-1} tracks. (starting from 0)')
            sys.exit(1)
        for trackNumber in range(tracks):
            cmd = [ffmpeg]
            if (HWACCEL is not None):
                cmd.extend(['-hwaccel', HWACCEL])
            cmd.extend([
                '-i', vidFile, '-map', f'0:a:{trackNumber}',
                f'{CACHE}/{trackNumber}.wav'
            ])
            if (not verbose):
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)

        if (COMBINE_TRAC):
            from pydub import AudioSegment

            for i in range(tracks):
                if (not os.path.isfile(f'{CACHE}/{i}.wav')):
                    print('Error! Audio file(s) could not be found.')
                    sys.exit(1)
                if (i == 0):
                    allAuds = AudioSegment.from_file(f'{CACHE}/{i}.wav')
                else:
                    newTrack = AudioSegment.from_file(f'{CACHE}/{i}.wav')
                    allAuds = allAuds.overlay(newTrack)
            allAuds.export(f'{CACHE}/0.wav', format='wav')
            tracks = 1

        # Now deal with the video.
        conwrite('Splitting video into jpgs. (This can take a while)')
        cmd = [ffmpeg]
        if (HWACCEL is not None):
            cmd.extend(['-hwaccel', HWACCEL])
        cmd.extend(
            ['-i', vidFile, '-qscale:v', '1', f'{CACHE}/frame%06d.jpg'])
        if (verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

    if (NEW_TRAC is None):
        sampleRate, audioData = read(f'{CACHE}/{BASE_TRAC}.wav')
    else:
        cmd = [
            ffmpeg, '-i', NEW_TRAC, '-ac', '2', '-ar',
            str(SAMPLE_RATE), '-vn', f'{TEMP}/NEW_TRAC.wav'
        ]
        if (verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        sampleRate, audioData = read(f'{TEMP}/NEW_TRAC.wav')
    audioSampleCount = audioData.shape[0]
    maxAudioVolume = getMaxVolume(audioData)

    samplesPerFrame = sampleRate / frameRate
    audioFrameCount = int(math.ceil(audioSampleCount / samplesPerFrame))
    hasLoudAudio = np.zeros((audioFrameCount), dtype=np.uint8)

    for i in range(audioFrameCount):
        start = int(i * samplesPerFrame)
        end = min(int((i + 1) * samplesPerFrame), audioSampleCount)
        audiochunks = audioData[start:end]
        maxchunksVolume = getMaxVolume(audiochunks) / maxAudioVolume
        if (maxchunksVolume >= LOUD_THRESHOLD):
            hasLoudAudio[i] = 2
        elif (maxchunksVolume >= silentT):
            hasLoudAudio[i] = 1

    chunks = [[0, 0, 0]]
    shouldIncludeFrame = np.zeros((audioFrameCount), dtype=np.uint8)
    for i in range(audioFrameCount):
        start = int(max(0, i - frameMargin))
        end = int(min(audioFrameCount, i + 1 + frameMargin))
        shouldIncludeFrame[i] = min(1, np.max(hasLoudAudio[start:end]))

        if (i >= 1 and shouldIncludeFrame[i] != shouldIncludeFrame[i - 1]):
            chunks.append([chunks[-1][1], i, shouldIncludeFrame[i - 1]])

    chunks.append([chunks[-1][1], audioFrameCount, shouldIncludeFrame[i - 1]])
    chunks = chunks[1:]

    zooms = getZooms(chunks, audioFrameCount, hasLoudAudio, frameMargin,
                     frameRate)

    handleAudio(ffmpeg, tracks, CACHE, TEMP, silentT, frameMargin, SAMPLE_RATE,
                audioBit, verbose, SILENT_SPEED, VIDEO_SPEED, chunks,
                frameRate)

    splitVideo(ffmpeg, chunks, speeds, frameRate, zooms, samplesPerFrame,
               SAMPLE_RATE, audioData, extension, verbose, TEMP, CACHE)

    if (BACK_MUS is not None):
        from pydub import AudioSegment

        cmd = [
            ffmpeg, '-i', f'{TEMP}/new{BASE_TRAC}.wav', '-vn', '-ar', '44100',
            '-ac', '2', '-ab', '192k', '-f', 'mp3', f'{TEMP}/output.mp3'
        ]
        subprocess.call(cmd)

        vidSound = AudioSegment.from_file(f'{TEMP}/output.mp3')

        back = AudioSegment.from_file(BACK_MUS)
        if (len(back) > len(vidSound)):
            back = back[:len(vidSound)]

        def match_target_amplitude(back, vidSound, target):
            diff = back.dBFS - vidSound.dBFS
            change_in_dBFS = target - diff
            return back.apply_gain(change_in_dBFS)

        # Fade the background music out by 1 second.
        back = match_target_amplitude(back, vidSound, BACK_VOL).fade_out(1000)
        back.export(f'{TEMP}/new{tracks}.wav', format='wav')

        if (not os.path.isfile(f'{TEMP}/new{tracks}.wav')):
            print('Error! The new music audio file was not created.')
            sys.exit(1)
        tracks += 1

    if (KEEP_SEP):
        # Mux the video and audio so that there are still multiple audio tracks.
        cmd = [ffmpeg, '-y']
        if (HWACCEL is not None):
            cmd.extend(['-hwaccel', HWACCEL])
        for i in range(tracks):
            cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
        cmd.extend(['-i', f'{TEMP}/output{extension}'])
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])
        cmd.extend([
            '-map', f'{tracks}:v:0', '-c:v', 'copy', '-movflags', '+faststart',
            outFile
        ])
        if (verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)
    else:
        if (tracks > 1):
            cmd = [ffmpeg]
            for i in range(tracks):
                cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
            cmd.extend([
                '-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{TEMP}/newAudioFile.wav'
            ])
            if (verbose):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
        else:
            os.rename(f'{TEMP}/new0.wav', f'{TEMP}/newAudioFile.wav')

        cmd = [ffmpeg, '-y']
        if (HWACCEL is not None):
            cmd.extend(['-hwaccel', HWACCEL])
        cmd.extend([
            '-i', f'{TEMP}/newAudioFile.wav', '-i',
            f'{TEMP}/output{extension}', '-c:v', 'copy', '-movflags',
            '+faststart', outFile
        ])
        if (verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

    with open(f'{TEMP}/Renames.txt', 'r') as f:
        renames = f.read().splitlines()
        for i in range(0, len(renames), 2):
            os.rename(renames[i + 1], renames[i])

    rmtree(TEMP)

    # Create cache.txt to see if the created cache is usable for next time.
    if (BACK_MUS is not None):
        tracks -= 1
    file = open(f'{CACHE}/cache.txt', 'w')
    baseFile = os.path.basename(vidFile)
    file.write(
        f'{baseFile}\n{frameRate}\n{fileSize}\n{tracks}\n{COMBINE_TRAC}\n')
    file.close()

    conwrite('')
    return outFile
Example no. 13
def main():
    dirPath = os.path.dirname(os.path.realpath(__file__))
    # Fixes pip not able to find other included modules.
    sys.path.append(os.path.abspath(dirPath))

    from usefulFunctions import Log, Timer

    option_data = options()

    # Print the version if only the -v option is added.
    if (sys.argv[1:] == ['-v'] or sys.argv[1:] == ['-V']):
        print(f'Auto-Editor version {version}\nPlease use --version instead.')
        sys.exit()

    # If the user just runs: $ auto-editor
    if (sys.argv[1:] == []):
        # Print
        print(
            '\nAuto-Editor is an automatic video/audio creator and editor.\n')
        print(
            'By default, it will detect silence and create a new video with ')
        print(
            'those sections cut out. By changing some of the options, you can')
        print(
            'export to a traditional editor like Premiere Pro and adjust the')
        print(
            'edits there, adjust the pacing of the cuts, and change the method'
        )
        print('of editing like using audio loudness and video motion to judge')
        print('making cuts.')
        print(
            '\nRun:\n    auto-editor --help\n\nTo get the list of options.\n')
        sys.exit()

    from vanparse import ParseOptions
    args = ParseOptions(sys.argv[1:], Log(), option_data)

    log = Log(args.debug, args.show_ffmpeg_debug, args.quiet)
    log.debug('')

    # Print the help screen for the entire program.
    if (args.help):
        print('\n  Have an issue? Make an issue. '\
            'Visit https://github.com/wyattblue/auto-editor/issues\n')
        print('  The help option can also be used on a specific option:')
        print('      auto-editor --frame_margin --help\n')
        for option in option_data:
            if (option['grouping'] == 'auto-editor'):
                print(' ', ', '.join(option['names']) + ':', option['help'])
                if (option['action'] == 'grouping'):
                    print('     ...')
        print('')
        sys.exit()

    del option_data

    if (args.version):
        print('Auto-Editor version', version)
        sys.exit()

    from usefulFunctions import getBinaries, pipeToConsole, ffAddDebug
    from mediaMetadata import vidTracks, getSampleRate, getAudioBitrate
    from mediaMetadata import getVideoCodec, ffmpegFPS
    from wavfile import read

    ffmpeg, ffprobe = getBinaries(platform.system(), dirPath, args.my_ffmpeg)
    makingDataFile = (args.export_to_premiere or args.export_to_resolve
                      or args.export_as_json)
    is64bit = '64-bit' if sys.maxsize > 2**32 else '32-bit'

    if (args.debug and args.input == []):
        print('Python Version:', platform.python_version(), is64bit)
        print('Platform:', platform.system(), platform.release())
        # Platform can be 'Linux', 'Darwin' (macOS), 'Java', 'Windows'
        ffmpegVersion = pipeToConsole([ffmpeg, '-version']).split('\n')[0]
        ffmpegVersion = ffmpegVersion.replace('ffmpeg version', '').strip()
        ffmpegVersion = ffmpegVersion.split(' ')[0]
        print('FFmpeg path:', ffmpeg)
        print('FFmpeg version:', ffmpegVersion)
        print('Auto-Editor version', version)
        sys.exit()

    if (is64bit == '32-bit'):
        log.warning('You have the 32-bit version of Python, which may lead to ' \
            'memory crashes.')

    from usefulFunctions import isLatestVersion
    if (not args.quiet and isLatestVersion(version, log)):
        log.print('\nAuto-Editor is out of date. Run:\n')
        log.print('    pip3 install -U auto-editor')
        log.print('\nto upgrade to the latest version.\n')

    from argsCheck import hardArgsCheck, softArgsCheck
    hardArgsCheck(args, log)
    args = softArgsCheck(args, log)

    from validateInput import validInput
    inputList = validInput(args.input, ffmpeg, log)

    timer = Timer(args.quiet)

    # Figure out the output file names.

    def newOutputName(oldFile: str, exa=False, data=False, exc=False) -> str:
        dotIndex = oldFile.rfind('.')
        if (exc):
            return oldFile[:dotIndex] + '.json'
        elif (data):
            return oldFile[:dotIndex] + '.xml'
        ext = oldFile[dotIndex:]
        if (exa):
            ext = '.wav'
        return oldFile[:dotIndex] + '_ALTERED' + ext

    if (len(args.output_file) < len(inputList)):
        for i in range(len(inputList) - len(args.output_file)):
            args.output_file.append(
                newOutputName(inputList[i], args.export_as_audio,
                              makingDataFile, args.export_as_json))

    TEMP = tempfile.mkdtemp()
    log.debug(f'\n   - Temp Directory: {TEMP}')

    if (args.combine_files):
        # Combine video files, then set input to 'combined.mp4'.
        cmd = [ffmpeg, '-y']
        for fileref in inputList:
            cmd.extend(['-i', fileref])
        cmd.extend([
            '-filter_complex', f'[0:v]concat=n={len(inputList)}:v=1:a=1',
            '-codec:v', 'h264', '-pix_fmt', 'yuv420p', '-strict', '-2',
            f'{TEMP}/combined.mp4'
        ])
        cmd = ffAddDebug(cmd, log.is_ffmpeg)
        subprocess.call(cmd)
        inputList = [f'{TEMP}/combined.mp4']

    speeds = [args.silent_speed, args.video_speed]
    log.debug(f'   - Speeds: {speeds}')

    audioExtensions = [
        '.wav', '.mp3', '.m4a', '.aiff', '.flac', '.ogg', '.oga', '.acc',
        '.nfa', '.mka'
    ]

    # videoExtensions = ['.mp4', '.mkv', '.mov', '.webm', '.ogv']

    for i, INPUT_FILE in enumerate(inputList):
        fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]

        chunks = None
        if (fileFormat == '.json'):
            log.debug('Reading .json file')
            from makeCutList import readCutList
            INPUT_FILE, chunks, speeds = readCutList(INPUT_FILE, version, log)

            newOutput = newOutputName(INPUT_FILE, args.export_as_audio,
                                      makingDataFile, False)

            fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]
        else:
            newOutput = args.output_file[i]

        log.debug(f'   - INPUT_FILE: {INPUT_FILE}')
        log.debug(f'   - newOutput: {newOutput}')

        if (os.path.isfile(newOutput) and INPUT_FILE != newOutput):
            log.debug(f'  Removing already existing file: {newOutput}')
            os.remove(newOutput)

        sampleRate = getSampleRate(INPUT_FILE, ffmpeg, args.sample_rate)
        audioBitrate = getAudioBitrate(INPUT_FILE, ffprobe, log,
                                       args.audio_bitrate)

        log.debug(f'   - sampleRate: {sampleRate}')
        log.debug(f'   - audioBitrate: {audioBitrate}')

        audioFile = fileFormat in audioExtensions
        if (audioFile):
            if (args.force_fps_to is None):
                fps = 30  # Audio files don't have frames, so give fps a dummy value.
            else:
                fps = args.force_fps_to
            if (args.force_tracks_to is None):
                tracks = 1
            else:
                tracks = args.force_tracks_to
            cmd = [ffmpeg, '-y', '-i', INPUT_FILE]
            if (audioBitrate is not None):
                cmd.extend(['-b:a', audioBitrate])
            cmd.extend(
                ['-ac', '2', '-ar', sampleRate, '-vn', f'{TEMP}/fastAud.wav'])
            cmd = ffAddDebug(cmd, log.is_ffmpeg)
            subprocess.call(cmd)

            sampleRate, audioData = read(f'{TEMP}/fastAud.wav')
        else:
            if (args.force_fps_to is not None):
                fps = args.force_fps_to
            elif (args.export_to_premiere):
                # This is the default fps value for Premiere Pro Projects.
                fps = 29.97
            else:
                # Grab fps to know what the output video's fps should be.
                # DaVinci Resolve doesn't need fps, but grab it anyway just in case.
                fps = ffmpegFPS(ffmpeg, INPUT_FILE, log)

            tracks = args.force_tracks_to
            if (tracks is None):
                tracks = vidTracks(INPUT_FILE, ffprobe, log)

            if (args.cut_by_this_track >= tracks):
                allTracks = ''
                for trackNum in range(tracks):
                    allTracks += f'Track {trackNum}\n'

                if (tracks == 1):
                    message = f'is only {tracks} track'
                else:
                    message = f'are only {tracks} tracks'
                log.error("You choose a track that doesn't exist.\n" \
                    f'There {message}.\n {allTracks}')

            # Get video codec
            vcodec = getVideoCodec(INPUT_FILE, ffmpeg, log, args.video_codec)

            # Split audio tracks into: 0.wav, 1.wav, etc.
            for trackNum in range(tracks):
                cmd = [ffmpeg, '-y', '-i', INPUT_FILE]
                if (audioBitrate is not None):
                    cmd.extend(['-ab', audioBitrate])
                cmd.extend([
                    '-ac', '2', '-ar', sampleRate, '-map', f'0:a:{trackNum}',
                    f'{TEMP}/{trackNum}.wav'
                ])
                cmd = ffAddDebug(cmd, log.is_ffmpeg)
                subprocess.call(cmd)

            # Check if the `--cut_by_all_tracks` flag has been set or not.
            if (args.cut_by_all_tracks):
                # Combine all audio tracks into one audio file, then read.
                cmd = [
                    ffmpeg, '-y', '-i', INPUT_FILE, '-filter_complex',
                    f'[0:a]amerge=inputs={tracks}', '-map', 'a', '-ar',
                    sampleRate, '-ac', '2', '-f', 'wav', f'{TEMP}/combined.wav'
                ]
                cmd = ffAddDebug(cmd, log.is_ffmpeg)
                subprocess.call(cmd)

                sampleRate, audioData = read(f'{TEMP}/combined.wav')
            else:
                # Read only one audio file.
                if (os.path.isfile(f'{TEMP}/{args.cut_by_this_track}.wav')):
                    sampleRate, audioData = read(
                        f'{TEMP}/{args.cut_by_this_track}.wav')
                else:
                    log.bug('Audio track not found!')

        log.debug(f'   - Frame Rate: {fps}')
        if (chunks is None):
            from cutting import audioToHasLoud, motionDetection

            audioList = None
            motionList = None
            if ('audio' in args.edit_based_on):
                log.debug('Analyzing audio volume.')
                audioList = audioToHasLoud(audioData, sampleRate,
                                           args.silent_threshold, fps, log)

            if ('motion' in args.edit_based_on):
                log.debug('Analyzing video motion.')
                motionList = motionDetection(INPUT_FILE,
                                             ffprobe,
                                             args.motion_threshold,
                                             log,
                                             width=args.width,
                                             dilates=args.dilates,
                                             blur=args.blur)

                if (audioList is not None):
                    if (len(audioList) != len(motionList)):
                        log.debug(f'audioList Length:  {len(audioList)}')
                        log.debug(f'motionList Length: {len(motionList)}')
                    if (len(audioList) > len(motionList)):
                        log.debug(
                            'Reducing the size of audioList to match motionList.'
                        )
                        audioList = audioList[:len(motionList)]
                    elif (len(motionList) > len(audioList)):
                        log.debug(
                            'Reducing the size of motionList to match audioList.'
                        )
                        motionList = motionList[:len(audioList)]

            from cutting import combineArrs, applySpacingRules

            hasLoud = combineArrs(audioList, motionList, args.edit_based_on,
                                  log)
            del audioList, motionList

            chunks, includeFrame = applySpacingRules(
                hasLoud, fps, args.frame_margin, args.min_clip_length,
                args.min_cut_length, args.ignore, args.cut_out, log)
            del hasLoud
        else:
            from cutting import generateIncludes

            includeFrame = generateIncludes(chunks, log)

        clips = []
        numCuts = len(chunks)
        for chunk in chunks:
            if (speeds[chunk[2]] != 99999):
                clips.append([chunk[0], chunk[1], speeds[chunk[2]] * 100])

        if (fps is None and not audioFile):
            if (makingDataFile):
                dotIndex = INPUT_FILE.rfind('.')
                end = '_constantFPS' + INPUT_FILE[dotIndex:]
                constantLoc = INPUT_FILE[:dotIndex] + end
            else:
                constantLoc = f'{TEMP}/constantVid{fileFormat}'
            cmd = [
                ffmpeg, '-y', '-i', INPUT_FILE, '-filter:v', 'fps=fps=30',
                constantLoc
            ]
            cmd = ffAddDebug(cmd, log.is_ffmpeg)
            subprocess.call(cmd)
            INPUT_FILE = constantLoc

        if (args.export_as_json):
            from makeCutList import makeCutList
            makeCutList(INPUT_FILE, newOutput, version, chunks, speeds, log)
            continue

        if (args.preview):
            newOutput = None
            from preview import preview
            preview(INPUT_FILE, chunks, speeds, fps, audioFile, log)
            continue

        if (args.export_to_premiere):
            from premiere import exportToPremiere
            exportToPremiere(INPUT_FILE, TEMP, newOutput, clips, tracks,
                             sampleRate, audioFile, log)
            continue
        if (args.export_to_resolve):
            duration = chunks[len(chunks) - 1][1]
            from resolve import exportToResolve
            exportToResolve(INPUT_FILE, newOutput, clips, duration, sampleRate,
                            audioFile, log)
            continue
        if (audioFile):
            from fastAudio import fastAudio, handleAudio
            theFile = handleAudio(ffmpeg, INPUT_FILE, audioBitrate,
                                  str(sampleRate), TEMP, log)
            fastAudio(theFile, newOutput, chunks, speeds, log, fps)
            continue

        from fastVideo import handleAudioTracks, fastVideo, muxVideo
        continueVid = handleAudioTracks(ffmpeg, newOutput,
                                        args.export_as_audio, tracks,
                                        args.keep_tracks_seperate, chunks,
                                        speeds, fps, TEMP, log)
        if (continueVid):
            fastVideo(INPUT_FILE, chunks, includeFrame, speeds, fps, TEMP, log)
            muxVideo(ffmpeg, newOutput, args.keep_tracks_seperate, tracks,
                     args.video_bitrate, args.tune, args.preset, vcodec,
                     args.constant_rate_factor, TEMP, log)

    if (newOutput is not None and not os.path.isfile(newOutput)):
        log.bug(f'The file {newOutput} was not created.')

    if (not args.preview and not makingDataFile):
        timer.stop()

    if (not args.preview and makingDataFile):
        from usefulFunctions import humanReadableTime
        # Assume making each cut takes about 30 seconds.
        timeSave = humanReadableTime(numCuts * 30)

        s = 's' if numCuts != 1 else ''
        log.print(f'Auto-Editor made {numCuts} cut{s}', end='')
        log.print(
            f', which would have taken about {timeSave} if edited manually.')

    if (not args.no_open):
        from usefulFunctions import smartOpen
        smartOpen(newOutput, log)

    rmtree(TEMP)
Example no. 14
def main():
    parser = argparse.ArgumentParser(prog='Auto-Editor', usage='auto-editor [input] [options]')

    basic = parser.add_argument_group('Basic Options')
    basic.add_argument('input', nargs='*',
        help='the path to the file(s), folder, or url you want edited.')
    basic.add_argument('--frame_margin', '-m', type=int, default=6, metavar='6',
        help='set how many "silent" frames of on either side of "loud" sections be included.')
    basic.add_argument('--silent_threshold', '-t', type=float_type, default=0.04, metavar='0.04',
        help='set the volume that frames audio needs to surpass to be "loud". (0-1)')
    basic.add_argument('--video_speed', '--sounded_speed', '-v', type=float_type, default=1.00, metavar='1',
        help='set the speed that "loud" sections should be played at.')
    basic.add_argument('--silent_speed', '-s', type=float_type, default=99999, metavar='99999',
        help='set the speed that "silent" sections should be played at.')
    basic.add_argument('--output_file', '-o', nargs='*', metavar='',
        help='set the name(s) of the new output.')

    advance = parser.add_argument_group('Advanced Options')
    advance.add_argument('--no_open', action='store_true',
        help='do not open the file after editing is done.')
    advance.add_argument('--min_clip_length', '-mclip', type=int, default=3, metavar='3',
        help='set the minimum length a clip can be. If a clip is too short, cut it.')
    advance.add_argument('--min_cut_length', '-mcut', type=int, default=6, metavar='6',
        help="set the minimum length a cut can be. If a cut is too short, don't cut")
    advance.add_argument('--combine_files', action='store_true',
        help='combine all input files into one before editing.')
    advance.add_argument('--preview', action='store_true',
        help='show stats on how the input will be cut.')

    cutting = parser.add_argument_group('Cutting Options')
    cutting.add_argument('--cut_by_this_audio', '-ca', type=file_type, metavar='',
        help="base cuts by this audio file instead of the video's audio.")
    cutting.add_argument('--cut_by_this_track', '-ct', type=int, default=0, metavar='0',
        help='base cuts by a different audio track in the video.')
    cutting.add_argument('--cut_by_all_tracks', '-cat', action='store_true',
        help='combine all audio tracks into one before basing cuts.')
    cutting.add_argument('--keep_tracks_seperate', action='store_true',
        help="don't combine audio tracks when exporting.")

    debug = parser.add_argument_group('Developer/Debugging Options')
    debug.add_argument('--my_ffmpeg', action='store_true',
        help='use your ffmpeg and other binaries instead of the ones packaged.')
    debug.add_argument('--version', action='store_true',
        help='show which auto-editor you have.')
    debug.add_argument('--debug', '--verbose', action='store_true',
        help='show helpful debugging values.')

    misc = parser.add_argument_group('Export Options')
    misc.add_argument('--export_as_audio', '-exa', action='store_true',
        help='export as a WAV audio file.')
    misc.add_argument('--export_to_premiere', '-exp', action='store_true',
        help='export as an XML file for Adobe Premiere Pro instead of outputting a media file.')
    misc.add_argument('--export_to_resolve', '-exr', action='store_true',
        help='export as an XML file for DaVinci Resolve instead of outputting a media file.')

    size = parser.add_argument_group('Size Options')
    size.add_argument('--video_bitrate', '-vb', metavar='',
        help='set the number of bits per second for video.')
    size.add_argument('--audio_bitrate', '-ab', metavar='',
        help='set the number of bits per second for audio.')
    size.add_argument('--sample_rate', '-r', type=sample_rate_type, metavar='',
        help='set the sample rate of the input and output videos.')
    size.add_argument('--video_codec', '-vcodec', metavar='',
        help='set the video codec for the output file.')

    args = parser.parse_args()

    dirPath = os.path.dirname(os.path.realpath(__file__))
    # fixes pip not able to find other included modules.
    sys.path.append(os.path.abspath(dirPath))

    if(args.version):
        print('Auto-Editor version', version)
        sys.exit()

    if(args.export_to_premiere):
        print('Exporting to Adobe Premiere Pro XML file.')
    if(args.export_to_resolve):
        print('Exporting to DaVinci Resolve XML file.')
    if(args.export_as_audio):
        print('Exporting as audio.')

    newF = None
    newP = None
    if(platform.system() == 'Windows' and not args.my_ffmpeg):
        newF = os.path.join(dirPath, 'win-ffmpeg/bin/ffmpeg.exe')
        newP = os.path.join(dirPath, 'win-ffmpeg/bin/ffprobe.exe')
    if(platform.system() == 'Darwin' and not args.my_ffmpeg):
        newF = os.path.join(dirPath, 'mac-ffmpeg/bin/ffmpeg')
        newP = os.path.join(dirPath, 'mac-ffmpeg/bin/ffprobe')
    if(newF is not None and os.path.isfile(newF)):
        ffmpeg = newF
        ffprobe = newP
    else:
        ffmpeg = 'ffmpeg'
        ffprobe = 'ffprobe'

    makingDataFile = args.export_to_premiere or args.export_to_resolve

    is64bit = '64-bit' if sys.maxsize > 2**32 else '32-bit'

    if(args.debug):
        print('Python Version:', platform.python_version(), is64bit)
        print('Platform:', platform.system())
        # Platform can be 'Linux', 'Darwin' (macOS), 'Java', 'Windows'

        print('FFmpeg path:', ffmpeg)
        print('Auto-Editor version', version)
        if(args.input == []):
            sys.exit()

    from usefulFunctions import Log
    log = Log(3 if args.debug else 2)

    if(is64bit == '32-bit'):
        # I should have put this warning a long time ago.
        log.warning("You have the 32-bit version of Python, which means you won't be " \
            'able to handle long videos.')

    if(args.frame_margin < 0):
        log.error('Frame margin cannot be negative.')

    if(args.input == []):
        log.error('The following arguments are required: input\n' \
            'In other words, you need the path to a video or an audio file ' \
            'so that auto-editor can do the work for you.')

    if(args.silent_speed <= 0 or args.silent_speed > 99999):
        args.silent_speed = 99999
    if(args.video_speed <= 0 or args.video_speed > 99999):
        args.video_speed = 99999

    inputList = []
    for myInput in args.input:
        if(os.path.isdir(myInput)):
            def validFiles(path):
                for f in os.listdir(path):
                    if(not f.startswith('.') and not f.endswith('.xml')
                        and not f.endswith('.png') and not f.endswith('.md')
                        and not os.path.isdir(f)):
                        yield os.path.join(path, f)

            inputList += sorted(validFiles(myInput))
        elif(os.path.isfile(myInput)):
            inputList.append(myInput)
        elif(myInput.startswith('http://') or myInput.startswith('https://')):
            print('URL detected, using youtube-dl to download from webpage.')
            basename = re.sub(r'\W+', '-', myInput)
            cmd = ['youtube-dl', '-f', 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4',
                   myInput, '--output', basename, '--no-check-certificate']
            if(ffmpeg != 'ffmpeg'):
                cmd.extend(['--ffmpeg-location', ffmpeg])
            subprocess.call(cmd)
            inputList.append(basename + '.mp4')
        else:
            log.error('Could not find file: ' + myInput)

    if(args.output_file is None):
        args.output_file = []

    if(len(args.output_file) < len(inputList)):
        for i in range(len(inputList) - len(args.output_file)):
            oldFile = inputList[i]
            dotIndex = oldFile.rfind('.')
            if(args.export_to_premiere or args.export_to_resolve):
                args.output_file.append(oldFile[:dotIndex] + '.xml')
            else:
                ext = oldFile[dotIndex:]
                if(args.export_as_audio):
                    ext = '.wav'
                end = '_ALTERED' + ext
                args.output_file.append(oldFile[:dotIndex] + end)

    TEMP = tempfile.mkdtemp()

    if(args.combine_files):
        with open(f'{TEMP}/combines.txt', 'w') as outfile:
            for fileref in inputList:
                outfile.write(f"file '{fileref}'\n")

        cmd = [ffmpeg, '-f', 'concat', '-safe', '0', '-i', f'{TEMP}/combines.txt',
            '-c', 'copy', 'combined.mp4']
        subprocess.call(cmd)
        inputList = ['combined.mp4']


    speeds = [args.silent_speed, args.video_speed]

    startTime = time.time()

    from usefulFunctions import isAudioFile, vidTracks, conwrite, getAudioChunks
    from wavfile import read, write

    numCuts = 0
    for i, INPUT_FILE in enumerate(inputList):
        newOutput = args.output_file[i]
        fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]

        # Grab the sample rate from the input.
        sr = args.sample_rate
        if(sr is None):
            output = pipeToConsole([ffmpeg, '-i', INPUT_FILE, '-hide_banner'])
            try:
                matchDict = re.search(r'\s(?P<grp>\w+?)\sHz', output).groupdict()
                sr = matchDict['grp']
            except AttributeError:
                sr = 48000
        args.sample_rate = sr

        # Grab the audio bitrate from the input.
        abit = args.audio_bitrate
        if(abit is None):
            output = pipeToConsole([ffprobe, '-v', 'error', '-select_streams',
                'a:0', '-show_entries', 'stream=bit_rate', '-of',
                'compact=p=0:nk=1', INPUT_FILE])
            try:
                abit = int(output)
            except:
                log.warning("Couldn't automatically detect audio bitrate.")
                abit = '500k'
                log.debug('Setting audio bitrate to ' + abit)
            else:
                abit = str(round(abit / 1000)) + 'k'
        else:
            abit = str(abit)
        args.audio_bitrate = abit

        if(isAudioFile(INPUT_FILE)):
            fps = 30
            tracks = 1
            cmd = [ffmpeg, '-y', '-i', INPUT_FILE, '-b:a', args.audio_bitrate, '-ac', '2',
                '-ar', str(args.sample_rate), '-vn', f'{TEMP}/fastAud.wav']
            if(args.debug):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)

            sampleRate, audioData = read(f'{TEMP}/fastAud.wav')
        else:
            if(args.export_to_premiere):
                fps = 29.97
            else:
                fps = ffmpegFPS(ffmpeg, INPUT_FILE, log)
            tracks = vidTracks(INPUT_FILE, ffprobe, log)
            if(args.cut_by_this_track >= tracks):
                log.error("You choose a track that doesn't exist.\n" \
                    f'There are only {tracks-1} tracks. (starting from 0)')

            vcodec = args.video_codec
            if(vcodec is None):
                output = pipeToConsole([ffmpeg, '-i', INPUT_FILE, '-hide_banner'])
                try:
                    matchDict = re.search(r'Video:\s(?P<video>\w+?)\s', output).groupdict()
                    vcodec = matchDict['video']
                    log.debug(vcodec)
                except AttributeError:
                    vcodec = 'copy'
                    log.warning("Couldn't automatically detect the video codec.")

            vbit = args.video_bitrate
            if(vbit is None):
                output = pipeToConsole([ffprobe, '-v', 'error', '-select_streams',
                    'v:0', '-show_entries', 'stream=bit_rate', '-of',
                    'compact=p=0:nk=1', INPUT_FILE])
                try:
                    vbit = int(output)
                except:
                    log.warning("Couldn't automatically detect video bitrate.")
                    vbit = '500k'
                    log.debug('Setting vbit to ' + vbit)
                else:
                    vbit += 300 * 1000 # Add more for better quality.
                    vbit = str(round(vbit / 1000)) + 'k'
            else:
                vbit = str(vbit)
                if(vcodec == 'copy'):
                    log.warning('Your bitrate will not be applied because' \
                        ' the video codec is "copy".')
            args.video_bitrate = vbit

            for trackNum in range(tracks):
                cmd = [ffmpeg, '-y', '-i', INPUT_FILE, '-ab', args.audio_bitrate,
                '-ac', '2', '-ar', str(args.sample_rate), '-map', f'0:a:{trackNum}',
                f'{TEMP}/{trackNum}.wav']
                if(args.debug):
                    cmd.extend(['-hide_banner'])
                else:
                    cmd.extend(['-nostats', '-loglevel', '0'])
                subprocess.call(cmd)

            if(args.cut_by_all_tracks):
                cmd = [ffmpeg, '-y', '-i', INPUT_FILE, '-filter_complex',
                    f'[0:a]amerge=inputs={tracks}', '-map', 'a', '-ar',
                    str(args.sample_rate), '-ac', '2', '-f', 'wav', f'{TEMP}/combined.wav']
                if(args.debug):
                    cmd.extend(['-hide_banner'])
                else:
                    cmd.extend(['-nostats', '-loglevel', '0'])

                subprocess.call(cmd)

                sampleRate, audioData = read(f'{TEMP}/combined.wav')
            else:
                if(os.path.isfile(f'{TEMP}/{args.cut_by_this_track}.wav')):
                    sampleRate, audioData = read(f'{TEMP}/{args.cut_by_this_track}.wav')
                else:
                    log.error('Audio track not found!')

        chunks = getAudioChunks(audioData, sampleRate, fps, args.silent_threshold,
            args.frame_margin, args.min_clip_length, args.min_cut_length, log)

        clips = []
        for chunk in chunks:
            if(speeds[chunk[2]] == 99999):
                numCuts += 1
            else:
                clips.append([chunk[0], chunk[1], speeds[chunk[2]] * 100])

        if(fps is None and not isAudioFile(INPUT_FILE)):
            if(makingDataFile):
                dotIndex = INPUT_FILE.rfind('.')
                end = '_constantFPS' + INPUT_FILE[dotIndex:]
                constantLoc = INPUT_FILE[:dotIndex] + end
            else:
                constantLoc = f'{TEMP}/constantVid{fileFormat}'
            cmd = [ffmpeg, '-y', '-i', INPUT_FILE, '-filter:v', 'fps=fps=30', constantLoc]
            if(args.debug):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
            INPUT_FILE = constantLoc

        if(args.preview):
            args.no_open = True
            from preview import preview

            preview(INPUT_FILE, chunks, speeds, args.debug)
            continue

        if(args.export_to_premiere):
            args.no_open = True
            from premiere import exportToPremiere

            exportToPremiere(INPUT_FILE, TEMP, newOutput, clips, tracks, sampleRate, log)
            continue
        if(args.export_to_resolve):
            args.no_open = True
            duration = chunks[len(chunks) - 1][1]
            from resolve import exportToResolve

            exportToResolve(INPUT_FILE, newOutput, clips, duration, sampleRate, log)
            continue
        if(isAudioFile(INPUT_FILE) and not makingDataFile):
            from fastAudio import fastAudio

            fastAudio(ffmpeg, INPUT_FILE, newOutput, chunks, speeds, args.audio_bitrate,
            sampleRate, args.debug, True, log)
            continue

        from fastVideo import fastVideo
        fastVideo(ffmpeg, INPUT_FILE, newOutput, chunks, speeds, tracks,
            args.audio_bitrate, sampleRate, args.debug, TEMP,
            args.keep_tracks_seperate, vcodec, fps, args.export_as_audio,
            args.video_bitrate, log)

    if(not os.path.isfile(newOutput)):
        log.error(f'The file {newOutput} was not created.')

    if(not args.preview and not makingDataFile):
        timeLength = round(time.time() - startTime, 2)
        minutes = timedelta(seconds=round(timeLength))
        print(f'Finished. Took {timeLength} seconds ({minutes})')

    if(not args.preview and makingDataFile):
        timeSave = numCuts * 2 # assuming making each cut takes about 2 seconds.
        units = 'seconds'
        if(timeSave >= 3600):
            timeSave = round(timeSave / 3600, 1)
            if(timeSave % 1 == 0):
                timeSave = round(timeSave)
            units = 'hours'
        if(timeSave >= 60):
            timeSave = round(timeSave / 60, 1)
            if(timeSave >= 10 or timeSave % 1 == 0):
                timeSave = round(timeSave)
            units = 'minutes'

        print(f'Auto-Editor made {numCuts} cuts', end='') # Don't add a newline.
        if(numCuts > 4):
            print(f', which would have taken about {timeSave} {units} if edited manually.')
        else:
            print('.')

    if(not args.no_open):
        try:  # should work on Windows
            os.startfile(newOutput)
        except AttributeError:
            try:  # should work on MacOS and most Linux versions
                subprocess.call(['open', newOutput])
            except:
                try: # should work on WSL2
                    subprocess.call(['cmd.exe', '/C', 'start', newOutput])
                except:
                    log.warning('Could not open output file.')
    rmtree(TEMP)
Example no. 15
                tempLink = np.array([[g, p, X[g, p]]])
                while nnRight[int(tempLink[-1, 0]), int(tempLink[-1, 1])] > 0:
                    r, c = int(tempLink[-1, 0]), int(tempLink[-1, 1])
                    new_row = [nnRight[r, c], c + 1, X[nnRight[r, c], c + 1]]
                    tempLink = np.vstack((tempLink, new_row))
                    # Mark the previous point as used; the original '==' here
                    # was a comparison with no effect, not an assignment.
                    nnRight[r, c] = 0
                if tempLink.shape[0] >= self.min_link_len:
                    linkOutput.append(tempLink)

        return linkOutput

if __name__ == "__main__":
    import wavfile
    import sys

    if len(sys.argv) != 2:
        print('Usage: %s wavfile' % sys.argv[0])
        sys.exit(1)
    try:
        fs, data = wavfile.read(sys.argv[1])
    except IOError:
        print("Cannot find file: %s" % sys.argv[1])
        exit()

    links_model = GaussianModel('linksModel', '/Users/felix/Documents/batcloud/callviewer 2/callviewer/linksModel.mat')
    links_model_lr = GaussianModel('linksModel', '/Users/felix/Documents/batcloud/callviewer 2/callviewer/linksModel.mat', LR=True)

    outliner = Outliner(HPFcutoff = 15)
    outline = outliner.extract_features(data, fs)
Example no. 16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--duration',
                        '-d',
                        default=5,
                        type=int,
                        help='Recording duration in seconds')
    parser.add_argument('--rate',
                        '-r',
                        default=44100,
                        type=int,
                        help='Audio sample rate')
    parser.add_argument('--window',
                        '-w',
                        choices=['kaiser'],
                        default='kaiser',
                        help='Filter window type')
    parser.add_argument('--cutoffhz1',
                        '-wc1',
                        default=1900,
                        type=int,
                        help='The cutoff frequency 1 of the filter')
    parser.add_argument('--cutoffhz2',
                        '-wc2',
                        default=2100,
                        type=int,
                        help='The cutoff frequency 2 of the filter')
    parser.add_argument('--ripple_db',
                        '-rd',
                        default=60,
                        type=int,
                        help='The desired attenuation in the stop band, in dB')
    parser.add_argument('--noise_1',
                        '-n1',
                        default=1950,
                        type=int,
                        help='Noise minimum frequency')
    parser.add_argument('--noise_2',
                        '-n2',
                        default=2050,
                        type=int,
                        help='Noise maximum frequency')
    parser.add_argument('--play',
                        '-p',
                        default=True,
                        type=bool,
                        help='Play audios with and without noise')

    args = vars(parser.parse_args())

    files_prefix = '_'.join(['noise', str(args['noise_1']), str(args['noise_2'])\
        , str(args['cutoffhz1']), str(args['cutoffhz2']), str(args['ripple_db'])])

    audio_path = Recorder(args['duration'], args['rate']).record(files_prefix)
    x = wavfile.read(audio_path, normalized=True, forcestereo=False)[1]
    noise = Noiser().band_limited_noise(min_freq=args['noise_1'],
                                        max_freq=args['noise_2'],
                                        samples=len(x),
                                        samplerate=args['rate']) * NOISE_A

    if args['window'] == 'kaiser':
        KaiserFilter(args['rate'], args['cutoffhz1'], args['cutoffhz2'], args['ripple_db'])\
            .add_noise_and_filter(x, noise, args['play'], files_prefix)
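
# Note: Recorder, Noiser and KaiserFilter above are project-specific helpers
# that are not shown in this snippet. As a rough, self-contained sketch of the
# band-limited-noise idea (an assumed behaviour, not the project's actual
# implementation), white noise can be shaped by zeroing the FFT bins that fall
# outside the desired band:
import numpy as np

def band_limited_noise_sketch(min_freq, max_freq, samples, samplerate):
    # Start from white Gaussian noise.
    noise = np.random.normal(0, 1, samples)
    # Zero every frequency bin outside [min_freq, max_freq] and go back
    # to the time domain.
    spectrum = np.fft.rfft(noise)
    freqs = np.fft.rfftfreq(samples, d=1.0 / samplerate)
    spectrum[(freqs < min_freq) | (freqs > max_freq)] = 0.0
    return np.fft.irfft(spectrum, n=samples)

# e.g. band_limited_noise_sketch(1950, 2050, samples=44100, samplerate=44100)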
Esempio n. 17
0
def find_wavdir(index):
    # Search each candidate data directory for a wav file named `index` and
    # return its full path. (The original snippet starts mid-function;
    # `wav_dirs` is a stand-in for the directory list used by the real code.)
    for path in wav_dirs:
        for (_, _, filenames) in walk(path):
            for filename in filenames:
                if index == filename:
                    wavdir = path + '/' + index
                    return wavdir


config = Config('conv')

df = pd.read_csv(config.train_cats + '.csv')
df.set_index('fname', inplace=True)

for f in df.index:
    wavdir = find_wavdir(f)
    try:
        rate, signal, _ = wavfile.read(wavdir)
        df.at[f, 'length'] = signal.shape[0] / rate
    except Exception:
        print(sys.exc_info()[0])
    # gives the length of the signal in terms of seconds

classes = list(np.unique(df.label))
class_dist = df.groupby(['label'])['length'].mean()

fig, ax = plt.subplots()
ax.set_title('Class Distribution', y=1.08)
ax.pie(class_dist,
       labels=class_dist.index,
       autopct='%1.1f%%',
       shadow=False,
       startangle=90)
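
# A tiny, self-contained illustration of the groupby driving the pie chart
# above: the slices are the mean clip length per label. The data here is
# synthetic, not the project's dataset.
import pandas as pd

demo = pd.DataFrame({'fname': ['a.wav', 'b.wav', 'c.wav'],
                     'label': ['dog', 'dog', 'cat'],
                     'length': [1.0, 3.0, 2.0]}).set_index('fname')
print(demo.groupby(['label'])['length'].mean())
# -> cat 2.0, dog 2.0  (mean length in seconds per class)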
Esempio n. 18
0
def fastAudio(ffmpeg: str, theFile: str, outFile: str, chunks: list,
              speeds: list, audioBit, samplerate, needConvert: bool, temp: str,
              log, fps: float):

    if (len(chunks) == 1 and chunks[0][2] == 0):
        log.error('Trying to create empty audio.')

    if (not os.path.isfile(theFile)):
        log.error('fastAudio.py could not find file: ' + theFile)

    if (needConvert):
        cmd = [ffmpeg, '-y', '-i', theFile]
        if (audioBit is not None):
            cmd.extend(['-b:a', str(audioBit)])
        cmd.extend(
            ['-ac', '2', '-ar',
             str(samplerate), '-vn', f'{temp}/faAudio.wav'])
        if (log.is_ffmpeg):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '8'])
        subprocess.call(cmd)

        theFile = f'{temp}/faAudio.wav'

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0
    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum,
                    totalChunks,
                    beginTime,
                    title='Creating new audio')

    log.debug('\n   - Total Samples: ' + str(yPointer))
    log.debug('   - Samples per Frame: ' + str(samplerate / fps))
    log.debug('   - Expected video length: ' + str(yPointer /
                                                   (samplerate / fps)))
    newAudio = newAudio[:yPointer]
    write(outFile, samplerate, newAudio)

    if (needConvert):
        conwrite('')
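
# The chunk bookkeeping above reduces to simple frame/sample arithmetic. The
# two helpers below are an illustrative, self-contained sketch of that
# conversion (hypothetical names, not part of the project's code):
def chunk_to_sample_range(chunk, fps, samplerate):
    # chunk is (start_frame, end_frame, speed_index)
    start = int(chunk[0] / fps * samplerate)
    end = int(start + (samplerate / fps) * (chunk[1] - chunk[0]))
    return start, end

def sped_up_sample_count(chunk, fps, samplerate, speed):
    # How many output samples the chunk occupies after time-scaling.
    source_samples = (chunk[1] - chunk[0]) / fps * samplerate
    return int(source_samples / speed)

# e.g. chunk_to_sample_range((0, 30, 1), fps=30, samplerate=48000) -> (0, 48000)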
Esempio n. 19
0
def fastAudio(theFile, outFile, chunks: list, speeds: list, log, fps: float,
              machineReadable, hideBar):
    from wavfile import read, write
    import os

    import numpy as np

    log.checkType(chunks, 'chunks', list)
    log.checkType(speeds, 'speeds', list)

    def speedsOtherThan1And99999(a: list) -> bool:
        return len([x for x in a if x != 1 and x != 99999]) > 0

    if (speedsOtherThan1And99999(speeds)):
        from audiotsm2 import phasevocoder
        from audiotsm2.io.array import ArrReader, ArrWriter

    if (len(chunks) == 1 and chunks[0][2] == 0):
        log.error('Trying to create an empty file.')

    if (not os.path.isfile(theFile)):
        log.error('fastAudio.py could not find file: ' + theFile)

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0

    audioProgress = ProgressBar(len(chunks), 'Creating new audio',
                                machineReadable, hideBar)

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        audioProgress.tick(chunkNum)

    log.debug('\n   - Total Samples: ' + str(yPointer))
    log.debug('   - Samples per Frame: ' + str(samplerate / fps))
    log.debug('   - Expected video length: ' + str(yPointer /
                                                   (samplerate / fps)))
    newAudio = newAudio[:yPointer]
    write(outFile, samplerate, newAudio)
Esempio n. 20
0
def read_wav(path):
    res = read(path)
    rate = res[0]
    sig = res[1]
    return rate, sig
Esempio n. 21
0
def preview(ffmpeg, myInput, silentT, zoomT, frameMargin, sampleRate, videoSpeed,
        silentSpeed, cutByThisTrack, bitrate):
    TEMP = tempfile.mkdtemp()

    extension = myInput[myInput.rfind('.'):]
    audioFile = extension in ['.wav', '.mp3', '.m4a']

    if(audioFile):
        fps = 30

        cmd = [ffmpeg, '-i', myInput, '-b:a', bitrate, '-ac', '2', '-ar',
            str(sampleRate), '-vn', f'{TEMP}/fastAud.wav', '-nostats', '-loglevel', '0']
        subprocess.call(cmd)

        sampleRate, audioData = read(f'{TEMP}/fastAud.wav')
        chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)
    else:
        import cv2

        cap = cv2.VideoCapture(myInput)
        fps = cap.get(cv2.CAP_PROP_FPS)

        tracks = vidTracks(myInput, ffmpeg)

        if(cutByThisTrack >= tracks):
            print("Error! You choose a track that doesn't exist.")
            print(f'There are only {tracks-1} tracks. (starting from 0)')
            sys.exit(1)

        for trackNumber in range(tracks):
            cmd = [ffmpeg, '-i', myInput, '-ab', bitrate, '-ac', '2', '-ar',
                str(sampleRate),'-map', f'0:a:{trackNumber}',  f'{TEMP}/{trackNumber}.wav',
                '-nostats', '-loglevel', '0']
            subprocess.call(cmd)

        # Read the selected track once all tracks have been extracted.
        sampleRate, audioData = read(f'{TEMP}/{cutByThisTrack}.wav')
        chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)

    rmtree(TEMP)

    def printTimeFrame(title, frames, fps):
        inSec = round(frames / fps, 1)
        if(fps % 1 == 0):
            fps = round(fps)
        if(inSec < 1):
            minutes = f'{int(frames)}/{fps} frames'
        else:
            minutes = timedelta(seconds=round(inSec))
        print(f'{title}: {inSec} secs ({minutes})')


    oldTime = chunks[len(chunks)-1][1]
    printTimeFrame('Old length', oldTime, fps)

    speeds = [silentSpeed, videoSpeed]
    newL = getNewLength(chunks, speeds, fps)
    printTimeFrame('New length', newL * fps, fps)

    clips = 0
    cuts = 0
    clipLengths = []
    for chunk in chunks:
        state = chunk[2]
        if(speeds[state] != 99999):
            clips += 1
            leng = (chunk[1] - chunk[0]) / speeds[state]
            clipLengths.append(leng)
        else:
            cuts += 1

    print('Number of clips:', clips)
    #print('Number of cuts:', cuts)
    printTimeFrame('Smallest clip length', min(clipLengths), fps)
    printTimeFrame('Largest clip length', max(clipLengths), fps)
    printTimeFrame('Average clip length', sum(clipLengths) / len(clipLengths), fps)
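
# getNewLength() is defined elsewhere in the project and not shown here.
# Judging only from how it is used above (frame chunks in, seconds out), a
# plausible sketch would be the following -- an assumption, not the project's
# actual implementation:
def get_new_length_sketch(chunks, speeds, fps):
    # Sum the duration of every kept chunk, scaled by its playback speed.
    total_frames = 0.0
    for start, end, speed_index in chunks:
        speed = speeds[speed_index]
        if speed != 99999:          # 99999 marks sections that are cut out.
            total_frames += (end - start) / speed
    return total_frames / fps       # length in seconds

# e.g. get_new_length_sketch([(0, 60, 1), (60, 90, 0)], [2, 1], fps=30)
#      -> (60/1 + 30/2) / 30 = 2.5 seconds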
Esempio n. 22
0
def fastAudio(ffmpeg,
              theFile,
              outFile,
              chunks,
              speeds,
              audioBit,
              samplerate,
              debug,
              needConvert,
              log,
              fps=30):

    if (not os.path.isfile(theFile)):
        log.error('Could not find file ' + theFile)

    if (needConvert):
        # Only print this here so other scripts can use this function.
        print('Running from fastAudio.py')

        import tempfile
        from shutil import rmtree

        TEMP = tempfile.mkdtemp()

        cmd = [
            ffmpeg, '-i', theFile, '-b:a', audioBit, '-ac', '2', '-ar',
            str(samplerate), '-vn', f'{TEMP}/fastAud.wav'
        ]
        if (not debug):
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        theFile = f'{TEMP}/fastAud.wav'

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0
    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum,
                    totalChunks,
                    beginTime,
                    title='Creating new audio')

    log.debug('yPointer: ' + str(yPointer))
    log.debug('samples per frame: ' + str(samplerate / fps))
    log.debug('Expected video length: ' + str(yPointer / (samplerate / fps)))
    newAudio = newAudio[:yPointer]
    write(outFile, samplerate, newAudio)

    if ('TEMP' in locals()):
        rmtree(TEMP)

    if (needConvert):
        conwrite('')
Esempio n. 23
0
def fastAudio(ffmpeg,
              theFile,
              outFile,
              silentT,
              frameMargin,
              SAMPLE_RATE,
              audioBit,
              verbose,
              silentSpeed,
              soundedSpeed,
              needConvert,
              chunks=[],
              fps=30):

    if (not os.path.isfile(theFile)):
        print('Could not find file:', theFile)
        sys.exit(1)

    if (outFile == ''):
        fileName = theFile[:theFile.rfind('.')]
        outFile = f'{fileName}_ALTERED.wav'

    if (needConvert):
        # Only print this here so other scripts can use this function.
        print('Running from fastAudio.py')

        import tempfile
        from shutil import rmtree

        TEMP = tempfile.mkdtemp()

        cmd = [
            ffmpeg, '-i', theFile, '-b:a', audioBit, '-ac', '2', '-ar',
            str(SAMPLE_RATE), '-vn', f'{TEMP}/fastAud.wav'
        ]
        if (not verbose):
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        theFile = f'{TEMP}/fastAud.wav'

    speeds = [silentSpeed, soundedSpeed]

    sampleRate, audioData = read(theFile)
    if (chunks == []):
        print('Creating chunks')
        chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2,
                                frameMargin)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int((newL * sampleRate) * 1.5) + int(sampleRate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0

    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * sampleRate)
        audioSampleEnd = int(audioSampleStart + (sampleRate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]

        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, sampleRate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, sampleRate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * sampleRate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum,
                    totalChunks,
                    beginTime,
                    title='Creating new audio')

    if (verbose):
        print('yPointer', yPointer)
        print('samples per frame', sampleRate / fps)
        print('Expected video length', yPointer / (sampleRate / fps))
    newAudio = newAudio[:yPointer]
    write(outFile, sampleRate, newAudio)

    if ('TEMP' in locals()):
        rmtree(TEMP)

    if (needConvert):
        return outFile
Esempio n. 24
0
def main():
    dirPath = os.path.dirname(os.path.realpath(__file__))
    # Fixes pip not able to find other included modules.
    sys.path.append(os.path.abspath(dirPath))

    # Print the version if only the -v option is added.
    if (sys.argv[1:] == ['-v'] or sys.argv[1:] == ['-V']):
        print(f'Auto-Editor version {version}\nPlease use --version instead.')
        sys.exit()

    # If the users just runs: $ auto-editor
    if (sys.argv[1:] == []):
        # Print
        print(
            '\nAuto-Editor is an automatic video/audio creator and editor.\n')
        print(
            'By default, it will detect silence and create a new video with ')
        print(
            'those sections cut out. By changing some of the options, you can')
        print(
            'export to a traditional editor like Premiere Pro and adjust the')
        print(
            'edits there, adjust the pacing of the cuts, and change the method'
        )
        print('of editing like using audio loudness and video motion to judge')
        print('making cuts.')
        print(
            '\nRun:\n    auto-editor --help\n\nTo get the list of options.\n')
        sys.exit()

    from vanparse import ParseOptions
    from usefulFunctions import Log, Timer

    if (len(sys.argv) > 1 and sys.argv[1] == 'generate_test'):
        option_data = generate_options()
        args = ParseOptions(sys.argv[2:], Log(), 'generate_test', option_data)

        if (args.help):
            genHelp(option_data)
            sys.exit()

        from generateTestMedia import generateTestMedia
        from usefulFunctions import FFmpeg

        ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, Log())
        generateTestMedia(ffmpeg, args.output_file, args.fps, args.duration,
                          args.width, args.height)
        sys.exit()

    elif (len(sys.argv) > 1 and sys.argv[1] == 'test'):

        from testAutoEditor import testAutoEditor
        testAutoEditor()
        sys.exit()

    elif (len(sys.argv) > 1 and sys.argv[1] == 'info'):
        option_data = info_options()
        args = ParseOptions(sys.argv[2:], Log(), 'info', option_data)

        if (args.help):
            genHelp(option_data)
            sys.exit()

        from info import getInfo
        from usefulFunctions import FFmpeg, FFprobe

        log = Log()
        ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, log)
        ffprobe = FFprobe(dirPath, args.my_ffmpeg, log)

        getInfo(args.input, ffmpeg, ffprobe, log)
        sys.exit()
    else:
        option_data = main_options()
        args = ParseOptions(sys.argv[1:], Log(True), 'auto-editor',
                            option_data)

    timer = Timer(args.quiet)

    # Print the help screen for the entire program.
    if (args.help):
        print('\n  Have an issue? Make an issue. '\
            'Visit https://github.com/wyattblue/auto-editor/issues\n')
        print('  The help option can also be used on a specific option:')
        print('      auto-editor --frame_margin --help\n')
        genHelp(option_data)
        sys.exit()

    del option_data

    from usefulFunctions import FFmpeg, FFprobe, sep
    ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, Log())
    ffprobe = FFprobe(dirPath, args.my_ffmpeg, Log())

    makingDataFile = (args.export_to_premiere or args.export_to_resolve
                      or args.export_to_final_cut_pro or args.export_as_json)
    is64bit = '64-bit' if sys.maxsize > 2**32 else '32-bit'

    if (args.debug and args.input == []):
        import platform

        print('Python Version:', platform.python_version(), is64bit)
        print('Platform:', platform.system(), platform.release())
        print('Config File path:', dirPath + sep() + 'config.txt')
        print('FFmpeg path:', ffmpeg.getPath())
        ffmpegVersion = ffmpeg.pipe(['-version']).split('\n')[0]
        ffmpegVersion = ffmpegVersion.replace('ffmpeg version', '').strip()
        ffmpegVersion = ffmpegVersion.split(' ')[0]
        print('FFmpeg version:', ffmpegVersion)
        print('Auto-Editor version', version)
        sys.exit()

    if (args.version):
        print('Auto-Editor version', version)
        sys.exit()

    TEMP = tempfile.mkdtemp()
    log = Log(args.debug, args.show_ffmpeg_debug, args.quiet, temp=TEMP)
    log.debug(f'\n   - Temp Directory: {TEMP}')

    if (is64bit == '32-bit'):
        log.warning('You have the 32-bit version of Python, which may lead to ' \
            'memory crashes.')

    ffmpeg.updateLog(log)
    ffprobe.updateLog(log)

    from wavfile import read
    from usefulFunctions import isLatestVersion

    if (not args.quiet and not isLatestVersion(version, log)):
        log.print('\nAuto-Editor is out of date. Run:\n')
        log.print('    pip3 install -U auto-editor')
        log.print('\nto upgrade to the latest version.\n')

    from argsCheck import hardArgsCheck, softArgsCheck
    hardArgsCheck(args, log)
    args = softArgsCheck(args, log)

    from validateInput import validInput
    inputList = validInput(args.input, ffmpeg, args, log)

    # Figure out the output file names.
    def newOutputName(oldFile: str, exa=False, data=False, exc=False) -> str:
        dotIndex = oldFile.rfind('.')
        if (exc):
            return oldFile[:dotIndex] + '.json'
        elif (data):
            return oldFile[:dotIndex] + '.xml'
        ext = oldFile[dotIndex:]
        if (exa):
            ext = '.wav'
        return oldFile[:dotIndex] + '_ALTERED' + ext
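
    # For reference, the helper above maps names like this (illustrative calls
    # with a hypothetical input file, not output from the program):
    #   newOutputName('talk.mp4')            -> 'talk_ALTERED.mp4'
    #   newOutputName('talk.mp4', exa=True)  -> 'talk_ALTERED.wav'
    #   newOutputName('talk.mp4', data=True) -> 'talk.xml'
    #   newOutputName('talk.mp4', exc=True)  -> 'talk.json'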

    if (len(args.output_file) < len(inputList)):
        for i in range(len(inputList) - len(args.output_file)):
            args.output_file.append(
                newOutputName(inputList[i], args.export_as_audio,
                              makingDataFile, args.export_as_json))

    if (args.combine_files):
        # Combine video files, then set input to 'combined.mp4'.
        cmd = []
        for fileref in inputList:
            cmd.extend(['-i', fileref])
        cmd.extend([
            '-filter_complex', f'[0:v]concat=n={len(inputList)}:v=1:a=1',
            '-codec:v', 'h264', '-pix_fmt', 'yuv420p', '-strict', '-2',
            f'{TEMP}{sep()}combined.mp4'
        ])
        ffmpeg.run(cmd)
        del cmd
        inputList = [f'{TEMP}{sep()}combined.mp4']

    speeds = [args.silent_speed, args.video_speed]
    log.debug(f'   - Speeds: {speeds}')

    audioExtensions = [
        '.wav', '.mp3', '.m4a', '.aiff', '.flac', '.ogg', '.oga', '.acc',
        '.nfa', '.mka'
    ]

    # videoExtensions = ['.mp4', '.mkv', '.mov', '.webm', '.ogv']

    for i, INPUT_FILE in enumerate(inputList):
        fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]

        chunks = None
        if (fileFormat == '.json'):
            log.debug('Reading .json file')
            from makeCutList import readCutList
            INPUT_FILE, chunks, speeds = readCutList(INPUT_FILE, version, log)

            newOutput = newOutputName(INPUT_FILE, args.export_as_audio,
                                      makingDataFile, False)

            fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]
        else:
            newOutput = args.output_file[i]

        log.debug(f'   - INPUT_FILE: {INPUT_FILE}')
        log.debug(f'   - newOutput: {newOutput}')

        if (os.path.isfile(newOutput) and INPUT_FILE != newOutput):
            log.debug(f'  Removing already existing file: {newOutput}')
            os.remove(newOutput)

        if (args.sample_rate is None):
            sampleRate = ffprobe.getSampleRate(INPUT_FILE)
            if (sampleRate == 'N/A'):
                sampleRate = '48000'
                log.warning(
                    f"Samplerate couldn't be detected, using {sampleRate}.")
        else:
            sampleRate = str(args.sample_rate)
        log.debug(f'   - sampleRate: {sampleRate}')

        if (args.audio_bitrate is None):
            if (INPUT_FILE.endswith('.mkv')):
                # audio bitrate not supported in the mkv container.
                audioBitrate = None
            else:
                audioBitrate = ffprobe.getPrettyABitrate(INPUT_FILE)
                if (audioBitrate == 'N/A'):
                    log.warning("Couldn't automatically detect audio bitrate.")
                    audioBitrate = None
        else:
            audioBitrate = args.audio_bitrate

        log.debug(f'   - audioBitrate: {audioBitrate}')

        audioFile = fileFormat in audioExtensions
        if (audioFile):
            if (args.force_fps_to is None):
                fps = 30  # Audio files don't have frames, so give fps a dummy value.
            else:
                fps = args.force_fps_to
            if (args.force_tracks_to is None):
                tracks = 1
            else:
                tracks = args.force_tracks_to
            cmd = ['-i', INPUT_FILE]
            if (audioBitrate is not None):
                cmd.extend(['-b:a', audioBitrate])
            cmd.extend([
                '-ac', '2', '-ar', sampleRate, '-vn',
                f'{TEMP}{sep()}fastAud.wav'
            ])
            ffmpeg.run(cmd)
            del cmd

            sampleRate, audioData = read(f'{TEMP}{sep()}fastAud.wav')
        else:
            if (args.force_fps_to is not None):
                fps = args.force_fps_to
            elif (args.export_to_premiere or args.export_to_final_cut_pro
                  or args.export_to_resolve):
                # Based on timebase.
                fps = int(ffprobe.getFrameRate(INPUT_FILE))
            else:
                fps = ffprobe.getFrameRate(INPUT_FILE)
            log.debug(f'Frame rate: {fps}')

            tracks = args.force_tracks_to
            if (tracks is None):
                tracks = ffprobe.getAudioTracks(INPUT_FILE)

            if (args.cut_by_this_track >= tracks):
                allTracks = ''
                for trackNum in range(tracks):
                    allTracks += f'Track {trackNum}\n'

                if (tracks == 1):
                    message = f'is only {tracks} track'
                else:
                    message = f'are only {tracks} tracks'
                log.error("You choose a track that doesn't exist.\n" \
                    f'There {message}.\n {allTracks}')

            # Split audio tracks into: 0.wav, 1.wav, etc.
            for trackNum in range(tracks):
                cmd = ['-i', INPUT_FILE]
                if (audioBitrate is not None):
                    cmd.extend(['-ab', audioBitrate])
                cmd.extend([
                    '-ac', '2', '-ar', sampleRate, '-map', f'0:a:{trackNum}',
                    f'{TEMP}{sep()}{trackNum}.wav'
                ])
                ffmpeg.run(cmd)
                del cmd

            # Check if the `--cut_by_all_tracks` flag has been set or not.
            if (args.cut_by_all_tracks):
                # Combine all audio tracks into one audio file, then read.
                cmd = [
                    '-i', INPUT_FILE, '-filter_complex',
                    f'[0:a]amix=inputs={tracks}:duration=longest', '-ar',
                    sampleRate, '-ac', '2', '-f', 'wav',
                    f'{TEMP}{sep()}combined.wav'
                ]
                ffmpeg.run(cmd)
                sampleRate, audioData = read(f'{TEMP}{sep()}combined.wav')
                del cmd
            else:
                # Read only one audio file.
                if (os.path.isfile(
                        f'{TEMP}{sep()}{args.cut_by_this_track}.wav')):
                    sampleRate, audioData = read(
                        f'{TEMP}{sep()}{args.cut_by_this_track}.wav')
                else:
                    log.bug('Audio track not found!')

        log.debug(f'   - Frame Rate: {fps}')
        if (chunks is None):
            from cutting import audioToHasLoud, motionDetection

            audioList = None
            motionList = None
            if ('audio' in args.edit_based_on):
                log.debug('Analyzing audio volume.')
                audioList = audioToHasLoud(audioData, sampleRate,
                                           args.silent_threshold, fps, log)

            if ('motion' in args.edit_based_on):
                log.debug('Analyzing video motion.')
                motionList = motionDetection(INPUT_FILE,
                                             ffprobe,
                                             args.motion_threshold,
                                             log,
                                             width=args.width,
                                             dilates=args.dilates,
                                             blur=args.blur)

                if (audioList is not None):
                    if (len(audioList) != len(motionList)):
                        log.debug(f'audioList Length:  {len(audioList)}')
                        log.debug(f'motionList Length: {len(motionList)}')
                    if (len(audioList) > len(motionList)):
                        log.debug(
                            'Reducing the size of audioList to match motionList.'
                        )
                        audioList = audioList[:len(motionList)]
                    elif (len(motionList) > len(audioList)):
                        log.debug(
                            'Reducing the size of motionList to match audioList.'
                        )
                        motionList = motionList[:len(audioList)]

            from cutting import combineArrs, applySpacingRules

            hasLoud = combineArrs(audioList, motionList, args.edit_based_on,
                                  log)
            del audioList, motionList

            chunks = applySpacingRules(hasLoud, fps, args.frame_margin,
                                       args.min_clip_length,
                                       args.min_cut_length, args.ignore,
                                       args.cut_out, log)
            del hasLoud

        clips = []
        numCuts = len(chunks)
        for chunk in chunks:
            if (speeds[chunk[2]] != 99999):
                clips.append([chunk[0], chunk[1], speeds[chunk[2]] * 100])

        if (fps is None and not audioFile):
            if (makingDataFile):
                dotIndex = INPUT_FILE.rfind('.')
                end = '_constantFPS' + INPUT_FILE[dotIndex:]
                constantLoc = INPUT_FILE[:dotIndex] + end
            else:
                constantLoc = f'{TEMP}{sep()}constantVid{fileFormat}'
            ffmpeg.run(
                ['-i', INPUT_FILE, '-filter:v', 'fps=fps=30', constantLoc])
            INPUT_FILE = constantLoc

        if (args.export_as_json):
            from makeCutList import makeCutList
            makeCutList(INPUT_FILE, newOutput, version, chunks, speeds, log)
            continue

        if (args.preview):
            newOutput = None
            from preview import preview
            preview(INPUT_FILE, chunks, speeds, fps, audioFile, log)
            continue

        if (args.export_to_premiere or args.export_to_resolve):
            from editor import editorXML
            editorXML(INPUT_FILE, TEMP, newOutput, clips, chunks, tracks,
                      sampleRate, audioFile, args.export_to_resolve, fps, log)
            continue

        if (audioFile):
            from fastAudio import fastAudio, handleAudio, convertAudio
            theFile = handleAudio(ffmpeg, INPUT_FILE, audioBitrate,
                                  str(sampleRate), TEMP, log)
            fastAudio(theFile, f'{TEMP}{sep()}convert.wav', chunks, speeds,
                      log, fps, args.machine_readable_progress,
                      args.no_progress)
            convertAudio(ffmpeg, ffprobe, f'{TEMP}{sep()}convert.wav',
                         INPUT_FILE, newOutput, args, log)
            continue

        from videoUtils import handleAudioTracks, muxVideo

        continueVid = handleAudioTracks(ffmpeg, newOutput, args, tracks,
                                        chunks, speeds, fps, TEMP, log)
        if (continueVid):

            if (args.render == 'auto'):
                try:
                    import av
                    args.render = 'av'
                except ImportError:
                    args.render = 'opencv'

            log.debug(f'Using {args.render} method')
            if (args.render == 'av'):
                from renderVideo import renderAv
                renderAv(ffmpeg, INPUT_FILE, args, chunks, speeds, TEMP, log)

            if (args.render == 'opencv'):
                from renderVideo import renderOpencv
                renderOpencv(ffmpeg, INPUT_FILE, args, chunks, speeds, fps,
                             TEMP, log)

            # Now mix new audio(s) and the new video.
            muxVideo(ffmpeg, newOutput, args, tracks, TEMP, log)

    if (newOutput is not None and not os.path.isfile(newOutput)):
        log.bug(f'The file {newOutput} was not created.')

    if (not args.preview and not makingDataFile):
        timer.stop()

    if (not args.preview and makingDataFile):
        from usefulFunctions import humanReadableTime
        # Assume making each cut takes about 30 seconds.
        timeSave = humanReadableTime(numCuts * 30)

        s = 's' if numCuts != 1 else ''
        log.print(f'Auto-Editor made {numCuts} cut{s}', end='')
        log.print(
            f', which would have taken about {timeSave} if edited manually.')

    if (not args.no_open):
        from usefulFunctions import smartOpen
        smartOpen(newOutput, log)

    log.debug('Deleting temp dir')
    rmtree(TEMP)
Esempio n. 25
0
def fastVideo(ffmpeg, videoFile, outFile, silentT, frameMargin, SAMPLE_RATE,
    AUD_BITRATE, verbose, videoSpeed, silentSpeed, cutByThisTrack, keepTracksSep):

    print('Running from fastVideo.py')

    import cv2

    conwrite('Reading audio.')

    if(not os.path.isfile(videoFile)):
        print('Could not find file:', videoFile)
        sys.exit(1)

    TEMP = tempfile.mkdtemp()
    speeds = [silentSpeed, videoSpeed]

    cap = cv2.VideoCapture(videoFile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = cap.get(cv2.CAP_PROP_FPS)

    tracks = vidTracks(videoFile, ffmpeg)

    if(cutByThisTrack >= tracks):
        print("Error! You choose a track that doesn't exist.")
        print(f'There are only {tracks-1} tracks. (starting from 0)')
        sys.exit(1)

    for trackNumber in range(tracks):
        cmd = [ffmpeg, '-i', videoFile, '-ab', AUD_BITRATE, '-ac', '2', '-ar',
        str(SAMPLE_RATE),'-map', f'0:a:{trackNumber}', f'{TEMP}/{trackNumber}.wav']
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)


    sampleRate, audioData = read(f'{TEMP}/{cutByThisTrack}.wav')
    chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)

    # Handle the Audio
    for trackNumber in range(tracks):
        fastAudio(ffmpeg, f'{TEMP}/{trackNumber}.wav', f'{TEMP}/new{trackNumber}.wav',
            silentT, frameMargin, SAMPLE_RATE, AUD_BITRATE, verbose, silentSpeed,
            videoSpeed, False, chunks=chunks, fps=fps)

        if(not os.path.isfile(f'{TEMP}/new{trackNumber}.wav')):
            print('Error! Audio file not created.')
            sys.exit(1)

    out = cv2.VideoWriter(f'{TEMP}/spedup.mp4', fourcc, fps, (width, height))
    totalFrames = chunks[len(chunks) - 1][1]
    beginTime = time.time()

    remander = 0
    framesWritten = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if(not ret):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES)) # current frame
        state = None
        for chunk in chunks:
            if(cframe >= chunk[0] and cframe <= chunk[1]):
                state = chunk[2]
                break

        if(state is not None):
            mySpeed = speeds[state]

            if(mySpeed != 99999):
                doIt = (1 / mySpeed) + remander
                for __ in range(int(doIt)):
                    out.write(frame)
                    framesWritten += 1
                remander = doIt % 1

        progressBar(cframe, totalFrames, beginTime, title='Creating new video')

    conwrite('Writing the output file.')

    cap.release()
    out.release()
    cv2.destroyAllWindows()

    if(verbose):
        print('Frames written', framesWritten)

    first = videoFile[:videoFile.rfind('.')]
    extension = videoFile[videoFile.rfind('.'):]

    if(outFile == ''):
        outFile = f'{first}_ALTERED{extension}'

    # Now mix new audio(s) and the new video.
    if(keepTracksSep):
        cmd = [ffmpeg, '-y']
        for i in range(tracks):
            cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
        cmd.extend(['-i', f'{TEMP}/spedup.mp4'])
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])
        cmd.extend(['-map', f'{tracks}:v:0','-c:v', 'copy', '-movflags', '+faststart',
            outFile])
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
    else:
        # Merge all the audio tracks into one.
        if(tracks > 1):
            cmd = [ffmpeg]
            for i in range(tracks):
                cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
            cmd.extend(['-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{TEMP}/newAudioFile.wav'])
            if(verbose):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
        else:
            os.rename(f'{TEMP}/new0.wav', f'{TEMP}/newAudioFile.wav')

        cmd = [ffmpeg, '-y', '-i', f'{TEMP}/newAudioFile.wav', '-i',
            f'{TEMP}/spedup.mp4', '-c:v', 'copy', '-movflags', '+faststart',
            outFile]
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

    rmtree(TEMP)
    conwrite('')

    return outFile
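
# The frame-writing loop above repeats or drops frames with a fractional
# accumulator, so that non-integer speeds average out correctly over time.
# A tiny self-contained illustration of that accumulator (illustrative only,
# not project code):
def frames_to_write(num_input_frames, speed):
    remainder = 0.0
    counts = []
    for _ in range(num_input_frames):
        ideal = (1.0 / speed) + remainder   # how many copies this frame "owes"
        counts.append(int(ideal))           # write the whole copies now
        remainder = ideal % 1               # carry the fraction to the next frame
    return counts

# e.g. frames_to_write(4, 2) -> [0, 1, 0, 1]  (every other frame dropped at 2x)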
Esempio n. 26
0
def main():
    dirPath = os.path.dirname(os.path.realpath(__file__))
    # Fixes pip not able to find other included modules.
    sys.path.append(os.path.abspath(dirPath))

    # Print the version if only the -v option is added.
    if(sys.argv[1:] == ['-v'] or sys.argv[1:] == ['-V']):
        print(f'Auto-Editor version {version}\nPlease use --version instead.')
        sys.exit()

    if(sys.argv[1:] == []):
        print('\nAuto-Editor is an automatic video/audio creator and editor.\n')
        print('By default, it will detect silence and create a new video with ')
        print('those sections cut out. By changing some of the options, you can')
        print('export to a traditional editor like Premiere Pro and adjust the')
        print('edits there, adjust the pacing of the cuts, and change the method')
        print('of editing like using audio loudness and video motion to judge')
        print('making cuts.')
        print('\nRun:\n    auto-editor --help\n\nTo get the list of options.\n')
        sys.exit()

    from vanparse import ParseOptions
    from usefulFunctions import Log, Timer

    subcommands = ['create', 'test', 'info', 'levels']

    if(len(sys.argv) > 1 and sys.argv[1] in subcommands):
        if(sys.argv[1] == 'create'):
            from create import create, create_options
            from usefulFunctions import FFmpeg
            args = ParseOptions(sys.argv[2:], Log(), 'create', create_options())

            ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, True, Log())
            create(ffmpeg, args.input, args.output_file, args.frame_rate, args.duration,
                args.width, args.height, Log())

        if(sys.argv[1] == 'test'):
            from testAutoEditor import testAutoEditor
            testAutoEditor()

        if(sys.argv[1] == 'info'):
            from info import getInfo, info_options
            from usefulFunctions import FFmpeg, FFprobe

            args = ParseOptions(sys.argv[2:], Log(), 'info', info_options())

            log = Log()
            ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, False, log)
            ffprobe = FFprobe(dirPath, args.my_ffmpeg, False, log)
            getInfo(args.input, ffmpeg, ffprobe, args.fast, log)
        if(sys.argv[1] == 'levels'):
            from levels import levels, levels_options
            from usefulFunctions import FFmpeg, FFprobe
            args = ParseOptions(sys.argv[2:], Log(), 'levels', levels_options())

            TEMP = tempfile.mkdtemp()
            log = Log(temp=TEMP)
            ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, False, log)
            ffprobe = FFprobe(dirPath, args.my_ffmpeg, False, log)
            levels(args.input, args.track, args.output_file, ffmpeg, ffprobe, TEMP, log)
        sys.exit()
    else:
        option_data = main_options()
        args = ParseOptions(sys.argv[1:], Log(True), 'auto-editor', option_data)

    timer = Timer(args.quiet)

    del option_data

    from usefulFunctions import FFmpeg, FFprobe, sep
    ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, args.show_ffmpeg_debug, Log())
    ffprobe = FFprobe(dirPath, args.my_ffmpeg, args.show_ffmpeg_debug, Log())

    # Stops "The file {file} does not exist." from showing.
    if(args.export_as_clip_sequence):
        args.no_open = True

    makingDataFile = (args.export_to_premiere or args.export_to_resolve or
        args.export_to_final_cut_pro or args.export_as_json)
    is64bit = '64-bit' if sys.maxsize > 2**32 else '32-bit'

    if(args.debug and args.input == []):
        import platform

        print('Python Version:', platform.python_version(), is64bit)
        print('Platform:', platform.system(), platform.release())
        print('Config File path:', dirPath + sep() + 'config.txt')
        print('FFmpeg path:', ffmpeg.getPath())
        print('FFmpeg version:', ffmpeg.getVersion())
        print('Auto-Editor version', version)
        sys.exit()

    TEMP = tempfile.mkdtemp()
    log = Log(args.debug, args.quiet, temp=TEMP)
    log.debug(f'\n   - Temp Directory: {TEMP}')

    if(is64bit == '32-bit'):
        log.warning('You have the 32-bit version of Python, which may lead to ' \
            'memory crashes.')

    if(args.version):
        print('Auto-Editor version', version)
        sys.exit()

    ffmpeg.updateLog(log)
    ffprobe.updateLog(log)

    from usefulFunctions import isLatestVersion

    if(not args.quiet and not isLatestVersion(version, log)):
        log.print('\nAuto-Editor is out of date. Run:\n')
        log.print('    pip3 install -U auto-editor')
        log.print('\nto upgrade to the latest version.\n')

    from argsCheck import hardArgsCheck, softArgsCheck
    hardArgsCheck(args, log)
    args = softArgsCheck(args, log)

    from validateInput import validInput
    inputList = validInput(args.input, ffmpeg, args, log)

    # Figure out the output file names.
    def newOutputName(oldFile: str, audio, final_cut_pro, data, json) -> str:
        dotIndex = oldFile.rfind('.')
        print(oldFile)
        if(json):
            return oldFile[:dotIndex] + '.json'
        if(final_cut_pro):
            return oldFile[:dotIndex] + '.fcpxml'
        if(data):
            return oldFile[:dotIndex] + '.xml'
        if(audio):
            return oldFile[:dotIndex] + '_ALTERED.wav'
        return oldFile[:dotIndex] + '_ALTERED' + oldFile[dotIndex:]

    if(len(args.output_file) < len(inputList)):
        for i in range(len(inputList) - len(args.output_file)):
            args.output_file.append(newOutputName(inputList[i],
                args.export_as_audio, args.export_to_final_cut_pro, makingDataFile,
                args.export_as_json))

    if(args.combine_files):
        # Combine video files, then set input to 'combined.mp4'.
        cmd = []
        for fileref in inputList:
            cmd.extend(['-i', fileref])
        cmd.extend(['-filter_complex', f'[0:v]concat=n={len(inputList)}:v=1:a=1',
            '-codec:v', 'h264', '-pix_fmt', 'yuv420p', '-strict', '-2',
            f'{TEMP}{sep()}combined.mp4'])
        ffmpeg.run(cmd)
        del cmd
        inputList = [f'{TEMP}{sep()}combined.mp4']

    speeds = [args.silent_speed, args.video_speed]
    if(args.cut_out != [] and 99999 not in speeds):
        speeds.append(99999)

    for item in args.set_speed_for_range:
        if(item[0] not in speeds):
            speeds.append(float(item[0]))

    log.debug(f'   - Speeds: {speeds}')

    from wavfile import read
    audioExtensions = ['.wav', '.mp3', '.m4a', '.aiff', '.flac', '.ogg', '.oga',
        '.acc', '.nfa', '.mka']
    sampleRate = None

    for i, INPUT_FILE in enumerate(inputList):

        if(len(inputList) > 1):
            log.conwrite(f'Working on {INPUT_FILE}')

        fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]

        chunks = None
        if(fileFormat == '.json'):
            log.debug('Reading .json file')
            from makeCutList import readCutList
            INPUT_FILE, chunks, speeds = readCutList(INPUT_FILE, version, log)
            newOutput = newOutputName(INPUT_FILE, args.export_as_audio,
                args.export_to_final_cut_pro, makingDataFile, False)

            fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]
        else:
            newOutput = args.output_file[i]
            if(not os.path.isdir(INPUT_FILE) and '.' not in newOutput):
                newOutput += INPUT_FILE[INPUT_FILE.rfind('.'):]

        log.debug(f'   - INPUT_FILE: {INPUT_FILE}')
        log.debug(f'   - newOutput: {newOutput}')

        if(os.path.isfile(newOutput) and INPUT_FILE != newOutput):
            log.debug(f'  Removing already existing file: {newOutput}')
            os.remove(newOutput)

        if(args.sample_rate is None):
            sampleRate = ffprobe.getSampleRate(INPUT_FILE)
            if(sampleRate == 'N/A'):
                sampleRate = '48000'
                log.warning(f"Samplerate wasn't detected, so it will be set to {sampleRate}.")
        else:
            sampleRate = str(args.sample_rate)
        log.debug(f'   - sampleRate: {sampleRate}')

        if(args.audio_bitrate is None):
            if(INPUT_FILE.endswith('.mkv')):
                # audio bitrate not supported in the mkv container.
                audioBitrate = None
            else:
                audioBitrate = ffprobe.getPrettyBitrate(INPUT_FILE, 'a')
                if(audioBitrate == 'N/A'):
                    log.warning("Couldn't automatically detect audio bitrate.")
                    audioBitrate = None
        else:
            audioBitrate = args.audio_bitrate

        log.debug(f'   - audioBitrate: {audioBitrate}')

        audioData = None
        audioFile = fileFormat in audioExtensions
        if(audioFile):
            if(args.force_fps_to is None):
                fps = 30 # Audio files don't have frames, so give fps a dummy value.
            else:
                fps = args.force_fps_to
            if(args.force_tracks_to is None):
                tracks = 1
            else:
                tracks = args.force_tracks_to
            cmd = ['-i', INPUT_FILE]
            if(audioBitrate is not None):
                cmd.extend(['-b:a', audioBitrate])
            cmd.extend(['-ac', '2', '-ar', sampleRate, '-vn', f'{TEMP}{sep()}fastAud.wav'])
            ffmpeg.run(cmd)
            del cmd

            sampleRate, audioData = read(f'{TEMP}{sep()}fastAud.wav')
        else:
            if(args.force_fps_to is not None):
                fps = args.force_fps_to
            elif(args.export_to_premiere or args.export_to_final_cut_pro or
                args.export_to_resolve):
                # Based on timebase.
                fps = int(ffprobe.getFrameRate(INPUT_FILE))
            else:
                fps = ffprobe.getFrameRate(INPUT_FILE)

            if(fps < 1):
                log.error(f"{INPUT_FILE}: Frame rate cannot be below 1. fps: {fps}")

            tracks = args.force_tracks_to
            if(tracks is None):
                tracks = ffprobe.getAudioTracks(INPUT_FILE)

            if(args.cut_by_this_track >= tracks):
                allTracks = ''
                for trackNum in range(tracks):
                    allTracks += f'Track {trackNum}\n'

                if(tracks == 1):
                    message = f'is only {tracks} track'
                else:
                    message = f'are only {tracks} tracks'
                log.error("You choose a track that doesn't exist.\n" \
                    f'There {message}.\n {allTracks}')

            # Split audio tracks into: 0.wav, 1.wav, etc.
            for trackNum in range(tracks):
                cmd = ['-i', INPUT_FILE]
                if(audioBitrate is not None):
                    cmd.extend(['-ab', audioBitrate])
                cmd.extend(['-ac', '2', '-ar', sampleRate, '-map',
                    f'0:a:{trackNum}', f'{TEMP}{sep()}{trackNum}.wav'])
                ffmpeg.run(cmd)
                del cmd

            # Check if the `--cut_by_all_tracks` flag has been set or not.
            if(args.cut_by_all_tracks):
                # Combine all audio tracks into one audio file, then read.
                cmd = ['-i', INPUT_FILE, '-filter_complex',
                    f'[0:a]amix=inputs={tracks}:duration=longest', '-ar',
                    sampleRate, '-ac', '2', '-f', 'wav', f'{TEMP}{sep()}combined.wav']
                ffmpeg.run(cmd)
                sampleRate, audioData = read(f'{TEMP}{sep()}combined.wav')
                del cmd
            else:
                # Read only one audio file.
                if(os.path.isfile(f'{TEMP}{sep()}{args.cut_by_this_track}.wav')):
                    sampleRate, audioData = read(f'{TEMP}{sep()}{args.cut_by_this_track}.wav')
                else:
                    log.bug('Audio track not found!')

        log.debug(f'   - Frame Rate: {fps}')
        if(chunks is None):
            from cutting import audioToHasLoud, motionDetection

            audioList = None
            motionList = None
            if('audio' in args.edit_based_on):
                log.debug('Analyzing audio volume.')
                audioList = audioToHasLoud(audioData, sampleRate,
                    args.silent_threshold,  fps, log)

            if('motion' in args.edit_based_on):
                log.debug('Analyzing video motion.')
                motionList = motionDetection(INPUT_FILE, ffprobe,
                    args.motion_threshold, log, width=args.width,
                    dilates=args.dilates, blur=args.blur)

                if(audioList is not None):
                    if(len(audioList) != len(motionList)):
                        log.debug(f'audioList Length:  {len(audioList)}')
                        log.debug(f'motionList Length: {len(motionList)}')
                    if(len(audioList) > len(motionList)):
                        log.debug('Reducing the size of audioList to match motionList.')
                        audioList = audioList[:len(motionList)]
                    elif(len(motionList) > len(audioList)):
                        log.debug('Reducing the size of motionList to match audioList.')
                        motionList = motionList[:len(audioList)]

            from cutting import combineArrs, applySpacingRules

            hasLoud = combineArrs(audioList, motionList, args.edit_based_on, log)
            del audioList, motionList

            effects = []
            if(args.zoom != []):
                from cutting import applyZooms
                effects += applyZooms(args.zoom, audioData, sampleRate, fps, log)
            if(args.rectangle != []):
                from cutting import applyRects
                effects += applyRects(args.rectangle, audioData, sampleRate, fps, log)

            chunks = applySpacingRules(hasLoud, speeds, fps, args, log)
            del hasLoud


        def isClip(chunk):
            nonlocal speeds
            return speeds[chunk[2]] != 99999

        def getNumberOfCuts(chunks, speeds):
            return len(list(filter(isClip, chunks)))

        def getClips(chunks, speeds):
            clips = []
            for chunk in chunks:
                if(isClip(chunk)):
                    clips.append([chunk[0], chunk[1], speeds[chunk[2]] * 100])
            return clips

        numCuts = getNumberOfCuts(chunks, speeds)
        clips = getClips(chunks, speeds)

        if(fps is None and not audioFile):
            if(makingDataFile):
                constantLoc = appendFileName(INPUT_FILE, '_constantFPS')
            else:
                constantLoc = f'{TEMP}{sep()}constantVid{fileFormat}'
            ffmpeg.run(['-i', INPUT_FILE, '-filter:v', 'fps=fps=30', constantLoc])
            INPUT_FILE = constantLoc

        if(args.export_as_json):
            from makeCutList import makeCutList
            makeCutList(INPUT_FILE, newOutput, version, chunks, speeds, log)
            continue

        if(args.preview):
            newOutput = None
            from preview import preview
            preview(INPUT_FILE, chunks, speeds, fps, audioFile, log)
            continue

        if(args.export_to_premiere or args.export_to_resolve):
            from editor import editorXML
            editorXML(INPUT_FILE, TEMP, newOutput, ffprobe, clips, chunks, tracks,
                sampleRate, audioFile, args.export_to_resolve, fps, log)
            continue

        if(args.export_to_final_cut_pro):
            from editor import fcpXML
            fcpXML(INPUT_FILE, TEMP, newOutput, ffprobe, clips, chunks, tracks,
                sampleRate, audioFile, fps, log)
            continue

        def makeAudioFile(input_, chunks, output):
            from fastAudio import fastAudio, handleAudio, convertAudio
            theFile = handleAudio(ffmpeg, input_, audioBitrate, str(sampleRate),
                TEMP, log)

            TEMP_FILE = f'{TEMP}{sep()}convert.wav'
            fastAudio(theFile, TEMP_FILE, chunks, speeds, log, fps,
                args.machine_readable_progress, args.no_progress)
            convertAudio(ffmpeg, ffprobe, TEMP_FILE, input_, output, args, log)

        if(audioFile):
            if(args.export_as_clip_sequence):
                i = 1
                for item in chunks:
                    if(speeds[item[2]] == 99999):
                        continue
                    makeAudioFile(INPUT_FILE, [item], appendFileName(newOutput, f'-{i}'))
                    i += 1
            else:
                makeAudioFile(INPUT_FILE, chunks, newOutput)
            continue

        def makeVideoFile(input_, chunks, output):
            from videoUtils import handleAudioTracks, muxVideo
            continueVid = handleAudioTracks(ffmpeg, output, args, tracks, chunks, speeds,
                fps, TEMP, log)
            if(continueVid):
                if(args.render == 'auto'):
                    if(args.zoom != [] or args.rectangle != []):
                        args.render = 'opencv'
                    else:
                        try:
                            import av
                            args.render = 'av'
                        except ImportError:
                            args.render = 'opencv'

                log.debug(f'Using {args.render} method')
                if(args.render == 'av'):
                    if(args.zoom != []):
                        log.error('Zoom effect is not supported on the av render method.')

                    if(args.rectangle != []):
                        log.error('Rectangle effect is not supported on the av render method.')

                    from renderVideo import renderAv
                    renderAv(ffmpeg, ffprobe, input_, args, chunks, speeds, fps,
                             TEMP, log)

                if(args.render == 'opencv'):
                    from renderVideo import renderOpencv
                    renderOpencv(ffmpeg, ffprobe, input_, args, chunks, speeds, fps,
                        effects, TEMP, log)

                # Now mix new audio(s) and the new video.
                muxVideo(ffmpeg, output, args, tracks, TEMP, log)
                if(output is not None and not os.path.isfile(output)):
                    log.bug(f'The file {output} was not created.')

        if(args.export_as_clip_sequence):
            i = 1
            totalFrames = chunks[len(chunks) - 1][1]
            speeds.append(99999) # guarantee we have a cut speed to work with.
            for item in chunks:
                if(speeds[item[2]] == 99999):
                    continue

                makeVideoFile(INPUT_FILE, padChunk(item, totalFrames),
                    appendFileName(newOutput, f'-{i}'))
                i += 1
        else:
            makeVideoFile(INPUT_FILE, chunks, newOutput)

    if(not args.preview and not makingDataFile):
        timer.stop()

    if(not args.preview and makingDataFile):
        from usefulFunctions import humanReadableTime
        # Assume making each cut takes about 30 seconds.
        timeSave = humanReadableTime(numCuts * 30)

        s = 's' if numCuts != 1 else ''
        log.print(f'Auto-Editor made {numCuts} cut{s}', end='')
        log.print(f', which would have taken about {timeSave} if edited manually.')

    if(not args.no_open):
        from usefulFunctions import openWithSystemDefault
        openWithSystemDefault(newOutput, log)

    log.debug('Deleting temp dir')

    try:
        rmtree(TEMP)
    except PermissionError:
        from time import sleep
        sleep(1)
        try:
            rmtree(TEMP)
        except PermissionError:
            log.debug('Failed to delete temp dir.')
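
# appendFileName() is called several times above but isn't shown in this
# snippet. A minimal sketch of what such a helper plausibly does (an
# assumption, not the project's actual implementation): insert a suffix
# just before the file extension.
import os

def appendFileName(filename: str, suffix: str) -> str:
    # 'video.mp4' + '-1'  ->  'video-1.mp4'
    base, ext = os.path.splitext(filename)
    return base + suffix + ext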
Example n. 27
0
def main():
    options = []
    option_names = []

    def add_argument(*names,
                     nargs=1,
                     type=str,
                     default=None,
                     action='default',
                     range=None,
                     choices=None,
                     help='',
                     extra=''):
        nonlocal options
        nonlocal option_names

        newDic = {}
        newDic['names'] = names
        newDic['nargs'] = nargs
        newDic['type'] = type
        newDic['default'] = default
        newDic['action'] = action
        newDic['help'] = help
        newDic['extra'] = extra
        newDic['range'] = range
        newDic['choices'] = choices
        options.append(newDic)
        option_names = option_names + list(names)

    add_argument('(input)',
                 nargs='*',
                 help='the path to a file, folder, or url you want edited.')
    add_argument('--help',
                 '-h',
                 action='store_true',
                 help='print this message and exit.')

    add_argument(
        '--frame_margin',
        '-m',
        type=int,
        default=6,
        range='0 to Infinity',
        help=
        'set how many "silent" frames of on either side of "loud" sections be included.'
    )
    add_argument(
        '--silent_threshold',
        '-t',
        type=float_type,
        default=0.04,
        range='0 to 1',
        help='set the volume that frame audio needs to surpass to be considered "loud".')
    add_argument(
        '--video_speed',
        '--sounded_speed',
        '-v',
        type=float_type,
        default=1.00,
        range='0 to 999999',
        help='set the speed that "loud" sections should be played at.')
    add_argument(
        '--silent_speed',
        '-s',
        type=float_type,
        default=99999,
        range='0 to 99999',
        help='set the speed that "silent" sections should be played at.')
    add_argument('--output_file',
                 '-o',
                 nargs='*',
                 help='set the name(s) of the new output.')

    add_argument('--no_open',
                 action='store_true',
                 help='do not open the file after editing is done.')
    add_argument(
        '--min_clip_length',
        '-mclip',
        type=int,
        default=3,
        range='0 to Infinity',
        help=
        'set the minimum length a clip can be. If a clip is too short, cut it.'
    )
    add_argument(
        '--min_cut_length',
        '-mcut',
        type=int,
        default=6,
        range='0 to Infinity',
        help=
        "set the minimum length a cut can be. If a cut is too short, don't cut"
    )
    add_argument('--combine_files',
                 action='store_true',
                 help='combine all input files into one before editing.')
    add_argument('--preview',
                 action='store_true',
                 help='show stats on how the input will be cut.')

    add_argument(
        '--cut_by_this_audio',
        '-ca',
        type=file_type,
        help="base cuts by this audio file instead of the video's audio.")
    add_argument('--cut_by_this_track',
                 '-ct',
                 type=int,
                 default=0,
                 range='0 to the number of audio tracks minus one',
                 help='base cuts by a different audio track in the video.')
    add_argument('--cut_by_all_tracks',
                 '-cat',
                 action='store_true',
                 help='combine all audio tracks into one before basing cuts.')
    add_argument('--keep_tracks_seperate',
                 action='store_true',
                 help="don't combine audio tracks when exporting.")

    add_argument(
        '--my_ffmpeg',
        action='store_true',
        help='use your ffmpeg and other binaries instead of the ones packaged.'
    )
    add_argument('--version',
                 action='store_true',
                 help='show which auto-editor you have.')
    add_argument('--debug',
                 '--verbose',
                 action='store_true',
                 help='show debugging messages and values.')
    add_argument('--show_ffmpeg_debug',
                 action='store_true',
                 help='show ffmpeg progress and output.')

    # TODO: add export_as_video
    add_argument('--export_as_audio',
                 '-exa',
                 action='store_true',
                 help='export as a WAV audio file.')
    add_argument(
        '--export_to_premiere',
        '-exp',
        action='store_true',
        help=
        'export as an XML file for Adobe Premiere Pro instead of outputting a media file.'
    )
    add_argument(
        '--export_to_resolve',
        '-exr',
        action='store_true',
        help=
        'export as an XML file for DaVinci Resolve instead of outputting a media file.'
    )

    add_argument('--video_bitrate',
                 '-vb',
                 help='set the number of bits per second for video.')
    add_argument('--audio_bitrate',
                 '-ab',
                 help='set the number of bits per second for audio.')
    add_argument('--sample_rate',
                 '-r',
                 type=sample_rate_type,
                 help='set the sample rate of the input and output videos.')
    add_argument('--video_codec',
                 '-vcodec',
                 default='uncompressed',
                 help='set the video codec for the output media file.')
    add_argument(
        '--preset',
        '-p',
        default='medium',
        choices=[
            'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium',
            'slow', 'slower', 'veryslow'
        ],
        help=
        'set the preset for ffmpeg to help save file size or increase quality.'
    )
    add_argument('--tune',
                 default='none',
                 choices=[
                     'film', 'animation', 'grain', 'stillimage', 'fastdecode',
                     'zerolatency', 'none'
                 ],
                 help='set the tune for ffmpeg to compress video better.')

    add_argument('--ignore',
                 nargs='*',
                 help='the range that will be marked as "loud"')
    add_argument('--cut_out',
                 nargs='*',
                 help='the range that will be marked as "silent"')
    add_argument('--motion_threshold',
                 type=float_type,
                 default=0.02,
                 range='0 to 1',
                 help='how much motion is required to be considered "moving"')
    add_argument('--edit_based_on',
                 default='audio',
                 choices=[
                     'audio', 'motion', 'not_audio', 'not_motion',
                     'audio_or_motion', 'audio_and_motion', 'audio_xor_motion',
                     'audio_and_not_motion'
                 ],
                 help='decide which method to use when making edits.')

    dirPath = os.path.dirname(os.path.realpath(__file__))
    # Fixes pip not able to find other included modules.
    sys.path.append(os.path.abspath(dirPath))

    from usefulFunctions import Log

    audioExtensions = [
        '.wav', '.mp3', '.m4a', '.aiff', '.flac', '.ogg', '.oga', '.acc',
        '.nfa', '.mka'
    ]

    # videoExtensions = ['.mp4', '.mkv', '.mov', '.webm', '.ogv']

    invalidExtensions = [
        '.txt', '.md', '.rtf', '.csv', '.cvs', '.html', '.htm', '.xml',
        '.json', '.yaml', '.png', '.jpeg', '.jpg', '.gif', '.exe', '.doc',
        '.docx', '.odt', '.pptx', '.xlsx', '.xls', '.ods', '.pdf', '.bat',
        '.dll', '.prproj', '.psd', '.aep', '.zip', '.rar', '.7z', '.java',
        '.class', '.js', '.c', '.cpp', '.csharp', '.py', '.app', '.git',
        '.github', '.gitignore', '.db', '.ini', '.BIN'
    ]

    class parse_options():
        def __init__(self, userArgs, log, *args):
            # Set the default options.
            for options in args:
                for option in options:
                    key = option['names'][0].replace('-', '')
                    if (option['action'] == 'store_true'):
                        value = False
                    elif (option['nargs'] != 1):
                        value = []
                    else:
                        value = option['default']
                    setattr(self, key, value)

            def get_option(item, the_args: list):
                for options in the_args:
                    for option in options:
                        if (item in option['names']):
                            return option
                return None

            # Figure out attributes changed by user.
            myList = []
            settingInputs = True
            optionList = 'input'
            i = 0
            while i < len(userArgs):
                item = userArgs[i]
                if (i == len(userArgs) - 1):
                    nextItem = None
                else:
                    nextItem = userArgs[i + 1]

                option = get_option(item, args)

                if (option is not None):
                    if (optionList is not None):
                        setattr(self, optionList, myList)
                    settingInputs = False
                    optionList = None
                    myList = []

                    key = option['names'][0].replace('-', '')

                    # Show help for specific option.
                    if (nextItem == '-h' or nextItem == '--help'):
                        print(' ', ', '.join(option['names']))
                        print('   ', option['help'])
                        print('   ', option['extra'])
                        if (option['action'] == 'default'):
                            print('    type:', option['type'].__name__)
                            print('    default:', option['default'])
                            if (option['range'] is not None):
                                print('    range:', option['range'])
                            if (option['choices'] is not None):
                                print('    choices:',
                                      ', '.join(option['choices']))
                        else:
                            print('    type: flag')
                        sys.exit()

                    if (option['nargs'] != 1):
                        settingInputs = True
                        optionList = key
                    elif (option['action'] == 'store_true'):
                        value = True
                    else:
                        try:
                            # Convert to correct type.
                            value = option['type'](nextItem)
                        except:
                            typeName = option['type'].__name__
                            log.error(
                                f'Couldn\'t convert "{nextItem}" to {typeName}'
                            )
                        if (option['choices'] is not None):
                            if (value not in option['choices']):
                                optionName = option['names'][0]
                                myChoices = ', '.join(option['choices'])
                                log.error(f'{value} is not a choice for {optionName}' \
                                    f'\nchoices are:\n  {myChoices}')
                        i += 1
                    setattr(self, key, value)
                else:
                    if (settingInputs and not item.startswith('-')):
                        # Input file names
                        myList.append(item)
                    else:
                        # Unknown Option!
                        hmm = difflib.get_close_matches(item, option_names)
                        potential_options = ', '.join(hmm)
                        append = ''
                        if (hmm != []):
                            append = f'\n\n    Did you mean:\n        {potential_options}'
                        log.error(f'Unknown option: {item}{append}')
                i += 1
            if (settingInputs):
                setattr(self, optionList, myList)

    args = parse_options(sys.argv[1:], Log(3), options)
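    # For illustration, a command line the parser above would accept
    # (file names are hypothetical):
    #   auto-editor input.mp4 --frame_margin 4 --silent_speed 8 -o edited.mp4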

    # Print the help screen for the entire program.
    if (args.help):
        print('')
        for option in options:
            print(' ', ', '.join(option['names']) + ':', option['help'])
        print('\nThe help command can also be used on a specific option.')
        print('example:')
        print('    auto-editor --frame_margin --help')
        print('\nHave an issue? Make an issue. '\
            'Visit https://github.com/wyattblue/auto-editor/issues')
        sys.exit()

    if (args.version):
        print('Auto-Editor version', version)
        sys.exit()

    from usefulFunctions import vidTracks, conwrite, getBinaries
    from wavfile import read

    if (not args.preview):
        if (args.export_to_premiere):
            conwrite('Exporting to Adobe Premiere Pro XML file.')
        elif (args.export_to_resolve):
            conwrite('Exporting to DaVinci Resolve XML file.')
        elif (args.export_as_audio):
            conwrite('Exporting as audio.')
        else:
            conwrite('Starting.')

    ffmpeg, ffprobe = getBinaries(platform.system(), dirPath, args.my_ffmpeg)
    makingDataFile = args.export_to_premiere or args.export_to_resolve
    is64bit = '64-bit' if sys.maxsize > 2**32 else '32-bit'

    if (args.debug and args.input == []):
        print('Python Version:', platform.python_version(), is64bit)
        print('Platform:', platform.system(), platform.release())
        # Platform can be 'Linux', 'Darwin' (macOS), 'Java', 'Windows'
        ffmpegVersion = pipeToConsole([ffmpeg, '-version']).split('\n')[0]
        ffmpegVersion = ffmpegVersion.replace('ffmpeg version', '').strip()
        ffmpegVersion = ffmpegVersion.split(' ')[0]
        print('FFmpeg path:', ffmpeg)
        print('FFmpeg version:', ffmpegVersion)
        print('Auto-Editor version', version)
        sys.exit()

    log = Log(args.debug, args.show_ffmpeg_debug)
    log.debug('')

    if (is64bit == '32-bit'):
        log.warning(
            'You have the 32-bit version of Python, which may lead to memory crashes.'
        )
    if (args.frame_margin < 0):
        log.error('Frame margin cannot be negative.')

    if (args.input == []):
        log.error(
            'You need the (input) argument so that auto-editor can do the work for you.'
        )

    try:
        from requests import get
        latestVersion = get(
            'https://raw.githubusercontent.com/wyattblue/auto-editor/master/resources/version.txt'
        )
        if (latestVersion.text != version):
            print('\nAuto-Editor is out of date. Run:\n')
            print('    pip3 install -U auto-editor')
            print('\nto upgrade to the latest version.\n')
        del latestVersion
    except Exception as err:
        log.debug('Check for update Error: ' + str(err))

    if (args.silent_speed <= 0 or args.silent_speed > 99999):
        args.silent_speed = 99999
    if (args.video_speed <= 0 or args.video_speed > 99999):
        args.video_speed = 99999

    inputList = []
    for myInput in args.input:
        if (os.path.isdir(myInput)):

            def validFiles(path: str, badExts: list):
                for f in os.listdir(path):
                    if (not f[f.rfind('.'):] in badExts):
                        yield os.path.join(path, f)

            inputList += sorted(validFiles(myInput, invalidExtensions))
        elif (os.path.isfile(myInput)):
            inputList.append(myInput)
        elif (myInput.startswith('http://') or myInput.startswith('https://')):
            basename = re.sub(r'\W+', '-', myInput)

            if (not os.path.isfile(basename + '.mp4')):
                print(
                    'URL detected, using youtube-dl to download from webpage.')
                cmd = [
                    'youtube-dl', '-f',
                    'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4', myInput,
                    '--output', basename, '--no-check-certificate'
                ]
                if (ffmpeg != 'ffmpeg'):
                    cmd.extend(['--ffmpeg-location', ffmpeg])
                subprocess.call(cmd)

            inputList.append(basename + '.mp4')
        else:
            log.error('Could not find file: ' + myInput)

    startTime = time.time()

    if (args.output_file is None):
        args.output_file = []

    # Figure out the output file names.
    if (len(args.output_file) < len(inputList)):
        for i in range(len(inputList) - len(args.output_file)):
            oldFile = inputList[i]
            dotIndex = oldFile.rfind('.')
            if (args.export_to_premiere or args.export_to_resolve):
                args.output_file.append(oldFile[:dotIndex] + '.xml')
            else:
                ext = oldFile[dotIndex:]
                if (args.export_as_audio):
                    ext = '.wav'
                end = '_ALTERED' + ext
                args.output_file.append(oldFile[:dotIndex] + end)

    TEMP = tempfile.mkdtemp()
    log.debug(f'\n   - Temp Directory: {TEMP}')

    if (args.combine_files):
        # Combine video files, then set input to 'combined.mp4'.
        cmd = [ffmpeg, '-y']
        for fileref in inputList:
            cmd.extend(['-i', fileref])
        cmd.extend([
            '-filter_complex', f'[0:v]concat=n={len(inputList)}:v=1:a=1',
            '-codec:v', 'h264', '-pix_fmt', 'yuv420p', '-strict', '-2',
            f'{TEMP}/combined.mp4'
        ])
        if (log.ffmpeg):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '8'])

        subprocess.call(cmd)
        inputList = [f'{TEMP}/combined.mp4']

    speeds = [args.silent_speed, args.video_speed]
    numCuts = 0
    for i, INPUT_FILE in enumerate(inputList):
        log.debug(f'   - INPUT_FILE: {INPUT_FILE}')
        # Ignore folders
        if (os.path.isdir(INPUT_FILE)):
            continue

        # Throw error if file referenced doesn't exist.
        if (not os.path.isfile(INPUT_FILE)):
            log.error(f"{INPUT_FILE} doesn't exist!")

        # Check if the file format is valid.
        fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]

        if (fileFormat in invalidExtensions):
            log.error(
                f'Invalid file extension "{fileFormat}" for {INPUT_FILE}')

        audioFile = fileFormat in audioExtensions

        # Get output file name.
        newOutput = args.output_file[i]
        log.debug(f'   - newOutput: {newOutput}')

        # Grab the sample rate from the input.
        sr = args.sample_rate
        if (sr is None):
            output = pipeToConsole([ffmpeg, '-i', INPUT_FILE, '-hide_banner'])
            try:
                matchDict = re.search(r'\s(?P<grp>\w+?)\sHz',
                                      output).groupdict()
                sr = matchDict['grp']
            except AttributeError:
                sr = 48000
        args.sample_rate = sr
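        # For reference: the regex above scans ffmpeg's banner output for a
        # pattern like "... 48000 Hz, stereo ..." and keeps the number before
        # "Hz"; if nothing matches, 48000 is used as a fallback.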

        # Grab the audio bitrate from the input.
        abit = args.audio_bitrate
        if (abit is None):
            if (not INPUT_FILE.endswith('.mkv')):
                output = pipeToConsole([
                    ffprobe, '-v', 'error', '-select_streams', 'a:0',
                    '-show_entries', 'stream=bit_rate', '-of',
                    'compact=p=0:nk=1', INPUT_FILE
                ])
                try:
                    abit = int(output)
                except:
                    log.warning("Couldn't automatically detect audio bitrate.")
                    abit = '500k'
                    log.debug('Setting audio bitrate to ' + abit)
                else:
                    abit = str(round(abit / 1000)) + 'k'
        else:
            abit = str(abit)
        args.audio_bitrate = abit
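        # For reference: ffprobe's "stream=bit_rate" query prints a bare
        # integer such as 128000, which the else-branch above turns into
        # "128k". When that integer can't be parsed, "500k" is used as a
        # fallback; .mkv inputs skip the probe entirely (abit stays None),
        # presumably because Matroska streams often don't report a
        # per-stream bit_rate.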

        if (audioFile):
            fps = 30  # Audio files don't have frames, so give fps a dummy value.
            tracks = 1
            cmd = [
                ffmpeg, '-y', '-i', INPUT_FILE, '-b:a', args.audio_bitrate,
                '-ac', '2', '-ar',
                str(args.sample_rate), '-vn', f'{TEMP}/fastAud.wav'
            ]
            if (log.is_ffmpeg):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '8'])
            subprocess.call(cmd)

            sampleRate, audioData = read(f'{TEMP}/fastAud.wav')
        else:
            if (args.export_to_premiere):
                # This is the default fps value for Premiere Pro Projects.
                fps = 29.97
            else:
                # Grab fps to know what the output video's fps should be.
                # DaVinci Resolve doesn't need fps, but grab it anyway just in case.
                fps = ffmpegFPS(ffmpeg, INPUT_FILE, log)

            tracks = vidTracks(INPUT_FILE, ffprobe, log)
            if (args.cut_by_this_track >= tracks):
                log.error("You choose a track that doesn't exist.\n" \
                    f'There are only {tracks-1} tracks. (starting from 0)')

            vcodec = args.video_codec
            if (vcodec == 'copy'):
                output = pipeToConsole(
                    [ffmpeg, '-i', INPUT_FILE, '-hide_banner'])
                try:
                    matchDict = re.search(r'Video:\s(?P<video>\w+?)\s',
                                          output).groupdict()
                    vcodec = matchDict['video']
                    log.debug(vcodec)
                except AttributeError:
                    vcodec = 'uncompressed'
                    log.warning("Couldn't automatically detect video codec.")

            if (args.video_bitrate is not None and vcodec == 'uncompressed'):
                log.warning('Your bitrate will not be applied because' \
                        ' the video codec is "uncompressed".')

            if (vcodec == 'uncompressed'):
                # FFmpeg copies the uncompressed output that cv2 spits out.
                vcodec = 'copy'

            # Split audio tracks into: 0.wav, 1.wav, etc.
            for trackNum in range(tracks):
                cmd = [ffmpeg, '-y', '-i', INPUT_FILE]
                if (args.audio_bitrate is not None):
                    cmd.extend(['-ab', args.audio_bitrate])

                cmd.extend([
                    '-ac', '2', '-ar',
                    str(args.sample_rate), '-map', f'0:a:{trackNum}',
                    f'{TEMP}/{trackNum}.wav'
                ])

                if (log.is_ffmpeg):
                    cmd.extend(['-hide_banner'])
                else:
                    cmd.extend(['-nostats', '-loglevel', '8'])
                subprocess.call(cmd)

            # Check if the `--cut_by_all_tracks` flag has been set or not.
            if (args.cut_by_all_tracks):
                # Combine all audio tracks into one audio file, then read.
                cmd = [
                    ffmpeg, '-y', '-i', INPUT_FILE, '-filter_complex',
                    f'[0:a]amerge=inputs={tracks}', '-map', 'a', '-ar',
                    str(args.sample_rate), '-ac', '2', '-f', 'wav',
                    f'{TEMP}/combined.wav'
                ]
                if (log.is_ffmpeg):
                    cmd.extend(['-hide_banner'])
                else:
                    cmd.extend(['-nostats', '-loglevel', '8'])

                subprocess.call(cmd)

                sampleRate, audioData = read(f'{TEMP}/combined.wav')
            else:
                # Read only one audio file.
                if (os.path.isfile(f'{TEMP}/{args.cut_by_this_track}.wav')):
                    sampleRate, audioData = read(
                        f'{TEMP}/{args.cut_by_this_track}.wav')
                else:
                    log.error('Audio track not found!')

        from cutting import audioToHasLoud, motionDetection, applySpacingRules
        import numpy as np

        audioList = None
        motionList = None
        if ('audio' in args.edit_based_on):
            log.debug('Analyzing audio volume.')
            audioList = audioToHasLoud(audioData, sampleRate,
                                       args.silent_threshold, fps, log)

        if ('motion' in args.edit_based_on):
            log.debug('Analyzing video motion.')
            motionList = motionDetection(INPUT_FILE,
                                         ffprobe,
                                         args.motion_threshold,
                                         log,
                                         width=400,
                                         dilates=2,
                                         blur=21)

            if (audioList is not None):
                if (len(audioList) > len(motionList)):
                    log.debug(
                        'Reducing the size of audioList to match motionList')
                    log.debug(f'audioList Length:  {len(audioList)}')
                    log.debug(f'motionList Length: {len(motionList)}')
                    audioList = audioList[:len(motionList)]

        if (args.edit_based_on == 'audio'
                or args.edit_based_on == 'not_audio'):
            if (max(audioList) == 0):
                log.error(
                    'There was no place where audio exceeded the threshold.')
        if (args.edit_based_on == 'motion'
                or args.edit_based_on == 'not_motion'):
            if (max(motionList) == 0):
                log.error(
                    'There was no place where motion exceeded the threshold.')

        # Only raise a warning for other cases.
        if (audioList is not None and max(audioList) == 0):
            log.warning(
                'There was no place where audio exceeded the threshold.')
        if (motionList is not None and max(motionList) == 0):
            log.warning(
                'There was no place where motion exceeded the threshold.')

        if (args.edit_based_on == 'audio'):
            hasLoud = audioList

        if (args.edit_based_on == 'motion'):
            hasLoud = motionList

        if (args.edit_based_on == 'not_audio'):
            hasLoud = np.invert(audioList)

        if (args.edit_based_on == 'not_motion'):
            hasLoud = np.invert(motionList)

        if (args.edit_based_on == 'audio_and_motion'):
            log.debug('Applying "Python bitwise and" on arrays.')
            hasLoud = audioList & motionList

        if (args.edit_based_on == 'audio_or_motion'):
            log.debug('Applying "Python bitwise or" on arrays.')
            hasLoud = audioList | motionList

        if (args.edit_based_on == 'audio_xor_motion'):
            log.debug('Applying "numpy bitwise_xor" on arrays')
            hasLoud = np.bitwise_xor(audioList, motionList)

        if (args.edit_based_on == 'audio_and_not_motion'):
            log.debug(
                'Applying "Python bitwise and" with "numpy bitwise not" on arrays.'
            )
            hasLoud = audioList & np.invert(motionList)
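
        # Note: audioList and motionList are numpy boolean arrays, so the
        # &, |, bitwise_xor and invert operations above combine the two
        # criteria element-wise, one value per frame.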

        chunks, includeFrame = applySpacingRules(
            hasLoud, fps, args.frame_margin, args.min_clip_length,
            args.min_cut_length, args.ignore, args.cut_out, log)

        clips = []
        for chunk in chunks:
            if (speeds[chunk[2]] == 99999):
                numCuts += 1
            else:
                clips.append([chunk[0], chunk[1], speeds[chunk[2]] * 100])

        if (fps is None and not audioFile):
            if (makingDataFile):
                dotIndex = INPUT_FILE.rfind('.')
                end = '_constantFPS' + INPUT_FILE[dotIndex:]
                constantLoc = INPUT_FILE[:dotIndex] + end
            else:
                constantLoc = f'{TEMP}/constantVid{fileFormat}'
            cmd = [
                ffmpeg, '-y', '-i', INPUT_FILE, '-filter:v', 'fps=fps=30',
                constantLoc
            ]
            if (log.is_ffmpeg):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '8'])
            subprocess.call(cmd)
            INPUT_FILE = constantLoc

        if (args.preview):
            args.no_open = True
            from preview import preview

            preview(INPUT_FILE, chunks, speeds, fps, audioFile, log)
            continue

        if (args.export_to_premiere):
            args.no_open = True
            from premiere import exportToPremiere

            exportToPremiere(INPUT_FILE, TEMP, newOutput, clips, tracks,
                             sampleRate, audioFile, log)
            continue
        if (args.export_to_resolve):
            args.no_open = True
            duration = chunks[len(chunks) - 1][1]
            from resolve import exportToResolve

            exportToResolve(INPUT_FILE, newOutput, clips, duration, sampleRate,
                            audioFile, log)
            continue
        if (audioFile and not makingDataFile):
            from fastAudio import fastAudio

            fastAudio(ffmpeg, INPUT_FILE, newOutput, chunks, speeds,
                      args.audio_bitrate, sampleRate, True, TEMP, log, fps)
            continue

        from fastVideo import fastVideo
        fastVideo(ffmpeg, INPUT_FILE, newOutput, chunks, includeFrame, speeds,
                  tracks, args.audio_bitrate, sampleRate, TEMP,
                  args.keep_tracks_seperate, vcodec, fps, args.export_as_audio,
                  args.video_bitrate, args.preset, args.tune, log)

    if (not os.path.isfile(newOutput)):
        log.error(f'The file {newOutput} was not created.')

    if (not args.preview and not makingDataFile):
        timeLength = round(time.time() - startTime, 2)
        minutes = timedelta(seconds=round(timeLength))
        print(f'Finished. Took {timeLength} seconds ({minutes})')

    if (not args.preview and makingDataFile):
        timeSave = numCuts * 2  # assuming making each cut takes about 2 seconds.
        units = 'seconds'
        if (timeSave >= 3600):
            timeSave = round(timeSave / 3600, 1)
            if (timeSave % 1 == 0):
                timeSave = round(timeSave)
            units = 'hours'
        if (timeSave >= 60):
            timeSave = round(timeSave / 60, 1)
            if (timeSave >= 10 or timeSave % 1 == 0):
                timeSave = round(timeSave)
            units = 'minutes'
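        # For example: numCuts = 150  ->  150 * 2 = 300 seconds, reported as "5 minutes".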

        plural = 's' if numCuts != 1 else ''

        print(f'Auto-Editor made {numCuts} cut{plural}', end='')
        if (numCuts > 4):
            print(f', which would have taken about {timeSave} {units} if' \
                ' edited manually.')
        else:
            print('.')

    if (not args.no_open):
        try:  # should work on Windows
            os.startfile(newOutput)
        except AttributeError:
            try:  # should work on MacOS and most Linux versions
                subprocess.call(['open', newOutput])
            except:
                try:  # should work on WSL2
                    subprocess.call(['cmd.exe', '/C', 'start', newOutput])
                except:
                    log.warning('Could not open output file.')
    rmtree(TEMP)
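
# pipeToConsole() is used above to read ffmpeg/ffprobe output but isn't part
# of this snippet. A rough sketch of what it plausibly looks like (an
# assumption, for illustration only): run a command and return its combined
# stdout/stderr as text, since ffmpeg writes its banner to stderr.
import subprocess

def pipeToConsole(cmd: list) -> str:
    result = subprocess.run(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    return result.stdout.decode('utf-8', errors='replace')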
Example n. 28
0
def main():
    options = []
    option_names = []

    def add_argument(*names,
                     nargs=1,
                     type=str,
                     default=None,
                     action='default',
                     range=None,
                     choices=None,
                     help='',
                     extra=''):
        nonlocal options
        nonlocal option_names

        newDic = {}
        newDic['names'] = names
        newDic['nargs'] = nargs
        newDic['type'] = type
        newDic['default'] = default
        newDic['action'] = action
        newDic['help'] = help
        newDic['extra'] = extra
        newDic['range'] = range
        newDic['choices'] = choices
        options.append(newDic)
        option_names = option_names + list(names)

    add_argument('(input)',
                 nargs='*',
                 help='the path to a file, folder, or url you want edited.')
    add_argument('--help',
                 '-h',
                 action='store_true',
                 help='print this message and exit.')

    add_argument(
        '--frame_margin',
        '-m',
        type=int,
        default=6,
        range='0 to Infinity',
        help=
        'set how many "silent" frames of on either side of "loud" sections be included.'
    )
    add_argument(
        '--silent_threshold',
        '-t',
        type=float_type,
        default=0.04,
        range='0 to 1',
        help='set the volume that frame audio needs to surpass to be considered "loud".')
    add_argument(
        '--video_speed',
        '--sounded_speed',
        '-v',
        type=float_type,
        default=1.00,
        range='0 to 999999',
        help='set the speed that "loud" sections should be played at.')
    add_argument(
        '--silent_speed',
        '-s',
        type=float_type,
        default=99999,
        range='0 to 99999',
        help='set the speed that "silent" sections should be played at.')
    add_argument('--output_file',
                 '-o',
                 nargs='*',
                 help='set the name(s) of the new output.')

    add_argument('--no_open',
                 action='store_true',
                 help='do not open the file after editing is done.')
    add_argument(
        '--min_clip_length',
        '-mclip',
        type=int,
        default=3,
        range='0 to Infinity',
        help=
        'set the minimum length a clip can be. If a clip is too short, cut it.'
    )
    add_argument(
        '--min_cut_length',
        '-mcut',
        type=int,
        default=6,
        range='0 to Infinity',
        help=
        "set the minimum length a cut can be. If a cut is too short, don't cut"
    )
    add_argument('--combine_files',
                 action='store_true',
                 help='combine all input files into one before editing.')
    add_argument('--preview',
                 action='store_true',
                 help='show stats on how the input will be cut.')

    add_argument(
        '--cut_by_this_audio',
        '-ca',
        type=file_type,
        help="base cuts by this audio file instead of the video's audio.")
    add_argument('--cut_by_this_track',
                 '-ct',
                 type=int,
                 default=0,
                 range='0 to the number of audio tracks',
                 help='base cuts by a different audio track in the video.')
    add_argument('--cut_by_all_tracks',
                 '-cat',
                 action='store_true',
                 help='combine all audio tracks into one before basing cuts.')
    add_argument('--keep_tracks_seperate',
                 action='store_true',
                 help="don't combine audio tracks when exporting.")

    add_argument(
        '--my_ffmpeg',
        action='store_true',
        help='use your ffmpeg and other binaries instead of the ones packaged.'
    )
    add_argument('--version',
                 action='store_true',
                 help='show which auto-editor you have.')
    add_argument('--debug',
                 '--verbose',
                 action='store_true',
                 help='show helpful debugging values.')

    # TODO: add export_as_video
    add_argument('--export_as_audio',
                 '-exa',
                 action='store_true',
                 help='export as a WAV audio file.')
    add_argument(
        '--export_to_premiere',
        '-exp',
        action='store_true',
        help=
        'export as an XML file for Adobe Premiere Pro instead of outputting a media file.'
    )
    add_argument(
        '--export_to_resolve',
        '-exr',
        action='store_true',
        help=
        'export as an XML file for DaVinci Resolve instead of outputting a media file.'
    )

    add_argument('--video_bitrate',
                 '-vb',
                 help='set the number of bits per second for video.')
    add_argument('--audio_bitrate',
                 '-ab',
                 help='set the number of bits per second for audio.')
    add_argument('--sample_rate',
                 '-r',
                 type=sample_rate_type,
                 help='set the sample rate of the input and output videos.')
    add_argument('--video_codec',
                 '-vcodec',
                 help='set the video codec for the output file.')
    add_argument(
        '--preset',
        '-p',
        default='medium',
        choices=[
            'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium',
            'slow', 'slower', 'veryslow'
        ],
        help=
        'set the preset for ffmpeg to help save file size or increase quality.'
    )
    add_argument('--tune',
                 default='none',
                 choices=[
                     'film', 'animation', 'grain', 'stillimage', 'fastdecode',
                     'zerolatency', 'none'
                 ],
                 help='set the tune for ffmpeg to help compress video better.')

    add_argument(
        '--ignore',
        nargs='*',
        help=
        "the range (in seconds) that shouldn't be edited at all. (uses range syntax)"
    )
    add_argument('--cut_out', nargs='*',
        help='the range (in seconds) that should be cut out completely, '\
            'regardless of anything else. (uses range syntax)')

    dirPath = os.path.dirname(os.path.realpath(__file__))
    # Fixes pip not able to find other included modules.
    sys.path.append(os.path.abspath(dirPath))

    from usefulFunctions import Log

    class parse_options():
        def __init__(self, userArgs, log, *args):
            # Set the default options.
            for options in args:
                for option in options:
                    key = option['names'][0].replace('-', '')
                    if (option['action'] == 'store_true'):
                        value = False
                    elif (option['nargs'] != 1):
                        value = []
                    else:
                        value = option['default']
                    setattr(self, key, value)

            def get_option(item, the_args):
                for options in the_args:
                    for option in options:
                        if (item in option['names']):
                            return option
                return None

            # Figure out attributes changed by user.
            myList = []
            settingInputs = True
            optionList = 'input'
            i = 0
            while i < len(userArgs):
                item = userArgs[i]
                if (i == len(userArgs) - 1):
                    nextItem = None
                else:
                    nextItem = userArgs[i + 1]

                option = get_option(item, args)

                if (option is not None):
                    if (optionList is not None):
                        setattr(self, optionList, myList)
                    settingInputs = False
                    optionList = None
                    myList = []

                    key = option['names'][0].replace('-', '')

                    # show help for specific option.
                    if (nextItem == '-h' or nextItem == '--help'):
                        print(' ', ', '.join(option['names']))
                        print('   ', option['help'])
                        print('   ', option['extra'])
                        if (option['action'] == 'default'):
                            print('    type:', option['type'].__name__)
                            print('    default:', option['default'])
                            if (option['range'] is not None):
                                print('    range:', option['range'])
                            if (option['choices'] is not None):
                                print('    choices:',
                                      ', '.join(option['choices']))
                        else:
                            print('    type: flag')
                        sys.exit()

                    if (option['nargs'] != 1):
                        settingInputs = True
                        optionList = key
                    elif (option['action'] == 'store_true'):
                        value = True
                    else:
                        try:
                            # Convert to correct type.
                            value = option['type'](nextItem)
                        except:
                            typeName = option['type'].__name__
                            log.error(
                                f'Couldn\'t convert "{nextItem}" to {typeName}'
                            )
                        if (option['choices'] is not None):
                            if (value not in option['choices']):
                                log.error(
                                    f'{value} is not a choice for {option}')
                        i += 1
                    setattr(self, key, value)
                else:
                    if (settingInputs and not item.startswith('-')):
                        # Input file names
                        myList.append(item)
                    else:
                        # Unknown Option!
                        hmm = difflib.get_close_matches(item, option_names)
                        potential_options = ', '.join(hmm)
                        append = ''
                        if (hmm != []):
                            append = f'\n\n    Did you mean:\n        {potential_options}'
                        log.error(f'Unknown option: {item}{append}')
                i += 1
            if (settingInputs):
                setattr(self, optionList, myList)

    args = parse_options(sys.argv[1:], Log(3), options)

    # Print help screen for entire program.
    if (args.help):
        for option in options:
            print(' ', ', '.join(option['names']) + ':', option['help'])
        print('\nHave an issue? Make an issue. '\
            'Visit https://github.com/wyattblue/auto-editor/issues')
        sys.exit()

    if (args.version):
        print('Auto-Editor version', version)
        sys.exit()

    from usefulFunctions import isAudioFile, vidTracks, conwrite, getAudioChunks
    from wavfile import read, write

    if (not args.preview):
        if (args.export_to_premiere):
            conwrite('Exporting to Adobe Premiere Pro XML file.')
        elif (args.export_to_resolve):
            conwrite('Exporting to DaVinci Resolve XML file.')
        elif (args.export_as_audio):
            conwrite('Exporting as audio.')
        else:
            conwrite('Starting.')

    newF = None
    newP = None
    if (platform.system() == 'Windows' and not args.my_ffmpeg):
        newF = os.path.join(dirPath, 'win-ffmpeg/bin/ffmpeg.exe')
        newP = os.path.join(dirPath, 'win-ffmpeg/bin/ffprobe.exe')
    if (platform.system() == 'Darwin' and not args.my_ffmpeg):
        newF = os.path.join(dirPath, 'mac-ffmpeg/bin/ffmpeg')
        newP = os.path.join(dirPath, 'mac-ffmpeg/bin/ffprobe')
    if (newF is not None and os.path.isfile(newF)):
        ffmpeg = newF
        ffprobe = newP
    else:
        ffmpeg = 'ffmpeg'
        ffprobe = 'ffprobe'

    makingDataFile = args.export_to_premiere or args.export_to_resolve

    is64bit = '64-bit' if sys.maxsize > 2**32 else '32-bit'

    if (args.debug):
        print('Python Version:', platform.python_version(), is64bit)
        print('Platform:', platform.system())
        # Platform can be 'Linux', 'Darwin' (macOS), 'Java', 'Windows'

        print('FFmpeg path:', ffmpeg)
        print('Auto-Editor version', version)
        if (args.input == []):
            sys.exit()

    log = Log(3 if args.debug else 2)

    if (is64bit == '32-bit'):
        # I should have put this warning a long time ago.
        log.warning("You have the 32-bit version of Python, which means you won't be " \
            'able to handle long videos.')

    if (args.frame_margin < 0):
        log.error('Frame margin cannot be negative.')

    if (args.input == []):
        log.error(
            'You need the (input) argument so that auto-editor can do the work for you.'
        )

    if (args.silent_speed <= 0 or args.silent_speed > 99999):
        args.silent_speed = 99999
    if (args.video_speed <= 0 or args.video_speed > 99999):
        args.video_speed = 99999

    inputList = []
    for myInput in args.input:
        if (os.path.isdir(myInput)):

            def validFiles(path):
                for f in os.listdir(path):
                    if (not f.startswith('.') and not f.endswith('.xml')
                            and not f.endswith('.png')
                            and not f.endswith('.md')
                            and not os.path.isdir(f)):
                        yield os.path.join(path, f)

            inputList += sorted(validFiles(myInput))
        elif (os.path.isfile(myInput)):
            inputList.append(myInput)
        elif (myInput.startswith('http://') or myInput.startswith('https://')):
            print('URL detected, using youtube-dl to download from webpage.')
            basename = re.sub(r'\W+', '-', myInput)
            cmd = [
                'youtube-dl', '-f',
                'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4', myInput,
                '--output', basename, '--no-check-certificate'
            ]
            if (ffmpeg != 'ffmpeg'):
                cmd.extend(['--ffmpeg-location', ffmpeg])
            subprocess.call(cmd)
            inputList.append(basename + '.mp4')
        else:
            log.error('Could not find file: ' + myInput)

    if (args.output_file is None):
        args.output_file = []

    if (len(args.output_file) < len(inputList)):
        for i in range(len(inputList) - len(args.output_file)):
            oldFile = inputList[i]
            dotIndex = oldFile.rfind('.')
            if (args.export_to_premiere or args.export_to_resolve):
                args.output_file.append(oldFile[:dotIndex] + '.xml')
            else:
                ext = oldFile[dotIndex:]
                if (args.export_as_audio):
                    ext = '.wav'
                end = '_ALTERED' + ext
                args.output_file.append(oldFile[:dotIndex] + end)

    TEMP = tempfile.mkdtemp()

    if (args.combine_files):
        with open(f'{TEMP}/combines.txt', 'w') as outfile:
            for fileref in inputList:
                outfile.write(f"file '{fileref}'\n")

        cmd = [
            ffmpeg, '-f', 'concat', '-safe', '0', '-i', f'{TEMP}/combines.txt',
            '-c', 'copy', 'combined.mp4'
        ]
        subprocess.call(cmd)
        inputList = ['combined.mp4']

    speeds = [args.silent_speed, args.video_speed]

    startTime = time.time()

    numCuts = 0
    for i, INPUT_FILE in enumerate(inputList):
        newOutput = args.output_file[i]
        fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]

        # Grab the sample rate from the input.
        sr = args.sample_rate
        if (sr is None):
            output = pipeToConsole([ffmpeg, '-i', INPUT_FILE, '-hide_banner'])
            try:
                matchDict = re.search(r'\s(?P<grp>\w+?)\sHz',
                                      output).groupdict()
                sr = matchDict['grp']
            except AttributeError:
                sr = 48000
        args.sample_rate = sr

        # Grab the audio bitrate from the input.
        abit = args.audio_bitrate
        if (abit is None):
            output = pipeToConsole([
                ffprobe, '-v', 'error', '-select_streams', 'a:0',
                '-show_entries', 'stream=bit_rate', '-of', 'compact=p=0:nk=1',
                INPUT_FILE
            ])
            try:
                abit = int(output)
            except:
                log.warning("Couldn't automatically detect audio bitrate.")
                abit = '500k'
                log.debug('Setting audio bitrate to ' + abit)
            else:
                abit = str(round(abit / 1000)) + 'k'
        else:
            abit = str(abit)
        args.audio_bitrate = abit

        if (isAudioFile(INPUT_FILE)):
            fps = 30
            tracks = 1
            cmd = [
                ffmpeg, '-y', '-i', INPUT_FILE, '-b:a', args.audio_bitrate,
                '-ac', '2', '-ar',
                str(args.sample_rate), '-vn', f'{TEMP}/fastAud.wav'
            ]
            if (args.debug):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)

            sampleRate, audioData = read(f'{TEMP}/fastAud.wav')
        else:
            if (args.export_to_premiere):
                fps = 29.97
            else:
                fps = ffmpegFPS(ffmpeg, INPUT_FILE, log)
            tracks = vidTracks(INPUT_FILE, ffprobe, log)
            if (args.cut_by_this_track >= tracks):
                log.error("You choose a track that doesn't exist.\n" \
                    f'There are only {tracks-1} tracks. (starting from 0)')

            vcodec = args.video_codec
            if (vcodec is None):
                output = pipeToConsole(
                    [ffmpeg, '-i', INPUT_FILE, '-hide_banner'])
                try:
                    matchDict = re.search(r'Video:\s(?P<video>\w+?)\s',
                                          output).groupdict()
                    vcodec = matchDict['video']
                    log.debug(vcodec)
                except AttributeError:
                    vcodec = 'copy'
                    log.warning(
                        "Couldn't automatically detect the video codec.")

            if (args.video_bitrate is not None and vcodec == 'copy'):
                log.warning('Your bitrate will not be applied because' \
                        ' the video codec is "copy".')

            for trackNum in range(tracks):
                cmd = [
                    ffmpeg, '-y', '-i', INPUT_FILE, '-ab', args.audio_bitrate,
                    '-ac', '2', '-ar',
                    str(args.sample_rate), '-map', f'0:a:{trackNum}',
                    f'{TEMP}/{trackNum}.wav'
                ]
                if (args.debug):
                    cmd.extend(['-hide_banner'])
                else:
                    cmd.extend(['-nostats', '-loglevel', '0'])
                subprocess.call(cmd)

            if (args.cut_by_all_tracks):
                cmd = [
                    ffmpeg, '-y', '-i', INPUT_FILE, '-filter_complex',
                    f'[0:a]amerge=inputs={tracks}', '-map', 'a', '-ar',
                    str(args.sample_rate), '-ac', '2', '-f', 'wav',
                    f'{TEMP}/combined.wav'
                ]
                if (args.debug):
                    cmd.extend(['-hide_banner'])
                else:
                    cmd.extend(['-nostats', '-loglevel', '0'])

                subprocess.call(cmd)

                sampleRate, audioData = read(f'{TEMP}/combined.wav')
            else:
                if (os.path.isfile(f'{TEMP}/{args.cut_by_this_track}.wav')):
                    sampleRate, audioData = read(
                        f'{TEMP}/{args.cut_by_this_track}.wav')
                else:
                    log.error('Audio track not found!')

        chunks = getAudioChunks(audioData, sampleRate, fps,
                                args.silent_threshold, args.frame_margin,
                                args.min_clip_length, args.min_cut_length,
                                args.ignore, args.cut_out, log)

        clips = []
        for chunk in chunks:
            if (speeds[chunk[2]] == 99999):
                numCuts += 1
            else:
                clips.append([chunk[0], chunk[1], speeds[chunk[2]] * 100])

        if (fps is None and not isAudioFile(INPUT_FILE)):
            if (makingDataFile):
                dotIndex = INPUT_FILE.rfind('.')
                end = '_constantFPS' + INPUT_FILE[dotIndex:]
                constantLoc = INPUT_FILE[:dotIndex] + end
            else:
                constantLoc = f'{TEMP}/constantVid{fileFormat}'
            cmd = [
                ffmpeg, '-y', '-i', INPUT_FILE, '-filter:v', 'fps=fps=30',
                constantLoc
            ]
            if (args.debug):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
            INPUT_FILE = constantLoc

        if (args.preview):
            args.no_open = True
            from preview import preview

            preview(INPUT_FILE, chunks, speeds, args.debug)
            continue

        if (args.export_to_premiere):
            args.no_open = True
            from premiere import exportToPremiere

            exportToPremiere(INPUT_FILE, TEMP, newOutput, clips, tracks,
                             sampleRate, log)
            continue
        if (args.export_to_resolve):
            args.no_open = True
            duration = chunks[len(chunks) - 1][1]
            from resolve import exportToResolve

            exportToResolve(INPUT_FILE, newOutput, clips, duration, sampleRate,
                            log)
            continue
        if (isAudioFile(INPUT_FILE) and not makingDataFile):
            from fastAudio import fastAudio

            fastAudio(ffmpeg, INPUT_FILE, newOutput, chunks, speeds,
                      args.audio_bitrate, sampleRate, args.debug, True, log)
            continue

        from fastVideo import fastVideo
        fastVideo(ffmpeg, INPUT_FILE, newOutput, chunks, speeds, tracks,
                  args.audio_bitrate, sampleRate, args.debug, TEMP,
                  args.keep_tracks_seperate, vcodec, fps, args.export_as_audio,
                  args.video_bitrate, args.preset, args.tune, log)

    if (not os.path.isfile(newOutput)):
        log.error(f'The file {newOutput} was not created.')

    if (not args.preview and not makingDataFile):
        timeLength = round(time.time() - startTime, 2)
        minutes = timedelta(seconds=round(timeLength))
        print(f'Finished. Took {timeLength} seconds ({minutes})')

    if (not args.preview and makingDataFile):
        timeSave = numCuts * 2  # assuming making each cut takes about 2 seconds.
        units = 'seconds'
        if (timeSave >= 3600):
            timeSave = round(timeSave / 3600, 1)
            if (timeSave % 1 == 0):
                timeSave = round(timeSave)
            units = 'hours'
        if (timeSave >= 60):
            timeSave = round(timeSave / 60, 1)
            if (timeSave >= 10 or timeSave % 1 == 0):
                timeSave = round(timeSave)
            units = 'minutes'

        print(f'Auto-Editor made {numCuts} cuts',
              end='')  # Don't add a newline.
        if (numCuts > 4):
            print(
                f', which would have taken about {timeSave} {units} if edited manually.'
            )
        else:
            print('.')

    if (not args.no_open):
        try:  # should work on Windows
            os.startfile(newOutput)
        except AttributeError:
            try:  # should work on MacOS and most Linux versions
                subprocess.call(['open', newOutput])
            except:
                try:  # should work on WSL2
                    subprocess.call(['cmd.exe', '/C', 'start', newOutput])
                except:
                    log.warning('Could not open output file.')
    rmtree(TEMP)
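
The chunk-to-clip conversion near the top of this example is easy to miss, so here is a minimal, self-contained sketch of the same idea. It only assumes what is visible above (a chunk is [start_frame, end_frame, speed_index] and a speed of 99999 marks a section to cut out); the sample values are invented.

def chunks_to_clips(chunks, speeds):
    # A chunk is [start_frame, end_frame, speed_index]; a speed of 99999 is
    # the sentinel for "cut this section out entirely".
    clips, num_cuts = [], 0
    for start, end, speed_index in chunks:
        if speeds[speed_index] == 99999:
            num_cuts += 1  # dropped entirely, only counted
        else:
            # the exporters further down expect the speed as a percentage
            clips.append([start, end, speeds[speed_index] * 100])
    return clips, num_cuts

clips, num_cuts = chunks_to_clips(
    [[0, 120, 1], [120, 180, 0], [180, 300, 1]],  # invented frame ranges
    speeds=[99999, 1])                            # silent sections are cut
print(clips, num_cuts)  # [[0, 120, 100], [180, 300, 100]] 1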
Esempio n. 29
0
import os
from wavfile import read, write

for root, dirs, files in os.walk('../data/raw'):
    for wav in files:
        rate, data, _, loops = read(f'{root}/{wav}', readloops=True)

        if loops is not None:
            print(wav, loops)
            start, end = loops[0]
            cut_data = data[start:end]

            assert (len(cut_data) == end - start)

            write(f'../data/cut/{wav}', rate, cut_data)
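
A slightly more defensive variant of the same loop-cutting walk is sketched below. It is hypothetical and only reuses the read()/write() signatures shown above, but it also creates the destination folder and skips files that are not WAVs.

import os
from wavfile import read, write

src, dst = '../data/raw', '../data/cut'
os.makedirs(dst, exist_ok=True)  # make sure the destination folder exists

for root, dirs, files in os.walk(src):
    for wav in files:
        if not wav.lower().endswith('.wav'):
            continue  # skip anything that is not a WAV file
        rate, data, _, loops = read(os.path.join(root, wav), readloops=True)
        if loops:  # None or empty means there is nothing to cut
            start, end = loops[0]
            write(os.path.join(dst, wav), rate, data[start:end])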
Esempio n. 30
0
import numpy as np
from scipy import signal
import wavfile  # the same wavfile helper used by the other examples

def padarray(A, length, before=0):
    t = length - len(A) - before
    if t > 0:
        width = (before, t) if A.ndim == 1 else ([before, t], [0, 0])
        return np.pad(A, pad_width=width, mode='constant')
    else:
        width = (before, 0) if A.ndim == 1 else ([before, 0], [0, 0])
        return np.pad(A[:length - before], pad_width=width, mode='constant')

def filter20_20k(x, sr):  # filters out everything outside 20 - 20000 Hz
    nyq = 0.5 * sr
    sos = signal.butter(5, [20.0 / nyq, 20000.0 / nyq], btype='band', output='sos')
    return signal.sosfilt(sos, x)

sr, a, br = wavfile.read(SWEEPFILE, normalized=True)
sr, b, br = wavfile.read(RECFILE, normalized=True)

a = padarray(a, sr*50, before=sr*10)
b = padarray(b, sr*50, before=sr*10)
h = np.zeros_like(b)

for chan in [0, 1]:
    b1 = b[:,chan]

    b1 = filter20_20k(b1, sr)
    ffta = np.fft.rfft(a)
    fftb = np.fft.rfft(b1)
    ffth = fftb / ffta
    h1 = np.fft.irfft(ffth)
    h1 = filter20_20k(h1, sr)
    h[:, chan] = h1  # store this channel's deconvolved impulse response
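
As a hypothetical follow-up (not part of the original snippet), the deconvolved response h could be trimmed around its main peak, normalized, and saved using the write(filename, rate, data) form from the loop-cutting example; the trim window and output name are invented.

peak = int(np.argmax(np.abs(h[:, 0])))      # strongest tap on channel 0
ir = h[max(peak - sr // 100, 0):peak + sr]  # keep ~10 ms before, 1 s after
ir = ir / np.max(np.abs(ir))                # normalize to +/- 1.0
wavfile.write('impulse_response.wav', sr, (ir * 32767).astype(np.int16))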
Esempio n. 31
0
def write_vas3(input_foldername, output_filename, metadata=None):
    if not input_foldername:
        input_foldername = ""

    if not output_filename:
        output_filename = ""

    if not metadata:
        metadata = json.load(
            open(os.path.join(input_foldername, "metadata.json"), "r"))

    with open(output_filename, "wb") as outfile:
        version = 2
        gdx_size = GDX_SIZES[
            metadata['type']] if metadata['type'] in GDX_SIZES else 0x14
        gdx_start = 0x40
        gdx_entry_start = gdx_start + gdx_size
        data_start = gdx_start + gdx_size + (len(metadata['entries']) * 0x40)

        data_start_padding = 16 - (data_start % 16)
        if data_start_padding != 16:
            data_start += data_start_padding

        outfile.write("VA3W".encode('ascii'))
        outfile.write(struct.pack("<B", 1))
        outfile.write(struct.pack("<B", 0))
        outfile.write(struct.pack("<B", 0))
        outfile.write(struct.pack(
            "<B", version))  # TODO: Add support for saving old archives?
        outfile.write(struct.pack("<I", len(metadata['entries'])))
        outfile.write(struct.pack(
            "<I", gdx_size))  # Change depending on archive version
        outfile.write(struct.pack("<I", gdx_start))
        outfile.write(struct.pack("<I", gdx_entry_start))
        outfile.write(struct.pack("<I", data_start))

        if outfile.tell() < gdx_start:
            outfile.write(bytearray([0] *
                                    (gdx_start - outfile.tell())))  # Padding

        outfile.write(metadata['type'].encode('ascii'))
        outfile.write(struct.pack("<H", metadata['defaults']['default_hihat']))
        outfile.write(struct.pack("<H", metadata['defaults']['default_snare']))
        outfile.write(struct.pack("<H", metadata['defaults']['default_bass']))
        outfile.write(
            struct.pack("<H", metadata['defaults']['default_hightom']))
        outfile.write(struct.pack("<H",
                                  metadata['defaults']['default_lowtom']))
        outfile.write(
            struct.pack("<H", metadata['defaults']['default_rightcymbal']))

        if metadata['type'] == "GDXH":
            outfile.write(struct.pack("<B", metadata['gdx_type_unk1']))
            outfile.write(struct.pack("<B", metadata['gdx_volume_flag']))
        elif metadata['type'] == "GDXG":
            outfile.write(
                struct.pack("<H", metadata['defaults']['default_leftcymbal']))
            outfile.write(
                struct.pack("<H", metadata['defaults']['default_floortom']))
            outfile.write(
                struct.pack("<H", metadata['defaults']['default_leftpedal']))
        else:
            print("Unknown type %s" % metadata['type'])
            exit(1)

        if outfile.tell() < gdx_entry_start:
            outfile.write(bytearray(
                [0] * (gdx_entry_start - outfile.tell())))  # Padding

        defaults = [metadata['defaults'][x] for x in metadata['defaults']]
        data_section = bytearray()

        for entry in metadata['entries']:
            filename = entry['filename']

            if "NoFilename" in entry['flags']:
                filename = "%04x" % entry['sound_id']

            if not os.path.exists(os.path.join(input_foldername, filename)):
                # Lame way to check if it has an extension
                for ext in ['wav', 'ogg', 'mp3']:
                    new_filename = "{}.{}".format(filename,
                                                  ext).replace("\\", "/")

                    if os.path.exists(
                            os.path.join(input_foldername, new_filename)):
                        filename = new_filename
                        break

            # Build full path

            filename = os.path.join(
                input_foldername, os.path.normpath(filename.replace("\\",
                                                                    "/")))
            filename = helper.getCaseInsensitivePath(filename)

            if not os.path.exists(filename):
                print("Could not find %s" % filename)
                continue

            # Set entry filename to just the filename without extension or path
            entry['filename'] = os.path.splitext(os.path.basename(filename))[0]

            if 'flags' not in entry:
                entry['flags'] = []

            if 'extra' not in entry:
                entry['extra'] = 255  # Normal?

            # try:
            #     rate, raw_data, bits = wavfile.read(filename)
            # except:

            # Try using pysoundfile if wavfile failed
            # If this code works well enough, I can probably get rid of
            # wavfile for the was3tool since looping isn't required
            # TODO: Replace this with code to detect if it's a WAV, 16bit, mono, and 48000 and if so, use wavfile instead
            #print(filename)
            filename = audio.get_processed_wav(filename,
                                               channels=1,
                                               rate=48000,
                                               bits=16)

            rate, raw_data, bits = wavfile.read(filename)

            channels = 1 if len(raw_data.shape) == 1 else raw_data.shape[1]

            encoded_data = adpcmwave.encode_data(raw_data, channels)

            sound_flag = 0
            for flag in entry['flags']:
                if flag in FLAG_MAP:
                    sound_flag |= FLAG_MAP[flag]
                elif type(flag) == int:
                    sound_flag |= flag
                else:
                    print(
                        "Don't know how to handle flag {}, ignoring...".format(
                            flag))

            if version >= 2:
                if entry['sound_id'] in defaults:
                    sound_flag |= 0x04
                elif len(defaults) > 0:  # Is this right?
                    sound_flag |= 0x02  # Not a default?

            volume = entry['volume']

            if version < 2:
                volume = VOLUME_TABLE.index(
                    min(VOLUME_TABLE, key=lambda x: abs(x - entry['volume'])))

            outfile.write(struct.pack("<I", len(data_section)))
            outfile.write(struct.pack("<I", len(encoded_data)))
            outfile.write(struct.pack("<H", channels))
            outfile.write(struct.pack("<H",
                                      0x10))  # Will this ever not be 16 bit?
            outfile.write(struct.pack("<I", rate))
            outfile.write(struct.pack(
                "<I", 0))  # This should always be 0 for v2 I think?
            outfile.write(struct.pack(
                "<I", 0))  # This should always be 0 for v2 I think?
            outfile.write(struct.pack("<B", volume))
            outfile.write(struct.pack("<B", entry['pan']))
            outfile.write(struct.pack("<H", entry['sound_id']))
            outfile.write(struct.pack("<H", sound_flag))
            outfile.write(struct.pack("<H", entry['extra']))

            filename_bytes = entry['filename'].encode('ascii')
            outfile.write(filename_bytes[:0x20])

            if len(filename_bytes) < 0x20:
                outfile.write(bytearray([0] * (0x20 - len(filename_bytes))))

            data_section += encoded_data

            padding = 0x10 - (len(data_section) % 0x10)
            if padding != 0x10:
                data_section += bytearray([0] * padding)

        if outfile.tell() < data_start:
            outfile.write(bytearray([0] *
                                    (data_start - outfile.tell())))  # Padding

        outfile.write(data_section)
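
For orientation, here is a hypothetical metadata dictionary containing only the keys write_vas3 actually reads; every value is invented, and the output path in the commented call is just an example.

example_metadata = {
    "type": "GDXG",  # "GDXH" archives instead need gdx_type_unk1 / gdx_volume_flag
    "defaults": {
        "default_hihat": 0, "default_snare": 1, "default_bass": 2,
        "default_hightom": 3, "default_lowtom": 4, "default_rightcymbal": 5,
        # the three defaults below are only written for "GDXG" archives
        "default_leftcymbal": 6, "default_floortom": 7, "default_leftpedal": 8,
    },
    "entries": [
        {"filename": "kick", "sound_id": 2, "volume": 127, "pan": 64,
         "flags": [], "extra": 255},
    ],
}
# write_vas3("input_sounds", "output.va3", metadata=example_metadata)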
Esempio n. 32
0
def exportToPremiere(ffmpeg, myInput, newOutput, silentT, zoomT, frameMargin,
                     sampleRate, videoSpeed, silentSpeed):
    print('Running from premiere.py')
    TEMP = tempfile.mkdtemp()

    fps = 29.97

    cmd = [
        ffmpeg, '-i', myInput, '-ab', '160k', '-ac', '2', '-ar',
        str(sampleRate), '-vn', f'{TEMP}/output.wav', '-nostats', '-loglevel',
        '0'
    ]
    subprocess.call(cmd)

    sampleRate, audioData = read(f'{TEMP}/output.wav')
    chunks = getAudioChunks(audioData, sampleRate, fps, silentT, zoomT,
                            frameMargin)
    rmtree(TEMP)

    clips = []
    newSpeed = [silentSpeed, videoSpeed]
    for chunk in chunks:
        if (newSpeed[chunk[2]] != 99999):
            clips.append([chunk[0], chunk[1], newSpeed[chunk[2]] * 100])

    if (len(clips) < 1):
        print('Error! Less than 1 clip.')
        sys.exit(1)

    pathurl = 'file://localhost' + os.path.abspath(myInput)

    name = os.path.basename(myInput)

    extension = myInput[myInput.rfind('.'):]
    audioFile = extension in ['.wav', '.mp3', '.m4a']

    first = myInput[:myInput.rfind('.')]
    newFile = f'{first}.xml'

    ntsc = 'FALSE'
    ana = 'FALSE'  # anamorphic
    alphatype = 'none'
    depth = '16'
    if (not audioFile):
        try:
            import cv2
            conwrite('Grabbing video dimensions.')

            cap = cv2.VideoCapture(myInput)
            width = str(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
            height = str(int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

            cap.release()
            cv2.destroyAllWindows()

        except ImportError:
            width = '1280'
            height = '720'

    pixelar = 'square'  # pixel aspect ratio
    colordepth = '24'
    sr = sampleRate

    if (audioFile):
        with open(newFile, 'w', encoding='utf-8') as outfile:
            outfile.write(
                '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
            outfile.write('<!-- Generated by Auto-Editor -->\n')
            outfile.write(
                '<!-- https://github.com/WyattBlue/auto-editor -->\n')
            outfile.write('<xmeml version="4">\n')
            outfile.write('\t<sequence>\n')
            outfile.write('\t<rate>\n')
            outfile.write('\t\t<timebase>30</timebase>\n')
            outfile.write('\t\t<ntsc>TRUE</ntsc>\n')
            outfile.write('\t</rate>\n')
            outfile.write('\t\t<name>Auto-Editor Audio Group</name>\n')
            outfile.write('\t\t<media>\n')
            outfile.write('\t\t\t<audio>\n')
            outfile.write('\t\t\t\t<numOutputChannels>2</numOutputChannels>\n')
            outfile.write('\t\t\t\t<format>\n')
            outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
            outfile.write(f'\t\t\t\t\t\t<depth>{depth}</depth>\n')
            outfile.write(f'\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
            outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
            outfile.write('\t\t\t\t</format>\n')
            outfile.write(
                '\t\t\t\t<track currentExplodedTrackIndex="0" totalExplodedTrackCount="2" premiereTrackType="Stereo">\n'
            )

            total = 0
            for j, clip in enumerate(clips):
                myStart = int(total)
                total += (clip[1] - clip[0]) / (clip[2] / 100)
                myEnd = int(total)

                outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{j+1}">\n')
                outfile.write(
                    '\t\t\t\t\t\t<masterclipid>masterclip-1</masterclipid>\n')
                outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')
                outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
                outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
                outfile.write(
                    f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
                outfile.write(
                    f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n'
                )

                if (j == 0):
                    outfile.write('\t\t\t\t\t\t<file id="file-1">\n')
                    outfile.write(f'\t\t\t\t\t\t\t<name>{name}</name>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t<pathurl>{pathurl}</pathurl>\n')
                    outfile.write('\t\t\t\t\t\t\t<rate>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                    outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                    outfile.write('\t\t\t\t\t\t\t</rate>\n')
                    outfile.write('\t\t\t\t\t\t\t<media>\n')
                    outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t\t\t\t<depth>{depth}</depth>\n')
                    outfile.write(
                        f'\t\t\t\t\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                    outfile.write(
                        '\t\t\t\t\t\t\t\t\t<channelcount>2</channelcount>\n')
                    outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                    outfile.write('\t\t\t\t\t\t\t</media>\n')
                    outfile.write('\t\t\t\t\t\t</file>\n')
                else:
                    outfile.write(f'\t\t\t\t\t\t<file id="file-1"/>\n')
                outfile.write('\t\t\t\t\t</clipitem>\n')

            outfile.write('\t\t\t\t</track>\n')
            outfile.write('\t\t\t</audio>\n')
            outfile.write('\t\t</media>\n')
            outfile.write('\t</sequence>\n')
            outfile.write('</xmeml>')

            return newFile

            # End of audio file code.

    with open(newFile, 'w', encoding='utf-8') as outfile:
        outfile.write(
            '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
        outfile.write('<!-- Generated by Auto-Editor -->\n')
        outfile.write('<!-- https://github.com/WyattBlue/auto-editor -->\n')
        outfile.write('<xmeml version="4">\n')
        outfile.write('\t<sequence>\n')
        outfile.write('\t\t<name>Auto-Editor Video Group</name>\n')
        outfile.write('\t\t<media>\n')
        outfile.write('\t\t\t<video>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write('\t\t\t\t\t\t<rate>\n')
        outfile.write('\t\t\t\t\t\t\t<timebase>30</timebase>\n')
        outfile.write(f'\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
        outfile.write('\t\t\t\t\t\t</rate>\n')
        outfile.write(f'\t\t\t\t\t\t<width>{width}</width>\n')
        outfile.write(f'\t\t\t\t\t\t<height>{height}</height>\n')
        outfile.write(f'\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
        outfile.write(
            f'\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n')
        outfile.write('\t\t\t\t\t\t<fielddominance>none</fielddominance>\n')
        outfile.write(f'\t\t\t\t\t\t<colordepth>{colordepth}</colordepth>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')
        outfile.write('\t\t\t\t<track>\n')

        # Handle clips.
        total = 0
        for j, clip in enumerate(clips):
            myStart = int(total)
            total += (clip[1] - clip[0]) / (clip[2] / 100)
            myEnd = int(total)

            outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{j+7}">\n')
            outfile.write(
                '\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
            outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')
            outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
            outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
            outfile.write(
                f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
            outfile.write(
                f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')

            if (j == 0):
                outfile.write('\t\t\t\t\t\t<file id="file-2">\n')
                outfile.write(f'\t\t\t\t\t\t\t<name>{name}</name>\n')
                outfile.write(f'\t\t\t\t\t\t\t<pathurl>{pathurl}</pathurl>\n')
                outfile.write('\t\t\t\t\t\t\t<rate>\n')
                outfile.write('\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t</rate>\n')
                outfile.write('\t\t\t\t\t\t\t<media>\n')
                outfile.write('\t\t\t\t\t\t\t\t<video>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t<rate>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t\t\t<timebase>30</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t</rate>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<width>{width}</width>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<height>{height}</height>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n'
                )
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t\t<fielddominance>none</fielddominance>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t</video>\n')
                outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<depth>{depth}</depth>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<channelcount>2</channelcount>\n')
                outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                outfile.write('\t\t\t\t\t\t\t</media>\n')
                outfile.write('\t\t\t\t\t\t</file>\n')
            else:
                outfile.write(f'\t\t\t\t\t\t<file id="file-2"/>\n')

            # Add the speed effect if necessary
            if (clip[2] != 100):
                outfile.write('\t\t\t\t\t\t<filter>\n')
                outfile.write('\t\t\t\t\t\t\t<effect>\n')
                outfile.write('\t\t\t\t\t\t\t\t<name>Time Remap</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectid>timeremap</effectid>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectcategory>motion</effectcategory>\n'
                )
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effecttype>motion</effecttype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>variablespeed</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>variablespeed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>0</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>1</valuemax>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>0</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>speed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>speed</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemin>-100000</valuemin>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemax>100000</valuemax>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t<value>{clip[2]}</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>reverse</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>reverse</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>frameblending</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>frameblending</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t</effect>\n')
                outfile.write('\t\t\t\t\t\t</filter>\n')

            # Linking for video blocks
            for i in range(3):
                outfile.write('\t\t\t\t\t\t<link>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)+1))+7+j}</linkclipref>\n'
                )
                if (i == 0):
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                else:
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
                if (i == 2):
                    outfile.write('\t\t\t\t\t\t\t<trackindex>2</trackindex>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
                outfile.write(f'\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n')
                if (i == 1 or i == 2):
                    outfile.write('\t\t\t\t\t\t\t<groupindex>1</groupindex>\n')
                outfile.write('\t\t\t\t\t\t</link>\n')
            outfile.write('\t\t\t\t\t</clipitem>\n')
        outfile.write('\t\t\t\t</track>\n')
        outfile.write('\t\t\t</video>\n')
        outfile.write('\t\t\t<audio>\n')
        outfile.write('\t\t\t\t<numOutputChannels>2</numOutputChannels>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write(f'\t\t\t\t\t\t<depth>{depth}</depth>\n')
        outfile.write(f'\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')
        outfile.write(
            '\t\t\t\t<track PannerIsInverted="true" PannerStartKeyframe="-91445760000000000,0.5,0,0,0,0,0,0" PannerName="Balance" currentExplodedTrackIndex="0" totalExplodedTrackCount="2" premiereTrackType="Stereo">\n'
        )

        # Audio Clips
        total = 0
        for j, clip in enumerate(clips):
            outfile.write(
                f'\t\t\t\t\t<clipitem id="clipitem-{len(clips)+8+j}" premiereChannelType="stereo">\n'
            )
            outfile.write(
                f'\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
            outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')

            myStart = int(total)
            total += (clip[1] - clip[0]) / (clip[2] / 100)
            myEnd = int(total)

            outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
            outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')

            outfile.write(
                f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
            outfile.write(
                f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')
            outfile.write('\t\t\t\t\t\t<file id="file-2"/>\n')
            outfile.write('\t\t\t\t\t\t<sourcetrack>\n')
            outfile.write('\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
            outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
            outfile.write('\t\t\t\t\t\t</sourcetrack>\n')

            # Add speed effect for audio blocks
            if (clip[2] != 100):
                outfile.write('\t\t\t\t\t\t<filter>\n')
                outfile.write('\t\t\t\t\t\t\t<effect>\n')
                outfile.write('\t\t\t\t\t\t\t\t<name>Time Remap</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectid>timeremap</effectid>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effectcategory>motion</effectcategory>\n'
                )
                outfile.write(
                    '\t\t\t\t\t\t\t\t<effecttype>motion</effecttype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>variablespeed</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>variablespeed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>0</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>1</valuemax>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>0</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>speed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>speed</name>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemin>-100000</valuemin>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<valuemax>100000</valuemax>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t<value>{clip[2]}</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>reverse</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>reverse</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write(
                    '\t\t\t\t\t\t\t\t\t<parameterid>frameblending</parameterid>\n'
                )
                outfile.write('\t\t\t\t\t\t\t\t\t<name>frameblending</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t</effect>\n')
                outfile.write('\t\t\t\t\t\t</filter>\n')

            if (audioFile):
                startOn = 1
            else:
                startOn = 0
            for i in range(startOn, 3):
                outfile.write('\t\t\t\t\t\t<link>\n')
                outfile.write(
                    f'\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)+1))+7+j}</linkclipref>\n'
                )
                if (i == 0):
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                else:
                    outfile.write(
                        '\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')

                if (i == 2):
                    outfile.write('\t\t\t\t\t\t\t<trackindex>2</trackindex>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')

                outfile.write(f'\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n')

                if (i == 1 or i == 2):
                    outfile.write('\t\t\t\t\t\t\t<groupindex>1</groupindex>\n')
                outfile.write('\t\t\t\t\t\t</link>\n')
            outfile.write('\t\t\t\t\t</clipitem>\n')
        outfile.write('\t\t\t\t\t<outputchannelindex>1</outputchannelindex>\n')
        outfile.write('\t\t\t\t</track>\n')
        outfile.write('\t\t\t</audio>\n')
        outfile.write('\t\t</media>\n')
        outfile.write('\t</sequence>\n')
        outfile.write('</xmeml>')

    conwrite('')
    return newFile
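
The start/end/in/out arithmetic that repeats in the XML above boils down to one small rule: each clip is [source_in_frame, source_out_frame, speed_percent], its sped-up duration is (out - in) / (speed / 100), and clips are laid end to end on the timeline. A minimal sketch with invented values:

def timeline_positions(clips):
    positions, total = [], 0
    for src_in, src_out, speed in clips:
        start = int(total)
        total += (src_out - src_in) / (speed / 100)  # sped-up duration
        positions.append((start, int(total),
                          int(src_in / (speed / 100)),    # <in>
                          int(src_out / (speed / 100))))  # <out>
    return positions  # (start, end, in, out) per clip, in timeline frames

# A 300-frame clip at 100% followed by a 300-frame clip at 200%:
print(timeline_positions([[0, 300, 100], [300, 600, 200]]))
# -> [(0, 300, 0, 300), (300, 450, 150, 300)]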