Example #1
def preview(myInput, chunks: list, speeds: list, fps: float, audioFile, log):
    if (not os.path.isfile(myInput)):
        log.error('preview.py: Could not find file ' + myInput)

    oldTime = chunks[-1][1]
    print('')
    printTimeFrame('Old length', oldTime, fps)

    newL = getNewLength(chunks, speeds, fps)
    printTimeFrame('New length', newL * fps, fps)
    print('')

    clips = 0
    cuts = 0
    cutL = []
    clipLengths = []
    for chunk in chunks:
        # Each chunk is (start frame, end frame, speed index).
        state = chunk[2]
        if (speeds[state] != 99999):
            clips += 1
            leng = (chunk[1] - chunk[0]) / speeds[state]
            clipLengths.append(leng)
        else:
            cuts += 1
            leng = chunk[1] - chunk[0]
            cutL.append(leng)

    print('Number of clips:', clips)
    printTimeFrame('Smallest clip length', min(clipLengths), fps)
    printTimeFrame('Largest clip length', max(clipLengths), fps)
    printTimeFrame('Average clip length',
                   sum(clipLengths) / len(clipLengths), fps)
    print('\nNumber of cuts:', cuts)

    if (cutL != []):
        printTimeFrame('Smallest cut length', min(cutL), fps)
        printTimeFrame('Largest cut length', max(cutL), fps)
        printTimeFrame('Average cut length', sum(cutL) / len(cutL), fps)
        print('')

    if (not audioFile):
        print('Video framerate:', fps)
    log.debug(f'Chunks:\n{chunks}')
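
The 'New length' figure above comes from simple frame arithmetic over the chunk list. Below is a minimal standalone sketch of that math, assuming (as the code implies) that each chunk is a (start frame, end frame, speed index) triple and that a speed of 99999 marks a cut; the function name and sample values are hypothetical, not the project's getNewLength.

# Standalone sketch: sum the sped-up duration of every chunk that is kept.
def new_length_in_seconds(chunks, speeds, fps):
    total = 0.0
    for start, end, state in chunks:
        if speeds[state] != 99999:       # 99999 means the chunk is cut out
            total += (end - start) / (speeds[state] * fps)
    return total

# Hypothetical chunk list: cut, keep at normal speed, cut.
chunks = [(0, 26, 0), (26, 34, 1), (34, 396, 0)]
print(new_length_in_seconds(chunks, speeds=[99999, 1], fps=30))  # 8/30 of a second
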
Example #2
def fastAudio(theFile, outFile, chunks: list, speeds: list, log, fps: float,
              machineReadable, hideBar):
    from wavfile import read, write
    import os

    import numpy as np

    log.checkType(chunks, 'chunks', list)
    log.checkType(speeds, 'speeds', list)

    def speedsOtherThan1And99999(a: list) -> bool:
        return any(x != 1 and x != 99999 for x in a)

    if (speedsOtherThan1And99999(speeds)):
        from audiotsm2 import phasevocoder
        from audiotsm2.io.array import ArrReader, ArrWriter

    if (len(chunks) == 1 and chunks[0][2] == 0):
        log.error('Trying to create an empty file.')

    if (not os.path.isfile(theFile)):
        log.error('fastAudio.py could not find file: ' + theFile)

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0

    audioProgress = ProgressBar(len(chunks), 'Creating new audio',
                                machineReadable, hideBar)

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        audioProgress.tick(chunkNum)

    log.debug('\n   - Total Samples: ' + str(yPointer))
    log.debug('   - Samples per Frame: ' + str(samplerate / fps))
    log.debug('   - Expected video length: ' + str(yPointer /
                                                   (samplerate / fps)))
    newAudio = newAudio[:yPointer]
    write(outFile, samplerate, newAudio)
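
The heart of fastAudio is converting a chunk's frame range into a sample range of the source audio and advancing the output pointer by the time-scaled sample count. A minimal sketch of that arithmetic; the chunk, speeds, and rates below are made-up illustration values:

fps = 30.0
samplerate = 48000
chunk = (45, 120, 1)                  # (start frame, end frame, speed index)
speeds = [99999, 1.5]

audio_start = int(chunk[0] / fps * samplerate)
audio_end = int(audio_start + (samplerate / fps) * (chunk[1] - chunk[0]))
new_samples = int(((chunk[1] - chunk[0]) / fps) * samplerate / speeds[chunk[2]])

print(audio_start, audio_end, new_samples)  # 72000 192000 80000 -> pointer advances by 80000
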
Example #3
def preview(myInput, chunks, speeds, debug):

    if(not os.path.isfile(myInput)):
        print('preview.py: Could not find file', myInput)
        sys.exit(1)

    audioFile = isAudioFile(myInput)
    if(audioFile):
        fps = 30
    else:
        import cv2

        cap = cv2.VideoCapture(myInput)
        fps = cap.get(cv2.CAP_PROP_FPS)
        cap.release()
        cv2.destroyAllWindows()

    def printTimeFrame(title, frames, fps):
        inSec = round(frames / fps, 1)
        fps = round(fps)
        if(inSec < 1):
            minutes = f'{int(frames)}/{fps} frames'
        else:
            minutes = timedelta(seconds=round(inSec))
        print(f'{title}: {inSec} secs ({minutes})')


    oldTime = chunks[-1][1]
    print('')
    printTimeFrame('Old length', oldTime, fps)

    newL = getNewLength(chunks, speeds, fps)
    printTimeFrame('New length', newL * fps, fps)
    print('')

    clips = 0
    cuts = 0
    cutL = []
    clipLengths = []
    for chunk in chunks:
        state = chunk[2]
        if(speeds[state] != 99999):
            clips += 1
            leng = (chunk[1] - chunk[0]) / speeds[state]
            clipLengths.append(leng)
        else:
            cuts += 1
            leng = chunk[1] - chunk[0]
            cutL.append(leng)

    print('Number of clips:', clips)
    printTimeFrame('Smallest clip length', min(clipLengths), fps)
    printTimeFrame('Largest clip length', max(clipLengths), fps)
    printTimeFrame('Average clip length', sum(clipLengths) / len(clipLengths), fps)
    print('')
    print('Number of cuts:', cuts)
    if(cutL != []):
        printTimeFrame('Smallest cut length', min(cutL), fps)
        printTimeFrame('Largest cut length', max(cutL), fps)
        printTimeFrame('Average cut length', sum(cutL) / len(cutL), fps)
    print('')
    if(not audioFile):
        print('Video framerate:', fps)
    if(debug):
        print('Chunks:')
        print(chunks)
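
The nested printTimeFrame helper formats a frame count both as seconds and as either a frame fraction (for sub-second lengths) or an H:MM:SS string. A standalone sketch of that formatting, with hypothetical inputs:

from datetime import timedelta

def describe(frames, fps):
    in_sec = round(frames / fps, 1)
    if in_sec < 1:
        return f'{in_sec} secs ({int(frames)}/{round(fps)} frames)'
    return f'{in_sec} secs ({timedelta(seconds=round(in_sec))})'

print(describe(12, 29.97))    # 0.4 secs (12/30 frames)
print(describe(5400, 29.97))  # 180.2 secs (0:03:00)
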
Example #4
def fastAudio(ffmpeg,
              theFile,
              outFile,
              silentT,
              frameMargin,
              SAMPLE_RATE,
              audioBit,
              verbose,
              silentSpeed,
              soundedSpeed,
              needConvert,
              chunks=[],
              fps=30):

    if (not os.path.isfile(theFile)):
        print('Could not find file:', theFile)
        sys.exit(1)

    if (outFile == ''):
        fileName = theFile[:theFile.rfind('.')]
        outFile = f'{fileName}_ALTERED.wav'

    if (needConvert):
        # Only print this here so other scripts can use this function.
        print('Running from fastAudio.py')

        import tempfile
        from shutil import rmtree

        TEMP = tempfile.mkdtemp()

        cmd = [
            ffmpeg, '-i', theFile, '-b:a', audioBit, '-ac', '2', '-ar',
            str(SAMPLE_RATE), '-vn', f'{TEMP}/fastAud.wav'
        ]
        if (not verbose):
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        theFile = f'{TEMP}/fastAud.wav'

    speeds = [silentSpeed, soundedSpeed]

    sampleRate, audioData = read(theFile)
    if (chunks == []):
        print('Creating chunks')
        chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2,
                                frameMargin)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int((newL * sampleRate) * 1.5) + int(sampleRate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0

    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * sampleRate)
        audioSampleEnd = int(audioSampleStart + (sampleRate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]

        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, sampleRate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, sampleRate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * sampleRate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum,
                    totalChunks,
                    beginTime,
                    title='Creating new audio')

    if (verbose):
        print('yPointer', yPointer)
        print('samples per frame', sampleRate / fps)
        print('Expected video length', yPointer / (sampleRate / fps))
    newAudio = newAudio[:yPointer]
    write(outFile, sampleRate, newAudio)

    if ('TEMP' in locals()):
        rmtree(TEMP)

    if (needConvert):
        return outFile
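
When needConvert is set, the function shells out to ffmpeg to get a stereo WAV at the requested sample rate before doing any cutting. A sketch of how that command list is assembled; the input name and bitrate are placeholders, and the flags mirror the ones used above:

import tempfile

temp = tempfile.mkdtemp()
cmd = ['ffmpeg', '-i', 'input.mp4', '-b:a', '160k', '-ac', '2', '-ar',
       '48000', '-vn', f'{temp}/fastAud.wav']
verbose = False
if not verbose:
    cmd.extend(['-nostats', '-loglevel', '0'])   # silence ffmpeg output
print(' '.join(cmd))  # the example passes this list to subprocess.call(cmd)
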
Example #5
def fastAudio(ffmpeg,
              theFile,
              outFile,
              chunks,
              speeds,
              audioBit,
              samplerate,
              debug,
              needConvert,
              log,
              fps=30):

    if (not os.path.isfile(theFile)):
        log.error('Could not find file ' + theFile)

    if (needConvert):
        # Only print this here so other scripts can use this function.
        print('Running from fastAudio.py')

        import tempfile
        from shutil import rmtree

        TEMP = tempfile.mkdtemp()

        cmd = [
            ffmpeg, '-i', theFile, '-b:a', audioBit, '-ac', '2', '-ar',
            str(samplerate), '-vn', f'{TEMP}/fastAud.wav'
        ]
        if (not debug):
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        theFile = f'{TEMP}/fastAud.wav'

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0
    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum,
                    totalChunks,
                    beginTime,
                    title='Creating new audio')

    log.debug('yPointer: ' + str(yPointer))
    log.debug('samples per frame: ' + str(samplerate / fps))
    log.debug('Expected video length: ' + str(yPointer / (samplerate / fps)))
    newAudio = newAudio[:yPointer]
    write(outFile, samplerate, newAudio)

    if ('TEMP' in locals()):
        rmtree(TEMP)

    if (needConvert):
        conwrite('')
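
Both this version and Example #4 keep their intermediate WAV in a scratch directory and delete it once the result is written. A small sketch of that temporary-directory lifecycle; the file name here is a stand-in for the ffmpeg output:

import os
import tempfile
from shutil import rmtree

TEMP = tempfile.mkdtemp()
scratch = os.path.join(TEMP, 'fastAud.wav')
open(scratch, 'wb').close()        # stand-in for the converted audio
print(os.path.exists(scratch))     # True
rmtree(TEMP)                       # same cleanup as the examples above
print(os.path.exists(TEMP))        # False
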
Example #6
def preview(ffmpeg, myInput, silentT, zoomT, frameMargin, sampleRate, videoSpeed,
        silentSpeed, cutByThisTrack, bitrate):
    TEMP = tempfile.mkdtemp()

    extension = myInput[myInput.rfind('.'):]
    audioFile = extension in ['.wav', '.mp3', '.m4a']

    if(audioFile):
        fps = 30

        cmd = [ffmpeg, '-i', myInput, '-b:a', bitrate, '-ac', '2', '-ar',
            str(sampleRate), '-vn', f'{TEMP}/fastAud.wav', '-nostats', '-loglevel', '0']
        subprocess.call(cmd)

        sampleRate, audioData = read(f'{TEMP}/fastAud.wav')
        chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)
    else:
        import cv2

        cap = cv2.VideoCapture(myInput)
        fps = cap.get(cv2.CAP_PROP_FPS)
        cap.release()

        tracks = vidTracks(myInput, ffmpeg)

        if(cutByThisTrack >= tracks):
            print("Error! You choose a track that doesn't exist.")
            print(f'There are only {tracks-1} tracks. (starting from 0)')
            sys.exit(1)

        for trackNumber in range(tracks):
            cmd = [ffmpeg, '-i', myInput, '-ab', bitrate, '-ac', '2', '-ar',
                str(sampleRate), '-map', f'0:a:{trackNumber}', f'{TEMP}/{trackNumber}.wav',
                '-nostats', '-loglevel', '0']
            subprocess.call(cmd)

        # Read only the selected track, after every track has been extracted.
        sampleRate, audioData = read(f'{TEMP}/{cutByThisTrack}.wav')
        chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)

    rmtree(TEMP)

    def printTimeFrame(title, frames, fps):
        inSec = round(frames / fps, 1)
        if(fps % 1 == 0):
            fps = round(fps)
        if(inSec < 1):
            minutes = f'{int(frames)}/{fps} frames'
        else:
            minutes = timedelta(seconds=round(inSec))
        print(f'{title}: {inSec} secs ({minutes})')


    oldTime = chunks[-1][1]
    printTimeFrame('Old length', oldTime, fps)

    speeds = [silentSpeed, videoSpeed]
    newL = getNewLength(chunks, speeds, fps)
    printTimeFrame('New length', newL * fps, fps)

    clips = 0
    cuts = 0
    clipLengths = []
    for chunk in chunks:
        state = chunk[2]
        if(speeds[state] != 99999):
            clips += 1
            leng = (chunk[1] - chunk[0]) / speeds[state]
            clipLengths.append(leng)
        else:
            cuts += 1

    print('Number of clips:', clips)
    #print('Number of cuts:', cuts)
    printTimeFrame('Smallest clip length', min(clipLengths), fps)
    printTimeFrame('Largest clip length', max(clipLengths), fps)
    printTimeFrame('Average clip length', sum(clipLengths) / len(clipLengths), fps)
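
Example #6 decides between the audio-only and video code paths with a simple extension check on the input name. A minimal standalone version of that test; the file names are made up:

def is_audio_file(path):
    return path[path.rfind('.'):] in ['.wav', '.mp3', '.m4a']

print(is_audio_file('podcast.mp3'))   # True
print(is_audio_file('lecture.mp4'))   # False
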
Example #7
def fastAudio(ffmpeg: str, theFile: str, outFile: str, chunks: list,
              speeds: list, audioBit, samplerate, needConvert: bool, temp: str,
              log, fps: float):

    if (len(chunks) == 1 and chunks[0][2] == 0):
        log.error('Trying to create empty audio.')

    if (not os.path.isfile(theFile)):
        log.error('fastAudio.py could not find file: ' + theFile)

    if (needConvert):
        cmd = [ffmpeg, '-y', '-i', theFile]
        if (audioBit is not None):
            cmd.extend(['-b:a', str(audioBit)])
        cmd.extend(
            ['-ac', '2', '-ar',
             str(samplerate), '-vn', f'{temp}/faAudio.wav'])
        if (log.is_ffmpeg):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '8'])
        subprocess.call(cmd)

        theFile = f'{temp}/faAudio.wav'

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0
    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum,
                    totalChunks,
                    beginTime,
                    title='Creating new audio')

    log.debug('\n   - Total Samples: ' + str(yPointer))
    log.debug('   - Samples per Frame: ' + str(samplerate / fps))
    log.debug('   - Expected video length: ' + str(yPointer /
                                                   (samplerate / fps)))
    newAudio = newAudio[:yPointer]
    write(outFile, samplerate, newAudio)

    if (needConvert):
        conwrite('')
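
Every fastAudio variant above allocates an oversized zero array (estLeng includes extra leeway), writes each processed chunk at a moving yPointer, and finally trims the array back to the pointer. A small numpy sketch of that allocate-then-trim pattern with toy data:

import numpy as np

est_length = 10
new_audio = np.zeros((est_length, 2), dtype=np.int16)

pointer = 0
pieces = (np.ones((3, 2), dtype=np.int16), np.full((2, 2), 7, dtype=np.int16))
for piece in pieces:
    new_audio[pointer:pointer + piece.shape[0]] = piece   # write at the pointer
    pointer += piece.shape[0]                             # advance by what was written

new_audio = new_audio[:pointer]   # drop the unused leeway
print(new_audio.shape)            # (5, 2)
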