Code Example #1
def fastAudio(theFile, outFile, chunks: list, speeds: list, log, fps: float,
              machineReadable, hideBar):
    from wavfile import read, write
    import os

    import numpy as np

    log.checkType(chunks, 'chunks', list)
    log.checkType(speeds, 'speeds', list)

    def speedsOtherThan1And99999(a: list) -> bool:
        return len([x for x in a if x != 1 and x != 99999]) > 0

    if (speedsOtherThan1And99999(speeds)):
        from audiotsm2 import phasevocoder
        from audiotsm2.io.array import ArrReader, ArrWriter

    if (len(chunks) == 1 and chunks[0][2] == 0):
        log.error('Trying to create an empty file.')

    if (not os.path.isfile(theFile)):
        log.error('fastAudio.py could not find file: ' + theFile)

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0

    audioProgress = ProgressBar(len(chunks), 'Creating new audio',
                                machineReadable, hideBar)

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        audioProgress.tick(chunkNum)

    log.debug('\n   - Total Samples: ' + str(yPointer))
    log.debug('   - Samples per Frame: ' + str(samplerate / fps))
    log.debug('   - Expected video length: ' + str(yPointer /
                                                   (samplerate / fps)))
    newAudio = newAudio[:yPointer]
    write(outFile, samplerate, newAudio)
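
Every fastAudio variant here relies on a getNewLength helper that is not part of these excerpts. Judging from how its result is used (newL * samplerate gives the estimated sample count), it returns the new duration in seconds. Below is a minimal sketch under that assumption, treating 99999 as the same "cut this section" sentinel the chunk loops check for; the real helper may differ.

def getNewLength(chunks: list, speeds: list, fps: float) -> float:
    # Sketch only: total the length of each kept chunk in frames, scaled by
    # that chunk's speed, then convert frames to seconds.
    timeInFrames = 0.0
    for chunk in chunks:
        leng = chunk[1] - chunk[0]
        if(speeds[chunk[2]] != 99999):
            timeInFrames += leng / speeds[chunk[2]]
    return timeInFrames / fps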
Code Example #2
def fastAudio(ffmpeg, theFile, outFile, silentT, frameMargin, SAMPLE_RATE, audioBit,
        verbose, silentSpeed, soundedSpeed, needConvert, chunks=[], fps=30):

    if(not os.path.isfile(theFile)):
        print('Could not find file:', theFile)
        sys.exit(1)

    if(outFile == ''):
        fileName = theFile[:theFile.rfind('.')]
        outFile = f'{fileName}_ALTERED.wav'

    if(needConvert):
        # Only print this here so other scripts can use this function.
        print('Running from fastAudio.py')

        import tempfile
        from shutil import rmtree

        TEMP = tempfile.mkdtemp()

        cmd = [ffmpeg, '-i', theFile, '-b:a', audioBit, '-ac', '2', '-ar',
            str(SAMPLE_RATE), '-vn', f'{TEMP}/fastAud.wav']
        if(not verbose):
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        theFile = f'{TEMP}/fastAud.wav'

    speeds = [silentSpeed, soundedSpeed]

    sampleRate, audioData = read(theFile)
    if(chunks == []):
        print('Creating chunks')
        chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)

    # Get the estimated length of the new audio in frames.
    newL = getNewLength(chunks, speeds, fps)

    # Get the new length in samples with some extra leeway.
    estLeng = int((newL * sampleRate) * 1.5) + int(sampleRate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0

    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * sampleRate)
        audioSampleEnd = int(audioSampleStart + (sampleRate / fps) * (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]

        if(theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]
            spedupAudio = np.zeros((0, 2), dtype=np.int16)
            with ArrReader(spedChunk, channels, sampleRate, 2) as reader:
                with ArrWriter(spedupAudio, channels, sampleRate, 2) as writer:
                    phasevocoder(reader.channels, speed=theSpeed).run(
                        reader, writer
                    )
                    spedupAudio = writer.output


            yPointerEnd = yPointer + spedupAudio.shape[0]
            newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * sampleRate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum, totalChunks, beginTime, title='Creating new audio')

    if(verbose):
        print('yPointer', yPointer)
        print('samples per frame', sampleRate / fps)
        print('Expected video length', yPointer / (sampleRate / fps))
    newAudio = newAudio[:yPointer]
    write(outFile, sampleRate, newAudio)

    if('TEMP' in locals()):
        rmtree(TEMP)

    if(needConvert):
        return outFile
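
A hypothetical call to this variant is sketched below. The file names, threshold, and margin values are invented for illustration; silentSpeed=99999 drops silent sections entirely, and with needConvert=True the function converts the input through ffmpeg first and returns the path it wrote (input_ALTERED.wav here, since outFile is empty).

# Illustrative values only; 'ffmpeg' must be on PATH.
out = fastAudio('ffmpeg', 'input.mp4', '', silentT=0.04, frameMargin=4,
                SAMPLE_RATE=48000, audioBit='160k', verbose=False,
                silentSpeed=99999, soundedSpeed=1, needConvert=True)
print('Wrote', out)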
Code Example #3
File: fastAudio.py  Project: yhfy2006/auto-editor
def fastAudio(ffmpeg,
              theFile,
              outFile,
              chunks,
              speeds,
              audioBit,
              samplerate,
              debug,
              needConvert,
              log,
              fps=30):

    if (not os.path.isfile(theFile)):
        log.error('Could not find file ' + theFile)

    if (needConvert):
        # Only print this here so other scripts can use this function.
        print('Running from fastAudio.py')

        import tempfile
        from shutil import rmtree

        TEMP = tempfile.mkdtemp()

        cmd = [
            ffmpeg, '-i', theFile, '-b:a', audioBit, '-ac', '2', '-ar',
            str(samplerate), '-vn', f'{TEMP}/fastAud.wav'
        ]
        if (not debug):
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        theFile = f'{TEMP}/fastAud.wav'

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0
    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum,
                    totalChunks,
                    beginTime,
                    title='Creating new audio')

    log.debug('yPointer: ' + str(yPointer))
    log.debug('samples per frame: ' + str(samplerate / fps))
    log.debug('Expected video length: ' + str(yPointer / (samplerate / fps)))
    newAudio = newAudio[:yPointer]
    write(outFile, samplerate, newAudio)

    if ('TEMP' in locals()):
        rmtree(TEMP)

    if (needConvert):
        conwrite('')
Code Example #4
File: test.py  Project: WyattBlue/audiotsm2
from audiotsm2 import phasevocoder, soundstretch

phasevocoder('sine.wav', 'out.wav', speed=3)

soundstretch('sine.wav', 'out2.wav', speed=3)
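
The fastAudio examples work on in-memory arrays instead of files, using audiotsm2's array I/O. The helper below condenses that pattern; the function name is mine, and the ArrReader/ArrWriter arguments simply mirror the calls shown in the examples above, assuming 16-bit stereo samples as returned by wavfile.read.

import numpy as np
from audiotsm2 import phasevocoder
from audiotsm2.io.array import ArrReader, ArrWriter

def stretchChunk(chunk: np.ndarray, speed: float, samplerate: int) -> np.ndarray:
    # Run the phase vocoder over an int16 stereo array and return the
    # time-scaled result, mirroring the inner loop of fastAudio.
    channels = 2
    output = np.zeros((0, channels), dtype=np.int16)
    with ArrReader(chunk, channels, samplerate, 2) as reader:
        with ArrWriter(output, channels, samplerate, 2) as writer:
            phasevocoder(reader.channels, speed=speed).run(reader, writer)
            output = writer.output
    return output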
Code Example #5
def fastAudio(ffmpeg: str, theFile: str, outFile: str, chunks: list,
              speeds: list, audioBit, samplerate, needConvert: bool, temp: str,
              log, fps: float):

    if (len(chunks) == 1 and chunks[0][2] == 0):
        log.error('Trying to create empty audio.')

    if (not os.path.isfile(theFile)):
        log.error('fastAudio.py could not find file: ' + theFile)

    if (needConvert):
        cmd = [ffmpeg, '-y', '-i', theFile]
        if (audioBit is not None):
            cmd.extend(['-b:a', str(audioBit)])
        cmd.extend(
            ['-ac', '2', '-ar',
             str(samplerate), '-vn', f'{temp}/faAudio.wav'])
        if (log.is_ffmpeg):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '8'])
        subprocess.call(cmd)

        theFile = f'{temp}/faAudio.wav'

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0
    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) *
                             (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]

            if (theSpeed == 1):
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate,
                                   2) as writer:
                        phasevocoder(reader.channels,
                                     speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output

                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum,
                    totalChunks,
                    beginTime,
                    title='Creating new audio')

    log.debug('\n   - Total Samples: ' + str(yPointer))
    log.debug('   - Samples per Frame: ' + str(samplerate / fps))
    log.debug('   - Expected video length: ' + str(yPointer /
                                                   (samplerate / fps)))
    newAudio = newAudio[:yPointer]
    write(outFile, samplerate, newAudio)

    if (needConvert):
        conwrite('')
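
The chunk layout assumed by every variant above is [startFrame, endFrame, speedIndex], where the third field indexes into speeds (index 0 is the silent speed and index 1 the sounded speed in Example #2) and 99999 marks sections to drop. A small, made-up illustration:

# Illustrative only: frames 0-45 keep normal speed, frames 45-60 are cut.
speeds = [99999, 1]            # [silentSpeed, soundedSpeed]
chunks = [[0, 45, 1], [45, 60, 0]]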