def fastVideo(ffmpeg, vidFile, outFile, chunks, includeFrame, speeds, tracks, abitrate,
    samplerate, debug, temp, keepTracksSep, vcodec, fps, exportAsAudio, vbitrate,
    preset, tune, log):
    """
    Speed up / cut a video based on per-frame state data, then mux the
    processed audio tracks back in with ffmpeg.

    ffmpeg        -- path to the ffmpeg binary.
    vidFile       -- input video path.
    outFile       -- output path (audio-only when exportAsAudio is True).
    chunks        -- list of [startFrame, endFrame, speedIndex] sections.
    includeFrame  -- array indexed by frame number; value selects an entry in speeds.
    speeds        -- speed per state; 99999 means "drop these frames entirely".
    tracks        -- number of audio tracks ({temp}/{n}.wav must already exist).
    temp          -- scratch directory for intermediate files.
    keepTracksSep -- keep each audio track as its own stream instead of merging.
    log           -- project logger; log.error() is assumed fatal (matches its
                     use elsewhere in this file) -- TODO confirm.

    Returns None. Side effects: writes outFile plus intermediates under temp.
    """
    if (not os.path.isfile(vidFile)):
        log.error('fastVideo.py could not find file: ' + str(vidFile))

    cap = cv2.VideoCapture(vidFile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')

    # Time-stretch every extracted audio track ({temp}/{n}.wav -> {temp}/new{n}.wav).
    for trackNum in range(tracks):
        fastAudio(ffmpeg, f'{temp}/{trackNum}.wav', f'{temp}/new{trackNum}.wav', chunks,
            speeds, abitrate, samplerate, debug, False, log, fps=fps)
        if (not os.path.isfile(f'{temp}/new{trackNum}.wav')):
            log.error('Audio file not created.')

    if (exportAsAudio):
        if (keepTracksSep):
            log.warning("Audio files can't have multiple tracks.")
        else:
            pass # TODO: combine all the audio tracks
        # NOTE(review): this moves the *original* first track (0.wav), not the
        # processed new0.wav -- confirm this is intended.
        move(f'{temp}/0.wav', outFile)
        return None

    out = cv2.VideoWriter(f'{temp}/spedup.mp4', fourcc, fps, (width, height))

    # Choose the frame range to scan. When one speed is 99999 (drop), only
    # frames with the other state can produce output, so start/stop at the
    # first/last such frame instead of scanning the whole video.
    if (speeds[0] == 99999 and speeds[1] != 99999):
        totalFrames = int(np.where(includeFrame == 1)[0][-1])
        cframe = int(np.where(includeFrame == 1)[0][0])
    elif (speeds[0] != 99999 and speeds[1] == 99999):
        totalFrames = int(np.where(includeFrame == 0)[0][-1])
        cframe = int(np.where(includeFrame == 0)[0][0])
    else:
        totalFrames = chunks[len(chunks) - 1][1]
        cframe = 0

    beginTime = time.time()
    starting = cframe
    cap.set(cv2.CAP_PROP_POS_FRAMES, cframe)  # seek to the first useful frame

    remander = 0  # fractional frames carried between iterations (sic: "remainder")
    framesWritten = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if (not ret or cframe > totalFrames):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES)) # current frame

        try:
            state = includeFrame[cframe]
        except IndexError:
            # Past the end of includeFrame: treat as state 0.
            state = 0

        mySpeed = speeds[state]
        if (mySpeed != 99999):
            # Write the frame 1/speed times, accumulating the fractional part
            # so e.g. speed 2 writes every other frame on average.
            doIt = (1 / mySpeed) + remander
            for __ in range(int(doIt)):
                out.write(frame)
                framesWritten += 1
            remander = doIt % 1

        progressBar(cframe - starting, totalFrames - starting, beginTime,
            title='Creating new video')

    conwrite('Writing the output file.')
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    log.debug('Frames written ' + str(framesWritten))

    # Now mix new audio(s) and the new video.
    if (keepTracksSep):
        # One output stream per audio track, plus the sped-up video.
        cmd = [ffmpeg, '-y']
        for i in range(tracks):
            cmd.extend(['-i', f'{temp}/new{i}.wav'])
        cmd.extend(['-i', f'{temp}/spedup.mp4'])
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])
        cmd.extend(['-map', f'{tracks}:v:0', '-c:v', vcodec])
        if (vbitrate is None):
            cmd.extend(['-crf', '15'])
        else:
            cmd.extend(['-b:v', vbitrate])
        if (tune != 'none'):
            cmd.extend(['-tune', tune])
        cmd.extend(['-preset', preset, '-movflags', '+faststart', outFile])
        if (debug):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        # NOTE(review): this cmd is built but never passed to subprocess -- the
        # code below unconditionally remuxes from newAudioFile.wav, which only
        # the else branch creates. A subprocess.call(cmd) appears to be
        # missing here (or the final mux below belongs inside the else).
    else:
        # Merge all the audio tracks into one.
        if (tracks > 1):
            cmd = [ffmpeg]
            for i in range(tracks):
                cmd.extend(['-i', f'{temp}/new{i}.wav'])
            cmd.extend(['-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{temp}/newAudioFile.wav'])
            if (debug):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
        else:
            move(f'{temp}/new0.wav', f'{temp}/newAudioFile.wav')

    def pipeToConsole(myCommands):
        # Run a command and capture stdout and stderr as one string.
        process = subprocess.Popen(myCommands, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
        stdout, __ = process.communicate()
        return stdout.decode()

    # Final mux: combine the merged audio with the sped-up video.
    cmd = [ffmpeg, '-y', '-i', f'{temp}/newAudioFile.wav', '-i', f'{temp}/spedup.mp4',
        '-c:v', vcodec]
    if (vbitrate is None):
        cmd.extend(['-crf', '15'])
    else:
        cmd.extend(['-b:v', vbitrate])
    if (tune != 'none'):
        cmd.extend(['-tune', tune])
    cmd.extend(['-preset', preset, '-movflags', '+faststart', outFile, '-hide_banner'])
    log.debug(cmd)

    message = pipeToConsole(cmd)
    log.debug('')
    log.debug(message)

    if ('Conversion failed!' in message):
        # The chosen codec/bitrate didn't work; retry with stream copy.
        log.warning('The muxing/compression failed. '\
            'This may be a problem with your ffmpeg, your codec, or your bitrate.'\
            '\nTrying, again but using the "copy" video codec.')
        cmd = [ffmpeg, '-y', '-i', f'{temp}/newAudioFile.wav', '-i',
            f'{temp}/spedup.mp4', '-c:v', 'copy', '-movflags', '+faststart', outFile,
            '-nostats', '-loglevel', '0']
        subprocess.call(cmd)
        log.debug(cmd)
    conwrite('')
def fastVideo(ffmpeg, videoFile, outFile, silentT, frameMargin, SAMPLE_RATE,
    AUD_BITRATE, verbose, videoSpeed, silentSpeed, cutByThisTrack, keepTracksSep):
    """
    Self-contained fastVideo variant: extracts the audio tracks itself,
    detects loud/quiet chunks from one chosen track, speeds the video up
    accordingly, and muxes the processed audio back in.

    silentT        -- loudness threshold below which a section counts as silent.
    frameMargin    -- margin (in frames) passed to chunk detection.
    cutByThisTrack -- index of the audio track used for silence detection.
    keepTracksSep  -- keep each audio track as its own output stream.

    Returns the output file path (derived from videoFile when outFile is '').
    """
    print('Running from fastVideo.py')

    import cv2

    conwrite('Reading audio.')

    if(not os.path.isfile(videoFile)):
        print('Could not find file:', videoFile)
        sys.exit(1)

    TEMP = tempfile.mkdtemp()
    speeds = [silentSpeed, videoSpeed]  # indexed by chunk state: 0=silent, 1=loud

    cap = cv2.VideoCapture(videoFile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = cap.get(cv2.CAP_PROP_FPS)

    tracks = vidTracks(videoFile, ffmpeg)

    if(cutByThisTrack >= tracks):
        print("Error! You choose a track that doesn't exist.")
        print(f'There are only {tracks-1} tracks. (starting from 0)')
        sys.exit(1)

    # Extract every audio track to {TEMP}/{n}.wav as stereo at SAMPLE_RATE.
    for trackNumber in range(tracks):
        cmd = [ffmpeg, '-i', videoFile, '-ab', AUD_BITRATE, '-ac', '2', '-ar',
            str(SAMPLE_RATE), '-map', f'0:a:{trackNumber}', f'{TEMP}/{trackNumber}.wav']
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

    # Detect loud/quiet chunks from the chosen track.
    sampleRate, audioData = read(f'{TEMP}/{cutByThisTrack}.wav')
    chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)

    # Handle the Audio
    for trackNumber in range(tracks):
        fastAudio(ffmpeg, f'{TEMP}/{trackNumber}.wav', f'{TEMP}/new{trackNumber}.wav',
            silentT, frameMargin, SAMPLE_RATE, AUD_BITRATE, verbose, silentSpeed,
            videoSpeed, False, chunks=chunks, fps=fps)

        if(not os.path.isfile(f'{TEMP}/new{trackNumber}.wav')):
            print('Error! Audio file not created.')
            sys.exit(1)

    out = cv2.VideoWriter(f'{TEMP}/spedup.mp4', fourcc, fps, (width, height))

    totalFrames = chunks[len(chunks) - 1][1]
    beginTime = time.time()

    remander = 0  # fractional frames carried between iterations (sic: "remainder")
    framesWritten = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if(not ret):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES)) # current frame

        # Find which chunk contains this frame (linear scan per frame).
        state = None
        for chunk in chunks:
            if(cframe >= chunk[0] and cframe <= chunk[1]):
                state = chunk[2]
                break

        if(state is not None):
            mySpeed = speeds[state]
            if(mySpeed != 99999):
                # Write the frame 1/speed times, carrying the fractional part.
                doIt = (1 / mySpeed) + remander
                for __ in range(int(doIt)):
                    out.write(frame)
                    framesWritten += 1
                remander = doIt % 1

        progressBar(cframe, totalFrames, beginTime, title='Creating new video')

    conwrite('Writing the output file.')
    cap.release()
    out.release()
    cv2.destroyAllWindows()

    if(verbose):
        print('Frames written', framesWritten)

    # Default output name: <input>_ALTERED.<ext>
    first = videoFile[:videoFile.rfind('.')]
    extension = videoFile[videoFile.rfind('.'):]
    if(outFile == ''):
        outFile = f'{first}_ALTERED{extension}'

    # Now mix new audio(s) and the new video.
    if(keepTracksSep):
        # One output stream per audio track, plus the sped-up video.
        cmd = [ffmpeg, '-y']
        for i in range(tracks):
            cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
        cmd.extend(['-i', f'{TEMP}/spedup.mp4'])
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])
        cmd.extend(['-map', f'{tracks}:v:0', '-c:v', 'copy', '-movflags',
            '+faststart', outFile])
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
    else:
        # Merge all the audio tracks into one.
        if(tracks > 1):
            cmd = [ffmpeg]
            for i in range(tracks):
                cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
            cmd.extend(['-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{TEMP}/newAudioFile.wav'])
            if(verbose):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
        else:
            os.rename(f'{TEMP}/new0.wav', f'{TEMP}/newAudioFile.wav')

        cmd = [ffmpeg, '-y', '-i', f'{TEMP}/newAudioFile.wav', '-i',
            f'{TEMP}/spedup.mp4', '-c:v', 'copy', '-movflags', '+faststart', outFile]
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])

    # Run whichever mux command the branch above assembled.
    subprocess.call(cmd)

    rmtree(TEMP)
    conwrite('')
    return outFile
def fastVideo(ffmpeg, vidFile, outFile, chunks, speeds, tracks, abitrate, samplerate,
    debug, temp, keepTracksSep, vcodec, fps, exportAsAudio, vbitrate, log):
    """
    Speed up / cut a video using precomputed chunks, then mux the processed
    audio tracks back in.

    chunks -- list of [startFrame, endFrame, speedIndex] sections.
    speeds -- speed per state; 99999 means the section's frames are dropped.
    temp   -- scratch directory; {temp}/{n}.wav audio tracks must already exist.
    log    -- project logger; log.error() is assumed fatal (matches its use
              elsewhere in this file) -- TODO confirm.
    """
    if (not os.path.isfile(vidFile)):
        log.error('Could not find file ' + vidFile)

    cap = cv2.VideoCapture(vidFile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')

    # Time-stretch every audio track ({temp}/{n}.wav -> {temp}/new{n}.wav).
    for trackNum in range(tracks):
        fastAudio(ffmpeg, f'{temp}/{trackNum}.wav', f'{temp}/new{trackNum}.wav', chunks,
            speeds, abitrate, samplerate, debug, False, log, fps=fps)
        if (not os.path.isfile(f'{temp}/new{trackNum}.wav')):
            log.error('Audio file not created.')

    if (exportAsAudio):
        # TODO: combine all the audio tracks
        # TODO: warn the user if they add keep_tracks_seperate
        # NOTE(review): os.rename fails when temp and outFile are on different
        # filesystems (shutil.move would not); also this exports the original
        # 0.wav rather than the processed new0.wav -- confirm intent.
        os.rename(f'{temp}/0.wav', outFile)
        return None

    out = cv2.VideoWriter(f'{temp}/spedup.mp4', fourcc, fps, (width, height))

    totalFrames = chunks[len(chunks) - 1][1]
    beginTime = time.time()
    remander = 0  # fractional frames carried between iterations (sic: "remainder")
    framesWritten = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if (not ret):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES)) # current frame

        # Find which chunk contains this frame (linear scan per frame).
        state = None
        for chunk in chunks:
            if (cframe >= chunk[0] and cframe <= chunk[1]):
                state = chunk[2]
                break

        if (state is not None):
            mySpeed = speeds[state]
            if (mySpeed != 99999):
                # Write the frame 1/speed times, carrying the fractional part.
                doIt = (1 / mySpeed) + remander
                for __ in range(int(doIt)):
                    out.write(frame)
                    framesWritten += 1
                remander = doIt % 1

        progressBar(cframe, totalFrames, beginTime, title='Creating new video')

    conwrite('Writing the output file.')
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    log.debug('Frames written ' + str(framesWritten))

    # Now mix new audio(s) and the new video.
    if (keepTracksSep):
        cmd = [ffmpeg, '-y']
        for i in range(tracks):
            cmd.extend(['-i', f'{temp}/new{i}.wav'])
        cmd.extend(['-i', f'{temp}/spedup.mp4'])
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])
        # NOTE(review): vbitrate goes straight into the arg list; if it is None
        # subprocess raises TypeError (a later variant guards with -crf 15).
        cmd.extend(['-map', f'{tracks}:v:0', '-b:v', vbitrate, '-c:v', vcodec,
            '-movflags', '+faststart', outFile])
        if (debug):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        # NOTE(review): this cmd is never executed -- the remux below runs
        # unconditionally and reads newAudioFile.wav, which only the else
        # branch creates. A subprocess.call(cmd) appears to be missing here.
    else:
        # Merge all the audio tracks into one.
        if (tracks > 1):
            cmd = [ffmpeg]
            for i in range(tracks):
                cmd.extend(['-i', f'{temp}/new{i}.wav'])
            cmd.extend(['-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{temp}/newAudioFile.wav'])
            if (debug):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
        else:
            os.rename(f'{temp}/new0.wav', f'{temp}/newAudioFile.wav')

    def pipeToConsole(myCommands):
        # Run a command and capture stdout and stderr as one string.
        process = subprocess.Popen(myCommands, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
        stdout, __ = process.communicate()
        return stdout.decode()

    # Final mux: combine the merged audio with the sped-up video.
    cmd = [ffmpeg, '-y', '-i', f'{temp}/newAudioFile.wav', '-i', f'{temp}/spedup.mp4',
        '-b:v', vbitrate, '-c:v', vcodec, '-movflags', '+faststart', outFile,
        '-hide_banner']
    message = pipeToConsole(cmd)
    log.debug('')
    log.debug(message)

    if ('Conversion failed!' in message):
        # The chosen codec/bitrate didn't work; retry with stream copy.
        log.warning('The muxing/compression failed. '\
            'This may be a problem with your ffmpeg, your codec, or your bitrate.'\
            '\nTrying, again but using the "copy" video codec.')
        cmd = [ffmpeg, '-y', '-i', f'{temp}/newAudioFile.wav', '-i',
            f'{temp}/spedup.mp4', '-c:v', 'copy', '-movflags', '+faststart', outFile,
            '-nostats', '-loglevel', '0']
        subprocess.call(cmd)
        log.debug(cmd)
    conwrite('')
def fastAudio(ffmpeg, theFile, outFile, silentT, frameMargin, SAMPLE_RATE, audioBit,
    verbose, silentSpeed, soundedSpeed, needConvert, chunks=None, fps=30):
    """
    Write a time-stretched copy of theFile to outFile.

    Loud ("sounded") sections are played at soundedSpeed, quiet sections at
    silentSpeed; a speed of 99999 cuts the section entirely. When chunks is
    not supplied they are detected here with getAudioChunks().

    theFile     -- input audio path.
    outFile     -- output path; '' derives '<input>_ALTERED.wav'.
    silentT     -- loudness threshold used when detecting chunks.
    frameMargin -- margin (in frames) passed to chunk detection.
    needConvert -- first normalize the input to stereo WAV via ffmpeg.
    chunks      -- optional precomputed [startFrame, endFrame, speedIndex] list.
                   (BUGFIX: was a mutable default argument `chunks=[]`; None is
                   now the sentinel. Behavior is unchanged for all callers.)
    fps         -- frame rate used to map frame numbers to sample offsets.

    Returns outFile when needConvert is True, otherwise None (original,
    admittedly inconsistent, contract preserved).
    """
    if (not os.path.isfile(theFile)):
        print('Could not find file:', theFile)
        sys.exit(1)

    if (outFile == ''):
        fileName = theFile[:theFile.rfind('.')]
        outFile = f'{fileName}_ALTERED.wav'

    if (needConvert):
        # Only print this here so other scripts can use this function.
        print('Running from fastAudio.py')

        import tempfile
        from shutil import rmtree

        TEMP = tempfile.mkdtemp()

        # Normalize to a stereo WAV at SAMPLE_RATE so read() gets a known layout.
        cmd = [ffmpeg, '-i', theFile, '-b:a', audioBit, '-ac', '2', '-ar',
            str(SAMPLE_RATE), '-vn', f'{TEMP}/fastAud.wav']
        if (not verbose):
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        theFile = f'{TEMP}/fastAud.wav'

    speeds = [silentSpeed, soundedSpeed]  # indexed by chunk state
    sampleRate, audioData = read(theFile)

    # An explicitly passed empty list still triggers detection, exactly as the
    # old `chunks == []` check did.
    if (chunks is None or chunks == []):
        print('Creating chunks')
        chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int((newL * sampleRate) * 1.5) + int(sampleRate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0  # write position (in samples) into newAudio
    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * sampleRate)
        audioSampleEnd = int(audioSampleStart + (sampleRate / fps) * (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]
            if (theSpeed == 1):
                # No stretching needed; copy the samples straight across.
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                # Phase-vocode the section so pitch is preserved at the new speed.
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, sampleRate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, sampleRate, 2) as writer:
                        phasevocoder(reader.channels, speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output
                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            # Advance by the *theoretical* stretched length (not the vocoder's
            # actual output length) so audio and video stay in sync.
            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * sampleRate
            newSamples = int(mySamples / theSpeed)
            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum, totalChunks, beginTime, title='Creating new audio')

    if (verbose):
        print('yPointer', yPointer)
        print('samples per frame', sampleRate / fps)
        print('Expected video length', yPointer / (sampleRate / fps))

    newAudio = newAudio[:yPointer]  # trim the unused leeway
    write(outFile, sampleRate, newAudio)

    if (needConvert):
        # TEMP only exists when we converted (replaces the old fragile
        # `'TEMP' in locals()` introspection with the equivalent explicit check).
        rmtree(TEMP)
        return outFile
def fastAudio(ffmpeg, theFile, outFile, chunks, speeds, audioBit, samplerate, debug,
    needConvert, log, fps=30):
    """
    Time-stretch theFile according to precomputed chunks and write outFile.

    chunks      -- list of [startFrame, endFrame, speedIndex] sections.
    speeds      -- playback speed per state; 99999 drops the section entirely.
    needConvert -- first normalize the input to a stereo WAV at samplerate.
    log         -- project logger; log.error() is assumed fatal (matches its
                   use elsewhere in this file) -- TODO confirm.
    fps         -- frame rate used to map frame numbers to sample offsets.
    """
    if (not os.path.isfile(theFile)):
        log.error('Could not find file ' + theFile)

    if (needConvert):
        # Only print this here so other scripts can use this function.
        print('Running from fastAudio.py')

        import tempfile
        from shutil import rmtree

        TEMP = tempfile.mkdtemp()

        # Normalize to a stereo WAV at the requested sample rate.
        cmd = [ffmpeg, '-i', theFile, '-b:a', audioBit, '-ac', '2', '-ar',
            str(samplerate), '-vn', f'{TEMP}/fastAud.wav']
        if (not debug):
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        theFile = f'{TEMP}/fastAud.wav'

    samplerate, audioData = read(theFile)

    newL = getNewLength(chunks, speeds, fps)
    # Get the new length in samples with some extra leeway.
    estLeng = int(newL * samplerate * 1.5) + int(samplerate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0  # write position (in samples) into newAudio
    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * samplerate)
        audioSampleEnd = int(audioSampleStart + (samplerate / fps) * (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]
        if (theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]
            if (theSpeed == 1):
                # No stretching needed; copy the samples straight across.
                yPointerEnd = yPointer + spedChunk.shape[0]
                newAudio[yPointer:yPointerEnd] = spedChunk
            else:
                # Phase-vocode the section so pitch is preserved at the new speed.
                spedupAudio = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(spedChunk, channels, samplerate, 2) as reader:
                    with ArrWriter(spedupAudio, channels, samplerate, 2) as writer:
                        phasevocoder(reader.channels, speed=theSpeed).run(reader, writer)
                        spedupAudio = writer.output
                yPointerEnd = yPointer + spedupAudio.shape[0]
                newAudio[yPointer:yPointerEnd] = spedupAudio

            # Advance by the *theoretical* stretched length (not the vocoder's
            # actual output length) so audio and video stay in sync.
            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * samplerate
            newSamples = int(mySamples / theSpeed)
            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum, totalChunks, beginTime, title='Creating new audio')

    log.debug('yPointer: ' + str(yPointer))
    log.debug('samples per frame: ' + str(samplerate / fps))
    log.debug('Expected video length: ' + str(yPointer / (samplerate / fps)))

    newAudio = newAudio[:yPointer]  # trim the unused leeway
    write(outFile, samplerate, newAudio)

    # TEMP exists only when needConvert created it above.
    if ('TEMP' in locals()):
        rmtree(TEMP)

    if (needConvert):
        conwrite('')
def motionDetection(path: str, ffprobe: str, motionThreshold: float, log,
        width: int, dilates: int, blur: int) -> np.ndarray:
    """
    Return a boolean array, indexed by frame number, that is True for every
    frame whose pixel difference from the previous frame exceeds
    motionThreshold.

    path            -- video file to analyze.
    ffprobe         -- path to the ffprobe binary (used to count frames).
    motionThreshold -- fraction (0-1) of changed pixels required to count as motion.
    log             -- project logger; log.error() is assumed fatal, matching
                      its use elsewhere in this file.
    width           -- frames are scaled to this width before comparison.
    dilates         -- dilation iterations applied to the thresholded diff.
    blur            -- Gaussian blur kernel size; 0 disables. NOTE: OpenCV
                      requires an odd kernel size, so an even value will raise
                      inside cv2.GaussianBlur.
    """
    import cv2
    import time
    import subprocess
    from usefulFunctions import progressBar, conwrite

    cap = cv2.VideoCapture(path)

    # Find total frames
    if (path.endswith('.mp4') or path.endswith('.mov')):
        # Query Container
        cmd = [ffprobe, '-v', 'error', '-select_streams', 'v:0', '-show_entries',
            'stream=nb_frames', '-of', 'default=nokey=1:noprint_wrappers=1', path]
    else:
        # Count the number of frames (slow)
        cmd = [ffprobe, '-v', 'error', '-count_frames', '-select_streams', 'v:0',
            '-show_entries', 'stream=nb_read_frames', '-of',
            'default=nokey=1:noprint_wrappers=1', path]

    # Read what ffprobe piped in.
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    stdout, __ = process.communicate()
    output = stdout.decode()

    # BUGFIX: ffprobe prints 'N/A' (or nothing) when the container has no frame
    # count; fail with a clear message instead of an opaque ValueError.
    try:
        totalFrames = int(output.strip()) + 1
    except ValueError:
        log.error('motionDetection: ffprobe did not report a frame count: ' + output)

    log.debug(f' - Cutting totalFrames: {totalFrames}')

    prevFrame = None
    gray = None
    hasMotion = np.zeros((totalFrames), dtype=np.bool_)
    total = None  # pixel count of a resized frame, set on first comparison

    def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
        # Scale keeping the aspect ratio; one of width/height fixes the size.
        if (width is None and height is None):
            return image

        h, w = image.shape[:2]
        if (width is None):
            r = height / h
            dim = (int(w * r), height)
        else:
            r = width / w
            dim = (width, int(h * r))

        return cv2.resize(image, dim, interpolation=inter)

    beginTime = time.time()

    while cap.isOpened():
        # Each frame is compared with the previous (grayscale) one.
        # (Simplified from the redundant `if gray is None: ... else: ...`.)
        prevFrame = gray

        ret, frame = cap.read()
        if (not ret):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES)) # current frame

        frame = resize(frame, width=width)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert frame to grayscale.
        if (blur > 0):
            gray = cv2.GaussianBlur(gray, (blur, blur), 0)

        if (prevFrame is not None):
            frameDelta = cv2.absdiff(prevFrame, gray)
            thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

            # Dilate the thresholded image to fill in holes.
            if (dilates > 0):
                thresh = cv2.dilate(thresh, None, iterations=dilates)

            if (total is None):
                total = thresh.shape[0] * thresh.shape[1]

            # BUGFIX: bounds-guard the index in case ffprobe under-reported the
            # frame count (previously an IndexError).
            if (cframe < totalFrames and
                    np.count_nonzero(thresh) / total >= motionThreshold):
                hasMotion[cframe] = True

        progressBar(cframe, totalFrames, beginTime, title='Detecting motion')

    cap.release()
    cv2.destroyAllWindows()

    conwrite('')
    return hasMotion
def fastAudio(ffmpeg: str, theFile: str, outFile: str, chunks: list, speeds: list,
        audioBit, samplerate, needConvert: bool, temp: str, log, fps: float):
    """
    Write a time-stretched copy of theFile to outFile.

    Each chunk is [startFrame, endFrame, speedIndex]; its samples are played
    back at speeds[speedIndex], and a speed of 99999 drops the chunk entirely.
    When needConvert is set, the input is first normalized to a stereo WAV at
    the requested samplerate via ffmpeg.
    """
    if (len(chunks) == 1 and chunks[0][2] == 0):
        log.error('Trying to create empty audio.')

    if (not os.path.isfile(theFile)):
        log.error('fastAudio.py could not find file: ' + theFile)

    if (needConvert):
        # Normalize the input to a known layout before processing.
        convertCmd = [ffmpeg, '-y', '-i', theFile]
        if (audioBit is not None):
            convertCmd += ['-b:a', str(audioBit)]
        convertCmd += ['-ac', '2', '-ar', str(samplerate), '-vn', f'{temp}/faAudio.wav']
        if (log.is_ffmpeg):
            convertCmd += ['-hide_banner']
        else:
            convertCmd += ['-nostats', '-loglevel', '8']
        subprocess.call(convertCmd)
        theFile = f'{temp}/faAudio.wav'

    samplerate, audioData = read(theFile)

    # Over-allocate the output buffer; it is trimmed to its real size at the end.
    newL = getNewLength(chunks, speeds, fps)
    bufferLen = int(newL * samplerate * 1.5) + int(samplerate * 2)
    newAudio = np.zeros((bufferLen, 2), dtype=np.int16)

    channels = 2
    writePos = 0  # next sample index to write in newAudio
    chunkCount = len(chunks)
    startedAt = time.time()

    for num, chunk in enumerate(chunks):
        startFrame, endFrame, stateIndex = chunk[0], chunk[1], chunk[2]
        chunkSpeed = speeds[stateIndex]

        if (chunkSpeed != 99999):
            sampleStart = int(startFrame / fps * samplerate)
            sampleEnd = int(sampleStart + (samplerate / fps) * (endFrame - startFrame))
            section = audioData[sampleStart:sampleEnd]

            if (chunkSpeed == 1):
                # Unit speed: the samples pass through untouched.
                stretched = section
            else:
                # Phase-vocode the section so pitch survives the speed change.
                stretched = np.zeros((0, 2), dtype=np.int16)
                with ArrReader(section, channels, samplerate, 2) as reader:
                    with ArrWriter(stretched, channels, samplerate, 2) as writer:
                        phasevocoder(reader.channels, speed=chunkSpeed).run(reader, writer)
                        stretched = writer.output

            newAudio[writePos:writePos + stretched.shape[0]] = stretched

            # Advance by the *theoretical* stretched length rather than the
            # vocoder's actual output length, keeping audio frame-accurate.
            writePos = writePos + int((endFrame - startFrame) / fps * samplerate / chunkSpeed)
        # A 99999 speed means the chunk is cut: nothing written, pointer unmoved.

        progressBar(num, chunkCount, startedAt, title='Creating new audio')

    log.debug('\n - Total Samples: ' + str(writePos))
    log.debug(' - Samples per Frame: ' + str(samplerate / fps))
    log.debug(' - Expected video length: ' + str(writePos / (samplerate / fps)))

    write(outFile, samplerate, newAudio[:writePos])

    if (needConvert):
        conwrite('')