def preview(ffmpeg, myInput, silentT, zoomT, frameMargin, sampleRate, videoSpeed,
    silentSpeed, cutByThisTrack, bitrate):

    TEMP = tempfile.mkdtemp()

    cap = cv2.VideoCapture(myInput)
    fps = cap.get(cv2.CAP_PROP_FPS)

    tracks = vidTracks(myInput, ffmpeg)
    if (cutByThisTrack >= tracks):
        print("Error: You chose a track that doesn't exist.")
        print(f'There are only {tracks-1} tracks. (starting from 0)')
        sys.exit(1)

    for trackNumber in range(tracks):
        cmd = [ffmpeg, '-i', myInput, '-ab', bitrate, '-ac', '2', '-ar',
            str(sampleRate), '-map', f'0:a:{trackNumber}',
            f'{TEMP}/{trackNumber}.wav', '-nostats', '-loglevel', '0']
        subprocess.call(cmd)

    sampleRate, audioData = read(f'{TEMP}/{cutByThisTrack}.wav')
    chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)
    rmtree(TEMP)

    def printTimeFrame(title, frames, fps):
        inSec = round(frames / fps, 1)
        if (fps % 1 == 0):
            fps = round(fps)
        if (inSec < 1):
            minutes = f'{int(frames)}/{fps} frames'
        else:
            minutes = timedelta(seconds=round(inSec))
        print(f'{title}: {inSec} secs ({minutes})')

    oldTime = chunks[len(chunks) - 1][1]
    printTimeFrame('Old length', oldTime, fps)

    speeds = [silentSpeed, videoSpeed]
    newL = getNewLength(chunks, speeds, fps)
    printTimeFrame('New length', newL * fps, fps)

    clips = 0
    clipLengths = []
    for chunk in chunks:
        state = chunk[2]
        if (speeds[state] != 99999):
            clips += 1
            leng = (chunk[1] - chunk[0]) / speeds[state]
            clipLengths.append(leng)

    print('Number of clips:', clips)
    printTimeFrame('Smallest clip length', min(clipLengths), fps)
    printTimeFrame('Largest clip length', max(clipLengths), fps)
    printTimeFrame('Average clip length', sum(clipLengths) / len(clipLengths), fps)
def preview(myInput, silentT, zoomT, frameMargin, sampleRate, videoSpeed, silentSpeed):

    TEMP = '.TEMP'

    cap = cv2.VideoCapture(myInput)
    fps = round(cap.get(cv2.CAP_PROP_FPS))

    try:
        os.mkdir(TEMP)
    except OSError:
        rmtree(TEMP)
        os.mkdir(TEMP)

    cmd = ['ffmpeg', '-i', myInput, '-ab', '160k', '-ac', '2', '-ar',
        str(sampleRate), '-vn', f'{TEMP}/output.wav', '-nostats', '-loglevel', '0']
    subprocess.call(cmd)

    sampleRate, audioData = wavfile.read(f'{TEMP}/output.wav')
    chunks = getAudioChunks(audioData, sampleRate, fps, silentT, zoomT, frameMargin)

    def printTimeFrame(title, frames, fps):
        inSec = round(frames / fps, 1)
        if (inSec < 1):
            minutes = f'{int(frames)}/{fps} frames'
        else:
            minutes = timedelta(seconds=round(inSec))
        print(f'{title}: {inSec} secs ({minutes})')

    oldTime = chunks[len(chunks) - 1][1]
    printTimeFrame('Old length', oldTime, fps)

    NEW_SPEED = [silentSpeed, videoSpeed]

    frameLen = 0
    for chunk in chunks:
        leng = chunk[1] - chunk[0]
        if (NEW_SPEED[chunk[2]] < 99999):
            frameLen += leng * (1 / NEW_SPEED[chunk[2]])
    printTimeFrame('New length', frameLen, fps)

    cuts = 0
    cutLengths = []
    for chunk in chunks:
        state = chunk[2]
        if (NEW_SPEED[state] != 99999):
            cuts += 1
            leng = (chunk[1] - chunk[0]) / NEW_SPEED[state]
            cutLengths.append(leng)

    print('Number of cuts:', cuts)
    printTimeFrame('Smallest clip length', min(cutLengths), fps)
    printTimeFrame('Largest clip length', max(cutLengths), fps)
    printTimeFrame('Average clip length', sum(cutLengths) / len(cutLengths), fps)
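# A note on the chunk data structure used above (inferred from how the chunks
# are consumed here; they are produced by getAudioChunks, which is not shown in
# this file): each chunk is a triple [startFrame, endFrame, state], where state
# 0 marks a silent section and state 1 a sounded one, so chunk[2] indexes the
# [silentSpeed, videoSpeed] list. A hypothetical result could look like
#
#     chunks = [[0, 25, 1], [25, 40, 0], [40, 90, 1]]
#
# which is why both preview() variants read chunks[-1][1] as the old length of
# the input in frames.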
def fastVideoPlus(ffmpeg, videoFile, outFile, silentT, frameMargin, SAMPLE_RATE,
    AUD_BITRATE, verbose, videoSpeed, silentSpeed, cutByThisTrack, keepTracksSep):

    print('Running from fastVideoPlus.py')

    import cv2  # pip3 install opencv-python

    conwrite('Reading audio.')

    if(not os.path.isfile(videoFile)):
        print('Could not find file:', videoFile)
        sys.exit(1)

    TEMP = tempfile.mkdtemp()
    speeds = [silentSpeed, videoSpeed]

    cap = cv2.VideoCapture(videoFile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = cap.get(cv2.CAP_PROP_FPS)

    tracks = vidTracks(videoFile, ffmpeg)

    if(cutByThisTrack >= tracks):
        print("Error! You chose a track that doesn't exist.")
        print(f'There are only {tracks-1} tracks. (starting from 0)')
        sys.exit(1)

    for trackNumber in range(tracks):
        cmd = [ffmpeg, '-i', videoFile, '-ab', AUD_BITRATE, '-ac', '2', '-ar',
            str(SAMPLE_RATE), '-map', f'0:a:{trackNumber}',
            f'{TEMP}/{trackNumber}.wav']
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

    sampleRate, audioData = read(f'{TEMP}/{cutByThisTrack}.wav')
    chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)

    # Handle the audio.
    for trackNumber in range(tracks):
        fastAudio(ffmpeg, f'{TEMP}/{trackNumber}.wav', f'{TEMP}/new{trackNumber}.wav',
            silentT, frameMargin, SAMPLE_RATE, AUD_BITRATE, verbose, silentSpeed,
            videoSpeed, False, chunks=chunks, fps=fps)

        if(not os.path.isfile(f'{TEMP}/new{trackNumber}.wav')):
            raise IOError('Error! Audio file not created.')

    out = cv2.VideoWriter(f'{TEMP}/spedup.mp4', fourcc, fps, (width, height))

    totalFrames = chunks[len(chunks) - 1][1]
    beginTime = time()

    remander = 0
    framesWritten = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if(not ret):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES))  # current frame

        state = None
        for chunk in chunks:
            if(cframe >= chunk[0] and cframe <= chunk[1]):
                state = chunk[2]
                break

        if(state is not None):
            mySpeed = speeds[state]

            if(mySpeed != 99999):
                doIt = (1 / mySpeed) + remander
                for __ in range(int(doIt)):
                    out.write(frame)
                    framesWritten += 1
                remander = doIt % 1
        else:
            if(verbose):
                pass  # print('state is None')

        progressBar(cframe, totalFrames, beginTime, title='Creating new video')

    cap.release()
    out.release()
    cv2.destroyAllWindows()

    conwrite('')

    if(verbose):
        print('Frames written', framesWritten)

    first = videoFile[:videoFile.rfind('.')]
    extension = videoFile[videoFile.rfind('.'):]

    if(outFile == ''):
        outFile = f'{first}_ALTERED{extension}'

    # Now mix new audio(s) and the new video.
    if(keepTracksSep):
        cmd = [ffmpeg, '-y']
        for i in range(tracks):
            cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
        cmd.extend(['-i', f'{TEMP}/spedup.mp4'])
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])
        cmd.extend(['-map', f'{tracks}:v:0', '-c:v', 'copy', '-movflags',
            '+faststart', outFile])
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
    else:
        if(tracks > 1):
            cmd = [ffmpeg]
            for i in range(tracks):
                cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
            cmd.extend(['-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{TEMP}/newAudioFile.wav'])
            if(verbose):
                cmd.extend(['-hide_banner'])
            else:
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
        else:
            os.rename(f'{TEMP}/new0.wav', f'{TEMP}/newAudioFile.wav')

        cmd = [ffmpeg, '-y', '-i', f'{TEMP}/newAudioFile.wav', '-i',
            f'{TEMP}/spedup.mp4', '-c:v', 'copy', '-movflags', '+faststart', outFile]
        if(verbose):
            cmd.extend(['-hide_banner'])
        else:
            cmd.extend(['-nostats', '-loglevel', '0'])
    subprocess.call(cmd)

    rmtree(TEMP)

    return outFile
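# A worked example of the frame-duplication arithmetic in the fastVideoPlus
# loop above (illustrative numbers, not taken from the project): with a speed
# of 0.5 each source frame contributes 1 / 0.5 = 2.0 output frames, so every
# frame is written twice. With a speed of 1.5 each source frame contributes
# about 0.667 output frames; the fractional part carries over in `remander`,
# so roughly two out of every three source frames are written once and the
# third is dropped.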
def fastVideo(ffmpeg, videoFile, outFile, silentThreshold, frameMargin, SAMPLE_RATE,
    AUD_BITRATE, VERBOSE, cutByThisTrack, keepTracksSep):

    print('Running from fastVideo.py')

    import cv2  # pip3 install opencv-python

    conwrite('Reading audio.')

    if(not os.path.isfile(videoFile)):
        print('Could not find file:', videoFile)
        sys.exit()

    TEMP = tempfile.mkdtemp()

    cap = cv2.VideoCapture(videoFile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = cap.get(cv2.CAP_PROP_FPS)

    tracks = vidTracks(videoFile, ffmpeg)

    if(cutByThisTrack >= tracks):
        print("Error: You chose a track that doesn't exist.")
        print(f'There are only {tracks-1} tracks. (starting from 0)')
        sys.exit(1)

    for trackNumber in range(tracks):
        cmd = [ffmpeg, '-i', videoFile, '-ab', AUD_BITRATE, '-ac', '2', '-ar',
            str(SAMPLE_RATE), '-map', f'0:a:{trackNumber}',
            f'{TEMP}/{trackNumber}.wav']
        if(not VERBOSE):
            cmd.extend(['-nostats', '-loglevel', '0'])
        else:
            cmd.extend(['-hide_banner'])
        subprocess.call(cmd)

    sampleRate, audioData = read(f'{TEMP}/{cutByThisTrack}.wav')
    chunks = getAudioChunks(audioData, sampleRate, fps, silentThreshold, 2, frameMargin)

    oldAudios = []
    newAudios = []
    for i in range(tracks):
        __, audioData = read(f'{TEMP}/{i}.wav')
        oldAudios.append(audioData)
        newAudios.append(np.zeros_like(audioData, dtype=np.int16))

    yPointer = 0

    out = cv2.VideoWriter(f'{TEMP}/spedup.mp4', fourcc, fps, (width, height))

    totalFrames = chunks[len(chunks) - 1][1]
    beginTime = time()

    while cap.isOpened():
        ret, frame = cap.read()
        if(not ret):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES))  # current frame

        state = None
        for chunk in chunks:
            if(cframe >= chunk[0] and cframe <= chunk[1]):
                state = chunk[2]
                break

        if(state == 1):
            out.write(frame)

            audioSampleStart = int((cframe / fps) * sampleRate)
            audioSampleEnd = int(audioSampleStart + sampleRate / fps)

            # Handle the audio tracks.
            for i, oneAudioData in enumerate(oldAudios):
                audioChunk = oneAudioData[audioSampleStart:audioSampleEnd]

                yPointerEnd = yPointer + audioChunk.shape[0]
                newAudios[i][yPointer:yPointerEnd] = audioChunk
            yPointer = yPointerEnd

        progressBar(cframe, totalFrames, beginTime)

    # Finish the audio.
    conwrite('Writing the output file.')
    for i, newData in enumerate(newAudios):
        newData = newData[:yPointer]
        write(f'{TEMP}/new{i}.wav', sampleRate, newData)

        if(not os.path.isfile(f'{TEMP}/new{i}.wav')):
            raise IOError('audio file not created.')

    cap.release()
    out.release()
    cv2.destroyAllWindows()

    first = videoFile[:videoFile.rfind('.')]
    extension = videoFile[videoFile.rfind('.'):]

    if(outFile == ''):
        outFile = f'{first}_ALTERED{extension}'

    # Now mix new audio(s) and the new video.
    if(keepTracksSep):
        cmd = [ffmpeg, '-y']
        for i in range(tracks):
            cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
        cmd.extend(['-i', f'{TEMP}/spedup.mp4'])  # add input video
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])
        cmd.extend(['-map', f'{tracks}:v:0', '-c:v', 'copy', '-movflags',
            '+faststart', outFile])
        if(not VERBOSE):
            cmd.extend(['-nostats', '-loglevel', '0'])
    else:
        if(tracks > 1):
            cmd = [ffmpeg]
            for i in range(tracks):
                cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
            cmd.extend(['-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{TEMP}/newAudioFile.wav'])
            if(not VERBOSE):
                cmd.extend(['-nostats', '-loglevel', '0'])
            else:
                cmd.extend(['-hide_banner'])
            subprocess.call(cmd)
        else:
            os.rename(f'{TEMP}/new0.wav', f'{TEMP}/newAudioFile.wav')

        cmd = [ffmpeg, '-y', '-i', f'{TEMP}/newAudioFile.wav', '-i',
            f'{TEMP}/spedup.mp4', '-c:v', 'copy', '-movflags', '+faststart',
            '-strict', '-2', outFile]
        if(not VERBOSE):
            cmd.extend(['-nostats', '-loglevel', '0'])
        else:
            cmd.extend(['-hide_banner'])
    subprocess.call(cmd)

    rmtree(TEMP)

    conwrite('')

    return outFile
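# Unlike fastVideoPlus above, fastVideo applies no speed change: a frame (and
# the matching one-frame slice of every audio track) is copied verbatim when
# its chunk's state is 1 (sounded) and dropped otherwise, so no phase-vocoder
# resampling is needed in this code path.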
def fastVideoPlus(videoFile, outFile, silentThreshold, frameMargin, SAMPLE_RATE,
    AUD_BITRATE, VERBOSE, videoSpeed, silentSpeed, cutByThisTrack, keepTracksSep):

    print('Running from fastVideoPlus.py')

    if (not os.path.isfile(videoFile)):
        print('Could not find file:', videoFile)
        sys.exit()

    TEMP = '.TEMP'
    FADE_SIZE = 400
    NEW_SPEED = [silentSpeed, videoSpeed]

    cap = cv2.VideoCapture(videoFile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = round(cap.get(cv2.CAP_PROP_FPS))

    try:
        os.mkdir(TEMP)
    except OSError:
        rmtree(TEMP)
        os.mkdir(TEMP)

    tracks = vidTracks(videoFile)

    if (cutByThisTrack >= tracks):
        print("Error: You chose a track that doesn't exist.")
        print(f'There are only {tracks-1} tracks. (starting from 0)')
        sys.exit()

    for trackNumber in range(tracks):
        cmd = ['ffmpeg', '-i', videoFile, '-ab', AUD_BITRATE, '-ac', '2', '-ar',
            str(SAMPLE_RATE), '-map', f'0:a:{trackNumber}',
            f'{TEMP}/{trackNumber}.wav']
        if (not VERBOSE):
            cmd.extend(['-nostats', '-loglevel', '0'])
        else:
            cmd.extend(['-hide_banner'])
        subprocess.call(cmd)

    sampleRate, audioData = wavfile.read(f'{TEMP}/{cutByThisTrack}.wav')
    chunks = getAudioChunks(audioData, sampleRate, fps, silentThreshold, 2, frameMargin)

    hmm = preview(chunks, NEW_SPEED, fps)
    estLeng = int((hmm * SAMPLE_RATE) * 1.5) + int(SAMPLE_RATE * 2)

    oldAudios = []
    newAudios = []
    for i in range(tracks):
        __, audioData = wavfile.read(f'{TEMP}/{i}.wav')
        oldAudios.append(audioData)
        newAudios.append(np.zeros((estLeng, 2), dtype=np.int16))

    yPointer = 0

    out = cv2.VideoWriter(f'{TEMP}/spedup.mp4', fourcc, fps, (width, height))

    channels = 2

    switchStart = 0
    needChange = False
    preve = None
    endMargin = 0

    yPointer = 0
    frameBuffer = []

    def writeFrames(frames, nAudio, speed, samplePerSecond, writer):
        numAudioChunks = round(nAudio / samplePerSecond * fps)
        global nFrames  # relies on a module-level counter (e.g. nFrames = 0) defined outside this function
        numWrites = numAudioChunks - nFrames
        nFrames += numWrites  # if sync issue exists, change this back

        limit = len(frames) - 1
        for i in range(numWrites):
            frameIndex = round(i * speed)
            if (frameIndex > limit):
                writer.write(frames[-1])
            else:
                writer.write(frames[frameIndex])

    totalFrames = chunks[len(chunks) - 1][1]
    outFrame = 0
    beginTime = time()

    while cap.isOpened():
        ret, frame = cap.read()
        if (not ret):
            break

        cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES))  # current frame
        currentTime = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000

        audioSampleStart = int(currentTime * sampleRate)
        audioSampleEnd = min(audioSampleStart + sampleRate // fps * frameMargin,
            len(audioData))
        switchEnd = audioSampleStart + sampleRate // fps

        audioChunk = audioData[audioSampleStart:audioSampleEnd]

        state = None
        for chunk in chunks:
            if (cframe >= chunk[0] and cframe <= chunk[1]):
                state = chunk[2]
                break

        if (state == 0):
            if (endMargin < 1):
                isSilent = 1
            else:
                isSilent = 0
                endMargin -= 1
        else:
            isSilent = 0
            endMargin = frameMargin

        if (preve is not None and preve != isSilent):
            needChange = True
        preve = isSilent

        if (not needChange):
            frameBuffer.append(frame)
        else:
            theSpeed = NEW_SPEED[isSilent]
            if (theSpeed < 99999):
                # Handle the audio tracks.
                for i, oneAudioData in enumerate(oldAudios):
                    spedChunk = oneAudioData[switchStart:switchEnd]
                    spedupAudio = np.zeros((0, 2), dtype=np.int16)
                    with ArrReader(spedChunk, channels, sampleRate, 2) as reader:
                        with ArrWriter(spedupAudio, channels, sampleRate, 2) as writer:
                            phasevocoder(reader.channels, speed=theSpeed).run(
                                reader, writer)
                            spedupAudio = writer.output

                    yPointerEnd = yPointer + spedupAudio.shape[0]
                    newAudios[i][yPointer:yPointerEnd] = spedupAudio
                yPointer = yPointerEnd
            else:
                yPointerEnd = yPointer

            writeFrames(frameBuffer, yPointerEnd, NEW_SPEED[isSilent], sampleRate, out)
            frameBuffer = []
            switchStart = switchEnd
            needChange = False

        progressBar(cframe, totalFrames, beginTime)

    # Finish the audio.
    for i, newData in enumerate(newAudios):
        newData = newData[:yPointer]
        wavfile.write(f'{TEMP}/new{i}.wav', sampleRate, newData)

        if (not os.path.isfile(f'{TEMP}/new{i}.wav')):
            raise IOError('audio file not created.')

    cap.release()
    out.release()
    cv2.destroyAllWindows()

    first = videoFile[:videoFile.rfind('.')]
    extension = videoFile[videoFile.rfind('.'):]

    if (outFile == ''):
        outFile = f'{first}_ALTERED{extension}'

    # Now mix new audio(s) and the new video.
    if (keepTracksSep):
        cmd = ['ffmpeg', '-y']
        for i in range(tracks):
            cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
        cmd.extend(['-i', f'{TEMP}/spedup.mp4'])  # add input video
        for i in range(tracks):
            cmd.extend(['-map', f'{i}:a:0'])
        cmd.extend(['-map', f'{tracks}:v:0', '-c:v', 'copy', '-movflags',
            '+faststart', outFile])
        if (not VERBOSE):
            cmd.extend(['-nostats', '-loglevel', '0'])
    else:
        if (tracks > 1):
            cmd = ['ffmpeg']
            for i in range(tracks):
                cmd.extend(['-i', f'{TEMP}/new{i}.wav'])
            cmd.extend(['-filter_complex', f'amerge=inputs={tracks}', '-ac', '2',
                f'{TEMP}/newAudioFile.wav'])
            if (not VERBOSE):
                cmd.extend(['-nostats', '-loglevel', '0'])
            subprocess.call(cmd)
        else:
            os.rename(f'{TEMP}/new0.wav', f'{TEMP}/newAudioFile.wav')

        cmd = ['ffmpeg', '-y', '-i', f'{TEMP}/newAudioFile.wav', '-i',
            f'{TEMP}/spedup.mp4', '-c:v', 'copy', '-movflags', '+faststart', outFile]
        if (not VERBOSE):
            cmd.extend(['-nostats', '-loglevel', '0'])
    subprocess.call(cmd)

    return outFile
def fastAudio(ffmpeg, theFile, outFile, silentT, frameMargin, SAMPLE_RATE, audioBit,
    verbose, silentSpeed, soundedSpeed, needConvert, chunks=[], fps=30):

    if(not os.path.isfile(theFile)):
        print('Could not find file:', theFile)
        sys.exit(1)

    if(outFile == ''):
        fileName = theFile[:theFile.rfind('.')]
        outFile = f'{fileName}_ALTERED.wav'

    if(needConvert):
        # Only print this here so other scripts can use this function.
        print('Running from fastAudio.py')

        import tempfile
        from shutil import rmtree

        TEMP = tempfile.mkdtemp()

        cmd = [ffmpeg, '-i', theFile, '-b:a', audioBit, '-ac', '2', '-ar',
            str(SAMPLE_RATE), '-vn', f'{TEMP}/fastAud.wav']
        if(not verbose):
            cmd.extend(['-nostats', '-loglevel', '0'])
        subprocess.call(cmd)

        theFile = f'{TEMP}/fastAud.wav'

    speeds = [silentSpeed, soundedSpeed]

    sampleRate, audioData = read(theFile)

    if(chunks == []):
        print('Creating chunks')
        chunks = getAudioChunks(audioData, sampleRate, fps, silentT, 2, frameMargin)

    # Get the estimated length of the new audio in frames.
    newL = getNewLength(chunks, speeds, fps)

    # Get the new length in samples with some extra leeway.
    estLeng = int((newL * sampleRate) * 1.5) + int(sampleRate * 2)

    # Create an empty array for the new audio.
    newAudio = np.zeros((estLeng, 2), dtype=np.int16)

    channels = 2
    yPointer = 0
    totalChunks = len(chunks)
    beginTime = time.time()

    for chunkNum, chunk in enumerate(chunks):
        audioSampleStart = int(chunk[0] / fps * sampleRate)
        audioSampleEnd = int(audioSampleStart + (sampleRate / fps) * (chunk[1] - chunk[0]))

        theSpeed = speeds[chunk[2]]

        if(theSpeed != 99999):
            spedChunk = audioData[audioSampleStart:audioSampleEnd]
            spedupAudio = np.zeros((0, 2), dtype=np.int16)
            with ArrReader(spedChunk, channels, sampleRate, 2) as reader:
                with ArrWriter(spedupAudio, channels, sampleRate, 2) as writer:
                    phasevocoder(reader.channels, speed=theSpeed).run(
                        reader, writer)
                    spedupAudio = writer.output

            yPointerEnd = yPointer + spedupAudio.shape[0]
            newAudio[yPointer:yPointerEnd] = spedupAudio

            myL = chunk[1] - chunk[0]
            mySamples = (myL / fps) * sampleRate
            newSamples = int(mySamples / theSpeed)

            yPointer = yPointer + newSamples
        else:
            # Speed is too high so skip this section.
            yPointerEnd = yPointer

        progressBar(chunkNum, totalChunks, beginTime, title='Creating new audio')

    if(verbose):
        print('yPointer', yPointer)
        print('samples per frame', sampleRate / fps)
        print('Expected video length', yPointer / (sampleRate / fps))

    newAudio = newAudio[:yPointer]
    write(outFile, sampleRate, newAudio)

    if('TEMP' in locals()):
        rmtree(TEMP)

    if(needConvert):
        return outFile
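# A minimal sketch of calling fastAudio on its own; the file name and parameter
# values below are hypothetical, not taken from the project:
#
#     fastAudio('ffmpeg', 'podcast.mp3', '', silentT=0.04, frameMargin=4,
#               SAMPLE_RATE=44100, audioBit='160k', verbose=False,
#               silentSpeed=99999, soundedSpeed=1.0, needConvert=True)
#
# With needConvert=True the input is first converted to a stereo WAV in a
# temporary folder, chunks are computed from that WAV, silent sections are
# dropped entirely (speed 99999), and the result is written to
# 'podcast_ALTERED.wav'.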
def exportToPremiere(ffmpeg, myInput, newOutput, silentT, zoomT, frameMargin,
    sampleRate, videoSpeed, silentSpeed):

    print('Running from premiere.py')

    TEMP = tempfile.mkdtemp()

    fps = 29.97

    cmd = [ffmpeg, '-i', myInput, '-ab', '160k', '-ac', '2', '-ar', str(sampleRate),
        '-vn', f'{TEMP}/output.wav', '-nostats', '-loglevel', '0']
    subprocess.call(cmd)

    sampleRate, audioData = read(f'{TEMP}/output.wav')
    chunks = getAudioChunks(audioData, sampleRate, fps, silentT, zoomT, frameMargin)
    rmtree(TEMP)

    clips = []
    newSpeed = [silentSpeed, videoSpeed]
    for chunk in chunks:
        if (newSpeed[chunk[2]] != 99999):
            clips.append([chunk[0], chunk[1], newSpeed[chunk[2]] * 100])

    if (len(clips) < 1):
        print('Error! Less than 1 clip.')
        sys.exit()

    print('\nWarning, this method is underdeveloped and does not support many features.')

    pathurl = 'file://localhost' + os.path.abspath(myInput)
    name = os.path.basename(myInput)

    ntsc = 'FALSE'
    ana = 'FALSE'  # anamorphic
    alphatype = 'none'
    depth = '16'
    width = '1920'
    height = '1080'
    pixelar = 'square'  # pixel aspect ratio
    colordepth = '24'
    sr = sampleRate

    with open('export.xml', 'w', encoding='utf-8') as outfile:
        # The XML declaration must be the very first thing in the file, so it is
        # written before the generator comments.
        outfile.write('<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE xmeml>\n')
        outfile.write('<!-- Generated by Auto-Editor -->\n')
        outfile.write('<!-- https://github.com/WyattBlue/auto-editor -->\n')
        outfile.write('\n')
        outfile.write('<xmeml version="4">\n')
        outfile.write('\t<sequence>\n')
        outfile.write('\t\t<name>auto-editor export group</name>\n')
        outfile.write('\t\t<media>\n')
        outfile.write('\t\t\t<video>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write('\t\t\t\t\t\t<rate>\n')
        outfile.write('\t\t\t\t\t\t\t<timebase>30</timebase>\n')
        outfile.write(f'\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
        outfile.write('\t\t\t\t\t\t</rate>\n')
        outfile.write(f'\t\t\t\t\t\t<width>{width}</width>\n')
        outfile.write(f'\t\t\t\t\t\t<height>{height}</height>\n')
        outfile.write(f'\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
        outfile.write(f'\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n')
        outfile.write('\t\t\t\t\t\t<fielddominance>none</fielddominance>\n')
        outfile.write(f'\t\t\t\t\t\t<colordepth>{colordepth}</colordepth>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')
        outfile.write('\t\t\t\t<track>\n')

        # Handle video clips.
        total = 0
        for j, clip in enumerate(clips):
            myStart = int(total)
            total += (clip[1] - clip[0]) / (clip[2] / 100)
            myEnd = int(total)

            outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{j+7}">\n')
            outfile.write('\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
            outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')
            outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
            outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
            outfile.write(f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
            outfile.write(f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')

            if (j == 0):
                outfile.write('\t\t\t\t\t\t<file id="file-2">\n')
                outfile.write(f'\t\t\t\t\t\t\t<name>{name}</name>\n')
                outfile.write(f'\t\t\t\t\t\t\t<pathurl>{pathurl}</pathurl>\n')
                outfile.write('\t\t\t\t\t\t\t<rate>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t<timebase>{fps}</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t</rate>\n')
                outfile.write('\t\t\t\t\t\t\t<media>\n')
                outfile.write('\t\t\t\t\t\t\t\t<video>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t<rate>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t\t<timebase>{fps}</timebase>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t\t<ntsc>{ntsc}</ntsc>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t</rate>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<width>{width}</width>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<height>{height}</height>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<anamorphic>{ana}</anamorphic>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<pixelaspectratio>{pixelar}</pixelaspectratio>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t\t<fielddominance>none</fielddominance>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t</video>\n')
                outfile.write('\t\t\t\t\t\t\t\t<audio>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<samplecharacteristics>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<depth>{depth}</depth>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t</samplecharacteristics>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<channelcount>2</channelcount>\n')
                outfile.write('\t\t\t\t\t\t\t\t</audio>\n')
                outfile.write('\t\t\t\t\t\t\t</media>\n')
                outfile.write('\t\t\t\t\t\t</file>\n')
            else:
                outfile.write('\t\t\t\t\t\t<file id="file-2"/>\n')

            # Add the speed effect if necessary.
            if (clip[2] != 100):
                outfile.write('\t\t\t\t\t\t<filter>\n')
                outfile.write('\t\t\t\t\t\t\t<effect>\n')
                outfile.write('\t\t\t\t\t\t\t\t<name>Time Remap</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effectid>timeremap</effectid>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effectcategory>motion</effectcategory>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effecttype>motion</effecttype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>variablespeed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>variablespeed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>0</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>1</valuemax>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>0</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>speed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>speed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>-100000</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>100000</valuemax>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t<value>{clip[2]}</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>reverse</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>reverse</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>frameblending</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>frameblending</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t</effect>\n')
                outfile.write('\t\t\t\t\t\t</filter>\n')

            # Linking for video blocks.
            for i in range(3):
                outfile.write('\t\t\t\t\t\t<link>\n')
                outfile.write(f'\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)+1))+7+j}</linkclipref>\n')
                if (i == 0):
                    outfile.write('\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
                if (i == 2):
                    outfile.write('\t\t\t\t\t\t\t<trackindex>2</trackindex>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
                outfile.write(f'\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n')
                if (i == 1 or i == 2):
                    outfile.write('\t\t\t\t\t\t\t<groupindex>1</groupindex>\n')
                outfile.write('\t\t\t\t\t\t</link>\n')

            outfile.write('\t\t\t\t\t</clipitem>\n')

        outfile.write('\t\t\t\t</track>\n')
        outfile.write('\t\t\t</video>\n')
        outfile.write('\t\t\t<audio>\n')
        outfile.write('\t\t\t\t<numOutputChannels>2</numOutputChannels>\n')
        outfile.write('\t\t\t\t<format>\n')
        outfile.write('\t\t\t\t\t<samplecharacteristics>\n')
        outfile.write(f'\t\t\t\t\t\t<depth>{depth}</depth>\n')
        outfile.write(f'\t\t\t\t\t\t<samplerate>{sr}</samplerate>\n')
        outfile.write('\t\t\t\t\t</samplecharacteristics>\n')
        outfile.write('\t\t\t\t</format>\n')
        outfile.write('\t\t\t\t<track PannerIsInverted="true" PannerStartKeyframe="-91445760000000000,0.5,0,0,0,0,0,0" PannerName="Balance" currentExplodedTrackIndex="0" totalExplodedTrackCount="2" premiereTrackType="Stereo">\n')

        total = 0

        # Audio clips.
        for j, clip in enumerate(clips):
            outfile.write(f'\t\t\t\t\t<clipitem id="clipitem-{len(clips)+8+j}" premiereChannelType="stereo">\n')
            outfile.write('\t\t\t\t\t\t<masterclipid>masterclip-2</masterclipid>\n')
            outfile.write(f'\t\t\t\t\t\t<name>{name}</name>\n')

            myStart = int(total)
            total += (clip[1] - clip[0]) / (clip[2] / 100)
            myEnd = int(total)

            outfile.write(f'\t\t\t\t\t\t<start>{myStart}</start>\n')
            outfile.write(f'\t\t\t\t\t\t<end>{myEnd}</end>\n')
            outfile.write(f'\t\t\t\t\t\t<in>{int(clip[0] / (clip[2] / 100))}</in>\n')
            outfile.write(f'\t\t\t\t\t\t<out>{int(clip[1] / (clip[2] / 100))}</out>\n')
            outfile.write('\t\t\t\t\t\t<file id="file-2"/>\n')
            outfile.write('\t\t\t\t\t\t<sourcetrack>\n')
            outfile.write('\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
            outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
            outfile.write('\t\t\t\t\t\t</sourcetrack>\n')

            # Add speed effect for audio blocks.
            if (clip[2] != 100):
                outfile.write('\t\t\t\t\t\t<filter>\n')
                outfile.write('\t\t\t\t\t\t\t<effect>\n')
                outfile.write('\t\t\t\t\t\t\t\t<name>Time Remap</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effectid>timeremap</effectid>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effectcategory>motion</effectcategory>\n')
                outfile.write('\t\t\t\t\t\t\t\t<effecttype>motion</effecttype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>variablespeed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>variablespeed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>0</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>1</valuemax>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>0</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>speed</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>speed</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemin>-100000</valuemin>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<valuemax>100000</valuemax>\n')
                outfile.write(f'\t\t\t\t\t\t\t\t\t<value>{clip[2]}</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>reverse</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>reverse</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t\t<parameter authoringApp="PremierePro">\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<parameterid>frameblending</parameterid>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<name>frameblending</name>\n')
                outfile.write('\t\t\t\t\t\t\t\t\t<value>FALSE</value>\n')
                outfile.write('\t\t\t\t\t\t\t\t</parameter>\n')
                outfile.write('\t\t\t\t\t\t\t</effect>\n')
                outfile.write('\t\t\t\t\t\t</filter>\n')

            for i in range(3):
                outfile.write('\t\t\t\t\t\t<link>\n')
                outfile.write(f'\t\t\t\t\t\t\t<linkclipref>clipitem-{(i*(len(clips)+1))+7+j}</linkclipref>\n')
                if (i == 0):
                    outfile.write('\t\t\t\t\t\t\t<mediatype>video</mediatype>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<mediatype>audio</mediatype>\n')
                if (i == 2):
                    outfile.write('\t\t\t\t\t\t\t<trackindex>2</trackindex>\n')
                else:
                    outfile.write('\t\t\t\t\t\t\t<trackindex>1</trackindex>\n')
                outfile.write(f'\t\t\t\t\t\t\t<clipindex>{j+1}</clipindex>\n')
                if (i == 1 or i == 2):
                    outfile.write('\t\t\t\t\t\t\t<groupindex>1</groupindex>\n')
                outfile.write('\t\t\t\t\t\t</link>\n')

            outfile.write('\t\t\t\t\t</clipitem>\n')

        outfile.write('\t\t\t\t\t<outputchannelindex>1</outputchannelindex>\n')
        outfile.write('\t\t\t\t</track>\n')
        outfile.write('\t\t\t</audio>\n')
        outfile.write('\t\t</media>\n')
        outfile.write('\t</sequence>\n')
        outfile.write('</xmeml>')

    return 'export.xml'
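# The file written above is an xmeml (Final Cut Pro XML interchange) sequence,
# version 4, which Premiere Pro can import. The caller is expected to pick up
# 'export.xml' from the current working directory.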