# NOTE(review): fragment of a playback-synchronization loop — the enclosing
# loop header is not visible in this chunk (the `continue` below implies one).
# Indentation was reconstructed from a whitespace-mangled source; confirm the
# nesting against the original file.

# Fall back to whatever song the "chrome" source currently reports, if any.
if current_song is None:
    current_song = CURRENT_SONGS.get("chrome")

# Song unknown, or known but not yet processed into the local song database.
if current_song is None or current_song not in SONG_DB:
    if current_song is not None:
        # current_song appears to be an (artist, title) pair — TODO confirm.
        # Ask the user before downloading and processing the video.
        download = strtobool(
            input(
                "Would you like to download and process {} by {}?".format(
                    current_song[1], current_song[0])))
        if download:
            if current_song[0] is not None:
                # Build an "artist - title" search query.
                search = " - ".join(current_song)
            else:
                # Artist unknown — search by title alone.
                search = current_song[1]
            url = get_url(search)
            print(url)
            # Download/process the video and register it in SONG_DB
            # (SONG_LOCK presumably guards concurrent DB access — verify).
            gen_video(url, SONG_DB, SONG_LOCK)
            print(SONG_DB)
        # NOTE(review): if the user declines the download, execution appears
        # to fall through to SONG_DB[current_song] below, which would raise
        # KeyError — verify the intended control flow in the original file.
    else:
        # Nothing is playing at all — wait a bit and retry the loop.
        print("Sleeping...", current_song)
        time.sleep(5)
        continue

# Load the pre-processed demo for the current song and align the live
# capture against it.
demo = Demo.from_file(SONG_DB[current_song])
print(len(demo.data), len(demo.wav))
seconds = 4
# assumes a 48 kHz capture rate — TODO confirm against the audio stream setup
frames = 48000 * seconds
data = read_stream(stream, frames)
start = time.time()
# Cross-correlate via FFT convolution with the time-reversed capture to find
# where the live snippet occurs inside the song's waveform.
xcorr = fftconvolve(demo.wav, data[::-1])
# Compensate for the time spent correlating, then convert the peak index
# (in samples at 48 kHz) to seconds into the video.
time_in_video = time.time() - start + xcorr.argmax() / 48000
play_at_time(demo, time_in_video)
import re
import time

import pyaudio
from matplotlib import pyplot as plt
from scipy.signal import fftconvolve
from videoprocess import process_video, play_at_time
from demo import Demo
import ffmpeg
import numpy as np

CHUNKSIZE = 512  # fixed chunk size


def read_stream(stream, frames):
    """Read `frames` stereo int16 frames from an audio stream and downmix to mono.

    The raw bytes are interpreted as interleaved 2-channel int16 samples;
    the two channels are averaged, so the result is a float ndarray of
    length `frames`.
    """
    raw = np.frombuffer(stream.read(frames), dtype=np.int16)
    return np.mean(raw.reshape(-1, 2), axis=1)


if __name__ == '__main__':
    demo = Demo.from_file("Drunk.npz")

    # Decode the video's audio track to mono 48 kHz WAV via ffmpeg, capturing
    # stderr so the actual sample rate can be parsed from ffmpeg's log output.
    out, err = (
        ffmpeg
        .input('Robotaki - Drunk.mp4')
        .output('pipe:', ac=1, ar=48000, format='wav')
        .run(capture_stdout=True, capture_stderr=True)
    )
    # BUG FIX: `re` was used here without ever being imported (NameError at
    # runtime); it is now imported at the top of the file.
    wavrate = int(re.search(r"([0-9]+) Hz", err.decode('utf-8')).group(1))
    wav = np.frombuffer(out, np.int16)
#!/usr/bin/env python
"""Parse the demo file named on the command line, with debug logging enabled."""
import os, sys, time
import logging
from demo import Demo

# Emit everything down to DEBUG level while parsing.
logging.basicConfig(level=logging.DEBUG)


def _main(path):
    # Hand the open file object to Demo and run the parser.
    with open(path, "r") as demo_file:
        Demo.from_file(demo_file).parse()


if __name__ == "__main__":
    _main(sys.argv[1])