def analyze(video_id):
    """Run the audio half of the analysis for a single video."""
    print("Beginning audio analysis...\n")
    file_name = audio_scraper.get_wav_from_vid(video_id)
    decision_tree = tree.generate()
    audio_file = AudioFile.open(file_name)
    frames = audio_file.frames(16384)  # fixed-size frames of 16384 samples
    save_spectrum_image(audio_file)
    analysis = []
    for i, frame in enumerate(frames):
        # print("Processing [audio frame {}]...".format(i))
        frequencies, levels = get_power_spectrum(frame)
        data = training_data.generate(frequencies, levels, file_name)
        try:
            analysis.append(decision_tree.predict(data))
        except ValueError as e:
            print("Encountered error: {0}".format(e))
    print("Audio analysis complete.\n")
    return analysis, frames
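# The helper get_power_spectrum() is used above but not defined here. A minimal
# sketch of what such a helper could look like, assuming each frame is a numpy
# array of raw samples at a 44.1 kHz sample rate; this is an illustration, not
# the repository's actual implementation.
import numpy as np

def get_power_spectrum_sketch(frame, sample_rate=44100):
    # Magnitude spectrum of the real-valued frame via a real FFT.
    levels = np.abs(np.fft.rfft(frame))
    # Frequency (Hz) associated with each FFT bin.
    frequencies = np.fft.rfftfreq(len(frame), d=1.0 / sample_rate)
    return frequencies, levels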
import random


def generate_ai():
    """Create and train one generation of 50 AIs, each on three random tickers."""
    ais = []
    prices, results = generate()
    for n in range(50):
        # Pick three of the eight tickers at random for this AI.
        indexes = [random.randint(0, 7) for _ in range(3)]
        ai = AI({'AAPL': [], 'AMD': [], 'AMZN': [], 'INTC': [],
                 'MSFT': [], 'CSCO': [], 'GPRO': [], 'NVDA': []})
        # Train on this AI's own random selection for 4000-10000 iterations.
        ai.train(
            [prices[indexes[0]], prices[indexes[1]], prices[indexes[2]]],
            [results[indexes[0]], results[indexes[1]], results[indexes[2]]],
            1000 * random.randint(4, 10))
        ais.append(ai)
    print("Generation finished!")
    return ais
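# Usage sketch (assumptions: generate() and the AI class are importable from
# this module; the AI prediction interface is not shown, so only construction
# and training are demonstrated here).
if __name__ == "__main__":
    generation = generate_ai()
    print("Trained {} AI instances in this generation.".format(len(generation)))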
import glob
import os


def build_dataset():
    """Load labelled spectrum exports from Audacity text files into feature vectors."""
    dataset = []
    class_labels = []
    audacity_files = glob.glob(os.getcwd() + '/audio/audio_data/*.txt')
    for file_name in audacity_files:
        if "SKIP" in file_name:
            continue
        frequencies = []
        levels = []
        with open(file_name, 'r') as f:
            # First line holds the class label; the rest are "frequency,level" pairs.
            category = f.readline().strip()
            class_labels.append(category)
            for line in f.readlines():
                line = line.split(",")
                frequencies.append(float(line[0]))
                levels.append(float(line[1]))
        data = training_data.generate(frequencies, levels, file_name)
        dataset.append(data)
    return dataset, class_labels
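# Sketch only: one way the output of build_dataset() could be fed to a decision
# tree. Whether tree.generate() actually wraps scikit-learn's
# DecisionTreeClassifier is an assumption here, not something this file shows.
from sklearn.tree import DecisionTreeClassifier

def train_tree_sketch():
    dataset, class_labels = build_dataset()
    classifier = DecisionTreeClassifier()
    classifier.fit(dataset, class_labels)  # one feature vector per labelled file
    return classifier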