def main_2(): short_term_mem = [] data_cache = dict() m = pi_audio.Microphone() m.start_stream() fft = pi_signal.fft_object(CHUNK_SIZE, FREQUENCY) count = 0 data = list() while True: data += m.get_stream_buffer() if len(data) >= CHUNK_SIZE: bar = fft.get_group_note_for_data(data[0:CHUNK_SIZE])[0].getasstr() if bar in data_cache.keys(): data_cache[bar] = [time.time(), data_cache[bar][1] + 1] else: data_cache[bar] = [time.time(), 1] # FIFO buffer as short-term memory if bar in short_term_mem: short_term_mem.remove(bar) short_term_mem = [bar] + short_term_mem print "{0} @ {2} | {1} vs {3}".format(bar, count, data_cache[bar], len(data_cache.keys())) #time.sleep(.2) count += 1 # Delete used data portion data = data[CHUNK_SIZE:] return short_term_mem, data_cache, mic
def test_1():
    """Wire the microphone into a processing pipeline: FFT note groups are
    fed to a cluster of parallel analysis/logging nodes until processing
    is stopped.  start_processing() blocks.
    """
    mic = pi_audio.Microphone()
    mic.start_stream()

    # FFT sized to the microphone's own chunk/rate settings.
    fft = pi_signal.fft_object(mic.chunk_size, mic.rate)

    def __generate_data():
        # Producer callback: classify the latest chunk, or None if no data.
        raw = mic.get_recent_chunked_stream_buffer()
        if not raw:
            return None
        return fft.get_group_note_for_data(raw)

    # Consumers that all receive each generated note group in parallel.
    cluster = NodeCluster()
    for node in (PrimaryPitchPrinter(),
                 NoteGroupChangeLog(),
                 NoteFrequencyTracker(),
                 NoteGroupOverTimeGraph(),
                 pi_signal.NoteClustifier(LOG_DIR + "note_clusters_over_time.log")):
        cluster.add_parallel_node(node)

    task_processor = ChunkStreamSimultaneousProcessor()
    task_processor.add_data_inline_process(__generate_data, cluster)
    task_processor.start_processing()  # blocking
    task_processor.stop_processing()
def __init__(self, chunk_size=CHUNK_SIZE, rate=FREQUENCY):
    """Set up the audio subscriber: register a ROS node, open the speaker
    used for playback, create empty stereo sample buffers, and build an
    FFT helper sized to chunk_size samples at the given sample rate.
    """
    # Stream has not been started yet.
    self._started = False
    # Register this module with ROS.
    rospy.init_node('audio_subscriber', anonymous=True)
    # Playback device for received audio.
    self.audio_output = pi_audio.Speaker()
    # Per-channel sample buffers.
    self.left_data, self.right_data = [], []
    # Signal-processing configuration and FFT helper.
    self.chunk_size, self.rate = chunk_size, rate
    self.fft_object = pi_signal.fft_object(self.chunk_size, self.rate)
def main_1(): print "Getting mic ..." m = pi_audio.Microphone() print "Starting stream ...", print m.start_stream() gdn = None for i in xrange(3): print "Getting stream ...", data = m.get_stream_buffer() print len(data) if len(data) > 0: fft = pi_signal.fft_object(CHUNK_SIZE, FREQUENCY) gdn = fft.get_group_note_for_data(data) print "Stoping stream ..." m.stop_stream() return m, gdn
def main_25(): MAX_MEM_WORD_LENGTH = 20 short_term_mem = [] data_cache = dict() m = pi_audio.Microphone() m.start_stream() fft = pi_signal.fft_object(CHUNK_SIZE, FREQUENCY) count = 0 data = list() word = list() words = list() heard_time = list() word_freq_count = list() while True: data += m.get_stream_buffer() if len(data) >= CHUNK_SIZE: bar = fft.get_group_note_for_data(data[0:CHUNK_SIZE])[0].getasstr() word.append(bar) if len(word) <= 0: pass elif word in words: index = words.index(word) heard_time[index] = time.time() word_freq_count[index] += 1 elif not word in words: words.append(word) heard_time.append(time.time()) word_freq_count.append(1) elif (len(word) >= MAX_MEM_WORD_LENGTH): word = list() print "word = {0}".format(word) # Delete used data portion data = data[CHUNK_SIZE:] return words, heard_time, word_freq_count, mic
def test_2():
    """Feed a WAV file through the note-classification pipeline: FFT note
    groups from the sound file are dispatched to a cluster of parallel
    analysis/logging nodes.  start_processing() blocks until the stream
    is exhausted or processing is stopped.
    """
    sound_file = pi_audio.SoundFile(filename=SOUND_DIR + "name-01.wav")
    sound_file.start_stream()
    sample_frequency = sound_file.rate
    number_of_sample_points = sound_file.chunk_size
    fft = pi_signal.fft_object(number_of_sample_points, sample_frequency)

    def __generate_data():
        # BUG FIX: the original declared 'global data' alongside an outer
        # local 'data = None'; the global never touched that outer variable
        # and only polluted module scope.  A plain local suffices — the
        # value is assigned before use.
        data = sound_file.get_stream_buffer()
        if data:
            return fft.get_group_note_for_data(data)
        return None

    def __generate_data_2():
        # Progress reporter (currently unused; see commented-out hookup).
        return "Progress {0}%".format(
            sound_file.current_index / float(sound_file.data_length) * 100.0)

    cluster = NodeCluster()
    cluster.add_parallel_node(PrimaryPitchPrinter())
    cluster.add_parallel_node(NoteGroupChangeLog())
    cluster.add_parallel_node(NoteFrequencyTracker())
    cluster.add_parallel_node(NoteGroupOverTimeGraph())
    cluster_2 = NodeCluster()
    cluster_2.add_parallel_node(DataPrinter())

    task_processor = ChunkStreamSimultaneousProcessor()
    task_processor.add_data_inline_process(__generate_data, cluster)
    #task_processor.add_data_inline_process(__generate_data_2, cluster_2)
    task_processor.start_processing()  # blocking
    task_processor.stop_processing()