def main_2(): short_term_mem = [] data_cache = dict() m = pi_audio.Microphone() m.start_stream() fft = pi_signal.fft_object(CHUNK_SIZE, FREQUENCY) count = 0 data = list() while True: data += m.get_stream_buffer() if len(data) >= CHUNK_SIZE: bar = fft.get_group_note_for_data(data[0:CHUNK_SIZE])[0].getasstr() if bar in data_cache.keys(): data_cache[bar] = [time.time(), data_cache[bar][1] + 1] else: data_cache[bar] = [time.time(), 1] # FIFO buffer as short-term memory if bar in short_term_mem: short_term_mem.remove(bar) short_term_mem = [bar] + short_term_mem print "{0} @ {2} | {1} vs {3}".format(bar, count, data_cache[bar], len(data_cache.keys())) #time.sleep(.2) count += 1 # Delete used data portion data = data[CHUNK_SIZE:] return short_term_mem, data_cache, mic
def __init__(self, fps=30, left_channel_index=2, right_channel_index=3):
    """Initialize the ROS audio-publisher node.

    Args:
        fps: publish rate in frames per second; clamped to the range [1, 30].
        left_channel_index: device index for the left microphone channel.
        right_channel_index: device index for the right microphone channel.
    """
    # Create started variable (stream/publish loop not yet running)
    self._started = False
    # Setup publisher - Setup pub before init !Order Matters!
    self.audio_data_pub = rospy.Publisher('AudioData_01', AudioData, queue_size=10)
    # Initialize this module as a ros node
    rospy.init_node('audio_publisher', anonymous=True)
    # Setup refresh rate variables
    self.fps = min(max(fps, 1), 30)  # constrain bwt 1 and 30
    self.rate = rospy.Rate(self.fps)
    # Setup Microphone Device index
    self.left_device_index = left_channel_index
    self.right_device_index = right_channel_index
    # Create data buffer (reused message instance for publishing)
    self.audio_data_buffer = AudioData()
    # Create Mic Object
    # NOTE(review): only the left microphone is constructed here, and the
    # stored device indices are not passed to Microphone() — presumably the
    # right mic / index wiring happens elsewhere; confirm against the class.
    self.left_microphone = pi_audio.Microphone()
def test_1():
    """Stream FFT note-group data from the mic through a cluster of
    parallel analysis nodes until processing is stopped."""
    microphone = pi_audio.Microphone()
    microphone.start_stream()
    # fft is sized from the mic's own chunk size and sample rate.
    fft = pi_signal.fft_object(microphone.chunk_size, microphone.rate)

    def __generate_data():
        # Pull the most recent chunk; None signals "nothing to process yet".
        chunk = microphone.get_recent_chunked_stream_buffer()
        if not chunk:
            return None
        return fft.get_group_note_for_data(chunk)

    cluster = NodeCluster()
    analysis_nodes = [
        PrimaryPitchPrinter(),
        NoteGroupChangeLog(),
        NoteFrequencyTracker(),
        NoteGroupOverTimeGraph(),
        pi_signal.NoteClustifier(LOG_DIR + "note_clusters_over_time.log"),
    ]
    for node in analysis_nodes:
        cluster.add_parallel_node(node)

    processor = ChunkStreamSimultaneousProcessor()
    processor.add_data_inline_process(__generate_data, cluster)
    processor.start_processing()  # blocking
    processor.stop_processing()
def main(): print "--Recording--" m = pi_audio.Microphone() m.record(5) print "-- Done --" print "-- Playing --" s = pi_audio.Speaker() s.start_stream() s.push_data(m.data.tolist()) print "-- Done --" s.stop_stream()
def main_1(): print "Getting mic ..." m = pi_audio.Microphone() print "Starting stream ...", print m.start_stream() gdn = None for i in xrange(3): print "Getting stream ...", data = m.get_stream_buffer() print len(data) if len(data) > 0: fft = pi_signal.fft_object(CHUNK_SIZE, FREQUENCY) gdn = fft.get_group_note_for_data(data) print "Stoping stream ..." m.stop_stream() return m, gdn
def main_25(): MAX_MEM_WORD_LENGTH = 20 short_term_mem = [] data_cache = dict() m = pi_audio.Microphone() m.start_stream() fft = pi_signal.fft_object(CHUNK_SIZE, FREQUENCY) count = 0 data = list() word = list() words = list() heard_time = list() word_freq_count = list() while True: data += m.get_stream_buffer() if len(data) >= CHUNK_SIZE: bar = fft.get_group_note_for_data(data[0:CHUNK_SIZE])[0].getasstr() word.append(bar) if len(word) <= 0: pass elif word in words: index = words.index(word) heard_time[index] = time.time() word_freq_count[index] += 1 elif not word in words: words.append(word) heard_time.append(time.time()) word_freq_count.append(1) elif (len(word) >= MAX_MEM_WORD_LENGTH): word = list() print "word = {0}".format(word) # Delete used data portion data = data[CHUNK_SIZE:] return words, heard_time, word_freq_count, mic