def describe(result):
    notes = result.notes
    if len(notes) == 1:
        return color('green', str(list(notes)[0]))
    else:
        return color('yellow', '(%s)' % (' | '.join(map(str, sorted(notes)))))
def describe(component):
    if component is None:
        return ' ' * 14
    desc = '%-14s' % ('%5.1f, %s' % (component.intensity, component.state))
    if component.state is FADING and component.previous_state is RISING:
        return color('purple!', desc)
    elif component.state is RISING:
        return color('green', desc)
    elif component.state is FADING:
        return color('red', desc)
    else:
        return desc
def event(self, what, component):
    changed = False
    if what is NoteEventEmitter.HEARD:
        print color('green', component)
        changed = self._heard(component)
    elif what is NoteEventEmitter.FADED:
        print color('red', component)
        changed = self._faded(component)
    else:
        # ??
        assert what in NoteEventEmitter.EVENTS
    if changed:
        self._listener(self._results)
def show_results(results):
    def describe(result):
        notes = result.notes
        if len(notes) == 1:
            return color('green', str(list(notes)[0]))
        else:
            return color('yellow', '(%s)' % (' | '.join(map(str, sorted(notes)))))

    output = ', '.join(describe(result) for result in results) if results \
        else color('red', '----')
    print ' ' + output
    if not results:
        print interpreter._note_to_result
        print interpreter._component_to_result
if isinstance(recording, dict) and 'results' in recording:
    results = recording['results']
else:
    results = recording

plotted_series = defaultdict(list)
for result in results:
    needed = set(notes)
    for freq, intensity in result:
        note = Note.from_frequency(freq)
        if note in needed:
            needed.remove(note)
            plotted_series[note].append(intensity)
    for unfound in needed:
        plotted_series[unfound].append(0.0)

colors = get_colors(len(notes))
for i, color_info in enumerate(colors):
    print color(color_info[0], '%-3s', notes[i]),
print

plots = []
for note in notes:
    plots.append('{%s}' % ', '.join('%.02f' % power
                                    for power in plotted_series[note]))
print 'ListLinePlot[{%s},\n PlotStyle -> {%s},\n ' \
    'PlotRange -> All, PlotMarkers -> Automatic]' % \
    (',\n '.join(plots), ', '.join('%s' % c[1] for c in colors))
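# For illustration only (real intensities come from the captured results and
# the plot styles from get_colors()), the expression printed above has this
# general shape and can be pasted straight into Mathematica:
#
#   ListLinePlot[{{0.00, 4.31, 7.82, 3.10}, {0.00, 0.00, 2.95, 1.40}},
#     PlotStyle -> {Red, Blue},
#     PlotRange -> All, PlotMarkers -> Automatic]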
def _analyze_component(self, peaked_component, all_components):
    try:
        fingerprints = self._profile.peaks[peaked_component.note]
    except KeyError:
        return
        # print 'sorry, got nothing for %s' % str(peaked_component.note)

    def get_average_intensity(component, index):
        # average three samples of the component's history, starting at
        # `index` steps back from the most recent sample
        start = (-index) - 1
        length = 3
        intensities = component.intensities[start:start + length]
        try:
            return sum(intensities) / len(intensities)
        except ZeroDivisionError:
            return 0.0

    # peak is a (counter, intensity) pair; work out how far back it happened
    peak = peaked_component.peak
    peak_offset = peaked_component.counter - peak[0]

    # print peaked_component.note, peak[1],
    # for component in all_components:
    #     try:
    #         intensity = component.intensities[-peak_offset - 1]
    #         if peak[1] < intensity * 0.8:
    #             print
    #             return
    #     except IndexError:
    #         continue
    # print
    # print peaked_component.intensities

    peak_intensity = get_average_intensity(peaked_component, peak_offset)

    # collect the other components that were significant around the peak,
    # keyed by note, with their intensity as a ratio of the peak's
    supporters = {}
    if peak_intensity > 0.0:
        for component in all_components:
            if component == peaked_component:
                continue
            intensity = get_average_intensity(component, peak_offset)
            ratio = intensity / peak_intensity
            if ratio > 0.15:
                supporters[component.note] = ratio

    if supporters:
        average = sum(supporters.itervalues()) / len(supporters)
        maximum = max(supporters.itervalues())
    else:
        average = maximum = 0.0

    if self._debug_peaks:
        print >>sys.stderr, '%4d:' % peak[0],
        if supporters:
            if average <= 0.5:
                color_name = 'green!'
            else:
                color_name = 'blue!'
            average_s = '%.2f' % average
        else:
            color_name = 'yellow!'
            average_s = '----'
        print >>sys.stderr, color(color_name, 'Component %s peaked at '
                                  '(%.2f => %.2f): %s', peaked_component.note,
                                  peak[1], peak_intensity, average_s)
        print >>sys.stderr, ' ' + ', '.join(
            '%s: %.2f' % pair for pair in
            sorted(supporters.iteritems(), key=lambda (n, i): i, reverse=True))

    if peak_intensity <= 0.0:
        return

    best_match = self._profile.find_match(peaked_component.note, supporters)
    if best_match and average <= 0.8 and maximum <= 2.0:
        if self._debug:
            print >>sys.stderr, '%3d: Peak of %s @ %s indicates note %s ' \
                '(distance: %.2f)' % ((peak[0], peaked_component.note,
                                       peak_intensity) + best_match)
        self._callback(*best_match)
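# Rough numeric sketch of the heuristic above (all values illustrative): if
# the peaked component averages 10.0 around its peak while two other
# components average 2.0 and 1.5 over the same window, their ratios are 0.20
# and 0.15; only the first clears the > 0.15 cut, so supporters has a single
# entry, average == maximum == 0.20, and any match returned by
# self._profile.find_match() is accepted (average <= 0.8, maximum <= 2.0).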
def create_fingerprints(capturer, target, harmonics):
    runs = []
    notes = defaultdict(int)
    print "Fingerprinting %s;" % str(target),
    if harmonics is None:
        print "not restricting to harmonics."
    else:
        print "harmonics: %s" % ", ".join(str(h) for h in harmonics)

    def get_common(threshold=3):
        # notes that showed up in at least `threshold` capture runs
        common = set()
        for note, count in notes.iteritems():
            if count >= threshold:
                common.add(note)
        return common

    def make_fingerprint(notes, run):
        return dict((note, run.get(note, 0.0)) for note in notes)

    def reduce_run(run, max_length=5):
        # keep only the strongest few notes from a run
        return dict([(note, power) for note, power
                     in sorted(run.iteritems(), key=lambda p: -p[1])][:max_length])

    def make_combined(common_notes):
        # average each common note's power over the runs that contain it
        combined = {}
        for note in common_notes:
            total = 0.0
            count = 0
            for run in runs:
                try:
                    total += run[note]
                    count += 1
                except KeyError:
                    continue
            combined[note] = total / count
        return combined

    first_run = True
    while (len(runs) < 4 or len(get_common()) < 5) and len(runs) < 7:
        color_name = "purple!" if first_run else "black!"
        if len(runs) < 2:
            style = "quickly (do not hold the note)"
        elif len(runs) < 4:
            style = "and hold"
        else:
            style = "however you feel best expresses yourself"
        print color(color_name, "--> Play %s %s.", target, style)
        first_run = False

        results = capturer.capture()
        peaks = find_peaks(results, harmonics=harmonics, maximum=6)
        if not peaks:
            continue
        for note, intensity in peaks.iteritems():
            notes[note] += 1
        print "{%s}" % (", ".join("%s: %d" % pair for pair in
                                  sorted(notes.iteritems(), key=lambda p: -p[1])))
        runs.append(peaks)

    common_notes = get_common()
    fingerprints = [reduce_run(run) for run in runs]
    fingerprints.append(make_combined(common_notes))

    for l in xrange(len(runs), 3, -1):
        very_common_notes = get_common(l)
        if len(very_common_notes) >= 3:
            max_combined = make_combined(very_common_notes)
            if max_combined != fingerprints[-1]:
                fingerprints.append(max_combined)
            break

    combined = {}
    strict = {}
    for note, count in notes.iteritems():
        if count >= 3:
            total = 0.0
            strict_total = 0.0
            for i, run in enumerate(runs):
                try:
                    total += run[note]
                    if count >= 4:
                        strict_total += run[note]
                    fingerprints[i][note] = run[note]
                except KeyError:
                    continue
            combined[note] = total / count
            if count >= 4:
                strict[note] = strict_total / count

    keys = []
    for fp in fingerprints + [combined, strict]:
        print "-" * 30
        for note, intensity in sorted(fp.iteritems(), key=lambda (n, i): -i):
            keys.append(note)
            print "%3s @ %5.2f" % (note, intensity)

    note_vector = create_vector(keys, combined)
    for i in xrange(2):
        print color("green!", "--> Play %s in some fashion.", target)
        results = capturer.capture()
        peaks = find_peaks(results, keys[0])
        if not peaks:
            print "no match"
        else:
            print "%.3f" % cosine_distance(note_vector,
                                           create_vector(keys, peaks))

    return fingerprints + [combined, strict]
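# create_vector() and cosine_distance() are imported from elsewhere in the
# project and are not shown in this excerpt.  A minimal sketch of what they
# are assumed to do -- order a run's intensities by `keys`, then compare two
# such vectors by the cosine of the angle between them -- follows; the real
# definitions may differ (for example by returning 1 - cosine, so that a
# smaller number really does mean "closer"):

import math

def _create_vector_sketch(keys, run):
    # one intensity per key, 0.0 for notes missing from this run
    return [run.get(key, 0.0) for key in keys]

def _cosine_sketch(a, b):
    dot = sum(x * y for x, y in zip(a, b))
    mag = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
    return (dot / mag) if mag else 0.0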
for warning in w:
    if warning.category is ExtraNoteWarning:
        warnings.append(warning.message)

if unheard is not None or error is not None or missing_rec is not None:
    color_name = 'red!'
    results['errors'] += 1
elif warnings:
    color_name = 'yellow!'
    results['warnings'] += 1
else:
    color_name = 'green!'
    results['ok'] += 1

print color(color_name, test.name)
for warning in warnings:
    print ' %2d: Extra notes:' % warning.expectation, \
        ', '.join(str(note) for note in warning.notes)
if unheard is not None:
    print color('red', ' %2d: Failed to detect ' % unheard.expectation +
                ', '.join(str(note) for note in unheard.notes))
if missing_rec is not None:
    print color('red', ' Unable to find recording %r' % missing_rec.args[0])
if error is not None:
    print color('red', ' %s' % error[1])
    trace = traceback.format_tb(error[2])
def _run(self):
    last_pos = -1
    last_expected_pos = -1
    for action, arg, expected_pos in self.instructions:
        if action == 'pause':
            print '--> Playing silence.'
            self.matcher.add([])
        elif action == 'play':
            print '--> Playing %s.' % \
                ', '.join(unparse_note(*freq_to_note(f)) for f in arg)
            self.matcher.add(arg)
        else:
            raise ValueError(action)

        # wait for the matcher to call our matched() method, and for
        # matched() to send us the matcher's new position through the queue
        new_pos = self._position_queue.get()

        # assess what happened:
        stop = False
        expectation_string = ''
        if expected_pos is None:
            # we didn't care where we went (instruction ended with ?)
            text_color = 'yellow'
        elif new_pos != expected_pos:
            # we didn't go where we expected
            text_color = 'red'
            stop = True
            verb = 'move to' if expected_pos != last_pos else 'stay at'
            expectation_string = '; expected to %s %s' % \
                (verb, self._describe_interval(expected_pos))
        else:
            # we went where we expected
            text_color = 'green'

        # display what happened
        if new_pos == last_pos:
            message = 'Stayed at'
        else:
            message = 'Moved to'
        print color(text_color, '<-- %s %s%s.', message,
                    self._describe_interval(new_pos), expectation_string)
        if stop:
            break

        # if we didn't care where we went on this step (i.e., the line for
        # this instruction ended with '?'), save our new position as the
        # last-expected position; otherwise, save the expected position
        last_expected_pos = expected_pos if expected_pos is not None \
            else new_pos
        last_pos = new_pos
        sleep(0.5)
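# Each instruction consumed by _run() above is an (action, arg, expected_pos)
# tuple: action is 'pause' or 'play', arg is a list of frequencies for 'play'
# (ignored for 'pause'), and expected_pos is the position the matcher should
# reach, or None when the test line ended with '?'.  An illustrative sequence
# (not taken from any real test script):
#
#   instructions = [
#       ('play', [261.6, 329.6, 392.0], 0),  # C major chord; expect position 0
#       ('pause', None, 0),                  # silence; expect to stay at 0
#       ('play', [440.0], None),             # ended with '?': position not checked
#   ]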
def print_result(action, note, score=None):
    color_name = 'green!' if action == '+' else 'red!'
    if score:
        print color(color_name, '%3s (%.3f)', note, score)
    else:
        print color(color_name, '%3s', note)
def print_match(note, distance):
    print color('red!', 'Found %s (distance: %.3f)' % (note, distance))
if options.track:
    # just track the frequency components visually; do not ID notes
    callback = show_components
else:
    # full monty: identify the notes being played
    detector = Identifier(print_result, profile, debug=options.debug,
                          debug_peaks=options.peaks)
    peak_tracker = PeakTracker(detector)
    callback = peak_tracker.update
tracker = ComponentTracker(callback)

if options.track:
    # print note headings
    for note in watched:
        print color('black!', '%-3s%s', note, ' ' * 11),
    print
    for note in watched:
        print color('black!', '=' * 14),
    print

if options.recording:
    # read the FFT results from a file created by audio/test.py
    with open(options.recording, 'rb') as stream:
        recording = pickle.load(stream)
    if isinstance(recording, dict) and 'results' in recording:
        results = recording['results']
    else:
        results = recording
if len(notes) > 0:
    print "Note highlighting:",
    highlighted = notes[:len(colors)]
    if options.overtones:
        i = 2.0
        while len(highlighted) + len(notes) <= len(colors):
            highlighted += [freq * i for freq in notes]
            i += 1
        highlighted = list(set(highlighted))
        highlighted.sort()
    colors = colors[:len(highlighted)]
    for color_name, h_freq in zip(colors, highlighted):
        print color(color_name, '%3s ' % unparse_note(*freq_to_note(h_freq))),
    print
else:
    highlighted = []

listener = audio.Listener(window_size=options.window_size,
                          interval=options.interval,
                          filters=filters)
queue = listener.start()

recorded = []
series = dict((Note.from_frequency(f), []) for f in highlighted)
plotted_notes = set(series.keys())

while True:
    try:
        offset, buckets, data = queue.pop()