@classmethod
def getPos(cls):
    """Map each guitar string (1-6) to the tone names of frets 0-12."""
    _pos = {}
    # Open-string tunings from the 1st (high E) to the 6th (low E) string
    openTones = ["E", "B", "G", "D", "A", "E"]
    tone = Tone()
    for stringIndex, openTone in enumerate(openTones):
        toneIndex = tone.getToneNumberByName(openTone)  # numeric index of the open tone (currently unused)
        arr = []
        for i in range(13):
            # Tone name at fret i, counted up from the open string
            toneString = tone.getToneName(openTone, i)
            arr.append(toneString)
        _pos[stringIndex + 1] = arr
    return _pos
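# Usage sketch for getPos: a hedged example, assuming the method lives on a
# Fretboard-style class (hypothetical name) and that Tone.getToneName returns
# note names as strings; the Tone class itself is not shown in this excerpt.
positions = Fretboard.getPos()
for string_number, frets in positions.items():
    # frets[0] is the open string; frets[12] is the octave at the 12th fret
    print(string_number, frets[0], "...", frets[12])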
class Bot():
    def __init__(self):
        self.current_mood = 0
        self.tone = Tone(IAM_AUTHENTICATOR, SERVICE_URL)

    def _check_mood_tone(self, user_tones):
        # Score the detected tones: -1 means sad, 0 neutral, 1 happy
        user_emotions = [tone['tone_id'] for tone in user_tones]
        tones_mapper = lambda emotion: 1 if emotion in POSITIVE_EMOTIONS else (-1 if emotion in NEGATIVE_EMOTIONS else 0)
        return sum(map(tones_mapper, user_emotions))

    def _get_feedback_message(self):
        return random.choice(MESSAGES[self.current_mood])

    def _update_mood(self, user_moods):
        mood_tone = self._check_mood_tone(user_moods)
        new_mood = self.current_mood + mood_tone
        # Update the mood only if it stays in the allowed range [-1, 1]
        if -1 <= new_mood <= 1:
            self.current_mood = new_mood

    def _handle_mood_command(self):
        return MOODS[self.current_mood]

    def handle_user_message(self, msg):
        if msg.lower() == 'mood':
            return self._handle_mood_command()
        user_moods = self.tone.get_tone(msg)
        if user_moods:
            self._update_mood(user_moods)
        return self._get_feedback_message()
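# Interaction sketch for Bot, assuming IAM_AUTHENTICATOR and SERVICE_URL hold
# valid Watson Tone Analyzer credentials and that MESSAGES and MOODS are the
# lookup tables the class expects (none are defined in this excerpt).
bot = Bot()
print(bot.handle_user_message("I had a wonderful day!"))  # feedback from MESSAGES
print(bot.handle_user_message("mood"))  # current mood reported via MOODS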
class Chord:
    tone = Tone()

    @classmethod
    def parse(cls, chord, tensions):
        # Root, optional minor marker, and seventh quality (7 or M7)
        r = re.compile("(C|Db|D|Eb|E|F|Gb|G|Ab|A|Bb|B)(m?)(7|M7)")
        m = r.search(chord)
        root = m.group(1)
        thirdSymbol = "M" if m.group(2) == "" else m.group(2)
        third = cls.tone.getTone(root, thirdSymbol)
        fifth = cls.tone.getTone(root, "5")
        seventh = cls.tone.getTone(root, m.group(3))
        dic = {}
        dic[root] = "1"
        dic[third] = "3"
        dic[fifth] = "5"
        dic[seventh] = "7"
        if tensions is not None:
            for tension in tensions:
                t = cls.tone.getTone(root, tension)
                dic[t] = tension
            # Omit the root when the tensions contain a 9th
            if "9" in tensions or "b9" in tensions or "#9" in tensions:
                del dic[root]
            # Omit the 5th when the tensions contain a 13th
            if "13" in tensions or "b13" in tensions:
                del dic[fifth]
        return dic
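# Sanity-check sketch for Chord.parse; the exact note spellings below are an
# assumption, since they depend on the unshown Tone.getTone implementation.
voicing = Chord.parse("C7", ["9"])
# With a conventional getTone this would be roughly
# {"E": "3", "G": "5", "Bb": "7", "D": "9"}  (root omitted because of the 9th)
print(voicing)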
def __init__(self, samples_per_second):
    # DEBUG
    # Create a buffer to store the outdata for analysis
    duration = 20  # seconds
    self.out_pcm = numpy.zeros(
        (duration * samples_per_second, output_channels))
    self.out_pcm_pos = 0

    # Threading event on which the stream can wait
    self.event = threading.Event()

    self.samples_per_second = samples_per_second

    # Allocate space for recording
    max_recording_duration = 10  # seconds FIXME
    max_recording_samples = max_recording_duration * samples_per_second
    self.rec_pcm = numpy.zeros((max_recording_samples, input_channels))

    # Initialise recording position
    self.rec_position = 0

    # Current state of the process
    self.process_state = ProcessState.RESET

    # Instance of the Fast Fourier Transform (FFT) analyser
    self.fft_analyser = FFTAnalyser(
        array=self.rec_pcm,
        samples_per_second=samples_per_second,
        freqs=[375],
    )

    # Variable to record when we entered the current state
    self.state_start_time = None

    # Specify the minimum number of samples
    self.min_num_samples = 50

    # Variables for tones
    tone_duration = 1  # second
    n_tones = len(self.fft_analyser.freqs)
    self.tones = [
        Tone(freq,
             samples_per_second,
             output_channels,
             max_level=1 / n_tones,
             duration=tone_duration)
        for freq in self.fft_analyser.freqs
    ]

    # Variables for warming up the stream
    self.warmup_stream_duration = 1  # seconds
    self.warmup_stream_start_time = None

    # Variables for the measurement of levels of silence
    self.silence_threshold_samples = 100  # samples
    self.silence_levels = []
    self.silence_start_time = None
    self.silence_mean = None
    self.silence_sd = None
    self.silence_mean_threshold = 1e-6
    self.silence_sd_threshold = 1e-6
    self.silence_max_time_in_state = 5  # seconds

    # Variables for fadein tone0
    self.fadein_tone0_duration = 50 / 1000  # seconds

    # Variables for non-silence
    self.non_silence_threshold_num_sd = 4  # number of std. deviations away from silence
    self.non_silence_threshold_samples = 100  # samples of non-silence
    self.non_silence_samples = 0
    self.non_silence_detected = False
    self.non_silence_abort_start_time = None
    self.non_silence_max_time_in_state = 5  # seconds

    # Variables for measurement of tone0 and not-tone1
    self.tone0_levels = []
    self.tone0_threshold_samples = 100
    self.tone0_mean = None
    self.tone0_sd = None
    self.tone0_abs_pcm_mean = None
    self.tone0_abs_pcm_sd = None
    self.tone0_abs_pcm_means = []

    # Variables for detect silence
    self.detect_silence_detected = False
    self.detect_silence_threshold_num_sd = 3  # std. deviations from tone0_tone1
    self.detect_silence_samples = 0
    self.detect_silence_threshold_samples = 100  # samples
    self.detect_silence_max_time_in_state = 5  # seconds

    # Variables for measure silence 2
    self.measure_silence2_threshold_samples = 100
    self.measure_silence2_samples = 0
    self.measure_silence2_abs_pcm_means = []
    self.silence_abs_pcm_mean = None
    self.silence_abs_pcm_sd = None

    # Variables to store errors raised during audio processing
    self.error = None
    self.exception = None
def __init__(self, samples_per_second):
    # DEBUG
    # Create a buffer to store the outdata for analysis
    duration = 20  # seconds
    self.out_pcm = numpy.zeros(
        (duration * samples_per_second, output_channels))
    self.out_pcm_pos = 0

    # Threading event on which the stream can wait
    self.event = threading.Event()

    # Queues to hold input and output frames for debugging
    self.q_in = queue.Queue()
    self.q_out = queue.Queue()

    # Queue to hold click-based information for processing
    # outside of the audio thread.
    self.q_click = queue.Queue()

    # Store samples per second
    self.samples_per_second = samples_per_second

    # Allocate space for recording
    max_recording_duration = 10  # seconds
    max_recording_samples = max_recording_duration * samples_per_second
    self.rec_pcm = numpy.zeros((max_recording_samples, input_channels))

    # Instance of the Fast Fourier Transform (FFT) analyser
    self.fft_analyser = FFTAnalyser(
        array=self.rec_pcm,
        samples_per_second=samples_per_second)

    # Tone to produce clicks
    self.tone = Tone(375)

    # Initialise recording position
    self.rec_position = 0

    # Current state of the process
    self.process_state = ProcessState.RESET

    # Variable to record when we entered the current state
    self.state_start_time = None

    # Variables from levels
    self.silence_abs_pcm_mean = levels["silence_abs_pcm_mean"]
    self.silence_abs_pcm_sd = levels["silence_abs_pcm_sd"]
    self.tone0_abs_pcm_mean = levels["tone0_abs_pcm_mean"]
    self.tone0_abs_pcm_sd = levels["tone0_abs_pcm_sd"]

    # Variables for detect silence
    self.detect_silence_detected = False
    self.detect_silence_threshold_num_sd = 5  # std. deviations
    self.detect_silence_threshold_samples = 20  # samples
    self.detect_silence_samples = 0

    # Variables for CLEANUP
    self.cleanup_cycles = 0
    self.cleanup_cycles_threshold = 3

    # Variables for PLAY_CLICK
    self.play_click_count = 0
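# The module-level `levels` dict read above is not defined in this excerpt.
# A minimal sketch of its shape with hypothetical values; only the key names
# are taken from the code.
levels = {
    "silence_abs_pcm_mean": 1.2e-5,
    "silence_abs_pcm_sd": 3.0e-6,
    "tone0_abs_pcm_mean": 0.05,
    "tone0_abs_pcm_sd": 0.002,
}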
def __init__(self, samples_per_second):
    # DEBUG
    # Create a buffer to store the outdata for analysis
    duration = 20  # seconds
    self.out_pcm = numpy.zeros(
        (duration * samples_per_second, output_channels))
    self.out_pcm_pos = 0

    # Threading event on which the stream can wait
    self.event = threading.Event()

    # Queue to save output data for debugging
    self.q = queue.Queue()

    # Queues for off-thread processing
    self.q_process = queue.Queue()

    # Store samples per second parameter
    self.samples_per_second = samples_per_second

    # Allocate space for recording
    max_recording_duration = 10  # seconds
    max_recording_samples = max_recording_duration * samples_per_second
    self.rec_pcm = numpy.zeros((max_recording_samples, input_channels))

    # Initialise recording position
    self.rec_position = 0

    # Current state of the process
    self.process_state = ProcessState.RESET

    # Instance of the Fast Fourier Transform (FFT) analyser
    self.fft_analyser = FFTAnalyser(
        array=self.rec_pcm,
        samples_per_second=samples_per_second)

    # Variable to record when we entered the current state
    self.state_start_time = None

    # Variables for tone
    assert self.fft_analyser.n_freqs == 1
    tone_duration = 1  # second
    self.tone = Tone(self.fft_analyser.freqs[0],
                     self.samples_per_second,
                     channels=output_channels,
                     max_level=1,
                     duration=tone_duration)

    # Variables for levels
    self.silence_mean = levels["silence_mean"]
    self.silence_sd = levels["silence_sd"]
    self.tone0_mean = levels["tone0_mean"]
    self.tone0_sd = levels["tone0_sd"]

    # Variables for DETECT_SILENCE_START
    self.detect_silence_start_threshold_levels = (
        (self.tone0_mean[0] + self.silence_mean[0]) / 2)
    self.detect_silence_start_samples = 0
    self.detect_silence_start_threshold_samples = 100  # samples
    self.detect_silence_start_detected = False

    # Variables for START_TONE
    self.start_tone_click_duration = 75 / 1000  # seconds

    # Variables for DETECT_TONE
    self.detect_tone_threshold = (
        (self.tone0_mean[0] + self.silence_mean[0]) / 2)
    self.detect_tone_start_detect_time = None
    self.detect_tone_threshold_duration = 50 / 1000  # seconds
    self.detect_tone_detected = False
    self.detect_tone_max_time_in_state = 5  # seconds

    # Variables for STOP_TONE
    self.stop_tone_fadeout_duration = 20 / 1000  # seconds

    # Variables for DETECT_SILENCE_END
    self.detect_silence_end_threshold_levels = (
        (self.tone0_mean[0] + self.silence_mean[0]) / 2)
    self.detect_silence_end_samples = 0
    self.detect_silence_end_threshold_samples = 10
    self.detect_silence_end_detected = False

    # Variables for CLEANUP
    self.cleanup_cycles = 0
    self.cleanup_cycles_threshold = 3

    # =======

    # Variables for START_TONE0
    self.start_tone0_start_play_time = None

    # Variables for DETECT_TONE0
    self.detect_tone0_threshold_num_sd = 4

    # Variables for START_TONE0_TONE1
    self.start_tone0_tone1_start_play_time = None
    self.start_tone0_tone1_fadein_duration = 5 / 1000  # seconds

    # Variables for DETECT_TONE0_TONE1
    self.detect_tone0_tone1_start_detect_time = None
    self.detect_tone0_tone1_threshold_num_sd = 4
    self.detect_tone0_tone1_threshold_duration = 50 / 1000  # seconds
    self.detect_tone0_tone1_max_time_in_state = 5  # seconds
    self.detect_tone0_tone1_detected = False
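# As above, `levels` is a global not shown here. The [0] indexing implies
# per-frequency sequences; these values are purely illustrative.
levels = {
    "silence_mean": [1.0e-6],
    "silence_sd": [2.0e-7],
    "tone0_mean": [0.04],
    "tone0_sd": [0.001],
}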
        })

        # Update the position of the last item that was processed
        self._pos = end_pos

        return results


if __name__ == "__main__":
    from tone import Tone

    samples_per_second = 48000
    fft_analyser = FFTAnalyser(samples_per_second)

    # Create a buffer to hold a sample
    samples = int(fft_analyser.window_width * samples_per_second)
    channels = 1
    buf = numpy.zeros((samples, channels), numpy.float32)

    # Populate the buffer with a tone
    frequency = 375  # Hz
    tone = Tone(frequency, channels=channels)
    tone.play()
    tone.output(buf)

    # Analyse the buffer
    result = fft_analyser.run(buf, len(buf))
    print("Levels at analysed frequencies:")
    print(numpy.around(result, 1))
"wholetone", "wholetone", "semitone", "wholetone", "wholetone" ] } if __name__ == "__main__": from tone import Tone import threading base_frequency = 440 for each in scales["major_pentatonic"]: print(base_frequency) tone = Tone(base_frequency * intervals[each]) base_frequency *= intervals[each] tone.playTone(500) base_frequency = 440 for each in scales["minor_pentatonic"]: print(base_frequency) tone = Tone(base_frequency * intervals[each]) base_frequency *= intervals[each] tone.playTone(500) chord = [] base_frequency = 440 for each in chords["major_seventh"]: noteThread = threading.Thread(target=Tone(base_frequency * intervals[each]).playTone(500)) chord.append(noteThread)
#!/usr/bin/python3
from rsclib.Rational import Rational
from tone import Voice, Bar, Tone, Tune, Pause, halftone, Meter

v1 = Voice(id='T1', clef='treble-8', name='Tenore I', snm='T.I')

b1 = Bar(8, 8)
b1.add(Tone(halftone('B'), 2))
b1.add(Tone(halftone('c'), 2))
b1.add(Tone(halftone('d'), 2))
b1.add(Tone(halftone('g'), 2))
v1.add(b1)

b1 = Bar(8, 8)
b1.add(Tone(halftone('f'), 6))
b1.add(Tone(halftone('e'), 2))
v1.add(b1)

b1 = Bar(8, 8)
b1.add(Tone(halftone('d'), 2))
b1.add(Tone(halftone('c'), 2))
b1.add(Tone(halftone('d'), 2))
b1.add(Tone(halftone('e'), 2))
v1.add(b1)

b1 = Bar(8, 8)
b1.add(Tone(halftone('d'), 4))
b1.add(Tone(halftone('c'), 2))
b1.add(Pause(2))
v1.add(b1)

v2 = Voice(id='T2', clef='treble-8', name='Tenore II', snm='T.II')
b2 = Bar(8, 8)
b2.add(Tone(halftone('G'), 2))