def get_question(self):
    """
    Return a Score object that can be used to display the intervals.
    """
    score = elems.Score()
    staff = score.add_staff()
    staff.set_property(solfege.mpd.Rat(0, 1), 'hide-barline', True)
    staff.set_property(solfege.mpd.Rat(0, 1), 'hide-timesignature', True)
    score.voice11.append(
        elems.Note(self.m_tonika, solfege.mpd.Duration(4, 0)))
    last = self.m_tonika
    tones = [last]
    for i in self.m_question:
        n = mpd.Interval()
        n.set_from_int(i)
        last = last + n
        # Respell double accidentals (e.g. c##) as a simpler enharmonic pitch.
        if abs(last.m_accidental_i) > 1:
            last.normalize_double_accidental()
        tones.append(last)
        score.voice11.append(elems.Note(last, solfege.mpd.Duration(4, 0)))
    score.m_staffs[0].set_clef(
        solfege.mpd.select_clef(" ".join(
            [x.get_octave_notename() for x in tones])),
        solfege.mpd.Rat(0, 1))
    return score

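# A minimal sketch (not part of the original module) of the interval-stacking
# step in get_question() above, assuming the solfege.mpd API used there
# (Interval.set_from_int, MusicalPitch addition, normalize_double_accidental).
# The helper name stack_intervals and the import line are illustrative.
from solfege import mpd

def stack_intervals(tonic, semitone_steps):
    """Return the MusicalPitch objects reached by stacking the given steps."""
    tones = [tonic]
    for step in semitone_steps:
        interval = mpd.Interval()
        interval.set_from_int(step)
        tone = tones[-1] + interval
        # Same normalization as above: avoid double accidentals such as c##.
        if abs(tone.m_accidental_i) > 1:
            tone.normalize_double_accidental()
        tones.append(tone)
    return tones
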
def f(widget, i):
    added = self.g_rwidget.on_add_item(elems.Note(
        MusicalPitch.new_from_notename("c"), Duration(i, 0)))
    if self.g_add_dots_toggle.get_active():
        self.g_rwidget.on_toggle_dots(1)
    if added:
        self.g_rwidget.cursor_next()
    self.g_rwidget.grab_focus()

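# A minimal sketch (not part of the original code) of how a callback with the
# signature of f(widget, i) above could be connected to one GTK button per
# note length.  The helper name, button labels and containing box are
# illustrative assumptions, not Solfege API.
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

def add_duration_buttons(box, callback):
    """Create one button per duration and connect each to `callback`."""
    for duration in (1, 2, 4, 8, 16, 32):
        button = Gtk.Button(label=str(duration))
        # GTK passes the clicked widget first, then the user data (duration).
        button.connect("clicked", callback, duration)
        box.pack_start(button, False, False, 0)
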
def on_key_press(self, window, event):
    if not self.m_editable:
        return
    # Digit keys 1-6 select the note length: whole, half, ..., thirty-second.
    key_dict = {
        Gdk.KEY_1: 1,
        Gdk.KEY_2: 2,
        Gdk.KEY_3: 4,
        Gdk.KEY_4: 8,
        Gdk.KEY_5: 16,
        Gdk.KEY_6: 32,
    }
    if event.keyval in (Gdk.KEY_Right, Gdk.KEY_KP_Right):
        self.cursor_next()
        self.queue_draw()
    elif event.keyval in (Gdk.KEY_Left, Gdk.KEY_KP_Left):
        self.cursor_prev()
        self.queue_draw()
    elif event.keyval == Gdk.KEY_BackSpace:
        self.backspace()
    elif event.keyval in (Gdk.KEY_Delete, Gdk.KEY_KP_Delete):
        self.delete()
    elif event.keyval in key_dict:
        if self.m_input_mode == self.NOTE_INPUT:
            added = self.on_add_item(
                elems.Note(MusicalPitch.new_from_notename("c"),
                           Duration(key_dict[event.keyval], 0)))
        else:
            assert self.m_input_mode == self.REST_INPUT
            added = self.on_add_item(
                elems.Rest(Duration(key_dict[event.keyval], 0)))
        if added:
            self.cursor_next()
        self.queue_draw()
        self.grab_focus()
    elif event.keyval == Gdk.KEY_period:
        self.on_toggle_dots(1)
    elif event.keyval == Gdk.KEY_colon:
        self.on_toggle_dots(-1)
    elif event.keyval == Gdk.KEY_t:
        self.on_toggle_tie()
    elif event.keyval == Gdk.KEY_r:
        # Toggle between entering notes and entering rests.
        if self.m_input_mode == self.NOTE_INPUT:
            self.m_input_mode = self.REST_INPUT
        else:
            self.m_input_mode = self.NOTE_INPUT

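# Note (not part of the original widget): key_dict above maps digit key N to
# the note length 2 ** (N - 1), so the same table can be built
# programmatically; the name digit_durations is illustrative.
digit_durations = {getattr(Gdk, "KEY_%d" % n): 2 ** (n - 1) for n in range(1, 7)}

# A minimal sketch of connecting a handler with the signature above, assuming
# a GTK 3 widget; the widget must be focusable for "key-press-event" to be
# delivered to it.  connect_key_handler is a hypothetical helper.
def connect_key_handler(widget, handler):
    widget.set_can_focus(True)
    widget.connect("key-press-event", handler)
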
def play_question(self):
    cadence = self.m_cadence['music'][:]
    p = mpd.MusicalPitch.new_from_notename("c'") + self.m_tone
    if self.get_bool('random_tonic'):
        cadence = cadence.replace(
            "\\staff",
            "\\staff\\transpose %s" % self.m_transpose.get_octave_notename())
        p.transpose_by_musicalpitch(self.m_transpose)
    m = mpd.parse_to_score_object(cadence)
    # Here we assume that we can check the first voice of the first
    # staff when finding the timepos and the duration of the last
    # tone in the cadence. But it is ok to have more staffs or voices
    # in the cadence, as long as the first assumption is true.
    staff = m.add_staff()
    voice = staff.add_voice()
    if 'tone_instrument' in self.m_cadence:
        try:
            instr = soundcard.find_midi_instrument_number(
                self.m_cadence.get("tone_instrument"))
        except KeyError:
            logging.warning("Bad MIDI instrument name in «%s»",
                            self.m_P.m_filename)
            instr = cfg.get_int("config/preferred_instrument")
    else:
        instr = cfg.get_int("config/preferred_instrument")
    if self.get_bool('tone_in_cadence'):
        timepos = m.m_staffs[0].get_timeposes()[-1]
        last_len = m.m_staffs[0].m_voices[0].m_length - timepos
    else:
        timepos = m.m_staffs[0].m_voices[0].m_length
        last_len = mpd.Rat(1, 4)
    voice.set_elem([elems.Note(p, elems.Duration.new_from_rat(last_len))],
                   timepos)
    tr = mpd.score_to_tracks(m)
    t = self.m_cadence.get('tempo', (60, 4))
    tr[0].prepend_bpm(t[0], t[1])
    tr[-1].prepend_patch(instr)
    soundcard.synth.play_track(*tr)

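# A sketch (not part of the original method) of the string rewriting that
# play_question() performs when random_tonic is enabled.  The cadence string
# and the transposition pitch are illustrative, not taken from a real lesson
# file.
cadence = r"\staff{ \key c \major < c e g > < f a c' > < e g c' > }"
transpose_to = "d'"  # hypothetical value of m_transpose.get_octave_notename()
cadence = cadence.replace("\\staff", "\\staff\\transpose %s" % transpose_to)
# cadence is now: \staff\transpose d'{ \key c \major < c e g > ... }
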
def parse_to_score_object(music):
    lexer = Lexer(music)
    relative_mode = None
    relto = None
    transpose_pitch = None
    TOPLEVEL = 1        # 'toplevel'
    NOTES = 2           # 'notes'
    START_OF_CHORD = 3  # 'start-of-chord'
    CHORD = 4           # 'chord'
    context = TOPLEVEL
    score = elems.Score()
    chord_duration = None
    cur_duration = Duration(4, 0)
    tie_is_in_the_air = 0
    beam = None
    # None when not parsing notes in a tuplet. Item 0 is the ratio and
    # items 1.. are the notes.
    times = None
    cur_staff = None
    # We set it just so pyflakes does not complain.
    relto_backup = None
    # This variable is set to the duration of the pickup bar from when we
    # parse \partial nn until the bar has been created.
    partial = None
    for toc, toc_data in lexer:
        # Remember the last explicit duration, so tokens without one inherit it.
        try:
            if toc_data.m_duration:
                cur_duration = toc_data.m_duration.clone()
        except AttributeError:
            pass
        if toc == Lexer.STAFF:
            assert context == TOPLEVEL
            cur_staff = score.add_staff(elems.Staff)
            cur_voice = cur_staff.m_voices[-1]
            stem_dir = const.BOTH
            tuplet_dir = const.BOTH
            relative_mode = None
            timepos = Rat(0)
            last_pos = timepos
        elif toc == Lexer.RHYTHMSTAFF:
            assert context == TOPLEVEL
            cur_staff = score.add_staff(elems.RhythmStaff)
            cur_voice = cur_staff.m_voices[-1]
            stem_dir = const.BOTH
            tuplet_dir = const.BOTH
            relative_mode = None
            timepos = Rat(0)
            last_pos = timepos
        elif toc == Lexer.VOICE:
            if not cur_staff:
                raise ParseError("Don't use \\addvoice before \\staff", lexer)
            relative_mode = None
            timepos = Rat(0)
            cur_voice = cur_staff.add_voice()
        elif toc == Lexer.RELATIVE:
            assert not relative_mode
            relative_mode = 1
            relto = toc_data
        elif toc == Lexer.TRANSPOSE:
            transpose_pitch = toc_data
        elif toc == Lexer.PARTIAL:
            partial = toc_data
        elif toc == Lexer.TIME:
            if not cur_staff:
                raise ParseError("\\time can not be used before \\staff",
                                 lexer)
            # FIXME: for now, we only allow the first voice to change
            # time signatures.
            if cur_staff.m_voices.index(cur_voice) != 0:
                raise ParseError("only timesig in first voice", lexer)
            # FIXME: we are stricter with time signatures than both
            # solfege 3.16 and LilyPond.
            if not cur_voice.is_bar_full():
                raise ParseError("timesig change only when bar is full!",
                                 lexer)
            if partial:
                score.add_partial_bar(partial, toc_data)
                partial = None
            else:
                score.add_bar(toc_data)
        elif toc == Lexer.KEY:
            p = MusicalPitch.new_from_notename(toc_data[0])
            if transpose_pitch:
                p.transpose_by_musicalpitch(transpose_pitch)
            k = (p.get_notename(), toc_data[1])
            if not cur_staff:
                raise ParseError("\\key can not be used before \\staff",
                                 lexer)
            cur_staff.set_key_signature(k, timepos)
        elif toc == Lexer.TIMES:
            if not times:
                times = [toc_data]
            else:
                raise ParseError(r"\times nn/nn does not nest", lexer)
        elif toc == Lexer.CLEF:
            try:
                cur_staff.set_clef(toc_data, timepos)
            except elems.UnknownClefException as e:
                e.m_lineno, e.m_linepos1, e.m_linepos2 = \
                    lexer.get_error_location()
                raise
        elif toc == '|':
            if timepos != score.get_bar_at(last_pos).end():
                logging.warning("Bar check failed at %s", timepos)
        elif toc == '{':
            if context == TOPLEVEL:
                context = NOTES
                # if not cur_staff.m_coldict[Rat(0, 1)].m_keysignature:
                # FIXME don't understand
                if transpose_pitch:
                    k = (transpose_pitch.get_notename(), 'major')
                else:
                    k = ('c', 'major')
                cur_voice.set_key_signature(k)
            else:
                raise ParseError("Token '{' not allowed here.", lexer)
        elif toc == '<':
            if context == NOTES:
                context = START_OF_CHORD
            else:
                raise ParseError("Token '<' not allowed here.", lexer)
        elif toc == '>':
            if context == CHORD:
                if tie_is_in_the_air:
                    # The 3.16-parser only handles ties between whole
                    # chords, not single tones of a chord.
                    for last_note in cur_voice.m_tdict[last_pos]['elem']:
                        for cur_note in cur_voice.m_tdict[timepos]['elem']:
                            if (last_note.m_musicalpitch.get_octave_notename()
                                    == cur_note.m_musicalpitch.get_octave_notename()):
                                cur_voice.tie([last_note, cur_note])
                    tie_is_in_the_air = 0
                last_pos = timepos
                timepos = timepos + chord_duration.get_rat_value()
                chord_duration = None
                relto = relto_backup
                relto_backup = None
                context = NOTES
            else:
                raise ParseError("Token '>' not allowed here.", lexer)
        elif toc == '}':
            if context == NOTES:
                if times:
                    cur_voice.tuplet(times[0], tuplet_dir, times[1:])
                    times = None
                    cur_duration.m_tuplet = Rat(1, 1)
                else:
                    context = TOPLEVEL
            else:
                raise ParseError("Token '}' not allowed here.", lexer)
        elif toc == '[':
            beam = []
        elif toc == ']':
            cur_voice.beam(beam)
            beam = None
        elif toc == '~':
            tie_is_in_the_air = 1
        elif toc == Lexer.NOTE and context in (NOTES, CHORD, START_OF_CHORD):
            # FIXME check if toc_data.m_duration will ever be undefined.
            # If not we can do this:
            #   if not toc_data.m_duration:
            if not getattr(toc_data, 'm_duration', None):
                toc_data.m_duration = cur_duration.clone()
            if times:
                toc_data.m_duration.m_tuplet = times[0].clone()
            if relative_mode:
                toc_data.m_pitch = musicalpitch_relative(
                    relto, toc_data.m_pitch)
                relto = toc_data.m_pitch.clone()
            if transpose_pitch:
                toc_data.transpose(transpose_pitch)
            if partial:
                score.add_partial_bar(partial, None)
                partial = None
            if context == NOTES:
                note = elems.Note(toc_data.m_pitch, toc_data.m_duration)
                try:
                    cur_voice.append(note, stem_dir)
                except elems.Voice.BarFullException as e:
                    raise ParseError(str(e), lexer)
                if beam is not None:
                    beam.append(note)
                if times is not None:
                    times.append(note)
                # The 3.16-parser only handles ties between whole chords,
                # not single tones of a chord.
                if tie_is_in_the_air:
                    for last_note in cur_voice.m_tdict[last_pos]['elem']:
                        for cur_note in cur_voice.m_tdict[timepos]['elem']:
                            if (cur_note.m_musicalpitch.get_octave_notename()
                                    == last_note.m_musicalpitch.get_octave_notename()):
                                cur_voice.tie([last_note, cur_note])
                    tie_is_in_the_air = 0
                last_pos = timepos
                timepos = timepos + toc_data.m_duration.get_rat_value()
            elif context == START_OF_CHORD:
                cur_voice.append(
                    elems.Note(toc_data.m_pitch, toc_data.m_duration),
                    stem_dir)
                relto_backup = relto
                chord_duration = toc_data.m_duration
                context = CHORD
            elif context == CHORD:
                cur_voice.add_to(
                    timepos,
                    elems.Note(toc_data.m_pitch, toc_data.m_duration))
        elif toc == Lexer.SKIP and context == NOTES:
            if toc_data.m_duration:
                cur_duration = toc_data.m_duration.clone()
            else:
                toc_data.m_duration = cur_duration.clone()
            skip = elems.Skip(toc_data.m_duration)
            cur_voice.append(skip)
            last_pos = timepos
            timepos = timepos + toc_data.m_duration.get_rat_value()
        elif toc == Lexer.REST and context == NOTES:
            if toc_data.m_duration:
                cur_duration = toc_data.m_duration.clone()
            else:
                toc_data.m_duration = cur_duration.clone()
            rest = elems.Rest(toc_data.m_duration)
            cur_voice.append(rest)
            last_pos = timepos
            timepos += toc_data.m_duration.get_rat_value()
        elif toc == Lexer.STEMDIR:
            stem_dir = toc_data
        elif toc == Lexer.TUPLETDIR:
            tuplet_dir = toc_data
        else:
            raise ParseError(toc, lexer)
    return score

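# A minimal usage sketch (not part of the original module) for the parser
# above.  The music string only uses constructs the parser handles (\staff,
# \relative, \time, \key, a bar check and a tie) and is illustrative rather
# than taken from a real lesson file.
if __name__ == '__main__':
    music = r"""
    \staff\relative c'{
      \time 3/4
      \key g \major
      g4 a b | c2. ~ | c2.
    }
    """
    score = parse_to_score_object(music)
    print(score.m_staffs)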