def _apply_brahms_melodies_if_possible(
    cantus_firmus: basic.SequentialEvent[music.NoteLike],
) -> basic.SequentialEvent[basic.SequentialEvent[music.NoteLike]]:
    applied_brahms_melodies = basic.SequentialEvent([])
    current_rest_duration = 0
    last_bar_had_pitches = False
    for current_bar, next_bar in zip(cantus_firmus, cantus_firmus[1:]):
        if current_bar.pitch_or_pitches:
            # flush any accumulated rest before appending a new melody
            if current_rest_duration != 0:
                applied_brahms_melodies.append(
                    basic.SequentialEvent([
                        music.NoteLike(
                            [],
                            current_rest_duration + fractions.Fraction(1, 4))
                    ]))
                current_rest_duration = 0
            # build all Brahms melody candidates for this bar and pick the one
            # whose last pitch connects the current and the next bar
            potential_brahms_melodies = _make_potential_brahms_melodies(
                current_bar)
            applied_brahms_melodies.append(
                _select_from_potential_brahms_melodies(
                    potential_brahms_melodies, current_bar, next_bar))
            last_bar_had_pitches = True
        else:
            # accumulate the durations of consecutive rest bars
            if last_bar_had_pitches:
                current_rest_duration -= fractions.Fraction(1, 4)
            current_rest_duration += current_bar.duration
            last_bar_had_pitches = False
    # close with a rest for the final bar
    applied_brahms_melodies.append(
        basic.SequentialEvent([
            music.NoteLike([], next_bar.duration - fractions.Fraction(1, 4))
        ]))
    return applied_brahms_melodies
def synthesize_applied_cantus_firmus(cantus_firmus: basic.SequentialEvent):
    from mutwo.converters.frontends import midi

    # drone: every pitched bar is reduced to its first pitch, one octave lower
    drone = basic.SequentialEvent([
        music.NoteLike(
            note.pitch_or_pitches[0] - pitches.JustIntonationPitch("2/1"),
            note.duration,
        ) if note.pitch_or_pitches else note for note in cantus_firmus
    ])
    # melody: every pitched bar is arpeggiated in 1/6 notes, cycling through
    # its sorted pitches
    melody = basic.SequentialEvent([])
    for note in cantus_firmus:
        if note.pitch_or_pitches:
            pitches_cycle = itertools.cycle(sorted(note.pitch_or_pitches))
            for _ in range(int(note.duration * 6)):
                melody.append(
                    music.NoteLike(next(pitches_cycle),
                                   fractions.Fraction(1, 6)))
        else:
            melody.append(copy.copy(note))
    for name, sequential_event in (("drone", drone), ("melody", melody)):
        converter = midi.MidiFileConverter(
            "builds/materials/applied_cantus_firmus_{}.mid".format(name))
        converter.convert(
            sequential_event.set_parameter("duration",
                                           lambda duration: duration * 4,
                                           mutate=False))
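# Hedged usage sketch (not part of the original module): a minimal call of
# synthesize_applied_cantus_firmus with a hypothetical two-bar cantus firmus.
# It assumes the module-level imports used above (basic, music, fractions) and
# that the output directory "builds/materials" already exists.
def _example_synthesize_applied_cantus_firmus() -> None:
    example_cantus_firmus = basic.SequentialEvent([
        # a pitched bar: the drone plays its first pitch an octave lower, the
        # melody arpeggiates it in 1/6 notes
        music.NoteLike("3/2", fractions.Fraction(1, 2)),
        # a rest bar, copied unchanged into the melody
        music.NoteLike([], fractions.Fraction(1, 4)),
    ])
    synthesize_applied_cantus_firmus(example_cantus_firmus)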
def _select_from_potential_brahms_melodies(
    potential_brahms_melodies: typing.Tuple[basic.SequentialEvent, ...],
    current_bar: music.NoteLike,
    next_bar: music.NoteLike,
) -> basic.SequentialEvent[music.NoteLike]:
    pitches_per_bar0, pitches_per_bar1 = (set(
        map(lambda pitch: pitch.normalize(mutate=False).exponents,
            pitch_or_pitches)) for pitch_or_pitches in (
                current_bar.pitch_or_pitches,
                next_bar.pitch_or_pitches,
            ))
    common_pitches = tuple(
        pitches.JustIntonationPitch(pitch)
        for pitch in pitches_per_bar0.intersection(pitches_per_bar1))
    is_last_pitch_connection_pitch_per_brahms_melody = tuple(
        brahms_melody[-1].pitch_or_pitches[0].normalize(mutate=False) in
        common_pitches for brahms_melody in potential_brahms_melodies)
    if any(is_last_pitch_connection_pitch_per_brahms_melody):
        nth_melody_is_possible = (
            is_last_pitch_connection_pitch_per_brahms_melody.index(True))
        return potential_brahms_melodies[nth_melody_is_possible]
    else:
        return basic.SequentialEvent(
            [music.NoteLike([], current_bar.duration)])
def load_cantus_firmus():
    import music21

    m21_lasso_measures = music21.converter.parse(
        # "ot2/analysis/data/lasso_cantus_firmus.mxl"
        "ot2/analysis/data/lasso_adjusted_cantus_firmus.mxl"
    )[1].getElementsByClass("Measure")
    mutwo_lasso = basic.SequentialEvent([])
    current_root_pitch = None
    previous_event = None
    melodic_pitch_counter = 0
    for measure in m21_lasso_measures:
        for event in measure:
            if isinstance(event, music21.note.GeneralNote):
                event_duration = fractions.Fraction(
                    event.duration.quarterLength) / 4
                if previous_event and previous_event.isRest and not event.isRest:
                    current_root_pitch = cantus_firmus_constants.START_PITCH_TO_ROOT[
                        event.pitch.name.lower()]
                if event.isRest:
                    melodic_pitch_counter = 0
                    mutwo_lasso.append(music.NoteLike([], event_duration))
                else:
                    try:
                        ji_ratios = cantus_firmus_constants.INTERVALS[
                            melodic_pitch_counter]
                    except IndexError:
                        ji_ratios = cantus_firmus_constants.INTERVALS[-1]
                    duration_per_ratio = event_duration / len(ji_ratios)
                    for ji_ratio in ji_ratios:
                        pitch = (ji_ratio + current_root_pitch -
                                 pitches.JustIntonationPitch("2/1"))
                        note = music.NoteLike([pitch], duration_per_ratio)
                        mutwo_lasso.append(note)
                    melodic_pitch_counter += 1
                previous_event = event
    return mutwo_lasso
def imitate_melody(
    melody_to_imitate: basic.SequentialEvent[music.NoteLike],
    harmony: typing.Sequence[pitches.JustIntonationPitch],
) -> basic.SequentialEvent[music.NoteLike]:
    melodic_pitches = tuple(
        pitch_or_pitches[0] for pitch_or_pitches in
        melody_to_imitate.get_parameter("pitch_or_pitches")
        if pitch_or_pitches)
    transformed_pitches = iter(imitations.imitate(melodic_pitches, harmony))
    imitated_melody = basic.SequentialEvent([
        music.NoteLike(next(transformed_pitches), duration)
        if original_pitch else music.NoteLike([], duration)
        for original_pitch, duration in zip(
            melody_to_imitate.get_parameter("pitch_or_pitches"),
            melody_to_imitate.get_parameter("duration"),
        )
    ])
    return imitated_melody
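# Hedged usage sketch (not part of the original module): imitate_melody keeps
# the rhythm and rest structure of melody_to_imitate and replaces every pitch
# with a pitch returned by imitations.imitate for the given harmony.  The
# melody and harmony below are hypothetical and assume the imports used above.
def _example_imitate_melody() -> basic.SequentialEvent:
    melody_to_imitate = basic.SequentialEvent([
        music.NoteLike("1/1", fractions.Fraction(1, 4)),
        music.NoteLike([], fractions.Fraction(1, 4)),  # rests stay rests
        music.NoteLike("9/8", fractions.Fraction(1, 2)),
    ])
    harmony = (
        pitches.JustIntonationPitch("1/1"),
        pitches.JustIntonationPitch("5/4"),
        pitches.JustIntonationPitch("3/2"),
    )
    return imitate_melody(melody_to_imitate, harmony)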
def make_applied_cantus_firmus(
    cantus_firmus: basic.SequentialEvent,
) -> basic.SequentialEvent[music.NoteLike]:
    transitional_harmonies = _load_transitional_harmonies()
    cantus_firmus = _process_cantus_firmus(cantus_firmus)
    previous_pitch_or_pitches = None
    applied_cantus_firmus = basic.SequentialEvent([])
    roots = (cantus_firmus_constants.START_PITCH_TO_ROOT[start_pitch]
             for start_pitch in "c e a c e a d g c e a".split(" "))
    current_root = None
    for pitch_or_pitches, next_pitch_or_pitches, duration in zip(
            cantus_firmus.get_parameter("pitch_or_pitches"),
            cantus_firmus.get_parameter("pitch_or_pitches")[1:] + (None, ),
            cantus_firmus.get_parameter("duration"),
    ):
        if pitch_or_pitches:
            if current_root is None:
                current_root = next(roots)
            current_pitch = pitch_or_pitches[0]
            previous_pitch = (previous_pitch_or_pitches[0]
                              if previous_pitch_or_pitches else current_pitch)
            next_pitch = (next_pitch_or_pitches[0]
                          if next_pitch_or_pitches else current_pitch)
            # look up the transitional harmony for the movement
            # (current, previous, next), expressed relative to the current root
            harmony = [
                current_pitch + pitch
                for pitch in transitional_harmonies[tuple(
                    (pitch - current_root).normalize(mutate=False).exponents
                    for pitch in (current_pitch, previous_pitch, next_pitch))]
            ]
            # make everything more readable: transpose the harmony a fifth down
            # (go down to pitch 'e')
            harmony = [
                pitch - pitches.JustIntonationPitch("3/2") for pitch in harmony
            ]
            event = music.NoteLike(harmony, duration)
        else:
            event = music.NoteLike([], duration)
            current_root = None
        applied_cantus_firmus.append(event)
        previous_pitch_or_pitches = pitch_or_pitches
    return applied_cantus_firmus
def _make_drone(
    melody_part: typing.Tuple[typing.Tuple[
        typing.Tuple[pitches.JustIntonationPitch, ...],
        fractions.Fraction,
        typing.Optional[basic.SequentialEvent[music.NoteLike]],
    ], ...],
) -> basic.SequentialEvent[music.NoteLike]:
    drone = basic.SequentialEvent([])
    for harmony, duration, _ in melody_part:
        drone.append(
            music.NoteLike(harmony[0].register(-2, mutate=False), duration))
    # shift a quarter note from the first to the last drone event
    drone[0].duration -= fractions.Fraction(1, 4)
    drone[-1].duration += fractions.Fraction(1, 4)
    return drone
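# Hedged usage sketch (not part of the original module): _make_drone only reads
# the first pitch and the duration of each melody_part entry; the optional
# third element (an already applied melody) is ignored and left as None here.
# The melody_part below is hypothetical.
def _example_make_drone() -> basic.SequentialEvent:
    melody_part = (
        ((pitches.JustIntonationPitch("3/2"), ), fractions.Fraction(2, 1), None),
        ((pitches.JustIntonationPitch("1/1"), ), fractions.Fraction(2, 1), None),
    )
    return _make_drone(melody_part)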
def convert(
    self,
    tagged_simultaneous_event: ot2_basic.TaggedSimultaneousEvent[
        basic.SimultaneousEvent[basic.SequentialEvent[music.NoteLike]]],
) -> abjad.Score:
    staff_group = abjad.StaffGroup([])
    duration = tagged_simultaneous_event.duration
    for instrument_id in sorted(
            tagged_simultaneous_event.tag_to_event_index.keys(),
            key=lambda tag: tagged_simultaneous_event.tag_to_event_index[tag],
    ):
        converter = self._instrument_id_to_sequential_event_to_abjad_voice_converter[
            instrument_id]
        simultaneous_event = tagged_simultaneous_event[instrument_id]
        staff = abjad.Staff([], simultaneous=True)
        # staff.remove_commands.append("Time_signature_engraver")
        for sequential_event in simultaneous_event:
            # pad shorter voices with a rest so that all staves share the
            # same duration
            difference = duration - sequential_event.duration
            if difference:
                sequential_event.append(music.NoteLike([], difference))
            if instrument_id == instruments.ID_PERCUSSIVE:
                sequential_event = self._prepare_percussion_sequential_event(
                    sequential_event)
            abjad_voice = converter.convert(sequential_event)
            if instrument_id not in (
                    instruments.ID_PERCUSSIVE,
                    instruments.ID_NOISE,
            ):
                self._prepare_duration_line_voice(abjad_voice)
            if instrument_id == instruments.ID_PERCUSSIVE:
                self._prepare_percussion_voice(abjad_voice)
            elif instrument_id == instruments.ID_DRONE:
                self._prepare_drone_voice(abjad_voice)
            elif instrument_id == instruments.ID_NOISE:
                self._prepare_percussion_voice(abjad_voice)
            self._prepare_voice(abjad_voice, instrument_id)
            staff.append(abjad_voice)
        staff_group.append(staff)
    abjad_score = abjad.Score([staff_group])
    return abjad_score
def _make_potential_brahms_melodies(
    event: music.NoteLike,
) -> typing.Tuple[basic.SequentialEvent, ...]:
    harmony = tuple(event.pitch_or_pitches)
    # only pitched bars with a duration of 2 or 4 get Brahms melodies
    if harmony and event.duration in (
            fractions.Fraction(4, 2),
            fractions.Fraction(8, 2),
    ):
        if event.duration == 2:
            used_melodies = (brahms.BRAHMS0.copy(), brahms.BRAHMS1.copy())
            used_melodies[1][4].duration = fractions.Fraction(1, 4)
            used_melodies[1][7].duration = fractions.Fraction(1, 4)
            used_melodies[0].set_parameter(
                "duration",
                lambda duration: duration * fractions.Fraction(2, 3))
            used_melodies[1].set_parameter(
                "duration",
                lambda duration: duration * fractions.Fraction(2, 3))
            used_melodies[0].insert(
                0, music.NoteLike([], fractions.Fraction(1, 4)))
            used_melodies[0][-1].duration = fractions.Fraction(1, 4)
            used_melodies[1][0].duration = fractions.Fraction(1, 4)
            used_melodies[1][-1].duration = fractions.Fraction(1, 4)
        else:
            used_melodies = (brahms.BRAHMS1.copy(), )
            used_melodies[0].insert(
                0, music.NoteLike([], fractions.Fraction(3, 4)))
        potential_brahms_melodies = []
        for melody_to_imitate in used_melodies:
            potential_brahms_melodies.append(
                zimmermann.events.imitate_melody(melody_to_imitate, harmony))
        for potential_brahms_melody in potential_brahms_melodies:
            _post_process_brahms_melody(potential_brahms_melody)
        return tuple(potential_brahms_melodies)
    else:
        return tuple([])
def _get_possible_melodies_for_each_bar_to_fill(
    self,
    melody_part: typing.Tuple[typing.Tuple[
        typing.Tuple[pitches.JustIntonationPitch, ...],
        fractions.Fraction,
        typing.Optional[basic.SequentialEvent[music.NoteLike]],
    ], ...],
    bars_to_fill: typing.Tuple[typing.Tuple[int, fractions.Fraction], ...],
) -> typing.Tuple[
        typing.Tuple[typing.Tuple[basic.SequentialEvent[music.NoteLike], ...],
                     ...],
        typing.Tuple[typing.Tuple[int, fractions.Fraction], ...],
]:
    possible_melodies_for_each_bar_to_fill = []
    adjusted_bars_to_fill = []
    for nth_bar, bar_duration in bars_to_fill:
        cengkok_duration = int(bar_duration * 4)
        use_four_beats = False
        if cengkok_duration == 4:
            cengkok_duration = 8
            use_four_beats = True
        available_cengkoks = cengkoks.CENGKOKS[cengkok_duration]
        current_harmony = melody_part[nth_bar][0]
        try:
            next_harmony = melody_part[nth_bar + 1][0]
        except IndexError:
            next_harmony = None
        possible_melodies = self._build_possible_melodies_for_bar(
            available_cengkoks, current_harmony, next_harmony, use_four_beats)
        n_possible_melodies = len(possible_melodies)
        if n_possible_melodies == 1:
            # only one option: fill the bar immediately
            self._event_blueprint[nth_bar] = possible_melodies[0]
        elif n_possible_melodies > 0:
            # several options: postpone the decision
            adjusted_bars_to_fill.append((nth_bar, bar_duration))
            possible_melodies_for_each_bar_to_fill.append(possible_melodies)
        else:
            # no option: fill the bar with a rest
            self._event_blueprint[nth_bar] = basic.SequentialEvent([
                music.NoteLike([], self._event_blueprint[nth_bar].duration)
            ])
    return (
        tuple(possible_melodies_for_each_bar_to_fill),
        tuple(adjusted_bars_to_fill),
    )
def load_cengkoks() -> basic.SequentialEvent[music.NoteLike]:
    blueprint = basic.SequentialEvent([
        basic.SequentialEvent([
            music.NoteLike(
                [], applied_cantus_firmus.APPLIED_CANTUS_FIRMUS.duration)
        ])
    ])
    for path in os.listdir(PICKLE_PATH):
        concatenated_path = f"{PICKLE_PATH}/{path}"
        with open(concatenated_path, "rb") as f:
            start_time, applied_melody = pickle.load(f)
        blueprint.squash_in(start_time, applied_melody)
    blueprint.tie_by(
        lambda event0, event1: event0.pitch_or_pitches == event1.pitch_or_pitches,
        event_type_to_examine=basic.SimpleEvent,
    )
    return functools.reduce(operator.add, blueprint)
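# Hedged sketch (not part of the original module): the file layout load_cengkoks
# expects - each pickle in PICKLE_PATH holds a (start_time, applied_melody) pair,
# and the melody is squashed into the blueprint at start_time.  File name and
# event content below are hypothetical.
def _example_write_cengkok_pickle() -> None:
    applied_melody = basic.SequentialEvent(
        [music.NoteLike("3/2", fractions.Fraction(1, 2))])
    start_time = fractions.Fraction(3, 2)
    with open(f"{PICKLE_PATH}/example_cengkok.pickle", "wb") as f:
        pickle.dump((start_time, applied_melody), f)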
    mutwo_pitch_to_abjad_pitch_converter=mutwo_abjad.MutwoPitchToHEJIAbjadPitchConverter())
lilypond_file = abjad.LilyPondFile(includes=["ekme-heji-ref-c.ily"])
for current_pitch, previous_pitch, next_pitch, solution, harmonicity in DATA:
    movement_pitches = tuple(
        pitches.JustIntonationPitch(exponents)
        for exponents in (previous_pitch, current_pitch, next_pitch))
    solution_pitches = tuple(
        pitches.JustIntonationPitch(exponents) for exponents in solution)
    sequential_event = basic.SequentialEvent([])
    for movement_pitch in movement_pitches:
        sequential_event.append(
            music.NoteLike(movement_pitch, fractions.Fraction(1, 3)))
    for solution_pitch in solution_pitches:
        sequential_event.append(
            music.NoteLike(solution_pitch, fractions.Fraction(1, 5)))
    voice = converter.convert(sequential_event)
    abjad.attach(
        abjad.Markup(
            "\\teeny { movement: " +
            str(' - '.join(str(pitch.ratio) for pitch in movement_pitches)) +
            ", harmonicity = " + str(round(harmonicity, 3)) + " }",
def test_convert(self):
    musical_data = ot2_basic.TaggedSimultaneousEvent(
        [
            basic.SimultaneousEvent([
                basic.SequentialEvent([
                    music.NoteLike("1/1", 1, "pp"),
                    music.NoteLike("15/16", 1, "pp"),
                    music.NoteLike([], 0.5, "pp"),
                    music.NoteLike("16/15", 0.75, "p"),
                    music.NoteLike([], 1.25, "p"),
                    music.NoteLike("9/8", 1.5, "p"),
                ])
            ]),
            basic.SimultaneousEvent([
                basic.SequentialEvent([
                    music.NoteLike("5/8", 0.5, "pp"),
                    music.NoteLike("11/16", 1, "pp"),
                    music.NoteLike([], 1, "pp"),
                    music.NoteLike("3/4", 0.75, "p"),
                    music.NoteLike([], 0.25, "p"),
                    music.NoteLike("3/4", 0.75, "p"),
                ])
            ]),
            basic.SimultaneousEvent([
                basic.SequentialEvent([
                    music.NoteLike([], 0.75, "pp"),
                    music.NoteLike("11/9", 1, "pp"),
                    music.NoteLike("4/3", 1, "pp"),
                    music.NoteLike("3/2", 0.75, "ppp"),
                    music.NoteLike([], 0.75, "ppp"),
                    music.NoteLike("3/5", 0.75, "ppp"),
                ])
            ]),
            basic.SimultaneousEvent([
                basic.SequentialEvent([
                    music.NoteLike("1/4", 4, "pp"),
                    music.NoteLike([], 1, "pp"),
                    music.NoteLike("1/4", 1, "pp"),
                ]),
                basic.SequentialEvent([
                    music.NoteLike([], 3, "pp"),
                    music.NoteLike("3/8", 2.5, "pp"),
                    music.NoteLike([], 0.5, "pp"),
                ]),
            ]),
            basic.SimultaneousEvent([
                basic.SequentialEvent([
                    music.NoteLike("g", 0.25, "pp"),
                    music.NoteLike("g", 0.5, "pp"),
                    music.NoteLike("g", 0.25, "pp"),
                    music.NoteLike("b", fractions.Fraction(1, 6), "pp"),
                    music.NoteLike("f", fractions.Fraction(1, 12), "pp"),
                    music.NoteLike("g", 1, "pp"),
                    music.NoteLike("f", 1, "pp"),
                    music.NoteLike("g", 1, "pp"),
                    music.NoteLike("g", 1, "pp"),
                ])
            ]),
            basic.SimultaneousEvent(
                [basic.SequentialEvent([music.NoteLike([], 6, "ppp")])]),
        ],
        tag_to_event_index=instruments.INSTRUMENT_ID_TO_INDEX,
    )
    abjad_score_converter = ot2_abjad.TaggedSimultaneousEventToAbjadScoreConverter((
        abjad.TimeSignature((4, 2)),
        abjad.TimeSignature((4, 2)),
        abjad.TimeSignature((4, 2)),
        abjad.TimeSignature((4, 2)),
    ))
    abjad_score = abjad_score_converter.convert(musical_data)
    lilypond_file_converter = ot2_abjad.AbjadScoreToLilypondFileConverter()
    lilypond_file = lilypond_file_converter.convert(abjad_score)
    abjad.persist.as_pdf(lilypond_file,
                         "tests/converters/frontends/score_test.pdf")
        is_addable = False

if is_addable:
    try:
        os.mkdir(chord_primes_path)
    except FileExistsError:
        pass
    try:
        os.mkdir(harmony_path)
    except FileExistsError:
        pass
    sequential_event = basic.SequentialEvent(
        [music.NoteLike(pitch_variant_combination, 8, 0.25)])
    pitch_variant_name = "s{}cp{}h_{}_{}_DIFF_{}".format(
        structural_prime,
        str(chord_primes).replace(" ", ""),
        harmony_name,
        nth_pitch_variant_combination,
        intervallv,
    )
    midi_file_converter = midi.MidiFileConverter(
        "{}/{}.mid".format(harmony_path, pitch_variant_name))
    midi_file_converter.convert(sequential_event)
    abjad_converter = mutwo_abjad.SequentialEventToAbjadVoiceConverter(
def _build_possible_melodies_for_bar(
    self,
    available_cengkoks: typing.Dict[int, typing.Tuple[typing.Tuple[
        str, typing.Tuple[int, ...]], ...]],
    current_harmony: typing.Tuple[pitches.JustIntonationPitch, ...],
    next_harmony: typing.Optional[typing.Tuple[pitches.JustIntonationPitch,
                                               ...]],
    use_four_beats: bool,
) -> typing.Tuple[basic.SequentialEvent[music.NoteLike], ...]:
    def apply_octave_mark(pitch, octave_mark):
        # an octave mark 'n' transposes the decoded pitch by n octaves
        octave_mark = int(octave_mark)
        oct_pitch = pitches.JustIntonationPitch([octave_mark])
        return pitch + oct_pitch

    if next_harmony:
        # choose a pitch shared by the current and the next harmony; its index
        # in the current harmony selects the seleh (goal tone)
        exponents_per_bar0, exponents_per_bar1 = (tuple(
            map(
                lambda pitch: pitch.normalize(mutate=False).exponents,
                pitch_or_pitches,
            )) for pitch_or_pitches in (
                current_harmony,
                next_harmony,
            ))
        common_exponents = tuple(
            set(exponents_per_bar0).intersection(set(exponents_per_bar1)))
        common_pitch_index = exponents_per_bar0.index(common_exponents[0])
    else:
        common_pitch_index = 0
    seleh = (1, 2, 3, 4, 5, 6)[common_pitch_index]
    seleh_is_four = False
    if seleh == 4:
        seleh_is_four = True
        seleh = 3
    cengkoks_to_use = available_cengkoks[seleh]
    # map the gamelan scale degrees "1".."6" to the pitches of the current harmony
    decodex = {
        java_pitch_index: pitch
        for java_pitch_index, pitch in zip("1 2 3 4 5 6".split(" "),
                                           current_harmony)
    }
    pitch_converter = mmml.MMMLPitchesConverter(
        mmml.MMMLSinglePitchConverter(decodex, apply_octave_mark))
    possible_melodies = []
    for cengkok_pitches, cengkok_rhythms in cengkoks_to_use:
        cengkok_pitches = pitch_converter.convert(cengkok_pitches)
        if seleh_is_four:
            # replace the final tone by scale degree "4", registered to the
            # octave of the decoded final tone
            cengkok_pitches = cengkok_pitches[:-1] + ((decodex["4"].register(
                cengkok_pitches[-1][0].octave, mutate=False), ), )
        melody = basic.SequentialEvent([
            music.NoteLike(pitch, fractions.Fraction(rhythm, 4))
            for pitch, rhythm in zip(cengkok_pitches, cengkok_rhythms)
        ])
        if use_four_beats:
            melody.cut_off(0, 1)
        self._apply_vanitas_rhythm_grid_on_melody(melody)

        def register_pitches0(pitch_or_pitches):
            return [
                pitch - pitches.JustIntonationPitch("2/1")
                for pitch in pitch_or_pitches
            ]

        def register_pitches1(pitch_or_pitches):
            return [
                pitch + pitches.JustIntonationPitch("2/1")
                for pitch in pitch_or_pitches
            ]

        # offer the melody in three registrations (as is, one octave down,
        # one octave up) ...
        melodies_to_add = (
            melody,
            melody.set_parameter("pitch_or_pitches",
                                 register_pitches0,
                                 mutate=False),
            melody.set_parameter("pitch_or_pitches",
                                 register_pitches1,
                                 mutate=False),
        )
        # ... and keep only those which fit the ambitus of the sustaining
        # instruments
        for melody in melodies_to_add:
            available_pitches = functools.reduce(
                operator.add, melody.get_parameter("pitch_or_pitches"))
            ambitus = (
                instruments.AMBITUS_SUSTAINING_INSTRUMENTS_JUST_INTONATION_PITCHES
            )
            if (min(available_pitches) >= ambitus.borders[0]
                    and max(available_pitches) <= ambitus.borders[1]):
                possible_melodies.append(melody)
    return tuple(possible_melodies)
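# Hedged illustration (not part of the original module): the decodex built in
# _build_possible_melodies_for_bar maps the gamelan scale degrees "1".."6" onto
# the pitches of the current harmony, and apply_octave_mark shifts a decoded
# pitch by whole octaves (a JustIntonationPitch made from the single exponent
# [n] is the ratio 2**n).  The harmony below is hypothetical.
def _example_decodex():
    current_harmony = (
        pitches.JustIntonationPitch("1/1"),
        pitches.JustIntonationPitch("9/8"),
        pitches.JustIntonationPitch("5/4"),
        pitches.JustIntonationPitch("3/2"),
        pitches.JustIntonationPitch("7/4"),
        pitches.JustIntonationPitch("15/8"),
    )
    decodex = {
        java_pitch_index: pitch
        for java_pitch_index, pitch in zip("1 2 3 4 5 6".split(" "),
                                           current_harmony)
    }
    # scale degree "3" transposed one octave up
    return decodex["3"] + pitches.JustIntonationPitch([1])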