def synthesize_applied_cantus_firmus(cantus_firmus: basic.SequentialEvent):
    from mutwo.converters.frontends import midi

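    # Drone: copy the cantus firmus, transposing the first pitch of every
    # sounding note an octave down ("2/1"); rests stay as they are.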
    drone = basic.SequentialEvent([
        music.NoteLike(
            note.pitch_or_pitches[0] - pitches.JustIntonationPitch("2/1"),
            note.duration,
        ) if note.pitch_or_pitches else note for note in cantus_firmus
    ])
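    # Melody: arpeggiate each chord as a cycle of 1/6 note values
    # (six attacks per whole note); rests are copied unchanged.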
    melody = basic.SequentialEvent([])
    for note in cantus_firmus:
        if note.pitch_or_pitches:
            pitches_cycle = itertools.cycle(sorted(note.pitch_or_pitches))
            for _ in range(int(note.duration * 6)):
                melody.append(
                    music.NoteLike(next(pitches_cycle),
                                   fractions.Fraction(1, 6)))
        else:
            melody.append(copy.copy(note))

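    # Render both layers to MIDI, scaling all durations by four
    # without mutating the original events (mutate=False).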
    for name, sequential_event in (("drone", drone), ("melody", melody)):
        converter = midi.MidiFileConverter(
            "builds/materials/applied_cantus_firmus_{}.mid".format(name))
        converter.convert(
            sequential_event.set_parameter("duration",
                                           lambda duration: duration * 4,
                                           mutate=False))
Example #2
def _apply_brahms_melodies_if_possible(
    cantus_firmus: basic.SequentialEvent[music.NoteLike],
) -> basic.SequentialEvent[basic.SequentialEvent[music.NoteLike]]:
    applied_brahms_melodies = basic.SequentialEvent([])
    current_rest_duration = 0
    last_bar_had_pitches = False
    for current_bar, next_bar in zip(cantus_firmus, cantus_firmus[1:]):
        if current_bar.pitch_or_pitches:
            if current_rest_duration != 0:
                applied_brahms_melodies.append(
                    basic.SequentialEvent([
                        music.NoteLike([], current_rest_duration +
                                       fractions.Fraction(1, 4))
                    ]))
                current_rest_duration = 0
            potential_brahms_melodies = _make_potential_brahms_melodies(
                current_bar)
            applied_brahms_melodies.append(
                _select_from_potential_brahms_melodies(
                    potential_brahms_melodies, current_bar, next_bar))
            last_bar_had_pitches = True
        else:
            if last_bar_had_pitches:
                current_rest_duration -= fractions.Fraction(1, 4)
            current_rest_duration += current_bar.duration
            last_bar_had_pitches = False

    applied_brahms_melodies.append(
        basic.SequentialEvent(
            [music.NoteLike([],
                            next_bar.duration - fractions.Fraction(1, 4))]))

    return applied_brahms_melodies
Example #3
def distribute_phrases(splitted_parts: typing.Tuple[phrases.Phrases, ...]):
    distributed_phrases = basic.SequentialEvent([])
    splitted_parts_as_sequential_event = basic.SequentialEvent(
        basic.SequentialEvent(part) for part in splitted_parts)
    duration = splitted_parts_as_sequential_event.duration
    for absolute_time_of_phrase, phrase in zip(
            splitted_parts_as_sequential_event.absolute_times,
            splitted_parts_as_sequential_event,
    ):
        is_first = True
        for absolute_time_of_element, phrase_element in zip(
                phrase.absolute_times, phrase):
            absolute_position = (absolute_time_of_phrase +
                                 absolute_time_of_element) / duration

            pause_duration = distributed_phrases_constants.PAUSE_DURATION_BETWEEN_PHRASES_DECIDER.gamble_at(
                absolute_position)

            if pause_duration == 0:
                extend_to_previous = True
            else:
                pause_duration = pause_duration.value_at(absolute_position)
                extend_to_previous = False

            if is_first:
                extend_to_previous = False
                is_first = False

            if extend_to_previous:
                distributed_phrases[-1].extend(phrase_element)
            else:
                distributed_phrases.append(phrase_element)

    return distributed_phrases
Example #4
    def find_solutions(
        beats_per_part: int,
        units_per_part: int,
        compounds_per_part: int,
        division_size: int,
    ) -> tuple:
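        # Enumerate euclidean distributions of the compounds over
        # 1..compounds_per_part metres, keeping only those counts that
        # divide the units evenly.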
        possible_cpps = tuple(
            toussaint.euclidean(compounds_per_part, n)
            for n in range(1, compounds_per_part + 1)
            if units_per_part % n == 0)
        solutions = []
        for ccp in possible_cpps:
            length_ccp = len(ccp)
            tests_if_addable = (
                units_per_part % length_ccp == 0,
                beats_per_part % length_ccp == 0,
            )

            units_per_metre = units_per_part // length_ccp
            beats_per_metre = beats_per_part // length_ccp

            if all(tests_if_addable):

                is_still_addable = True
                mets = []
                for item in ccp:
                    unit_divisions = toussaint.euclidean(units_per_metre, item)
                    item_tests = (beats_per_metre % item == 0,
                                  all(unit_divisions))

                    if all(item_tests):
                        available_unit_sizes_per_compound = beats_per_metre // item
                        comps = []
                        for amount_units in unit_divisions:
                            u_sizes = toussaint.euclidean(
                                available_unit_sizes_per_compound,
                                amount_units)
                            if all(u_sizes):
                                comps.append(
                                    basic.SequentialEvent([
                                        basic.SequentialEvent([
                                            basic.SimpleEvent(division_size)
                                            for _ in range(s)
                                        ]) for s in u_sizes
                                    ]))
                            else:
                                is_still_addable = False

                        mets.append(basic.SequentialEvent(comps))

                    else:
                        is_still_addable = False

                if is_still_addable:
                    solutions.append(basic.SequentialEvent(mets))

        return tuple(solutions)
def _make_families(
    family_data_per_part: typing.Tuple[typing.Tuple[typing.Tuple[int, ...],
                                                    float], ...],
    composition_structure: structure.StructureType,
) -> basic.SequentialEvent[typing.Union[basic.SimpleEvent,
                                        families.FamilyOfPitchCurves]]:
    import progressbar

    families_for_all_parts = basic.SequentialEvent([])
    with progressbar.ProgressBar(max_value=len(family_data_per_part)) as bar:
        nth_part = 0
        for part0, part1, family_data in zip(
                basic.SequentialEvent([None]) + composition_structure,
                composition_structure,
                family_data_per_part,
        ):
            duration_in_seconds = part1[0].duration
            duration_of_following_cengkok_part_in_seconds = part1[
                1].duration_in_seconds
            if part0:
                last_pitch_of_last_melodic_phrase = part0[1][-1].root
            else:
                last_pitch_of_last_melodic_phrase = pitches.JustIntonationPitch(
                    "1/1")

            first_pitch_of_next_melodic_phrase = part1[1][0].root

            n_root_notes_per_family, density, *rest_distribution = family_data

            if rest_distribution:
                rest_distribution = rest_distribution[0]

            family_structure_for_current_part = _family_data_to_families(
                last_pitch_of_last_melodic_phrase,
                first_pitch_of_next_melodic_phrase,
                duration_in_seconds,
                duration_of_following_cengkok_part_in_seconds,
                n_root_notes_per_family,
                density,
                rest_distribution,
            )

            assert round(family_structure_for_current_part.duration,
                         5) == round(
                             duration_in_seconds +
                             duration_of_following_cengkok_part_in_seconds, 5)

            families_for_all_parts.extend(family_structure_for_current_part)
            bar.update(nth_part)
            nth_part += 1

    return families_for_all_parts
Example #6
def _select_from_potential_brahms_melodies(
    potential_brahms_melodies: typing.Tuple[basic.SequentialEvent, ...],
    current_bar: music.NoteLike,
    next_bar: music.NoteLike,
) -> basic.SequentialEvent[music.NoteLike]:
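    # Pitches shared by the current and the next bar (compared in normalized
    # form) can act as connection pitches between the two bars.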
    pitches_per_bar0, pitches_per_bar1 = (set(
        map(lambda pitch: pitch.normalize(mutate=False).exponents,
            pitch_or_pitches)) for pitch_or_pitches in (
                current_bar.pitch_or_pitches,
                next_bar.pitch_or_pitches,
            ))
    common_pitches = tuple(
        pitches.JustIntonationPitch(pitch)
        for pitch in pitches_per_bar0.intersection(pitches_per_bar1))
    is_last_pitch_connection_pitch_per_brahms_melody = tuple(
        brahms_melody[-1].pitch_or_pitches[0].normalize(
            mutate=False) in common_pitches
        for brahms_melody in potential_brahms_melodies)
    if any(is_last_pitch_connection_pitch_per_brahms_melody):
        nth_melody_is_possible = is_last_pitch_connection_pitch_per_brahms_melody.index(
            True)
        return potential_brahms_melodies[nth_melody_is_possible]
    else:
        return basic.SequentialEvent(
            [music.NoteLike([], current_bar.duration)])
Example #7
def _render_soundfile_for_keyboard(instrument_id, filtered_time_brackets):
    time_brackets_converter = (converters.symmetrical.time_brackets.
                               TimeBracketsToEventConverter(instrument_id))
    converted_time_brackets = time_brackets_converter.convert(
        filtered_time_brackets)
    if converted_time_brackets:
        n_sequential_events = max(
            len(simultaneous_event)
            for simultaneous_event in converted_time_brackets
            if isinstance(simultaneous_event, basic.SimultaneousEvent))
        simultaneous_event = basic.SimultaneousEvent([
            basic.SequentialEvent([]) for _ in range(n_sequential_events)
        ])
        for event in converted_time_brackets:
            if isinstance(event, basic.SimpleEvent):
                rest = basic.SimpleEvent(event.duration)
                for seq in simultaneous_event:
                    seq.append(rest)
            else:
                for ev, sequential_event in zip(event, simultaneous_event):
                    ev = PLAYING_INDICATORS_CONVERTER.convert(ev)
                    sequential_event.extend(ev)
        for nth_seq_event, sequential_event in enumerate(
                simultaneous_event):
            midi_file_converter = (
                ot2_converters.frontends.midi.
                KeyboardEventToMidiFileConverter(nth_seq_event))
            midi_file_converter.convert(sequential_event)
Example #8
def _get_variable_data(
    melody_part: typing.Tuple[typing.Tuple[
        typing.Tuple[pitches.JustIntonationPitch, ...], fractions.Fraction,
        typing.Optional[basic.SequentialEvent[music.NoteLike]], ], ..., ],
) -> typing.Tuple[typing.Tuple[typing.Tuple[int, fractions.Fraction], ...],
                  basic.SequentialEvent]:
    event_blueprint = basic.SequentialEvent([])
    bars_to_fill = []
    for nth_bar, bar in enumerate(melody_part):
        _, duration, brahms_melody = bar
        if brahms_melody:
            brahms_melody_pitches = functools.reduce(
                operator.add,
                brahms_melody.get_parameter("pitch_or_pitches"))
        if brahms_melody and (
                min(brahms_melody_pitches) >= instruments.
                AMBITUS_SUSTAINING_INSTRUMENTS_JUST_INTONATION_PITCHES.
                borders[0] and max(brahms_melody_pitches) <= instruments.
                AMBITUS_SUSTAINING_INSTRUMENTS_JUST_INTONATION_PITCHES.
                borders[1]):
            event_blueprint.append(brahms_melody)
        else:
            bars_to_fill.append((nth_bar, duration))
            event_blueprint.append(basic.SimpleEvent(duration))
    return tuple(bars_to_fill), event_blueprint
Example #9
    def _generate_rhythmical_data(
        fundamental: pitches.JustIntonationPitch,
        n_periods_of_fundamental_per_beat: int,
        n_repetitions_of_rhythm: int,
        partial: int,
    ) -> typing.Tuple[typing.Any]:
        """Generate rhythmical data for the respective partial.

        The rhythmical data contain the following information:
            1. How often the rhythm get repeated (int)
            2. How many periods one beat last (int)
            3. The respective rhythm with duration values in seconds
               (basic.SequentialEvent[basic.SimpleEvent])
            4. Indispensability of each beat (typing.Tuple[int])
        """

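        # One period of the partial lasts 1 / (fundamental frequency * partial
        # number); a beat spans n_periods_of_fundamental_per_beat such periods
        # and the bar contains as many beats as the partial number.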
        duration_of_one_period = 1 / (fundamental.frequency * partial)
        duration_of_one_beat = (duration_of_one_period *
                                n_periods_of_fundamental_per_beat)
        rhythm = basic.SequentialEvent(
            [basic.SimpleEvent(duration_of_one_beat) for _ in range(partial)])
        if partial == 1:
            indispensability_for_bar = (0, )
        else:
            indispensability_for_bar = indispensability.indispensability_for_bar(
                (partial, ))

        return (
            n_repetitions_of_rhythm,
            n_periods_of_fundamental_per_beat,
            rhythm,
            indispensability_for_bar,
        )
def _render_vibrations_to_filtered_isis_files(
    nested_vibrations: basic.SimultaneousEvent,
):
    threads = []
    for nth_cycle, cycle in enumerate(nested_vibrations):
        sample_player_event = basic.SimpleEvent(sixtycombinations.constants.DURATION)
        sample_player_event.path = "{}/{}.wav".format(
            sixtycombinations.constants.ISIS_FILES_BUILD_PATH, nth_cycle
        )

        for nth_speaker, speaker_data in enumerate(cycle):
            adapted_speaker_data = basic.SimultaneousEvent(
                [basic.SequentialEvent([sample_player_event])] + speaker_data[:]
            )

            sound_file_converter = sixtycombinations.converters.frontends.VibrationsToFilteredIsisSoundFileConverter(
                nth_cycle, nth_speaker
            )
            # Pass converter and data explicitly; a lambda would re-read the
            # loop variables after they have been rebound by later iterations.
            thread = threading.Thread(
                target=sound_file_converter.convert,
                args=(adapted_speaker_data,),
            )
            thread.start()
            threads.append(thread)

    while any(th.is_alive() for th in threads):
        time.sleep(0.5)
Example #11
def make_rhythms(
    a: typing.Tuple[fractions.Fraction, fractions.Fraction],
    b: typing.Tuple[fractions.Fraction, fractions.Fraction],
    c: typing.Tuple[fractions.Fraction, fractions.Fraction],
    d: typing.Tuple[fractions.Fraction, fractions.Fraction],
    is_inversed: bool = False,
) -> typing.Dict[int, basic.SequentialEvent[basic.SimpleEvent]]:
    if is_inversed:
        a = tuple(reversed(a))
        b = tuple(reversed(b))
        c = tuple(reversed(c))
        d = tuple(reversed(d))

    rhythms = {
        2: basic.SequentialEvent(
            [basic.SimpleEvent(fractions.Fraction(1, 4)) for _ in range(4)]
        ),
        4: basic.SequentialEvent(
            [
                basic.SimpleEvent(duration)
                for duration in (
                    (fractions.Fraction(1, 4),)
                    + a
                    + (fractions.Fraction(1, 4), fractions.Fraction(1, 4))
                    + tuple(reversed(a))
                    + (fractions.Fraction(1, 4),)
                )
            ]
        ),
        8: basic.SequentialEvent(
            [
                basic.SimpleEvent(duration)
                for duration in (
                    (fractions.Fraction(1, 4),)
                    + b
                    + c
                    + d
                    + (fractions.Fraction(1, 4), fractions.Fraction(1, 4))
                    + tuple(reversed(d))
                    + tuple(reversed(c))
                    + tuple(reversed(b))
                    + (fractions.Fraction(1, 4),)
                )
            ]
        ),
    }
    return rhythms
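
A hedged usage sketch (the fraction pairs below are made-up values; it only relies on a SequentialEvent's duration being the sum of its children's durations and on fractions.Fraction being the same Fraction type used above):

half = (fractions.Fraction(1, 8), fractions.Fraction(1, 8))
rhythms = make_rhythms(half, half, half, half)
assert rhythms[2].duration == 1
assert rhythms[4].duration == fractions.Fraction(3, 2)
assert rhythms[8].duration == fractions.Fraction(5, 2)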
Example #12
def convert_nested_loop_to_sequential_event(nested_loop):
    result = basic.SequentialEvent([])
    for element in nested_loop:
        if isinstance(element, tuple):
            result.append(convert_nested_loop_to_sequential_event(element))
        else:
            result.append(basic.SimpleEvent(element))
    return result
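
A minimal usage sketch (the nested tuple is a hypothetical input; only basic from mutwo.events is assumed, as imported elsewhere in these examples):

from mutwo.events import basic

nested_loop = (1, (0.5, 0.5), 2)
event = convert_nested_loop_to_sequential_event(nested_loop)
assert isinstance(event[1], basic.SequentialEvent)
assert event.duration == 4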
Example #13
def _make_drone(
    melody_part: typing.Tuple[typing.Tuple[
        typing.Tuple[pitches.JustIntonationPitch, ...], fractions.Fraction,
        typing.Optional[basic.SequentialEvent[music.NoteLike]], ], ..., ],
) -> basic.SequentialEvent[music.NoteLike]:
    drone = basic.SequentialEvent([])
    for harmony, duration, _ in melody_part:
        drone.append(
            music.NoteLike(harmony[0].register(-2, mutate=False),
                           duration))
    drone[0].duration -= fractions.Fraction(1, 4)
    drone[-1].duration += fractions.Fraction(1, 4)
    return drone
Example #14
def load_cengkoks() -> basic.SequentialEvent[music.NoteLike]:
    blueprint = basic.SequentialEvent([
        basic.SequentialEvent([
            music.NoteLike(
                [], applied_cantus_firmus.APPLIED_CANTUS_FIRMUS.duration)
        ])
    ])

    for path in os.listdir(PICKLE_PATH):
        concatenated_path = f"{PICKLE_PATH}/{path}"
        with open(concatenated_path, "rb") as f:
            start_time, applied_melody = pickle.load(f)

        blueprint.squash_in(start_time, applied_melody)

    blueprint.tie_by(
        lambda event0, event1: event0.pitch_or_pitches == event1.
        pitch_or_pitches,
        event_type_to_examine=basic.SimpleEvent,
    )

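    # Concatenate the nested sequential events into the single flat sequence
    # that gets returned.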
    return functools.reduce(operator.add, blueprint)
def load_cantus_firmus():
    import music21

    m21_lasso_measures = music21.converter.parse(
        # "ot2/analysis/data/lasso_cantus_firmus.mxl"
        "ot2/analysis/data/lasso_adjusted_cantus_firmus.mxl"
    )[1].getElementsByClass("Measure")
    mutwo_lasso = basic.SequentialEvent([])

    current_root_pitch = None
    previous_event = None
    melodic_pitch_counter = 0

    for measure in m21_lasso_measures:
        for event in measure:
            if isinstance(event, music21.note.GeneralNote):
                event_duration = fractions.Fraction(event.duration.quarterLength) / 4
                if previous_event and previous_event.isRest and not event.isRest:
                    current_root_pitch = cantus_firmus_constants.START_PITCH_TO_ROOT[
                        event.pitch.name.lower()
                    ]

                if event.isRest:
                    melodic_pitch_counter = 0
                    mutwo_lasso.append(music.NoteLike([], event_duration))
                else:
                    try:
                        ji_ratios = cantus_firmus_constants.INTERVALS[
                            melodic_pitch_counter
                        ]
                    except IndexError:
                        ji_ratios = cantus_firmus_constants.INTERVALS[-1]

                    duration_per_ratio = event_duration / len(ji_ratios)
                    for ji_ratio in ji_ratios:
                        pitch = (
                            ji_ratio
                            + current_root_pitch
                            - pitches.JustIntonationPitch("2/1")
                        )
                        note = music.NoteLike([pitch], duration_per_ratio)
                        mutwo_lasso.append(note)
                    melodic_pitch_counter += 1

                previous_event = event

    return mutwo_lasso
Example #16
def imitate_melody(
    melody_to_imitate: basic.SequentialEvent[music.NoteLike],
    harmony: typing.Sequence[pitches.JustIntonationPitch],
) -> basic.SequentialEvent[music.NoteLike]:
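    # Keep the original rhythm and rest placement; every sounding pitch is
    # replaced by its imitation within the given harmony.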
    melodic_pitches = tuple(pitch_or_pitches[0] for pitch_or_pitches in
                            melody_to_imitate.get_parameter("pitch_or_pitches")
                            if pitch_or_pitches)
    transformed_pitches = iter(imitations.imitate(melodic_pitches, harmony))
    imitated_melody = basic.SequentialEvent([
        music.NoteLike(next(transformed_pitches), duration)
        if original_pitch else music.NoteLike([], duration)
        for original_pitch, duration in zip(
            melody_to_imitate.get_parameter("pitch_or_pitches"),
            melody_to_imitate.get_parameter("duration"),
        )
    ])
    return imitated_melody
    def _tie_rests(sequential_event: basic.SequentialEvent):
        new_sequential_event = []
        for nth_event, event in enumerate(sequential_event):
            if nth_event != 0:
                tests = (
                    AnnotatedNoteLikesToSoundFileConvert._is_rest(
                        new_sequential_event[-1])
                    or new_sequential_event[-1].vowel == "_",
                    AnnotatedNoteLikesToSoundFileConvert._is_rest(event),
                )

                if all(tests):
                    new_sequential_event[-1].duration += event.duration
                else:
                    new_sequential_event.append(event)
            else:
                new_sequential_event.append(event)
        return basic.SequentialEvent(new_sequential_event)
def make_applied_cantus_firmus(
    cantus_firmus: basic.SequentialEvent,
) -> basic.SequentialEvent[music.NoteLike]:
    transitional_harmonies = _load_transitional_harmonies()
    cantus_firmus = _process_cantus_firmus(cantus_firmus)
    previous_pitch_or_pitches = None
    applied_cantus_firmus = basic.SequentialEvent([])
    roots = (cantus_firmus_constants.START_PITCH_TO_ROOT[start_pitch]
             for start_pitch in "c e a c e a d g c e a".split(" "))
    current_root = None
    for pitch_or_pitches, next_pitch_or_pitches, duration in zip(
            cantus_firmus.get_parameter("pitch_or_pitches"),
            cantus_firmus.get_parameter("pitch_or_pitches")[1:] + (None, ),
            cantus_firmus.get_parameter("duration"),
    ):
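        # For every sounding bar, look up a transitional harmony from the
        # (current, previous, next) movement relative to the current root and
        # transpose it onto the current pitch.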
        if pitch_or_pitches:
            if current_root is None:
                current_root = next(roots)
            current_pitch = pitch_or_pitches[0]
            previous_pitch = (previous_pitch_or_pitches[0]
                              if previous_pitch_or_pitches else current_pitch)
            next_pitch = (next_pitch_or_pitches[0]
                          if next_pitch_or_pitches else current_pitch)
            harmony = [
                current_pitch + pitch
                for pitch in transitional_harmonies[tuple(
                    (pitch - current_root).normalize(mutate=False).exponents
                    for pitch in (current_pitch, previous_pitch, next_pitch))]
            ]

            # MAKE EVERYTHING MORE READABLE (go down to pitch 'e')
            harmony = [
                pitch - pitches.JustIntonationPitch('3/2') for pitch in harmony
            ]
            event = music.NoteLike(harmony, duration)

        else:
            event = music.NoteLike([], duration)
            current_root = None

        applied_cantus_firmus.append(event)
        previous_pitch_or_pitches = pitch_or_pitches
    return applied_cantus_firmus
Example #19
    def _initialise_rests(self, duration: numbers.Number) -> None:
        test_point_maker = infit.Uniform(7, 12)
        test_points = []
        while sum(test_points) < duration:
            test_points.append(next(test_point_maker))

        test_points[-1] -= sum(test_points) - duration

        rests = basic.SequentialEvent([])
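        # Walk over the randomly sized test points and decide at each position
        # whether to insert a rest; a rest never exceeds its test point.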
        for absolute_time, test_point_duration in zip(
                tools.accumulate_from_zero(test_points), test_points):
            absolute_position_on_timeline = absolute_time / duration
            activate_rest = self.get_value_of_at(
                "activate_rest", absolute_position_on_timeline)
            if activate_rest:
                rest_duration = self.get_value_of_at(
                    "rest_duration", absolute_position_on_timeline)
                if rest_duration > test_point_duration:
                    rest_duration = test_point_duration
                rest = basic.SimpleEvent(rest_duration)
                rest.is_rest = True
                rests.append(rest)
            else:
                rest_duration = 0

            remaining_playing = test_point_duration - rest_duration
            if remaining_playing:
                playing = basic.SimpleEvent(remaining_playing)
                playing.is_rest = False
                rests.append(playing)

        # rests_duration = 0
        # while rests_duration < duration:
        #     absolute_time = rests.duration / duration
        #     rest_duration = self.get_value_of_at("rest_duration", absolute_time)
        #     event = basic.SimpleEvent(rest_duration)
        #     event.is_rest = self.get_value_of_at("activate_rest", absolute_time)
        #     rests_duration += rest_duration
        #     rests.append(event)

        # difference = rests_duration - duration
        # rests[-1].duration -= difference
        self._rests = rests
    def _make_remix_events(
        split_annotated_note_likes: typing.Tuple[typing.Tuple[
            basic.SequentialEvent[typing.Union[basic.SimpleEvent,
                                               classes.AnnotatedNoteLike]],
            float,  # start
            str,  # path
        ]],
    ) -> basic.SimultaneousEvent[basic.SequentialEvent[basic.SimpleEvent]]:
        remix_events = basic.SimultaneousEvent([])
        for annotated_note_likes, start, path in split_annotated_note_likes:
            sequential_event = basic.SequentialEvent([])
            if start > 0:
                rest_before_remix_event = basic.SimpleEvent(start)
                sequential_event.append(rest_before_remix_event)
            remix_event = basic.SimpleEvent(annotated_note_likes.duration)
            remix_event.path = "{}.wav".format(path)
            sequential_event.append(remix_event)
            remix_events.append(sequential_event)

        return remix_events
Example #21
    def _build_segments(
        domain_start: float,
        domain_end: float,
        period_size_maker: typing.Iterator,
        period_function_maker: typing.Iterator,
    ) -> basic.SequentialEvent[ContinousEnvelopeSegment]:
        domain_duration = domain_end - domain_start

        collected_duration = 0
        segments = basic.SequentialEvent([])

        while collected_duration < domain_duration:
            period_size = next(period_size_maker)
            period_function = next(period_function_maker)
            segment = ContinousEnvelopeSegment(period_size, period_function)
            segments.append(segment)
            collected_duration += period_size

        # Trim the overshoot so that the summed duration matches the domain.
        segments[-1].duration -= segments.duration - domain_duration
        return segments
Example #22
    def _get_possible_melodies_for_each_bar_to_fill(
        self,
        melody_part: typing.Tuple[typing.Tuple[
            typing.Tuple[pitches.JustIntonationPitch, ...], fractions.Fraction,
            typing.Optional[basic.SequentialEvent[music.NoteLike]], ], ..., ],
        bars_to_fill: typing.Tuple[typing.Tuple[int, fractions.Fraction], ...],
    ) -> typing.Tuple[typing.Tuple[basic.SequentialEvent[music.NoteLike], ...],
                      ...]:
        possible_melodies_for_each_bar_to_fill = []
        adjusted_bars_to_fill = []
        for nth_bar, bar_duration in bars_to_fill:
            cengkok_duration = int(bar_duration * 4)
            use_four_beats = False
            if cengkok_duration == 4:
                cengkok_duration = 8
                use_four_beats = True
            available_cengkoks = cengkoks.CENGKOKS[cengkok_duration]
            current_harmony = melody_part[nth_bar][0]
            try:
                next_harmony = melody_part[nth_bar + 1][0]
            except IndexError:
                next_harmony = None
            possible_melodies = self._build_possible_melodies_for_bar(
                available_cengkoks, current_harmony, next_harmony,
                use_four_beats)
            n_possible_melodies = len(possible_melodies)
            if n_possible_melodies == 1:
                self._event_blueprint[nth_bar] = possible_melodies[0]
            elif n_possible_melodies > 0:
                adjusted_bars_to_fill.append((nth_bar, bar_duration))
                possible_melodies_for_each_bar_to_fill.append(
                    possible_melodies)
            else:
                self._event_blueprint[nth_bar] = basic.SequentialEvent([
                    music.NoteLike([], self._event_blueprint[nth_bar].duration)
                ])

        return (
            tuple(possible_melodies_for_each_bar_to_fill),
            tuple(adjusted_bars_to_fill),
        )
Example #23
def _render_soundfile_or_midi_file_for_instrument(
    instrument_id,
    filtered_time_brackets,
    midi_file_converter,
    return_pitch: bool = False,
):
    time_brackets_converter = (converters.symmetrical.time_brackets.
                               TimeBracketsToEventConverter(instrument_id))
    converted_time_brackets = time_brackets_converter.convert(
        filtered_time_brackets)
    if converted_time_brackets:
        n_sequential_events = max(
            len(simultaneous_event)
            for simultaneous_event in converted_time_brackets
            if isinstance(simultaneous_event, basic.SimultaneousEvent))
        simultaneous_event = basic.SimultaneousEvent(
            [basic.SequentialEvent([]) for _ in range(n_sequential_events)])
        for event in converted_time_brackets:
            if isinstance(event, basic.SimpleEvent):
                rest = basic.SimpleEvent(event.duration)
                for seq in simultaneous_event:
                    seq.append(rest)
            else:
                for ev, sequential_event in zip(event, simultaneous_event):
                    ev = PLAYING_INDICATORS_CONVERTER.convert(ev)
                    sequential_event.extend(ev)

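                # Voices this time bracket does not use get a rest of equal
                # length, so all sequential events keep the same total duration.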
                event_duration = event.duration
                missing_sequential_events = n_sequential_events - len(event)
                if missing_sequential_events:
                    for sequential_event in simultaneous_event[
                            -missing_sequential_events:]:
                        sequential_event.append(
                            basic.SimpleEvent(event_duration))

        if return_pitch:
            simultaneous_event.set_parameter("return_pitch", True)

        midi_file_converter.convert(simultaneous_event)
        (1, None, 0, ("t",), "_"),
        # ante
        (1, 0, 1, tuple([]), "a"),
        (1, 0, 0, ("n", "t",), "e"),
        # nihil
        (1, 0, 1, ("n",), "i"),
        (1, 0, 1, ("H",), "i"),
        (1, None, 0, ("l",), "_"),
    ),
)


SINGING_PHRASES = []

for raw_phrase in RAW_PHRASES:
    converted_phrase = basic.SequentialEvent([])

    for duration, pitch, volume, consonants, vowel in raw_phrase:
        event = basic.SimpleEvent(duration)
        event.pitch = pitch
        event.volume = volumes.DirectVolume(volume)
        event.consonants = consonants
        event.vowel = vowel
        converted_phrase.append(event)

    for event_index, event in enumerate(converted_phrase):
        previous_pitch = None
        if event.pitch:
            for previous_event in reversed(converted_phrase[:event_index]):
                if previous_event.pitch:
                    previous_pitch = previous_event.pitch
Example #25
                            is_addable = False

                    if is_addable:

                        try:
                            os.mkdir(chord_primes_path)
                        except FileExistsError:
                            pass

                        try:
                            os.mkdir(harmony_path)
                        except FileExistsError:
                            pass

                        sequential_event = basic.SequentialEvent([
                            music.NoteLike(pitch_variant_combination, 8, 0.25)
                        ])
                        pitch_variant_name = "s{}cp{}h_{}_{}_DIFF_{}".format(
                            structural_prime,
                            str(chord_primes).replace(" ", ""),
                            harmony_name,
                            nth_pitch_variant_combination,
                            intervallv,
                        )

                        midi_file_converter = midi.MidiFileConverter(
                            "{}/{}.mid".format(harmony_path,
                                               pitch_variant_name))
                        midi_file_converter.convert(sequential_event)

                        abjad_converter = mutwo_abjad.SequentialEventToAbjadVoiceConverter(
Example #26
"""

from mutwo.events import basic

from sixtycombinations.classes import Partial
from sixtycombinations.constants import GROUPS
from sixtycombinations.constants import PITCH_TO_LOUDSPEAKER_MAPPING
from sixtycombinations.constants import RING_POSITION_TO_LOUDSPEAKER

NESTED_PARTIALS = basic.SimultaneousEvent(
    [
        basic.SimultaneousEvent(
            [
                basic.SimultaneousEvent(
                    [basic.SequentialEvent([]), basic.SequentialEvent([])]
                )
                for nth_speaker in cycle
            ]
        )
        for cycle in RING_POSITION_TO_LOUDSPEAKER
    ]
)

for nth_cycle, cycle in enumerate(GROUPS):
    is_first = True
    for nth_group, group in enumerate(cycle):
        if is_first:
            n_phases_rest = group.attack + group.sustain
            is_first = False
        else:
    def test_convert(self):
        musical_data = ot2_basic.TaggedSimultaneousEvent(
            [
                basic.SimultaneousEvent([
                    basic.SequentialEvent([
                        music.NoteLike("1/1", 1, "pp"),
                        music.NoteLike("15/16", 1, "pp"),
                        music.NoteLike([], 0.5, "pp"),
                        music.NoteLike("16/15", 0.75, "p"),
                        music.NoteLike([], 1.25, "p"),
                        music.NoteLike("9/8", 1.5, "p"),
                    ])
                ]),
                basic.SimultaneousEvent([
                    basic.SequentialEvent([
                        music.NoteLike("5/8", 0.5, "pp"),
                        music.NoteLike("11/16", 1, "pp"),
                        music.NoteLike([], 1, "pp"),
                        music.NoteLike("3/4", 0.75, "p"),
                        music.NoteLike([], 0.25, "p"),
                        music.NoteLike("3/4", 0.75, "p"),
                    ])
                ]),
                basic.SimultaneousEvent([
                    basic.SequentialEvent([
                        music.NoteLike([], 0.75, "pp"),
                        music.NoteLike("11/9", 1, "pp"),
                        music.NoteLike("4/3", 1, "pp"),
                        music.NoteLike("3/2", 0.75, "ppp"),
                        music.NoteLike([], 0.75, "ppp"),
                        music.NoteLike("3/5", 0.75, "ppp"),
                    ])
                ]),
                basic.SimultaneousEvent([
                    basic.SequentialEvent([
                        music.NoteLike("1/4", 4, "pp"),
                        music.NoteLike([], 1, "pp"),
                        music.NoteLike("1/4", 1, "pp"),
                    ]),
                    basic.SequentialEvent([
                        music.NoteLike([], 3, "pp"),
                        music.NoteLike("3/8", 2.5, "pp"),
                        music.NoteLike([], 0.5, "pp"),
                    ]),
                ]),
                basic.SimultaneousEvent([
                    basic.SequentialEvent([
                        music.NoteLike("g", 0.25, "pp"),
                        music.NoteLike("g", 0.5, "pp"),
                        music.NoteLike("g", 0.25, "pp"),
                        music.NoteLike("b", fractions.Fraction(1, 6), "pp"),
                        music.NoteLike("f", fractions.Fraction(1, 12), "pp"),
                        music.NoteLike("g", 1, "pp"),
                        music.NoteLike("f", 1, "pp"),
                        music.NoteLike("g", 1, "pp"),
                        music.NoteLike("g", 1, "pp"),
                    ])
                ]),
                basic.SimultaneousEvent(
                    [basic.SequentialEvent([music.NoteLike([], 6, "ppp")])]),
            ],
            tag_to_event_index=instruments.INSTRUMENT_ID_TO_INDEX,
        )

        abjad_score_converter = ot2_abjad.TaggedSimultaneousEventToAbjadScoreConverter(
            (
                abjad.TimeSignature((4, 2)),
                abjad.TimeSignature((4, 2)),
                abjad.TimeSignature((4, 2)),
                abjad.TimeSignature((4, 2)),
            ))
        abjad_score = abjad_score_converter.convert(musical_data)

        lilypond_file_converter = ot2_abjad.AbjadScoreToLilypondFileConverter()
        lilypond_file = lilypond_file_converter.convert(abjad_score)

        abjad.persist.as_pdf(lilypond_file,
                             "tests/converters/frontends/score_test.pdf")
Example #28
                    absolute_start_time_per_state = (
                        partial.absolute_start_time_per_state)
                    rhythmical_data_per_state = partial.rhythmical_data_per_state
                    for (
                            nth_state,
                            absolute_start_time_of_current_state,
                            rhythmical_data,
                    ) in zip(
                            range(len(rhythmical_data_per_state)),
                            absolute_start_time_per_state,
                            rhythmical_data_per_state,
                    ):

                        n_repetitions, n_periods, rhythm, _ = rhythmical_data
                        rhythm = basic.SequentialEvent(
                            rhythm * n_repetitions).destructive_copy()

                        if nth_state == 0:
                            envelope_points = ((0, 0), (rhythm.duration, 10))

                        elif nth_state == 1:
                            envelope_points = ((0, 10), (rhythm.duration, 10))

                        elif nth_state == 2:
                            envelope_points = ((0, 10), (rhythm.duration, 0))

                        else:
                            raise NotImplementedError()

                        weight_envelope = expenvelope.Envelope.from_points(
                            *envelope_points)
def _family_data_to_families(
    last_pitch_of_last_melodic_phrase: pitches.JustIntonationPitch,
    first_pitch_of_next_melodic_phrase: pitches.JustIntonationPitch,
    duration_in_seconds: float,
    duration_of_following_cengkok_part_in_seconds: float,
    n_root_notes_per_family: typing.Tuple[int, ...],
    density: float,
    rest_distribution: typing.Tuple[float, ...],
) -> basic.SequentialEvent[typing.Union[families.FamilyOfPitchCurves,
                                        basic.SimpleEvent]]:
    # (1) get root notes
    n_root_notes_summed = sum(n_root_notes_per_family)
    (
        choosen_cps_scale,
        frame_pitches,
    ) = _find_closest_approximation_of_interval_in_cps_scale_candidates(
        first_pitch_of_next_melodic_phrase - last_pitch_of_last_melodic_phrase,
        *common_product_set_scales.COMMON_PRODUCT_SET_SCALES,
    )
    root_notes = _extend_cps_scale_melody(frame_pitches, choosen_cps_scale,
                                          n_root_notes_summed)
    difference_between_root_note_and_last_cantus_firmus_pitch = (
        last_pitch_of_last_melodic_phrase - root_notes[0])
    pitch_pair_to_connection_pitch = _find_connection_pitch_between_two_pitches(
        choosen_cps_scale)

    # (2) get duration for each family / for each rest
    concatenated_duration_for_all_families = duration_in_seconds * density
    concatenated_duration_for_all_rests = (
        duration_in_seconds - concatenated_duration_for_all_families)
    duration_per_family_in_seconds = tuple(
        (n_root_notes / n_root_notes_summed) *
        concatenated_duration_for_all_families
        for n_root_notes in n_root_notes_per_family)
    assert round(sum(duration_per_family_in_seconds),
                 4) == round(concatenated_duration_for_all_families, 4)
    n_families = len(n_root_notes_per_family)
    n_rests = n_families + 1
    if rest_distribution:
        assert len(rest_distribution) == n_rests
        summed_weights = sum(rest_distribution)
        duration_per_rest_in_seconds = [
            concatenated_duration_for_all_rests * (weight / summed_weights)
            for weight in rest_distribution
        ]
    else:
        duration_per_rest_in_seconds = [
            concatenated_duration_for_all_rests / n_rests
            for _ in range(n_rests)
        ]

    # (3) build families / rests
    family_structure = basic.SequentialEvent([])
    root_note_indices = tuple(
        tools.accumulate_from_zero(n_root_notes_per_family))
    for (
            root_notes_index_start,
            root_notes_index_end,
            duration_for_current_family_in_seconds,
            duration_for_current_rest,
    ) in zip(
            root_note_indices,
            root_note_indices[1:],
            duration_per_family_in_seconds,
            duration_per_rest_in_seconds,
    ):
        # between each family there is a rest
        family_structure.append(basic.SimpleEvent(duration_for_current_rest))
        root_notes_for_current_family = root_notes[
            root_notes_index_start:root_notes_index_end]
        connection_notes_for_current_family = _get_connection_pitches_for_root_pitches(
            root_notes_for_current_family, pitch_pair_to_connection_pitch)
        new_family = families.RootAndConnectionBasedFamilyOfPitchCurves(
            duration_for_current_family_in_seconds,
            tuple((pitch +
                   difference_between_root_note_and_last_cantus_firmus_pitch
                   ).normalize(mutate=False)
                  for pitch in root_notes_for_current_family),
            tuple((pitch +
                   difference_between_root_note_and_last_cantus_firmus_pitch
                   ).normalize(mutate=False)
                  for pitch in connection_notes_for_current_family),
            generations=GENERATIONS,
            population_size=POPULATION_SIZE,
        )
        family_structure.append(new_family)

    family_structure.append(
        basic.SimpleEvent(duration_per_rest_in_seconds[-1] +
                          duration_of_following_cengkok_part_in_seconds))

    assert round(family_structure.duration, 3) == (round(
        duration_in_seconds + duration_of_following_cengkok_part_in_seconds,
        3))

    return family_structure
Example #30

converter = mutwo_abjad.SequentialEventToAbjadVoiceConverter(
    mutwo_pitch_to_abjad_pitch_converter=mutwo_abjad.MutwoPitchToHEJIAbjadPitchConverter()
)
lilypond_file = abjad.LilyPondFile(includes=["ekme-heji-ref-c.ily"])
for current_pitch, previous_pitch, next_pitch, solution, harmonicity in DATA:
    movement_pitches = tuple(
        pitches.JustIntonationPitch(exponents)
        for exponents in (previous_pitch, current_pitch, next_pitch)
    )
    solution_pitches = tuple(
        pitches.JustIntonationPitch(exponents) for exponents in solution
    )

    sequential_event = basic.SequentialEvent([])
    for movement_pitch in movement_pitches:
        sequential_event.append(
            music.NoteLike(movement_pitch, fractions.Fraction(1, 3))
        )

    for solution_pitch in solution_pitches:
        sequential_event.append(
            music.NoteLike(solution_pitch, fractions.Fraction(1, 5))
        )

    voice = converter.convert(sequential_event)
    abjad.attach(
        abjad.Markup(
            "\\teeny { movement: "
            + str(' - '.join(str(pitch.ratio) for pitch in movement_pitches))