def _render_soundfile_for_keyboard(instrument_id, filtered_time_brackets):
    """Render one MIDI file per keyboard voice from the filtered time brackets.

    :param instrument_id: Identifier passed to the
        ``TimeBracketsToEventConverter`` to select this instrument's events.
    :param filtered_time_brackets: Time brackets already filtered for this
        instrument.

    Does nothing when the converter yields no events.
    """
    time_brackets_converter = (
        converters.symmetrical.time_brackets.TimeBracketsToEventConverter(
            instrument_id))
    converted_time_brackets = time_brackets_converter.convert(
        filtered_time_brackets)
    if converted_time_brackets:
        # Number of parallel voices = widest SimultaneousEvent found.
        n_sequential_events = max(
            len(simultaneous_event)
            for simultaneous_event in converted_time_brackets
            if isinstance(simultaneous_event, basic.SimultaneousEvent))
        simultaneous_event = basic.SimultaneousEvent(
            [basic.SequentialEvent([]) for _ in range(n_sequential_events)])
        for event in converted_time_brackets:
            if isinstance(event, basic.SimpleEvent):
                # A rest between brackets: pad every voice equally.
                rest = basic.SimpleEvent(event.duration)
                for seq in simultaneous_event:
                    seq.append(rest)
            else:
                for ev, sequential_event in zip(event, simultaneous_event):
                    ev = PLAYING_INDICATORS_CONVERTER.convert(ev)
                    sequential_event.extend(ev)
                # BUGFIX: brackets with fewer voices than n_sequential_events
                # previously left the remaining voices shorter, desynchronizing
                # their timelines.  Pad them with rests, mirroring
                # _render_soundfile_or_midi_file_for_instrument.
                missing_sequential_events = n_sequential_events - len(event)
                if missing_sequential_events:
                    event_duration = event.duration
                    for sequential_event in simultaneous_event[
                            -missing_sequential_events:]:
                        sequential_event.append(
                            basic.SimpleEvent(event_duration))
        # One MIDI file per voice.
        for nth_seq_event, sequential_event in enumerate(simultaneous_event):
            midi_file_converter = (
                ot2_converters.frontends.midi.KeyboardEventToMidiFileConverter(
                    nth_seq_event))
            midi_file_converter.convert(sequential_event)
def _generate_rhythmical_data(
    fundamental: pitches.JustIntonationPitch,
    n_periods_of_fundamental_per_beat: int,
    n_repetitions_of_rhythm: int,
    partial: int,
    # BUGFIX: the previous annotation ``typing.Tuple[typing.Any]`` denoted a
    # 1-tuple, but the function returns a 4-tuple.
) -> typing.Tuple[int, int, basic.SequentialEvent, typing.Tuple[int, ...]]:
    """Generate rhythmical data for the respective partial.

    The rhythmical data contain the following information:

    1. How often the rhythm get repeated (int)
    2. How many periods one beat last (int)
    3. The respective rhythm with duration values in seconds
       (basic.SequentialEvent[basic.SimpleEvent])
    4. Indispensability of each beat (typing.Tuple[int])
    """
    # One period of the partial = 1 / its frequency; a beat spans
    # n_periods_of_fundamental_per_beat such periods.
    duration_of_one_period = 1 / (fundamental.frequency * partial)
    duration_of_one_beat = (duration_of_one_period *
                            n_periods_of_fundamental_per_beat)
    # The rhythm contains one beat per partial number.
    rhythm = basic.SequentialEvent(
        [basic.SimpleEvent(duration_of_one_beat) for _ in range(partial)])
    if partial == 1:
        # A single beat is trivially the most indispensable one.
        indispensability_for_bar = (0, )
    else:
        indispensability_for_bar = indispensability.indispensability_for_bar(
            (partial, ))
    return (
        n_repetitions_of_rhythm,
        n_periods_of_fundamental_per_beat,
        rhythm,
        indispensability_for_bar,
    )
def _render_vibrations_to_filtered_isis_files(
    nested_vibrations: basic.SimultaneousEvent,
):
    """Render each speaker's vibrations to a filtered ISIS sound file.

    One conversion thread is spawned per (cycle, speaker) pair; the function
    blocks until all of them have finished.

    :param nested_vibrations: SimultaneousEvent of cycles, each cycle holding
        per-speaker event data.
    """
    threads = []
    for nth_cycle, cycle in enumerate(nested_vibrations):
        sample_player_event = basic.SimpleEvent(
            sixtycombinations.constants.DURATION)
        sample_player_event.path = "{}/{}.wav".format(
            sixtycombinations.constants.ISIS_FILES_BUILD_PATH, nth_cycle
        )
        for nth_speaker, speaker_data in enumerate(cycle):
            adapted_speaker_data = basic.SimultaneousEvent(
                [basic.SequentialEvent([sample_player_event])]
                + speaker_data[:]
            )
            sound_file_converter = sixtycombinations.converters.frontends.VibrationsToFilteredIsisSoundFileConverter(
                nth_cycle, nth_speaker
            )
            # BUGFIX: the previous ``target=lambda: ...`` captured the loop
            # variables by reference (late binding), so a thread could run the
            # conversion with a converter / data set from a *later* iteration.
            # Passing target and args directly binds the current values.
            thread = threading.Thread(
                target=sound_file_converter.convert,
                args=(adapted_speaker_data,),
            )
            thread.start()
            threads.append(thread)
    # BUGFIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
    while any(thread.is_alive() for thread in threads):
        time.sleep(0.5)
def _get_variable_data(
    melody_part: typing.Tuple[typing.Tuple[
        typing.Tuple[pitches.JustIntonationPitch, ...],
        fractions.Fraction,
        typing.Optional[basic.SequentialEvent[music.NoteLike]],
    ], ..., ],
) -> typing.Tuple[typing.Tuple[typing.Tuple[int, fractions.Fraction], ...],
                  basic.SequentialEvent]:
    """Split a melody part into usable Brahms-melody bars and bars to fill.

    A bar's Brahms melody is kept only when all of its pitches lie inside the
    ambitus of the sustaining instruments; otherwise the bar index and
    duration are collected for later filling and a placeholder rest is
    appended instead.
    """
    ambitus = (
        instruments.AMBITUS_SUSTAINING_INSTRUMENTS_JUST_INTONATION_PITCHES)
    lowest_allowed = ambitus.borders[0]
    highest_allowed = ambitus.borders[1]

    event_blueprint = basic.SequentialEvent([])
    bars_to_fill = []
    for bar_index, (_, bar_duration, melody) in enumerate(melody_part):
        melody_fits_ambitus = False
        if melody:
            all_melody_pitches = functools.reduce(
                operator.add, melody.get_parameter("pitch_or_pitches"))
            melody_fits_ambitus = (
                min(all_melody_pitches) >= lowest_allowed
                and max(all_melody_pitches) <= highest_allowed)
        if melody_fits_ambitus:
            event_blueprint.append(melody)
        else:
            # Bar can't use the Brahms melody: remember it and add a rest.
            bars_to_fill.append((bar_index, bar_duration))
            event_blueprint.append(basic.SimpleEvent(bar_duration))
    return tuple(bars_to_fill), event_blueprint
def make_rhythms(
    a: typing.Tuple[fractions.Fraction, fractions.Fraction],
    b: typing.Tuple[fractions.Fraction, fractions.Fraction],
    c: typing.Tuple[fractions.Fraction, fractions.Fraction],
    d: typing.Tuple[fractions.Fraction, fractions.Fraction],
    is_inversed: bool = False,
) -> typing.Dict[int, basic.SequentialEvent[basic.SimpleEvent]]:
    """Build palindromic rhythm sequences for bar sizes 2, 4 and 8.

    The duration pairs ``a`` .. ``d`` fill the space between fixed quarter
    notes; with ``is_inversed`` each pair is reversed first.
    """
    quarter = fractions.Fraction(1, 4)
    if is_inversed:
        a = tuple(reversed(a))
        b = tuple(reversed(b))
        c = tuple(reversed(c))
        d = tuple(reversed(d))

    # Mirror-symmetric duration sequences around the central quarter notes.
    durations_for_four = ((quarter,) + a + (quarter, quarter)
                          + tuple(reversed(a)) + (quarter,))
    durations_for_eight = ((quarter,) + b + c + d + (quarter, quarter)
                           + tuple(reversed(d)) + tuple(reversed(c))
                           + tuple(reversed(b)) + (quarter,))

    return {
        2: basic.SequentialEvent(
            [basic.SimpleEvent(quarter) for _ in range(4)]),
        4: basic.SequentialEvent(
            [basic.SimpleEvent(duration) for duration in durations_for_four]),
        8: basic.SequentialEvent(
            [basic.SimpleEvent(duration) for duration in durations_for_eight]),
    }
def convert_nested_loop_to_sequential_event(nested_loop):
    """Recursively convert a nested tuple structure into events.

    Tuples become nested ``basic.SequentialEvent``s; every other element is
    wrapped in a ``basic.SimpleEvent`` (its value used as the duration).
    """
    return basic.SequentialEvent([
        convert_nested_loop_to_sequential_event(element)
        if isinstance(element, tuple)
        else basic.SimpleEvent(element)
        for element in nested_loop
    ])
def find_solutions(
    beats_per_part: int,
    units_per_part: int,
    compounds_per_part: int,
    division_size: int,
) -> tuple:
    """Find all metric structures that divide the part evenly.

    Candidate compound distributions are generated with Euclidean rhythms and
    only kept when beats, units and compounds all divide without remainder on
    every level.  Returns a tuple of nested SequentialEvents (one per valid
    solution), whose leaves are SimpleEvents of ``division_size``.
    """
    # Every Euclidean distribution of the compounds whose length divides the
    # units evenly is a candidate.
    possible_cpps = tuple(
        toussaint.euclidean(compounds_per_part, n)
        for n in range(1, compounds_per_part + 1)
        if units_per_part % n == 0)
    solutions = []
    for ccp in possible_cpps:
        length_ccp = len(ccp)
        # Both units and beats have to split evenly over the metres.
        tests_if_addable = (
            units_per_part % length_ccp == 0,
            beats_per_part % length_ccp == 0,
        )
        units_per_metre = units_per_part // length_ccp
        beats_per_metre = beats_per_part // length_ccp
        if all(tests_if_addable):
            is_still_addable = True
            mets = []
            for item in ccp:
                unit_divisions = toussaint.euclidean(units_per_metre, item)
                # A metre is only valid if beats divide evenly over its
                # compounds and no compound receives zero units.
                item_tests = (beats_per_metre % item == 0,
                              all(unit_divisions))
                if all(item_tests):
                    available_unit_sizes_per_compound = beats_per_metre // item
                    comps = []
                    for amount_units in unit_divisions:
                        u_sizes = toussaint.euclidean(
                            available_unit_sizes_per_compound, amount_units)
                        if all(u_sizes):
                            comps.append(
                                basic.SequentialEvent([
                                    basic.SequentialEvent([
                                        basic.SimpleEvent(division_size)
                                        for _ in range(s)
                                    ]) for s in u_sizes
                                ]))
                        else:
                            # A unit without any beat size invalidates the
                            # whole candidate (the loop still finishes so the
                            # flag reflects *all* items).
                            is_still_addable = False
                    mets.append(basic.SequentialEvent(comps))
                else:
                    is_still_addable = False
            # Only keep the candidate when every metre was valid.
            if is_still_addable:
                solutions.append(basic.SequentialEvent(mets))
    return tuple(solutions)
def _initialise_rests(self, duration: numbers.Number) -> None:
    """Partition ``duration`` into alternating rest / playing events.

    The timeline is split into windows of random length (uniform 7..12).
    For each window the curves queried via ``self.get_value_of_at`` decide
    whether a rest starts there and how long it lasts (clamped to the
    window); the remainder of the window becomes a playing event.  The
    result is stored in ``self._rests``; each event carries an ``is_rest``
    flag.

    :param duration: Total duration to partition (assumed positive).
    """
    test_point_maker = infit.Uniform(7, 12)
    test_points = []
    while sum(test_points) < duration:
        test_points.append(next(test_point_maker))
    # Trim the last window so the windows sum exactly to ``duration``.
    test_points[-1] -= sum(test_points) - duration
    rests = basic.SequentialEvent([])
    for absolute_time, test_point_duration in zip(
            tools.accumulate_from_zero(test_points), test_points):
        # Normalised position in [0, 1) used to sample the control curves.
        absolute_position_on_timeline = absolute_time / duration
        activate_rest = self.get_value_of_at(
            "activate_rest", absolute_position_on_timeline)
        if activate_rest:
            rest_duration = self.get_value_of_at(
                "rest_duration", absolute_position_on_timeline)
            # A rest may never exceed its window.
            if rest_duration > test_point_duration:
                rest_duration = test_point_duration
            rest = basic.SimpleEvent(rest_duration)
            rest.is_rest = True
            rests.append(rest)
        else:
            rest_duration = 0
        remaining_playing = test_point_duration - rest_duration
        if remaining_playing:
            playing = basic.SimpleEvent(remaining_playing)
            playing.is_rest = False
            rests.append(playing)
    # NOTE: a superseded commented-out implementation of this method was
    # removed here; see version control history if needed.
    self._rests = rests
def _make_remix_events(
    split_annotated_note_likes: typing.Tuple[typing.Tuple[
        basic.SequentialEvent[typing.Union[basic.SimpleEvent,
                                           classes.AnnotatedNoteLike]],
        float,  # start
        str,  # path
    ]],
) -> basic.SimultaneousEvent[basic.SequentialEvent[basic.SimpleEvent]]:
    """Build one sample-player voice per remix part.

    Each voice optionally begins with a rest (when the part starts later
    than 0) followed by a single event whose ``path`` attribute points to
    the part's wav file and whose duration matches the annotated material.
    """
    remix_events = basic.SimultaneousEvent([])
    for annotated_note_likes, start, path in split_annotated_note_likes:
        voice_content = []
        if start > 0:
            # Delay playback until the part's start time.
            voice_content.append(basic.SimpleEvent(start))
        remix_event = basic.SimpleEvent(annotated_note_likes.duration)
        remix_event.path = "{}.wav".format(path)
        voice_content.append(remix_event)
        remix_events.append(basic.SequentialEvent(voice_content))
    return remix_events
def _render_soundfile_or_midi_file_for_instrument(
    instrument_id,
    filtered_time_brackets,
    midi_file_converter,
    return_pitch: bool = False,
):
    """Merge an instrument's time brackets into voices and render them.

    Converts the filtered time brackets to events, lays them out on parallel
    voices (padding shorter brackets and inter-bracket gaps with rests) and
    hands the resulting SimultaneousEvent to ``midi_file_converter``.

    :param return_pitch: When True, marks every event with
        ``return_pitch = True`` before rendering.
    """
    bracket_converter = (
        converters.symmetrical.time_brackets.TimeBracketsToEventConverter(
            instrument_id))
    converted_brackets = bracket_converter.convert(filtered_time_brackets)
    if not converted_brackets:
        return

    # The number of parallel voices is the widest SimultaneousEvent found.
    voice_count = max(
        len(bracket) for bracket in converted_brackets
        if isinstance(bracket, basic.SimultaneousEvent))
    voices = basic.SimultaneousEvent(
        [basic.SequentialEvent([]) for _ in range(voice_count)])

    for bracket in converted_brackets:
        if isinstance(bracket, basic.SimpleEvent):
            # Gap between brackets: every voice rests for the same span.
            gap_rest = basic.SimpleEvent(bracket.duration)
            for voice in voices:
                voice.append(gap_rest)
            continue
        for bracket_voice, voice in zip(bracket, voices):
            voice.extend(PLAYING_INDICATORS_CONVERTER.convert(bracket_voice))
        # Pad voices that received no material so all stay in sync.
        n_missing = voice_count - len(bracket)
        if n_missing:
            bracket_duration = bracket.duration
            for voice in voices[-n_missing:]:
                voice.append(basic.SimpleEvent(bracket_duration))

    if return_pitch:
        voices.set_parameter("return_pitch", True)
    midi_file_converter.convert(voices)
(1, 0, 0, ("n", "t",), "e"), # nihil (1, 0, 1, ("n",), "i"), (1, 0, 1, ("H",), "i"), (1, None, 0, ("l",), "_"), ), ) SINGING_PHRASES = [] for raw_phrase in RAW_PHRASES: converted_phrase = basic.SequentialEvent([]) for duration, pitch, volume, consonants, vowel in raw_phrase: event = basic.SimpleEvent(duration) event.pitch = pitch event.volume = volumes.DirectVolume(volume) event.consonants = consonants event.vowel = vowel converted_phrase.append(event) for event_index, event in enumerate(converted_phrase): previous_pitch = None if event.pitch: for previous_event in reversed(converted_phrase[:event_index]): if previous_event.pitch: previous_pitch = previous_event.pitch break if previous_pitch: event.pitch_distance = event.pitch - previous_pitch
def _family_data_to_families(
    last_pitch_of_last_melodic_phrase: pitches.JustIntonationPitch,
    first_pitch_of_next_melodic_phrase: pitches.JustIntonationPitch,
    duration_in_seconds: float,
    duration_of_following_cengkok_part_in_seconds: float,
    n_root_notes_per_family: typing.Tuple[int, ...],
    density: float,
    rest_distribution: typing.Tuple[float, ...],
) -> basic.SequentialEvent[typing.Union[families.FamilyOfPitchCurves,
                                        basic.SimpleEvent]]:
    """Build a sequence of pitch-curve families separated by rests.

    Root notes are drawn from the common-product-set scale that best
    approximates the interval between the surrounding melodic phrases.
    ``density`` splits ``duration_in_seconds`` between families and rests;
    ``rest_distribution`` (optional weights, one per rest) shapes the rest
    lengths.  The final rest absorbs the following cengkok part's duration.
    """
    # (1) get root notes
    n_root_notes_summed = sum(n_root_notes_per_family)
    (
        choosen_cps_scale,
        frame_pitches,
    ) = _find_closest_approximation_of_interval_in_cps_scale_candidates(
        first_pitch_of_next_melodic_phrase - last_pitch_of_last_melodic_phrase,
        *common_product_set_scales.COMMON_PRODUCT_SET_SCALES,
    )
    root_notes = _extend_cps_scale_melody(frame_pitches, choosen_cps_scale,
                                          n_root_notes_summed)
    # Transposition that maps the first root note onto the last cantus
    # firmus pitch.
    difference_between_root_note_and_last_cantus_firmus_pitch = (
        last_pitch_of_last_melodic_phrase - root_notes[0])
    pitch_pair_to_connection_pitch = _find_connection_pitch_between_two_pitches(
        choosen_cps_scale)
    # (2) get duration for each family / for each rest
    concatenated_duration_for_all_families = duration_in_seconds * density
    concatenated_duration_for_all_rests = (
        duration_in_seconds - concatenated_duration_for_all_families)
    # Family durations are proportional to their root-note counts.
    duration_per_family_in_seconds = tuple(
        (n_root_notes / n_root_notes_summed) *
        concatenated_duration_for_all_families
        for n_root_notes in n_root_notes_per_family)
    assert round(sum(duration_per_family_in_seconds),
                 4) == round(concatenated_duration_for_all_families, 4)
    n_families = len(n_root_notes_per_family)
    # There is a rest before each family plus one trailing rest.
    n_rests = n_families + 1
    if rest_distribution:
        assert len(rest_distribution) == n_rests
        summed_weights = sum(rest_distribution)
        duration_per_rest_in_seconds = [
            concatenated_duration_for_all_rests * (weight / summed_weights)
            for weight in rest_distribution
        ]
    else:
        # No explicit weights: distribute the rest time evenly.
        duration_per_rest_in_seconds = [
            concatenated_duration_for_all_rests / n_rests
            for _ in range(n_rests)
        ]
    # (3) build families / rests
    family_structure = basic.SequentialEvent([])
    root_note_indices = tuple(
        tools.accumulate_from_zero(n_root_notes_per_family))
    for (
        root_notes_index_start,
        root_notes_index_end,
        duration_for_current_family_in_seconds,
        duration_for_current_rest,
    ) in zip(
        root_note_indices,
        root_note_indices[1:],
        duration_per_family_in_seconds,
        duration_per_rest_in_seconds,
    ):
        # between each family there is a rest
        family_structure.append(basic.SimpleEvent(duration_for_current_rest))
        root_notes_for_current_family = root_notes[
            root_notes_index_start:root_notes_index_end]
        connection_notes_for_current_family = _get_connection_pitches_for_root_pitches(
            root_notes_for_current_family, pitch_pair_to_connection_pitch)
        # Transpose root and connection pitches into the cantus firmus
        # context and normalize each octave-wise (without mutating).
        new_family = families.RootAndConnectionBasedFamilyOfPitchCurves(
            duration_for_current_family_in_seconds,
            tuple((pitch +
                   difference_between_root_note_and_last_cantus_firmus_pitch
                   ).normalize(mutate=False)
                  for pitch in root_notes_for_current_family),
            tuple((pitch +
                   difference_between_root_note_and_last_cantus_firmus_pitch
                   ).normalize(mutate=False)
                  for pitch in connection_notes_for_current_family),
            generations=GENERATIONS,
            population_size=POPULATION_SIZE,
        )
        family_structure.append(new_family)
    # Trailing rest also covers the following cengkok part.
    family_structure.append(
        basic.SimpleEvent(duration_per_rest_in_seconds[-1] +
                          duration_of_following_cengkok_part_in_seconds))
    assert round(family_structure.duration, 3) == (round(
        duration_in_seconds + duration_of_following_cengkok_part_in_seconds,
        3))
    return family_structure
*envelope_points) for relative_start_time in rhythm.absolute_times: beat_and_weight_pairs.append(( (relative_start_time + absolute_start_time_of_current_state) % DURATION, round( weight_envelope.value_at( relative_start_time)), )) activity_level = edwards.ActivityLevel() absolute_beats = sorted( set(absolute_time for absolute_time, weight in sorted(beat_and_weight_pairs, key=operator.itemgetter(0)) if activity_level(weight))) filtered_absolute_beats = [absolute_beats[0]] for absolute_beat in absolute_beats: if (absolute_beat - filtered_absolute_beats[-1]) >= MIN_DISTANCE: filtered_absolute_beats.append(absolute_beat) rhythmical_grid = basic.SequentialEvent([ basic.SimpleEvent(b - a) for a, b in zip(filtered_absolute_beats, filtered_absolute_beats[1:]) ]) ISIS_RHYTHMICAL_GRID_PER_CYCLE.append(rhythmical_grid)
] ) for cycle in RING_POSITION_TO_LOUDSPEAKER ] ) for nth_cycle, cycle in enumerate(GROUPS): is_first = True for nth_group, group in enumerate(cycle): if is_first: n_phases_rest = group.attack + group.sustain is_first = False else: n_phases_rest = group.sustain rest = basic.SimpleEvent(n_phases_rest * (1 / group.fundamental.frequency)) common_pitch_data_with_previous_harmony = ( group.common_pitch_data_with_previous_harmony ) common_pitch_data_with_next_harmony = group.common_pitch_data_with_next_harmony pitch_to_loudspeaker_mapping = PITCH_TO_LOUDSPEAKER_MAPPING[nth_cycle][ nth_group % len(PITCH_TO_LOUDSPEAKER_MAPPING[nth_cycle]) ] a_or_b = nth_group % 2 for nth_pitch, pitch in enumerate(group.harmony): nth_loudspeaker = pitch_to_loudspeaker_mapping.index(nth_pitch) nth_partial = int((pitch - group.fundamental).ratio) is_connection_pitch_to_previous_harmony = ( pitch == common_pitch_data_with_previous_harmony[0][1]
def _structure_time(
    splitted_parts: typing.Tuple[phrases.Phrases, ...],
    phrase_parts: typing.Tuple[
        typing.Tuple[
            typing.Tuple[typing.Tuple[int, typing.Tuple[int, ...]], ...],
            float
        ],
        ...,
    ],
    duration_in_seconds: float,
) -> StructureType:
    """Make composition structure.

    Returns SequentialEvent which consist of SequentialEvents.
    Each contained SequentialEvent represent one part.
    Each part contains two sub events again.

    1. The first sub-event is only a SimpleEvent. Its duration indicates
       the duration (in seconds) of the timebracket based / stochastic /
       free flow area of this part.
    2. The second sub-event is a SequentialEvent which contains
       PhraseEvents. It represents the cengkok based area of this part.
    """
    n_parts = len(phrase_parts)
    # Every part gets an equal share of the total duration.
    duration_per_part_in_seconds = duration_in_seconds / n_parts
    structure = basic.SequentialEvent([])
    for nth_phrase_part, phrase_part in enumerate(phrase_parts):
        phrase_indices, tempo = phrase_part
        cengkok_based_area = ot2_basic.SequentialEventWithTempo([], tempo=tempo)
        for (
            reptition_of_cantus_firmus_index,
            phrases_of_cantus_firmus_repetition_indices,
        ) in phrase_indices:
            nth_reptition_of_cantus_firmus = splitted_parts[
                reptition_of_cantus_firmus_index
            ]
            for (
                phrase_of_cantus_firmus_repetition_index
            ) in phrases_of_cantus_firmus_repetition_indices:
                cengkok_based_area.extend(
                    nth_reptition_of_cantus_firmus[
                        phrase_of_cantus_firmus_repetition_index
                    ]
                )
                # make last tone longer if its not the complete of the phrase
                if phrase_of_cantus_firmus_repetition_index != 3:
                    cengkok_based_area[-1].duration *= 2
        # change grid from 1/2 to 1/4
        cengkok_based_area = cengkok_based_area.set_parameter(
            "duration",
            lambda old_duration: old_duration * fractions.Fraction(1, 2),
            mutate=False,
        )
        # Convert the symbolic duration to seconds via the part's tempo.
        duration_of_cengkok_based_area_in_seconds = (
            cengkok_based_area.duration
            * 4
            * tempos.TempoPointConverter().convert(tempo)
        )
        cengkok_based_area.duration_in_seconds = duration_of_cengkok_based_area_in_seconds
        # Whatever is left of the part's share becomes the stochastic area.
        remaining_duration = (
            duration_per_part_in_seconds
            - duration_of_cengkok_based_area_in_seconds
        )
        if remaining_duration < 0:
            # Cengkok area overflows its part: fall back to a fixed
            # stochastic duration and warn.
            remaining_duration = ADDED_DURATION_FOR_STOCHASTIC_PART_IF_CENGKOK_PART_IS_TOO_LONG
            warnings.warn(
                f"The phrase part number '{nth_phrase_part}' has a too long cengkok"
                " based area, so that there isn't any time left for the stochastic"
                " area!"
            )
        structure.append(
            basic.SequentialEvent(
                [basic.SimpleEvent(remaining_duration), cengkok_based_area]
            )
        )
    return structure
def convert(
    self,
    groups_per_cycle: typing.Tuple[typing.Tuple[type(
        constants.GROUPS[0][0])]],
):
    """Write Reaper markers for every group's attack/sustain/release states.

    For each group the absolute start times of its three states are
    computed, wrapped modulo the piece duration and sorted; marker event
    durations are then rebuilt from the gaps between consecutive start
    times.  The rendered marker text is written to ``self._path``.
    """
    reaper_marker_events = basic.SimultaneousEvent([])
    for nth_cycle, groups in enumerate(groups_per_cycle):
        absolute_start_time_for_cycle = constants.ABSOLUTE_START_TIME_PER_GROUP[
            nth_cycle]
        for nth_group, group in enumerate(groups):
            reaper_marker_events_for_group = basic.SequentialEvent([])
            absolute_start_time_for_group = (
                absolute_start_time_for_cycle + group.relative_start_time)
            # Wrap around the total piece duration.
            absolute_start_time_for_group %= constants.DURATION
            # Leading offset event so that absolute_times below start at
            # the group's start time.
            reaper_marker_events_for_group.append(
                basic.SimpleEvent(absolute_start_time_for_group))
            for state_name in "attack sustain release".split(" "):
                # Each state lasts n_phases periods of the fundamental.
                n_phases = getattr(group, state_name)
                duration_of_state = (
                    1 / group.fundamental.frequency) * n_phases
                state_marker = basic.SimpleEvent(duration_of_state)
                state_marker.name = "Group({},{})-{}".format(
                    nth_cycle, nth_group, state_name.upper())
                state_marker.color = self.cycle_index_to_color[nth_cycle]
                reaper_marker_events_for_group.append(state_marker)
            # Pair each state marker with its wrapped absolute start time
            # (skipping the leading offset event).
            absolute_start_time_and_reaper_marker_events_pairs = []
            for absolute_time, state_marker in zip(
                    reaper_marker_events_for_group.absolute_times[1:],
                    reaper_marker_events_for_group[1:],
            ):
                absolute_time %= constants.DURATION
                absolute_start_time_and_reaper_marker_events_pairs.append(
                    (absolute_time, state_marker))
            sorted_absolute_start_time_and_reaper_marker_events_pairs = sorted(
                absolute_start_time_and_reaper_marker_events_pairs,
                key=operator.itemgetter(0),
            )
            # Ensure the marker track starts at time 0 (placeholder event).
            if sorted_absolute_start_time_and_reaper_marker_events_pairs[
                    0][0] != 0:
                sorted_absolute_start_time_and_reaper_marker_events_pairs.insert(
                    0, (0, basic.SimpleEvent(1)))
            sorted_absolute_start_times = tuple(
                map(
                    lambda pair: pair[0],
                    sorted_absolute_start_time_and_reaper_marker_events_pairs,
                ))
            # Rebuild durations from gaps between consecutive start times
            # (the final event runs until 1, presumably normalized time —
            # TODO confirm).
            adjusted_reaper_marker_events_for_group = basic.SequentialEvent(
                [])
            for absolute_start_time0, absolute_start_time1, event in zip(
                    sorted_absolute_start_times,
                    sorted_absolute_start_times[1:] + (1, ),
                    map(
                        lambda pair: pair[1],
                        sorted_absolute_start_time_and_reaper_marker_events_pairs,
                    ),
            ):
                event.duration = absolute_start_time1 - absolute_start_time0
                adjusted_reaper_marker_events_for_group.append(event)
            reaper_marker_events.append(
                adjusted_reaper_marker_events_for_group)
    with open(self._path, "w") as f:
        f.write(
            self._reaper_marker_converter.convert(reaper_marker_events))