def make_melody(self) -> old.Melody:
    if self._gender:
        self._melody_register = -self._melody_register

    melody_pitches = tuple(
        p if p.is_empty else p.register(p.octave + self._melody_register)
        for p in self._melody_pitches)

    transposition_pitch = (
        (globals.FEMALE_SOIL, globals.MALE_SOIL)[self._gender]
        .pitches_per_vox_per_bar[self._bar_number][0][0].normalize())
    if transposition_pitch.cents > 600:
        transposition_pitch = transposition_pitch.register(-1)

    melody_pitches = tuple(
        p if p.is_empty else p + transposition_pitch for p in melody_pitches)

    if self._gender:
        melody_pitches = tuple(
            p if p.is_empty else p.inverse(transposition_pitch)
            for p in melody_pitches)

    distributed_beats = tools.euclid(
        self._n_bars * self._metrical_numbers[0], sum(self._melody_beats))
    absolute_rhythms = tools.accumulate_from_zero(self._melody_beats)
    distributed_rhythms = tuple(
        sum(distributed_beats[pos0:pos1])
        for pos0, pos1 in zip(absolute_rhythms, absolute_rhythms[1:]))
    absolute_rhythms = tools.accumulate_from_zero(distributed_rhythms)
    rhythms = tuple(
        sum(self._rhythms[0][pos0:pos1])
        for pos0, pos1 in zip(absolute_rhythms, absolute_rhythms[1:]))

    return old.Melody(tuple(map(old.Tone, melody_pitches, rhythms)))
def interpolation_trigger(self, value0: int, value1: int, duration: int, function):
    if duration > 0:
        if value0 != value1:
            nsteps = abs(value0 - value1)
            timestamps = tuple(range(duration))

            if value0 > value1:
                steps_in_between = tuple(reversed(range(value1, value0)))
            else:
                steps_in_between = tuple(range(value0, value1))

            # if the number of intermediate values and the number of available
            # time stamps differ, thin out the longer sequence with a
            # euclidean index selection so that both can be zipped
            if nsteps > duration:
                steps_in_between = tuple(
                    steps_in_between[idx]
                    for idx in tools.accumulate_from_zero(
                        tools.euclid(nsteps, duration))[:-1])
            elif nsteps < duration:
                timestamps = tuple(
                    timestamps[idx]
                    for idx in tools.accumulate_from_zero(
                        tools.euclid(duration, nsteps))[:-1])

            for time_stamp, value in zip(timestamps, steps_in_between):
                function(value, time_stamp)

            # finally make sure the target value is reached
            function(value1, time_stamp + 1)
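# Worked example (illustrative, not part of the original source). Assuming
# tools.euclid(a, b) splits the integer a into b parts as evenly as possible
# and tools.accumulate_from_zero returns cumulative sums starting at 0, a call
# like interpolation_trigger(0, 10, 5, function) would trigger:
#
#     function(0, 0), function(2, 1), function(4, 2),
#     function(6, 3), function(8, 4), function(10, 5)
#
# i.e. the ten intermediate values are thinned to five time stamps and the
# target value is set one time stamp after the last interpolation step.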
def sco(self) -> str:
    lines = []
    instrument_number = itertools.cycle((1, 2, 1, 3, 1, 4, 1))
    for melody in self.harmonic_melodies:
        delay = tuple(float(dur) * self.__tempo_factor for dur in melody.delay)
        absolute_delay = tools.accumulate_from_zero(delay)
        for start_position, pitch, duration, volume in zip(
                absolute_delay, melody.pitch, delay, melody.volume):
            if not pitch.is_empty:
                attack_duration = next(self.__attack_duration)
                release_duration = next(self.__release_duration)
                envelope_duration = attack_duration + release_duration

                duration += self.__anticipation_time + self.__overlaying_time
                if duration <= envelope_duration:
                    duration = envelope_duration + 0.01

                line = "i{} {} {} {} {} {} {}".format(
                    next(instrument_number),
                    start_position,
                    duration,
                    pitch.freq * globals.CONCERT_PITCH,
                    volume,
                    attack_duration,
                    release_duration,
                )
                lines.append(line)

    if not lines:
        lines.append("i1 0 {} 100 0".format(self.__duration))

    return "\n".join(lines)
def __converted(self) -> list:
    rhythm = Compound.convert_essence_and_multiply2rhythm(
        self.essence, self.multiply)
    if self.representation == "relative":
        return rhythm
    else:
        return tools.accumulate_from_zero(rhythm)[:-1]
def modulate(self, other: "Compound") -> "Compound":
    intr_other = other.intr
    converted_self = list(self)
    assert sum(intr_other) == len(converted_self)
    indices = tools.accumulate_from_zero(intr_other)
    return type(self)(
        sum(converted_self[x:y]) for x, y in zip(indices, indices[1:]))
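# Illustrative sketch (not part of the original class): the grouping step of
# modulate() extracted as a standalone helper, assuming `tools` is
# mu.utils.tools and accumulate_from_zero returns cumulative sums starting at
# 0 (e.g. (2, 3) -> (0, 2, 5)).
def _sum_by_group_sizes(values: tuple, group_sizes: tuple) -> tuple:
    assert sum(group_sizes) == len(values)
    indices = tools.accumulate_from_zero(group_sizes)
    return tuple(sum(values[x:y]) for x, y in zip(indices, indices[1:]))


# _sum_by_group_sizes((1, 2, 1, 1, 3), (2, 3))  # -> (3, 5)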
def __init__(
    self,
    n_repetitions: int,
    # how many beats one loop contains
    loop_duration: fractions.Fraction,
    # how many bars one loop contains
    loop_size: int,
    bars: tuple,
    rhythm_and_metricity_per_prime: dict,
    instrument_prime_mapping: dict,
):
    self._n_repetitions = n_repetitions
    self._basic_bars = bars
    self._bars = tuple(
        tuple(abjad.TimeSignature(b) for b in bars) * n_repetitions)
    self._loop_duration = loop_duration
    self._duration = loop_duration * n_repetitions
    self._loop_size = loop_size
    self._instrument_prime_mapping = instrument_prime_mapping
    self._absolute_rhythm_and_metricity_per_prime = {
        prime: (
            # absolute rhythms
            tools.accumulate_from_zero(
                rhythm_and_metricity_per_prime[prime][0] * n_repetitions)[:-1],
            # metricities, rescaled to range 0 - 1
            tuple(
                tools.scale(rhythm_and_metricity_per_prime[prime][1], 0, 1)
                * n_repetitions),
        )
        for prime in rhythm_and_metricity_per_prime
    }
    absolute_rhythm_and_metricities = functools.reduce(
        operator.add,
        (tuple(zip(*self._absolute_rhythm_and_metricity_per_prime[prime]))
         for prime in self._absolute_rhythm_and_metricity_per_prime),
    )

    # for positions shared by several primes, only keep the highest metricity
    absolute_rhythm_and_metricities_dict = {}
    for position, metricity in absolute_rhythm_and_metricities:
        is_addable = True
        if position in absolute_rhythm_and_metricities_dict:
            if metricity < absolute_rhythm_and_metricities_dict[position]:
                is_addable = False
        if is_addable:
            absolute_rhythm_and_metricities_dict.update({position: metricity})

    self._absolute_rhythm_and_metricities = tuple(
        sorted(
            ((position, absolute_rhythm_and_metricities_dict[position])
             for position in absolute_rhythm_and_metricities_dict),
            key=operator.itemgetter(0),
        ))
    self._absolute_rhythm = tuple(
        map(operator.itemgetter(0), self._absolute_rhythm_and_metricities))
    self._metricities = tuple(
        map(operator.itemgetter(1), self._absolute_rhythm_and_metricities))
def make_string_table(strings: tuple, available_columns=37) -> pylatex.LongTabu:
    import bisect

    from mu.utils import tools

    available_frets = 120 * 3
    frets_per_column = tools.accumulate_from_zero(
        tools.euclid(available_frets, available_columns))

    table = pylatex.LongTabu(
        ("|l| " + "l " * available_columns)[:-1] + "|", row_height=2)
    table.add_hline()

    for st_idx, st in enumerate(
            sorted(strings, key=lambda s: s.number, reverse=True)):
        string_number = len(strings) - st_idx
        fret = st.fret.convert2absolute_fret(120)
        row = ["" for i in range(available_columns)]

        best_column = bisect.bisect_left(frets_per_column, fret)
        if best_column == available_columns:
            best_column -= 1

        if fret % 10 != 0 and fret % 2 == 0:
            color = "red"
        elif fret % 2 != 0 and fret % 5 != 0:
            color = "blue"
        else:
            color = "black"

        col = pylatex.TextColor(color, st.fret.convert2relative_fret())
        row[best_column] = col
        table.add_row(["{0}".format(string_number)] + row)
        table.add_hline()

    return table
def _get_absolute_rhythm_and_metricity_per_prime(self) -> dict:
    return {
        prime: (
            tools.accumulate_from_zero(
                self._rhythm_and_metricity_per_prime[prime][0])[:-1],
            self._rhythm_and_metricity_per_prime[prime][1],
        )
        for prime in self._rhythm_and_metricity_per_prime
    }
def _get_rhythm_and_metricity_per_prime(self) -> dict:
    data_per_prime = {}
    for prime in self.primes:
        resulting_rhythm = tools.accumulate_from_zero(
            self._pulse_rhythm_and_metricity_per_beat[0])[::prime]
        duration = sum(self._pulse_rhythm_and_metricity_per_beat[0])
        if resulting_rhythm[-1] != duration:
            resulting_rhythm.append(duration)
        resulting_rhythm = tuple(
            fractions.Fraction(b - a)
            for a, b in zip(resulting_rhythm, resulting_rhythm[1:]))
        metricities = self._pulse_rhythm_and_metricity_per_beat[1][::prime]
        data_per_prime.update({prime: (resulting_rhythm, metricities)})
    return data_per_prime
def alternating_hands(seed_rhythm: tuple) -> tuple:
    """Distributes seed_rhythm on right and left hand.

    seed_rhythm is expected to be written in relative form.
    """
    n_elements = len(seed_rhythm)
    absolute_rhythm = tools.accumulate_from_zero(seed_rhythm + seed_rhythm)
    cycle = itertools.cycle((True, False))
    distribution = tuple(next(cycle) for n in range(n_elements))
    distribution += __mirror(distribution)
    right, left = [], []
    for idx, dis in enumerate(distribution):
        item = absolute_rhythm[idx]
        if dis:
            right.append(item)
        else:
            left.append(item)
    return (tuple(right), tuple(left)), absolute_rhythm[-1]
def find_start_and_duration_and_volume(self) -> tuple:
    """1. start values, 2. duration per attack, 3. volume per attack"""
    attack_indices, weight_per_attack = self.find_attack_indices()

    has_first_attack = True
    if 0 not in attack_indices:
        attack_indices = (0,) + attack_indices
        has_first_attack = False

    duration_values = tuple(
        (b - a) * self.segment._tempo_factor
        for a, b in zip(
            attack_indices,
            attack_indices[1:] + (int(self.segment._duration),)))
    start_values = tools.accumulate_from_zero(duration_values)

    if not has_first_attack:
        start_values = start_values[1:]
        duration_values = duration_values[1:]

    volume_per_attack = tools.scale(weight_per_attack, *self._volume_range)
    return start_values, duration_values, volume_per_attack
def interpolate_by_n_points(self, n_points: int) -> tuple:
    duration = self.duration
    points = tuple(range(n_points + 1))
    point_position_per_interpolation = tools.accumulate_from_zero(
        tuple(float(point.delay / duration) * n_points for point in self[:-1]))
    point_position_per_interpolation = tuple(
        tools.find_closest_index(item, points)
        for item in point_position_per_interpolation)
    points_per_interpolation = tuple(
        b - a
        for a, b in zip(
            point_position_per_interpolation,
            point_position_per_interpolation[1:]))
    interpolations = tuple(
        item0.interpolate(item1, points + 1)[:-1]
        for item0, item1, points in zip(
            self, self[1:-1], points_per_interpolation))
    interpolations += (
        self[-2].interpolate(self[-1], points_per_interpolation[-1]),)
    return tuple(functools.reduce(operator.add, interpolations))
def render(self, name: str) -> None:
    self.server.recordOptions(
        dur=self.duration, filename="{}.wav".format(name), sampletype=4)

    import random as random_ambient_noise_lv

    random_ambient_noise_lv.seed(1)

    n_events = tools.find_closest_index(
        self.duration, tools.accumulate_from_zero(self.duration_per_event))
    duration_per_event = self.duration_per_event[:n_events]
    sample_path_per_event = self.sample_path_per_event[:n_events]

    lv_per_effect = {
        "{}_lv".format(effect): tuple(
            next(self.level_per_effect[effect])
            if self.activity_object_per_effect[effect](
                self.activity_lv_per_effect[effect]) else 0
            for i in duration_per_event)
        for effect in self.__effects
    }

    ambient_noise_lv_per_event = tuple(
        random_ambient_noise_lv.uniform(0.2, 0.4) for i in duration_per_event)
    ambient_noise_lv_per_event = tuple(
        (a, b) for a, b in zip(
            (0,) + ambient_noise_lv_per_event, ambient_noise_lv_per_event))

    # general dynamic level for each slice
    event_lv = tuple(
        self.volume * lv for lv in self.curve(n_events, "points"))

    ################################################################
    # controlling different dsp parameters
    filter_freq_per_event = tuple(
        next(self.filter_freq_maker) for i in duration_per_event)
    filter_q_per_event = tuple(
        next(self.filter_q_maker) for i in duration_per_event)
    rm_freq_per_event = tuple(
        next(self.rm_freq_maker) for i in duration_per_event)
    transpo_per_event = tuple(
        next(self.transpo_maker) for i in duration_per_event)
    chenlee_chaos_per_event = tuple(
        next(self.chenlee_chaos_maker) for i in duration_per_event)
    chenlee_pitch_per_event = tuple(
        next(self.chenlee_pitch_maker) for i in duration_per_event)
    lorenz_chaos_per_event = tuple(
        next(self.lorenz_chaos_maker) for i in duration_per_event)
    lorenz_pitch_per_event = tuple(
        next(self.lorenz_pitch_maker) for i in duration_per_event)
    ################################################################

    e = synthesis.pyo.Events(
        instr=SlicePlayer,
        path=sample_path_per_event,
        dur=duration_per_event,
        lv=event_lv,
        filter_freq=filter_freq_per_event,
        filter_q=filter_q_per_event,
        rm_freq=rm_freq_per_event,
        h_transpo=transpo_per_event,
        chenlee_chaos=chenlee_chaos_per_event,
        chenlee_pitch=chenlee_pitch_per_event,
        lorenz_chaos=lorenz_chaos_per_event,
        lorenz_pitch=lorenz_pitch_per_event,
        ambient_noise_lv=ambient_noise_lv_per_event,
        **lv_per_effect,
    )
    e.play()
    self.server.start()
def absolute_bar_durations(self) -> tuple:
    return tools.accumulate_from_zero(
        tuple(fractions.Fraction(ts.duration) for ts in self.bars))
def from_melody(
    cls,
    melody: old.Melody,
    bars: tuple,
    max_rest_size_to_ignore: fractions.Fraction = fractions.Fraction(1, 4),
    maximum_deviation_from_center: float = 0.5,
) -> "Bread":
    try:
        assert (
            maximum_deviation_from_center >= 0
            and maximum_deviation_from_center <= 1
        )
    except AssertionError:
        msg = "maximum_deviation_from_center has to be in range 0-1"
        raise ValueError(msg)

    adapted_melody = melody.tie().discard_rests(max_rest_size_to_ignore)

    smallest_unit_to_split = min(
        t.delay for t in adapted_melody if not t.pitch.is_empty
    )
    if smallest_unit_to_split.numerator == 1:
        smallest_unit = fractions.Fraction(
            1, smallest_unit_to_split.denominator * 2
        )
    else:
        smallest_unit = fractions.Fraction(1, smallest_unit_to_split.denominator)

    position_metricity_pairs_per_bar = (
        cls._get_position_metricity_pairs(
            (ts, globals_.TIME_SIGNATURES2COMPOSITION_STRUCTURES[ts]),
            smallest_unit,
        )
        for ts in bars
    )
    positions, metricities = zip(
        *tuple(functools.reduce(operator.add, position_metricity_pairs_per_bar))
    )
    positions = tools.accumulate_from_zero(positions)[:-1]

    slices = []
    for tone in adapted_melody.convert2absolute():
        if tone.pitch.is_empty:
            mp = None
            slices.append(Slice(tone.delay, tone.duration, False, mp))
        else:
            mp = tone.pitch

            # figure out where to split the tone:
            # (1) find possible split-position candidates
            dev_range = (tone.duration - tone.delay) / 2
            center = dev_range + tone.delay
            actual_dev = dev_range * maximum_deviation_from_center
            dev0, dev1 = center - actual_dev, center + actual_dev
            available_split_positions = tuple(
                (pos, met)
                for pos, met in zip(positions, metricities)
                if pos > dev0 and pos < dev1
            )

            # (2) choose the one with the highest metricity
            split_position = max(
                available_split_positions, key=operator.itemgetter(1)
            )[0]

            # add both slices
            for start, stop, does_slice_start_overlap_with_attack in (
                (tone.delay, split_position, True),
                (split_position, tone.duration, False),
            ):
                slices.append(
                    Slice(start, stop, does_slice_start_overlap_with_attack, mp)
                )

    return cls(*slices)
def convert2score(
    self,
    reference_pitch: int = 0,
    stretch_factor: float = 1,
    n_divisions: int = 8,
    min_tone_size: fractions.Fraction = 0,
    min_rest_size: fractions.Fraction = fractions.Fraction(1, 10),
) -> None:
    pitches, delays = self.quantizise(
        stretch_factor=stretch_factor,
        n_divisions=n_divisions,
        min_tone_size=min_tone_size,
        min_rest_size=min_rest_size,
    )
    bar_grid = tuple(fractions.Fraction(1, 1) for i in range(15))
    grid = tuple(fractions.Fraction(1, 4) for i in range(50))
    notes = abjad.Voice([])
    absolute_delay = tools.accumulate_from_zero(delays)
    for pitch, delay, start, stop in zip(
            pitches, delays, absolute_delay, absolute_delay[1:]):
        seperated_by_bar = tools.accumulate_from_n(
            lily.seperate_by_grid(start, stop, bar_grid, hard_cut=True), start)
        sub_delays = functools.reduce(
            operator.add,
            tuple(
                functools.reduce(
                    operator.add,
                    tuple(
                        lily.seperate_by_assignability(d)
                        for d in lily.seperate_by_grid(start, stop, grid)),
                )
                for start, stop in zip(seperated_by_bar, seperated_by_bar[1:])),
        )
        subnotes = []
        if pitch.is_empty:
            ct = None
        else:
            ct = pitch.cents / 100
            # round to 12th tone
            ct = round(ct * 6) / 6
            ct += reference_pitch
        for delay in sub_delays:
            if ct is None:
                obj = abjad.Rest(delay)
            else:
                obj = abjad.Note(ct, delay)
            subnotes.append(obj)
        if ct is not None and len(subnotes) > 1:
            for note in subnotes[:-1]:
                abjad.attach(abjad.Tie(), note)
        notes.extend(subnotes)

    score = abjad.Score([notes])
    with open("{}.ly".format(self.name), "w") as f:
        f.write('\\version "2.19.83"\n')
        f.write(lily.EKMELILY_PREAMBLE)
        f.write("\n")
        f.write(format(score))
    subprocess.call(["lilypond", "{}.ly".format(self.name)])
def notate(self, name: str) -> None:
    pitches, delays = self.pitch, self.delay
    bar_grid = tuple(
        fractions.Fraction(ts.numerator, ts.denominator) for ts in self.bars
    )
    grid = tuple(
        fractions.Fraction(1, 4)
        for i in range(int(math.ceil(self.duration / fractions.Fraction(1, 4))))
    )
    notes = abjad.Voice([])
    absolute_delay = tools.accumulate_from_zero(delays)
    for pitch, delay, start, stop in zip(
        pitches, delays, absolute_delay, absolute_delay[1:]
    ):
        seperated_by_bar = tools.accumulate_from_n(
            lily.seperate_by_grid(start, stop, bar_grid, hard_cut=True), start
        )
        sub_delays = functools.reduce(
            operator.add,
            tuple(
                functools.reduce(
                    operator.add,
                    tuple(
                        lily.seperate_by_assignability(d)
                        for d in lily.seperate_by_grid(start, stop, grid)
                    ),
                )
                for start, stop in zip(seperated_by_bar, seperated_by_bar[1:])
            ),
        )
        subnotes = []
        if pitch.is_empty:
            ct = None
        else:
            if self.ratio2pitchclass_dict:
                ct = lily.convert2abjad_pitch(pitch, self.ratio2pitchclass_dict)
            else:
                ct = lily.round_cents_to_12th_tone(pitch.cents)
        for delay in sub_delays:
            if ct is None:
                obj = abjad.Rest(delay)
            else:
                obj = abjad.Note(ct, delay)
            subnotes.append(obj)
        if ct is not None and len(subnotes) > 1:
            for note in subnotes[:-1]:
                abjad.attach(abjad.Tie(), note)
        notes.extend(subnotes)

    abjad.attach(
        abjad.LilyPondLiteral("\\accidentalStyle dodecaphonic", "before"), notes[0]
    )
    abjad.attach(self.bars[0], notes[0], context="Voice")
    score = abjad.Score([notes])
    lf = abjad.LilyPondFile(
        score,
        lilypond_version_token=abjad.LilyPondVersionToken("2.19.83"),
        includes=["lilypond-book-preamble.ly"],
    )
    lily_name = "{}.ly".format(name)
    with open(lily_name, "w") as f:
        f.write(lily.EKMELILY_PREAMBLE)
        f.write(format(lf))
    subprocess.call(["lilypond", "--png", "-dresolution=400", lily_name])
def concatenate(self) -> None:
    start_positions_of_tracks_for_first_segment = tuple(
        getattr(self.segments[0], track.name)["start"]
        for track in self.orchestration)
    minima_start_position_of_tracks_for_first_segment = min(
        start_positions_of_tracks_for_first_segment)
    added_value_for_start_position_for_first_segment = abs(
        minima_start_position_of_tracks_for_first_segment)

    adapted_duration_per_segment = list(
        segment.duration for segment in self.segments)
    for idx, segment in enumerate(self.segments[1:]):
        adapted_duration_per_segment[idx] += segment.start
    adapted_duration_per_segment[
        0] += added_value_for_start_position_for_first_segment

    start_position_per_segment = tuple(
        position + self.segments[0].start
        for position in tools.accumulate_from_zero(adapted_duration_per_segment))

    for start_position in start_position_per_segment:
        try:
            assert start_position >= 0
        except AssertionError:
            msg = "Segment has a too low start value."
            raise ValueError(msg)

    orc_name = ".concatenate"
    sco_name = ".concatenate"

    processes = []

    print("CONCATENATING TRACKS")
    with progressbar.ProgressBar(max_value=len(self.orchestration)) as bar:
        for track_idx, track in enumerate(self.orchestration):
            local_orc_name = "{}_{}.orc".format(orc_name, track_idx)
            local_sco_name = "{}_{}.sco".format(sco_name, track_idx)

            with open(local_orc_name, "w") as f:
                f.write(self._make_sampler_orc(n_channels=1))

            relevant_data = []  # start, duration, path

            is_first_segment = True
            for start_position_of_segment, segment in zip(
                    start_position_per_segment, self.segments):
                path = "{}/{}/{}.wav".format(self.name, segment.name, track.name)
                start_position = (
                    start_position_of_segment
                    + getattr(segment, track.name)["start"])
                if is_first_segment:
                    start_position += (
                        added_value_for_start_position_for_first_segment)

                duration = getattr(segment, track.name)["duration"]
                if duration < segment.duration:
                    duration = segment.duration
                duration += self.tail

                relevant_data.append((start_position, duration, path))
                is_first_segment = False

            sco = " \n".join(
                tuple('i1 {} {} "{}" 1'.format(*d) for d in relevant_data))

            with open(local_sco_name, "w") as f:
                f.write(sco)

            sf_name = "{}/{}/{}.wav".format(
                self.name, self._concatenated_path, track.name)

            processes.append(
                csound.render_csound(sf_name, local_orc_name, local_sco_name))

            bar.update(track_idx)

    for process in processes:
        process.wait()
def convert_int_rhythm2binary_rhythm(int_rhythm: tuple) -> tuple:
    size = sum(int_rhythm)
    indices = tools.accumulate_from_zero(int_rhythm)
    return tuple(1 if idx in indices else 0 for idx in range(size))
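# Usage sketch (illustrative): with tools.accumulate_from_zero yielding
# cumulative sums starting at 0, e.g. (2, 3, 1) -> (0, 2, 5, 6), an integer
# rhythm is expanded to a binary onset pattern:
#
#     convert_int_rhythm2binary_rhythm((2, 3, 1))
#     # -> (1, 0, 1, 0, 0, 1)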
def split_by_structure(
    nth_event: int,
    split_to_n_items: int,
    novent_line: lily.NOventLine,
    verse_maker: mus.SegmentMaker,
    change_novent_line: bool = True,
    set_n_novents2rest: int = 0,
    adapt_by_changed_structure: bool = False,
) -> None:
    assert split_to_n_items > 0

    if novent_line[nth_event].pitch:
        novent_line.delay
        absolute_novent_line = tools.accumulate_from_zero(
            tuple(ev.delay for ev in novent_line))
        start, stop = (
            fractions.Fraction(absolute_novent_line[nth_event]),
            fractions.Fraction(absolute_novent_line[nth_event + 1]),
        )

        if adapt_by_changed_structure:
            bar_idx = verse_maker.violin.get_responsible_bar_index(start)
            time_difference = (
                verse_maker.time_distance_to_original_structure_per_bar[bar_idx])
            start += time_difference
            stop += time_difference

        sml = verse_maker.transcription.spread_metrical_loop
        instruments = tuple(
            globals_.PITCH2INSTRUMENT[pitch.normalize()]
            for pitch in novent_line[nth_event].pitch)
        available_positions = tuple(
            set(
                functools.reduce(
                    operator.add,
                    (sml.get_rhythm_metricity_pairs_for_instrument(
                        instrument, start, stop) for instrument in instruments),
                )))

        if len(available_positions) < split_to_n_items:
            msg = "The event can only be split into {} items ".format(
                len(available_positions))
            msg += "because no more positions are available."
            logging.warn(msg)

        if len(available_positions) > 1:
            start_positions_of_splitted_attack = [start]
            for position_and_metricity in tuple(
                    filter(
                        lambda pos: pos[0] != start and pos[0] != stop,
                        sorted(
                            available_positions,
                            key=operator.itemgetter(1),
                            reverse=True),
                    ))[:split_to_n_items - 1]:
                position, _ = position_and_metricity
                start_positions_of_splitted_attack.append(position)

            sorted_start_positions = sorted(start_positions_of_splitted_attack)
            durations = tuple(
                b - a
                for a, b in zip(
                    sorted_start_positions, sorted_start_positions[1:] + [stop]))
            return split(
                nth_event,
                novent_line,
                *durations,
                change_novent_line=change_novent_line,
                set_n_novents2rest=set_n_novents2rest,
            )

    else:
        msg = "Can't split rests by pauses! Function can only split events "
        msg += "that contain pitch information."
        logging.warn(msg)

    if not change_novent_line:
        return [novent_line[nth_event].copy()]