def _synthezise(name: str, instrument: str, scale: tuple) -> None: """make short sound files for string players to get used to intonation""" # (1) generate file where scale get played up & down duration_per_tone = 1.5 rest_duration = 0.75 melody = old.Melody([]) for pitch in scale + tuple(reversed(scale))[1:]: melody.append(old.Tone(pitch, duration_per_tone)) melody.append(old.Tone(mel.TheEmptyPitch, rest_duration)) synth = synthesis.SimpleCsoundSinePlayer(melody) # for debugging: # synth.remove_files = False # synth.print_output = True synth.concert_pitch = globals_.CONCERT_PITCH synth.render("{}/soundfiles/wav/scale_{}".format(name, instrument)) instrument_path = "{}/soundfiles/wav/{}".format(name, instrument) tools.igmkdir(instrument_path) # (2) generate for each scale degree one file single_tone_duration = 3 duration_per_tone_rest_duration = 0.75 for idx, pitch in enumerate(scale): melody = old.Melody( [ old.Tone(pitch, single_tone_duration), old.Tone(mel.TheEmptyPitch, duration_per_tone_rest_duration), ] ) synth = synthesis.SimpleCsoundSinePlayer(melody) synth.concert_pitch = globals_.CONCERT_PITCH synth.render("{}/{}".format(instrument_path, idx + 1))
def test_set_item(self):
    """Melody items can be replaced through index assignment."""
    tone_a = old.Tone(ji.r(1, 1), rhy.Unit(2))
    tone_b = old.Tone(ji.r(2, 1), rhy.Unit(2))
    target = old.Melody([tone_a, tone_b])
    expected = old.Melody([tone_b, tone_a])
    for position in (0, 1):
        target[position] = expected[position]
    self.assertEqual(target, expected)
def _filter_raw_data_and_convert2melody(raw_data: tuple) -> old.Melody:
    """Convert raw (pitch, start, stop, volume) tuples to a relative Melody.

    Gaps between consecutive tones become explicit rests; overlapping tones
    shorten the previous tone.  A leading rest is stripped from the result.

    :param raw_data: iterable of (pitch, start, stop_time, volume) tuples,
        assumed to be sorted by start time -- TODO confirm with callers.
    :return: an old.Melody in relative time measure.
    """
    melody = old.Melody([], time_measure="absolute")
    for tone in raw_data:
        pitch, start, stop_time, volume = tone
        if melody:
            # NOTE(review): in absolute time measure `.duration` appears to
            # hold the stop time of the last tone -- verify against old.Tone.
            stop_last_tone = melody[-1].duration
            difference = start - stop_last_tone
            if difference > 0:
                # gap between tones: fill it with an explicit rest
                melody.append(old.Tone(mel.TheEmptyPitch, stop_last_tone, start))
            elif difference < 0:
                # overlap: truncate the previous tone to the new start
                melody[-1].duration += difference
        else:
            # first event doesn't begin at zero: prepend a rest
            if start != 0:
                melody.append(old.Tone(mel.TheEmptyPitch, 0, start))
        melody.append(old.Tone(pitch, start, stop_time, volume=volume))
    melody = melody.convert2relative()
    # drop a leading rest, if any
    if melody[0].pitch.is_empty:
        melody = melody[1:]
    return melody
def test_tie(self):
    """tie() merges adjacent equal-pitch tones and leaves others alone."""
    # three identical tones collapse into one tone of triple length
    expected_all_tied = old.Melody(
        [old.Tone(self.t0.pitch, self.t0.delay * 3)]
    )
    self.assertEqual(self.melody0.tie(), expected_all_tied)
    # only the leading pair shares a pitch, so only it is merged
    partially_tied = old.Melody([self.t0, self.t0, self.t1])
    expected_partial = old.Melody(
        [old.Tone(self.t0.pitch, self.t0.delay * 2), self.t1]
    )
    self.assertEqual(partially_tied.tie(), expected_partial)
    # alternating pitches: nothing to merge, melody stays unchanged
    untieable = old.Melody([self.t0, self.t1, self.t0])
    self.assertEqual(untieable.tie(), untieable)
def solve(self, a: int, b: int, x: int, mode: modes.Mode, rhythm: tuple) -> tuple:
    """Build the three voice lines (low, middle, high) for this expansion.

    :param a: first prime of the harmonic material.
    :param b: second prime.
    :param x: third prime.
    :param mode: mode supplying the prime triple (x, y, z).
    :param rhythm: (bar_size, subdivision); bar_size must be >= 5 and
        subdivision >= 1.
    :return: 3-tuple (low_line, middle_line, high_line); for subdivision > 1
        the middle/high elements are generators of old.Tone objects.
    :raises ValueError: if the rhythmic structure is too small.
    """
    # validate rhythm; assert-in-try is kept to preserve the public
    # ValueError contract
    try:
        assert all((rhythm[0] >= 5, rhythm[1] >= 1))
    except AssertionError:
        msg = "Rhythmical structure has to be bigger than (4, 0)."
        raise ValueError(msg)
    # reorder the primes according to the instance's main/side roles;
    # x becomes whichever mode prime is neither a nor b
    primes = (a, b, x)
    a, b = primes[self.main_prime], primes[self.side_prime]
    x = tuple(p for p in (mode.x, mode.y, mode.z) if p not in (a, b))[0]
    # two candidate pitch orderings; pick the one with the higher score
    # at index 2 -- presumably a fitness value, verify in
    # FlippedEx.detect_data_for_pitches
    data0 = FlippedEx.detect_data_for_pitches(
        ((x, b), (a, b), (a, x), (b, x), (b, a), (x, a)), mode)
    data1 = FlippedEx.detect_data_for_pitches(
        ((a, x), (a, b), (x, b), (x, a), (b, a), (b, x)), mode)
    if data0[2] > data1[2]:
        data = data0
    else:
        data = data1
    if rhythm[1] == 1:
        # no subdivision: distribute the middle pitches euclideanly,
        # low and high voices just rest
        rhythms_middle = tools.euclid(rhythm[0], 5)
        rhythms_else = rhythm[0]
        middle = tuple(
            old.Tone(p, r)
            for p, r in zip((mel.TheEmptyPitch, ) + data[0], rhythms_middle))
        return ((old.Rest(rhythms_else), ), middle, (old.Rest(rhythms_else), ))
    else:
        complete_duration = rhythm[0] * rhythm[1]
        amount_pitches = len(data[0]) + len(
            data[1]) + 1  # middle + high + rest
        # metrically ranked attack positions for the bar; alternate them
        # between the middle and high voices
        rhythmic_ranking = indispensability.bar_indispensability2indices(
            indispensability.indispensability_for_bar(
                rhythm))[:amount_pitches]
        positions_middle = rhythmic_ranking[::2]
        positions_high = (0, ) + rhythmic_ranking[1::2]
        low_line = (old.Rest(complete_duration), )
        # durations are the deltas between consecutive attack positions,
        # closed off by the bar's complete duration
        middle_line = (old.Tone(p, r) for p, r in zip(
            (mel.TheEmptyPitch, ) + data[0],
            (b - a for a, b in zip(
                positions_middle,
                positions_middle[1:] + (complete_duration, ),
            )),
        ))
        high_line = (old.Tone(p, r) for p, r in zip(
            (mel.TheEmptyPitch, ) + data[1],
            (b - a for a, b in zip(positions_high,
                                   positions_high[1:] + (complete_duration, ))),
        ))
        return (low_line, middle_line, high_line)
def test_split(self):
    """split() separates each tone's sounding part from its trailing rest."""
    sounding0 = old.Tone(ji.r(1, 1, 2), rhy.Unit(2), rhy.Unit(1))
    sounding1 = old.Tone(ji.r(1, 1, 2), rhy.Unit(3), rhy.Unit(1))
    source = old.Melody([sounding0, sounding1])
    # after splitting, every tone's delay equals its duration and the
    # remainder of the original delay shows up as an explicit rest
    shortened = old.Tone(ji.r(1, 1, 2), rhy.Unit(1), rhy.Unit(1))
    expected = old.Melody(
        [
            shortened,
            old.Rest(rhy.Unit(1)),
            old.Tone(ji.r(1, 1, 2), rhy.Unit(1), rhy.Unit(1)),
            old.Rest(rhy.Unit(2)),
        ]
    )
    self.assertEqual(source.split(), expected)
def test_convert2relative(self):
    """Absolute (start, stop) tones convert back to relative delays."""
    absolute_tones = tuple(
        old.Tone(self.p0, self.d0 * step, self.d0 * (step + 1))
        for step in range(3)
    )
    absolute_melody = old.Melody(absolute_tones, time_measure="absolute")
    self.assertEqual(absolute_melody.convert2relative(), self.melody0)
def test_find_exact_simultan_events(self):
    """find_exact_simultan_events clips overlapping tones to the query tone."""
    polyphon = old.Polyphon(
        (
            old.Melody([old.Tone(ji.r(1, 1), 2), old.Tone(ji.r(1, 1), 3)]),
            old.Melody([old.Tone(ji.r(3, 2), 3), old.Tone(ji.r(3, 2), 2)]),
            old.Melody([old.Tone(ji.r(4, 3), 1), old.Tone(ji.r(4, 3), 2)]),
        )
    )
    # overlaps with the second tone of voice 0 get cut to its time span
    expected_clipped = (
        old.Tone(ji.r(3, 2), 1, 1),
        old.Tone(ji.r(3, 2), 2, 2),
        old.Tone(ji.r(4, 3), 1, 1),
    )
    self.assertEqual(
        polyphon.find_exact_simultan_events(0, 1), expected_clipped
    )
    self.assertEqual(
        self.poly0.find_exact_simultan_events(0, 0), (self.poly0[1][0],)
    )
    # with convert2relative=False the result stays in absolute time
    self.assertEqual(
        self.poly0.find_exact_simultan_events(0, 0, False),
        (self.poly0[1].convert2absolute()[0],),
    )
    self.assertEqual(
        self.poly1.find_exact_simultan_events(1, 0),
        (self.poly1[2][0], self.poly1[2][0]),
    )
    self.assertEqual(
        self.poly1.find_exact_simultan_events(1, 1),
        (self.t8, self.t7, self.t7, self.t7, self.t2),
    )
def __call__(self, melody: old.Melody) -> old.Melody:
    """Return a copy of *melody* where some tones are split into tremoli.

    For every non-rest tone, a stateful decider chooses whether to replace
    the tone by a run of shorter attacks whose durations come from a
    per-tone size generator; the run is adjusted so its total duration
    exactly equals the original tone's delay.

    :param melody: source melody (not modified).
    :return: new old.Melody with tremolo attacks substituted in.
    """
    new_melody = old.Melody([])
    for tone in melody:
        # rests are never tremolized; the decider is an infinite iterator
        # of booleans consumed one value per candidate tone
        if not tone.pitch.is_empty and next(self.__add_tremolo_decider):
            rhythm = tone.delay
            duration_per_attack = []
            tremolo_size_generator = next(
                self.__tremolo_size_generator_per_tone)
            # accumulate attack durations until the tone's delay is covered
            while sum(duration_per_attack) < rhythm:
                duration_per_attack.append(next(tremolo_size_generator))
            if len(duration_per_attack) > 1:
                # drop the overshooting attack and stretch the new last
                # attack so the sum matches the original delay exactly
                duration_per_attack = duration_per_attack[:-1]
                difference = rhythm - sum(duration_per_attack)
                duration_per_attack[-1] += difference
            else:
                # single attack longer than the tone: shrink it to fit
                difference = sum(duration_per_attack) - rhythm
                duration_per_attack[-1] -= difference
            for duration in duration_per_attack:
                new_melody.append(
                    old.Tone(tone.pitch.copy(), duration, volume=tone.volume))
        else:
            new_melody.append(tone.copy())
    return new_melody
def __call__(
    self,
    melody: old.Melody,
    is_consonance_per_tone: tuple,
    spectrum_profile_per_tone: tuple,
) -> old.Melody:
    """Tremolize selected tones while keeping per-tone metadata aligned.

    Like the plain tremolo transform, but also extends the parallel
    per-tone consonance and spectrum-profile sequences so they stay in
    lockstep with the expanded melody.  Added tremolo attacks are marked
    as dissonant (False) and played at a reduced volume.

    :param melody: source melody (not modified).
    :param is_consonance_per_tone: one bool per tone of *melody*.
    :param spectrum_profile_per_tone: one profile per tone of *melody*.
    :return: (new_melody, new_is_consonance_per_tone,
        new_spectrum_profile_per_tone).  NOTE(review): the annotated return
        type old.Melody is inaccurate; the method returns a 3-tuple.
    """
    new_melody = old.Melody([])
    new_is_consonance_per_tone = []
    new_spectrum_profile_per_tone = []
    for tone, is_consonance, spectrum_profile in zip(
        melody, is_consonance_per_tone, spectrum_profile_per_tone
    ):
        # the original tone always keeps its metadata entry
        new_is_consonance_per_tone.append(is_consonance)
        new_spectrum_profile_per_tone.append(spectrum_profile)
        # optionally restrict tremoli to consonant pitches; rests are
        # never tremolized
        test0 = is_consonance or not self.__only_on_non_dissonant_pitches
        test0 = test0 and not tone.pitch.is_empty
        if test0 and next(self.__add_tremolo_decider):
            rhythm = tone.delay
            duration_per_attack = []
            tremolo_size_generator = next(self.__tremolo_size_generator_per_tone)
            # accumulate attack durations until the tone's delay is covered
            while sum(duration_per_attack) < rhythm:
                duration_per_attack.append(next(tremolo_size_generator))
            if len(duration_per_attack) > 1:
                # drop the overshooting attack and stretch the last one
                duration_per_attack = duration_per_attack[:-1]
                difference = rhythm - sum(duration_per_attack)
                duration_per_attack[-1] += difference
            else:
                # single attack longer than the tone: shrink it to fit
                difference = sum(duration_per_attack) - rhythm
                duration_per_attack[-1] -= difference
            is_first = True
            for duration in duration_per_attack:
                if not is_first:
                    # repeated attacks get their own metadata entries and
                    # a softer volume
                    new_spectrum_profile_per_tone.append(spectrum_profile)
                    new_is_consonance_per_tone.append(False)
                    volume = tone.volume * self.__tremolo_volume_factor
                else:
                    volume = tone.volume
                new_melody.append(
                    old.Tone(tone.pitch.copy(), duration, volume=volume)
                )
                is_first = False
        else:
            new_melody.append(tone.copy())
    return (
        new_melody,
        tuple(new_is_consonance_per_tone),
        tuple(new_spectrum_profile_per_tone),
    )
def test_cut_up_by_idx(self):
    """cut_up_by_idx extracts the span of one tone across all voices."""
    source = old.Polyphon(
        (
            old.Melody([old.Tone(ji.r(1, 1), 2), old.Tone(ji.r(1, 1), 3)]),
            old.Melody([old.Tone(ji.r(3, 2), 3), old.Tone(ji.r(3, 2), 2)]),
            old.Melody([old.Tone(ji.r(4, 3), 1), old.Tone(ji.r(4, 3), 2)]),
        )
    )
    # span of tone 1 in voice 2 (time 1..3), cut out of every voice
    expected = old.Polyphon(
        (
            old.Melody([old.Tone(ji.r(1, 1), 1), old.Tone(ji.r(1, 1), 1)]),
            old.Melody([old.Tone(ji.r(3, 2), 2)]),
            old.Melody([old.Tone(ji.r(4, 3), 2)]),
        )
    )
    self.assertEqual(source.cut_up_by_idx(2, 1), expected)
def test_cut_up_by_time(self):
    """cut_up_by_time slices a melody between two absolute time points."""
    long_low = old.Tone(ji.r(1, 1), rhy.Unit(2))
    long_high = old.Tone(ji.r(2, 1), rhy.Unit(2))
    short_low = old.Tone(ji.r(1, 1), rhy.Unit(1))
    rest = old.Rest(1)
    source = old.Melody([long_low, long_high, long_high, long_low, long_high])
    # plain slice starting exactly on a tone boundary
    self.assertEqual(
        source.cut_up_by_time(2, 8),
        old.Melody([long_high, long_high, long_low]),
    )
    # slice starting mid-tone: by default the partial tone becomes a rest
    self.assertEqual(
        source.cut_up_by_time(1, 8),
        old.Melody([rest, long_high, long_high, long_low]),
    )
    # add_earlier keeps the partial tone (shortened) instead of a rest
    self.assertEqual(
        source.cut_up_by_time(1, 8, add_earlier=True),
        old.Melody([short_low, long_high, long_high, long_low]),
    )
    # hard_cut truncates a tone crossing the end boundary
    self.assertEqual(
        source.cut_up_by_time(2, 7, hard_cut=True),
        old.Melody([long_high, long_high, short_low]),
    )
    # without hard_cut the crossing tone is kept whole
    self.assertEqual(
        source.cut_up_by_time(2, 7, hard_cut=False),
        old.Melody([long_high, long_high, long_low]),
    )
def _convert_data2melody(
        data: tuple, name: str,
        tempo_estimation_method: str = "essentia") -> tuple:
    """Convert raw tone data into a tempo-normalized tuple of tones.

    Same gap/overlap handling as _filter_raw_data_and_convert2melody, then
    all delays and durations are scaled by the estimated tempo so the
    result is expressed in beats rather than seconds.

    :param data: iterable of (pitch, start, stop_time, volume) tuples,
        assumed sorted by start time -- TODO confirm with callers.
    :param name: identifier passed to the tempo estimator.
    :param tempo_estimation_method: backend for tempo detection.
    :return: tuple of old.Tone objects in relative, tempo-scaled time.
    """
    tempo = Transcription.estimate_tempo(name, method=tempo_estimation_method)
    melody = old.Melody([], time_measure="absolute")
    for tone in data:
        pitch, start, stop_time, volume = tone
        if melody:
            # NOTE(review): in absolute time measure `.duration` appears to
            # hold the stop time of the last tone -- verify against old.Tone.
            stop_last_tone = melody[-1].duration
            difference = start - stop_last_tone
            if difference > 0:
                # gap between tones: fill it with an explicit rest
                melody.append(
                    old.Tone(mel.TheEmptyPitch, stop_last_tone, start))
            elif difference < 0:
                # overlap: truncate the previous tone to the new start
                melody[-1].duration += difference
        else:
            # first event doesn't begin at zero: prepend a rest
            if start != 0:
                melody.append(old.Tone(mel.TheEmptyPitch, 0, start))
        melody.append(old.Tone(pitch, start, stop_time, volume=volume))
    melody = melody.convert2relative()
    # drop a leading rest, if any
    if melody[0].pitch.is_empty:
        melody = melody[1:]
    # rescale from seconds to beats: tempo is in BPM, so one second
    # corresponds to tempo/60 beats
    factor = tempo / 60
    melody.delay = tuple(d * factor for d in melody.delay)
    melody.dur = tuple(d * factor for d in melody.dur)
    return tuple(melody)
def convert2voices(self, gender=True) -> tuple:
    """Render every voice as an old.Melody of normalized pitches.

    Each voice's tone duration is the length of the longest voice divided
    (integer division) by the voice's own length, so shorter voices get
    proportionally longer tones.
    """
    longest = max(len(voice) for voice in self.voices)
    melodies = []
    for voice in self.voices:
        tone_duration = longest // len(voice)
        melodies.append(
            old.Melody(
                [
                    old.Tone(
                        entry.convert2pitch(gender=gender).normalize(),
                        tone_duration,
                    )
                    for entry in voice
                ]
            )
        )
    return tuple(melodies)
def solve(self, a: int, b: int, x: int, mode: modes.Mode, rhythm: tuple) -> tuple:
    """Build the three voice lines (low, middle, high) for this expansion.

    :param a: first prime of the harmonic material.
    :param b: second prime.
    :param x: third prime.
    :param mode: mode supplying prime/pitch conversions.
    :param rhythm: (bar_size, subdivision) tuple.
    :return: (low_line, middle_line, high_line).
        NOTE(review): low_line is a bare old.Rest here, while
        FlippedEx.solve returns a 1-tuple of rests -- confirm callers
        accept both shapes.
    """
    # derive the boundary pitches from the start/stop prime pairs
    primes0, primes1 = self.detect_start_and_stop_primes(a, b, x, mode)
    pitch0, pitch1 = tuple(
        AbstractExpansion.transform_primes2pitches(p, mode)
        for p in (primes0, primes1))
    high_pitches = self.detect_high_pitches(primes0, primes1, mode)
    complete_duration = rhythm[0] * rhythm[1]
    low_line = old.Rest(complete_duration)
    # middle voice: one tone per step of the interpolated pitch path,
    # each lasting one subdivision
    middle_line = tuple(
        old.Tone(p, rhythm[1]) for p in ShiftedEx.detect_middle_pitches(
            pitch0, pitch1, rhythm[0]))
    high_line = ShiftedEx.make_high_line(high_pitches, rhythm)
    return (low_line, middle_line, high_line)
def _render_midi_diva(self, path: str) -> None:
    """Render one Diva MIDI file per voice that has a Diva engine.

    :param path: directory the midi files get written into.
    """
    # iterate at most three voices in lockstep with their volumes and
    # (possibly None) Diva engines
    for v_idx, voice, volume_per_tone, diva_engine in zip(
            range(3),
            self._counterpoint_result[1],
            self._attribute_maker_outer.volume_per_tone_per_voice,
            self._diva_engine_per_voice,
    ):
        if diva_engine is not None:
            # `voice` appears to be a (pitches, rhythms) pair; appending
            # the volumes and transposing via zip(*...) yields per-tone
            # (pitch, rhythm, volume) triples -- TODO confirm shape
            voice = tuple(
                old.Tone(p, r, volume=v)
                for p, r, v in zip(*(tuple(voice) + (volume_per_tone, ))))
            diva_path = "{}/diva{}{}.mid".format(path, self._gender_code, v_idx)
            diva_engine(voice, self._tempo_factor).render(diva_path)
def __init__(
    self,
    tempo_factor: float,
    pitches: tuple,
    rhythm: binr.Compound,
    discard_rests: bool = True,
):
    """Build the melody for the sound engine and initialize the base class.

    :param tempo_factor: scale factor applied to every rhythmic value.
    :param pitches: one pitch (possibly empty) per rhythmic value.
    :param rhythm: rhythmic values, one per pitch.
    :param discard_rests: drop rest events from the melody when True.
    """
    self.__tempo_factor = tempo_factor
    self.__pitches = pitches
    self.__rhythm = rhythm
    # empty pitches become rests; sounding pitches are anchored to the
    # global concert pitch
    melody = old.Melody(
        old.Tone(ji.JIPitch(p, multiply=globals.CONCERT_PITCH),
                 r * tempo_factor) if not p.is_empty else old.
        Rest(r * tempo_factor) for p, r in zip(pitches, rhythm))
    if discard_rests:
        melody = melody.discard_rests()
    super().__init__(melody, attack=0.08, decay=0.05, release=0.25)
def synthesize(
    self,
    stretch_factor: float = 1,
    n_divisions: int = 8,
    min_tone_size: fractions.Fraction = 0,
    min_rest_size: fractions.Fraction = fractions.Fraction(1, 10),
    concert_pitch: float = None,
    tie_notes: bool = False,
    remove_rests: bool = False,
) -> None:
    """Quantize the transcription and render it to a wav file via Pianoteq.

    :param stretch_factor: time-stretch applied during quantization.
    :param n_divisions: rhythmic grid resolution per beat.
    :param min_tone_size: minimum tone length kept by the quantizer.
    :param min_rest_size: minimum rest length kept by the quantizer.
    :param concert_pitch: reference frequency; defaults to the instance's
        concert pitch when None.
    :param tie_notes: merge adjacent equal-pitch tones when True.
    :param remove_rests: drop rests from the rendered melody when True.
    """
    # FIX: compare against the None sentinel explicitly instead of relying
    # on falsiness (`if not concert_pitch`), which would also have
    # overridden an explicitly passed 0/0.0.
    if concert_pitch is None:
        concert_pitch = self.concert_pitch
    pitches, delays = self.quantizise(
        stretch_factor=stretch_factor,
        n_divisions=n_divisions,
        min_tone_size=min_tone_size,
        min_rest_size=min_rest_size,
    )
    melody = old.Melody([old.Tone(p, d) for p, d in zip(pitches, delays)])
    if remove_rests:
        melody = melody.discard_rests()
    if tie_notes:
        melody = melody.tie()
    sequence = []
    for tone in melody:
        p = tone.pitch
        d = tone.delay
        # NOTE(review): mutates the tone's pitch object in place -- relies
        # on the melody being discarded afterwards
        p.multiply = concert_pitch
        d *= 4
        sequence.append(midiplug.PyteqTone(p, d, d))
    midiplug.Pianoteq(sequence).export2wav(
        "{}_transcription".format(self.name), preset='"Erard Player"')
def test_cut_up_by_time(self):
    """Polyphon.cut_up_by_time slices all voices between two time points."""
    source = old.Polyphon(
        (
            old.Melody([old.Tone(ji.r(1, 1), 2), old.Tone(ji.r(1, 1), 3)]),
            old.Melody([old.Tone(ji.r(3, 2), 3), old.Tone(ji.r(3, 2), 2)]),
            old.Melody([old.Tone(ji.r(4, 3), 1), old.Tone(ji.r(4, 3), 2)]),
        )
    )
    # default behaviour keeps (shortened) tones that started earlier
    with_earlier = old.Polyphon(
        (
            old.Melody([old.Tone(ji.r(1, 1), 1), old.Tone(ji.r(1, 1), 1)]),
            old.Melody([old.Tone(ji.r(3, 2), 2)]),
            old.Melody([old.Tone(ji.r(4, 3), 2)]),
        )
    )
    self.assertEqual(source.cut_up_by_time(1, 3), with_earlier)
    # add_earlier=False replaces earlier-started tones with rests
    without_earlier = old.Polyphon(
        (
            old.Melody([old.Rest(1), old.Tone(ji.r(1, 1), 1)]),
            old.Melody([old.Rest(2)]),
            old.Melody([old.Tone(ji.r(4, 3), 2)]),
        )
    )
    self.assertEqual(
        source.cut_up_by_time(1, 3, add_earlier=False), without_earlier
    )
    # hard_cut=False keeps tones crossing the boundaries whole
    soft = old.Polyphon(
        (
            old.Melody([old.Tone(ji.r(1, 1), 2), old.Tone(ji.r(1, 1), 3)]),
            old.Melody([old.Tone(ji.r(3, 2), 3)]),
            old.Melody([old.Rest(1), old.Tone(ji.r(4, 3), 2)]),
        )
    )
    self.assertEqual(
        source.cut_up_by_time(1, 3, hard_cut=False)[2], soft[2]
    )
def make_high_line(pitches: tuple, rhythm: tuple) -> tuple:
    """Build the high voice as a tuple of tones for the given rhythm.

    Supports subdivisions 1, 2 and 3 of the bar; any other subdivision
    raises ValueError.

    :param pitches: pitch material; index 0 is the anchor pitch,
        indices 1/2 are ornamental pitches.
    :param rhythm: (bar_size, subdivision) tuple.
    :return: tuple of old.Tone objects (rests tied and discarded).
    :raises ValueError: for unsupported subdivisions.
    :raises AssertionError: if the generated line has the wrong length
        (internal consistency check).
    """
    def convert_pattern2pitches(pattern: tuple) -> tuple:
        # NOTE(review): `if idx` is falsy for BOTH None and 0, so a 0 in a
        # pattern maps to TheEmptyPitch rather than pitches[0] -- confirm
        # this is intentional (pattern0/pattern1 below contain 0 entries).
        return tuple(pitches[idx] if idx else mel.TheEmptyPitch
                     for idx in pattern)

    if rhythm[1] == 1:
        # no subdivision: the high voice simply rests
        return (old.Rest(rhythm[0] * rhythm[1]), )
    elif rhythm[1] == 2:
        # alternate two 4-step patterns, then pad the line with an opening
        # rest and a closing pitch (plus an extra pair for even bar sizes)
        pattern0 = (0, 1, 0, None)
        pattern1 = (0, None, 0, 2)
        amount_pattern = (rhythm[0] - 1) // 2
        pattern_cycle = itertools.cycle(
            tuple(
                convert_pattern2pitches(p) for p in (pattern0, pattern1)))
        pitch_line = functools.reduce(
            operator.add,
            tuple(next(pattern_cycle) for n in range(amount_pattern)))
        if rhythm[0] % 2 == 0:
            pitch_line = (pitches[0], mel.TheEmptyPitch) + pitch_line
        pitch_line = (mel.TheEmptyPitch, ) + pitch_line + (pitches[-1], )
        # internal consistency check: line length must fill the bar exactly
        # NOTE(review): the message labels look swapped -- {0} is the
        # actual length, {1} the expected one.
        try:
            assert len(pitch_line) == rhythm[0] * rhythm[1]
        except AssertionError:
            raise AssertionError("exp: {0} but {1}".format(
                len(pitch_line), rhythm[0] * rhythm[1]))
        return tuple(
            old.Melody(old.Tone(p, 1)
                       for p in pitch_line).tie_pauses().discard_rests())
    elif rhythm[1] == 3:
        # three-step patterns: two ornamental variants plus a "dead"
        # pattern that only re-attacks the anchor pitch
        patternA = (mel.TheEmptyPitch, pitches[1], pitches[0])
        patternB = (mel.TheEmptyPitch, pitches[2], pitches[0])
        dead_pattern = (mel.TheEmptyPitch, mel.TheEmptyPitch, pitches[0])
        pattern_type_cycle = itertools.cycle((1, 0, 0))
        choice_cycle = itertools.cycle((0, 1))
        choices = tuple(
            next(choice_cycle) for n in range(((rhythm[0] - 1) // 2) * 2))
        if rhythm[0] % 2 == 0:
            choices = (0, ) + choices
        choices = (1, ) + choices
        # every third pattern (type 1) is the dead pattern; the rest
        # alternate between patternA and patternB
        pattern_types = (next(pattern_type_cycle) for n in choices)
        pitch_line = functools.reduce(
            operator.add,
            tuple((patternA, patternB)[choice] if ptype == 0 else dead_pattern
                  for choice, ptype in zip(choices, pattern_types)),
        )
        # internal consistency check (see NOTE above about label order)
        try:
            assert len(pitch_line) == rhythm[0] * rhythm[1]
        except AssertionError:
            raise AssertionError("exp: {0} but {1}".format(
                len(pitch_line), rhythm[0] * rhythm[1]))
        return tuple(
            old.Melody(old.Tone(p, 1)
                       for p in pitch_line).tie_pauses().discard_rests())
    else:
        msg = "No solution for rhythmic structure {0} available".format(
            rhythm)
        raise ValueError(msg)
class PolyTest(unittest.TestCase):
    """Tests for old.Polyphon (chordify, simultaneous events, cutting)."""

    # shared pitch material
    p0 = ji.r(5, 4)
    p1 = ji.r(3, 2)
    p2 = ji.r(1, 1)
    p3 = ji.r(6, 5)
    p4 = ji.r(7, 4)
    p5 = ji.r(9, 8)
    # shared tones (FIX: the duplicate second `t3 = ...` assignment removed)
    t0 = old.Tone(p0, rhy.Unit(1))
    t1 = old.Tone(p1, rhy.Unit(1))
    t2 = old.Tone(p2, rhy.Unit(1))
    t3 = old.Tone(p3, rhy.Unit(1))
    t4 = old.Tone(p4, rhy.Unit(1))
    t5 = old.Tone(p5, rhy.Unit(1))
    t6 = old.Tone(p0, rhy.Unit(2))
    t7 = old.Tone(p0, rhy.Unit(0.5))
    t8 = old.Tone(p0, rhy.Unit(1.5))
    t9 = old.Tone(p1, rhy.Unit(1.5))
    t10 = old.Tone(p5, rhy.Unit(0.5))
    t11 = old.Tone(p2, rhy.Unit(1))
    melody0 = old.Melody((t0, t1))
    melody1 = old.Melody((t2, t3))
    melody2 = old.Melody((t6, t6, t0, t7))  # duration 5.5
    melody3 = old.Melody((t7, t6, t2, t2))  # duration 4.5
    melody4 = old.Melody((t7, t7, t7, t2, t2))  # duration 3.5
    melody5 = old.Melody((t10, t9, t3, t8, t0))  # duration 5.5
    melody6 = old.Melody((t6, t6, t2, t7))  # duration 5.5
    poly0 = old.Polyphon([melody0, melody1])
    poly1 = old.Polyphon([melody2, melody3, melody4])
    poly2 = old.Polyphon([melody6, melody5])

    def test_chordify(self):
        """chordify merges simultaneous tones into chords."""
        chord0 = old.Chord(ji.JIHarmony([self.t0.pitch, self.t2.pitch]), rhy.Unit(1))
        chord1 = old.Chord(ji.JIHarmony([self.t1.pitch, self.t3.pitch]), rhy.Unit(1))
        cadence0 = old.Cadence([chord0, chord1])
        self.assertEqual(cadence0, self.poly0.chordify())
        chord0 = old.Chord(ji.JIHarmony([self.p0, self.p5]), rhy.Unit(0.5))
        chord1 = old.Chord(ji.JIHarmony([self.p0, self.p1]), rhy.Unit(1.5))
        chord2 = old.Chord(ji.JIHarmony([self.p0, self.p3]), rhy.Unit(1))
        chord3 = old.Chord(ji.JIHarmony([self.p0]), rhy.Unit(1))
        chord4 = old.Chord(ji.JIHarmony([self.p0, self.p2]), rhy.Unit(0.5))
        chord5 = old.Chord(ji.JIHarmony([self.p0]), rhy.Unit(0.5))
        expected = old.Cadence([chord0, chord1, chord2, chord3, chord4, chord4, chord5])
        result = self.poly2.chordify(
            harmony_class=ji.JIHarmony, cadence_class=old.Cadence, add_longer=True
        )
        # FIX: removed leftover debug `print(ex, re)` loop
        self.assertEqual(expected, result)

    def test_find_simultan_events(self):
        """find_simultan_events returns absolute-time overlapping events."""
        simultan_events0 = self.poly0.find_simultan_events(0, 0)
        self.assertEqual(simultan_events0, (self.poly0[1].convert2absolute()[0],))
        simultan_events1 = self.poly0.find_simultan_events(1, 1)
        self.assertEqual(simultan_events1, (self.poly0[0].convert2absolute()[1],))
        simultan_events2 = self.poly1.find_simultan_events(0, 1)
        simultan_events2_comp = (
            self.poly1[1].convert2absolute()[1],
            self.poly1[1].convert2absolute()[2],
            self.poly1[1].convert2absolute()[3],
            self.poly1[2].convert2absolute()[-2],
            self.poly1[2].convert2absolute()[-1],
        )
        self.assertEqual(simultan_events2, simultan_events2_comp)
        simultan_events3 = self.poly1.find_simultan_events(1, 1)
        simultan_events3_comp = (
            self.poly1[0].convert2absolute()[0],
            self.poly1[0].convert2absolute()[1],
            self.poly1[2].convert2absolute()[1],
            self.poly1[2].convert2absolute()[2],
            self.poly1[2].convert2absolute()[3],
        )
        self.assertEqual(simultan_events3, simultan_events3_comp)

    def test_find_exact_simultan_events(self):
        """find_exact_simultan_events clips overlaps to the query tone."""
        poly2 = old.Polyphon(
            (
                old.Melody([old.Tone(ji.r(1, 1), 2), old.Tone(ji.r(1, 1), 3)]),
                old.Melody([old.Tone(ji.r(3, 2), 3), old.Tone(ji.r(3, 2), 2)]),
                old.Melody([old.Tone(ji.r(4, 3), 1), old.Tone(ji.r(4, 3), 2)]),
            )
        )
        simultan_events4 = poly2.find_exact_simultan_events(0, 1)
        simultan_events4_expected = (
            old.Tone(ji.r(3, 2), 1, 1),
            old.Tone(ji.r(3, 2), 2, 2),
            old.Tone(ji.r(4, 3), 1, 1),
        )
        self.assertEqual(simultan_events4, simultan_events4_expected)
        simultan_events0 = self.poly0.find_exact_simultan_events(0, 0)
        self.assertEqual(simultan_events0, (self.poly0[1][0],))
        simultan_events1 = self.poly0.find_exact_simultan_events(0, 0, False)
        self.assertEqual(simultan_events1, (self.poly0[1].convert2absolute()[0],))
        simultan_events2 = self.poly1.find_exact_simultan_events(1, 0)
        simultan_events2_expected = (self.poly1[2][0], self.poly1[2][0])
        self.assertEqual(simultan_events2, simultan_events2_expected)
        simultan_events3 = self.poly1.find_exact_simultan_events(1, 1)
        simultan_events3_expected = (self.t8, self.t7, self.t7, self.t7, self.t2)
        self.assertEqual(simultan_events3, simultan_events3_expected)

    def test_cut_up_by_time(self):
        """cut_up_by_time slices all voices between two time points."""
        poly0 = old.Polyphon(
            (
                old.Melody([old.Tone(ji.r(1, 1), 2), old.Tone(ji.r(1, 1), 3)]),
                old.Melody([old.Tone(ji.r(3, 2), 3), old.Tone(ji.r(3, 2), 2)]),
                old.Melody([old.Tone(ji.r(4, 3), 1), old.Tone(ji.r(4, 3), 2)]),
            )
        )
        poly0_cut = poly0.cut_up_by_time(1, 3)
        poly0_cut_expected = old.Polyphon(
            (
                old.Melody([old.Tone(ji.r(1, 1), 1), old.Tone(ji.r(1, 1), 1)]),
                old.Melody([old.Tone(ji.r(3, 2), 2)]),
                old.Melody([old.Tone(ji.r(4, 3), 2)]),
            )
        )
        self.assertEqual(poly0_cut, poly0_cut_expected)
        poly1_cut = poly0.cut_up_by_time(1, 3, add_earlier=False)
        poly1_cut_expected = old.Polyphon(
            (
                old.Melody([old.Rest(1), old.Tone(ji.r(1, 1), 1)]),
                old.Melody([old.Rest(2)]),
                old.Melody([old.Tone(ji.r(4, 3), 2)]),
            )
        )
        self.assertEqual(poly1_cut, poly1_cut_expected)
        poly2_cut = poly0.cut_up_by_time(1, 3, hard_cut=False)
        poly2_cut_expected = old.Polyphon(
            (
                old.Melody([old.Tone(ji.r(1, 1), 2), old.Tone(ji.r(1, 1), 3)]),
                old.Melody([old.Tone(ji.r(3, 2), 3)]),
                old.Melody([old.Rest(1), old.Tone(ji.r(4, 3), 2)]),
            )
        )
        self.assertEqual(poly2_cut[2], poly2_cut_expected[2])

    def test_cut_up_by_idx(self):
        """cut_up_by_idx extracts the span of one tone across all voices."""
        poly0 = old.Polyphon(
            (
                old.Melody([old.Tone(ji.r(1, 1), 2), old.Tone(ji.r(1, 1), 3)]),
                old.Melody([old.Tone(ji.r(3, 2), 3), old.Tone(ji.r(3, 2), 2)]),
                old.Melody([old.Tone(ji.r(4, 3), 1), old.Tone(ji.r(4, 3), 2)]),
            )
        )
        poly0_cut = poly0.cut_up_by_idx(2, 1)
        poly0_cut_expected = old.Polyphon(
            (
                old.Melody([old.Tone(ji.r(1, 1), 1), old.Tone(ji.r(1, 1), 1)]),
                old.Melody([old.Tone(ji.r(3, 2), 2)]),
                old.Melody([old.Tone(ji.r(4, 3), 2)]),
            )
        )
        self.assertEqual(poly0_cut, poly0_cut_expected)
class ToneSetTest(unittest.TestCase):
    """Tests for old.ToneSet (construction, conversion, pop_by_* filters)."""

    # shared pitch material
    p0 = ji.r(5, 4)
    p1 = ji.r(3, 2)
    p2 = ji.r(1, 1)
    p3 = ji.r(6, 5)
    p4 = ji.r(7, 4)
    p5 = ji.r(9, 8)
    # relative-time tones (FIX: duplicate second `t3 = ...` assignment removed)
    t0 = old.Tone(p0, rhy.Unit(1))
    t1 = old.Tone(p1, rhy.Unit(1))
    t2 = old.Tone(p2, rhy.Unit(1))
    t3 = old.Tone(p3, rhy.Unit(1))
    t4 = old.Tone(p4, rhy.Unit(1))
    t5 = old.Tone(p5, rhy.Unit(1))
    # set-style tones carry (start, duration) pairs
    t0_set = old.Tone(p0, rhy.Unit(0), rhy.Unit(1))
    t1_set = old.Tone(p1, rhy.Unit(1), rhy.Unit(1))
    t2_set = old.Tone(p2, rhy.Unit(2), rhy.Unit(1))
    t3_set = old.Tone(p3, rhy.Unit(3), rhy.Unit(1))
    t4_set = old.Tone(p4, rhy.Unit(4), rhy.Unit(1))
    t5_set = old.Tone(p5, rhy.Unit(5), rhy.Unit(1))
    t6_set = old.Tone(p5, rhy.Unit(1), rhy.Unit(5))
    mel0 = old.Melody([t0, t1, t2, t3, t4, t5])
    mel1 = old.Melody([old.Rest(rhy.Unit(1)), t1, t2, t3, t4, t5])
    mel2 = old.Melody([t0, t1])
    set0 = old.ToneSet([t0_set, t1_set, t2_set, t3_set, t4_set, t5_set])
    set1 = old.ToneSet([t1_set, t2_set, t3_set, t4_set, t5_set])
    set2 = old.ToneSet([t1_set, t6_set, t2_set])

    def test_constructor(self):
        self.assertEqual(old.ToneSet.from_melody(ToneSetTest.mel0), ToneSetTest.set0)

    def test_converter(self):
        self.assertEqual(ToneSetTest.mel0, ToneSetTest.set0.convert2melody())
        # a set that starts late converts with a leading rest
        self.assertEqual(ToneSetTest.mel1, ToneSetTest.set1.convert2melody())

    def test_pop_by(self):
        popped = ToneSetTest.set0.copy().pop_by_pitch(ToneSetTest.p0, ToneSetTest.p1)
        self.assertEqual(ToneSetTest.mel2, popped.convert2melody())
        popped = ToneSetTest.set0.copy().pop_by_start(rhy.Unit(0), rhy.Unit(1))
        self.assertEqual(ToneSetTest.mel2, popped.convert2melody())

    def test_pop_by_time(self):
        # a single tone sounds at time 1 and at time 1.5
        for t in self.set0.pop_by_time(1):
            self.assertEqual(t, self.t1_set)
        for t in self.set0.pop_by_time(1.5):
            self.assertEqual(t, self.t1_set)
        # long sustained tones are included for the whole time they sound
        test_set0 = self.set2.pop_by_time(1.5)
        test_set_compare0 = old.ToneSet([self.t1_set, self.t6_set])
        test_set1 = self.set2.pop_by_time(2.7)
        test_set_compare1 = old.ToneSet([self.t2_set, self.t6_set])
        self.assertEqual(test_set0, test_set_compare0)
        self.assertEqual(test_set1, test_set_compare1)

    def test_pop_by_correct_dur_and_delay(self):
        poped_by = self.set0.pop_by_pitch(self.p0, self.p5)
        melody = poped_by.convert2melody()
        self.assertEqual(melody[0].delay, rhy.Unit(5))
        self.assertEqual(melody[0].duration, rhy.Unit(1))
def test_copy(self):
    """copy() yields a melody equal to its source."""
    original = old.Melody(
        [old.Tone(self.p0, self.d0), old.Tone(self.p0, self.d0)]
    )
    duplicate = original.copy()
    self.assertEqual(original, duplicate)
def _convert_dissonant_tones2glissandi(self, melody: old.Melody):
    """Merge each consonant tone with its trailing dissonant tones.

    Every consonant tone absorbs the dissonant tones that follow it; the
    dissonant pitches become a glissando line attached to the consonant
    tone instead of separate events.  Rest "consonant" tones keep their
    dissonant successors unchanged.

    :param melody: source melody, aligned one-to-one with
        self.__is_not_dissonant_pitch_per_tone.
    :return: new old.Melody with glissando-carrying tones.
    """
    consonant_tones_positions = tools.find_all_indices_of_n(
        True, self.__is_not_dissonant_pitch_per_tone)
    # tones before the first consonant tone are copied through untouched
    new_melody = melody[:consonant_tones_positions[0]].copy()
    melody_size = len(melody)
    # pair every consonant tone with the dissonant tones that follow it
    # (up to, but excluding, the next consonant tone)
    consonant_and_its_dissonant_tones = tuple(
        (melody[idx0], melody[idx0 + 1:idx1]) for idx0, idx1 in zip(
            consonant_tones_positions,
            consonant_tones_positions[1:] + (melody_size, ),
        ))
    for main_and_its_side_tones in consonant_and_its_dissonant_tones:
        main_tone, additional_tones = main_and_its_side_tones
        if not main_tone.pitch.is_empty:
            duration_per_pitch = (main_tone.duration, ) + tuple(
                t.duration for t in additional_tones)
            summed_duration = sum(duration_per_pitch)
            glissando = []
            # one (hold, slide) interpolation pair per dissonant step;
            # pitch differences are expressed relative to the main tone
            for t0, t1 in zip((main_tone, ) + tuple(additional_tones),
                              additional_tones):
                pitch_difference = t0.pitch - main_tone.pitch
                duration = t0.duration
                if duration > self.glissando_duration:
                    # long tone: hold, then slide for glissando_duration
                    staying = duration - self.glissando_duration
                    changing = self.glissando_duration
                else:
                    # short tone: split half/half between hold and slide
                    staying = duration * 0.5
                    changing = float(staying)
                glissando.append(
                    old.PitchInterpolation(staying, pitch_difference))
                glissando.append(
                    old.PitchInterpolation(changing, pitch_difference))
            # find the last sounding pitch among the dissonant tones so
            # the glissando can end on it; None if all of them are rests
            last_pitch = mel.TheEmptyPitch
            last_pitch_idx = -1
            while last_pitch.is_empty:
                try:
                    last_pitch = additional_tones[last_pitch_idx].pitch
                except IndexError:
                    last_pitch = None
                    break
                last_pitch_idx -= 1
            if last_pitch is not None:
                pitch_difference = last_pitch - main_tone.pitch
                # zero-length terminal interpolation pins the end pitch
                glissando.append(
                    old.PitchInterpolation(0, pitch_difference))
                glissando = old.GlissandoLine(
                    interpolations.InterpolationLine(glissando))
            else:
                glissando = None
            new_tone = old.Tone(
                main_tone.pitch,
                delay=summed_duration,
                duration=summed_duration,
                glissando=glissando,
            )
            new_melody.append(new_tone)
        else:
            # consonant rest: keep it and its dissonant successors as-is
            new_melody.extend((main_tone, ) + tuple(additional_tones))
    return new_melody
def render(self, path: str) -> subprocess.Popen:
    """Render the voice to a wav file through Pianoteq.

    Builds a melody from the instance's pitches/rhythms/dynamics, applies
    the optional glissando conversion, tremolo and modulators, converts
    every tone to a midiplug.PyteqTone and exports the result.

    :param path: output path for the rendered wav file.
    :return: the Popen handle of the export process.
    """
    # scale rhythms into seconds; the last event is extended by the
    # overlaying time so voices can overlap
    adapted_rhythms = [
        rhythm * self.__tempo_factor for rhythm in self.__rhythm
    ]
    adapted_rhythms[-1] += self.__overlaying_time
    melody = old.Melody(
        tuple(
            old.Tone(p, r, r, volume=v) for p, r, v in zip(
                self.__pitches, adapted_rhythms, self.__dynamics)))
    is_consonant_pitch_per_tone = tuple(
        self.__is_not_dissonant_pitch_per_tone)
    spectrum_profile_per_tone = tuple(self.__spectrum_profile_per_tone)
    if self.convert_dissonant_tones2glissandi:
        melody = self._convert_dissonant_tones2glissandi(melody)
    if self.__tremolo is not None:
        # tremolo rewrites the melody AND the parallel metadata tuples
        info = self.__tremolo(melody, is_consonant_pitch_per_tone,
                              spectrum_profile_per_tone)
        melody, is_consonant_pitch_per_tone, spectrum_profile_per_tone = info
    for modulator in self.modulator:
        melody = modulator(melody)
    sequence = []
    for tone, is_not_dissonant_pitch, spectrum_profile in zip(
            melody, is_consonant_pitch_per_tone, spectrum_profile_per_tone):
        pitch, rhythm, volume, glissando = (
            tone.pitch,
            tone.delay,
            tone.volume,
            tone.glissando,
        )
        if pitch.is_empty:
            # rests become near-silent attack events
            tone = pteqer.mk_empty_attack(
                rhythm, next(self.empty_attack_dynamic_maker))
        else:
            if is_not_dissonant_pitch:
                parameters = dict(self.parameter_non_dissonant_pitches)
            else:
                parameters = dict(self.parameter_dissonant_pitches)
            # resolve infinite iterators to concrete per-tone values
            for par in parameters:
                value = parameters[par]
                if isinstance(value, infit.InfIt):
                    parameters[par] = next(value)
                elif (isinstance(value, float)
                      or isinstance(value, int)
                      or value is None):
                    parameters[par] = value
                else:
                    msg = "Unknown value type: {}.".format(type(value))
                    raise TypeError(msg)
            # pinch harmonics sound an octave up, so compensate by
            # transposing the written pitch one octave down.
            # NOTE(review): the inner `if` is redundant (the value is
            # already known to be 1 and thus truthy); kept verbatim.
            if parameters["pinch_harmonic_pedal"] == 1:
                if parameters["pinch_harmonic_pedal"]:
                    pitch -= ji.r(2, 1)
            # NOTE(review): spectrum_profile_6 reuses index 0 (same as
            # profile_3) -- confirm this is intentional and not a typo.
            tone = midiplug.PyteqTone(
                ji.JIPitch(pitch, multiply=globals.CONCERT_PITCH),
                rhythm,
                rhythm,
                volume=volume,
                glissando=glissando,
                spectrum_profile_3=spectrum_profile[0],
                spectrum_profile_5=spectrum_profile[1],
                spectrum_profile_6=spectrum_profile[0],
                spectrum_profile_7=spectrum_profile[2],
                **parameters,
            )
        sequence.append(tone)
    pteq = midiplug.Pianoteq(tuple(sequence))
    return pteq.export2wav(path, preset=self.preset, fxp=self.fxp)
def make_glitter_voices(
    self,
    include_dissonant_pitches: bool,
    glitter_modulater_per_voice: tuple,
    glitter_attack_duration: infit.InfIt,
    glitter_release_duration: infit.InfIt,
    glitter_type: str,
    glitter_chord: tuple,
    glitter_register_per_voice: tuple,
    glitter_wave_form_per_voice: tuple,
    glitter_volume_per_voice: tuple,
) -> dict:
    """Build the init-attribute dict for the glitter (or drone) voices.

    :param include_dissonant_pitches: choose between the two counterpoint
        result variants as pitch source ("glitter" type only).
    :param glitter_type: either "glitter" (pairwise combination voices)
        or "drone" (one sine drone per chord pitch).
    :return: mapping voice-name -> {"start", "duration", "sound_engine"}.
    :raises NotImplementedError: for unknown glitter_type values.
    """
    init_attributes = {}
    glitter_duration = self._duration_per_voice + self._anticipation_time
    glitter_duration += self._overlaying_time
    if glitter_type == "glitter":
        if include_dissonant_pitches:
            voice_base = self._counterpoint_result[0]
        else:
            voice_base = self._counterpoint_result[1]
        voices = tuple(
            old.Melody(old.Tone(p, r) for p, r in zip(vox[0], vox[1]))
            for vox in voice_base)
        # one glitter engine per unordered pair of the three voices
        for combination, modulator in zip(
                tuple(itertools.combinations(tuple(range(3)), 2)),
                glitter_modulater_per_voice,
        ):
            sound_engine = glitter.GlitterEngine(
                voices[combination[0]],
                voices[combination[1]],
                self._tempo_factor,
                anticipation_time=self._anticipation_time,
                overlaying_time=self._overlaying_time,
                modulator=modulator,
                attack_duration=glitter_attack_duration,
                release_duration=glitter_release_duration,
            )
            voice_name = "glitter{}{}{}".format(self._gender_code,
                                                *sorted(combination))
            init_attributes.update({
                voice_name: {
                    "start": -self._anticipation_time,
                    "duration": glitter_duration,
                    "sound_engine": sound_engine,
                }
            })
    elif glitter_type == "drone":
        pitches = glitter_chord(*self._harmonic_primes)
        # FIX: keep exactly one entry per chord voice.  The previous
        # version filtered empty pitches OUT of this generator, which
        # shifted the positions used below to index combinations /
        # volumes / modulators / wave forms and silently attached drones
        # to the wrong voices whenever a chord pitch was empty.  Empty
        # pitches are now kept as placeholders (and never registered).
        pitches = tuple(
            p if p.is_empty else p.register(register)
            for p, register in zip(pitches, glitter_register_per_voice))
        frequencies = tuple(None if p.is_empty else p.float *
                            globals.CONCERT_PITCH for p in pitches)
        combinations = ((0, 1), (0, 2), (1, 2))
        for idx, frequency in enumerate(frequencies):
            if frequency is not None:
                voice_name = "glitter{}{}{}".format(self._gender_code,
                                                    *combinations[idx])
                volume = glitter_volume_per_voice[idx]
                modulator = glitter_modulater_per_voice[idx]
                wave_form = glitter_wave_form_per_voice[idx]
                sound_engine = glitter.SineDroneEngine(
                    frequency,
                    glitter_duration,
                    self._anticipation_time,
                    self._overlaying_time,
                    glitter_attack_duration,
                    glitter_release_duration,
                    volume,
                    modulator,
                    wave_form,
                )
                init_attributes.update({
                    voice_name: {
                        "start": -self._anticipation_time,
                        "duration": glitter_duration,
                        "sound_engine": sound_engine,
                    }
                })
    else:
        raise NotImplementedError()
    return init_attributes
class MelodyTest(unittest.TestCase):
    """Tests for the old.Melody container and its parameter protocol."""

    # shared fixtures: two pitches, two durations and objects built from them
    p0 = ji.r(14, 9)
    p1 = ji.r(7, 4)
    d0 = rhy.Unit(400)
    d1 = rhy.Unit(800)
    t0 = old.Tone(p0, d0)
    t1 = old.Tone(p1, d1)
    mel0 = mel.Mel([p0] * 3)
    mel1 = mel.Mel([p1] * 3)
    rhy0 = rhy.Compound([d0] * 3)
    rhy1 = rhy.Compound([d1] * 3)
    melody0 = old.Melody([t0] * 3)

    def test_constructor(self):
        # merely has to build without raising
        old.Melody([self.t0, self.t0, self.t0])

    def test_alternative_constructor(self):
        built = old.Melody.from_parameter(self.mel0, self.rhy0)
        self.assertEqual(self.melody0, built)

    def test_duration(self):
        self.assertEqual(self.melody0.duration, sum(self.rhy0))
        only_a_rest = old.Melody([old.Rest(3)])
        self.assertEqual(only_a_rest.duration, 3)

    def test_get_attributes(self):
        self.assertEqual(self.melody0.__get_pitch__(), self.mel0)
        self.assertEqual(self.melody0.__get_delay__(), self.rhy0)
        self.assertEqual(self.melody0.__get_duration__(), self.rhy0)

    def test_set_attributes(self):
        subject = old.Melody([])
        subject.__set_pitch__(self.mel0)
        subject.__set_delay__(self.rhy0)
        self.assertEqual(subject.__get_pitch__(), self.mel0)
        self.assertEqual(subject.__get_delay__(), self.rhy0)
        self.assertEqual(subject.pitch, self.mel0)
        self.assertEqual(subject.delay, self.rhy0)
        # overwrite everything and re-check, duration included this time
        subject.__set_pitch__(self.mel1)
        subject.__set_delay__(self.rhy1)
        subject.__set_duration__(self.rhy0)
        self.assertEqual(subject.__get_pitch__(), self.mel1)
        self.assertEqual(subject.__get_delay__(), self.rhy1)
        self.assertEqual(subject.__get_duration__(), self.rhy0)
        self.assertEqual(subject.pitch, self.mel1)
        self.assertEqual(subject.delay, self.rhy1)
        self.assertEqual(subject.dur, self.rhy0)

    def test_set_item(self):
        low = old.Tone(ji.r(1, 1), rhy.Unit(2))
        high = old.Tone(ji.r(2, 1), rhy.Unit(2))
        target = old.Melody([low, high])
        source = old.Melody([high, low])
        target[0], target[1] = source[0], source[1]
        self.assertEqual(target, source)

    def test_freq(self):
        self.assertEqual(self.melody0.freq, self.mel0.freq)

    def test_add(self):
        expected = old.Melody([self.t0, self.t1, self.t1])
        left = old.Melody([self.t0])
        right = old.Melody([self.t1] * 2)
        self.assertEqual(left + right, expected)

    def test_tie(self):
        # three equal tones collapse into one tone of triple length
        tied_all = old.Melody([old.Tone(self.t0.pitch, self.t0.delay * 3)])
        self.assertEqual(self.melody0.tie(), tied_all)
        # only the leading pair merges when the final pitch differs
        tied_pair = old.Melody(
            [old.Tone(self.t0.pitch, self.t0.delay * 2), self.t1]
        )
        repeated_then_new = old.Melody([self.t0, self.t0, self.t1])
        self.assertEqual(repeated_then_new.tie(), tied_pair)
        # alternating pitches stay untouched
        alternating = old.Melody([self.t0, self.t1, self.t0])
        self.assertEqual(alternating.tie(), alternating)

    def test_split(self):
        long0 = old.Tone(ji.r(1, 1, 2), rhy.Unit(2), rhy.Unit(1))
        short0 = old.Tone(ji.r(1, 1, 2), rhy.Unit(1), rhy.Unit(1))
        long1 = old.Tone(ji.r(1, 1, 2), rhy.Unit(3), rhy.Unit(1))
        short1 = old.Tone(ji.r(1, 1, 2), rhy.Unit(1), rhy.Unit(1))
        gap0 = old.Rest(rhy.Unit(1))
        gap1 = old.Rest(rhy.Unit(2))
        unsplit = old.Melody([long0, long1])
        expected = old.Melody([short0, gap0, short1, gap1])
        self.assertEqual(unsplit.split(), expected)

    def test_cut_up_by_time(self):
        unison = old.Tone(ji.r(1, 1), rhy.Unit(2))
        octave = old.Tone(ji.r(2, 1), rhy.Unit(2))
        half_unison = old.Tone(ji.r(1, 1), rhy.Unit(1))
        gap = old.Rest(1)
        source = old.Melody([unison, octave, octave, unison, octave])
        inner = old.Melody([octave, octave, unison])
        with_rest = old.Melody([gap, octave, octave, unison])
        with_earlier = old.Melody([half_unison, octave, octave, unison])
        hard = old.Melody([octave, octave, half_unison])
        self.assertEqual(source.cut_up_by_time(2, 8), inner)
        self.assertEqual(source.cut_up_by_time(1, 8), with_rest)
        self.assertEqual(
            source.cut_up_by_time(1, 8, add_earlier=True), with_earlier
        )
        self.assertEqual(source.cut_up_by_time(2, 7, hard_cut=True), hard)
        self.assertEqual(source.cut_up_by_time(2, 7, hard_cut=False), inner)

    def test_convert2absolute(self):
        absolute = old.Melody(
            tuple(
                old.Tone(self.p0, self.d0 * idx, self.d0 * (idx + 1))
                for idx in range(3)
            )
        )
        self.assertEqual(self.melody0.convert2absolute(), absolute)
        # equality also holds against an explicitly relative-tagged copy
        relative_tagged = old.Melody(
            tuple(
                old.Tone(self.p0, self.d0 * idx, self.d0 * (idx + 1))
                for idx in range(3)
            ),
            time_measure="relative",
        )
        self.assertEqual(self.melody0.convert2absolute(), relative_tagged)

    def test_convert2relative(self):
        absolute = old.Melody(
            tuple(
                old.Tone(self.p0, self.d0 * idx, self.d0 * (idx + 1))
                for idx in range(3)
            ),
            time_measure="absolute",
        )
        self.assertEqual(absolute.convert2relative(), self.melody0)

    def test_copy(self):
        original = old.Melody(
            [old.Tone(self.p0, self.d0), old.Tone(self.p0, self.d0)]
        )
        self.assertEqual(original, original.copy())
def convert2line(solution: dict, keys: tuple) -> tuple: pitches = (solution[key] for key in keys) return tuple( old.Melody(old.Tone(p, 1) for p in pitches).tie_pauses().discard_rests())
def convert2tone(self, delay: float) -> old.Tone: return old.Tone(self.set_pitch2right_octave(), delay, delay, volume=self.volume)
def __init__(
    self,
    name: str,
    tracks2ignore=tuple([]),
    volume_envelope=None,
    # BUGFIX: was ``dict([])`` — a mutable default shared across all calls;
    # use a None sentinel and build a fresh dict per instance instead.
    volume_envelope_per_track: dict = None,
    rhythm_maker=None,
    start: float = 0,
    group: tuple = (0, 0, 0),
    gender: bool = True,
    n_bars: int = 1,
    anticipation_time: float = 0.5,
    overlaying_time: float = 0.5,
    duration_per_bar: float = 5,
    include_voices: bool = True,
    include_diva: bool = True,
    include_glitter: bool = True,
    include_natural_radio: bool = True,
    include_percussion: bool = True,
    dynamic_range_of_voices: tuple = (0.2, 0.95),
    max_spectrum_profile_change: int = 10,
    voices_overlaying_time: float = 1,
    voices_entry_delay_per_voice: tuple = (0, 0, 0),
    glitter_include_dissonant_pitches: bool = True,
    glitter_modulater_per_voice: tuple = ("randomi", "randomi", "randomi"),
    glitter_attack_duration: infit.InfIt = 0.5,
    glitter_release_duration: infit.InfIt = 0.5,
    glitter_type: str = "glitter",
    glitter_chord=None,
    glitter_register_per_voice: tuple = (2, 2, 2),
    glitter_wave_form_per_voice: tuple = ("sine", "sine", "sine"),
    glitter_volume_per_voice: tuple = (1, 1, 1),
    radio_samples: tuple = (
        globals.SAM_RADIO_BIELEFELD[2],
        globals.SAM_RADIO_UK[4],
        globals.SAM_RADIO_ITALY[2],
    ),
    radio_make_envelope: bool = True,
    radio_average_volume: float = 0.3,
    radio_min_volume: float = 0.65,
    radio_max_volume: float = 1,
    radio_n_changes: int = 5,
    radio_crossfade_duration: float = 0.5,
    radio_shadow_time: float = 0.175,
    radio_silent_channels: tuple = tuple([]),
    # NOTE(review): these Gaussian defaults are single instances shared by
    # every call to __init__ — if infit.Gaussian carries internal state,
    # instances will influence each other.  Verify against infit's API.
    radio_attack_duration: infit.InfIt = infit.Gaussian(0.4, 0.095),
    radio_release_duration: infit.InfIt = infit.Gaussian(0.4, 0.095),
    tremolo_maker_per_voice: tuple = (None, None, None),
    cp_constraints_interpolation: tuple = tuple([]),
    cp_add_dissonant_pitches_to_nth_voice: tuple = (True, True, True),
    # BUGFIX: was ``{}`` — same shared-mutable-default problem as above.
    speech_init_attributes: dict = None,
    ambitus_maker: ambitus.AmbitusMaker = ambitus.SymmetricalRanges(
        ji.r(1, 1), ji.r(3, 1), ji.r(5, 4)),
    start_harmony: tuple = None,
    pteq_engine_per_voice: tuple = (
        pteq.mk_contrasting_pte(),
        pteq.mk_contrasting_pte(),
        pteq.mk_contrasting_pte(),
    ),
    diva_engine_per_voice: tuple = (
        diva.FloatingDivaMidiEngine,
        diva.FloatingDivaMidiEngine,
        diva.FloatingDivaMidiEngine,
    ),
    percussion_engine_per_voice: tuple = (
        percussion.Rhythmizer((0, )),
        percussion.Rhythmizer((1, )),
        percussion.Rhythmizer((2, )),
    ),
    # in case the user want to use her or his own metrical numbers
    metrical_numbers: tuple = None,
) -> None:
    """Assemble all voices, engines and init attributes for one segment.

    Builds the counterpoint result, converts it into inner/outer voice
    melodies, and — depending on the ``include_*`` flags — collects init
    attributes for pianoteq, diva, glitter, natural-radio, percussion and
    speech tracks before delegating to ``super().__init__``.
    """
    # replace None sentinels with fresh, per-call mutable containers
    if volume_envelope_per_track is None:
        volume_envelope_per_track = {}
    if speech_init_attributes is None:
        speech_init_attributes = {}

    assert glitter_type in ("glitter", "drone")

    if glitter_chord is None:
        glitter_chord = harmony.find_harmony(gender=gender)[0]

    self._voices_entry_delay_per_voice = voices_entry_delay_per_voice
    self._n_bars = n_bars
    self._cp_constraints_interpolation = cp_constraints_interpolation
    self._gender = gender
    # gender encodes to a one-letter code used in track names
    self._gender_code = ("N", "P")[int(gender)]
    self._bar_number = globals.MALE_SOIL.detect_group_index(group)

    if metrical_numbers is None:
        metrical_numbers = globals.MALE_SOIL.metre_per_vox_per_bar[
            self._bar_number]

    self._metrical_numbers = metrical_numbers
    self._anticipation_time = anticipation_time
    self._overlaying_time = overlaying_time
    self._voices_overlaying_time = voices_overlaying_time
    self._pteq_engine_per_voice = pteq_engine_per_voice
    self._diva_engine_per_voice = diva_engine_per_voice
    self._ambitus_maker = ambitus_maker
    self._start_harmony = start_harmony
    self._cp_add_dissonant_pitches_to_nth_voice = (
        cp_add_dissonant_pitches_to_nth_voice)
    self._tremolo_maker_per_voice = tremolo_maker_per_voice

    if rhythm_maker is None:
        # default rhythm maker: one euclidean rhythm per metrical prime
        # (NOTE(review): both from_euclid arguments are identical —
        # presumably intentional, but worth confirming against binr)
        def rhythm_maker(self) -> tuple:
            return tuple(
                binr.Compound.from_euclid(
                    metrical_prime * self._n_bars,
                    metrical_prime * self._n_bars)
                for metrical_prime in self._metrical_numbers)

    rhythms = polyrhythms.Polyrhythm(
        *rhythm_maker(self)).transformed_rhythms

    self._percussion_engine_per_voice = percussion_engine_per_voice
    self._rhythms = rhythms
    self._bar_size = int(sum(rhythms[0])) // n_bars
    self._weight_per_beat_for_one_bar = self.make_weight_per_beat_for_one_bar(
        self._metrical_numbers, self._bar_size)
    self._tempo_factor = self.convert_duration2factor(
        duration_per_bar, self._bar_size)
    self._include_diva = include_diva
    self._weight_per_beat = tuple(
        self._weight_per_beat_for_one_bar * self._n_bars)
    self._harmonic_primes = globals.MALE_SOIL.harmonic_primes_per_bar[
        self._bar_number]
    self._counterpoint_result = self.make_counterpoint_result()

    init_attributes = {}

    self._duration_per_voice = duration_per_bar * self._n_bars

    # counterpoint result index 0: voices including dissonant pitches
    self._voices_inner = tuple(
        old.Melody(old.Tone(p, r) for p, r in zip(vox[0], vox[1]))
        for vox in self._counterpoint_result[0])
    self._duration = self._voices_inner[0].duration
    self._attribute_maker_inner = pteqer.AttributeMaker(
        self._voices_inner,
        metricity_per_beat=self._weight_per_beat,
        max_spectrum_profile_change=max_spectrum_profile_change,
        dynamic_range=dynamic_range_of_voices,
    )

    # counterpoint result index 1: voices without dissonant pitches
    self._voices_outer = tuple(
        old.Melody(old.Tone(p, r) for p, r in zip(vox[0], vox[1]))
        for vox in self._counterpoint_result[1])
    self._attribute_maker_outer = pteqer.AttributeMaker(
        self._voices_outer,
        metricity_per_beat=self._weight_per_beat,
        max_spectrum_profile_change=max_spectrum_profile_change,
        dynamic_range=dynamic_range_of_voices,
    )

    # make pianoteq voices
    if include_voices:
        init_attributes.update(self.make_voices())

    # make diva voices
    if include_diva:
        init_attributes.update(self.make_diva_voices())

    # make glitter voices
    if include_glitter:
        init_attributes.update(
            self.make_glitter_voices(
                glitter_include_dissonant_pitches,
                glitter_modulater_per_voice,
                glitter_attack_duration,
                glitter_release_duration,
                glitter_type,
                glitter_chord,
                glitter_register_per_voice,
                glitter_wave_form_per_voice,
                glitter_volume_per_voice,
            ))

    # make natural radio voices
    if include_natural_radio:
        # attach the per-tone volumes computed by the attribute makers to
        # fresh copies of the inner and outer voices
        voices_inner_and_outer = []
        for voices, volume_per_voice in (
            (
                self._voices_inner,
                self._attribute_maker_inner.volume_per_tone_per_voice,
            ),
            (
                self._voices_outer,
                self._attribute_maker_outer.volume_per_tone_per_voice,
            ),
        ):
            voices = tuple(
                old.Melody(
                    old.Tone(
                        tone.pitch, tone.delay, tone.delay, volume=volume)
                    for tone, volume in zip(voice, volume_per_tone))
                for voice, volume_per_tone in zip(voices, volume_per_voice))
            voices_inner_and_outer.append(voices)

        init_attributes.update(
            self.make_natural_radio(
                voices_inner_and_outer[0],
                voices_inner_and_outer[1],
                self._tempo_factor,
                gender,
                make_envelope=radio_make_envelope,
                samples=radio_samples,
                n_changes=radio_n_changes,
                crossfade_duration=radio_crossfade_duration,
                anticipation_time=self._anticipation_time,
                overlaying_time=self._overlaying_time,
                average_volume=radio_average_volume,
                min_volume=radio_min_volume,
                max_volume=radio_max_volume,
                shadow_time=radio_shadow_time,
                silent_channels=radio_silent_channels,
                attack_duration=radio_attack_duration,
                release_duration=radio_release_duration,
            ))

    # make percussion voices
    if include_percussion:
        init_attributes.update(self.make_percussion_voices())

    # make speech voices
    init_attributes.update(speech_init_attributes)

    super().__init__(
        name=name,
        start=start,
        tracks2ignore=tracks2ignore,
        volume_envelope=volume_envelope,
        volume_envelope_per_track=volume_envelope_per_track,
        **init_attributes,
    )