Example #1
    @classmethod
    def from_complex_scale(
        cls,
        name: str,
        original_scale: tuple,
        intonations_per_scale_degree: tuple,
        octave_of_first_pitch: int = 0,
        tempo_estimation_method: str = "essentia",
    ) -> "Transcription":
        root = cls._get_root("{}.svl".format(name))
        frequency_range = root[0][0].attrib["minimum"], root[0][0].attrib[
            "maximum"]
        data = cls._filter_data_from_root(root)

        frequencies = tuple(map(operator.itemgetter(0), data))
        pitch_transcriber = ComplexScaleTranscriber(
            original_scale, intonations_per_scale_degree)

        octavater = ji.r(1, 1).register(octave_of_first_pitch)
        pitches = tuple(octavater + pitch
                        for pitch in pitch_transcriber(frequencies))
        new_data = tuple(
            (pitch, ) + tone[1:] for pitch, tone in zip(pitches, data))

        melody = cls._convert_data2melody(new_data, name,
                                          tempo_estimation_method)
        return cls(name, tuple(melody), frequency_range)
Example #2
def mk_loop_cadence_with_tuk(loopsize: int,
                             meter: metre.Metre) -> old.JICadence:
    size = meter.size
    loop_amount = size // loopsize
    rest = size % loopsize
    harmony0 = ji.JIHarmony([ji.r(3, 2)])
    harmony1 = ji.JIHarmony([ji.r(1, 1)])
    basic_cadence = [old.Chord(harmony0, 0.5)
                     ] + [old.Chord(harmony1, 1) for i in range(loopsize - 1)]
    basic_cadence.append(old.Chord(harmony1, 0.5))
    print(old.JICadence(basic_cadence).duration)
    cadence = [basic_cadence for i in range(loop_amount)]
    cadence = functools.reduce(operator.add, cadence)
    if rest:
        cadence.append(old.Chord(harmony0, rest))
    return old.JICadence(cadence)
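A quick sanity check of the duration bookkeeping above (plain Python, assuming chord durations simply add up): the two half-beat chords plus the loopsize - 1 whole beats always span exactly loopsize beats, so loop_amount copies plus the remainder fill the meter.

def loop_durations(loopsize: int) -> list:
    # 0.5 + (loopsize - 1) * 1 + 0.5 == loopsize
    return [0.5] + [1] * (loopsize - 1) + [0.5]

assert sum(loop_durations(4)) == 4
assert sum(loop_durations(7)) == 7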
Example #3
 def mk_combination_pitches(comb: int, octave: ji.JIPitch):
     pitches = (tuple(
         ji.r(functools.reduce(operator.mul, com), 1)
         for com in itertools.combinations((3, 5, 7, 9), comb)), )
     pitches += (tuple(p.inverse().normalize(2) + octave
                       for p in pitches[0]), )
     return (tuple(p.normalize(2) + octave for p in pitches[0]), pitches[1])
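For orientation, the numerators produced for comb=2 are simply the pairwise products of the four odd factors; a standalone sketch that does not need the ji module:

import functools
import itertools
import operator

numerators = [
    functools.reduce(operator.mul, com)
    for com in itertools.combinations((3, 5, 7, 9), 2)
]
assert numerators == [15, 21, 27, 35, 45, 63]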
Example #4
def mk_empty_attack(
    duration: float,
    volume: float,
    frequency: float = 35,
    hammer_noise: float = 3,
    impedance: float = 0.3,
    cutoff: float = 0.3,
    q_factor: float = 5,
    string_length: float = 0.8,
    strike_point: float = 1 / 2,
    hammer_hard_piano: float = 1,
    hammer_hard_mezzo: float = 1.5,
    hammer_hard_forte: float = 2,
    blooming_energy: float = 0,
) -> midiplug.PyteqTone:
    """Helps making percussive sounds with Pianoteq."""
    return midiplug.PyteqTone(
        ji.JIPitch(ji.r(1, 1), multiply=frequency),
        duration,
        duration,
        volume=volume,
        hammer_noise=hammer_noise,
        impedance=impedance,
        cutoff=cutoff,
        q_factor=q_factor,
        string_length=string_length,
        strike_point=strike_point,
        hammer_hard_piano=hammer_hard_piano,
        hammer_hard_mezzo=hammer_hard_mezzo,
        hammer_hard_forte=hammer_hard_forte,
        blooming_energy=blooming_energy,
    )
Example #5
    def test_cut_up_by_time(self):
        t0 = old.Tone(ji.r(1, 1), rhy.Unit(2))
        t1 = old.Tone(ji.r(2, 1), rhy.Unit(2))
        t2 = old.Tone(ji.r(1, 1), rhy.Unit(1))
        r0 = old.Rest(1)

        melody0 = old.Melody([t0, t1, t1, t0, t1])
        melody1 = old.Melody([t1, t1, t0])
        melody2 = old.Melody([r0, t1, t1, t0])
        melody3 = old.Melody([t2, t1, t1, t0])
        melody4 = old.Melody([t1, t1, t2])

        self.assertEqual(melody0.cut_up_by_time(2, 8), melody1)
        self.assertEqual(melody0.cut_up_by_time(1, 8), melody2)
        self.assertEqual(melody0.cut_up_by_time(1, 8, add_earlier=True), melody3)
        self.assertEqual(melody0.cut_up_by_time(2, 7, hard_cut=True), melody4)
        self.assertEqual(melody0.cut_up_by_time(2, 7, hard_cut=False), melody1)
Example #6
 def test_cut_up_by_idx(self):
     poly0 = old.Polyphon(
         (
             old.Melody([old.Tone(ji.r(1, 1), 2), old.Tone(ji.r(1, 1), 3)]),
             old.Melody([old.Tone(ji.r(3, 2), 3), old.Tone(ji.r(3, 2), 2)]),
             old.Melody([old.Tone(ji.r(4, 3), 1), old.Tone(ji.r(4, 3), 2)]),
         )
     )
     poly0_cut = poly0.cut_up_by_idx(2, 1)
     poly0_cut_expected = old.Polyphon(
         (
             old.Melody([old.Tone(ji.r(1, 1), 1), old.Tone(ji.r(1, 1), 1)]),
             old.Melody([old.Tone(ji.r(3, 2), 2)]),
             old.Melody([old.Tone(ji.r(4, 3), 2)]),
         )
     )
     self.assertEqual(poly0_cut, poly0_cut_expected)
Example #7
def __mk_siter_gong_simplified():
    def mk_combination_pitches(comb: int, octave: ji.JIPitch):
        pitches = (tuple(
            ji.r(functools.reduce(operator.mul, com), 1)
            for com in itertools.combinations((3, 5, 7, 9), comb)
            if tuple(sorted(com)) != (3, 9)), )
        pitches += (tuple(p.inverse().normalize(2) + octave
                          for p in pitches[0]), )
        return tuple(p.normalize(2) + octave for p in pitches[0]), pitches[1]

    def mk_re():
        return sound.PyteqEngine(preset='"Cimbalom hard"')

    pitch_gong_plus = ji.r(3 * 5 * 7, 1).normalize(2) + ji.r(1, 4)
    pitch_gong_minus = pitch_gong_plus.inverse().normalize(2) + ji.r(1, 4)
    pitches_tong = mk_combination_pitches(2, ji.r(1, 2))
    pitches = (
        pitches_tong[0] + (pitch_gong_plus, ),
        pitches_tong[1] + (pitch_gong_minus, ),
    )

    pitch2notation = tuple(
        instruments.mk_p2n(p, idx) for idx, p in enumerate(pitches))
    pitch2notation = instruments.combine_p2n(*pitch2notation)

    notation_styles = tuple(
        notation.MelodicLineStyle("Large", label, False, True, False)
        for label in ("+", "-"))
    vertical_line_metrical = notation.VerticalLine(1.6, "", 1)
    vertical_line_compound = notation.VerticalLine(0.9, "", 1)
    vertical_line_unit = notation.VerticalLine(0.15, "", 1)
    vertical_line_style = notation.VerticalLineStyle(vertical_line_metrical,
                                                     vertical_line_compound,
                                                     vertical_line_unit)

    re = mk_re()
    render_engines = (re, re)

    return instruments.Instrument(
        "Siter_gong",
        pitch2notation,
        notation_styles,
        render_engines,
        vertical_line_style,
    )
Example #8
    def __call__(self, mode):
        """Return real pitch depending on the corresponding mode.

        This pitch is in no specific octave yet.
        """
        primes = self.__key(mode)
        if not mode.gender:
            primes = tuple(reversed(primes))
        return ji.r(primes[0], primes[1])
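Assuming ji.r(a, b) stands for the frequency ratio a/b, the gender switch above simply chooses between a ratio and its inversion; a minimal sketch with fractions.Fraction:

from fractions import Fraction

def pitch_ratio(primes: tuple, gender: bool) -> Fraction:
    # gender == True keeps the order, gender == False inverts the ratio
    a, b = primes if gender else tuple(reversed(primes))
    return Fraction(a, b)

assert pitch_ratio((3, 5), True) == Fraction(3, 5)
assert pitch_ratio((3, 5), False) == Fraction(5, 3)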
Example #9
def mk_loop_cadence(loopsize: int, meter: metre.Metre) -> old.JICadence:
    size = meter.size
    loop_amount = size // loopsize
    rest = size % loopsize
    harmony = ji.JIHarmony([ji.r(3, 2)])
    cadence = [old.Chord(harmony, loopsize) for i in range(loop_amount)]
    if rest:
        cadence.append(old.Chord(harmony, rest))
    return old.JICadence(cadence)
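The partition of the meter into full loops plus a rest is plain integer arithmetic; with hypothetical numbers:

size, loopsize = 17, 5
loop_amount, rest = divmod(size, loopsize)  # three full loops of five beats, two beats left over
assert (loop_amount, rest) == (3, 2)
assert loop_amount * loopsize + rest == size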
Example #10
    def find_high_pitch(primes0: tuple, primes1: tuple,
                        mode: modes.Mode) -> ji.JIPitch:
        if primes0[0] == primes1[0]:
            if mode.gender:
                p = ji.r(primes0[0], primes0[1] * primes1[1])
            else:
                p = ji.r(primes0[0], 1)
        elif primes0[1] == primes1[1]:
            if mode.gender:
                p = ji.r(1, primes0[1])
            else:
                p = ji.r(primes0[0] * primes1[0], primes0[1])
        else:
            msg = "Unknown combination of primes: {0} and {1}.".format(
                primes0, primes1)
            raise ValueError(msg)

        return p.normalize() + ji.r(4, 1)
Example #11
    def _make_transcription(
        self,
        starting_scale_degree: int,
        starting_intonation: int,
        cent_distances: tuple,
    ) -> tuple:

        pitches = [(starting_scale_degree, starting_intonation, 0)]
        fitness = 0

        for distance in cent_distances:
            last_scale_degree, last_intonation, last_octave = pitches[-1]
            adapted_distance = (
                distance +
                self._deviation_from_ideal_scale_degree_per_intonation[
                    last_scale_degree][last_intonation])
            closest_item = tools.find_closest_item(
                adapted_distance,
                self._distance_to_other_scale_degrees_per_scale_degree[
                    last_scale_degree],
                key=operator.itemgetter(1),
            )

            new_scale_degree = closest_item[0][0]
            new_octave = last_octave + closest_item[0][1]

            last_pitch = self._intonations_per_scale_degree[last_scale_degree][
                last_intonation]
            last_pitch += ji.r(1, 1).register(last_octave)

            octavater = ji.r(1, 1).register(new_octave)
            possible_intonations = tuple(
                intonation + octavater for intonation in
                self._intonations_per_scale_degree[new_scale_degree])
            last_pitch_cents = last_pitch.cents
            distance_per_intonation = tuple(into.cents - last_pitch_cents
                                            for into in possible_intonations)
            new_intonation = tools.find_closest_index(distance,
                                                      distance_per_intonation)
            fitness += distance_per_intonation[new_intonation]

            pitches.append((new_scale_degree, new_intonation, new_octave))

        return tuple(pitches), fitness
Example #12
 def apply(self, melody) -> tuple:
     length_melody = len(melody)
     distribution = []
     while len(distribution) < length_melody:
         try:
             distr = next(self.pattern)
         except StopIteration:
             self.mk_pattern()
             distr = next(self.pattern)
         distribution.append(distr)
     melodies = [[type(melody[0])(None, 0, melody[0].duration)]
                 for i in range(self.amount_voices)]
     pitchchange_cycle = itertools.cycle([0, 0, 0, 1, 0, 1, 0, 2, 0, 0])
     d = melody[0].delay
     first = True
     for tone, distr in zip(melody, distribution):
         hits = 1
         for i, dis in enumerate(distr):
             if dis == 1:
                 next_pitchcycle = next(pitchchange_cycle)
                 if next_pitchcycle == 0:
                     newpitch = tone.pitch.set_val_border(1)
                     newpitch += ji.r(hits, 1)
                 elif next_pitchcycle == 1:
                     newpitch = tone.pitch.copy()
                 else:
                     newpitch = tone.pitch.set_val_border(2).scalar(hits)
                     newpitch = newpitch.set_val_border(1).normalize(2)
                     newpitch = newpitch + ji.r(2, 1)
                 if first is True:
                     melodies[i][0].pitch = newpitch
                     first = False
                 else:
                     melodies[i].append(
                         type(tone)(newpitch, d, tone.duration))
                 hits += 1
         d += tone.delay
     for m in melodies:
         for p0, p1 in zip(m, m[1:]):
             p0.delay = p1.delay - p0.delay
         m[-1].delay = m[-1].duration
     return tuple(old.JIMelody(mel) for mel in melodies)
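The closing loop converts the absolute onset times collected in d into inter-onset delays, while the last event keeps its duration as delay; the arithmetic in isolation, with made-up onsets:

onsets = [0, 1, 3, 6]
delays = [b - a for a, b in zip(onsets, onsets[1:])]
assert delays == [1, 2, 3]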
Example #13
    def mk_pitch_depending_on_gender(self,
                                     primes0: tuple = None,
                                     primes1: tuple = None,
                                     gender: bool = None):
        if gender is None:
            gender = self.harmonic_gender
        if primes0:
            p0 = functools.reduce(operator.mul, primes0)
        else:
            p0 = 1

        if primes1:
            p1 = functools.reduce(operator.mul, primes1)
        else:
            p1 = 1

        if gender:
            return ji.r(p0, p1)
        else:
            return ji.r(p1, p0)
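Traced with plain fractions (a sketch; ji.r(p0, p1) is taken to represent the ratio p0/p1), the method multiplies each prime tuple into a single number and lets gender decide which side becomes the numerator:

import functools
import operator
from fractions import Fraction

def pitch_depending_on_gender(primes0=None, primes1=None, gender=True):
    p0 = functools.reduce(operator.mul, primes0) if primes0 else 1
    p1 = functools.reduce(operator.mul, primes1) if primes1 else 1
    return Fraction(p0, p1) if gender else Fraction(p1, p0)

assert pitch_depending_on_gender((3, 5), (7,), gender=True) == Fraction(15, 7)
assert pitch_depending_on_gender((3, 5), (7,), gender=False) == Fraction(7, 15)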
def make(name: str = "TWO", gender=False, group=0, sub_group0=1):
    return (segments.Chord(
        "{}_Bell0".format(name),
        ambitus_maker=ambitus.SymmetricalRanges(ji.r(4, 1), ji.r(2, 1),
                                                ji.r(5, 4)),
        group=(group, sub_group0, 0),
        chord=harmony.find_harmony(name="A", idx=0, gender=gender),
        gender=gender,
        n_bars=1,
        duration_per_bar=10,
        start=0,
        dynamic_range_of_voices=(0.95, 1),
        voices_entry_delay_per_voice=(0, 0.15, 0.225),
        anticipation_time=5,
        overlaying_time=0,
        pteq_engine_per_voice=(
            pteq.mk_trippy_bell_pte(
                fxp='"pbIII/fxp/Bells_no_stretching.fxp"',
                preset=None,
                empty_attack_dynamic_maker=infit.Value(0),
            ),
            pteq.mk_trippy_bell_pte(
                fxp='"pbIII/fxp/Bells_no_stretching.fxp"',
                preset=None,
                empty_attack_dynamic_maker=infit.Value(0),
            ),
            pteq.mk_trippy_bell_pte(
                fxp='"pbIII/fxp/Bells_no_stretching.fxp"',
                preset=None,
                empty_attack_dynamic_maker=infit.Value(0),
            ),
        ),
        speech_init_attributes={},
        include_glitter=False,
        include_diva=False,
        include_natural_radio=False,
        include_percussion=False,
    ), )
Example #15
class ToneSetTest(unittest.TestCase):
    p0 = ji.r(5, 4)
    p1 = ji.r(3, 2)
    p2 = ji.r(1, 1)
    p3 = ji.r(6, 5)
    p4 = ji.r(7, 4)
    p5 = ji.r(9, 8)
    t0 = old.Tone(p0, rhy.Unit(1))
    t1 = old.Tone(p1, rhy.Unit(1))
    t2 = old.Tone(p2, rhy.Unit(1))
    t3 = old.Tone(p3, rhy.Unit(1))
    t4 = old.Tone(p4, rhy.Unit(1))
    t5 = old.Tone(p5, rhy.Unit(1))
    t0_set = old.Tone(p0, rhy.Unit(0), rhy.Unit(1))
    t1_set = old.Tone(p1, rhy.Unit(1), rhy.Unit(1))
    t2_set = old.Tone(p2, rhy.Unit(2), rhy.Unit(1))
    t3_set = old.Tone(p3, rhy.Unit(3), rhy.Unit(1))
    t4_set = old.Tone(p4, rhy.Unit(4), rhy.Unit(1))
    t5_set = old.Tone(p5, rhy.Unit(5), rhy.Unit(1))
    t6_set = old.Tone(p5, rhy.Unit(1), rhy.Unit(5))
    mel0 = old.Melody([t0, t1, t2, t3, t4, t5])
    mel1 = old.Melody([old.Rest(rhy.Unit(1)), t1, t2, t3, t4, t5])
    mel2 = old.Melody([t0, t1])
    set0 = old.ToneSet([t0_set, t1_set, t2_set, t3_set, t4_set, t5_set])
    set1 = old.ToneSet([t1_set, t2_set, t3_set, t4_set, t5_set])
    set2 = old.ToneSet([t1_set, t6_set, t2_set])

    def test_constructor(self):
        self.assertEqual(old.ToneSet.from_melody(ToneSetTest.mel0), ToneSetTest.set0)

    def test_converter(self):
        self.assertEqual(ToneSetTest.mel0, ToneSetTest.set0.convert2melody())
        self.assertEqual(ToneSetTest.mel1, ToneSetTest.set1.convert2melody())

    def test_pop_by(self):
        popped = ToneSetTest.set0.copy().pop_by_pitch(ToneSetTest.p0, ToneSetTest.p1)
        self.assertEqual(ToneSetTest.mel2, popped.convert2melody())
        popped = ToneSetTest.set0.copy().pop_by_start(rhy.Unit(0), rhy.Unit(1))
        self.assertEqual(ToneSetTest.mel2, popped.convert2melody())

    def test_pop_by_time(self):
        for t in self.set0.pop_by_time(1):
            self.assertEqual(t, self.t1_set)
        for t in self.set0.pop_by_time(1.5):
            self.assertEqual(t, self.t1_set)
        test_set0 = self.set2.pop_by_time(1.5)
        test_set_compare0 = old.ToneSet([self.t1_set, self.t6_set])
        test_set1 = self.set2.pop_by_time(2.7)
        test_set_compare1 = old.ToneSet([self.t2_set, self.t6_set])
        self.assertEqual(test_set0, test_set_compare0)
        self.assertEqual(test_set1, test_set_compare1)

    def test_pop_by_correct_dur_and_delay(self):
        popped_by = self.set0.pop_by_pitch(self.p0, self.p5)
        melody = popped_by.convert2melody()
        self.assertEqual(melody[0].delay, rhy.Unit(5))
        self.assertEqual(melody[0].duration, rhy.Unit(1))
Example #16
def find_common_harmonics(p0: ji.JIPitch,
                          p1: ji.JIPitch,
                          gender: bool = True,
                          border: int = MAX_HARMONIC) -> tuple:
    """Find all common (sub-)harmonics between two pitches.

    If gender is True, the function returns common harmonics.
    If gender is False, the function returns common subharmonics.

    border declares the highest partial that shall be inspected.

    Return a tuple containing CommonHarmonic objects.
    """
    if not p0.is_empty and not p1.is_empty:
        harmonics = tuple(ji.r(b + 1, 1) for b in range(border))

        if not gender:
            harmonics = tuple(p.inverse() for p in harmonics)

        harmonics_per_pitch = tuple(
            tuple(p + h for h in harmonics) for p in (p0, p1))
        authentic_harmonics = list(
            (h, idx0, harmonics_per_pitch[1].index(h), True)
            for idx0, h in enumerate(harmonics_per_pitch[0])
            if h in harmonics_per_pitch[1])
        normalized_authentic_harmonics = tuple(h[0].normalize()
                                               for h in authentic_harmonics)

        normalized_harmonics_per_pitch = tuple(
            tuple(p.normalize() for p in har) for har in harmonics_per_pitch)
        octaves_per_harmonic = tuple(
            tuple(p.octave for p in har) for har in harmonics_per_pitch)
        unauthentic_harmonics = []
        for har0_idx, har0 in enumerate(normalized_harmonics_per_pitch[0]):
            if har0 not in normalized_authentic_harmonics:
                if har0 in normalized_harmonics_per_pitch[1]:
                    har1_idx = normalized_harmonics_per_pitch[1].index(har0)
                    oc = tuple(octaves[idx]
                               for octaves, idx in zip(octaves_per_harmonic, (
                                   har0_idx, har1_idx)))
                    unauthentic_harmonics.append((har0, ) + oc + (False, ))

        return tuple(
            CommonHarmonic(h[0], (h[1], h[2]), gender, h[3])
            for h in authentic_harmonics + unauthentic_harmonics)

    else:
        return tuple([])
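The idea behind the search can be illustrated with raw frequencies, independent of the ji pitch objects: the harmonic series of two fundamentals meet at common multiples.

f0, f1, border = 200.0, 300.0, 16
harmonics0 = {f0 * n for n in range(1, border + 1)}
harmonics1 = {f1 * n for n in range(1, border + 1)}
shared = sorted(harmonics0 & harmonics1)
assert shared[0] == 600.0  # 3rd partial of 200 Hz == 2nd partial of 300 Hz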
Example #17
 def mk_pitches(inverse=False):
     pitches0 = tuple(ji.r(p**2, 1) for p in (3, 5, 7))
     pitches0 += (ji.r(3**3, 1), )
     pitches1 = tuple(ji.r(p, 1) for p in (9, 19, 5, 11, 3, 13, 7))
     pitches2 = tuple(ji.r(1, 1) for p in (1, ))
     octaves = (ji.r(1, 2), ji.r(1, 1), ji.r(2, 1))
     pitches = (pitches0, pitches1, pitches2)
     if inverse:
         pitches = tuple(tuple(p.inverse() for p in pi) for pi in pitches)
     return functools.reduce(
         operator.add,
         tuple(
             tuple(p.normalize(2) + o for p in pi)
             for pi, o in zip(pitches, octaves)),
     )
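normalize(2) is assumed to fold a ratio into the octave [1, 2) before the octave factors above shift it into its register; a minimal Fraction-based sketch of that folding:

from fractions import Fraction

def normalize_to_octave(ratio: Fraction) -> Fraction:
    while ratio >= 2:
        ratio /= 2
    while ratio < 1:
        ratio *= 2
    return ratio

assert normalize_to_octave(Fraction(7, 1)) == Fraction(7, 4)
assert normalize_to_octave(Fraction(1, 3)) == Fraction(4, 3)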
Example #18
 def mk_mdc_gong_and_tong(cadence, timeflow) -> tuple:
     cadence_gong, cadence_tong = [], []
     for chord in cadence:
         hg, ht = [], []
         for p in chord.pitch:
             if p < ji.r(1, 2):
                 hg.append(p)
             else:
                 ht.append(p)
         cadence_gong.append(old.Chord(ji.JIHarmony(hg), chord.delay))
         cadence_tong.append(old.Chord(ji.JIHarmony(ht), chord.delay))
     cadences = tuple(
         old.JICadence(c).discard_rests()
         for c in (cadence_gong, cadence_tong))
     return tuple(
         MDC.mk_mdc_by_cadence(c, timeflow,
                               Score.TIME_LV_PER_INSTRUMENT[0], False)
         for c in cadences)
Example #19
def _adjust_scale(instrument: str, scale: tuple) -> tuple:
    if instrument == "violin":
        octave = 0
    elif instrument == "viola":
        octave = 0
    elif instrument == "cello":
        octave = -1
    else:
        raise NotImplementedError(instrument)

    adapted_scale = []
    for idx, pitch in enumerate(scale):
        registered_pitch = pitch.register(octave)
        if not idx and instrument == "viola":
            registered_pitch -= ji.r(2, 1)
        adapted_scale.append(registered_pitch)

    return tuple(adapted_scale)
Example #20
 def find_normalize_dependent_combinations(
     self,
     primes,
     maxlevel,
     gender,
     compound,
     compound_asymmetrical,
     three_compound,
     allow_stacking,
     normalize,
     additional_intervals,
 ):
     # find all intervals:
     intervals = ji.JIHarmony(
         Tree.find_intervals(
             primes,
             maxlevel,
             gender,
             compound,
             compound_asymmetrical,
             three_compound,
         ))
     for additional in additional_intervals:
         intervals.add(additional)
     intervals = [
         i.set_val_border(1).normalize(normalize) for i in intervals
     ]
     self.intervals = intervals
     comb_intervals = [
         inter for inter in intervals if inter != ji.r(1, 1, val_border=2)
     ]
     if allow_stacking is True:
         combinations = tuple(
             itertools.combinations_with_replacement(comb_intervals, 2))
     else:
         combinations = tuple(itertools.combinations(comb_intervals, 2))
     valid_combinations = []
     for interval in intervals:
         valid = []
         for comb in combinations:
             if (comb[0] + comb[1]).normalize(normalize) == interval:
                 valid.append(comb)
         valid_combinations.append(valid)
     return valid_combinations
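allow_stacking only switches between the two itertools combinators: for n intervals they yield n*(n-1)/2 versus n*(n+1)/2 pairs, the latter also allowing an interval to be stacked on itself.

import itertools

items = ("a", "b", "c", "d")
assert len(tuple(itertools.combinations(items, 2))) == 6
assert len(tuple(itertools.combinations_with_replacement(items, 2))) == 10
assert ("a", "a") in tuple(itertools.combinations_with_replacement(items, 2))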
Example #21
    @classmethod
    def from_complex_scale(
        cls,
        svl_path: str,
        sf_path: str,
        complex_scale_transcriber: ComplexScaleTranscriber,
        time_transcriber: TimeTranscriber,
        octave_of_first_pitch: int = 0,
        ratio2pitchclass_dict: dict = None,
    ) -> "Transcription":
        root = cls._get_root(svl_path)
        frequency_range = root[0][0].attrib["minimum"], root[0][0].attrib["maximum"]
        data = cls._filter_data_from_root(root)

        frequencies = tuple(map(operator.itemgetter(0), data))
        octavater = ji.r(1, 1).register(octave_of_first_pitch)
        pitches = tuple(
            octavater + pitch for pitch in complex_scale_transcriber(frequencies)
        )
        new_data = tuple((pitch,) + tone[1:] for pitch, tone in zip(pitches, data))

        melody, metre, spread_metrical_loop, tempo = time_transcriber(sf_path, new_data)
        if isinstance(metre, abjad.TimeSignature):
            bars = tuple(
                metre
                for i in range(
                    int(
                        math.ceil(
                            melody.duration
                            / fractions.Fraction(metre.numerator, metre.denominator)
                        )
                    )
                )
            )
        else:
            bars = metre
        return cls(
            melody,
            bars,
            frequency_range,
            ratio2pitchclass_dict,
            float(tempo),
            spread_metrical_loop,
        )
Example #22
    def __call__(self, frequencies: tuple) -> tuple:
        cent_distances = tuple(
            mel.SimplePitch.hz2ct(f0, f1)
            for f0, f1 in zip(frequencies, frequencies[1:]))
        starting_scale_degree = self._detect_starting_scale_degree(
            cent_distances)

        transcription_and_fitness_pairs = []
        for n, intonation in enumerate(
                self._intonations_per_scale_degree[starting_scale_degree]):
            transcription_and_fitness_pairs.append(
                self._make_transcription(starting_scale_degree, n,
                                         cent_distances))

        best = min(transcription_and_fitness_pairs,
                   key=operator.itemgetter(1))[0]

        # convert abstract data to actual pitch objects
        return tuple(self._intonations_per_scale_degree[data[0]][data[1]] +
                     ji.r(1, 1).register(data[2]) for data in best)
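mel.SimplePitch.hz2ct is assumed to return the interval between two frequencies in cents; a self-contained stand-in using the usual 1200 * log2(f1 / f0) formula:

import math

def hz2ct(f0: float, f1: float) -> float:
    # signed interval from f0 up (positive) or down (negative) to f1, in cents
    return 1200 * math.log2(f1 / f0)

assert round(hz2ct(440.0, 880.0)) == 1200  # an octave
assert round(hz2ct(300.0, 200.0)) == -702  # a descending perfect fifth (3:2)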
Example #23
def _adapt_left_hand(left: lily.NOventLine, vm) -> None:
    tw.swap_duration(9, 10, F(1, 4), left)
    tw.swap_duration(20, 21, F(1, 4), left)
    tw.swap_duration(30, 31, F(1, 4), left)
    tw.swap_duration(40, 41, F(1, 4), left)
    for n in (6, 8, 10, 16, 18, 21, 27, 29, 31, 37, 39, 41):
        left[n].pitch = [left[n].pitch[0].register(keyboard.SYMBOLIC_GONG_OCTAVE)]
        left[n].pedal = attachments.Pedal(False)
        left[n].volume = 0.43
        left[n].ottava = attachments.Ottava(-1)

    tw.split(41, left, F(1, 4), F(1, 4))
    tw.split(31, left, F(1, 4), F(1, 4))
    tw.split(21, left, F(1, 4), F(1, 4))
    tw.split(10, left, F(1, 4), F(1, 4))

    tw.swap_duration(20, 19, F(1, 16), left)

    tw.crop(3, left, F(1, 16))
    left[3].pitch = [ji.r(7, 8)]

    left[10].pitch = [
        ji.r(35, 24).register(keyboard.SYMBOLIC_GONG_OCTAVE),
        ji.r(35, 128),
    ]
    left[10].ottava = attachments.Ottava(-1)
    left[10].arpeggio = None

    tw.add_kenong(12, left, ji.r(14, 9))

    left[13].pitch = [
        ji.r(64, 63).register(keyboard.SYMBOLIC_GONG_OCTAVE),
        ji.r(64, 63),
        ji.r(128, 189),
    ]
    tw.crop(15, left, F(1, 16))
    left[16].pitch = [ji.r(7, 8)]
Example #24
 def find_best_fret_for_pitch(self, pitch):
     octave = 0
     comp = ji.r(2, 1)
     while pitch.float >= comp.float:
         pitch -= comp
         octave += 1
     factor = pitch.float
     closest = bisect.bisect_right(self.division_floats, factor)
     possible_solutions = []
     for c in [closest - 1, closest, closest + 1]:
         try:
             possible_solutions.append(self.division_floats[c])
         except IndexError:
             pass
     hof = crosstrainer.MultiDimensionalRating(size=1, fitness=[-1])
     for pos in possible_solutions:
         if pos > factor:
             diff = pos / factor
         else:
             diff = factor / pos
         hof.append(pos, diff)
     return MonochordFret(self.division_floats.index(hof._items[0]), octave)
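Stripped of the MonochordFret and crosstrainer machinery, the search above is a closest-ratio lookup in a sorted list via bisect; a standalone sketch with hypothetical division values:

import bisect

def closest_division(division_floats: list, factor: float) -> float:
    # only the neighbours around the insertion point can be the closest entry
    idx = bisect.bisect_right(division_floats, factor)
    candidates = division_floats[max(idx - 1, 0):idx + 2]
    return min(candidates, key=lambda v: max(v / factor, factor / v))

assert closest_division([1.0, 1.25, 1.5, 2.0], 1.4) == 1.5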
Example #25
def add_acciaccatura(
    nth_event: int,
    pitch: ji.JIPitch,
    novent_line: lily.NOventLine,
    add_glissando: bool = False,
    use_artifical_harmonic: bool = False,
) -> None:
    if use_artifical_harmonic:
        abjad_note = abjad.Chord(
            _get_artifical_harmonic_pitches(pitch),
            abjad.Duration(1, 8),
        )
        abjad.tweak(abjad_note.note_heads[1]).style = "harmonic"
        pitch = pitch + ji.r(4, 1)

    else:
        abjad_note = abjad.Note(
            lily.convert2abjad_pitch(pitch, globals_.RATIO2PITCHCLASS),
            abjad.Duration(1, 8),
        )
    novent_line[nth_event].acciaccatura = attachments.Acciaccatura(
        [pitch], abjad_note, add_glissando)
Example #26
from mu.mel import ji

CONCERT_PITCH = 260
PRIMES = [2, 3, 5, 7, 9, 11]

PITCHES_POSITIVE = []
PITCHES_NEGATIVE = []
for prime in PRIMES:
    p0 = ji.r(prime, 1).normalize(2)
    p1 = ji.r(1, prime).normalize(2)
    if prime == 2:
        p0 += ji.r(2, 1)
        p1 += ji.r(2, 1)
    p2 = p1 + ji.r(4, 1)
    p3 = p0 + ji.r(4, 1)
    PITCHES_POSITIVE.append(p0)
    PITCHES_POSITIVE.append(p2)
    PITCHES_NEGATIVE.append(p1)
    PITCHES_NEGATIVE.append(p3)

if __name__ == "__main__":
    import pyteqNew as pyteq

    for idx, pitches in enumerate([PITCHES_POSITIVE, PITCHES_NEGATIVE]):
        if idx < 1:
            name = "Positive"
        else:
            name = "Negative"

        for p_idx, pitch in enumerate(sorted(pitches)):
            local_name = name + str(p_idx)
Example #27
def make(name: str = "ONE", gender=False, group=0, sub_group0=1):
    return (
        segments.Silence(name="{}_start_silence".format(name), duration=18),
        segments.Chord(
            "{}_Speech0".format(name),
            ambitus_maker=ambitus.SymmetricalRanges(ji.r(7, 1), ji.r(2, 1), ji.r(4, 3)),
            group=(group, sub_group0, 0),
            chord=harmony.find_harmony(name="A", gender=gender),
            gender=gender,
            n_bars=1,
            duration_per_bar=34,
            start=0,
            dynamic_range_of_voices=(0.8, 1),
            anticipation_time=2.25,
            overlaying_time=3.25,
            speech_init_attributes={
                "speech0": {
                    "start": 0,
                    "duration": 35,
                    "sound_engine": speech.BrokenRadio(
                        (globals.SAM_SPEECH_SLICED_DERRIDA_KAFKA.path,),
                        duration=32,
                        volume=0.42,
                        activity_lv_per_effect={
                            "original": 8,
                            "harmonizer": 6,
                            "filter": 10,
                            "noise": 10,
                            "lorenz": 10,
                            "chenlee": 6,
                        },
                        level_per_effect={
                            "original": infit.Gaussian(0.4, 0.05),
                            "filter": infit.Gaussian(10.2, 2.25),
                            "harmonizer": infit.Gaussian(0.7, 0.15),
                            "chenlee": infit.Gaussian(0.1, 0.02),
                            "lorenz": infit.Gaussian(0.4, 0.02),
                            "noise": infit.Gaussian(0.32, 0.04),
                        },
                        transpo_maker=infit.Uniform(0.14, 0.8),
                        filter_freq_maker=infit.Gaussian(110, 50),
                        filter_q_maker=infit.Gaussian(5, 1),
                        curve=interpolations.InterpolationLine(
                            [
                                interpolations.FloatInterpolationEvent(0.25, 0.01),
                                interpolations.FloatInterpolationEvent(0.8, 1),
                                interpolations.FloatInterpolationEvent(0.3, 1),
                                interpolations.FloatInterpolationEvent(0, 0.1),
                            ]
                        ),
                    ),
                },
                "speech1": {
                    "start": 0.1,
                    "duration": 35,
                    "sound_engine": speech.BrokenRadio(
                        (globals.SAM_SPEECH_SLICED_DERRIDA_KAFKA.path,),
                        duration=32,
                        volume=0.3,
                        activity_lv_per_effect={
                            "original": 6,
                            "harmonizer": 4,
                            "filter": 9,
                            "noise": 9,
                            "lorenz": 8,
                            "chenlee": 7,
                        },
                        level_per_effect={
                            "original": infit.Gaussian(0.4, 0.05),
                            "filter": infit.Gaussian(10.2, 2.25),
                            "harmonizer": infit.Gaussian(0.7, 0.15),
                            "chenlee": infit.Gaussian(0.1, 0.02),
                            "lorenz": infit.Gaussian(0.4, 0.02),
                            "noise": infit.Gaussian(0.32, 0.04),
                        },
                        transpo_maker=infit.Uniform(1.14, 1.5),
                        filter_freq_maker=infit.Gaussian(200, 50),
                        filter_q_maker=infit.Gaussian(5, 1),
                        curve=interpolations.InterpolationLine(
                            [
                                interpolations.FloatInterpolationEvent(0.3, 0.01),
                                interpolations.FloatInterpolationEvent(0.8, 1),
                                interpolations.FloatInterpolationEvent(0.3, 1),
                                interpolations.FloatInterpolationEvent(0, 0.1),
                            ]
                        ),
                    ),
                },
                "speech2": {
                    "start": 0.075,
                    "duration": 35,
                    "sound_engine": speech.BrokenRadio(
                        (globals.SAM_SPEECH_SLICED_DERRIDA_KAFKA.path,),
                        duration=32,
                        volume=0.34,
                        activity_lv_per_effect={
                            "original": 6,
                            "harmonizer": 4,
                            "filter": 9,
                            "noise": 9,
                            "lorenz": 8,
                            "chenlee": 7,
                        },
                        level_per_effect={
                            "original": infit.Gaussian(0.4, 0.05),
                            "filter": infit.Gaussian(10.2, 2.25),
                            "harmonizer": infit.Gaussian(0.7, 0.15),
                            "chenlee": infit.Gaussian(0.1, 0.02),
                            "lorenz": infit.Gaussian(0.4, 0.02),
                            "noise": infit.Gaussian(0.32, 0.04),
                        },
                        transpo_maker=infit.Uniform(1.14, 1.5),
                        filter_freq_maker=infit.Gaussian(200, 50),
                        filter_q_maker=infit.Gaussian(5, 1),
                        curve=interpolations.InterpolationLine(
                            [
                                interpolations.FloatInterpolationEvent(0.4, 0.01),
                                interpolations.FloatInterpolationEvent(0.8, 1),
                                interpolations.FloatInterpolationEvent(0.3, 1),
                                interpolations.FloatInterpolationEvent(0, 0.1),
                            ]
                        ),
                    ),
                },
            },
            include_glitter=False,
            include_diva=False,
            include_natural_radio=False,
            include_percussion=False,
            include_voices=False,
        ),
    )
Example #28
 def get_from_func_pitch(func) -> ji.JIPitch:
     p = func(mode)
     if p.set_val_border(2).primes == (3,):  # 3 * 9 doesn't exist in current tuning
         return p.normalize(2) - ji.r(2, 1)
     else:
         return mel.TheEmptyPitch
Example #29
def make(name: str = "TWO", gender=False, group=0, sub_group0=1):
    return (
        segments.FreeStyleCP(
            "{}_0".format(name),
            ambitus_maker=ambitus.SymmetricalRanges(ji.r(5, 1), ji.r(9, 4),
                                                    ji.r(16, 11)),
            decision_type="activity",
            energy_per_voice=(9, 7, 7),
            weight_range=(4, 10),
            metrical_numbers=(12, 9, 12),
            silence_decider_per_voice=(
                infit.ActivityLevel(2),
                infit.ActivityLevel(1),
                infit.ActivityLevel(2),
            ),
            group=(group, sub_group0, 0),
            start_harmony=harmony.find_harmony("A",
                                               True,
                                               0,
                                               tuple([]),
                                               gender=gender),
            gender=gender,
            n_bars=2,
            duration_per_bar=13,
            start=0,
            dynamic_range_of_voices=(0.2, 0.5),
            anticipation_time=0.01,
            overlaying_time=0.25,
            cp_add_dissonant_pitches_to_nth_voice=(True, True, True),
            glitter_include_dissonant_pitches=True,
            pteq_engine_per_voice=(
                pteq.mk_super_soft_trippy_pte(
                    empty_attack_dynamic_maker=infit.Value(0.2),
                    fxp='"pbIII/fxp/VibraphoneV-BHumanizednostretching.fxp"',
                    preset=None,
                    sustain_pedal=0,
                ),
                pteq.mk_dreamy_pte(
                    # modulator=(ornamentations.SoftLineGlissandoMaker(),),
                    convert_dissonant_tones2glissandi=True,
                    empty_attack_dynamic_maker=infit.Value(0.2),
                ),
                pteq.mk_dreamy_pte(
                    empty_attack_dynamic_maker=infit.Value(0.2),
                    convert_dissonant_tones2glissandi=True,
                ),
            ),
            speech_init_attributes={},
            percussion_engine_per_voice=(
                percussion.Rhythmizer(
                    voice_meters2occupy=(0, ),
                    chord=infit.Cycle((
                        harmony.find_harmony(name="A", gender=gender),
                        harmony.find_harmony(name="C", gender=gender),
                    )),
                    sample_maker=infit.Cycle((
                        percussion.ResonanceSample(
                            path=infit.Cycle(
                                globals.SAM_CYMBALS_BIG_AGGRESSIVE),
                            pitch_factor=infit.Uniform(2, 6),
                            resonance_filter_bandwidth=infit.Uniform(0.4, 2),
                            resonance_filter_octave=infit.Cycle(
                                (2, 4, 3, 4, 1, 2)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                        percussion.ResonanceSample(
                            path=infit.Cycle(
                                globals.SAM_CYMBALS_MIDDLE_AGGRESSIVE),
                            pitch_factor=infit.Uniform(2, 5),
                            resonance_filter_bandwidth=infit.Uniform(0.5, 2),
                            resonance_filter_octave=infit.Cycle(
                                (2, 3, 4, 1, 2)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                    )),
                    likelihood_range=(0.5, 0.1),
                    volume_range=(0.1, 0.5),
                    ignore_beats_occupied_by_voice=False,
                ),
                percussion.Rhythmizer(
                    voice_meters2occupy=(1, ),
                    chord=infit.Cycle((
                        harmony.find_harmony(name="A", gender=gender),
                        harmony.find_harmony(name="C", gender=gender),
                    )),
                    sample_maker=infit.Cycle((
                        percussion.ResonanceSample(
                            path=infit.Cycle(globals.SAM_CYMBALS_BIG_CLOSE),
                            pitch_factor=infit.Gaussian(3, 5),
                            resonance_filter_bandwidth=infit.Gaussian(
                                0.8, 0.5),
                            resonance_filter_octave=infit.Cycle((2, )),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.3),
                            glissando_offset=0,
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                        percussion.ResonanceSample(
                            path=infit.Cycle(
                                globals.SAM_CYMBALS_MIDDLE_CLOSE_LOUD),
                            pitch_factor=infit.Gaussian(3, 4),
                            resonance_filter_bandwidth=infit.Uniform(0.5, 1),
                            resonance_filter_octave=infit.Cycle((2, )),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=0,
                            glissando_size=infit.Gaussian(1, 0.1),
                        ),
                    )),
                    likelihood_range=(0.6, 0.1),
                    volume_range=(0.1, 0.8),
                    ignore_beats_occupied_by_voice=False,
                ),
                percussion.Rhythmizer(
                    voice_meters2occupy=(0, 1, 2),
                    chord=infit.Cycle((harmony.find_harmony(name="B",
                                                            gender=gender), )),
                    sample_maker=infit.Cycle((
                        percussion.Sample(
                            path=infit.Cycle(
                                globals.SAM_KENDANG_HIGH_HIGH_CLOSE_HAND),
                            frequency=globals.SAM_KENDANG_HIGH_HIGH_CLOSE_HAND.
                            information["frequency"],
                            pitch_factor=infit.Cycle((2, 4, 8, 4)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                        percussion.Sample(
                            path=infit.Cycle(
                                globals.SAM_KENDANG_LOW_LOW_CLOSE_HAND),
                            frequency=globals.SAM_KENDANG_LOW_LOW_CLOSE_HAND.
                            information["frequency"],
                            pitch_factor=infit.Cycle((16, 8, 4, 8, 2)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                        percussion.Sample(
                            path=infit.Cycle(
                                globals.SAM_KENDANG_HIGH_HIGH_CLOSE_HAND),
                            frequency=globals.SAM_KENDANG_HIGH_HIGH_CLOSE_HAND.
                            information["frequency"],
                            pitch_factor=infit.Cycle((8, 4, 2, 4)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                        percussion.Sample(
                            path=infit.Cycle(
                                globals.SAM_KENDANG_HIGH_LOW_CLOSE_HAND),
                            frequency=globals.SAM_KENDANG_HIGH_LOW_CLOSE_HAND.
                            information["frequency"],
                            pitch_factor=infit.Cycle((8, 16, 8, 4, 8, 2)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                    )),
                    likelihood_range=(0.1, 0.5),
                    volume_range=(0.1, 0.5),
                    ignore_beats_occupied_by_voice=False,
                ),
            ),
            include_glitter=True,
            include_diva=False,
            include_natural_radio=True,
            include_percussion=True,
            voices_overlaying_time=5,
            radio_samples=(
                globals.SAM_RADIO_ITALY[-1],
                globals.SAM_RADIO_BIELEFELD[-1],
            ),
            radio_n_changes=8,
            radio_average_volume=0.1,
            radio_shadow_time=0.01,
            radio_min_volume=0.825,
        ),
        segments.Chord(
            "{}_Bell2".format(name),
            ambitus_maker=ambitus.SymmetricalRanges(ji.r(12, 1), ji.r(2, 1),
                                                    ji.r(15, 8)),
            group=(group, sub_group0, 1),
            chord=harmony.find_harmony(name="A", idx=0, gender=gender),
            gender=gender,
            n_bars=1,
            duration_per_bar=8,
            start=4,
            dynamic_range_of_voices=(0.7, 0.9),
            anticipation_time=0.2,
            overlaying_time=1.25,
            glitter_modulater_per_voice=("randomh", "randomh", "randomh"),
            pteq_engine_per_voice=(
                pteq.mk_bright_bell(
                    fxp='"pbIII/fxp/Bells_no_stretching.fxp"',
                    preset=None,
                    empty_attack_dynamic_maker=infit.Value(0),
                ),
                pteq.mk_bright_bell(
                    fxp='"pbIII/fxp/GlockenspielHumanizednostretching.fxp"',
                    preset=None,
                    empty_attack_dynamic_maker=infit.Value(0),
                ),
                pteq.mk_bright_bell(
                    fxp='"pbIII/fxp/GlockenspielHumanizednostretching.fxp"',
                    preset=None,
                    empty_attack_dynamic_maker=infit.Value(0),
                ),
            ),
            # speech_init_attributes={
            #     # "speech0": {
            #     #     "start": 0.3,
            #     #     "duration": 12.5,
            #     #     "sound_engine": speech.Sampler(
            #     #         globals.SAM_RADIO_CAROLINA[1], volume=0.6
            #     #     ),
            #     # },
            #     # "speech2": {
            #     #     "start": -0.3,
            #     #     "duration": 12.5,
            #     #     "sound_engine": speech.Sampler(
            #     #         globals.SAM_RADIO_ROEHRENRADIO_CLOSE_MITTELWELLE[-1], volume=0.6
            #     #     ),
            #     # },
            # },
            include_glitter=False,
            include_diva=False,
            include_natural_radio=False,
            include_percussion=False,
        ),
        segments.FreeStyleCP(
            "{}_1".format(name),
            ambitus_maker=ambitus.SymmetricalRanges(ji.r(5, 1), ji.r(9, 4),
                                                    ji.r(16, 11)),
            decision_type="activity",
            energy_per_voice=(9, 6, 7),
            weight_range=(4, 10),
            metrical_numbers=(13, 9, 13),
            silence_decider_per_voice=(
                infit.ActivityLevel(2),
                infit.ActivityLevel(1),
                infit.ActivityLevel(2),
            ),
            group=(group, sub_group0, 1),
            start_harmony=harmony.find_harmony("A",
                                               True,
                                               0,
                                               tuple([]),
                                               gender=gender),
            gender=gender,
            n_bars=2,
            duration_per_bar=15,
            start=-3.8,
            dynamic_range_of_voices=(0.2, 0.4),
            anticipation_time=0.01,
            overlaying_time=0.25,
            cp_add_dissonant_pitches_to_nth_voice=(True, True, True),
            glitter_include_dissonant_pitches=True,
            pteq_engine_per_voice=(
                pteq.mk_super_soft_trippy_pte(
                    empty_attack_dynamic_maker=infit.Value(0.2),
                    fxp='"pbIII/fxp/VibraphoneV-BHumanizednostretching.fxp"',
                    preset=None,
                    sustain_pedal=0,
                ),
                pteq.mk_dreamy_pte(
                    # modulator=(ornamentations.SoftLineGlissandoMaker(),),
                    convert_dissonant_tones2glissandi=True,
                    empty_attack_dynamic_maker=infit.Value(0.2),
                ),
                pteq.mk_dreamy_pte(
                    empty_attack_dynamic_maker=infit.Value(0.2),
                    convert_dissonant_tones2glissandi=True,
                ),
            ),
            speech_init_attributes={},
            percussion_engine_per_voice=(
                percussion.Rhythmizer(
                    voice_meters2occupy=(0, ),
                    chord=infit.Cycle((
                        harmony.find_harmony(name="A", gender=gender),
                        harmony.find_harmony(name="C", gender=gender),
                    )),
                    sample_maker=infit.Cycle((
                        percussion.ResonanceSample(
                            path=infit.Cycle(
                                globals.SAM_CYMBALS_BIG_AGGRESSIVE),
                            pitch_factor=infit.Uniform(2, 6),
                            resonance_filter_bandwidth=infit.Uniform(0.4, 2),
                            resonance_filter_octave=infit.Cycle(
                                (2, 4, 3, 4, 1, 2)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                        percussion.ResonanceSample(
                            path=infit.Cycle(
                                globals.SAM_CYMBALS_MIDDLE_AGGRESSIVE),
                            pitch_factor=infit.Uniform(2, 5),
                            resonance_filter_bandwidth=infit.Uniform(0.5, 2),
                            resonance_filter_octave=infit.Cycle(
                                (2, 3, 4, 1, 2)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                    )),
                    likelihood_range=(0.5, 0.1),
                    volume_range=(0.1, 0.5),
                    ignore_beats_occupied_by_voice=False,
                ),
                percussion.Rhythmizer(
                    voice_meters2occupy=(1, ),
                    chord=infit.Cycle((
                        harmony.find_harmony(name="A", gender=gender),
                        harmony.find_harmony(name="C", gender=gender),
                    )),
                    sample_maker=infit.Cycle((
                        percussion.ResonanceSample(
                            path=infit.Cycle(globals.SAM_CYMBALS_BIG_CLOSE),
                            pitch_factor=infit.Gaussian(3, 5),
                            resonance_filter_bandwidth=infit.Gaussian(
                                0.8, 0.5),
                            resonance_filter_octave=infit.Cycle((2, )),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.3),
                            glissando_offset=0,
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                        percussion.ResonanceSample(
                            path=infit.Cycle(
                                globals.SAM_CYMBALS_MIDDLE_CLOSE_LOUD),
                            pitch_factor=infit.Gaussian(3, 4),
                            resonance_filter_bandwidth=infit.Uniform(0.5, 1),
                            resonance_filter_octave=infit.Cycle((2, )),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=0,
                            glissando_size=infit.Gaussian(1, 0.1),
                        ),
                    )),
                    likelihood_range=(0.6, 0.1),
                    volume_range=(0.1, 0.8),
                    ignore_beats_occupied_by_voice=False,
                ),
                percussion.Rhythmizer(
                    voice_meters2occupy=(0, 1, 2),
                    chord=infit.Cycle((harmony.find_harmony(name="B",
                                                            gender=gender), )),
                    sample_maker=infit.Cycle((
                        percussion.Sample(
                            path=infit.Cycle(
                                globals.SAM_KENDANG_HIGH_HIGH_CLOSE_HAND),
                            frequency=globals.SAM_KENDANG_HIGH_HIGH_CLOSE_HAND.
                            information["frequency"],
                            pitch_factor=infit.Cycle((2, 4, 8, 4)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                        percussion.Sample(
                            path=infit.Cycle(
                                globals.SAM_KENDANG_LOW_LOW_CLOSE_HAND),
                            frequency=globals.SAM_KENDANG_LOW_LOW_CLOSE_HAND.
                            information["frequency"],
                            pitch_factor=infit.Cycle((16, 8, 4, 8, 2)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                        percussion.Sample(
                            path=infit.Cycle(
                                globals.SAM_KENDANG_HIGH_HIGH_CLOSE_HAND),
                            frequency=globals.SAM_KENDANG_HIGH_HIGH_CLOSE_HAND.
                            information["frequency"],
                            pitch_factor=infit.Cycle((8, 4, 2, 4)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                        percussion.Sample(
                            path=infit.Cycle(
                                globals.SAM_KENDANG_HIGH_LOW_CLOSE_HAND),
                            frequency=globals.SAM_KENDANG_HIGH_LOW_CLOSE_HAND.
                            information["frequency"],
                            pitch_factor=infit.Cycle((8, 16, 8, 4, 8, 2)),
                            glissando_direction=True,
                            glissando_duration=infit.Uniform(0.001, 0.2),
                            glissando_offset=infit.Uniform(0, 0.1),
                            glissando_size=infit.Gaussian(1, 0.2),
                        ),
                    )),
                    likelihood_range=(0.1, 0.5),
                    volume_range=(0.1, 0.5),
                    ignore_beats_occupied_by_voice=False,
                ),
            ),
            include_glitter=True,
            include_diva=False,
            include_natural_radio=True,
            include_percussion=True,
            voices_overlaying_time=5,
            radio_samples=(
                globals.SAM_RADIO_ITALY[-1],
                globals.SAM_RADIO_BIELEFELD[-1],
            ),
            radio_n_changes=8,
            radio_average_volume=0.1,
            radio_shadow_time=0.01,
            radio_min_volume=0.825,
        ),
    )
Example #30
    def _help_tonality_flux(
        scale_degree: int,
        slice0: breads.Slice,
        slice1: breads.Slice,
        p0: ji.JIPitch,
        p1: ji.JIPitch,
        available_pitches_per_tone: tuple,
        # harmonicity_border for intervals in parallel movement
        harmonicity_border: float = ji.r(7, 6).harmonicity_simplified_barlow,
        # minimal harmonic closeness for intervals in counter movement
        min_closeness: float = 0.75,
        maximum_octave_difference: tuple = (1, 1),
        get_available_pitches_from_adapted_instrument=None,
    ) -> None:
        movement_direction = slice0.melody_pitch < slice1.melody_pitch

        # (1) find pitches for microtonal parallel or counter movement of voices
        parallel_candidates = []
        counter_candidates = []
        for sd in range(7):
            if sd != scale_degree:
                avp0, avp1 = tuple(
                    tuple(p for p in avp
                          if globals_.PITCH2SCALE_DEGREE[p] == sd)
                    for avp in available_pitches_per_tone)
                intervals0, intervals1 = tuple(
                    tuple((main_pitch - side_pitch).normalize()
                          for side_pitch in avp)
                    for main_pitch, avp in ((p0, avp0), (p1, avp1)))

                for it0, it1 in itertools.product(intervals0, intervals1):

                    relevant_pitch0 = avp0[intervals0.index(it0)]
                    relevant_pitch1 = avp1[intervals1.index(it1)]
                    rp_movement_direction = relevant_pitch0 < relevant_pitch1

                    if it0 == it1 or it0 == it1.inverse().normalize():
                        harmonicity = it0.harmonicity_simplified_barlow
                        if harmonicity >= harmonicity_border:
                            parallel_candidates.append(
                                ((relevant_pitch0, relevant_pitch1),
                                 harmonicity))

                    if movement_direction != rp_movement_direction:
                        closeness0 = globals_.CLOSENESS_FROM_PX_TO_PY[p0][
                            relevant_pitch0]
                        closeness1 = globals_.CLOSENESS_FROM_PX_TO_PY[p1][
                            relevant_pitch1]
                        tests = (closeness0 > min_closeness,
                                 closeness1 > min_closeness)
                        if all(tests):
                            counter_candidates.append((
                                (relevant_pitch0, relevant_pitch1),
                                closeness0 + closeness1,
                            ))

        if parallel_candidates:
            hp0, hp1 = max(parallel_candidates, key=operator.itemgetter(1))[0]

        elif counter_candidates:
            hp0, hp1 = max(counter_candidates, key=operator.itemgetter(1))[0]

        else:
            hp0, hp1 = (max(
                ((hp, globals_.CLOSENESS_FROM_PX_TO_PY[mp][hp]) for hp in avp
                 if globals_.PITCH2SCALE_DEGREE[hp] != scale_degree),
                key=operator.itemgetter(1),
            )[0] for mp, avp in zip((p0, p1), available_pitches_per_tone))

        registered_hp0, registered_hp1 = VerseMaker._register_tonality_flux_pitch(
            slice0.melody_pitch,
            slice1.melody_pitch,
            hp0,
            hp1,
            get_available_pitches_from_adapted_instrument,
            maximum_octave_difference,
        )

        slice0.harmonic_pitch = registered_hp0
        slice1.harmonic_pitch = registered_hp1