Example No. 1
    def estimate_rhythm(self, melody: old.Melody) -> old.Melody:
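        # stretch the rhythm, quantise it, then apply the post-quantisation stretch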
        melody.dur = rhy.Compound(melody.dur).stretch(self.stretch_factor)
        melody.delay = rhy.Compound(melody.delay).stretch(self.stretch_factor)
        melody = quantizise.quantisize_rhythm(
            melody, self.n_divisions, self.min_tone_size, self.min_rest_size
        )
        melody.dur = rhy.Compound(melody.dur).stretch(self.post_stretch_factor)
        melody.delay = rhy.Compound(melody.delay).stretch(self.post_stretch_factor)
        return melody
Example No. 2
    @staticmethod
    def _calculate_metricity_of_melody(
        time_signature: tuple, melody: old.Melody
    ) -> float:
        duration = melody.duration

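        # assign a metric weight (fitness) to every eighth-note position of the piece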
        fitness_per_beat = {}
        rising = fractions.Fraction(1, 8)
        position = 0
        bar_duration = time_signature[0].numerator / time_signature[0].denominator
        n_bars = int(math.ceil(duration / bar_duration))
        for _ in range(n_bars):
            for fitness in time_signature[1]:
                fitness_per_beat.update({position: fitness})
                position += rising

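        # sum the weight at each tone's onset; onsets that miss the grid get a small penalty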
        metricity = 0
        for tone in melody.convert2absolute():
            if tone.pitch:
                try:
                    metricity += fitness_per_beat[tone.delay]
                except KeyError:
                    metricity -= 0.01

        return metricity
Example No. 3
    def estimate_best_meter(self, melody: old.Melody) -> tuple:
        """Return (Melody, TimeSignature) - pair."""
        meter_fitness_pairs = []
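        # try each candidate meter with every possible number of eighth-note upbeat rests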
        for meter in self.potential_meters:
            n_potential_upbeats = int(
                fractions.Fraction(meter[0].numerator, meter[0].denominator)
                / fractions.Fraction(1, 8)
            )
            for n_upbeats in range(n_potential_upbeats):
                adapted_melody = melody.copy()
                if n_upbeats:
                    adapted_melody.insert(
                        0, old.Rest(n_upbeats * fractions.Fraction(1, 8))
                    )
                metricity = self._calculate_metricity_of_melody(meter, adapted_melody)
                meter_fitness_pairs.append((adapted_melody, meter[0], metricity))

        return max(meter_fitness_pairs, key=operator.itemgetter(2))[:2]
Example No. 4
    @staticmethod
    def mk_envelope(
        voice: old.Melody,
        shadow_time: float,
        duration: float,
        average_volume: float,
        min_volume: float,
        max_volume: float,
        make_envelope: bool,
        anticipation_time: float,
        overlaying_time: float,
    ) -> interpolations.InterpolationLine:

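        # map each tone's volume (relative to the loudest tone) into [min_volume, max_volume]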
        volume_difference = max_volume - min_volume
        max_volume_of_melody = max(voice.volume)

        def detect_volume(tone_volume: float) -> float:
            percentage = tone_volume / max_volume_of_melody
            return (percentage * volume_difference) + min_volume

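        # dynamic envelope: one (absolute onset + anticipation_time, volume) event per tone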
        if make_envelope:
            events = tuple(
                (float(tone.delay) + anticipation_time, detect_volume(tone.volume))
                for tone in voice.convert2absolute()
            )
            envelope = interpolations.ShadowInterpolationLine(
                average_volume, shadow_time, events, duration
            )

        else:
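            # static envelope: hold max_volume for the entire duration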
            envelope = interpolations.InterpolationLine(
                (
                    interpolations.FloatInterpolationEvent(duration, max_volume),
                    interpolations.FloatInterpolationEvent(0, max_volume),
                )
            )

        return envelope
Example No. 5
    def __init__(
        self,
        voice: old.Melody,
        new_sample_positions: tuple,
        sample_per_change: tuple,
        make_envelope: bool,
        average_volume: float,
        min_volume: float,
        max_volume: float,
        duration: float,
        tempo_factor: float,
        shadow_time: float,
        crossfade_duration: float = 0.25,
        anticipation_time: float = 0,
        overlaying_time: float = 0,
        attack_duration: infit.InfIt = infit.Value(0.5),
        release_duration: infit.InfIt = infit.Value(0.5),
        random_seed: int = 100,
    ) -> None:

        assert max_volume > min_volume
        assert min_volume > average_volume

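        # accept plain numbers by wrapping them in constant infit.Value generators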
        if not isinstance(attack_duration, infit.InfIt):
            attack_duration = infit.Value(attack_duration)

        if not isinstance(release_duration, infit.InfIt):
            release_duration = infit.Value(release_duration)

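        # seed the random module once so repeated renderings are reproducible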
        import random as random_module

        random_module.seed(random_seed)

        self.__random_module = random_module

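        # stretch the melody's delays and durations to the requested tempo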
        voice.delay = rhy.Compound(voice.delay).stretch(tempo_factor)
        voice.dur = rhy.Compound(voice.dur).stretch(tempo_factor)

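        # cache per-sample metadata (duration and channel count) from pyo.sndinfo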
        self.__sndinfo_per_sample = {
            sample: pyo.sndinfo(sample) for sample in set(sample_per_change)
        }
        self.__n_channels_per_sample = {
            sample: self.__sndinfo_per_sample[sample][3]
            for sample in self.__sndinfo_per_sample
        }
        self.__duration_per_sample = {
            sample: self.__sndinfo_per_sample[sample][1]
            for sample in self.__sndinfo_per_sample
        }
        self.__new_sample_positions = new_sample_positions
        self.__sample_per_change = sample_per_change
        self.__duration = duration
        self.__envelope = self.mk_envelope(
            voice,
            shadow_time,
            duration,
            average_volume,
            min_volume,
            max_volume,
            make_envelope,
            anticipation_time,
            overlaying_time,
        )
        self.__anticipation_time = anticipation_time
        self.__overlaying_time = overlaying_time
        self.__tempo_factor = tempo_factor
        self.__attack_duration = attack_duration
        self.__release_duration = release_duration
        self.__crossfade_duration = crossfade_duration
        self.__halved_crossfade_duration = crossfade_duration * 0.5
Example No. 6
    @classmethod
    def from_melody(
        cls,
        melody: old.Melody,
        bars: tuple,
        max_rest_size_to_ignore: fractions.Fraction = fractions.Fraction(1, 4),
        maximum_deviation_from_center: float = 0.5,
    ) -> "Bread":
        if not 0 <= maximum_deviation_from_center <= 1:
            msg = "maximum_deviation_from_center has to be in range 0-1"
            raise ValueError(msg)

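        # merge tied tones and drop rests short enough to ignore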
        adapted_melody = melody.tie().discard_rests(max_rest_size_to_ignore)
        smallest_unit_to_split = min(
            t.delay for t in adapted_melody if not t.pitch.is_empty
        )
        if smallest_unit_to_split.numerator == 1:
            smallest_unit = fractions.Fraction(
                1, smallest_unit_to_split.denominator * 2
            )
        else:
            smallest_unit = fractions.Fraction(1, smallest_unit_to_split.denominator)

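        # build a (position, metricity) grid for every bar and flatten it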
        position_metricity_pairs_per_bar = (
            cls._get_position_metricity_pairs(
                (ts, globals_.TIME_SIGNATURES2COMPOSITION_STRUCTURES[ts]), smallest_unit
            )
            for ts in bars
        )
        positions, metricities = zip(
            *tuple(functools.reduce(operator.add, position_metricity_pairs_per_bar))
        )
        positions = tools.accumulate_from_zero(positions)[:-1]

        slices = []

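        # rests stay whole; tones are split at the strongest position near their centre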
        for tone in adapted_melody.convert2absolute():
            if tone.pitch.is_empty:
                mp = None
                slices.append(Slice(tone.delay, tone.duration, False, mp))
            else:
                mp = tone.pitch

                # figure out where to split the tone:

                # (1) find possible split-position candidates
                dev_range = (tone.duration - tone.delay) / 2
                center = dev_range + tone.delay
                actual_dev = dev_range * maximum_deviation_from_center
                dev0, dev1 = center - actual_dev, center + actual_dev
                available_split_positions = tuple(
                    (pos, met)
                    for pos, met in zip(positions, metricities)
                    if pos > dev0 and pos < dev1
                )

                # (2) choose the one with the highest metricity
                split_position = max(
                    available_split_positions, key=operator.itemgetter(1)
                )[0]

                # add both slices
                for start, stop, does_slice_start_overlap_with_attack in (
                    (tone.delay, split_position, True),
                    (split_position, tone.duration, False),
                ):
                    slices.append(
                        Slice(start, stop, does_slice_start_overlap_with_attack, mp)
                    )

        return cls(*slices)
Example No. 7
    def __call__(self, melody: old.Melody) -> old.Melody:
        new_melody = melody.copy()
        melody_size = len(melody)

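        # check every tone for a possible glissando ornamentation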
        for idx, tone in enumerate(new_melody):
            halved_duration = tone.duration * 0.5

            # only add ornamentation if there isn't any glissando yet
            if (not tone.glissando and not tone.pitch.is_empty
                    and halved_duration > self.__minima_gissando_duration):
                previous = None
                following = None
                previous_distance = None
                following_distance = None

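                # measure the distance in cents to the previous and the next sounding tone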
                if idx != 0 and not melody[idx - 1].pitch.is_empty:
                    previous = melody[idx - 1]
                    previous_distance = previous.pitch.cents - tone.pitch.cents

                if (idx + 1 != melody_size
                        and not melody[idx + 1].pitch.is_empty):
                    following = melody[idx + 1]
                    following_distance = following.pitch.cents - tone.pitch.cents

                beginning_and_end_glissando = (
                    previous is not None
                    and abs(previous_distance) > self.__minima_gissando_size,
                    following is not None
                    and abs(following_distance) > self.__minima_gissando_size,
                )

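                # only proceed if at least one neighbouring interval is large enough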
                if any(beginning_and_end_glissando):

                    if next(self.__al):
                        if all(beginning_and_end_glissando):
                            glissando_type = next(
                                self.__glissando_type_generator)
                        else:
                            glissando_type = beginning_and_end_glissando.index(
                                True)

                        glissando_type = ((True, False), (False, True),
                                          (True, True))[glissando_type]

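                        # build the pitch interpolation for the first and/or second half of the tone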
                        glissando_line = []
                        is_first = True
                        for is_allowed, distance in zip(
                                glissando_type,
                                (previous_distance, following_distance)):
                            if is_allowed:
                                data = self.get_glissando_values(
                                    halved_duration, distance)
                                remaining_time = halved_duration - data[1]
                                if is_first:
                                    data = (
                                        old.PitchInterpolation(
                                            data[1],
                                            mel.SimplePitch(0, data[0])),
                                        old.PitchInterpolation(
                                            remaining_time,
                                            mel.SimplePitch(0, 0)),
                                    )
                                else:
                                    data = (
                                        old.PitchInterpolation(
                                            remaining_time,
                                            mel.SimplePitch(0, 0)),
                                        old.PitchInterpolation(
                                            data[1], mel.SimplePitch(0, 0)),
                                        old.PitchInterpolation(
                                            0, mel.SimplePitch(0, data[0])),
                                    )
                            else:
                                data = [
                                    old.PitchInterpolation(
                                        halved_duration, mel.SimplePitch(0, 0))
                                ]
                                if not is_first:
                                    data.append(
                                        old.PitchInterpolation(
                                            0, mel.SimplePitch(0, 0)))

                            glissando_line.extend(data)
                            is_first = False

                        new_melody[idx].glissando = old.GlissandoLine(
                            interpolations.InterpolationLine(glissando_line))

        return new_melody
Example No. 8
    def transform_melody(
        self,
        melody: old.Melody,
        mapping: dict = {
            instr: prime
            for prime, instr in zip(globals_.METRICAL_PRIMES, ("violin",
                                                               "viola",
                                                               "cello"))
        },
    ) -> tuple:
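        # each event gets the metrical prime of its instrument; rests inherit the previous one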
        prime_number_per_event = []
        for tone in melody:
            if tone.pitch.is_empty:
                if prime_number_per_event:
                    prime_number_per_event.append(prime_number_per_event[-1])

                else:
                    prime_number_per_event.append(None)

            else:
                prime_number_per_event.append(
                    mapping[globals_.PITCH2INSTRUMENT[tone.pitch.normalize()]])

        if prime_number_per_event[0] is None:
            prime_number_per_event[0] = int(prime_number_per_event[1])

        # as high metricity as possible / as low deviation as possible
        hof = crosstrainer.MultiDimensionalRating(size=2, fitness=[1, -1])

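        # try every possible offset into the rhythmic grid of the first event's prime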
        n_possible_offsets = len(
            self._rhythm_and_metricity_per_prime[prime_number_per_event[0]][0])
        for n_offsets in range(n_possible_offsets):
            if n_offsets > 0:
                adapted_melody = melody.copy()
                offset_duration = sum(self._rhythm_and_metricity_per_prime[
                    prime_number_per_event[0]][0][:n_offsets])
                adapted_melody.insert(
                    0, old.Tone(mel.TheEmptyPitch, delay=offset_duration))
                adapted_prime_number_per_event = (
                    prime_number_per_event[0],
                ) + tuple(prime_number_per_event)

            else:
                adapted_melody = melody.copy()
                adapted_prime_number_per_event = tuple(prime_number_per_event)

            expected_distances = tuple(
                fractions.Fraction(d) for d in adapted_melody.delay)

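            # snap every event to the next admissible position on the metrical grid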
            positions = [
                Point(
                    adapted_prime_number_per_event[0],
                    0,
                    0,
                    self._absolute_rhythm_and_metricity_per_prime,
                    self.duration,
                )
            ]
            for expected_distance, prime_number in zip(
                    expected_distances,
                    adapted_prime_number_per_event[1:] +
                    (adapted_prime_number_per_event[-1], ),
            ):
                positions.append(positions[-1].find_next_position(
                    prime_number, expected_distance))

            absolute_rhythm = tuple(p.position for p in positions)
            complete_duration = (positions[-1].nth_loop + 1) * self.duration

            if absolute_rhythm[-1] != complete_duration:
                absolute_rhythm += (complete_duration, )
                adapted_melody.append(old.Tone(mel.TheEmptyPitch, delay=1))

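            # convert absolute positions back to relative delays and rebuild the melody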
            relative_rhythm = tuple(
                b - a for a, b in zip(absolute_rhythm, absolute_rhythm[1:]))
            new_melody = old.Melody([
                old.Tone(pitch=t.pitch, volume=t.volume, delay=r, duration=r)
                for t, r in zip(adapted_melody, relative_rhythm)
            ])

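            # rate this variant: high metricity is good, deviation from the expected rhythm is bad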
            summed_metricity = sum(p.metricity
                                   for p in positions[:-1]) / len(positions)
            summed_deviation = sum(
                abs(exp - real)
                for exp, real in zip(expected_distances, relative_rhythm))
            hof.append(
                (new_melody, positions[-1].nth_loop + 1),
                summed_metricity,
                summed_deviation,
            )

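        # pick the highest-rated variant from the hall of fame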
        best = hof.convert2list()[-1]
        return (best[0][0], lambda: self.spread(best[0][-1], mapping)), best[1]