Example #1
    def add_midi_score(self):
        """Transform a scale degree sequence into a midi pitch sequence"""

        fifth_degree_pitch = 7 + self.tonics[self.tonic]
        while fifth_degree_pitch < 45:
            fifth_degree_pitch += 12
        start_index = Voice.all_midi_pitches.index(fifth_degree_pitch)
        self.melody_range = Voice.all_midi_pitches[start_index:start_index +
                                                   11]
        self.logger.warning(f"Melody range: {self.melody_range}")

        self.unit_length = sum(self.finalized_rhythms[0])
        if self.time_sig in {(4, 3), (4, 2)}:
            chord_quarter_length = self.measure_length // 2
        else:
            chord_quarter_length = self.measure_length
        self.logger.warning(f"Unit length: {self.unit_length}")
        self.logger.warning(f"Chord quarter length: {chord_quarter_length}")

        Voice.max_note_duration = 960 * chord_quarter_length
        if Voice.pickup:
            index_shift = self.add_pickup_notes()
        else:
            index_shift = 0
        self.current_time = Voice.pickup_duration

        print(f"Break melody: {self.break_notes}")
        self.nested_scale_degrees.pop()
        self.add_midi_section(self.nested_scale_degrees, 0, {}, index_shift)

        self.unnested_scale_degrees.pop()

        if not self.repeat_ending:
            self.midi_notes.append(
                Voice.Note("Rest", self.current_time, Voice.max_note_duration))
            self.current_time += Voice.max_note_duration
            return

        second_pickup_fraction = Fraction(
            numerator=sum(self.finalized_rhythms[-5][1:]),
            denominator=self.unit_length)
        second_pickup_duration = int(Voice.max_note_duration *
                                     second_pickup_fraction)

        ending_duration = Voice.max_note_duration - second_pickup_duration
        self.midi_notes.append(
            Voice.Note("Rest", self.current_time, ending_duration))

        self.current_time += ending_duration

        remaining_melody = self.nested_scale_degrees[-4:]
        remaining_melody[0] = remaining_melody[0][1:]

        end_notes_num = self.add_midi_section(remaining_melody, -5, {-5: 1})
        self.unnested_scale_degrees.extend(
            self.unnested_scale_degrees[-end_notes_num:])

        self.midi_notes.append(
            Voice.Note("Rest", self.current_time, Voice.max_note_duration))
        self.current_time += Voice.max_note_duration
Example #2
def set_melodic_figures(nested_scale_degrees):
    chosen_figurations = []
    melodic_figures = {
        (2, (1, )): "IPT",
        (0, (2, 1)): "DCN",
        (2, (-1, 1)): "OPT",
        (0, (-1, )): "CN",
        (1, (-1, )): "PIN",
        (1, (2, )): "CIN",
        (1, (-2, -1)): "OPT",
        (4, (2, )): "IPT",
        (3, (4, )): "CIN",
        (3, (1, )): "IPT",
        (1, (0, )): "RET",
        (2, (2, )): "ANT",
        (3, (2, )): "IPT",
        (3, (1, 3)): "ANT",
        (2, (2, 3)): "OPT",
        (4, (1, 2)): "IPT",
        (1, (-2, )): "OPT",
        (1, (-1, 0)): "OPT",
        (3, (1, 2)): "IPT",
        (5, (3, )): "5PT",
        (0, (1, )): "CN",
        (1, (1, )): "ANT"
    }
    chord_index = 0
    for previous_melody_group, current_melody_group in zip(
            nested_scale_degrees, nested_scale_degrees[1:]):
        chord_index += 1
        figure_group = []
        main_pitch_diff = current_melody_group[0] - previous_melody_group[0]
        main_pitch_direction = Voice.calculate_slope(main_pitch_diff)
        main_pitch_diff = abs(main_pitch_diff)
        embellish_degrees = previous_melody_group[1:]
        if not embellish_degrees:
            chosen_figurations.append(None)
            continue
        for previous_melody_note in embellish_degrees:
            pitch_diff = previous_melody_note - previous_melody_group[0]
            pitch_direction = Voice.calculate_slope(pitch_diff)
            if pitch_direction == 0:
                reference_direction = 0
            elif 0 != main_pitch_direction == pitch_direction:
                reference_direction = abs(pitch_diff)
            elif 0 != main_pitch_direction != pitch_direction:
                reference_direction = -abs(pitch_diff)
            elif main_pitch_direction == 0:
                reference_direction = pitch_diff
            figure_group.append(reference_direction)
        figure_group = tuple(figure_group)
        if chord_index == 8:
            modifier = "PICK_"
        else:
            modifier = ""
        chosen_figurations.append(modifier + melodic_figures[(main_pitch_diff,
                                                              figure_group)])
    print(chosen_figurations)
    return chosen_figurations
Example #3
    def add_chord_section(self, start_index, end_index, all_note_durations,
                          all_voices_used, chord_units_used):
        """Extend accompaniment with selected chords"""

        unique_chord_iter = iter(self.chosen_chord_voicings)
        chord_sequence = Voice.chord_sequence[:end_index]

        for chord_index, current_chord_obj in enumerate(chord_sequence):
            if chord_index < start_index:
                # before the requested section starts, keep the unique-chord
                # iterator and the last voicing in sync with the skipped chords
                if chord_index in self.unique_chord_indices:
                    current_pitch_combo = next(unique_chord_iter)
                    last_pitch_combo = current_pitch_combo
                continue

            pitches_to_degrees = current_chord_obj.pitches_to_degrees
            note_durations = all_note_durations[chord_index % chord_units_used]
            voices_used = all_voices_used[chord_index % chord_units_used]

            if chord_index in self.unique_chord_indices:
                current_pitch_combo = next(unique_chord_iter)
                last_pitch_combo = current_pitch_combo

                for voice_index, current_pitch in enumerate(
                        current_pitch_combo):
                    note_time = self.current_time
                    for beat_index, note_duration in enumerate(note_durations):
                        if voice_index in voices_used[beat_index]:
                            Voice.midi_score[voice_index + 1].append(
                                Voice.Note(current_pitch, note_time,
                                           note_duration))
                            Voice.chorale_scale_degrees[voice_index].append(
                                pitches_to_degrees[current_pitch])
                        else:
                            Voice.midi_score[voice_index + 1].append(
                                Voice.Note("Rest", note_time, note_duration))
                            Voice.chorale_scale_degrees[voice_index].append(
                                None)
                        note_time += note_duration
            else:
                for voice_index, last_pitch in enumerate(last_pitch_combo):
                    note_time = self.current_time

                    for beat_index, note_duration in enumerate(note_durations):
                        if voice_index in voices_used[beat_index]:
                            Voice.midi_score[voice_index + 1].append(
                                Voice.Note(last_pitch, note_time,
                                           note_duration))
                            Voice.chorale_scale_degrees[voice_index].append(
                                pitches_to_degrees[last_pitch])
                        else:
                            Voice.midi_score[voice_index + 1].append(
                                Voice.Note("Rest", note_time, note_duration))
                            Voice.chorale_scale_degrees[voice_index].append(
                                None)
                        note_time += note_duration

            self.current_time += Voice.max_note_duration
Example #4
def test_unnested_climaxes(obj):
    section1 = Voice.merge_lists(*obj.nested_scale_degrees[:4])
    section2 = Voice.merge_lists(*obj.nested_scale_degrees[4:8])
    section3 = Voice.merge_lists(*obj.nested_scale_degrees[8:12])
    section4 = Voice.merge_lists(*obj.nested_scale_degrees[12:])

    if max(section1) == max(section2):
        return False
    if max(section3) <= max(section4):
        return False
    return True
Example #5
	def test_turns(self):
		self.assertEqual(Voice.get_turns([]), 0)
		self.assertEqual(Voice.get_turns([1]), 0)
		self.assertEqual(Voice.get_turns([3, 3, 3, 3, 3]), 0)
		self.assertEqual(Voice.get_turns([5, 5, 5, 5, 6]), 1)
		self.assertEqual(Voice.get_turns([6, 6, 6, 6, 5]), 1)
		
		self.assertEqual(Voice.get_turns([2, 3, 4]), 1)
		self.assertEqual(Voice.get_turns([5, 6, 5, 4]), 2)
		self.assertEqual(Voice.get_turns([2, 3, 3, 2, 1, 2]), 3)
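These cases pin down Voice.get_turns as counting how many times the melodic direction changes, with the first non-repeated motion counting as the first turn and repeated notes extending the current motion. A minimal sketch consistent with every assertion above (not necessarily the project's actual implementation):

def get_turns(degrees):
    """Count changes of melodic direction, ignoring repeated notes."""
    turns = 0
    current_direction = 0
    for degree0, degree1 in zip(degrees, degrees[1:]):
        diff = degree1 - degree0
        new_direction = (diff > 0) - (diff < 0)  # sign of the melodic step
        if new_direction != 0 and new_direction != current_direction:
            turns += 1
            current_direction = new_direction
    return turns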
Example #6
	def test_motifs(self):
		self.assertFalse(Voice.has_cross_duplicates([]))
		self.assertFalse(Voice.has_cross_duplicates([0, 0, 0, 0, 0]))
		self.assertFalse(Voice.has_cross_duplicates([1, 2, 3, 4, 5, 6, 7]))
		self.assertFalse(Voice.has_cross_duplicates([1, 2, 2, 3, 2, 1, 1]))

		self.assertTrue(Voice.has_cross_duplicates([2, 4, 2, 4, 1, 2, 3]))
		self.assertTrue(Voice.has_cross_duplicates([2, 2, 4, 4, 2, 4, 1, 2]))
		self.assertTrue(Voice.has_cross_duplicates([1, 5, 5, 5, 1, 1, 5]))
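One reading consistent with all of these cases: Voice.has_cross_duplicates flags a melody in which some two-note motif (an adjacent pair of distinct degrees) occurs more than once. The name hints the real rule may be narrower, so treat this as a hypothetical sketch rather than the project's algorithm:

from collections import Counter

def has_cross_duplicates(degrees):
    """Return True if any adjacent pair of distinct degrees recurs."""
    pair_counts = Counter(
        (degree0, degree1)
        for degree0, degree1 in zip(degrees, degrees[1:])
        if degree0 != degree1)
    return any(count >= 2 for count in pair_counts.values())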
Example #7
def test_bounds(obj):
    if min(obj.unnested_scale_degrees) < -3:
        return False
    if max(obj.unnested_scale_degrees) > 7:
        return False
    if max(obj.unnested_scale_degrees) < 5:
        return False
    section1 = Voice.merge_lists(*obj.nested_scale_degrees[:3])
    if min(section1) < 0:
        return False
    return True
Example #8
    def has_melody_figure(self):
        """Check specific melody against figuration options"""

        last_rhythm_symbol = self.rhythm_symbols[self.chord_index - 1]
        if last_rhythm_symbol == -1:
            if self.chord_index == 15:
                section3 = Voice.merge_lists(*self.nested_scale_degrees[8:12])
                section4 = Voice.merge_lists(*self.nested_scale_degrees[12:14])

                if max(section3) <= max(section4):
                    return False
            self.nested_scale_degrees[self.chord_index -
                                      1] = [self.previous_degree_choice]
            return True
        if last_rhythm_symbol == -2:
            self.melody_figure_options[self.chord_index -
                                       1] = (self.get_pickup_sequences(
                                           self.current_degree_choice))
            return self.add_valid_figure()

        remaining_figures = self.melody_figure_options[self.chord_index - 1]
        if remaining_figures:
            return self.add_valid_figure()

        degree_mvmt = self.current_degree_choice - self.previous_degree_choice
        melody_slope = Voice.calculate_slope(degree_mvmt)
        degree_mvmt = abs(degree_mvmt)

        embellish_amount = len(self.finalized_rhythms[self.chord_index - 1])
        if embellish_amount == 2:
            all_figurations = self.all_single_figurations
        elif embellish_amount == 3:
            all_figurations = self.all_double_figurations
        possible_scale_degrees = all_figurations[degree_mvmt](
            self.previous_degree_choice, self.current_degree_choice,
            melody_slope)

        self.melody_figure_options[self.chord_index -
                                   1] = possible_scale_degrees
        return self.add_valid_figure()
Example #9
    def add_valid_figure(self):
        """Find and add specific figuration of base melody using idioms"""
        valid_figure = None
        # alias with a side effect: shuffling and popping below also mutate
        # melody_figure_options in place, but the alias keeps references short
        remaining_figures = self.melody_figure_options[self.chord_index - 1]

        random.shuffle(remaining_figures)
        while remaining_figures:
            inbetween, fig_type = remaining_figures.pop()
            if min(inbetween) < -3 or max(inbetween) > 7:
                continue
            if self.chord_index - 1 < 3 and min(inbetween) < 0:
                continue

            unnested_scalar_melody = self.unnested_scale_degrees[:]
            unnested_scalar_melody.extend(inbetween)
            """
			chord 8 and 12 are short-circuited
			only need to evaluate once going forward
			so, use the next indices for testing at divisible checkpoints
			"""
            if self.chord_index == 13 and max(unnested_scalar_melody) < 5:
                continue
            if self.chord_index == 9:
                section1 = Voice.merge_lists(*self.nested_scale_degrees[:4])
                section2 = Voice.merge_lists(*self.nested_scale_degrees[4:8])

                if max(section1) == max(section2):
                    continue

            valid_figure = inbetween
            break

        if valid_figure is None:
            return False
        self.nested_scale_degrees[self.chord_index - 1] = [
            self.previous_degree_choice, *valid_figure
        ]
        self.chosen_figurations[self.chord_index - 1] = fig_type
        return True
Example #10
    def add_pickup_notes(self):
        """Adds pickup notes to beginning of the piece"""

        rest_rhythm = self.finalized_rhythms[7][0]
        Voice.pickup_duration = Voice.max_note_duration
        first_scale_degree = self.unnested_scale_degrees[0]

        note_alterations = {}

        rest_fraction = Fraction(numerator=rest_rhythm,
                                 denominator=self.unit_length)
        rest_duration = int(Voice.pickup_duration * rest_fraction)
        self.midi_notes.append(Voice.Note("Rest", 0, rest_duration))

        self.chord_index = 0
        pickup_degree_sequence, _ = random.choice(
            self.get_pickup_sequences(first_scale_degree))
        current_time = rest_duration

        unnested_scale_degrees = []
        for note_index, note_rhythm in enumerate(self.pickup_rhythm):
            pickup_scale_degree = pickup_degree_sequence[note_index]
            note_offset = note_alterations.get(pickup_scale_degree % 7, 0)
            midi_pitch = self.melody_range[pickup_scale_degree +
                                           3] + note_offset

            embellish_fraction = Fraction(numerator=note_rhythm,
                                          denominator=self.unit_length)
            note_duration = int(Voice.pickup_duration * embellish_fraction)
            self.midi_notes.append(
                Voice.Note(midi_pitch, current_time, note_duration))
            unnested_scale_degrees.append(pickup_scale_degree)

            current_time += note_duration

        unnested_scale_degrees.extend(self.unnested_scale_degrees)
        self.unnested_scale_degrees = unnested_scale_degrees

        return len(self.pickup_rhythm)
Example #11
	def test_list_merger(self):
		self.assertEqual(Voice.merge_lists([]), [])
		self.assertEqual(Voice.merge_lists([], [], []), [])
		self.assertEqual(Voice.merge_lists([], [0], []), [0])
		self.assertEqual(Voice.merge_lists([5], [], []), [5])
		
		self.assertEqual(Voice.merge_lists([], [], [2, 3]), [2, 3])
		self.assertEqual(Voice.merge_lists([4], [], [9, 1, 3], []), [4, 9, 1, 3])
		self.assertEqual(Voice.merge_lists([1, 2], [3], [4, 5]), [1, 2, 3, 4, 5])

		list1 = [-5, -4]
		list2 = [-3, -2]
		list3 = Voice.merge_lists(list1, list2)
		list3.append(0)
		self.assertFalse(list1[-1] == 0)
		self.assertFalse(list2[-1] == 0)

		list4 = Voice.merge_lists(list1, [])
		self.assertTrue(list1 is not list4)
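The aliasing assertions at the end show that Voice.merge_lists must build a brand-new list rather than return or extend one of its arguments. A minimal sketch that satisfies every case above (not necessarily the project's own code):

def merge_lists(*lists):
    """Concatenate the given lists into a new, independent list."""
    merged = []
    for sequence in lists:
        merged.extend(sequence)
    return merged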
Example #12
	def test_partition(self):
		simple_durations = Voice.simple_beat_durations
		compound_durations = Voice.compound_beat_durations

		self.assertEqual(Voice.partition_rhythm(simple_durations, 0), [])
		self.assertEqual(Voice.partition_rhythm(simple_durations, 4), [4])
		self.assertEqual(Voice.partition_rhythm(simple_durations, 1.5), [1.5])
		self.assertEqual(Voice.partition_rhythm(compound_durations, 2), [2])
		self.assertEqual(
			Voice.partition_rhythm(compound_durations, Fraction("2/3")),
			[Fraction("2/3")])

		self.assertEqual(
			Voice.partition_rhythm(compound_durations, Fraction("5/6")),
			[Fraction("2/3"), Fraction("1/6")])
		self.assertEqual(
			Voice.partition_rhythm(compound_durations, Fraction("5/3")),
			[Fraction("4/3"), Fraction("1/3")])
		self.assertEqual(
			Voice.partition_rhythm(compound_durations, Fraction("10/3")),
			[Fraction("8/3"), Fraction("2/3")])
Example #13
def test_proper_leaps(obj):
    # breaking this rule is too advanced for you
    unnested_antecedent = Voice.merge_lists(*obj.nested_scale_degrees[:8])
    unnested_consequent = Voice.merge_lists(*obj.nested_scale_degrees[8:])
    proper_antecedent_leaps = Voice.has_proper_leaps(unnested_antecedent)
    proper_consequent_leaps = Voice.has_proper_leaps(unnested_consequent)
    if not (proper_antecedent_leaps and proper_consequent_leaps):
        return False

    ante_cons_transition = Voice.merge_lists(*obj.nested_scale_degrees[5:10])
    if (not Voice.has_proper_leaps(ante_cons_transition)
            and obj.nested_scale_degrees[0] != obj.nested_scale_degrees[8]):
        return False

    return True
Example #14
def set_melodic_figures(nested_scale_degrees):
    """Defines figurations found in actual music"""

    chosen_figurations = []
    """
	IPT = inner passing tone
	OPT = outer passing tone
	ANT = anticipation
	RET = retardation
	CIN = current incomplete neighbor
	PIN = previous incomplete neighbor
	DN = double neighbor
	CN = complete neighbor
	DCN = double complete neighbor
	Format of keys: (degree difference between base notes, (figurations))
	"""
    melodic_figures = {
        (2, (1, )): "IPT",
        (0, (2, 1)): "DCN",
        (2, (-1, 1)): "OPT",
        (0, (-1, )): "CN",
        (1, (-1, )): "PIN",
        (1, (2, )): "CIN",
        (1, (-2, -1)): "OPT",
        (4, (2, )): "IPT",
        (3, (4, )): "CIN",
        (3, (1, )): "IPT",
        (1, (0, )): "RET",
        (2, (2, )): "ANT",
        (3, (2, )): "IPT",
        (3, (1, 3)): "ANT",
        (2, (2, 3)): "OPT",
        (4, (1, 2)): "IPT",
        (1, (-2, )): "OPT",
        (1, (-1, 0)): "OPT",
        (3, (1, 2)): "IPT",
        (5, (3, )): "5PT",
        (0, (1, )): "CN",
        (1, (1, )): "ANT"
    }
    chord_index = 0
    for previous_melody_group, current_melody_group in zip(
            nested_scale_degrees, nested_scale_degrees[1:]):
        chord_index += 1
        figure_group = []
        main_pitch_diff = current_melody_group[0] - previous_melody_group[0]
        main_pitch_direction = Voice.calculate_slope(main_pitch_diff)
        main_pitch_diff = abs(main_pitch_diff)
        embellish_degrees = previous_melody_group[1:]
        if not embellish_degrees:
            chosen_figurations.append(None)
            continue
        for previous_melody_note in embellish_degrees:
            pitch_diff = previous_melody_note - previous_melody_group[0]
            pitch_direction = Voice.calculate_slope(pitch_diff)
            if pitch_direction == 0:
                reference_direction = 0
            elif 0 != main_pitch_direction == pitch_direction:
                reference_direction = abs(pitch_diff)
            elif 0 != main_pitch_direction != pitch_direction:
                reference_direction = -abs(pitch_diff)
            elif main_pitch_direction == 0:
                reference_direction = pitch_diff
            figure_group.append(reference_direction)
        figure_group = tuple(figure_group)
        if chord_index == 8:
            modifier = "PICK_"
        else:
            modifier = ""
        chosen_figurations.append(modifier + melodic_figures[(main_pitch_diff,
                                                              figure_group)])
    print(chosen_figurations)
    return chosen_figurations
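As a quick illustration of how the lookup key is assembled, here is a hypothetical call (run wherever Voice is importable):

# Previous group [0, 1]: base degree 0 embellished by degree 1; the next group
# starts on degree 2. main_pitch_diff = |2 - 0| = 2 and the embellishment moves
# one step in the same direction as the main motion, so figure_group = (1,)
# and the key (2, (1,)) maps to "IPT" (inner passing tone).
set_melodic_figures([[0, 1], [2]])  # returns ["IPT"]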
Example #15
	def test_slopes(self):
		self.assertEqual(Voice.calculate_slope(0), 0)
		self.assertEqual(Voice.calculate_slope(1), 1)
		self.assertEqual(Voice.calculate_slope(-1), -1)
		self.assertEqual(Voice.calculate_slope(5), 1)
		self.assertEqual(Voice.calculate_slope(-3), -1)
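These assertions fully determine Voice.calculate_slope as a sign function; a minimal sketch consistent with them (not necessarily the project's implementation):

def calculate_slope(interval):
    """Return the direction of a melodic interval: -1, 0, or 1."""
    if interval > 0:
        return 1
    if interval < 0:
        return -1
    return 0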
Example #16
	def test_leaps(self):
		self.assertFalse(Voice.has_proper_leaps([7, 6, 2, 1]))
		self.assertFalse(Voice.has_proper_leaps([0, 5, 6, 5]))
		self.assertFalse(Voice.has_proper_leaps([2, 6, 2, 3]))
		self.assertFalse(Voice.has_proper_leaps([4, 0, 5, 1]))

		self.assertFalse(Voice.has_proper_leaps([0, 5, 10, 9]))
		self.assertFalse(Voice.has_proper_leaps([9, 5, 0, 1]))

		self.assertFalse(Voice.has_proper_leaps([6, 7, 2, 2, 1]))
		self.assertFalse(Voice.has_proper_leaps([0, 5, 5, 5, 6]))
		self.assertTrue(Voice.has_proper_leaps([1, 4, 4, 3, 2]))
		self.assertTrue(Voice.has_proper_leaps([10, 6, 6, 7, 6]))

		self.assertTrue(Voice.has_proper_leaps([]))
		self.assertTrue(Voice.has_proper_leaps([1, 2, 3, 4]))
		self.assertTrue(Voice.has_proper_leaps([0, 5, 4, 3]))
		self.assertTrue(Voice.has_proper_leaps([5, 2, 3, 1]))
		self.assertTrue(Voice.has_proper_leaps([8, 6, 4, 2]))
		self.assertTrue(Voice.has_proper_leaps([1, 3, 5, 7]))
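One rule consistent with every case above: a move larger than two scale steps counts as a leap and must be followed, after any repeated notes, by a step in the contrary direction, while a leap with nothing after it is tolerated. This is a hypothetical reconstruction; the thresholds used by the real Voice.has_proper_leaps may differ:

def has_proper_leaps(degrees):
    """Check that each leap resolves by a contrary step (repeats skipped)."""
    intervals = [
        degree1 - degree0 for degree0, degree1 in zip(degrees, degrees[1:])
    ]
    for index, interval in enumerate(intervals):
        if abs(interval) <= 2:
            continue
        # the leap's resolution is the next non-repeated motion
        for next_interval in intervals[index + 1:]:
            if next_interval == 0:
                continue
            is_step = abs(next_interval) <= 2
            is_contrary = (interval > 0) != (next_interval > 0)
            if not (is_step and is_contrary):
                return False
            break
    return True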
Example #17
    def add_midi_section(self,
                         melody_section,
                         chord_start_index,
                         note_start_indices,
                         index_shift=None):
        """Add midi notes to the pitch sequence using scale degrees"""

        object_index = 0
        melodic_minor = False
        add_rest = False

        # keep a second iterator one group ahead so raised melodic-minor notes
        # can be caught across chord boundaries (see the aeolian check below)
        melody_section_iter = iter(melody_section)
        next_scale_group = next(melody_section_iter, None)
        for chord_index, current_scale_group in enumerate(
                melody_section, chord_start_index):

            next_scale_group = next(melody_section_iter, None)
            current_chord_name = Voice.chord_sequence[chord_index].chord_name
            if self.mode == "ionian" and current_chord_name in Chord.major_mode_alterations:
                note_alterations = Chord.major_mode_alterations[
                    current_chord_name]
            elif self.mode == "aeolian" and current_chord_name in Chord.minor_mode_alterations:
                note_alterations = Chord.minor_mode_alterations[
                    current_chord_name]
            else:
                note_alterations = {}

            if self.mode == "aeolian" and current_chord_name in self.primary_dominants:
                if melodic_minor:
                    note_alterations[5] = 1
                else:
                    # catch raised notes spanning two adjacent chords
                    next_chord_name = Voice.chord_sequence[chord_index +
                                                           1].chord_name
                    if next_chord_name in self.primary_dominants:
                        affected_scale_group = current_scale_group + next_scale_group
                    else:
                        affected_scale_group = current_scale_group
                    scale_group_str = "".join(
                        str(scale_degree)
                        for scale_degree in affected_scale_group)
                    if any(str_combo in scale_group_str
                           for str_combo in ("56", "65", "-1-2", "-2-1")):
                        melodic_minor = True
                        note_alterations[5] = 1
                        print("Melodic minor!")

            for embellish_index, scale_degree in enumerate(
                    current_scale_group,
                    note_start_indices.get(chord_index, 0)):
                embellish_duration = self.finalized_rhythms[chord_index][
                    embellish_index]
                # account for negative scale degrees
                note_offset = note_alterations.get(scale_degree % 7, 0)
                embellish_fraction = Fraction(numerator=embellish_duration,
                                              denominator=self.unit_length)

                raw_note_duration = int(Voice.max_note_duration *
                                        embellish_fraction)
                # integers required for midi output

                if raw_note_duration > 960 and self.rhythm_symbols[
                        chord_index] >= 0 and self.break_notes:
                    fixed_note_duration = 960
                    add_rest = True
                    extra_duration = raw_note_duration - 960
                else:
                    fixed_note_duration = raw_note_duration

                midi_pitch = self.melody_range[scale_degree + 3] + note_offset
                if chord_index == 7 and embellish_index == 0 and self.rhythm_symbols[
                        6] == -1:
                    self.midi_notes.append(
                        Voice.Note("Rest", self.current_time,
                                   fixed_note_duration))
                    # validation needed every degree in the unnested sequence;
                    # remove this one now that it has become a rest
                    self.unnested_scale_degrees.pop(object_index + index_shift)
                else:
                    self.midi_notes.append(
                        Voice.Note(midi_pitch, self.current_time,
                                   fixed_note_duration))
                    if add_rest:
                        self.midi_notes.append(
                            Voice.Note("Rest", self.current_time + 960,
                                       extra_duration))

                self.current_time += raw_note_duration
                add_rest = False
                object_index += 1

        return object_index
Example #18
    # adding melody to track
    for new_note in Voice.midi_score[0]:
        if isinstance(new_note.pitch, int):
            MyMIDI.addNote(track, channel, *new_note, 100)

    strum_ending = random.choice((True, True, True, False))
    print(f"Strum ending: {strum_ending}")
    if strum_ending:
        time_shift = 0
        for voice_index, part in enumerate(Voice.midi_score[2:], 2):
            time_shift += 90
            old_midi_obj = Voice.midi_score[voice_index][-2]
            new_midi_obj = Voice.Note(
                old_midi_obj.pitch,
                old_midi_obj.time + time_shift,
                old_midi_obj.duration,
            )
            Voice.midi_score[voice_index][-2] = new_midi_obj

    # adding chords to track
    for voice_index, part in enumerate(Voice.midi_score[1:]):
        track += 1
        channel += 1
        volume = Voice.voice_volumes[voice_index]
        for new_note in part:
            if isinstance(new_note.pitch, int):
                MyMIDI.addNote(track, channel, *new_note, volume)
    """
	3/4 time sig feels slower at the same tempo because
	beats are in groups of 3 instead of 2
	"""
Example #19
    def make_accompanyment(self):
        """Rhythmically embellish chord progression"""

        if Voice.pickup:
            for _ in range(4):
                Voice.midi_score.append(
                    [Voice.Note("Rest", 0, Voice.pickup_duration)])
                Voice.chorale_scale_degrees.append([None])
        else:
            for _ in range(4):
                Voice.midi_score.append([])
                Voice.chorale_scale_degrees.append([])

        chord_accompaniments = {
            (2, 2): [
                ((960, 960), ({0, 1, 2, 3}, {})),
                ((960 * 3 // 2, 480), ({0, 1, 2, 3}, {})),
                ((480, 480, 480, 480), ({0, 1, 2, 3}, {}, {0, 1, 2, 3}, {})),
                (
                    (480, 240, 480, 240, 480),
                    ({0, 1, 2, 3}, {}, {0, 1, 2, 3}, {}, {0, 1, 2, 3}),
                ),
            ],
            (2, 3): [
                ((960, 960), ({0, 1, 2, 3}, {})),
                ((960 * 10 // 6, 960 * 2 // 6), ({0, 1, 2, 3}, {})),
                (
                    (960 * 2 // 3, 320, 960 * 2 // 3, 320),
                    ({0, 1, 2, 3}, {}, {0, 1, 2, 3}, {}),
                ),
            ],
            (3, 2): [
                ((960, 960 * 2), ({0, 1, 2, 3}, {})),
                ((960 * 2, 960), ({0, 1, 2, 3}, {})),
                (
                    (480, 480, 480, 480, 480, 480),
                    ({0, 1, 2, 3}, {}, {0, 1, 2, 3}, {}, {0, 1, 2, 3}, {}),
                ),
            ],
            (4, 2): [
                ((960, 960), ({0, 1, 2, 3}, {})),
                ((960 * 3 // 2, 480), ({0, 1, 2, 3}, {})),
                ((480, 480, 480, 480), ({0, 1, 2, 3}, {}, {0, 1, 2, 3}, {})),
                (
                    (480, 240, 480, 240, 480),
                    ({0, 1, 2, 3}, {}, {0, 1, 2, 3}, {}, {0, 1, 2, 3}),
                ),
                ((960 * 2, 960 * 2), ({0, 1, 2, 3}, {})),
            ],
            (4, 3): [
                ((960, 960), ({0, 1, 2, 3}, {})),
                ((960 * 10 // 6, 960 * 2 // 6), ({0, 1, 2, 3}, {})),
                (
                    (960 * 2 // 3, 320, 960 * 2 // 3, 320),
                    ({0, 1, 2, 3}, {}, {0, 1, 2, 3}, {}),
                ),
                ((960 * 2, 960 * 2), ({0, 1, 2, 3}, {})),
            ],
        }

        chord_accompaniment = chord_accompaniments[Voice.time_sig]

        if Voice.time_sig[0] == 4:
            if Voice.chord_acceleration:
                chord_accompaniment.pop()

        note_durations, voices_used = random.choice(chord_accompaniment)

        chord_units_used = sum(note_durations) // Voice.max_note_duration
        if chord_units_used == 0:
            chord_units_used = 1
        print(f"Chord units used: {chord_units_used}")
        all_note_durations = []
        all_voices_used = []
        note_index = 0
        for _ in range(chord_units_used):
            all_note_durations.append([])
            all_voices_used.append([])
            while sum(all_note_durations[-1]) < Voice.max_note_duration:
                all_note_durations[-1].append(note_durations[note_index])
                all_voices_used[-1].append(voices_used[note_index])
                note_index += 1

        print(f"All note durations: {all_note_durations}")
        print(f"All voices used: {all_voices_used}")
        num_chords = len(Voice.chord_sequence)
        self.current_time = Voice.pickup_duration
        self.add_chord_section(0, -2, all_note_durations, all_voices_used,
                               chord_units_used)

        end_note_durations = ((Voice.max_note_duration, ),
                              (Voice.max_note_duration, ))
        end_voices_used = [[{0, 1, 2, 3}], [{}]]

        if self.repeat_ending:
            self.add_chord_section(-2 % num_chords, num_chords,
                                   all_note_durations, all_voices_used,
                                   chord_units_used)
            self.add_chord_section(-4 % num_chords, -2, all_note_durations,
                                   all_voices_used, chord_units_used)

        self.add_chord_section(-2 % num_chords, num_chords, end_note_durations,
                               end_voices_used, 2)
Example #20
    def validate_base_melody(self):
        """Check current base melody with idioms"""
        melodic_mvmt = "".join(
            str(slope)
            for slope in self.melodic_direction[:self.chord_index + 1])

        if "_" * 3 in melodic_mvmt:
            # Avoid long rests
            return False
        if "_" * 2 in melodic_mvmt:
            all_rest_indices = set()
            start_index = 0
            while True:
                rest_index = melodic_mvmt.find("__", start_index)
                if rest_index == -1:
                    break
                all_rest_indices.add(rest_index)
                start_index = rest_index + 1
            if all_rest_indices - self.good_double_rest_indices:
                # Triple repeats are allowed only between phrases
                return False

        start_index = 0
        while True:
            rest_index = melodic_mvmt.find("_", start_index)

            if rest_index in self.bad_single_rest_indices:
                return False
            if rest_index == -1:
                break
            start_index = rest_index + 1

        relevant_melodic_mvt = melodic_mvmt[1:]

        current_move_distance = self.current_degree_choice - self.previous_degree_choice
        abs_current_move_distance = abs(current_move_distance)
        if abs_current_move_distance > 7:
            # Keep leaps within octave
            return False
        if self.chord_index == 14:
            if abs_current_move_distance > 4:
                # Don't end with a large leap
                return False
            if relevant_melodic_mvt.count('>') > relevant_melodic_mvt.count(
                    '<'):
                # Descending motion should predominate
                return False
        if abs_current_move_distance > 4 and self.chord_index not in self.valid_leap_indices:
            # Large leap can only occur halfway through
            return False
        if len(self.unnested_scale_degrees) >= 3:
            if self.chord_index < 9:
                nested_part_half = self.nested_scale_degrees[:8]
            else:
                nested_part_half = self.nested_scale_degrees[8:]
            unnested_part_half = Voice.merge_lists(*nested_part_half)
            if not Voice.has_proper_leaps(unnested_part_half):
                # "Leap should be followed by contrary stepwise motion (full melody)"
                return False
        if self.chord_index == 11:
            ante_cons_transition = Voice.merge_lists(
                *self.nested_scale_degrees[5:10])
            if (not Voice.has_proper_leaps(ante_cons_transition)
                    and self.nested_scale_degrees[0] !=
                    self.nested_scale_degrees[8]):
                return False

        # score divides into 4 sections, 16 items
        # first 2 sections: antecedent
        # last 2 sections: consequent
        current_section = self.chord_index // 4
        section_start_index = current_section * 4
        section_scale_degrees = (
            self.chosen_scale_degrees[section_start_index:section_start_index +
                                      4])
        end_degree = section_scale_degrees[-1]
        while end_degree is None:
            section_scale_degrees.pop()
            end_degree = section_scale_degrees[-1]

        section_max_degree = max(section_scale_degrees)
        if current_section <= 2:
            if section_scale_degrees.count(section_max_degree) > 2:
                return False
            if section_scale_degrees.count(section_max_degree) == 2:
                for scale_degree0, scale_degree1 in zip(
                        section_scale_degrees, section_scale_degrees[1:]):
                    if (scale_degree0 == section_max_degree
                            and scale_degree0 == scale_degree1):
                        break
                else:
                    return False

        if self.chord_index == 2:
            if self.chosen_figurations[0] != "IPT":
                return False
        if self.chord_index >= 3:
            previous_melody_note = self.chosen_scale_degrees[self.chord_index -
                                                             3]
            for chord_index, melody_group in enumerate(
                    self.nested_scale_degrees[self.chord_index -
                                              3:self.chord_index - 1],
                    self.chord_index - 3):
                for fig_index, current_melody_note in enumerate(melody_group):
                    pitch_diff = current_melody_note - previous_melody_note
                    if (abs(pitch_diff) > 4 and pitch_diff < 0 and
                        (fig_index != 0
                         or chord_index not in self.valid_leap_indices)):
                        return False
                    previous_melody_note = current_melody_note
            if (self.chord_index not in self.quick_turn_indices
                    and melodic_mvmt[self.chord_index - 2:] in {"><>", "<><"}):
                # No late melodic jukes
                return False

        if self.chord_index == 8:
            section1 = self.chosen_scale_degrees[:4]
            section2 = self.chosen_scale_degrees[4:8]
            if max(section1) == max(section2):
                return False
        elif self.chord_index == 15:
            section3 = self.chosen_scale_degrees[8:12]
            section4 = self.chosen_scale_degrees[12:]
            if max(section3) <= max(section4):
                return False
            if abs(self.nested_scale_degrees[-3][-1]) > 1:
                return False
            if (self.chosen_figurations.count("OPT") > 2
                    and self.nested_scale_degrees[0:4] !=
                    self.nested_scale_degrees[8:12]):
                return False

        num_still_figures = self.chosen_figurations.count("CN")
        num_still_figures += self.chosen_figurations.count("DN")
        num_still_figures += self.chosen_figurations.count("DCN")

        if num_still_figures > 2:
            return False
        if self.chosen_figurations.count("OPT") > 4:
            return False
        if self.chosen_figurations.count("ANT") > 1:
            return False

        return True