Example #1
    def testSquash(self):
        # MonophonicMelody in C, transposed to C, and squashed to 1 octave.
        events = [12 * 5, NO_EVENT, 12 * 5 + 2, NOTE_OFF, 12 * 6 + 4, NO_EVENT]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        melody.squash(min_note=12 * 5, max_note=12 * 6, transpose_to_key=0)
        expected = [
            12 * 5, NO_EVENT, 12 * 5 + 2, NOTE_OFF, 12 * 5 + 4, NO_EVENT
        ]
        self.assertEqual(expected, list(melody))

        # MonophonicMelody in D, transposed to C, and squashed to 1 octave.
        events = [12 * 5 + 2, 12 * 5 + 4, 12 * 6 + 7, 12 * 6 + 6, 12 * 5 + 1]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        melody.squash(min_note=12 * 5, max_note=12 * 6, transpose_to_key=0)
        expected = [12 * 5, 12 * 5 + 2, 12 * 5 + 5, 12 * 5 + 4, 12 * 5 + 11]
        self.assertEqual(expected, list(melody))

        # MonophonicMelody in D, transposed to E, and squashed to 1 octave.
        events = [12 * 5 + 2, 12 * 5 + 4, 12 * 6 + 7, 12 * 6 + 6, 12 * 4 + 11]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        melody.squash(min_note=12 * 5, max_note=12 * 6, transpose_to_key=4)
        expected = [12 * 5 + 4, 12 * 5 + 6, 12 * 5 + 9, 12 * 5 + 8, 12 * 5 + 1]
        self.assertEqual(expected, list(melody))
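
A minimal sketch of the per-note arithmetic these expectations imply: squash appears to transpose the melody into the requested key (here D up to E is +2 semitones in the third case) and then fold any out-of-range pitch back into [min_note, max_note) an octave at a time. This is an inference from the test, not the library implementation; squash_pitch is a hypothetical helper and the NOTE_OFF/NO_EVENT sentinel values are assumed.

NOTE_OFF = -1   # assumed sentinel values used by MonophonicMelody
NO_EVENT = -2

def squash_pitch(pitch, min_note, max_note, transpose_amount):
    """Hypothetical re-creation of the arithmetic exercised by testSquash."""
    if pitch in (NOTE_OFF, NO_EVENT):
        return pitch                       # special events pass through unchanged
    pitch += transpose_amount              # key transposition first
    while pitch < min_note:                # then fold into range, octave by octave
        pitch += 12
    while pitch >= max_note:
        pitch -= 12
    return pitch

# Third case above: melody in D transposed to E (+2), squashed to [60, 72).
events = [12 * 5 + 2, 12 * 5 + 4, 12 * 6 + 7, 12 * 6 + 6, 12 * 4 + 11]
print([squash_pitch(e, 12 * 5, 12 * 6, 2) for e in events])
# -> [64, 66, 69, 68, 61], i.e. the expected list in the test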
Example #2
    def testGetMajorKey(self):
        # D Major.
        events = [
            NO_EVENT, 12 * 2 + 2, 12 * 3 + 4, 12 * 5 + 1, 12 * 6 + 6,
            12 * 4 + 11, 12 * 3 + 9, 12 * 5 + 7, NOTE_OFF
        ]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        self.assertEqual(2, melody.get_major_key())

        # C# Major with accidentals.
        events = [
            NO_EVENT, 12 * 2 + 1, 12 * 4 + 8, 12 * 5 + 5, 12 * 6 + 6,
            12 * 3 + 3, 12 * 2 + 11, 12 * 3 + 10, 12 * 5, 12 * 2 + 8,
            12 * 4 + 1, 12 * 3 + 5, 12 * 5 + 9, 12 * 4 + 3, NOTE_OFF
        ]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        self.assertEqual(1, melody.get_major_key())

        # One note in C Major.
        events = [NO_EVENT, 12 * 2 + 11, NOTE_OFF]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        self.assertEqual(0, melody.get_major_key())
Example #3
    def testSetLength(self):
        events = [60]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events, start_step=9)
        melody.set_length(5)
        self.assertListEqual([60, NOTE_OFF, NO_EVENT, NO_EVENT, NO_EVENT],
                             list(melody))
        self.assertEqual(9, melody.start_step)
        self.assertEqual(14, melody.end_step)

        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events, start_step=9)
        melody.set_length(5, from_left=True)
        self.assertListEqual([NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, 60],
                             list(melody))
        self.assertEqual(5, melody.start_step)
        self.assertEqual(10, melody.end_step)

        events = [60, NO_EVENT, NO_EVENT, NOTE_OFF]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        melody.set_length(3)
        self.assertListEqual([60, NO_EVENT, NO_EVENT], list(melody))
        self.assertEqual(0, melody.start_step)
        self.assertEqual(3, melody.end_step)

        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        melody.set_length(3, from_left=True)
        self.assertListEqual([NO_EVENT, NO_EVENT, NOTE_OFF], list(melody))
        self.assertEqual(1, melody.start_step)
        self.assertEqual(4, melody.end_step)
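
The four cases above pin down set_length's padding rules: growing on the right appends a NOTE_OFF (ending the sounding note) followed by NO_EVENTs and pushes end_step out, growing on the left prepends NO_EVENTs and pulls start_step back, and shrinking drops events from the far end. A rough sketch under those assumptions (the test only covers a melody ending on a sounding note, so the real library may pad differently in other situations):

NOTE_OFF = -1   # assumed sentinel values
NO_EVENT = -2

def set_length_sketch(events, start_step, new_length, from_left=False):
    """Hypothetical re-creation of the behaviour shown in testSetLength."""
    events = list(events)
    end_step = start_step + len(events)
    if from_left:
        if new_length > len(events):
            events = [NO_EVENT] * (new_length - len(events)) + events
        else:
            events = events[len(events) - new_length:]
        start_step = end_step - new_length
    else:
        if new_length > len(events):
            events.append(NOTE_OFF)
            events += [NO_EVENT] * (new_length - len(events))
        else:
            events = events[:new_length]
        end_step = start_step + new_length
    return events, start_step, end_step

print(set_length_sketch([60], 9, 5))
# -> ([60, -1, -2, -2, -2], 9, 14), matching the first case above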
Example #4
    def testSquashCenterOctaves(self):
        # Move up an octave.
        events = [
            12 * 4, NO_EVENT, 12 * 4 + 2, NOTE_OFF, 12 * 4 + 4, NO_EVENT,
            12 * 4 + 5, 12 * 5 + 2, 12 * 4 - 1, NOTE_OFF
        ]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        melody.squash(min_note=12 * 4, max_note=12 * 7, transpose_to_key=0)
        expected = [
            12 * 5, NO_EVENT, 12 * 5 + 2, NOTE_OFF, 12 * 5 + 4, NO_EVENT,
            12 * 5 + 5, 12 * 6 + 2, 12 * 5 - 1, NOTE_OFF
        ]
        self.assertEqual(expected, list(melody))

        # Move down an octave.
        events = [
            12 * 6, NO_EVENT, 12 * 6 + 2, NOTE_OFF, 12 * 6 + 4, NO_EVENT,
            12 * 6 + 5, 12 * 7 + 2, 12 * 6 - 1, NOTE_OFF
        ]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        melody.squash(min_note=12 * 4, max_note=12 * 7, transpose_to_key=0)
        expected = [
            12 * 5, NO_EVENT, 12 * 5 + 2, NOTE_OFF, 12 * 5 + 4, NO_EVENT,
            12 * 5 + 5, 12 * 6 + 2, 12 * 5 - 1, NOTE_OFF
        ]
        self.assertEqual(expected, list(melody))
Example #5
 def testExtendMelodies(self):
     melody1 = melodies_lib.MonophonicMelody()
     melody1.from_event_list([60])
     melody2 = melodies_lib.MonophonicMelody()
     melody2.from_event_list([60])
     melody3 = melodies_lib.MonophonicMelody()
     melody3.from_event_list([60])
     melody4 = melodies_lib.MonophonicMelody()
     melody4.from_event_list([60])
     melodies = [melody1, melody2, melody3, melody4]
     softmax = [[[
         0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0
     ]],
                [[
                    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                    0.0, 0.0, 1.0
                ]],
                [[
                    1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                    0.0, 0.0, 0.0
                ]],
                [[
                    0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                    0.0, 0.0, 0.0
                ]]]
     self.melody_encoder_decoder.extend_event_sequences(melodies, softmax)
     self.assertListEqual(list(melody1), [60, 60])
     self.assertListEqual(list(melody2), [60, 71])
     self.assertListEqual(list(melody3), [60, NO_EVENT])
     self.assertListEqual(list(melody4), [60, NOTE_OFF])
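
Each softmax row here has 14 classes, and the expected extensions imply the decoding: class 0 maps to NO_EVENT, class 1 to NOTE_OFF, and class i >= 2 to pitch min_note + i - 2, with extend_event_sequences appending the argmax of each row to its melody. The min_note of 60 is an assumption read off the expected pitches in this fixture, not a quoted constant:

NOTE_OFF = -1   # assumed sentinel values
NO_EVENT = -2
MIN_NOTE = 60   # inferred from class 2 -> 60 and class 13 -> 71 above

def class_index_to_event(index, min_note=MIN_NOTE):
    """Hypothetical inverse of the one-hot encoding used in this test."""
    if index == 0:
        return NO_EVENT
    if index == 1:
        return NOTE_OFF
    return min_note + index - 2

# Argmax indices of the four softmax rows above and the events they decode to:
for argmax_index, expected in [(2, 60), (13, 71), (0, NO_EVENT), (1, NOTE_OFF)]:
    assert class_index_to_event(argmax_index) == expected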
Example #6
    def testTranspose(self):
        # MonophonicMelody transposed down 5 half steps. 2 octave range.
        events = [12 * 5 + 4, NO_EVENT, 12 * 5 + 5, NOTE_OFF, 12 * 6, NO_EVENT]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        melody.transpose(transpose_amount=-5, min_note=12 * 5, max_note=12 * 7)
        expected = [
            12 * 5 + 11, NO_EVENT, 12 * 5, NOTE_OFF, 12 * 5 + 7, NO_EVENT
        ]
        self.assertEqual(expected, list(melody))

        # MonophonicMelody transposed up 19 half steps. 2 octave range.
        events = [12 * 5 + 4, NO_EVENT, 12 * 5 + 5, NOTE_OFF, 12 * 6, NO_EVENT]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        melody.transpose(transpose_amount=19, min_note=12 * 5, max_note=12 * 7)
        expected = [
            12 * 6 + 11, NO_EVENT, 12 * 6, NOTE_OFF, 12 * 6 + 7, NO_EVENT
        ]
        self.assertEqual(expected, list(melody))

        # MonophonicMelody transposed zero half steps. 1 octave range.
        events = [12 * 4 + 11, 12 * 5, 12 * 5 + 11, NOTE_OFF, 12 * 6, NO_EVENT]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        melody.transpose(transpose_amount=0, min_note=12 * 5, max_note=12 * 6)
        expected = [
            12 * 5 + 11, 12 * 5, 12 * 5 + 11, NOTE_OFF, 12 * 5, NO_EVENT
        ]
        self.assertEqual(expected, list(melody))
Example #7
    def testToSequence(self):
        # Sequence produced from lead sheet should contain notes from melody
        # sequence and chords from chord sequence as text annotations.
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list([
            NO_EVENT, 1, NO_EVENT, NOTE_OFF, NO_EVENT, 2, 3, NOTE_OFF, NO_EVENT
        ])
        chords = chords_lib.ChordProgression()
        chords.from_event_list(
            [NO_CHORD, 'A', 'A', 'C#m', 'C#m', 'D', 'B', 'B', 'B'])
        lead_sheet = lead_sheets_lib.LeadSheet()
        lead_sheet.from_melody_and_chords(melody, chords)
        sequence = lead_sheet.to_sequence(velocity=10,
                                          instrument=1,
                                          sequence_start_time=2,
                                          qpm=60.0)

        melody_sequence = melody.to_sequence(velocity=10,
                                             instrument=1,
                                             sequence_start_time=2,
                                             qpm=60.0)
        chords_sequence = chords.to_sequence(sequence_start_time=2, qpm=60.0)
        self.assertEqual(melody_sequence.ticks_per_quarter,
                         sequence.ticks_per_quarter)
        self.assertProtoEquals(melody_sequence.tempos, sequence.tempos)
        self.assertEqual(melody_sequence.total_time, sequence.total_time)
        self.assertProtoEquals(melody_sequence.notes, sequence.notes)
        self.assertProtoEquals(chords_sequence.text_annotations,
                               sequence.text_annotations)
Example #8
 def testFromNotesPolyphonic(self):
     testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                     [(12, 100, 4, 16), (19, 100, 4, 12)])
     melody = melodies_lib.MonophonicMelody()
     with self.assertRaises(melodies_lib.PolyphonicMelodyException):
         melody.from_quantized_sequence(self.quantized_sequence,
                                        start_step=0,
                                        track=0,
                                        ignore_polyphonic_notes=False)
     self.assertFalse(list(melody))
Example #9
    def testToSequenceEmpty(self):
        melody = melodies_lib.MonophonicMelody()
        sequence = melody.to_sequence(velocity=10,
                                      instrument=1,
                                      sequence_start_time=2,
                                      qpm=60.0)

        self.assertProtoEquals(
            'ticks_per_quarter: 96 '
            'tempos < qpm: 60.0 > ', sequence)
Example #10
 def testFromNotesStepsPerBar(self):
     self.quantized_sequence.time_signature = sequences_lib.TimeSignature(
         7, 8)
     self.quantized_sequence.steps_per_quarter = 12
     self.quantized_sequence.tracks[0] = []
     melody = melodies_lib.MonophonicMelody()
     melody.from_quantized_sequence(self.quantized_sequence,
                                    start_step=0,
                                    track=0,
                                    ignore_polyphonic_notes=False)
     self.assertEqual(42, melody.steps_per_bar)
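
The expected 42 is consistent with steps_per_bar being derived from the time signature and the quantization resolution: a 7/8 bar spans 7/8 * 4 = 3.5 quarter notes, and at 12 steps per quarter that is 42 steps. A one-line check (the formula is inferred from this test, not quoted from the library):

steps_per_quarter, numerator, denominator = 12, 7, 8
assert steps_per_quarter * 4 * numerator // denominator == 42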
Example #11
    def testGetKeyHistogram(self):
        # One C.
        events = [NO_EVENT, 12 * 5, NOTE_OFF]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        expected = [1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0]
        self.assertListEqual(expected, list(melody.get_major_key_histogram()))

        # One C and one C#.
        events = [NO_EVENT, 12 * 5, NOTE_OFF, 12 * 7 + 1, NOTE_OFF]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        expected = [1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1]
        self.assertListEqual(expected, list(melody.get_major_key_histogram()))

        # One C, one C#, and one D.
        events = [NO_EVENT, 12 * 5, NOTE_OFF, 12 * 7 + 1, NO_EVENT, 12 * 9 + 2]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        expected = [2, 2, 2, 2, 1, 2, 1, 2, 2, 2, 2, 1]
        self.assertListEqual(expected, list(melody.get_major_key_histogram()))
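
The expected histograms (e.g. a lone C scoring 1 for exactly the seven major keys whose scale contains pitch class 0) suggest that each of the 12 buckets counts the melody notes belonging to that key's major scale; get_major_key from the earlier example would then be the argmax, with ties going to the lowest key. A sketch under that assumption, reproducing the first case:

NOTE_OFF = -1   # assumed sentinel values
NO_EVENT = -2
MAJOR_SCALE = {0, 2, 4, 5, 7, 9, 11}   # semitone offsets of a major scale

def major_key_histogram(events):
    """Counts, for each of the 12 major keys, the notes that fit its scale."""
    histogram = [0] * 12
    for event in events:
        if event < 0:                      # skip NO_EVENT / NOTE_OFF
            continue
        for key in range(12):
            if (event - key) % 12 in MAJOR_SCALE:
                histogram[key] += 1
    return histogram

print(major_key_histogram([NO_EVENT, 12 * 5, NOTE_OFF]))
# -> [1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0], as asserted above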
Example #12
    def testGetNoteHistogram(self):
        events = [
            NO_EVENT, NOTE_OFF, 12 * 2 + 1, 12 * 3, 12 * 5 + 11, 12 * 6 + 3,
            12 * 4 + 11
        ]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        expected = [1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2]
        self.assertEqual(expected, list(melody.get_note_histogram()))

        events = [
            0, 1, NO_EVENT, NOTE_OFF, 12 * 2 + 1, 12 * 3, 12 * 6 + 3,
            12 * 5 + 11, NO_EVENT, 12 * 4 + 11, 12 * 7 + 1
        ]
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list(events)
        expected = [2, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2]
        self.assertEqual(expected, list(melody.get_note_histogram()))

        melody = melodies_lib.MonophonicMelody()
        expected = [0] * 12
        self.assertEqual(expected, list(melody.get_note_histogram()))
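
get_note_histogram, by contrast, looks like a plain pitch-class count over the note events with the sentinels ignored, which reproduces both expected lists above:

from collections import Counter

NOTE_OFF = -1   # assumed sentinel values
NO_EVENT = -2

def note_histogram(events):
    """Pitch-class counts, skipping NO_EVENT / NOTE_OFF (negative) events."""
    counts = Counter(event % 12 for event in events if event >= 0)
    return [counts[pitch_class] for pitch_class in range(12)]

events = [NO_EVENT, NOTE_OFF, 12 * 2 + 1, 12 * 3, 12 * 5 + 11, 12 * 6 + 3, 12 * 4 + 11]
print(note_histogram(events))   # -> [1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2]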
Example #13
    def testFromNotesPolyphonicWithIgnorePolyphonicNotes(self):
        testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                        [(12, 100, 0, 8), (19, 100, 0, 12),
                                         (12, 100, 4, 12), (19, 100, 4, 16)])
        melody = melodies_lib.MonophonicMelody()
        melody.from_quantized_sequence(self.quantized_sequence,
                                       start_step=0,
                                       track=0,
                                       ignore_polyphonic_notes=True)
        expected = ([19] + [NO_EVENT] * 3 + [19] + [NO_EVENT] * 11)

        self.assertEqual(expected, list(melody))
        self.assertEqual(16, melody.steps_per_bar)
Example #14
 def testSquashMaxNote(self):
     events = [
         12 * 5, 12 * 5 + 2, 12 * 5 + 4, 12 * 5 + 5, 12 * 5 + 11, 12 * 6,
         12 * 6 + 1
     ]
     melody = melodies_lib.MonophonicMelody()
     melody.from_event_list(events)
     melody.squash(min_note=12 * 5, max_note=12 * 6, transpose_to_key=0)
     expected = [
         12 * 5, 12 * 5 + 2, 12 * 5 + 4, 12 * 5 + 5, 12 * 5 + 11, 12 * 5,
         12 * 5 + 1
     ]
     self.assertEqual(expected, list(melody))
Example #15
 def testFromNotesTimeOverlap(self):
     testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                     [(12, 100, 4, 8), (11, 100, 13, 15),
                                      (13, 100, 8, 16)])
     melody = melodies_lib.MonophonicMelody()
     melody.from_quantized_sequence(self.quantized_sequence,
                                    start_step=0,
                                    track=0,
                                    ignore_polyphonic_notes=False)
     expected = [
         NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, 12, NO_EVENT, NO_EVENT,
         NO_EVENT, 13, NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, 11, NO_EVENT
     ]
     self.assertEqual(expected, list(melody))
Example #16
 def testFromNotesTrimEmptyMeasures(self):
     testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                     [(12, 100, 6, 7), (11, 100, 8, 9)])
     melody = melodies_lib.MonophonicMelody()
     melody.from_quantized_sequence(self.quantized_sequence,
                                    start_step=0,
                                    track=0,
                                    ignore_polyphonic_notes=False)
     expected = [
         NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, 12,
         NOTE_OFF, 11
     ]
     self.assertEqual(expected, list(melody))
     self.assertEqual(16, melody.steps_per_bar)
Example #17
    def testToSequenceEndsWithNonzeroStart(self):
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list([NO_EVENT, 1, NO_EVENT], start_step=4)
        sequence = melody.to_sequence(velocity=100,
                                      instrument=0,
                                      sequence_start_time=0.5,
                                      qpm=60.0)

        self.assertProtoEquals(
            'ticks_per_quarter: 96 '
            'tempos < qpm: 60.0 > '
            'total_time: 2.25 '
            'notes < pitch: 1 velocity: 100 start_time: 1.75 end_time: 2.25 > ',
            sequence)
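
The times in this proto fix the timing convention: at qpm=60 a quarter note lasts one second, and the numbers only work out with 4 steps per quarter (0.25 s per step), with event positions offset by start_step before being added to sequence_start_time. A quick check of the two times, where the 4-steps-per-quarter figure is an inference from this test rather than a quoted default:

seconds_per_step = 60.0 / 60.0 / 4        # qpm 60, assumed 4 steps per quarter
sequence_start_time, start_step = 0.5, 4
note_start = sequence_start_time + (start_step + 1) * seconds_per_step  # pitch 1 is at index 1
note_end = sequence_start_time + (start_step + 3) * seconds_per_step    # sustained to the melody's end
assert (note_start, note_end) == (1.75, 2.25)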
Example #18
 def testFromQuantizedSequence(self):
     testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                     [(12, 100, 0, 40), (11, 55, 1, 2),
                                      (40, 45, 10, 14), (55, 120, 16, 17),
                                      (52, 99, 19, 20)])
     melody = melodies_lib.MonophonicMelody()
     melody.from_quantized_sequence(self.quantized_sequence,
                                    start_step=0,
                                    track=0)
     expected = ([
         12, 11, NOTE_OFF, NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT,
         NO_EVENT, NO_EVENT, 40, NO_EVENT, NO_EVENT, NO_EVENT, NOTE_OFF,
         NO_EVENT, 55, NOTE_OFF, NO_EVENT, 52
     ])
     self.assertEqual(expected, list(melody))
     self.assertEqual(16, melody.steps_per_bar)
Example #19
 def testFromNotesStartAndEndStep(self):
     testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                     [(12, 100, 4, 8), (11, 100, 9, 10),
                                      (13, 100, 13, 15), (14, 100, 19, 20),
                                      (15, 100, 21, 27)])
     melody = melodies_lib.MonophonicMelody()
     melody.from_quantized_sequence(self.quantized_sequence,
                                    start_step=18,
                                    track=0,
                                    ignore_polyphonic_notes=False)
     expected = [
         NO_EVENT, NO_EVENT, NO_EVENT, 14, NOTE_OFF, 15, NO_EVENT, NO_EVENT,
         NO_EVENT, NO_EVENT, NO_EVENT
     ]
     self.assertEqual(expected, list(melody))
     self.assertEqual(16, melody.start_step)
     self.assertEqual(27, melody.end_step)
Example #20
    def testToSequenceEndsWithSustainedNote(self):
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list([
            NO_EVENT, 1, NO_EVENT, NOTE_OFF, NO_EVENT, 2, 3, NO_EVENT, NO_EVENT
        ])
        sequence = melody.to_sequence(velocity=100,
                                      instrument=0,
                                      sequence_start_time=0,
                                      qpm=60.0)

        self.assertProtoEquals(
            'ticks_per_quarter: 96 '
            'tempos < qpm: 60.0 > '
            'total_time: 2.25 '
            'notes < pitch: 1 velocity: 100 start_time: 0.25 end_time: 0.75 > '
            'notes < pitch: 2 velocity: 100 start_time: 1.25 end_time: 1.5 > '
            'notes < pitch: 3 velocity: 100 start_time: 1.5 end_time: 2.25 > ',
            sequence)
Example #21
 def testSquash(self):
     # LeadSheet squash should agree with melody squash & chords transpose.
     melody_events = [
         12 * 5, NO_EVENT, 12 * 5 + 2, NOTE_OFF, 12 * 6 + 4, NO_EVENT
     ]
     chord_events = ['C', 'Am', 'Dm', 'G', 'C', NO_CHORD]
     lead_sheet = lead_sheets_lib.LeadSheet()
     lead_sheet.from_event_list(zip(melody_events, chord_events))
     lead_sheet.squash(min_note=12 * 5, max_note=12 * 6, transpose_to_key=0)
     expected_melody = melodies_lib.MonophonicMelody()
     expected_melody.from_event_list(melody_events[:])
     transpose_amount = expected_melody.squash(min_note=12 * 5,
                                               max_note=12 * 6,
                                               transpose_to_key=0)
     expected_chords = chords_lib.ChordProgression()
     expected_chords.from_event_list(chord_events[:])
     expected_chords.transpose(transpose_amount=transpose_amount)
     self.assertEqual(expected_melody, lead_sheet.melody)
     self.assertEqual(expected_chords, lead_sheet.chords)
Example #22
 def testSetLength(self):
     # Setting LeadSheet length should agree with setting length on melody and
     # chords separately.
     melody_events = [60]
     chord_events = ['C7']
     lead_sheet = lead_sheets_lib.LeadSheet()
     lead_sheet.from_event_list(zip(melody_events, chord_events),
                                start_step=9)
     lead_sheet.set_length(5)
     expected_melody = melodies_lib.MonophonicMelody()
     expected_melody.from_event_list(melody_events[:], start_step=9)
     expected_melody.set_length(5)
     expected_chords = chords_lib.ChordProgression()
     expected_chords.from_event_list(chord_events[:], start_step=9)
     expected_chords.set_length(5)
     self.assertEqual(expected_melody, lead_sheet.melody)
     self.assertEqual(expected_chords, lead_sheet.chords)
     self.assertEqual(9, lead_sheet.start_step)
     self.assertEqual(14, lead_sheet.end_step)
Example #23
 def testSquashAndEncode(self):
     events = [100, 100, 107, 111, NO_EVENT, 99, 112, NOTE_OFF, NO_EVENT]
     melody = melodies_lib.MonophonicMelody()
     melody.from_event_list(events)
     sequence_example = self.melody_encoder_decoder.squash_and_encode(
         melody)
     expected_inputs = [[
         0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0
     ],
                        [
                            0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                            0.0, 0.0, 0.0, 0.0, 0.0
                        ],
                        [
                            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                            1.0, 0.0, 0.0, 0.0, 0.0
                        ],
                        [
                            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                            0.0, 0.0, 0.0, 0.0, 1.0
                        ],
                        [
                            1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                            0.0, 0.0, 0.0, 0.0, 0.0
                        ],
                        [
                            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                            0.0, 0.0, 0.0, 0.0, 1.0
                        ],
                        [
                            0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                            0.0, 0.0, 0.0, 0.0, 0.0
                        ],
                        [
                            0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                            0.0, 0.0, 0.0, 0.0, 0.0
                        ]]
     expected_labels = [2, 9, 13, 0, 13, 2, 1, 0]
     expected_sequence_example = sequence_example_lib.make_sequence_example(
         expected_inputs, expected_labels)
     self.assertEqual(sequence_example, expected_sequence_example)
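
The expected labels line up with the same 14-class encoding sketched after Example #5 (0 = NO_EVENT, 1 = NOTE_OFF, pitch p -> p - 60 + 2), applied to the squashed melody shifted by one step, so that each input's label is the event that follows it. The squashed event list below is implied by a transpose of -40 with folding into [60, 72); testGetInputsBatch later on this page asserts that same -40 for this event list.

NOTE_OFF = -1   # assumed sentinel values
NO_EVENT = -2

# [100, 100, 107, 111, NO_EVENT, 99, 112, NOTE_OFF, NO_EVENT] transposed by -40,
# with 59 folded up to 71 and 72 folded down to 60:
squashed = [60, 60, 67, 71, NO_EVENT, 71, 60, NOTE_OFF, NO_EVENT]

def event_to_label(event):
    return {NO_EVENT: 0, NOTE_OFF: 1}.get(event, event - 60 + 2)

print([event_to_label(e) for e in squashed[1:]])   # -> [2, 9, 13, 0, 13, 2, 1, 0]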
Example #24
 def testTranspose(self):
     # LeadSheet transposition should agree with melody & chords transpositions.
     melody_events = [
         12 * 5 + 4, NO_EVENT, 12 * 5 + 5, NOTE_OFF, 12 * 6, NO_EVENT
     ]
     chord_events = [NO_CHORD, 'C', 'F', 'Dm', 'D', 'G']
     lead_sheet = lead_sheets_lib.LeadSheet()
     lead_sheet.from_event_list(zip(melody_events, chord_events))
     lead_sheet.transpose(transpose_amount=-5,
                          min_note=12 * 5,
                          max_note=12 * 7)
     expected_melody = melodies_lib.MonophonicMelody()
     expected_melody.from_event_list(melody_events[:])
     expected_melody.transpose(transpose_amount=-5,
                               min_note=12 * 5,
                               max_note=12 * 7)
     expected_chords = chords_lib.ChordProgression()
     expected_chords.from_event_list(chord_events[:])
     expected_chords.transpose(transpose_amount=-5)
     self.assertEqual(expected_melody, lead_sheet.melody)
     self.assertEqual(expected_chords, lead_sheet.chords)
Example #25
 def testMonophonicMelodyExtractor(self):
     quantized_sequence = sequences_lib.QuantizedSequence()
     quantized_sequence.steps_per_quarter = 1
     testing_lib.add_quantized_track(quantized_sequence, 0,
                                     [(12, 100, 2, 4), (11, 1, 6, 7)])
     testing_lib.add_quantized_track(quantized_sequence, 1,
                                     [(12, 127, 2, 4), (14, 50, 6, 8)])
     expected_events = [[
         NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11
     ], [
         NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT
     ]]
     expected_melodies = []
     for events_list in expected_events:
         melody = melodies_lib.MonophonicMelody()
         melody.from_event_list(events_list,
                                steps_per_quarter=1,
                                steps_per_bar=4)
         expected_melodies.append(melody)
     unit = pipelines_common.MonophonicMelodyExtractor(min_bars=1,
                                                       min_unique_pitches=1,
                                                       gap_bars=1)
     self._unit_transform_test(unit, quantized_sequence, expected_melodies)
Example #26
    def testToSequenceSimple(self):
        melody = melodies_lib.MonophonicMelody()
        melody.from_event_list([
            NO_EVENT, 1, NO_EVENT, NOTE_OFF, NO_EVENT, 2, 3, NOTE_OFF, NO_EVENT
        ])
        sequence = melody.to_sequence(velocity=10,
                                      instrument=1,
                                      sequence_start_time=2,
                                      qpm=60.0)

        self.assertProtoEquals(
            'ticks_per_quarter: 96 '
            'tempos < qpm: 60.0 > '
            'total_time: 3.75 '
            'notes < '
            '  pitch: 1 velocity: 10 instrument: 1 start_time: 2.25 end_time: 2.75 '
            '> '
            'notes < '
            '  pitch: 2 velocity: 10 instrument: 1 start_time: 3.25 end_time: 3.5 '
            '> '
            'notes < '
            '  pitch: 3 velocity: 10 instrument: 1 start_time: 3.5 end_time: 3.75 '
            '> ', sequence)
Example #27
    def events_to_input(self, events, position):
        """Returns the input vector for the given position in the melody.

    Returns a list of floats of length self.input_size. Assuming MIN_NOTE = 48
    and MAX_NOTE = 84, self.input_size = 74. Each index represents a
    different input signal to the model.

    Indices [0, 74):
    [0, 36): A note is playing at that pitch [48, 84).
    36: Any note is playing.
    37: Silence is playing.
    38: The current event is the note-on event of the currently playing note.
    39: Whether the melody is currently ascending or descending.
    40: The last event is repeating 1 bar ago.
    41: The last event is repeating 2 bars ago.
    [42, 49): Time keeping toggles.
    49: The next event is the start of a bar.
    [50, 62): The keys the current melody is in.
    [62, 74): The keys the last 3 notes are in.

    Args:
      events: A melodies_lib.MonophonicMelody object.
      position: An integer event position in the melody.

    Returns:
      An input vector: a list of floats of length self.input_size.
    """
        current_note = None
        is_attack = False
        is_ascending = None
        last_3_notes = collections.deque(maxlen=3)
        sub_melody = melodies_lib.MonophonicMelody()
        sub_melody.from_event_list(events[:position + 1])
        for note in sub_melody:
            if note == MELODY_NO_EVENT:
                is_attack = False
            elif note == MELODY_NOTE_OFF:
                current_note = None
            else:
                is_attack = True
                current_note = note
                if last_3_notes:
                    if note > last_3_notes[-1]:
                        is_ascending = True
                    if note < last_3_notes[-1]:
                        is_ascending = False
                if note in last_3_notes:
                    last_3_notes.remove(note)
                last_3_notes.append(note)

        input_ = [0.0] * self.input_size
        if current_note:
            # The pitch of current note if a note is playing.
            input_[current_note - self.min_note] = 1.0
            # A note is playing.
            input_[self.note_range] = 1.0
        else:
            # Silence is playing.
            input_[self.note_range + 1] = 1.0

        # The current event is the note-on event of the currently playing note.
        if is_attack:
            input_[self.note_range + 2] = 1.0

        # Whether the melody is currently ascending or descending.
        if is_ascending is not None:
            input_[self.note_range + 3] = 1.0 if is_ascending else -1.0

        # Last event is repeating N bars ago.
        for i, lookback_distance in enumerate(LOOKBACK_DISTANCES):
            lookback_position = position - lookback_distance
            if (lookback_position >= 0
                    and events[position] == events[lookback_position]):
                input_[self.note_range + 4 + i] = 1.0

        # Binary time counter giving the metric location of the *next* note.
        n = len(sub_melody)
        for i in range(NUM_BINARY_TIME_COUNTERS):
            # Integer (floor) division so the toggle stays exact in Python 3.
            input_[self.note_range + 6 + i] = 1.0 if (n // 2**i) % 2 else -1.0

        # The next event is the start of a bar.
        if len(sub_melody) % STEPS_PER_BAR == 0:
            input_[self.note_range + 13] = 1.0

        # The keys the current melody is in.
        key_histogram = sub_melody.get_major_key_histogram()
        max_val = max(key_histogram)
        for i, key_val in enumerate(key_histogram):
            if key_val == max_val:
                input_[self.note_range + 14 + i] = 1.0

        # The keys the last 3 notes are in.
        last_3_note_melody = melodies_lib.MonophonicMelody()
        last_3_note_melody.from_event_list(list(last_3_notes))
        key_histogram = last_3_note_melody.get_major_key_histogram()
        max_val = max(key_histogram)
        for i, key_val in enumerate(key_histogram):
            if key_val == max_val:
                input_[self.note_range + 14 + NOTES_PER_OCTAVE + i] = 1.0

        return input_
Example #28
def run_with_flags(melody_rnn_sequence_generator):
    """Generates melodies and saves them as MIDI files.

  Uses the options specified by the flags defined in this module. Intended to be
  called from the main function of one of the melody generator modules.

  Args:
    melody_rnn_sequence_generator: A MelodyRnnSequenceGenerator object specific
        to your model.
  """
    if not FLAGS.output_dir:
        tf.logging.fatal('--output_dir required')
        return

    FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
    if FLAGS.primer_midi:
        FLAGS.primer_midi = os.path.expanduser(FLAGS.primer_midi)

    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)

    primer_sequence = None
    qpm = FLAGS.qpm if FLAGS.qpm else constants.DEFAULT_QUARTERS_PER_MINUTE
    if FLAGS.primer_melody:
        primer_melody = melodies_lib.MonophonicMelody()
        primer_melody.from_event_list(ast.literal_eval(FLAGS.primer_melody))
        primer_sequence = primer_melody.to_sequence(qpm=qpm)
    elif FLAGS.primer_midi:
        primer_sequence = midi_io.midi_file_to_sequence_proto(
            FLAGS.primer_midi)
        if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
            qpm = primer_sequence.tempos[0].qpm

    # Derive the total number of seconds to generate based on the QPM of the
    # priming sequence and the num_steps flag.
    total_seconds = _steps_to_seconds(FLAGS.num_steps, qpm)

    # Specify start/stop time for generation based on starting generation at the
    # end of the priming sequence and continuing until the sequence is num_steps
    # long.
    generate_request = generator_pb2.GenerateSequenceRequest()
    if primer_sequence:
        generate_request.input_sequence.CopyFrom(primer_sequence)
        generate_section = (
            generate_request.generator_options.generate_sections.add())
        # Set the start time to begin on the next step after the last note ends.
        notes_by_end_time = sorted(primer_sequence.notes,
                                   key=lambda n: n.end_time)
        last_end_time = notes_by_end_time[
            -1].end_time if notes_by_end_time else 0
        generate_section.start_time_seconds = last_end_time + _steps_to_seconds(
            1, qpm)
        generate_section.end_time_seconds = total_seconds

        if generate_section.start_time_seconds >= generate_section.end_time_seconds:
            tf.logging.fatal(
                'Priming sequence is longer than the total number of steps '
                'requested: Priming sequence length: %s, Generation length '
                'requested: %s', generate_section.start_time_seconds,
                total_seconds)
            return
    else:
        generate_section = (
            generate_request.generator_options.generate_sections.add())
        generate_section.start_time_seconds = 0
        generate_section.end_time_seconds = total_seconds
        generate_request.input_sequence.tempos.add().qpm = qpm
    tf.logging.debug('generate_request: %s', generate_request)

    # Make the generate request num_outputs times and save the output as midi
    # files.
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    digits = len(str(FLAGS.num_outputs))
    for i in range(FLAGS.num_outputs):
        generate_response = melody_rnn_sequence_generator.generate(
            generate_request)

        midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
        midi_path = os.path.join(FLAGS.output_dir, midi_filename)
        midi_io.sequence_proto_to_midi_file(
            generate_response.generated_sequence, midi_path)

    tf.logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs,
                    FLAGS.output_dir)
Example #29
    def _generate(self, generate_sequence_request):
        if len(generate_sequence_request.generator_options.generate_sections
               ) != 1:
            raise sequence_generator.SequenceGeneratorException(
                'This model supports only 1 generate_sections message, but got %s'
                % (len(generate_sequence_request.generator_options.
                       generate_sections)))

        generate_section = (
            generate_sequence_request.generator_options.generate_sections[0])
        primer_sequence = generate_sequence_request.input_sequence

        notes_by_end_time = sorted(primer_sequence.notes,
                                   key=lambda n: n.end_time)
        last_end_time = notes_by_end_time[
            -1].end_time if notes_by_end_time else 0
        if last_end_time > generate_section.start_time_seconds:
            raise sequence_generator.SequenceGeneratorException(
                'Got GenerateSection request for section that is before the end of '
                'the NoteSequence. This model can only extend sequences. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time_seconds,
                 notes_by_end_time[-1].end_time))

        # Quantize the priming sequence.
        quantized_sequence = sequences_lib.QuantizedSequence()
        quantized_sequence.from_note_sequence(primer_sequence,
                                              self._steps_per_quarter)
        # Setting gap_bars to infinite ensures that the entire input will be used.
        extracted_melodies, _ = melodies_lib.extract_melodies(
            quantized_sequence,
            min_bars=0,
            min_unique_pitches=1,
            gap_bars=float('inf'),
            ignore_polyphonic_notes=True)
        assert len(extracted_melodies) <= 1

        qpm = (primer_sequence.tempos[0].qpm
               if primer_sequence and primer_sequence.tempos else
               constants.DEFAULT_QUARTERS_PER_MINUTE)
        start_step = self._seconds_to_steps(
            generate_section.start_time_seconds, qpm)
        end_step = self._seconds_to_steps(generate_section.end_time_seconds,
                                          qpm)

        if extracted_melodies and extracted_melodies[0]:
            melody = extracted_melodies[0]
        else:
            tf.logging.warn(
                'No melodies were extracted from the priming sequence. '
                'Melodies will be generated from scratch.')
            melody = melodies_lib.MonophonicMelody()
            melody.from_event_list([
                random.randint(self._melody_encoder_decoder.min_note,
                               self._melody_encoder_decoder.max_note)
            ])
            start_step += 1

        transpose_amount = melody.squash(
            self._melody_encoder_decoder.min_note,
            self._melody_encoder_decoder.max_note,
            self._melody_encoder_decoder.transpose_to_key)

        # Ensure that the melody extends up to the step we want to start generating.
        melody.set_length(start_step)

        inputs = self._session.graph.get_collection('inputs')[0]
        initial_state = self._session.graph.get_collection('initial_state')[0]
        final_state = self._session.graph.get_collection('final_state')[0]
        softmax = self._session.graph.get_collection('softmax')[0]

        final_state_ = None
        for i in range(end_step - len(melody)):
            if i == 0:
                inputs_ = self._melody_encoder_decoder.get_inputs_batch(
                    [melody], full_length=True)
                initial_state_ = self._session.run(initial_state)
            else:
                inputs_ = self._melody_encoder_decoder.get_inputs_batch(
                    [melody])
                initial_state_ = final_state_

            feed_dict = {inputs: inputs_, initial_state: initial_state_}
            final_state_, softmax_ = self._session.run([final_state, softmax],
                                                       feed_dict)
            self._melody_encoder_decoder.extend_event_sequences([melody],
                                                                softmax_)

        melody.transpose(-transpose_amount)

        generate_response = generator_pb2.GenerateSequenceResponse()
        generate_response.generated_sequence.CopyFrom(
            melody.to_sequence(qpm=qpm))
        return generate_response
Example #30
 def testGetInputsBatch(self):
     events1 = [100, 100, 107, 111, NO_EVENT, 99, 112, NOTE_OFF, NO_EVENT]
     melody1 = melodies_lib.MonophonicMelody()
     melody1.from_event_list(events1)
     events2 = [9, 10, 12, 14, 15, 17, 19, 21, 22]
     melody2 = melodies_lib.MonophonicMelody()
     melody2.from_event_list(events2)
     transpose_amount1 = melody1.squash(
         self.melody_encoder_decoder.min_note,
         self.melody_encoder_decoder.max_note,
         self.melody_encoder_decoder.transpose_to_key)
     transpose_amount2 = melody2.squash(
         self.melody_encoder_decoder.min_note,
         self.melody_encoder_decoder.max_note,
         self.melody_encoder_decoder.transpose_to_key)
     self.assertEqual(transpose_amount1, -40)
     self.assertEqual(transpose_amount2, 50)
     melodies = [melody1, melody2]
     expected_inputs1 = [[
         0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0
     ],
                         [
                             0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             1.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 1.0
                         ],
                         [
                             1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 1.0
                         ],
                         [
                             0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0
                         ]]
     expected_inputs2 = [[
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         1.0
     ],
                         [
                             0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             1.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 1.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 1.0
                         ],
                         [
                             0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0
                         ]]
     expected_full_length_inputs_batch = [
         expected_inputs1, expected_inputs2
     ]
     expected_last_event_inputs_batch = [
         expected_inputs1[-1:], expected_inputs2[-1:]
     ]
     self.assertListEqual(
         expected_full_length_inputs_batch,
         self.melody_encoder_decoder.get_inputs_batch(melodies, True))
     self.assertListEqual(
         expected_last_event_inputs_batch,
         self.melody_encoder_decoder.get_inputs_batch(melodies))