Example #1
  def testStartCapture_Multiple(self):
    captor_1 = self.midi_hub.start_capture(
        120, 0.0, stop_signal=midi_hub.MidiSignal(note=3))
    captor_2 = self.midi_hub.start_capture(
        120, 1.0,
        stop_signal=midi_hub.MidiSignal(type='control_change', control=1))

    self.send_capture_messages()

    captor_1.join()
    captor_2.join()

    captured_seq_1 = captor_1.captured_sequence()
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = 4.0
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(0, 64, 0.01, 3), Note(1, 64, 2, 4), Note(2, 64, 3, 4)])
    self.assertProtoEquals(captured_seq_1, expected_seq)

    captured_seq_2 = captor_2.captured_sequence()
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = 6.0
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, 6)])
    self.assertProtoEquals(captured_seq_2, expected_seq)
Example #2
  def testExtractChordsForMelodiesCoincidentChords(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 2, 4), (11, 1, 6, 11)])
    testing_lib.add_track_to_sequence(
        self.note_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 6, 8),
         (50, 100, 33, 37), (52, 100, 34, 37)])
    testing_lib.add_chords_to_sequence(
        self.note_sequence,
        [('C', 2), ('G7', 6), ('E13', 8), ('Cmaj7', 8)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)

    melodies, _ = melodies_lib.extract_melodies(
        quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
        ignore_polyphonic_notes=True)
    chord_progressions, stats = chords_lib.extract_chords_for_melodies(
        quantized_sequence, melodies)
    expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],
                ['Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]
    stats_dict = dict((stat.name, stat) for stat in stats)
    self.assertIsNone(chord_progressions[0])
    self.assertEqual(expected,
                     [list(chords) for chords in chord_progressions[1:]])
    self.assertEqual(stats_dict['coincident_chords'].count, 1)
Example #3
 def testExtractLeadSheetFragmentsCoincidentChords(self):
   testing_lib.add_track_to_sequence(
       self.note_sequence, 0,
       [(12, 100, 2, 4), (11, 1, 6, 11)])
   testing_lib.add_track_to_sequence(
       self.note_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8),
        (50, 100, 33, 37), (52, 100, 34, 37)])
   testing_lib.add_chords_to_sequence(
       self.note_sequence,
       [('C', 2), ('G7', 6), ('Cmaj7', 33), ('F', 33)])
   quantized_sequence = sequences_lib.quantize_note_sequence(
       self.note_sequence, steps_per_quarter=1)
   lead_sheets, _ = lead_sheets_lib.extract_lead_sheet_fragments(
       quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True, require_chords=True)
   melodies, _ = melodies_lib.extract_melodies(
       quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   chord_progressions, _ = chords_lib.extract_chords_for_melodies(
       quantized_sequence, melodies)
   # Last lead sheet should be rejected for coincident chords.
   self.assertEqual(list(melodies[:2]),
                    list(lead_sheet.melody for lead_sheet in lead_sheets))
   self.assertEqual(list(chord_progressions[:2]),
                    list(lead_sheet.chords for lead_sheet in lead_sheets))
Example #4
  def testFromRelativeQuantizedNoteSequence(self):
    self.note_sequence.tempos.add(qpm=60.0)
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=100)
    performance = performance_lib.MetricPerformance(quantized_sequence)

    self.assertEqual(100, performance.steps_per_quarter)

    pe = performance_lib.PerformanceEvent
    expected_performance = [
        pe(pe.NOTE_ON, 60),
        pe(pe.NOTE_ON, 64),
        pe(pe.TIME_SHIFT, 100),
        pe(pe.NOTE_ON, 67),
        pe(pe.TIME_SHIFT, 100),
        pe(pe.NOTE_OFF, 67),
        pe(pe.TIME_SHIFT, 100),
        pe(pe.NOTE_OFF, 64),
        pe(pe.TIME_SHIFT, 100),
        pe(pe.NOTE_OFF, 60),
    ]
    self.assertEqual(expected_performance, list(performance))
Example #5
  def testInferChordsForSequence(self):
    # Test non-quantized sequence.
    sequence = copy.copy(self.note_sequence)
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 1.0, 3.0), (64, 100, 1.0, 2.0), (67, 100, 1.0, 2.0),
         (65, 100, 2.0, 3.0), (69, 100, 2.0, 3.0),
         (62, 100, 3.0, 5.0), (65, 100, 3.0, 4.0), (69, 100, 3.0, 4.0)])
    expected_sequence = copy.copy(sequence)
    testing_lib.add_chords_to_sequence(
        expected_sequence, [('C', 1.0), ('F/C', 2.0), ('Dm', 3.0)])
    sequences_lib.infer_chords_for_sequence(sequence)
    self.assertProtoEquals(expected_sequence, sequence)

    # Test quantized sequence.
    sequence = copy.copy(self.note_sequence)
    sequence.quantization_info.steps_per_quarter = 1
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 1.1, 3.0), (64, 100, 1.0, 1.9), (67, 100, 1.0, 2.0),
         (65, 100, 2.0, 3.2), (69, 100, 2.1, 3.1),
         (62, 100, 2.9, 4.8), (65, 100, 3.0, 4.0), (69, 100, 3.0, 4.1)])
    testing_lib.add_quantized_steps_to_sequence(
        sequence,
        [(1, 3), (1, 2), (1, 2), (2, 3), (2, 3), (3, 5), (3, 4), (3, 4)])
    expected_sequence = copy.copy(sequence)
    testing_lib.add_chords_to_sequence(
        expected_sequence, [('C', 1.0), ('F/C', 2.0), ('Dm', 3.0)])
    testing_lib.add_quantized_chord_steps_to_sequence(
        expected_sequence, [1, 2, 3])
    sequences_lib.infer_chords_for_sequence(sequence)
    self.assertProtoEquals(expected_sequence, sequence)
Example #6
  def testApplySustainControlChangesWithRepeatedNotes(self):
    """Verify that sustain control handles repeated notes correctly.

    For example, a single pitch played before sustain:
    x-- x-- x--
    After sustain:
    x---x---x--

    Notes should be extended until either the end of the sustain control or the
    beginning of another note of the same pitch.
    """
    sequence = copy.copy(self.note_sequence)
    testing_lib.add_control_changes_to_sequence(
        sequence, 0,
        [(1.0, 64, 127), (4.0, 64, 0)])
    expected_sequence = copy.copy(sequence)
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 0.25, 1.50), (60, 100, 1.25, 1.50), (72, 100, 2.00, 3.50),
         (60, 100, 2.0, 3.00), (60, 100, 3.50, 4.50)])
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(60, 100, 0.25, 1.25), (60, 100, 1.25, 2.00), (72, 100, 2.00, 4.00),
         (60, 100, 2.0, 3.50), (60, 100, 3.50, 4.50)])

    sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
    self.assertProtoEquals(expected_sequence, sus_sequence)
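The docstring above describes how apply_sustain_control_changes extends notes under a held pedal. Below is a minimal standalone sketch of that behavior outside the test harness; the import paths are assumptions based on how these tests use sequences_lib, testing_lib, and music_pb2, and the printed expectation simply restates the docstring.

# Sketch only: module paths are assumed rather than taken from these tests.
from magenta.music import sequences_lib
from magenta.music import testing_lib
from magenta.protobuf import music_pb2

sequence = music_pb2.NoteSequence()
# Hold the sustain pedal (CC 64) from 1.0s to 4.0s, as in the test above.
testing_lib.add_control_changes_to_sequence(
    sequence, 0, [(1.0, 64, 127), (4.0, 64, 0)])
# Two short notes of the same pitch inside the pedal window.
testing_lib.add_track_to_sequence(
    sequence, 0, [(60, 100, 1.25, 1.50), (60, 100, 2.00, 2.50)])

sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
# Per the docstring, the first note should now end at the second note's onset
# (2.00) and the second note at the pedal release (4.00).
for note in sus_sequence.notes:
  print(note.pitch, note.start_time, note.end_time)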
Example #7
 def testTranspositionPipeline(self):
   note_sequence = common_testing_lib.parse_test_proto(
       music_pb2.NoteSequence,
       """
       time_signatures: {
         numerator: 4
         denominator: 4}
       tempos: {
         qpm: 60}""")
   tp = note_sequence_pipelines.TranspositionPipeline(range(0, 2))
   testing_lib.add_track_to_sequence(
       note_sequence, 0,
       [(12, 100, 1.0, 4.0)])
   testing_lib.add_track_to_sequence(
       note_sequence, 1,
       [(36, 100, 2.0, 2.01)],
       is_drum=True)
   transposed = tp.transform(note_sequence)
   self.assertEqual(2, len(transposed))
   self.assertEqual(2, len(transposed[0].notes))
   self.assertEqual(2, len(transposed[1].notes))
   self.assertEqual(12, transposed[0].notes[0].pitch)
   self.assertEqual(13, transposed[1].notes[0].pitch)
   self.assertEqual(36, transposed[0].notes[1].pitch)
   self.assertEqual(36, transposed[1].notes[1].pitch)
Example #8
  def testExtractPerformances(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence_absolute(
        self.note_sequence, steps_per_second=100)

    perfs, _ = performance_lib.extract_performances(quantized_sequence)
    self.assertEqual(1, len(perfs))

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, min_events_discard=1, max_events_truncate=10)
    self.assertEqual(1, len(perfs))

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, min_events_discard=8, max_events_truncate=10)
    self.assertEqual(0, len(perfs))

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, min_events_discard=1, max_events_truncate=3)
    self.assertEqual(1, len(perfs))
    self.assertEqual(3, len(perfs[0]))

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, max_steps_truncate=100)
    self.assertEqual(1, len(perfs))
    self.assertEqual(100, perfs[0].num_steps)
Example #9
 def testExtractLeadSheetFragments(self):
   testing_lib.add_track_to_sequence(
       self.note_sequence, 0,
       [(12, 100, .5, 1), (11, 1, 1.5, 2.75)])
   testing_lib.add_track_to_sequence(
       self.note_sequence, 1,
       [(12, 127, .5, 1), (14, 50, 1.5, 2),
        (50, 100, 8.25, 9.25), (52, 100, 8.5, 9.25)])
   testing_lib.add_chords_to_sequence(
       self.note_sequence,
       [('C', .5), ('G7', 1.5), ('Cmaj7', 8.25)])
   quantized_sequence = sequences_lib.quantize_note_sequence(
       self.note_sequence, self.steps_per_quarter)
   lead_sheets, _ = lead_sheets_lib.extract_lead_sheet_fragments(
       quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True, require_chords=True)
   melodies, _ = melodies_lib.extract_melodies(
       quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   chord_progressions, _ = chords_lib.extract_chords_for_melodies(
       quantized_sequence, melodies)
   self.assertEqual(list(melodies),
                    list(lead_sheet.melody for lead_sheet in lead_sheets))
   self.assertEqual(list(chord_progressions),
                    list(lead_sheet.chords for lead_sheet in lead_sheets))
Example #10
 def testExtractLeadSheetFragmentsNoChords(self):
   testing_lib.add_track_to_sequence(
       self.note_sequence, 0,
       [(12, 100, 2, 4), (11, 1, 6, 11)])
   testing_lib.add_track_to_sequence(
       self.note_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8),
        (50, 100, 33, 37), (52, 100, 34, 37)])
   testing_lib.add_chords_to_sequence(
       self.note_sequence,
       [('C', 2), ('G7', 6), (NO_CHORD, 10)])
   quantized_sequence = sequences_lib.quantize_note_sequence(
       self.note_sequence, steps_per_quarter=1)
   lead_sheets, stats = lead_sheets_lib.extract_lead_sheet_fragments(
       quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True, require_chords=True)
   melodies, _ = melodies_lib.extract_melodies(
       quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   chord_progressions, _ = chords_lib.extract_chords_for_melodies(
       quantized_sequence, melodies)
   stats_dict = dict([(stat.name, stat) for stat in stats])
   # Last lead sheet should be rejected for having no chords.
   self.assertEqual(list(melodies[:2]),
                    list(lead_sheet.melody for lead_sheet in lead_sheets))
   self.assertEqual(list(chord_progressions[:2]),
                    list(lead_sheet.chords for lead_sheet in lead_sheets))
   self.assertEqual(stats_dict['empty_chord_progressions'].count, 1)
Example #11
  def testFromQuantizedNoteSequenceWithQuantizedVelocity(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 127, 1.0, 2.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence_absolute(
        self.note_sequence, steps_per_second=100)
    performance = list(performance_lib.Performance(
        quantized_sequence, num_velocity_bins=16))

    pe = performance_lib.PerformanceEvent
    expected_performance = [
        pe(pe.VELOCITY, 13),
        pe(pe.NOTE_ON, 60),
        pe(pe.NOTE_ON, 64),
        pe(pe.TIME_SHIFT, 100),
        pe(pe.VELOCITY, 16),
        pe(pe.NOTE_ON, 67),
        pe(pe.TIME_SHIFT, 100),
        pe(pe.NOTE_OFF, 67),
        pe(pe.TIME_SHIFT, 100),
        pe(pe.NOTE_OFF, 64),
        pe(pe.TIME_SHIFT, 100),
        pe(pe.NOTE_OFF, 60),
    ]
    self.assertEqual(expected_performance, performance)
Example #12
 def testDrumsExtractor(self):
   note_sequence = common_testing_lib.parse_test_proto(
       music_pb2.NoteSequence,
       """
       time_signatures: {
         numerator: 4
         denominator: 4}
       tempos: {
         qpm: 60}""")
   testing_lib.add_track_to_sequence(
       note_sequence, 0,
       [(12, 100, 2, 4), (11, 1, 6, 7), (12, 1, 6, 8)],
       is_drum=True)
   testing_lib.add_track_to_sequence(
       note_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8)])
   quantized_sequence = sequences_lib.quantize_note_sequence(
       note_sequence, steps_per_quarter=1)
   expected_events = [
       [NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
        DRUMS(11, 12)]]
   expected_drum_tracks = []
   for events_list in expected_events:
     drums = drums_lib.DrumTrack(
         events_list, steps_per_quarter=1, steps_per_bar=4)
     expected_drum_tracks.append(drums)
   unit = drum_pipelines.DrumsExtractor(min_bars=1, gap_bars=1)
   self._unit_transform_test(unit, quantized_sequence, expected_drum_tracks)
Example #13
  def testExtractPerformancesRelativeQuantized(self):
    self.note_sequence.tempos.add(qpm=60.0)
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=100)

    perfs, _ = performance_lib.extract_performances(quantized_sequence)
    self.assertEqual(1, len(perfs))

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, min_events_discard=1, max_events_truncate=10)
    self.assertEqual(1, len(perfs))

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, min_events_discard=8, max_events_truncate=10)
    self.assertEqual(0, len(perfs))

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, min_events_discard=1, max_events_truncate=3)
    self.assertEqual(1, len(perfs))
    self.assertEqual(3, len(perfs[0]))

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, max_steps_truncate=100)
    self.assertEqual(1, len(perfs))
    self.assertEqual(100, perfs[0].num_steps)
Example #14
  def testEncodeNoteSequenceAddEos(self):
    encoder = music_encoders.MidiPerformanceEncoder(
        steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,
        add_eos=True)

    ns = music_pb2.NoteSequence()
    testing_lib.add_track_to_sequence(
        ns, 0, [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 127, 1.0, 2.0)])
    ids = encoder.encode_note_sequence(ns)

    expected_ids = [
        302,  # VELOCITY(25)
        41,   # NOTE-ON(60)
        45,   # NOTE-ON(64)
        277,  # TIME-SHIFT(100)
        309,  # VELOCITY(32)
        48,   # NOTE-ON(67)
        277,  # TIME-SHIFT(100)
        136,  # NOTE-OFF(67)
        277,  # TIME-SHIFT(100)
        133,  # NOTE-OFF(64)
        277,  # TIME-SHIFT(100)
        129,  # NOTE-OFF(60)
        1     # EOS
    ]

    self.assertEqual(expected_ids, ids)
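The comments on expected_ids imply a fixed vocabulary layout for MidiPerformanceEncoder. The following purely arithmetic sketch reproduces the ids used above; the two reserved ids and the event ordering are inferred from these tests (EOS == 1 is visible in the expected output), not from the encoder's source, so treat the layout as an assumption.

# Inferred id layout for MidiPerformanceEncoder with min_pitch=21,
# max_pitch=108, steps_per_second=100 and num_velocity_bins=32.
NUM_RESERVED = 2             # assumed reserved ids; EOS == 1 appears above
NUM_PITCHES = 108 - 21 + 1   # 88 playable pitches
NUM_SHIFTS = 100             # time shifts of 1..100 steps


def note_on(pitch):
  return NUM_RESERVED + (pitch - 21)


def note_off(pitch):
  return NUM_RESERVED + NUM_PITCHES + (pitch - 21)


def time_shift(steps):
  return NUM_RESERVED + 2 * NUM_PITCHES + (steps - 1)


def velocity(bin_index):
  return NUM_RESERVED + 2 * NUM_PITCHES + NUM_SHIFTS + (bin_index - 1)


# These reproduce the ids listed in the test above.
assert note_on(60) == 41 and note_off(60) == 129
assert note_on(67) == 48 and note_off(64) == 133
assert time_shift(100) == 277
assert velocity(25) == 302 and velocity(32) == 309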
Example #15
 def testMelodyExtractor(self):
   note_sequence = common_testing_lib.parse_test_proto(
       music_pb2.NoteSequence,
       """
       time_signatures: {
         numerator: 4
         denominator: 4}
       tempos: {
         qpm: 60}""")
   testing_lib.add_track_to_sequence(
       note_sequence, 0,
       [(12, 100, 2, 4), (11, 1, 6, 7)])
   testing_lib.add_track_to_sequence(
       note_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8)])
   quantized_sequence = sequences_lib.quantize_note_sequence(
       note_sequence, steps_per_quarter=1)
   expected_events = [
       [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11],
       [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT]]
   expected_melodies = []
   for events_list in expected_events:
     melody = melodies_lib.Melody(
         events_list, steps_per_quarter=1, steps_per_bar=4)
     expected_melodies.append(melody)
   unit = melody_pipelines.MelodyExtractor(
       min_bars=1, min_unique_pitches=1, gap_bars=1)
   self._unit_transform_test(unit, quantized_sequence, expected_melodies)
Example #16
  def testDecode(self):
    encoder = music_encoders.MidiPerformanceEncoder(
        steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,
        ngrams=[(277, 129)])

    ids = [
        302,  # VELOCITY(25)
        41,   # NOTE-ON(60)
        310   # TIME-SHIFT(100), NOTE-OFF(60)
    ]

    # The decode method returns a MIDI filename; read it back into a NoteSequence.
    filename = encoder.decode(ids)
    ns = magenta.music.midi_file_to_sequence_proto(filename)

    # Remove default tempo & time signature.
    del ns.tempos[:]
    del ns.time_signatures[:]

    expected_ns = music_pb2.NoteSequence(ticks_per_quarter=220)
    testing_lib.add_track_to_sequence(expected_ns, 0, [(60, 97, 0.0, 1.0)])

    # Add source info fields.
    expected_ns.source_info.encoding_type = (
        music_pb2.NoteSequence.SourceInfo.MIDI)
    expected_ns.source_info.parser = (
        music_pb2.NoteSequence.SourceInfo.PRETTY_MIDI)

    self.assertEqual(expected_ns, ns)
Example #17
  def testExtractDrumTracksTooShort(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 127, 3, 4), (14, 50, 6, 7)],
        is_drum=True)
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    drum_tracks, _ = drums_lib.extract_drum_tracks(
        quantized_sequence, min_bars=2, gap_bars=1)
    drum_tracks = [list(drums) for drums in drum_tracks]
    self.assertEqual([], drum_tracks)

    del self.note_sequence.notes[:]
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 127, 3, 4), (14, 50, 7, 8)],
        is_drum=True)
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    drum_tracks, _ = drums_lib.extract_drum_tracks(
        quantized_sequence, min_bars=2, gap_bars=1)
    drum_tracks = [list(drums) for drums in drum_tracks]
    self.assertEqual(
        [[NO_DRUMS, NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
          DRUMS(14)]],
        drum_tracks)
Example #18
  def testEncodeNoteSequence(self):
    encoder = music_encoders.TextMelodyEncoder(
        steps_per_quarter=4, min_pitch=21, max_pitch=108)
    encoder_absolute = music_encoders.TextMelodyEncoderAbsolute(
        steps_per_second=4, min_pitch=21, max_pitch=108)

    ns = music_pb2.NoteSequence()
    ns.tempos.add(qpm=60)
    testing_lib.add_track_to_sequence(
        ns, 0,
        [(60, 127, 0.0, 0.25), (62, 127, 0.25, 0.75), (64, 127, 1.25, 2.0)])
    ids = encoder.encode_note_sequence(ns)
    ids_absolute = encoder_absolute.encode_note_sequence(ns)

    expected_ids = [
        43,  # ON(60)
        45,  # ON(62)
        2,   # HOLD(62)
        3,   # OFF(62)
        2,   # REST
        47,  # ON(64)
        2,   # HOLD(64)
        2    # HOLD(64)
    ]

    self.assertEqual(expected_ids, ids)
    self.assertEqual(expected_ids, ids_absolute)
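A similar back-of-the-envelope check works for the two melody encoders: the ids above are consistent with two reserved ids, followed by the hold/no-event and note-off events, followed by one id per pitch starting at min_pitch. This layout is inferred from the test, not from the encoder code, so it is an assumption.

# Inferred id layout for TextMelodyEncoder(Absolute) with min_pitch=21.
MELODY_HOLD = 2     # also used for rests in the expected output above
MELODY_NOTE_OFF = 3
FIRST_PITCH_ID = 4  # id assumed for NOTE_ON(min_pitch), i.e. NOTE_ON(21)


def melody_note_on(pitch):
  return FIRST_PITCH_ID + (pitch - 21)


# These reproduce the note-on ids listed in the test above.
assert melody_note_on(60) == 43
assert melody_note_on(62) == 45
assert melody_note_on(64) == 47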
Example #19
    def testStartPlayback_NoUpdates(self):
        # Use a time in the past to test handling of past notes.
        start_time = time.time() - 0.05
        seq = music_pb2.NoteSequence()
        notes = [Note(12, 100, 0.0, 1.0), Note(11, 55, 0.1, 0.5), Note(40, 45, 0.2, 0.6)]
        notes = [Note(note.pitch, note.velocity, note.start + start_time, note.end + start_time) for note in notes]
        testing_lib.add_track_to_sequence(seq, 0, notes)
        player = self.midi_hub.start_playback(seq, allow_updates=False)
        player.join()

        note_events = []
        for note in notes:
            note_events.append((note.start, "note_on", note.pitch))
            note_events.append((note.end, "note_off", note.pitch))

        # The first note-on will not be sent since it started before
        # `start_playback` was called.
        del note_events[0]

        note_events = collections.deque(sorted(note_events))
        while not self.port.message_queue.empty():
            msg = self.port.message_queue.get()
            note_event = note_events.popleft()
            self.assertEqual(msg.type, note_event[1])
            self.assertEqual(msg.note, note_event[2])
            self.assertAlmostEqual(msg.time, note_event[0], delta=0.01)

        self.assertTrue(not note_events)
Example #20
  def testQuantizer(self):
    steps_per_quarter = 4
    note_sequence = common_testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
    testing_lib.add_track_to_sequence(
        note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    expected_quantized_sequence = sequences_lib.QuantizedSequence()
    expected_quantized_sequence.qpm = 60.0
    expected_quantized_sequence.steps_per_quarter = steps_per_quarter
    testing_lib.add_quantized_track_to_sequence(
        expected_quantized_sequence, 0,
        [(12, 100, 0, 40), (11, 55, 1, 2), (40, 45, 10, 14),
         (55, 120, 16, 17), (52, 99, 19, 20)])

    unit = pipelines_common.Quantizer(steps_per_quarter)
    self._unit_transform_test(unit, note_sequence,
                              [expected_quantized_sequence])
Example #21
  def testSequenceToPianorollWeightedRoll(self):
    sequence = music_pb2.NoteSequence(total_time=2.0)
    testing_lib.add_track_to_sequence(
        sequence, 0, [(1, 100, 0.00, 1.00), (2, 100, 0.20, 0.50),
                      (3, 100, 1.20, 1.50), (4, 100, 0.40, 2.00),
                      (6, 100, 0.10, 0.60)])

    onset_upweight = 5.0
    expected_roll_weights = [
        [onset_upweight, onset_upweight, 1, onset_upweight],
        [onset_upweight, onset_upweight, onset_upweight, onset_upweight],
        [1, 1, onset_upweight, onset_upweight / 1],
        [1, 1, onset_upweight, onset_upweight / 2],
        [1, 1, 1, 1],
    ]

    expected_onsets = [
        [1, 1, 0, 1],
        [1, 1, 1, 1],
        [0, 0, 1, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 0],
    ]
    _, roll_weights, onsets, _ = data.sequence_to_pianoroll(
        sequence,
        frames_per_second=2,
        min_pitch=1,
        max_pitch=4,
        onset_upweight=onset_upweight)

    np.testing.assert_allclose(expected_roll_weights, roll_weights)
    np.testing.assert_allclose(expected_onsets, onsets)
Example #22
  def testTfAugment(self):
    augmenter = data.NoteSequenceAugmenter(
        transpose_range=(-3, -3), stretch_range=(2.0, 2.0))

    with self.test_session() as sess:
      sequence_str = tf.placeholder(tf.string)
      augmented_sequence_str_ = augmenter.tf_augment(sequence_str)
      augmented_sequence_str = sess.run(
          [augmented_sequence_str_],
          feed_dict={sequence_str: self.sequence.SerializeToString()})
    augmented_sequence = music_pb2.NoteSequence.FromString(
        augmented_sequence_str[0])

    expected_sequence = music_pb2.NoteSequence()
    expected_sequence.tempos.add(qpm=30)
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(29, 100, 4, 8), (30, 100, 12, 22), (31, 100, 22, 26),
         (32, 100, 34, 36)])
    testing_lib.add_track_to_sequence(
        expected_sequence, 1, [(57, 80, 8, 8.2), (58, 80, 24, 24.2)],
        is_drum=True)
    testing_lib.add_chords_to_sequence(
        expected_sequence, [('N.C.', 0), ('A', 16), ('Gbm', 32)])

    self.assertEqual(expected_sequence, augmented_sequence)
Example #23
  def testToNoteSequence(self):
    converter = data.DrumsConverter(
        pitch_classes=data.REDUCED_DRUM_PITCH_CLASSES,
        slice_bars=None,
        gap_bars=None,
        steps_per_quarter=1,
        roll_input=True,
        roll_output=True,
        max_tensors_per_notesequence=None)

    tensors = converter.to_tensors(self.sequence)
    sequences = converter.to_notesequences(tensors.outputs)

    self.assertEqual(1, len(sequences))
    expected_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    expected_sequence.tempos.add(qpm=120)
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(36, 80, 0, 0.5), (42, 80, 0.5, 1.0), (36, 80, 0.5, 1.0),
         (38, 80, 2.0, 2.5),
         (36, 80, 4.5, 5.0),
         (38, 80, 6.5, 7.0),
         (48, 80, 8.0, 8.5), (49, 80, 8.0, 8.5), (51, 80, 9.5, 10.0),
         (38, 80, 16.5, 17.0), (48, 80, 18.0, 18.5), (49, 80, 18.0, 18.5),
         (51, 80, 19.5, 20.0)],
        is_drum=True)
    for n in expected_sequence.notes:
      n.instrument = 9
    self.assertProtoEquals(expected_sequence, sequences[0])
Example #24
  def testFromNoteSequence_TempoChange(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    del self.note_sequence.tempos[:]

    # No tempos.
    sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)

    # Single tempo.
    self.note_sequence.tempos.add(qpm=60, time=0)
    sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)

    # Multiple tempos with no change.
    self.note_sequence.tempos.add(qpm=60, time=1)
    sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)

    # Tempo change.
    self.note_sequence.tempos.add(qpm=120, time=2)
    with self.assertRaises(sequences_lib.MultipleTempoException):
      sequences_lib.quantize_note_sequence(
          self.note_sequence, self.steps_per_quarter)
Example #25
  def testExtractPianorollSequences(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    seqs, _ = pianoroll_lib.extract_pianoroll_sequences(quantized_sequence)
    self.assertEqual(1, len(seqs))

    seqs, _ = pianoroll_lib.extract_pianoroll_sequences(
        quantized_sequence, min_steps_discard=2, max_steps_discard=5)
    self.assertEqual(1, len(seqs))

    self.note_sequence.notes[0].end_time = 1.0
    self.note_sequence.total_time = 1.0
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    seqs, _ = pianoroll_lib.extract_pianoroll_sequences(
        quantized_sequence, min_steps_discard=3, max_steps_discard=5)
    self.assertEqual(0, len(seqs))

    self.note_sequence.notes[0].end_time = 10.0
    self.note_sequence.total_time = 10.0
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    seqs, _ = pianoroll_lib.extract_pianoroll_sequences(
        quantized_sequence, min_steps_discard=3, max_steps_discard=5)
    self.assertEqual(0, len(seqs))
Example #26
    def testStartCapture_Iterate_Period_Overrun(self):
        start_time = 1.0
        captor = self.midi_hub.start_capture(
            120, start_time, stop_signal=midi_hub.MidiSignal(type="control_change", control=1)
        )

        for msg in self.capture_messages[:-1]:
            threading.Timer(0.1 * msg.time, self.port.callback, args=[msg]).start()

        period = 0.26
        captured_seqs = []
        wall_start_time = time.time()
        for captured_seq in captor.iterate(period=period):
            time.sleep(0.5)
            captured_seqs.append(captured_seq)

        self.assertEqual(2, len(captured_seqs))

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        end_time = captured_seqs[0].total_time
        self.assertAlmostEqual(wall_start_time + period, end_time, delta=0.005)
        expected_seq.total_time = end_time
        testing_lib.add_track_to_sequence(expected_seq, 0, [Note(1, 64, 2, end_time)])
        self.assertProtoEquals(captured_seqs[0], expected_seq)

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        expected_seq.total_time = 6
        testing_lib.add_track_to_sequence(expected_seq, 0, [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, 6)])
        self.assertProtoEquals(captured_seqs[1], expected_seq)
Example #27
  def testToSequenceWithRepeatedNotes(self):
    performance = performance_lib.Performance(steps_per_second=100)

    pe = performance_lib.PerformanceEvent
    perf_events = [
        pe(pe.NOTE_ON, 60),
        pe(pe.NOTE_ON, 64),
        pe(pe.TIME_SHIFT, 100),
        pe(pe.NOTE_ON, 60),
        pe(pe.TIME_SHIFT, 100),
    ]
    for event in perf_events:
      performance.append(event)

    performance_ns = performance.to_sequence()

    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 2.0), (64, 100, 0.0, 2.0), (60, 100, 1.0, 2.0)])

    # Make comparison easier by sorting.
    performance_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))

    self.assertEqual(self.note_sequence, performance_ns)
Example #28
  def testToSequenceWithContinuedNotesNotStarted(self):
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)

    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.NEW_NOTE, 64),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.CONTINUED_NOTE, 67),  # Was not started, should be ignored.
        pe(pe.STEP_END, None),

        pe(pe.END, None),
    ]
    for event in poly_events:
      poly_seq.append(event)

    poly_seq_ns = poly_seq.to_sequence(qpm=60.0)

    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 2.0), (64, 100, 0.0, 2.0)])

    # Make comparison easier by sorting.
    poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))

    self.assertEqual(self.note_sequence, poly_seq_ns)
Example #29
  def testFromQuantizedNoteSequence(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    poly_seq = list(polyphony_lib.PolyphonicSequence(quantized_sequence))

    pe = polyphony_lib.PolyphonicEvent
    expected_poly_seq = [
        pe(pe.START, None),
        # step 0
        pe(pe.NEW_NOTE, 64),
        pe(pe.NEW_NOTE, 60),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.NEW_NOTE, 67),
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.STEP_END, None),
        # step 2
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.STEP_END, None),
        # step 3
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.STEP_END, None),

        pe(pe.END, None),
    ]
    self.assertEqual(expected_poly_seq, poly_seq)
Example #30
  def testToSequenceWithBaseNoteSequence(self):
    poly_seq = polyphony_lib.PolyphonicSequence(
        steps_per_quarter=1, start_step=1)

    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.NEW_NOTE, 64),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.STEP_END, None),

        pe(pe.END, None),
    ]
    for event in poly_events:
      poly_seq.append(event)

    base_seq = copy.deepcopy(self.note_sequence)
    testing_lib.add_track_to_sequence(
        base_seq, 0, [(60, 100, 0.0, 1.0)])

    poly_seq_ns = poly_seq.to_sequence(qpm=60.0, base_note_sequence=base_seq)

    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 1.0), (60, 100, 1.0, 3.0), (64, 100, 1.0, 3.0)])

    # Make comparison easier by sorting.
    poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))

    self.assertEqual(self.note_sequence, poly_seq_ns)
Example #31
    def testCaptureSequence_Mono(self):
        start_time = 1.0

        threading.Timer(0.1, self.send_capture_messages).start()
        self.midi_hub = midi_hub.MidiHub([self.port], [self.port],
                                         midi_hub.TextureType.MONOPHONIC)
        captured_seq = self.midi_hub.capture_sequence(
            120,
            start_time,
            stop_signal=midi_hub.MidiSignal(type='control_change', control=1))

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        expected_seq.total_time = 6
        testing_lib.add_track_to_sequence(
            expected_seq, 0,
            [Note(1, 64, 2, 3),
             Note(2, 64, 3, 4),
             Note(3, 64, 4, 6)])
        self.assertProtoEquals(captured_seq, expected_seq)
Example #32
  def testEncode(self):
    encoder = music_encoders.MidiPerformanceEncoder(
        steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108)

    ns = music_pb2.NoteSequence()
    testing_lib.add_track_to_sequence(ns, 0, [(60, 97, 0.0, 1.0)])

    # Write the NoteSequence to a MIDI file since the encoder takes a filename.
    with tempfile.NamedTemporaryFile(suffix='.mid') as f:
      magenta.music.sequence_proto_to_midi_file(ns, f.name)
      ids = encoder.encode(f.name)

    expected_ids = [
        302,  # VELOCITY(25)
        41,   # NOTE-ON(60)
        277,  # TIME-SHIFT(100)
        129   # NOTE-OFF(60)
    ]

    self.assertEqual(expected_ids, ids)
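The encode and decode tests above each go through the filesystem in one direction (encode reads a MIDI file, decode writes one). The sketch below chains them into a round trip, reusing only the calls that appear in these tests; the import paths are assumptions, and the decoded velocity goes back through the 32-bin quantization.

# Sketch only: module paths are assumed rather than taken from these tests.
import tempfile

import magenta.music
from magenta.models.score2perf import music_encoders  # assumed location
from magenta.music import testing_lib
from magenta.protobuf import music_pb2

encoder = music_encoders.MidiPerformanceEncoder(
    steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108)

ns = music_pb2.NoteSequence()
testing_lib.add_track_to_sequence(ns, 0, [(60, 97, 0.0, 1.0)])

# encode() takes a MIDI filename, so write the NoteSequence out first.
with tempfile.NamedTemporaryFile(suffix='.mid') as f:
  magenta.music.sequence_proto_to_midi_file(ns, f.name)
  ids = encoder.encode(f.name)  # expected, per the test above: [302, 41, 277, 129]

# decode() returns a MIDI filename; read it back into a NoteSequence.
round_trip = magenta.music.midi_file_to_sequence_proto(encoder.decode(ids))
print(round_trip.notes)  # a single note at pitch 60 lasting one second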
Example #33
    def testDeepcopy(self):
        quantized = sequences_lib.QuantizedSequence()
        testing_lib.add_track_to_sequence(self.note_sequence, 0,
                                          [(12, 100, 0.01, 10.0),
                                           (11, 55, 0.22, 0.50),
                                           (40, 45, 2.50, 3.50),
                                           (55, 120, 4.0, 4.01),
                                           (52, 99, 4.75, 5.0)])
        quantized.from_note_sequence(self.note_sequence,
                                     self.steps_per_quarter)

        quantized_copy = copy.deepcopy(quantized)
        self.assertEqual(quantized, quantized_copy)

        testing_lib.add_quantized_track_to_sequence(quantized, 1,
                                                    [(12, 100, 4, 20),
                                                     (19, 100, 8, 16),
                                                     (24, 100, 12, 14)])

        self.assertNotEqual(quantized, quantized_copy)
Example #34
    def testTimeChangeSplitter(self):
        note_sequence = common_testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          time: 2.0
          numerator: 3
          denominator: 4}
        tempos: {
          qpm: 60}""")
        testing_lib.add_track_to_sequence(note_sequence, 0,
                                          [(12, 100, 0.01, 10.0),
                                           (11, 55, 0.22, 0.50),
                                           (40, 45, 2.50, 3.50),
                                           (55, 120, 4.0, 4.01),
                                           (52, 99, 4.75, 5.0)])
        expected_sequences = sequences_lib.split_note_sequence_on_time_changes(
            note_sequence)

        unit = note_sequence_pipelines.TimeChangeSplitter()
        self._unit_transform_test(unit, note_sequence, expected_sequences)
Example #35
 def testExtractDrumTracksTooLongTruncate(self):
     testing_lib.add_track_to_sequence(self.note_sequence,
                                       0, [(12, 127, 2, 4), (14, 50, 6, 15),
                                           (14, 50, 10, 15),
                                           (16, 100, 14, 19)],
                                       is_drum=True)
     quantized_sequence = sequences_lib.quantize_note_sequence(
         self.note_sequence, steps_per_quarter=1)
     expected = [[
         NO_DRUMS, NO_DRUMS,
         DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
         DRUMS(14), NO_DRUMS, NO_DRUMS, NO_DRUMS,
         DRUMS(14), NO_DRUMS, NO_DRUMS, NO_DRUMS
     ]]
     drum_tracks, _ = drums_lib.extract_drum_tracks(quantized_sequence,
                                                    min_bars=1,
                                                    max_steps_truncate=14,
                                                    gap_bars=1)
     drum_tracks = [list(drums) for drums in drum_tracks]
     self.assertEqual(expected, drum_tracks)
Example #36
    def testFromQuantizedNoteSequence_SplitRepeats(self):
        testing_lib.add_track_to_sequence(self.note_sequence, 0,
                                          [(0, 100, 0.0, 2.0),
                                           (0, 100, 2.0, 4.0),
                                           (1, 100, 0.0, 2.0),
                                           (2, 100, 2.0, 4.0)])
        quantized_sequence = sequences_lib.quantize_note_sequence(
            self.note_sequence, steps_per_quarter=1)
        pianoroll_seq = list(
            pianoroll_lib.PianorollSequence(quantized_sequence,
                                            min_pitch=0,
                                            split_repeats=True))

        expected_pianoroll_seq = [
            (0, 1),
            (1, ),
            (0, 2),
            (0, 2),
        ]
        self.assertEqual(expected_pianoroll_seq, pianoroll_seq)
Example #37
    def testStartCapture_StopMethod(self):
        start_time = 1.0
        captor = self.midi_hub.start_capture(120, start_time)

        self.send_capture_messages()
        time.sleep(0.1)

        stop_time = 5.5
        captor.stop(stop_time=stop_time)

        captured_seq = captor.captured_sequence()
        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        expected_seq.total_time = stop_time
        testing_lib.add_track_to_sequence(
            expected_seq, 0,
            [Note(1, 64, 2, 5),
             Note(2, 64, 3, 4),
             Note(3, 64, 4, stop_time)])
        self.assertProtoEquals(captured_seq, expected_seq)
Example #38
 def testToNoteSequenceMultipleChunks(self):
   sequence = copy.deepcopy(self.sequence)
   testing_lib.add_track_to_sequence(
       sequence, 0,
       [(64, 100, 0, 2), (60, 100, 0, 4), (67, 100, 2, 4),
        (62, 100, 4, 6), (59, 100, 4, 8), (67, 100, 6, 8),
       ])
   testing_lib.add_track_to_sequence(
       sequence, 1,
       [(40, 100, 0, 0.125), (50, 100, 0, 0.125), (50, 100, 2, 2.125),
        (40, 100, 4, 4.125), (50, 100, 4, 4.125), (50, 100, 6, 6.125),
       ],
       is_drum=True)
   converter = data_hierarchical.MultiInstrumentPerformanceConverter(
       hop_size_bars=4, chunk_size_bars=2)
   tensors = converter.to_tensors(sequence)
   self.assertEqual(1, len(tensors.outputs))
   sequences = converter.to_notesequences(tensors.outputs)
   self.assertEqual(1, len(sequences))
   self.assertProtoEquals(sequence, sequences[0])
Example #39
    def testCaptureSequence_StopTime(self):
        start_time = 1.0
        stop_time = time.time() + 1.0

        self.capture_messages[-1].time += time.time()
        threading.Timer(0.1, self.send_capture_messages).start()

        captured_seq = self.midi_hub.capture_sequence(120,
                                                      start_time,
                                                      stop_time=stop_time)

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        expected_seq.total_time = stop_time
        testing_lib.add_track_to_sequence(
            expected_seq, 0,
            [Note(1, 64, 2, 5),
             Note(2, 64, 3, 4),
             Note(3, 64, 4, stop_time)])
        self.assertProtoEquals(captured_seq, expected_seq)
Example #40
 def testTranspositionPipelineOutOfRangeNotes(self):
   note_sequence = common_testing_lib.parse_test_proto(
       music_pb2.NoteSequence,
       """
       time_signatures: {
         numerator: 4
         denominator: 4}
       tempos: {
         qpm: 60}""")
   tp = note_sequence_pipelines.TranspositionPipeline(
       range(-1, 2), min_pitch=0, max_pitch=12)
   testing_lib.add_track_to_sequence(
       note_sequence, 0,
       [(10, 100, 1.0, 2.0), (12, 100, 2.0, 4.0), (13, 100, 4.0, 5.0)])
   transposed = tp.transform(note_sequence)
   self.assertEqual(1, len(transposed))
   self.assertEqual(3, len(transposed[0].notes))
   self.assertEqual(9, transposed[0].notes[0].pitch)
   self.assertEqual(11, transposed[0].notes[1].pitch)
   self.assertEqual(12, transposed[0].notes[2].pitch)
Example #41
  def testFromQuantizedNoteSequenceNotCommonTimeSig(self):
    self.note_sequence.time_signatures[0].numerator = 7
    self.note_sequence.time_signatures[0].denominator = 8

    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])

    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)

    melody = melodies_lib.Melody()
    melody.from_quantized_sequence(quantized_sequence,
                                   search_start_step=0, instrument=0)
    expected = ([12, 11, NOTE_OFF, NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT,
                 NO_EVENT, NO_EVENT, NO_EVENT, 40, NO_EVENT, NO_EVENT, NO_EVENT,
                 NOTE_OFF, NO_EVENT, 55, NOTE_OFF, NO_EVENT, 52])
    self.assertEqual(expected, list(melody))
    self.assertEqual(14, melody.steps_per_bar)
Example #42
    def testQuantizeNoteSequence_ImplicitTimeSignatureChange(self):
        testing_lib.add_track_to_sequence(self.note_sequence, 0,
                                          [(12, 100, 0.01, 10.0),
                                           (11, 55, 0.22, 0.50),
                                           (40, 45, 2.50, 3.50),
                                           (55, 120, 4.0, 4.01),
                                           (52, 99, 4.75, 5.0)])
        del self.note_sequence.time_signatures[:]

        # No time signature.
        sequences_lib.quantize_note_sequence(self.note_sequence,
                                             self.steps_per_quarter)

        # Implicit time signature change.
        self.note_sequence.time_signatures.add(numerator=2,
                                               denominator=4,
                                               time=2)
        with self.assertRaises(sequences_lib.MultipleTimeSignatureException):
            sequences_lib.quantize_note_sequence(self.note_sequence,
                                                 self.steps_per_quarter)
Example #43
  def testQuantizer(self):
    steps_per_quarter = 4
    note_sequence = common_testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
    testing_lib.add_track_to_sequence(
        note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    expected_quantized_sequence = sequences_lib.quantize_note_sequence(
        note_sequence, steps_per_quarter)

    unit = note_sequence_pipelines.Quantizer(steps_per_quarter)
    self._unit_transform_test(unit, note_sequence,
                              [expected_quantized_sequence])
Example #44
  def testApplySustainControlChangesWithIdenticalNotes(self):
    """In the case of identical notes, one should be dropped.

    This is an edge case because in most cases, the same pitch should not sound
    twice at the same time on one instrument.
    """
    sequence = copy.copy(self.note_sequence)
    testing_lib.add_control_changes_to_sequence(
        sequence, 0,
        [(1.0, 64, 127), (4.0, 64, 0)])
    expected_sequence = copy.copy(sequence)
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 2.00, 2.50), (60, 100, 2.00, 2.50)])
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(60, 100, 2.00, 4.00)])

    sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
    self.assertProtoEquals(expected_sequence, sus_sequence)
Example #45
    def testExtractPerformancesSplitInstruments(self):
        music_testing_lib.add_track_to_sequence(self.note_sequence, 0,
                                                [(60, 100, 0.0, 4.0)])
        music_testing_lib.add_track_to_sequence(self.note_sequence, 1,
                                                [(62, 100, 0.0, 2.0),
                                                 (64, 100, 2.0, 4.0)])
        quantized_sequence = sequences_lib.quantize_note_sequence_absolute(
            self.note_sequence, steps_per_second=100)

        perfs, _ = performance_pipeline.extract_performances(
            quantized_sequence, split_instruments=True)
        self.assertEqual(2, len(perfs))

        perfs, _ = performance_pipeline.extract_performances(
            quantized_sequence, min_events_discard=8, split_instruments=True)
        self.assertEqual(1, len(perfs))

        perfs, _ = performance_pipeline.extract_performances(
            quantized_sequence, min_events_discard=16, split_instruments=True)
        self.assertEqual(0, len(perfs))
Example #46
  def testFilterDrums(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 1.0, 4.0), (19, 100, 0.95, 3.0)])
    testing_lib.add_track_to_sequence(
        self.note_sequence, 3,
        [(12, 100, 1.0, 4.0), (19, 100, 2.0, 5.0)])

    # Make instrument 0 a drum.
    for note in self.note_sequence.notes:
      if note.instrument == 0:
        note.is_drum = True

    testing_lib.add_quantized_track_to_sequence(
        self.expected_quantized_sequence, 3,
        [(12, 100, 4, 16), (19, 100, 8, 20)])

    quantized = sequences_lib.QuantizedSequence()
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    self.assertEqual(self.expected_quantized_sequence, quantized)
Example #47
    def testInferChordsForSequence(self):
        # Test non-quantized sequence.
        sequence = copy.copy(self.note_sequence)
        testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 1.0, 3.0),
                                                        (64, 100, 1.0, 2.0),
                                                        (67, 100, 1.0, 2.0),
                                                        (65, 100, 2.0, 3.0),
                                                        (69, 100, 2.0, 3.0),
                                                        (62, 100, 3.0, 5.0),
                                                        (65, 100, 3.0, 4.0),
                                                        (69, 100, 3.0, 4.0)])
        expected_sequence = copy.copy(sequence)
        testing_lib.add_chords_to_sequence(expected_sequence, [('C', 1.0),
                                                               ('F/C', 2.0),
                                                               ('Dm', 3.0)])
        sequences_lib.infer_chords_for_sequence(sequence)
        self.assertProtoEquals(expected_sequence, sequence)

        # Test quantized sequence.
        sequence = copy.copy(self.note_sequence)
        sequence.quantization_info.steps_per_quarter = 1
        testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 1.1, 3.0),
                                                        (64, 100, 1.0, 1.9),
                                                        (67, 100, 1.0, 2.0),
                                                        (65, 100, 2.0, 3.2),
                                                        (69, 100, 2.1, 3.1),
                                                        (62, 100, 2.9, 4.8),
                                                        (65, 100, 3.0, 4.0),
                                                        (69, 100, 3.0, 4.1)])
        testing_lib.add_quantized_steps_to_sequence(sequence, [(1, 3), (1, 2),
                                                               (1, 2), (2, 3),
                                                               (2, 3), (3, 5),
                                                               (3, 4), (3, 4)])
        expected_sequence = copy.copy(sequence)
        testing_lib.add_chords_to_sequence(expected_sequence, [('C', 1.0),
                                                               ('F/C', 2.0),
                                                               ('Dm', 3.0)])
        testing_lib.add_quantized_chord_steps_to_sequence(
            expected_sequence, [1, 2, 3])
        sequences_lib.infer_chords_for_sequence(sequence)
        self.assertProtoEquals(expected_sequence, sequence)
Example #48
  def testStartPlayback_Updates(self):
    start_time = time.time() + 0.1
    seq = music_pb2.NoteSequence()
    notes = [Note(0, 100, start_time, start_time + 101),
             Note(1, 100, start_time, start_time + 101)]
    testing_lib.add_track_to_sequence(seq, 0, notes)
    player = self.midi_hub.start_playback(seq, allow_updates=True)

    # Sleep past first note start.
    concurrency.Sleeper().sleep_until(start_time + 0.2)

    new_seq = music_pb2.NoteSequence()
    notes = [Note(1, 100, 0.0, 0.8), Note(2, 100, 0.0, 1.0),
             Note(11, 55, 0.3, 0.5), Note(40, 45, 0.4, 0.6)]
    notes = [Note(note.pitch, note.velocity, note.start + start_time,
                  note.end + start_time) for note in notes]
    testing_lib.add_track_to_sequence(new_seq, 0, notes)
    player.update_sequence(new_seq)

    # Finish playing sequence.
    concurrency.Sleeper().sleep(0.8)

    # Start and end the unclosed note from the first sequence.
    note_events = [(start_time, 'note_on', 0),
                   (start_time + 0.3, 'note_off', 0)]
    # The second note will not be played since it started before the update
    # and was not in the original sequence.
    del notes[1]
    for note in notes:
      note_events.append((note.start, 'note_on', note.pitch))
      note_events.append((note.end, 'note_off', note.pitch))
    note_events = collections.deque(sorted(note_events))
    while not self.port.message_queue.empty():
      msg = self.port.message_queue.get()
      note_event = note_events.popleft()
      self.assertEqual(msg.type, note_event[1])
      self.assertEqual(msg.note, note_event[2])
      self.assertAlmostEqual(msg.time, note_event[0], delta=0.01)

    self.assertTrue(not note_events)
    player.stop()
Example #49
    def testInferChordsForSequenceAddKeySignatures(self):
        sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(
            sequence,
            0,
            [
                (60, 100, 0.0, 1.0),
                (64, 100, 0.0, 1.0),
                (67, 100, 0.0, 1.0),  # C
                (62, 100, 1.0, 2.0),
                (65, 100, 1.0, 2.0),
                (69, 100, 1.0, 2.0),  # Dm
                (60, 100, 2.0, 3.0),
                (65, 100, 2.0, 3.0),
                (69, 100, 2.0, 3.0),  # F
                (59, 100, 3.0, 4.0),
                (62, 100, 3.0, 4.0),
                (67, 100, 3.0, 4.0),  # G
                (66, 100, 4.0, 5.0),
                (70, 100, 4.0, 5.0),
                (73, 100, 4.0, 5.0),  # F#
                (68, 100, 5.0, 6.0),
                (71, 100, 5.0, 6.0),
                (75, 100, 5.0, 6.0),  # G#m
                (66, 100, 6.0, 7.0),
                (71, 100, 6.0, 7.0),
                (75, 100, 6.0, 7.0),  # B
                (65, 100, 7.0, 8.0),
                (68, 100, 7.0, 8.0),
                (73, 100, 7.0, 8.0)
            ])  # C#
        quantized_sequence = sequences_lib.quantize_note_sequence(
            sequence, steps_per_quarter=4)
        chord_inference.infer_chords_for_sequence(quantized_sequence,
                                                  chords_per_bar=2,
                                                  add_key_signatures=True)

        expected_key_signatures = [(0, 0.0), (6, 4.0)]
        key_signatures = [(ks.key, ks.time)
                          for ks in quantized_sequence.key_signatures]
        self.assertEqual(expected_key_signatures, key_signatures)
Example #50
  def testStartCapture_IsDrum(self):
    start_time = 1.0
    captor = self.midi_hub.start_capture(120, start_time)

    # Channels are 0-indexed in mido.
    self.capture_messages[2].channel = 9
    self.send_capture_messages()
    time.sleep(0.1)

    stop_time = 5.5
    captor.stop(stop_time=stop_time)

    captured_seq = captor.captured_sequence()
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = stop_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, stop_time)])
    expected_seq.notes[0].is_drum = True
    self.assertProtoEquals(captured_seq, expected_seq)
Example #51
  def testFromNoteSequence_TimeSignatureChange(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    del self.note_sequence.time_signatures[:]
    quantized = sequences_lib.QuantizedSequence()
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)

    # Single time signature.
    self.note_sequence.time_signatures.add(numerator=4, denominator=4, time=0)
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)

    # Multiple time signatures with no change.
    self.note_sequence.time_signatures.add(numerator=4, denominator=4, time=1)
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)

    # Time signature change.
    self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=2)
    with self.assertRaises(sequences_lib.MultipleTimeSignatureException):
      quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
Example #52
    def testExtractMelodiesLateStart(self):
        testing_lib.add_track_to_sequence(self.note_sequence, 0,
                                          [(12, 100, 102, 103),
                                           (13, 100, 104, 106)])
        testing_lib.add_track_to_sequence(self.note_sequence, 1,
                                          [(12, 100, 100, 101),
                                           (13, 100, 102, 105)])

        quantized_sequence = sequences_lib.quantize_note_sequence(
            self.note_sequence, steps_per_quarter=1)

        expected = [[NO_EVENT, NO_EVENT, 12, NOTE_OFF, 13, NO_EVENT],
                    [12, NOTE_OFF, 13, NO_EVENT, NO_EVENT]]
        melodies, _ = melodies_lib.extract_melodies(
            quantized_sequence,
            min_bars=1,
            gap_bars=1,
            min_unique_pitches=2,
            ignore_polyphonic_notes=True)
        melodies = sorted([list(melody) for melody in melodies])
        self.assertEqual(expected, melodies)
Example #53
  def testExtractMelodiesMelodyTooLong(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 127, 2, 4), (14, 50, 6, 15)])
    testing_lib.add_track_to_sequence(
        self.note_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 6, 18)])

    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14] +
                [NO_EVENT] * 7,
                [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14] +
                [NO_EVENT] * 7]
    melodies, _ = melodies_lib.extract_melodies(
        quantized_sequence, min_bars=1, max_steps_truncate=14,
        max_steps_discard=18, gap_bars=1, min_unique_pitches=2,
        ignore_polyphonic_notes=True)
    melodies = [list(melody) for melody in melodies]
    self.assertEqual(expected, melodies)
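The parameter names suggest that max_steps_truncate caps a melody's length while max_steps_discard drops melodies that exceed it (see the melodies_lib docstring for exact boundary behavior). An illustrative check against the expected output above:

# Both expected melodies contain exactly 14 events, i.e. the length given by
# max_steps_truncate in the call above.
assert all(len(melody) == 14 for melody in expected)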
예제 #54
0
  def testExtractMelodiesTooFewPitches(self):
    # Test that extract_melodies discards melodies with too few unique pitches,
    # where pitches an octave apart count as the same pitch.
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0, 1), (13, 100, 1, 2), (18, 100, 2, 3),
         (24, 100, 3, 4), (25, 100, 4, 5)])
    testing_lib.add_track_to_sequence(
        self.note_sequence, 1,
        [(12, 100, 0, 1), (13, 100, 1, 2), (18, 100, 2, 3),
         (25, 100, 3, 4), (26, 100, 4, 5)])

    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    expected = [[12, 13, 18, 25, 26]]
    melodies, _ = melodies_lib.extract_melodies(
        quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=4,
        ignore_polyphonic_notes=True)
    melodies = [list(melody) for melody in melodies]
    self.assertEqual(expected, melodies)
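Why the first track is discarded under min_unique_pitches=4: when octaves are ignored, its pitches collapse to only three pitch classes. A quick illustrative check:

unique_classes_track_0 = {pitch % 12 for pitch in (12, 13, 18, 24, 25)}  # {0, 1, 6}
unique_classes_track_1 = {pitch % 12 for pitch in (12, 13, 18, 25, 26)}  # {0, 1, 2, 6}
assert len(unique_classes_track_0) == 3   # below the min_unique_pitches=4 threshold
assert len(unique_classes_track_1) == 4   # meets the threshold, so this track is kept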
예제 #55
0
  def testSequenceNotePitchVectorsVariableLengthFrames(self):
    sequence = music_pb2.NoteSequence()
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 0.0, 0.0), (62, 100, 0.0, 0.5),
         (60, 100, 1.5, 2.5),
         (64, 100, 2.0, 2.5), (67, 100, 2.25, 2.75), (70, 100, 2.5, 4.5),
         (60, 100, 6.0, 6.0),
        ])
    note_pitch_vectors = chord_inference.sequence_note_pitch_vectors(
        sequence, seconds_per_frame=[1.5, 2.0, 3.0, 5.0])

    expected_note_pitch_vectors = [
        [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    ]

    self.assertEqual(expected_note_pitch_vectors, note_pitch_vectors.tolist())
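Each row of the expected output is consistent with a unit-normalized (L2) 12-dimensional pitch-class profile for the corresponding frame, with an all-zero row for a silent frame. A small sanity check over the expected values (assumes numpy is available):

import numpy as np

vectors = np.array(expected_note_pitch_vectors)
norms = np.linalg.norm(vectors, axis=1)
# Every frame is either silent (norm 0) or a unit-length pitch-class profile.
assert all(np.isclose(n, 0.0) or np.isclose(n, 1.0) for n in norms)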
예제 #56
0
    def testSplitNoteSequenceNoTimeChanges(self):
        # Tests that splitting on time changes leaves a NoteSequence with no
        # time changes (no time signature or tempo changes) as a single,
        # unmodified subsequence.
        sequence = copy.copy(self.note_sequence)
        testing_lib.add_track_to_sequence(sequence, 0, [(12, 100, 0.01, 10.0),
                                                        (11, 55, 0.22, 0.50),
                                                        (40, 45, 2.50, 3.50),
                                                        (55, 120, 4.0, 4.01),
                                                        (52, 99, 4.75, 5.0)])
        testing_lib.add_chords_to_sequence(sequence, [('C', 1.5), ('G7', 3.0),
                                                      ('F', 4.8)])

        expected_subsequence = music_pb2.NoteSequence()
        expected_subsequence.CopyFrom(sequence)
        expected_subsequence.subsequence_info.start_time_offset = 0.0
        expected_subsequence.subsequence_info.end_time_offset = 0.0

        subsequences = sequences_lib.split_note_sequence_on_time_changes(
            sequence)
        self.assertEqual(1, len(subsequences))
        self.assertProtoEquals(expected_subsequence, subsequences[0])
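In the general case the function returns one subsequence per span between time signature or tempo changes, and each carries subsequence_info offsets locating it in the original sequence. A sketch using only the names shown above:

for subsequence in sequences_lib.split_note_sequence_on_time_changes(sequence):
  # Offset of this subsequence's time origin within the original sequence.
  original_start = subsequence.subsequence_info.start_time_offset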
예제 #57
0
    def testAssertIsRelativeQuantizedNoteSequence(self):
        testing_lib.add_track_to_sequence(self.note_sequence, 0,
                                          [(12, 100, 0.01, 10.0),
                                           (11, 55, 0.22, 0.50),
                                           (40, 45, 2.50, 3.50),
                                           (55, 120, 4.0, 4.01),
                                           (52, 99, 4.75, 5.0)])

        relative_quantized_sequence = sequences_lib.quantize_note_sequence(
            self.note_sequence, steps_per_quarter=self.steps_per_quarter)
        absolute_quantized_sequence = sequences_lib.quantize_note_sequence_absolute(
            self.note_sequence, steps_per_second=4)

        sequences_lib.assert_is_relative_quantized_sequence(
            relative_quantized_sequence)
        with self.assertRaises(sequences_lib.QuantizationStatusException):
            sequences_lib.assert_is_relative_quantized_sequence(
                absolute_quantized_sequence)
        with self.assertRaises(sequences_lib.QuantizationStatusException):
            sequences_lib.assert_is_relative_quantized_sequence(
                self.note_sequence)
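A sketch of turning the assertion above into a boolean guard (a hypothetical helper, not part of sequences_lib):

def is_relative_quantized(sequence):
  # Wraps the assertion used in the test; returns False instead of raising.
  try:
    sequences_lib.assert_is_relative_quantized_sequence(sequence)
    return True
  except sequences_lib.QuantizationStatusException:
    return False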
예제 #58
0
    def testExtractSubsequence(self):
        sequence = copy.copy(self.note_sequence)
        testing_lib.add_track_to_sequence(sequence, 0, [(12, 100, 0.01, 10.0),
                                                        (11, 55, 0.22, 0.50),
                                                        (40, 45, 2.50, 3.50),
                                                        (55, 120, 4.0, 4.01),
                                                        (52, 99, 4.75, 5.0)])
        testing_lib.add_chords_to_sequence(sequence, [('C', 1.5), ('G7', 3.0),
                                                      ('F', 4.8)])
        expected_subsequence = copy.copy(self.note_sequence)
        testing_lib.add_track_to_sequence(expected_subsequence, 0,
                                          [(40, 45, 0.0, 1.0),
                                           (55, 120, 1.5, 1.51)])
        testing_lib.add_chords_to_sequence(expected_subsequence, [('C', 0.0),
                                                                  ('G7', 0.5)])
        expected_subsequence.total_time = 1.51
        expected_subsequence.subsequence_info.start_time_offset = 2.5
        expected_subsequence.subsequence_info.end_time_offset = 5.99

        subsequence = sequences_lib.extract_subsequence(sequence, 2.5, 4.75)
        self.assertProtoEquals(expected_subsequence, subsequence)
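As the expected proto shows, extract_subsequence shifts note and chord times so the extracted window starts at 0 and records the window's original position in subsequence_info. A sketch of mapping a subsequence note back to its time in the original sequence:

first_note = subsequence.notes[0]
original_start_time = (
    first_note.start_time + subsequence.subsequence_info.start_time_offset)
# e.g. 0.0 + 2.5 == 2.5, the note's start time in the original sequence.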
예제 #59
0
    def testStretchNoteSequence(self):
        expected_stretched_sequence = copy.deepcopy(self.note_sequence)
        expected_stretched_sequence.tempos[0].qpm = 40

        testing_lib.add_track_to_sequence(self.note_sequence, 0,
                                          [(12, 100, 0.0, 10.0),
                                           (11, 55, 0.2, 0.5),
                                           (40, 45, 2.5, 3.5)])
        testing_lib.add_track_to_sequence(expected_stretched_sequence, 0,
                                          [(12, 100, 0.0, 15.0),
                                           (11, 55, 0.3, 0.75),
                                           (40, 45, 3.75, 5.25)])

        testing_lib.add_chords_to_sequence(self.note_sequence, [('B7', 0.5),
                                                                ('Em9', 2.0)])
        testing_lib.add_chords_to_sequence(expected_stretched_sequence,
                                           [('B7', 0.75), ('Em9', 3.0)])

        stretched_sequence = sequences_lib.stretch_note_sequence(
            self.note_sequence, stretch_factor=1.5)
        self.assertProtoEquals(expected_stretched_sequence, stretched_sequence)
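Event times scale by stretch_factor while the tempo scales inversely; here 60 qpm becomes 40 qpm, assuming the setUp (not shown) creates the sequence at 60 qpm, as the expected proto implies. A minimal sketch of that relationship:

stretched = sequences_lib.stretch_note_sequence(
    self.note_sequence, stretch_factor=1.5)
assert stretched.tempos[0].qpm == 60 / 1.5   # 40.0
# A stretch_factor below 1.0 would instead speed the sequence up.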
예제 #60
0
  def setUp(self):
    sequence = music_pb2.NoteSequence()
    sequence.tempos.add(qpm=60)
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(35, 100, 0, 10), (35, 55, 1, 2), (44, 55, 1, 2), (40, 45, 4, 5),
         (35, 45, 9, 10), (40, 45, 13, 13), (55, 120, 16, 18),
         (60, 100, 16, 17), (52, 99, 19, 20), (40, 45, 33, 34),
         (55, 120, 36, 37), (60, 100, 36, 37), (52, 99, 39, 42)],
        is_drum=True)
    testing_lib.add_track_to_sequence(
        sequence, 1,
        [(35, 100, 5, 10), (35, 55, 6, 8), (44, 55, 7, 9)],
        is_drum=False)
    self.sequence = sequence
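Because the two tracks above are tagged via is_drum, downstream code can separate percussion from pitched material directly from the note protos. A small illustrative sketch over the sequence built in this setUp:

drum_notes = [note for note in sequence.notes if note.is_drum]
pitched_notes = [note for note in sequence.notes if not note.is_drum]
# 13 drum notes from track 0 and 3 pitched notes from track 1 in this fixture.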