    def testMelodyInferencePolyphonic(self):
        sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(sequence, 0, [(36, 100, 0.0, 4.0),
                                                        (64, 100, 0.0, 1.0),
                                                        (67, 100, 0.0, 1.0),
                                                        (65, 100, 1.0, 2.0),
                                                        (69, 100, 1.0, 2.0),
                                                        (67, 100, 2.0, 4.0),
                                                        (71, 100, 2.0, 3.0),
                                                        (72, 100, 3.0, 4.0)])

        melody_inference.infer_melody_for_sequence(sequence)

        expected_sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(expected_sequence, 0,
                                          [(36, 100, 0.0, 4.0),
                                           (64, 100, 0.0, 1.0),
                                           (67, 100, 0.0, 1.0),
                                           (65, 100, 1.0, 2.0),
                                           (69, 100, 1.0, 2.0),
                                           (67, 100, 2.0, 4.0),
                                           (71, 100, 2.0, 3.0),
                                           (72, 100, 3.0, 4.0)])
        testing_lib.add_track_to_sequence(expected_sequence, 1,
                                          [(67, 127, 0.0, 1.0),
                                           (69, 127, 1.0, 2.0),
                                           (71, 127, 2.0, 3.0),
                                           (72, 127, 3.0, 4.0)])

        self.assertEqual(expected_sequence, sequence)
Example #2
  def testStartCapture_Iterate_Period_Overrun(self):
    start_time = 1.0
    captor = self.midi_hub.start_capture(
        120, start_time,
        stop_signal=midi_hub.MidiSignal(type='control_change', control=1))

    for msg in self.capture_messages[:-1]:
      threading.Timer(0.1 * msg.time, self.port.callback, args=[msg]).start()

    period = 0.26
    captured_seqs = []
    wall_start_time = time.time()
    for captured_seq in captor.iterate(period=period):
      time.sleep(0.5)
      captured_seqs.append(captured_seq)

    self.assertLen(captured_seqs, 2)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    end_time = captured_seqs[0].total_time
    self.assertAlmostEqual(wall_start_time + period, end_time, delta=0.01)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0, [Note(1, 64, 2, end_time)])
    self.assertProtoEquals(captured_seqs[0], expected_seq)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = 6
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, 6)])
    self.assertProtoEquals(captured_seqs[1], expected_seq)
Example #3
  def testStartCapture_Multiple(self):
    captor_1 = self.midi_hub.start_capture(
        120, 0.0, stop_signal=midi_hub.MidiSignal(note=3))
    captor_2 = self.midi_hub.start_capture(
        120, 1.0,
        stop_signal=midi_hub.MidiSignal(type='control_change', control=1))

    self.send_capture_messages()

    captor_1.join()
    captor_2.join()

    captured_seq_1 = captor_1.captured_sequence()
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = 4.0
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(0, 64, 0.01, 3), Note(1, 64, 2, 4), Note(2, 64, 3, 4)])
    self.assertProtoEquals(captured_seq_1, expected_seq)

    captured_seq_2 = captor_2.captured_sequence()
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = 6.0
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, 6)])
    self.assertProtoEquals(captured_seq_2, expected_seq)
Example #4
  def testMixSequences(self):
    sample_rate = 10

    sequence1 = music_pb2.NoteSequence()
    sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=90)
    sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=90)
    sequence1.total_time = 2.0

    samples1 = np.linspace(0, 1, int(sample_rate * sequence1.total_time))

    sequence2 = music_pb2.NoteSequence()
    sequence2.notes.add(pitch=64, start_time=0.5, end_time=1.0, velocity=90)
    sequence2.total_time = 1.0

    samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))

    mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
        [samples1, samples2], sample_rate, [sequence1, sequence2])

    expected_sequence = music_pb2.NoteSequence()
    expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
    expected_sequence.notes.add(
        pitch=60, start_time=0.5, end_time=1.0, velocity=127)
    expected_sequence.notes.add(
        pitch=62, start_time=1.0, end_time=2.0, velocity=127)
    expected_sequence.notes.add(
        pitch=64, start_time=0.5, end_time=1.0, velocity=127)
    expected_sequence.notes.add(
        pitch=64, start_time=1.5, end_time=2.0, velocity=127)
    expected_sequence.total_time = 2.0

    self.assertProtoEquals(expected_sequence, mixed_sequence)

    expected_samples = np.concatenate([samples2, samples2]) * .5 + samples1 * .5
    np.testing.assert_array_equal(expected_samples, mixed_samples)
Example #5
    def testMixSequencesTotalTime(self):
        sample_rate = 10

        sequence1 = music_pb2.NoteSequence()
        sequence1.notes.add(pitch=60,
                            start_time=0.5,
                            end_time=1.0,
                            velocity=90)
        sequence1.notes.add(pitch=62,
                            start_time=1.0,
                            end_time=1.5,
                            velocity=90)
        sequence1.total_time = 1.5

        samples1 = np.linspace(0, 1, int(sample_rate * 2))

        sequence2 = music_pb2.NoteSequence()
        sequence2.notes.add(pitch=64,
                            start_time=0.5,
                            end_time=0.9,
                            velocity=90)
        sequence2.total_time = 0.9

        samples2 = np.linspace(0, 1, int(sample_rate * 1))

        mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
            [samples1, samples2], sample_rate, [sequence1, sequence2])

        expected_sequence = music_pb2.NoteSequence()
        expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
        expected_sequence.notes.add(pitch=60,
                                    start_time=0.5,
                                    end_time=1.0,
                                    velocity=127)
        expected_sequence.notes.add(pitch=62,
                                    start_time=1.0,
                                    end_time=1.5,
                                    velocity=127)
        expected_sequence.notes.add(pitch=64,
                                    start_time=0.5,
                                    end_time=0.9,
                                    velocity=127)
        expected_sequence.notes.add(pitch=64,
                                    start_time=1.5,
                                    end_time=1.9,
                                    velocity=127)

        # Expected total_time is 1.9 because each sequence is repeated according
        # to the length of its associated audio. So sequence1 is not repeated at
        # all (its audio is 2 seconds long) and sequence2 is repeated once after
        # shifting all of its notes by the audio length of 1 second. The final
        # total_time is left as-is after the last repeat, so it ends up being
        # 1 + .9 seconds.
        expected_sequence.total_time = 1.9

        self.assertProtoEquals(expected_sequence, mixed_sequence)

        expected_samples = (
            np.concatenate([samples2, samples2]) * .5 + samples1 * .5)
        np.testing.assert_array_equal(expected_samples, mixed_samples)
Example #6
    def testMixSequencesLongerNoteSequence(self):
        sample_rate = 10

        sequence1 = music_pb2.NoteSequence()
        sequence1.notes.add(pitch=60,
                            start_time=0.5,
                            end_time=1.0,
                            velocity=90)
        sequence1.notes.add(pitch=62,
                            start_time=1.0,
                            end_time=2.0,
                            velocity=90)
        sequence1.total_time = 2.0

        # samples1 will be .1 seconds shorter than sequence1
        samples1 = np.linspace(0, 1,
                               int(sample_rate * (sequence1.total_time - .1)))

        sequence2 = music_pb2.NoteSequence()
        sequence2.notes.add(pitch=64,
                            start_time=0.5,
                            end_time=1.0,
                            velocity=90)
        sequence2.total_time = 1.0

        samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))

        mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
            [samples1, samples2], sample_rate, [sequence1, sequence2])

        expected_sequence = music_pb2.NoteSequence()
        expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
        expected_sequence.notes.add(pitch=60,
                                    start_time=0.5,
                                    end_time=1.0,
                                    velocity=127)
        expected_sequence.notes.add(pitch=62,
                                    start_time=1.0,
                                    end_time=2.0,
                                    velocity=127)
        expected_sequence.notes.add(pitch=64,
                                    start_time=0.5,
                                    end_time=1.0,
                                    velocity=127)
        expected_sequence.notes.add(pitch=64,
                                    start_time=1.5,
                                    end_time=2.0,
                                    velocity=127)
        expected_sequence.total_time = 2.0

        self.assertProtoEquals(expected_sequence, mixed_sequence)

        # We expect samples1 to be padded with 2 zero samples and samples2 to be
        # repeated once in full and then once more with a single sample.
        expected_samples = (
            np.concatenate([samples2, samples2, [samples2[0]]]) * .5 +
            np.concatenate([samples1, [0, 0]]) * .5)
        np.testing.assert_array_equal(expected_samples, mixed_samples)
Example #7
  def testStartCapture_MidCapture(self):
    start_time = 1.0
    captor = self.midi_hub.start_capture(120, start_time)

    # Receive the first 6 messages.
    for msg in self.capture_messages[0:6]:
      self.port.callback(msg)
    time.sleep(0.1)

    end_time = 3.5
    captured_seq = captor.captured_sequence(end_time)
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0, [Note(1, 64, 2, 3.5), Note(2, 64, 3, 3.5)])
    self.assertProtoEquals(captured_seq, expected_seq)

    end_time = 4.5
    captured_seq = captor.captured_sequence(end_time)
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 4.5), Note(2, 64, 3, 4.5), Note(3, 64, 4, 4.5)])
    self.assertProtoEquals(captured_seq, expected_seq)

    end_time = 6.0
    captured_seq = captor.captured_sequence(end_time)
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 6), Note(2, 64, 3, 6), Note(3, 64, 4, 6)])
    self.assertProtoEquals(captured_seq, expected_seq)

    # Receive the rest of the messages.
    for msg in self.capture_messages[6:]:
      self.port.callback(msg)
    time.sleep(0.1)

    end_time = 6.0
    captured_seq = captor.captured_sequence(end_time)
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, 6)])
    self.assertProtoEquals(captured_seq, expected_seq)

    captor.stop()
Example #8
    def testMelodyInferenceSingleNote(self):
        sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.5, 1.0)])

        melody_inference.infer_melody_for_sequence(sequence)

        expected_sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(expected_sequence, 0,
                                          [(60, 100, 0.5, 1.0)])
        testing_lib.add_track_to_sequence(expected_sequence, 1,
                                          [(60, 127, 0.5, 1.0)])

        self.assertEqual(expected_sequence, sequence)
Example #9
  def testStartCapture_Callback_Period(self):
    start_time = 1.0
    captor = self.midi_hub.start_capture(120, start_time)

    for msg in self.capture_messages[:-1]:
      threading.Timer(0.1 * msg.time, self.port.callback, args=[msg]).start()

    period = 0.26
    wall_start_time = time.time()
    captured_seqs = []

    def fn(captured_seq):
      self.assertAlmostEqual(0, (time.time() - wall_start_time) % period,
                             delta=0.01)
      captured_seqs.append(captured_seq)

    name = captor.register_callback(fn, period=period)
    time.sleep(1.0)
    captor.cancel_callback(name)

    self.assertLen(captured_seqs, 3)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    end_time = captured_seqs[0].total_time
    self.assertAlmostEqual(wall_start_time + period, end_time, delta=0.01)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0, [Note(1, 64, 2, end_time)])
    self.assertProtoEquals(captured_seqs[0], expected_seq)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    end_time = captured_seqs[1].total_time
    self.assertAlmostEqual(wall_start_time + 2 * period, end_time, delta=0.01)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, end_time)])
    self.assertProtoEquals(captured_seqs[1], expected_seq)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    end_time = captured_seqs[2].total_time
    self.assertAlmostEqual(wall_start_time + 3 * period, end_time, delta=0.01)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, end_time)])
    self.assertProtoEquals(captured_seqs[2], expected_seq)
Example #10
    def testInferChordsForSequence(self):
        sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(
            sequence,
            0,
            [
                (60, 100, 0.0, 1.0),
                (64, 100, 0.0, 1.0),
                (67, 100, 0.0, 1.0),  # C
                (62, 100, 1.0, 2.0),
                (65, 100, 1.0, 2.0),
                (69, 100, 1.0, 2.0),  # Dm
                (60, 100, 2.0, 3.0),
                (65, 100, 2.0, 3.0),
                (69, 100, 2.0, 3.0),  # F
                (59, 100, 3.0, 4.0),
                (62, 100, 3.0, 4.0),
                (67, 100, 3.0, 4.0)
            ])  # G
        quantized_sequence = sequences_lib.quantize_note_sequence(
            sequence, steps_per_quarter=4)
        chord_inference.infer_chords_for_sequence(quantized_sequence,
                                                  chords_per_bar=2)

        expected_chords = [('C', 0.0), ('Dm', 1.0), ('F', 2.0), ('G', 3.0)]
        chords = [(ta.text, ta.time)
                  for ta in quantized_sequence.text_annotations]

        self.assertEqual(expected_chords, chords)
Example #11
from typing import List

# `Key`, `MajorKey`, and `PAUSE` are assumed to be defined elsewhere in the
# same project.
def arrange_melody(degrees: List[int],
                   rhythm: List[float],
                   key: Key = MajorKey("C"),
                   octave: int = 4) -> music_pb2.NoteSequence:
    """Create a simple diatonic melody.

    Note: The `degrees` are ZERO-INDEXED, not one-indexed.

    TODO Add `velocities` arg.
    """
    assert len(degrees) == len(rhythm)
    seq = music_pb2.NoteSequence()
    t = 0.0
    for degree, duration in zip(degrees, rhythm):
        if degree != PAUSE:
            note = key.get_note(degree=degree, octave=octave)
            seq.notes.add(
                pitch=note.midi_num,
                start_time=t,
                end_time=t + duration,
                velocity=80,
            )
        t += duration
    seq.total_time = t
    return seq
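
A minimal usage sketch of `arrange_melody` (hedged: `MajorKey` and the `PAUSE`
sentinel are assumed to come from the same helper module as the function above):

melody = arrange_melody(
    degrees=[0, 1, 2, PAUSE, 4],       # zero-indexed scale degrees; PAUSE is a rest
    rhythm=[0.5, 0.5, 0.5, 0.5, 1.0],  # per-event durations in seconds
    key=MajorKey("G"),
    octave=4)
# melody.total_time == 3.0; the PAUSE step advances time without adding a note,
# so only four notes are emitted.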
Example #12
  def testStartPlayback_NoUpdates(self):
    # Use a time in the past to test handling of past notes.
    start_time = time.time() - 0.01
    seq = music_pb2.NoteSequence()
    notes = [Note(12, 100, 0.0, 1.0), Note(11, 55, 0.1, 0.5),
             Note(40, 45, 0.2, 0.6)]
    notes = [Note(note.pitch, note.velocity, note.start + start_time,
                  note.end + start_time) for note in notes]
    testing_lib.add_track_to_sequence(seq, 0, notes)
    player = self.midi_hub.start_playback(seq, allow_updates=False)
    player.join()

    note_events = []
    for note in notes:
      note_events.append((note.start, 'note_on', note.pitch))
      note_events.append((note.end, 'note_off', note.pitch))

    # The first note-on will not be sent since it started before
    # `start_playback` was called.
    del note_events[0]

    note_events = collections.deque(sorted(note_events))
    while not self.port.message_queue.empty():
      msg = self.port.message_queue.get()
      note_event = note_events.popleft()
      self.assertEqual(msg.type, note_event[1])
      self.assertEqual(msg.note, note_event[2])
      self.assertAlmostEqual(msg.time, note_event[0], delta=0.01)

    self.assertFalse(note_events)
Example #13
    def testSplitMidi(self):
        sequence = music_pb2.NoteSequence()
        sequence.notes.add(pitch=60, start_time=1.0, end_time=2.9)
        sequence.notes.add(pitch=60, start_time=8.0, end_time=11.0)
        sequence.notes.add(pitch=60, start_time=14.0, end_time=17.0)
        sequence.notes.add(pitch=60, start_time=20.0, end_time=23.0)
        sequence.total_time = 25.

        sample_rate = 160
        samples = np.zeros(sample_rate * int(sequence.total_time))
        splits = audio_label_data_utils.find_split_points(
            sequence, samples, sample_rate, 0, 3)

        self.assertEqual(splits,
                         [0., 3., 6., 9., 12., 15., 18., 21., 24., 25.])

        samples[int(8.5 * sample_rate)] = 1
        samples[int(8.5 * sample_rate) + 1] = -1
        splits = audio_label_data_utils.find_split_points(
            sequence, samples, sample_rate, 0, 3)

        self.assertEqual(splits, [
            0.0, 3.0, 6.0, 8.50625, 11.50625, 14.50625, 17.50625, 20.50625,
            23.50625, 25.
        ])
Example #14
    def testEventListChordsWithMelodies(self):
        note_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
        note_sequence.tempos.add(qpm=60.0)
        testing_lib.add_chords_to_sequence(note_sequence,
                                           [('N.C.', 0), ('C', 2), ('G7', 6)])
        note_sequence.total_time = 8.0

        melodies = [
            melodies_lib.Melody([60, -2, -2, -1],
                                start_step=0,
                                steps_per_quarter=1,
                                steps_per_bar=4),
            melodies_lib.Melody([62, -2, -2, -1],
                                start_step=4,
                                steps_per_quarter=1,
                                steps_per_bar=4),
        ]

        quantized_sequence = sequences_lib.quantize_note_sequence(
            note_sequence, steps_per_quarter=1)
        chords = chords_lib.event_list_chords(quantized_sequence, melodies)

        expected_chords = [[NO_CHORD, NO_CHORD, 'C', 'C'],
                           ['C', 'C', 'G7', 'G7']]

        self.assertEqual(expected_chords, chords)
Example #15
    def to_sequence(self, sequence_start_time=0.0, qpm=120.0):
        """Converts the ChordProgression to NoteSequence proto.

    This doesn't generate actual notes, but text annotations specifying the
    chord changes when they occur.

    Args:
      sequence_start_time: A time in seconds (float) that the first chord in
          the sequence will land on.
      qpm: Quarter notes per minute (float).

    Returns:
      A NoteSequence proto encoding the given chords as text annotations.
    """
        seconds_per_step = 60.0 / qpm / self.steps_per_quarter

        sequence = music_pb2.NoteSequence()
        sequence.tempos.add().qpm = qpm
        sequence.ticks_per_quarter = STANDARD_PPQ

        current_figure = NO_CHORD
        for step, figure in enumerate(self):
            if figure != current_figure:
                current_figure = figure
                chord = sequence.text_annotations.add()
                chord.time = step * seconds_per_step + sequence_start_time
                chord.text = figure
                chord.annotation_type = CHORD_SYMBOL

        return sequence
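
A short usage sketch (hedged: constructing a ChordProgression directly from a
list of chord figures follows the event-list pattern used elsewhere in this
library, but the exact constructor signature is an assumption):

chords = ChordProgression(['C', 'C', 'G7', 'G7'], steps_per_quarter=1)
sequence = chords.to_sequence(qpm=120.0)
# Each step lasts 60.0 / 120.0 / 1 = 0.5 seconds, so the sequence carries two
# text annotations: 'C' at time 0.0 and 'G7' at time 1.0 (the figure only
# changes at step 2).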
Example #16
    def testInferChordsForSequenceWithBeats(self):
        sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(
            sequence,
            0,
            [
                (60, 100, 0.0, 1.1),
                (64, 100, 0.0, 1.1),
                (67, 100, 0.0, 1.1),  # C
                (62, 100, 1.1, 1.9),
                (65, 100, 1.1, 1.9),
                (69, 100, 1.1, 1.9),  # Dm
                (60, 100, 1.9, 3.0),
                (65, 100, 1.9, 3.0),
                (69, 100, 1.9, 3.0),  # F
                (59, 100, 3.0, 4.5),
                (62, 100, 3.0, 4.5),
                (67, 100, 3.0, 4.5)
            ])  # G
        testing_lib.add_beats_to_sequence(sequence, [0.0, 1.1, 1.9, 1.9, 3.0])
        chord_inference.infer_chords_for_sequence(sequence)

        expected_chords = [('C', 0.0), ('Dm', 1.1), ('F', 1.9), ('G', 3.0)]
        chords = [(ta.text, ta.time) for ta in sequence.text_annotations
                  if ta.annotation_type == CHORD_SYMBOL]

        self.assertEqual(expected_chords, chords)
Example #17
    def testSequenceNotePitchVectors(self):
        sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(sequence, 0, [
            (60, 100, 0.0, 0.0),
            (62, 100, 0.0, 0.5),
            (60, 100, 1.5, 2.5),
            (64, 100, 2.0, 2.5),
            (67, 100, 2.25, 2.75),
            (70, 100, 2.5, 4.5),
            (60, 100, 6.0, 6.0),
        ])
        note_pitch_vectors = chord_inference.sequence_note_pitch_vectors(
            sequence, seconds_per_frame=1.0)

        expected_note_pitch_vectors = [
            [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ]

        self.assertEqual(expected_note_pitch_vectors,
                         note_pitch_vectors.tolist())
Example #18
    def testInstrumentInfo_NoteSequenceToPrettyMidi(self):
        source_sequence = music_pb2.NoteSequence()
        source_sequence.notes.add(pitch=60,
                                  start_time=0.0,
                                  end_time=0.5,
                                  velocity=80,
                                  instrument=0)
        source_sequence.notes.add(pitch=60,
                                  start_time=0.5,
                                  end_time=1.0,
                                  velocity=80,
                                  instrument=1)
        instrument_info1 = source_sequence.instrument_infos.add()
        instrument_info1.name = 'inst_0'
        instrument_info1.instrument = 0
        instrument_info2 = source_sequence.instrument_infos.add()
        instrument_info2.name = 'inst_1'
        instrument_info2.instrument = 1
        translated_midi = midi_io.sequence_proto_to_pretty_midi(
            source_sequence)
        translated_sequence = midi_io.midi_to_note_sequence(translated_midi)

        self.assertEqual(len(source_sequence.instrument_infos),
                         len(translated_sequence.instrument_infos))
        self.assertEqual(source_sequence.instrument_infos[0].name,
                         translated_sequence.instrument_infos[0].name)
        self.assertEqual(source_sequence.instrument_infos[1].name,
                         translated_sequence.instrument_infos[1].name)
Example #19
    def process(self, paths):
        midi_path, wav_path_base = paths

        if self._add_wav_glob:
            wav_paths = tf.io.gfile.glob(wav_path_base + '*')
        else:
            wav_paths = [wav_path_base]

        if midi_path:
            base_ns = midi_io.midi_file_to_note_sequence(midi_path)
            base_ns.filename = midi_path
        else:
            base_ns = music_pb2.NoteSequence()

        for wav_path in wav_paths:
            logging.info('Creating Example %s:%s', midi_path, wav_path)
            wav_data = tf.io.gfile.GFile(wav_path, 'rb').read()

            ns = copy.deepcopy(base_ns)

            # Use base names.
            ns.id = '%s:%s' % (wav_path.replace(
                self._wav_dir, ''), midi_path.replace(self._midi_dir, ''))

            Metrics.counter('create_example', 'read_midi_wav').inc()

            example = audio_label_data_utils.create_example(
                ns.id, ns, wav_data)

            Metrics.counter('create_example', 'created_example').inc()
            yield example
Example #20
  def testStartCapture_Iterate_Signal(self):
    start_time = 1.0
    captor = self.midi_hub.start_capture(
        120, start_time,
        stop_signal=midi_hub.MidiSignal(type='control_change', control=1))

    for msg in self.capture_messages[:-1]:
      threading.Timer(0.2 * msg.time, self.port.callback, args=[msg]).start()

    captured_seqs = []
    for captured_seq in captor.iterate(
        signal=midi_hub.MidiSignal(type='note_off')):
      captured_seqs.append(captured_seq)

    self.assertLen(captured_seqs, 4)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = 3
    testing_lib.add_track_to_sequence(expected_seq, 0, [Note(1, 64, 2, 3)])
    self.assertProtoEquals(captured_seqs[0], expected_seq)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = 4
    testing_lib.add_track_to_sequence(
        expected_seq, 0, [Note(1, 64, 2, 4), Note(2, 64, 3, 4)])
    self.assertProtoEquals(captured_seqs[1], expected_seq)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = 5
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, 5)])
    self.assertProtoEquals(captured_seqs[2], expected_seq)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = 6
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, 6)])
    self.assertProtoEquals(captured_seqs[3], expected_seq)
Example #21
def adjust_sequence_times(sequence, delta_time):
    """Adjusts note and total NoteSequence times by `delta_time`."""
    retimed_sequence = music_pb2.NoteSequence()
    retimed_sequence.CopyFrom(sequence)

    for note in retimed_sequence.notes:
        note.start_time += delta_time
        note.end_time += delta_time
    retimed_sequence.total_time += delta_time
    return retimed_sequence
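
For example, to shift a sequence one second later without mutating the input
(a sketch using only names that appear in this listing):

original = music_pb2.NoteSequence()
original.notes.add(pitch=60, start_time=0.0, end_time=1.0, velocity=80)
original.total_time = 1.0

shifted = adjust_sequence_times(original, 1.0)
# shifted.notes[0] now spans 1.0-2.0 and shifted.total_time == 2.0, while
# `original` is untouched because CopyFrom made a full copy first.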
Example #22
    def run(self,
            pipeline,
            batch_size=None,
            filters='program',
            sample=False,
            softmax_temperature=1.,
            normalize_velocity=False,
            options=None):
        metadata_list = []  # gather metadata about each item of the dataset
        apply_filters = '__program__' if filters == 'program' else True
        dataset = make_simple_dataset(
            self._load_data(tqdm.tqdm(pipeline),
                            apply_filters=apply_filters,
                            normalize_velocity=normalize_velocity,
                            metadata_list=metadata_list),
            output_types=self.input_types,
            output_shapes=self.input_shapes,
            batch_size=batch_size
            or self._cfg['data_prep'].get('val_batch_size'))
        output_ids = self.model.run(self.trainer.session,
                                    dataset,
                                    sample,
                                    softmax_temperature,
                                    options=options) or []
        sequences = [self.output_encoding.decode(ids) for ids in output_ids]
        merged_sequences = []
        instrument_id = 0
        for seq, meta in zip(sequences, metadata_list):
            instrument_id += 1
            while meta['input_index'] > len(merged_sequences) - 1:
                merged_sequences.append(music_pb2.NoteSequence())
                instrument_id = 0

            # Apply features (instrument, velocity)
            if meta['note_features'] is not None:
                if self._cfg['output_encoding'].get('use_velocity', False):
                    # If the output has velocity information, do not override it
                    del meta['note_features']['velocity']

                set_note_fields(seq,
                                **meta['note_features'],
                                instrument=instrument_id)
            else:
                # If the style input had no notes, force the output to be empty
                seq.Clear()

            # Merge
            merged_sequences[-1].notes.extend(seq.notes)
            merged_sequences[-1].total_time = max(
                merged_sequences[-1].total_time, seq.total_time)
            instrument_info = merged_sequences[-1].instrument_infos.add()
            instrument_info.instrument = instrument_id
            instrument_info.name = meta['filter_name']

        return merged_sequences
Example #23
    def testSequenceToValuedIntervals(self):
        sequence = music_pb2.NoteSequence()
        sequence.notes.add(pitch=60, start_time=1.0, end_time=2.0, velocity=80)
        # Should be dropped because it has zero duration.
        sequence.notes.add(pitch=60, start_time=3.0, end_time=3.0, velocity=90)

        intervals, pitches, velocities = metrics.sequence_to_valued_intervals(
            sequence)
        np.testing.assert_array_equal([[1., 2.]], intervals)
        np.testing.assert_array_equal([60], pitches)
        np.testing.assert_array_equal([80], velocities)
Example #24
  def testStartPlayback_Updates(self):
    start_time = time.time() + 0.1
    seq = music_pb2.NoteSequence()
    notes = [Note(0, 100, start_time, start_time + 101),
             Note(1, 100, start_time, start_time + 101)]
    testing_lib.add_track_to_sequence(seq, 0, notes)
    player = self.midi_hub.start_playback(seq, allow_updates=True)

    # Sleep past first note start.
    concurrency.Sleeper().sleep_until(start_time + 0.2)

    new_seq = music_pb2.NoteSequence()
    notes = [Note(1, 100, 0.0, 0.8), Note(2, 100, 0.0, 1.0),
             Note(11, 55, 0.3, 0.5), Note(40, 45, 0.4, 0.6)]
    notes = [Note(note.pitch, note.velocity, note.start + start_time,
                  note.end + start_time) for note in notes]
    testing_lib.add_track_to_sequence(new_seq, 0, notes)
    player.update_sequence(new_seq)

    # Finish playing sequence.
    concurrency.Sleeper().sleep(0.8)

    # Start and end the unclosed note from the first sequence.
    note_events = [(start_time, 'note_on', 0),
                   (start_time + 0.3, 'note_off', 0)]
    # The second note will not be played since it started before the update
    # and was not in the original sequence.
    del notes[1]
    for note in notes:
      note_events.append((note.start, 'note_on', note.pitch))
      note_events.append((note.end, 'note_off', note.pitch))
    note_events = collections.deque(sorted(note_events))
    while not self.port.message_queue.empty():
      msg = self.port.message_queue.get()
      note_event = note_events.popleft()
      self.assertEqual(msg.type, note_event[1])
      self.assertEqual(msg.note, note_event[2])
      self.assertAlmostEqual(msg.time, note_event[0], delta=0.01)

    self.assertFalse(note_events)
    player.stop()
Example #25
  def testStartPlayback_NoUpdates_UpdateError(self):
    # Use a time in the past to test handling of past notes.
    start_time = time.time()
    seq = music_pb2.NoteSequence()
    notes = [Note(0, 100, start_time + 100, start_time + 101)]
    testing_lib.add_track_to_sequence(seq, 0, notes)
    player = self.midi_hub.start_playback(seq, allow_updates=False)

    with self.assertRaises(midi_hub.MidiHubError):
      player.update_sequence(seq)

    player.stop()
Example #26
def stack_sequences(*seqs):
    """Stack note sequences on top of each other."""
    stacked_seqs = music_pb2.NoteSequence()
    stacked_seqs.total_time = max([seq.total_time for seq in seqs])
    for seq in seqs:
        for note in seq.notes:
            stacked_seqs.notes.add(
                pitch=note.pitch,
                start_time=note.start_time,
                end_time=note.end_time,
                velocity=note.velocity,
            )
    return stacked_seqs
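
A usage sketch layering two single-note sequences (only names already defined
in this listing are assumed):

seq_a = music_pb2.NoteSequence()
seq_a.notes.add(pitch=60, start_time=0.0, end_time=2.0, velocity=80)
seq_a.total_time = 2.0

seq_b = music_pb2.NoteSequence()
seq_b.notes.add(pitch=64, start_time=0.0, end_time=1.0, velocity=80)
seq_b.total_time = 1.0

combined = stack_sequences(seq_a, seq_b)
# combined contains both notes, and total_time == 2.0 (the max of the inputs).
# Note that this helper copies only pitch/timing/velocity, so per-note
# instrument assignments are dropped.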
Example #27
    def testEmptySequenceToPrettyMidi_DropEventsAfterLastNote(self):
        source_sequence = music_pb2.NoteSequence()

        # Translate without dropping.
        translated_midi = midi_io.sequence_proto_to_pretty_midi(
            source_sequence)
        self.assertLen(translated_midi.instruments, 1)
        self.assertEmpty(translated_midi.instruments[0].notes)

        # Translate dropping anything after 30 seconds.
        translated_midi = midi_io.sequence_proto_to_pretty_midi(
            source_sequence, drop_events_n_seconds_after_last_note=30)
        self.assertLen(translated_midi.instruments, 1)
        self.assertEmpty(translated_midi.instruments[0].notes)
Example #28
  def testAddKeysToSequence(self):
    note_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    note_sequence.tempos.add(qpm=60.0)
    testing_lib.add_key_signatures_to_sequence(note_sequence, [(0, 2), (7, 6)])
    note_sequence.total_time = 8.0

    expected_sequence = copy.deepcopy(note_sequence)
    del note_sequence.key_signatures[:]

    keys = [0, 0, 7]
    key_times = [2.0, 4.0, 6.0]
    chords_lib.add_keys_to_sequence(note_sequence, keys, key_times)

    self.assertEqual(expected_sequence, note_sequence)
Example #29
  def testAddChordsToSequence(self):
    note_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    note_sequence.tempos.add(qpm=60.0)
    testing_lib.add_chords_to_sequence(
        note_sequence, [('N.C.', 0), ('C', 2), ('G7', 6)])
    note_sequence.total_time = 8.0

    expected_sequence = copy.deepcopy(note_sequence)
    del note_sequence.text_annotations[:]

    chords = [NO_CHORD, 'C', 'C', 'G7']
    chord_times = [0.0, 2.0, 4.0, 6.0]
    chords_lib.add_chords_to_sequence(note_sequence, chords, chord_times)

    self.assertEqual(expected_sequence, note_sequence)
Example #30
    def __init__(self, tune_lines):
        self._ns = music_pb2.NoteSequence()
        # Standard ABC fields.
        self._ns.source_info.source_type = (
            music_pb2.NoteSequence.SourceInfo.SCORE_BASED)
        self._ns.source_info.encoding_type = (
            music_pb2.NoteSequence.SourceInfo.ABC)
        self._ns.source_info.parser = (
            music_pb2.NoteSequence.SourceInfo.MAGENTA_ABC)
        self._ns.ticks_per_quarter = constants.STANDARD_PPQ

        self._current_time = 0
        self._accidentals = ABCTune._sig_to_accidentals(0)
        self._bar_accidentals = {}
        self._current_unit_note_length = None
        self._current_expected_repeats = None

        # Default dynamic should be !mf! as per:
        # http://abcnotation.com/wiki/abc:standard:v2.1#decorations
        self._current_velocity = ABCTune.DECORATION_TO_VELOCITY['!mf!']

        self._in_header = True
        self._header_tempo_unit = None
        self._header_tempo_rate = None
        for line in tune_lines:
            line = re.sub('%.*$', '', line)  # Strip comments.
            line = line.strip()  # Strip whitespace.
            if not line:
                continue

            # If the line begins with a letter and a colon, it's an information
            # field. Extract it.
            info_field_match = ABCTune.INFORMATION_FIELD_PATTERN.match(line)
            if info_field_match:
                self._parse_information_field(info_field_match.group(1),
                                              info_field_match.group(2))
            else:
                if self._in_header:
                    self._set_values_from_header()
                    self._in_header = False
                self._parse_music_code(line)
        if self._in_header:
            self._set_values_from_header()

        self._finalize()

        if self._ns.notes:
            self._ns.total_time = self._ns.notes[-1].end_time
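
A hedged construction sketch (assuming ABCTune can be built directly from a
list of ABC notation lines, as the signature above suggests; the exact header
fields required may vary by parser version):

tune = ABCTune([
    'X:1',        # reference number (information field)
    'T:Example',  # title
    'L:1/4',      # unit note length
    'K:C',        # key signature; conventionally the last header field
    'CDEF GABc',  # one line of music code
])
# tune._ns now holds the parsed NoteSequence, with total_time set from the
# end of the last note.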