Example #1
  def testStartCapture_Iterate_Period_Overrun(self):
    start_time = 1.0
    captor = self.midi_hub.start_capture(
        120, start_time,
        stop_signal=midi_hub.MidiSignal(type='control_change', control=1))

    for msg in self.capture_messages[:-1]:
      threading.Timer(0.1 * msg.time, self.port.callback, args=[msg]).start()

    period = 0.26
    captured_seqs = []
    wall_start_time = time.time()
    for captured_seq in captor.iterate(period=period):
      time.sleep(0.5)
      captured_seqs.append(captured_seq)

    self.assertLen(captured_seqs, 2)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    end_time = captured_seqs[0].total_time
    self.assertAlmostEqual(wall_start_time + period, end_time, delta=0.005)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0, [Note(1, 64, 2, end_time)])
    self.assertProtoEquals(captured_seqs[0], expected_seq)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = 6
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, 6)])
    self.assertProtoEquals(captured_seqs[1], expected_seq)
Example #2
    def testStartCapture_Multiple(self):
        captor_1 = self.midi_hub.start_capture(
            120, 0.0, stop_signal=midi_hub.MidiSignal(note=3))
        captor_2 = self.midi_hub.start_capture(120,
                                               1.0,
                                               stop_signal=midi_hub.MidiSignal(
                                                   type='control_change',
                                                   control=1))

        self.send_capture_messages()

        captor_1.join()
        captor_2.join()

        captured_seq_1 = captor_1.captured_sequence()
        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        expected_seq.total_time = 4.0
        testing_lib.add_track_to_sequence(
            expected_seq, 0,
            [Note(0, 64, 0.01, 3),
             Note(1, 64, 2, 4),
             Note(2, 64, 3, 4)])
        self.assertProtoEquals(captured_seq_1, expected_seq)

        captured_seq_2 = captor_2.captured_sequence()
        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        expected_seq.total_time = 6.0
        testing_lib.add_track_to_sequence(
            expected_seq, 0,
            [Note(1, 64, 2, 5),
             Note(2, 64, 3, 4),
             Note(3, 64, 4, 6)])
        self.assertProtoEquals(captured_seq_2, expected_seq)
Example #3
  def testMixSequences(self):
    sample_rate = 10

    sequence1 = music_pb2.NoteSequence()
    sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=90)
    sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=90)
    sequence1.total_time = 2.0

    samples1 = np.linspace(0, 1, int(sample_rate * sequence1.total_time))

    sequence2 = music_pb2.NoteSequence()
    sequence2.notes.add(pitch=64, start_time=0.5, end_time=1.0, velocity=90)
    sequence2.total_time = 1.0

    samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))

    mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
        [samples1, samples2], sample_rate, [sequence1, sequence2])

    expected_sequence = music_pb2.NoteSequence()
    expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
    expected_sequence.notes.add(
        pitch=60, start_time=0.5, end_time=1.0, velocity=127)
    expected_sequence.notes.add(
        pitch=62, start_time=1.0, end_time=2.0, velocity=127)
    expected_sequence.notes.add(
        pitch=64, start_time=0.5, end_time=1.0, velocity=127)
    expected_sequence.notes.add(
        pitch=64, start_time=1.5, end_time=2.0, velocity=127)
    expected_sequence.total_time = 2.0

    self.assertProtoEquals(expected_sequence, mixed_sequence)

    expected_samples = (
        np.concatenate([samples2, samples2]) * .5 + samples1 * .5)
    np.testing.assert_array_equal(expected_samples, mixed_samples)
Example #4
    def testMelodyInferencePolyphonic(self):
        sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(
            sequence, 0, [
                (36, 100, 0.0, 4.0), (64, 100, 0.0, 1.0), (67, 100, 0.0, 1.0),
                (65, 100, 1.0, 2.0), (69, 100, 1.0, 2.0),
                (67, 100, 2.0, 4.0), (71, 100, 2.0, 3.0),
                (72, 100, 3.0, 4.0)
            ])

        melody_inference.infer_melody_for_sequence(sequence)

        expected_sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(
            expected_sequence, 0, [
                (36, 100, 0.0, 4.0), (64, 100, 0.0, 1.0), (67, 100, 0.0, 1.0),
                (65, 100, 1.0, 2.0), (69, 100, 1.0, 2.0),
                (67, 100, 2.0, 4.0), (71, 100, 2.0, 3.0),
                (72, 100, 3.0, 4.0)
            ])
        testing_lib.add_track_to_sequence(
            expected_sequence, 1, [
                (67, 127, 0.0, 1.0), (69, 127, 1.0, 2.0),
                (71, 127, 2.0, 3.0), (72, 127, 3.0, 4.0)
            ])

        self.assertEqual(expected_sequence, sequence)
Example #5
    def testStartCapture_Callback_Period(self):
        start_time = 1.0
        captor = self.midi_hub.start_capture(120, start_time)

        for msg in self.capture_messages[:-1]:
            threading.Timer(0.1 * msg.time, self.port.callback,
                            args=[msg]).start()

        period = 0.26
        wall_start_time = time.time()
        captured_seqs = []

        def fn(captured_seq):
            self.assertAlmostEqual(0, (time.time() - wall_start_time) % period,
                                   delta=0.01)
            captured_seqs.append(captured_seq)

        name = captor.register_callback(fn, period=period)
        time.sleep(1.0)
        captor.cancel_callback(name)

        self.assertEqual(3, len(captured_seqs))

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        end_time = captured_seqs[0].total_time
        self.assertAlmostEqual(wall_start_time + period, end_time, delta=0.005)
        expected_seq.total_time = end_time
        testing_lib.add_track_to_sequence(expected_seq, 0,
                                          [Note(1, 64, 2, end_time)])
        self.assertProtoEquals(captured_seqs[0], expected_seq)

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        end_time = captured_seqs[1].total_time
        self.assertAlmostEqual(wall_start_time + 2 * period,
                               end_time,
                               delta=0.005)
        expected_seq.total_time = end_time
        testing_lib.add_track_to_sequence(
            expected_seq, 0,
            [Note(1, 64, 2, 5),
             Note(2, 64, 3, 4),
             Note(3, 64, 4, end_time)])
        self.assertProtoEquals(captured_seqs[1], expected_seq)

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        end_time = captured_seqs[2].total_time
        self.assertAlmostEqual(wall_start_time + 3 * period,
                               end_time,
                               delta=0.005)
        expected_seq.total_time = end_time
        testing_lib.add_track_to_sequence(
            expected_seq, 0,
            [Note(1, 64, 2, 5),
             Note(2, 64, 3, 4),
             Note(3, 64, 4, end_time)])
        self.assertProtoEquals(captured_seqs[2], expected_seq)
Example #6
    def testMixSequencesWithSustain(self):
        sample_rate = 10

        sequence1 = music_pb2.NoteSequence()
        sequence1.notes.add(pitch=60,
                            start_time=0.5,
                            end_time=0.6,
                            velocity=90)
        sequence1.notes.add(pitch=62,
                            start_time=1.0,
                            end_time=2.0,
                            velocity=90)
        sequence1.total_time = 2.0
        testing_lib.add_control_changes_to_sequence(sequence1, 0,
                                                    [(0.0, 64, 127),
                                                     (1.0, 64, 0)])

        samples1 = np.linspace(0, 1, int(sample_rate * sequence1.total_time))

        sequence2 = music_pb2.NoteSequence()
        sequence2.notes.add(pitch=64,
                            start_time=0.5,
                            end_time=0.6,
                            velocity=90)
        sequence2.total_time = 1.0
        testing_lib.add_control_changes_to_sequence(sequence2, 0,
                                                    [(0.0, 64, 127),
                                                     (0.9, 64, 0)])

        samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))

        mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
            [samples1, samples2], sample_rate, [sequence1, sequence2])

        expected_sequence = music_pb2.NoteSequence()
        expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
        expected_sequence.notes.add(pitch=60,
                                    start_time=0.5,
                                    end_time=1.0,
                                    velocity=90)
        expected_sequence.notes.add(pitch=62,
                                    start_time=1.0,
                                    end_time=2.0,
                                    velocity=90)
        expected_sequence.notes.add(pitch=64,
                                    start_time=0.5,
                                    end_time=0.9,
                                    velocity=90)
        expected_sequence.notes.add(pitch=64,
                                    start_time=1.5,
                                    end_time=1.9,
                                    velocity=90)
        expected_sequence.total_time = 2.0

        self.assertProtoEquals(expected_sequence, mixed_sequence)

        expected_samples = (
            np.concatenate([samples2, samples2]) * .5 + samples1 * .5)
        np.testing.assert_array_equal(expected_samples, mixed_samples)
Example #7
    def testMixSequencesTotalTime(self):
        sample_rate = 10

        sequence1 = music_pb2.NoteSequence()
        sequence1.notes.add(pitch=60,
                            start_time=0.5,
                            end_time=1.0,
                            velocity=90)
        sequence1.notes.add(pitch=62,
                            start_time=1.0,
                            end_time=1.5,
                            velocity=90)
        sequence1.total_time = 1.5

        samples1 = np.linspace(0, 1, int(sample_rate * 2))

        sequence2 = music_pb2.NoteSequence()
        sequence2.notes.add(pitch=64,
                            start_time=0.5,
                            end_time=0.9,
                            velocity=90)
        sequence2.total_time = 0.9

        samples2 = np.linspace(0, 1, int(sample_rate * 1))

        mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
            [samples1, samples2], sample_rate, [sequence1, sequence2])

        expected_sequence = music_pb2.NoteSequence()
        expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
        expected_sequence.notes.add(pitch=60,
                                    start_time=0.5,
                                    end_time=1.0,
                                    velocity=127)
        expected_sequence.notes.add(pitch=62,
                                    start_time=1.0,
                                    end_time=1.5,
                                    velocity=127)
        expected_sequence.notes.add(pitch=64,
                                    start_time=0.5,
                                    end_time=0.9,
                                    velocity=127)
        expected_sequence.notes.add(pitch=64,
                                    start_time=1.5,
                                    end_time=1.9,
                                    velocity=127)

        # Expected time is 1.9 because the sequences are repeated according to the
        # length of their associated audio. So sequence1 is not repeated at all
        # (audio is 2 seconds) and sequence2 is repeated once after shifting all the
        # notes by the audio length of 1 second. The final total_time is left as is
        # after the last repeat, so it ends up being 1 + .9 seconds.
        expected_sequence.total_time = 1.9

        self.assertProtoEquals(expected_sequence, mixed_sequence)

        expected_samples = (
            np.concatenate([samples2, samples2]) * .5 + samples1 * .5)
        np.testing.assert_array_equal(expected_samples, mixed_samples)
Example #8
    def testMixSequencesLongerNoteSequence(self):
        sample_rate = 10

        sequence1 = music_pb2.NoteSequence()
        sequence1.notes.add(pitch=60,
                            start_time=0.5,
                            end_time=1.0,
                            velocity=90)
        sequence1.notes.add(pitch=62,
                            start_time=1.0,
                            end_time=2.0,
                            velocity=90)
        sequence1.total_time = 2.0

        # samples1 will be .1 seconds shorter than sequence1
        samples1 = np.linspace(0, 1,
                               int(sample_rate * (sequence1.total_time - .1)))

        sequence2 = music_pb2.NoteSequence()
        sequence2.notes.add(pitch=64,
                            start_time=0.5,
                            end_time=1.0,
                            velocity=90)
        sequence2.total_time = 1.0

        samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))

        mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
            [samples1, samples2], sample_rate, [sequence1, sequence2])

        expected_sequence = music_pb2.NoteSequence()
        expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
        expected_sequence.notes.add(pitch=60,
                                    start_time=0.5,
                                    end_time=1.0,
                                    velocity=127)
        expected_sequence.notes.add(pitch=62,
                                    start_time=1.0,
                                    end_time=2.0,
                                    velocity=127)
        expected_sequence.notes.add(pitch=64,
                                    start_time=0.5,
                                    end_time=1.0,
                                    velocity=127)
        expected_sequence.notes.add(pitch=64,
                                    start_time=1.5,
                                    end_time=2.0,
                                    velocity=127)
        expected_sequence.total_time = 2.0

        self.assertProtoEquals(expected_sequence, mixed_sequence)

        # We expect samples1 to have 2 samples of padding and samples2 to be
        # repeated once in full and then once more with a single sample.
        expected_samples = (
            np.concatenate([samples2, samples2, [samples2[0]]]) * .5 +
            np.concatenate([samples1, [0, 0]]) * .5)
        np.testing.assert_array_equal(expected_samples, mixed_samples)
Example #9
  def testStartCapture_MidCapture(self):
    start_time = 1.0
    captor = self.midi_hub.start_capture(120, start_time)

    # Receive the first 6 messages.
    for msg in self.capture_messages[0:6]:
      self.port.callback(msg)
    time.sleep(0.1)

    end_time = 3.5
    captured_seq = captor.captured_sequence(end_time)
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0, [Note(1, 64, 2, 3.5), Note(2, 64, 3, 3.5)])
    self.assertProtoEquals(captured_seq, expected_seq)

    end_time = 4.5
    captured_seq = captor.captured_sequence(end_time)
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 4.5), Note(2, 64, 3, 4.5), Note(3, 64, 4, 4.5)])
    self.assertProtoEquals(captured_seq, expected_seq)

    end_time = 6.0
    captured_seq = captor.captured_sequence(end_time)
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 6), Note(2, 64, 3, 6), Note(3, 64, 4, 6)])
    self.assertProtoEquals(captured_seq, expected_seq)

    # Receive the rest of the messages.
    for msg in self.capture_messages[6:]:
      self.port.callback(msg)
    time.sleep(0.1)

    end_time = 6.0
    captured_seq = captor.captured_sequence(end_time)
    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = end_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, 6)])
    self.assertProtoEquals(captured_seq, expected_seq)

    captor.stop()
Example #10
    def testStartCapture_Iterate_Signal(self):
        start_time = 1.0
        captor = self.midi_hub.start_capture(120,
                                             start_time,
                                             stop_signal=midi_hub.MidiSignal(
                                                 type='control_change',
                                                 control=1))

        for msg in self.capture_messages[:-1]:
            threading.Timer(0.2 * msg.time, self.port.callback,
                            args=[msg]).start()

        captured_seqs = []
        for captured_seq in captor.iterate(signal=midi_hub.MidiSignal(
                type='note_off')):
            captured_seqs.append(captured_seq)

        self.assertEqual(4, len(captured_seqs))

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        expected_seq.total_time = 3
        testing_lib.add_track_to_sequence(expected_seq, 0, [Note(1, 64, 2, 3)])
        self.assertProtoEquals(captured_seqs[0], expected_seq)

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        expected_seq.total_time = 4
        testing_lib.add_track_to_sequence(
            expected_seq, 0,
            [Note(1, 64, 2, 4), Note(2, 64, 3, 4)])
        self.assertProtoEquals(captured_seqs[1], expected_seq)

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        expected_seq.total_time = 5
        testing_lib.add_track_to_sequence(
            expected_seq, 0,
            [Note(1, 64, 2, 5),
             Note(2, 64, 3, 4),
             Note(3, 64, 4, 5)])
        self.assertProtoEquals(captured_seqs[2], expected_seq)

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        expected_seq.total_time = 6
        testing_lib.add_track_to_sequence(
            expected_seq, 0,
            [Note(1, 64, 2, 5),
             Note(2, 64, 3, 4),
             Note(3, 64, 4, 6)])
        self.assertProtoEquals(captured_seqs[3], expected_seq)
Example #11
    def testMelodyInferenceSingleNote(self):
        sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.5, 1.0)])

        melody_inference.infer_melody_for_sequence(sequence)

        expected_sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(expected_sequence, 0,
                                          [(60, 100, 0.5, 1.0)])
        testing_lib.add_track_to_sequence(expected_sequence, 1,
                                          [(60, 127, 0.5, 1.0)])

        self.assertEqual(expected_sequence, sequence)
Example #12
    def testStartPlayback_Updates(self):
        start_time = time.time() + 0.1
        seq = music_pb2.NoteSequence()
        notes = [
            Note(0, 100, start_time, start_time + 101),
            Note(1, 100, start_time, start_time + 101)
        ]
        testing_lib.add_track_to_sequence(seq, 0, notes)
        player = self.midi_hub.start_playback(seq, allow_updates=True)

        # Sleep past first note start.
        concurrency.Sleeper().sleep_until(start_time + 0.2)

        new_seq = music_pb2.NoteSequence()
        notes = [
            Note(1, 100, 0.0, 0.8),
            Note(2, 100, 0.0, 1.0),
            Note(11, 55, 0.3, 0.5),
            Note(40, 45, 0.4, 0.6)
        ]
        notes = [
            Note(note.pitch, note.velocity, note.start + start_time,
                 note.end + start_time) for note in notes
        ]
        testing_lib.add_track_to_sequence(new_seq, 0, notes)
        player.update_sequence(new_seq)

        # Finish playing sequence.
        concurrency.Sleeper().sleep(0.8)

        # Start and end the unclosed note from the first sequence.
        note_events = [(start_time, 'note_on', 0),
                       (start_time + 0.3, 'note_off', 0)]
        # The second note will not be played since it started before the update
        # and was not in the original sequence.
        del notes[1]
        for note in notes:
            note_events.append((note.start, 'note_on', note.pitch))
            note_events.append((note.end, 'note_off', note.pitch))
        note_events = collections.deque(sorted(note_events))
        while not self.port.message_queue.empty():
            msg = self.port.message_queue.get()
            note_event = note_events.popleft()
            self.assertEqual(msg.type, note_event[1])
            self.assertEqual(msg.note, note_event[2])
            self.assertAlmostEqual(msg.time, note_event[0], delta=0.01)

        self.assertTrue(not note_events)
        player.stop()
Example #13
    def testInstrumentInfo_NoteSequenceToPrettyMidi(self):
        source_sequence = music_pb2.NoteSequence()
        source_sequence.notes.add(pitch=60,
                                  start_time=0.0,
                                  end_time=0.5,
                                  velocity=80,
                                  instrument=0)
        source_sequence.notes.add(pitch=60,
                                  start_time=0.5,
                                  end_time=1.0,
                                  velocity=80,
                                  instrument=1)
        instrument_info1 = source_sequence.instrument_infos.add()
        instrument_info1.name = 'inst_0'
        instrument_info1.instrument = 0
        instrument_info2 = source_sequence.instrument_infos.add()
        instrument_info2.name = 'inst_1'
        instrument_info2.instrument = 1
        translated_midi = midi_io.sequence_proto_to_pretty_midi(
            source_sequence)
        translated_sequence = midi_io.midi_to_note_sequence(translated_midi)

        self.assertEqual(len(source_sequence.instrument_infos),
                         len(translated_sequence.instrument_infos))
        self.assertEqual(source_sequence.instrument_infos[0].name,
                         translated_sequence.instrument_infos[0].name)
        self.assertEqual(source_sequence.instrument_infos[1].name,
                         translated_sequence.instrument_infos[1].name)
Example #14
    def process(self, paths):
        midi_path, wav_path_base = paths

        if self._add_wav_glob:
            wav_paths = tf.io.gfile.glob(wav_path_base + '*')
        else:
            wav_paths = [wav_path_base]

        if midi_path:
            base_ns = midi_io.midi_file_to_note_sequence(midi_path)
            base_ns.filename = midi_path
        else:
            base_ns = music_pb2.NoteSequence()

        for wav_path in wav_paths:
            logging.info('Creating Example %s:%s', midi_path, wav_path)
            wav_data = tf.io.gfile.GFile(wav_path, 'rb').read()

            ns = copy.deepcopy(base_ns)

            # Use base names.
            ns.id = '%s:%s' % (wav_path.replace(
                self._wav_dir, ''), midi_path.replace(self._midi_dir, ''))

            Metrics.counter('create_example', 'read_midi_wav').inc()

            example = audio_label_data_utils.create_example(
                ns.id, ns, wav_data)

            Metrics.counter('create_example', 'created_example').inc()
            yield example
Example #15
  def testEncodeNoteSequence(self):
    encoder = music_encoders.TextMelodyEncoder(
        steps_per_quarter=4, min_pitch=21, max_pitch=108)
    encoder_absolute = music_encoders.TextMelodyEncoderAbsolute(
        steps_per_second=4, min_pitch=21, max_pitch=108)

    ns = music_pb2.NoteSequence()
    ns.tempos.add(qpm=60)
    testing_lib.add_track_to_sequence(
        ns, 0,
        [(60, 127, 0.0, 0.25), (62, 127, 0.25, 0.75), (64, 127, 1.25, 2.0)])
    ids = encoder.encode_note_sequence(ns)
    ids_absolute = encoder_absolute.encode_note_sequence(ns)

    expected_ids = [
        43,  # ON(60)
        45,  # ON(62)
        2,   # HOLD(62)
        3,   # OFF(62)
        2,   # REST
        47,  # ON(64)
        2,   # HOLD(64)
        2    # HOLD(64)
    ]

    self.assertEqual(expected_ids, ids)
    self.assertEqual(expected_ids, ids_absolute)
Example #16
  def testEncodeNoteSequenceAddEos(self):
    encoder = music_encoders.MidiPerformanceEncoder(
        steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,
        add_eos=True)

    ns = music_pb2.NoteSequence()
    testing_lib.add_track_to_sequence(
        ns, 0, [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 127, 1.0, 2.0)])
    ids = encoder.encode_note_sequence(ns)

    expected_ids = [
        302,  # VELOCITY(25)
        41,   # NOTE-ON(60)
        45,   # NOTE-ON(64)
        277,  # TIME-SHIFT(100)
        309,  # VELOCITY(32)
        48,   # NOTE-ON(67)
        277,  # TIME-SHIFT(100)
        136,  # NOTE-OFF(67)
        277,  # TIME-SHIFT(100)
        133,  # NOTE-OFF(64)
        277,  # TIME-SHIFT(100)
        129,  # NOTE-OFF(60)
        1     # EOS
    ]

    self.assertEqual(expected_ids, ids)
Example #17
    def testInferChordsForSequence(self):
        sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(
            sequence,
            0,
            [
                (60, 100, 0.0, 1.0),
                (64, 100, 0.0, 1.0),
                (67, 100, 0.0, 1.0),  # C
                (62, 100, 1.0, 2.0),
                (65, 100, 1.0, 2.0),
                (69, 100, 1.0, 2.0),  # Dm
                (60, 100, 2.0, 3.0),
                (65, 100, 2.0, 3.0),
                (69, 100, 2.0, 3.0),  # F
                (59, 100, 3.0, 4.0),
                (62, 100, 3.0, 4.0),
                (67, 100, 3.0, 4.0)
            ])  # G
        quantized_sequence = sequences_lib.quantize_note_sequence(
            sequence, steps_per_quarter=4)
        chord_inference.infer_chords_for_sequence(quantized_sequence,
                                                  chords_per_bar=2)

        expected_chords = [('C', 0.0), ('Dm', 1.0), ('F', 2.0), ('G', 3.0)]
        chords = [(ta.text, ta.time)
                  for ta in quantized_sequence.text_annotations]

        self.assertEqual(expected_chords, chords)
Example #18
  def testDecode(self):
    encoder = music_encoders.MidiPerformanceEncoder(
        steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,
        ngrams=[(277, 129)])

    ids = [
        302,  # VELOCITY(25)
        41,   # NOTE-ON(60)
        310   # TIME-SHIFT(100), NOTE-OFF(60)
    ]

    # The decode method returns a MIDI filename; read it back and convert it
    # to a NoteSequence.
    filename = encoder.decode(ids)
    ns = magenta.music.midi_file_to_sequence_proto(filename)

    # Remove default tempo & time signature.
    del ns.tempos[:]
    del ns.time_signatures[:]

    expected_ns = music_pb2.NoteSequence(ticks_per_quarter=220)
    testing_lib.add_track_to_sequence(expected_ns, 0, [(60, 97, 0.0, 1.0)])

    # Add source info fields.
    expected_ns.source_info.encoding_type = (
        music_pb2.NoteSequence.SourceInfo.MIDI)
    expected_ns.source_info.parser = (
        music_pb2.NoteSequence.SourceInfo.PRETTY_MIDI)

    self.assertEqual(expected_ns, ns)
Example #19
    def testInferChordsForSequenceWithBeats(self):
        sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(
            sequence,
            0,
            [
                (60, 100, 0.0, 1.1),
                (64, 100, 0.0, 1.1),
                (67, 100, 0.0, 1.1),  # C
                (62, 100, 1.1, 1.9),
                (65, 100, 1.1, 1.9),
                (69, 100, 1.1, 1.9),  # Dm
                (60, 100, 1.9, 3.0),
                (65, 100, 1.9, 3.0),
                (69, 100, 1.9, 3.0),  # F
                (59, 100, 3.0, 4.5),
                (62, 100, 3.0, 4.5),
                (67, 100, 3.0, 4.5)
            ])  # G
        testing_lib.add_beats_to_sequence(sequence, [0.0, 1.1, 1.9, 1.9, 3.0])
        chord_inference.infer_chords_for_sequence(sequence)

        expected_chords = [('C', 0.0), ('Dm', 1.1), ('F', 1.9), ('G', 3.0)]
        chords = [(ta.text, ta.time) for ta in sequence.text_annotations
                  if ta.annotation_type == CHORD_SYMBOL]

        self.assertEqual(expected_chords, chords)
Example #20
    def testSequenceNotePitchVectors(self):
        sequence = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(sequence, 0, [
            (60, 100, 0.0, 0.0),
            (62, 100, 0.0, 0.5),
            (60, 100, 1.5, 2.5),
            (64, 100, 2.0, 2.5),
            (67, 100, 2.25, 2.75),
            (70, 100, 2.5, 4.5),
            (60, 100, 6.0, 6.0),
        ])
        note_pitch_vectors = chord_inference.sequence_note_pitch_vectors(
            sequence, seconds_per_frame=1.0)

        expected_note_pitch_vectors = [
            [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ]

        self.assertEqual(expected_note_pitch_vectors,
                         note_pitch_vectors.tolist())
Example #21
    def to_sequence_fn(eager_note_croppings, eager_instrument_families):
        eager_note_croppings = eager_note_croppings.numpy()
        eager_instrument_families = eager_instrument_families.numpy()
        sequence = music_pb2.NoteSequence()
        sequence.tempos.add().qpm = 120
        sequence.ticks_per_quarter = 220
        distinct_families_list = []
        for i in range(len(eager_note_croppings)):
            cropping = NoteCropping(*eager_note_croppings[i])
            family = eager_instrument_families[i].argmax()

            if family not in distinct_families_list:
                distinct_families_list.append(family)

            note = sequence.notes.add()
            note.instrument = distinct_families_list.index(family)
            note.program = instrument_family_mappings.family_to_midi_instrument[
                family]
            note.start_time = cropping.start_idx / hparams.sample_rate
            note.end_time = cropping.end_idx / hparams.sample_rate
            note.pitch = cropping.pitch
            note.velocity = 70
            if note.end_time > sequence.total_time:
                sequence.total_time = note.end_time
        return sequence.SerializeToString()
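
Because to_sequence_fn calls .numpy() on its arguments, it only runs on eager tensors; below is a minimal sketch of driving it through tf.py_function from graph code. The tensor values and the (pitch, start_idx, end_idx) field order of NoteCropping are illustrative assumptions, not taken from the source.

import tensorflow as tf

# Hypothetical inputs: one note at pitch 60 spanning samples [0, 16000).
# The NoteCropping field order (pitch, start_idx, end_idx) is assumed here.
note_croppings = tf.constant([[60, 0, 16000]], dtype=tf.int64)
# One-hot instrument-family vector for that note (family count assumed).
instrument_families = tf.constant([[0, 1, 0, 0, 0]], dtype=tf.int64)

# tf.py_function executes to_sequence_fn eagerly and returns the serialized
# NoteSequence bytes as a tf.string tensor.
serialized_sequence = tf.py_function(
    to_sequence_fn, [note_croppings, instrument_families], tf.string)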
Example #22
    def to_sequence(self, sequence_start_time=0.0, qpm=120.0):
        """Converts the ChordProgression to NoteSequence proto.

    This doesn't generate actual notes, but text annotations specifying the
    chord changes when they occur.

    Args:
      sequence_start_time: A time in seconds (float) that the first chord in
          the sequence will land on.
      qpm: Quarter notes per minute (float).

    Returns:
      A NoteSequence proto encoding the given chords as text annotations.
    """
        seconds_per_step = 60.0 / qpm / self.steps_per_quarter

        sequence = music_pb2.NoteSequence()
        sequence.tempos.add().qpm = qpm
        sequence.ticks_per_quarter = STANDARD_PPQ

        current_figure = NO_CHORD
        for step, figure in enumerate(self):
            if figure != current_figure:
                current_figure = figure
                chord = sequence.text_annotations.add()
                chord.time = step * seconds_per_step + sequence_start_time
                chord.text = figure
                chord.annotation_type = CHORD_SYMBOL

        return sequence
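
A minimal usage sketch for to_sequence, assuming a ChordProgression can be built from a list of chord figures (one per step) with a steps_per_quarter argument, as in magenta.music.chords_lib:

# Assumed constructor: one chord figure per step, one step per quarter note.
progression = chords_lib.ChordProgression(
    ['C', 'C', 'G7', 'G7'], steps_per_quarter=1)
seq = progression.to_sequence(sequence_start_time=0.0, qpm=120.0)
# At qpm=120 each step lasts 0.5 s, so seq.text_annotations holds a
# CHORD_SYMBOL annotation for 'C' at 0.0 s and for 'G7' at 1.0 s; the
# repeated 'C' at step 1 does not emit a duplicate annotation.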
Example #23
    def testEncodeNoteSequenceNGrams(self):
        encoder = music_encoders.MidiPerformanceEncoder(steps_per_second=100,
                                                        num_velocity_bins=32,
                                                        min_pitch=21,
                                                        max_pitch=108,
                                                        ngrams=[
                                                            (41, 45),
                                                            (277, 309, 300),
                                                            (309, 48),
                                                            (277, 129, 130)
                                                        ])

        ns = music_pb2.NoteSequence()
        testing_lib.add_track_to_sequence(ns, 0, [(60, 100, 0.0, 4.0),
                                                  (64, 100, 0.0, 3.0),
                                                  (67, 127, 1.0, 2.0)])
        ids = encoder.encode_note_sequence(ns)

        expected_ids = [
            302,  # VELOCITY(25)
            310,  # NOTE-ON(60), NOTE-ON(64)
            277,  # TIME-SHIFT(100)
            312,  # VELOCITY(32), NOTE-ON(67)
            277,  # TIME-SHIFT(100)
            136,  # NOTE-OFF(67)
            277,  # TIME-SHIFT(100)
            133,  # NOTE-OFF(64)
            277,  # TIME-SHIFT(100)
            129  # NOTE-OFF(60)
        ]

        self.assertEqual(expected_ids, ids)
Example #24
    def testSplitMidi(self):
        sequence = music_pb2.NoteSequence()
        sequence.notes.add(pitch=60, start_time=1.0, end_time=2.9)
        sequence.notes.add(pitch=60, start_time=8.0, end_time=11.0)
        sequence.notes.add(pitch=60, start_time=14.0, end_time=17.0)
        sequence.notes.add(pitch=60, start_time=20.0, end_time=23.0)
        sequence.total_time = 25.

        sample_rate = 160
        samples = np.zeros(sample_rate * int(sequence.total_time))
        splits = audio_label_data_utils.find_split_points(
            sequence, samples, sample_rate, 0, 3)

        self.assertEqual(splits,
                         [0., 3., 6., 9., 12., 15., 18., 21., 24., 25.])

        samples[int(8.5 * sample_rate)] = 1
        samples[int(8.5 * sample_rate) + 1] = -1
        splits = audio_label_data_utils.find_split_points(
            sequence, samples, sample_rate, 0, 3)

        self.assertEqual(splits, [
            0.0, 3.0, 6.0, 8.50625, 11.50625, 14.50625, 17.50625, 20.50625,
            23.50625, 25.
        ])
Example #25
    def testStartPlayback_NoUpdates(self):
        # Use a time in the past to test handling of past notes.
        start_time = time.time() - 0.05
        seq = music_pb2.NoteSequence()
        notes = [
            Note(12, 100, 0.0, 1.0),
            Note(11, 55, 0.1, 0.5),
            Note(40, 45, 0.2, 0.6)
        ]
        notes = [
            Note(note.pitch, note.velocity, note.start + start_time,
                 note.end + start_time) for note in notes
        ]
        testing_lib.add_track_to_sequence(seq, 0, notes)
        player = self.midi_hub.start_playback(seq, allow_updates=False)
        player.join()

        note_events = []
        for note in notes:
            note_events.append((note.start, 'note_on', note.pitch))
            note_events.append((note.end, 'note_off', note.pitch))

        # The first note on will not be sent since it started before
        # `start_playback` was called.
        del note_events[0]

        note_events = collections.deque(sorted(note_events))
        while not self.port.message_queue.empty():
            msg = self.port.message_queue.get()
            note_event = note_events.popleft()
            self.assertEqual(msg.type, note_event[1])
            self.assertEqual(msg.note, note_event[2])
            self.assertAlmostEqual(msg.time, note_event[0], delta=0.01)

        self.assertTrue(not note_events)
Example #26
    def testEncodeEmptyNoteSequence(self):
        encoder = music_encoders.MidiPerformanceEncoder(steps_per_second=100,
                                                        num_velocity_bins=32,
                                                        min_pitch=21,
                                                        max_pitch=108)
        ids = encoder.encode_note_sequence(music_pb2.NoteSequence())
        self.assertEqual([], ids)
Example #27
    def testEventListChordsWithMelodies(self):
        note_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
        note_sequence.tempos.add(qpm=60.0)
        testing_lib.add_chords_to_sequence(note_sequence,
                                           [('N.C.', 0), ('C', 2), ('G7', 6)])
        note_sequence.total_time = 8.0

        melodies = [
            melodies_lib.Melody([60, -2, -2, -1],
                                start_step=0,
                                steps_per_quarter=1,
                                steps_per_bar=4),
            melodies_lib.Melody([62, -2, -2, -1],
                                start_step=4,
                                steps_per_quarter=1,
                                steps_per_bar=4),
        ]

        quantized_sequence = sequences_lib.quantize_note_sequence(
            note_sequence, steps_per_quarter=1)
        chords = chords_lib.event_list_chords(quantized_sequence, melodies)

        expected_chords = [[NO_CHORD, NO_CHORD, 'C', 'C'],
                           ['C', 'C', 'G7', 'G7']]

        self.assertEqual(expected_chords, chords)
Example #28
def mix_sequences(individual_samples, sample_rate, individual_sequences):
    """Mix multiple audio/notesequence pairs together.

  All sequences will be repeated until they are as long as the longest sequence.

  Note that the mixed sequence will contain only the (sustain-processed) notes
  from the individual sequences. All other control changes and metadata will not
  be preserved.

  Args:
    individual_samples: A list of audio samples to mix.
    sample_rate: Rate at which to interpret the samples
    individual_sequences: A list of NoteSequences to mix.

  Returns:
    mixed_samples: The mixed audio.
    mixed_sequence: The mixed NoteSequence.
  """
    # Ensure that samples are always at least as long as their paired sequences.
    for i, (samples, sequence) in enumerate(
            zip(individual_samples, individual_sequences)):
        if len(samples) / sample_rate < sequence.total_time:
            padding = int(
                math.ceil((sequence.total_time - len(samples) / sample_rate) *
                          sample_rate))
            individual_samples[i] = np.pad(samples, [0, padding], 'constant')

    # Repeat each ns/wav pair to be as long as the longest wav.
    max_duration = np.max([len(s) for s in individual_samples]) / sample_rate

    extended_samples = []
    extended_sequences = []
    for samples, sequence in zip(individual_samples, individual_sequences):
        extended_samples.append(
            audio_io.repeat_samples_to_duration(samples, sample_rate,
                                                max_duration))
        extended_sequences.append(
            sequences_lib.repeat_sequence_to_duration(
                sequence,
                max_duration,
                sequence_duration=len(samples) / sample_rate))

    # Mix samples and sequences together
    mixed_samples = np.zeros_like(extended_samples[0])
    for samples in extended_samples:
        mixed_samples += samples / len(extended_samples)

    mixed_sequence = music_pb2.NoteSequence()
    mixed_sequence.ticks_per_quarter = constants.STANDARD_PPQ
    del mixed_sequence.notes[:]
    for sequence in extended_sequences:
        # Process sustain changes before copying notes.
        sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
        if sus_sequence.total_time > mixed_sequence.total_time:
            mixed_sequence.total_time = sus_sequence.total_time
        # TODO(fjord): Manage instrument/program numbers.
        mixed_sequence.notes.extend(sus_sequence.notes)

    return mixed_samples, mixed_sequence
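
A self-contained sketch of calling mix_sequences on two synthetic pairs; the values (and the music_pb2 import path, which varies across magenta versions) are illustrative:

import numpy as np
from magenta.protobuf import music_pb2  # import path may vary by version

sample_rate = 16000

# One second of noise paired with a one-note sequence.
samples_a = np.random.uniform(-1, 1, sample_rate)
seq_a = music_pb2.NoteSequence()
seq_a.notes.add(pitch=60, start_time=0.0, end_time=1.0, velocity=80)
seq_a.total_time = 1.0

# Two seconds of noise paired with a two-second note.
samples_b = np.random.uniform(-1, 1, 2 * sample_rate)
seq_b = music_pb2.NoteSequence()
seq_b.notes.add(pitch=64, start_time=0.0, end_time=2.0, velocity=80)
seq_b.total_time = 2.0

# The shorter pair is repeated out to two seconds, then the audio is
# averaged: mixed_samples has length 2 * sample_rate, and mixed_sequence
# contains pitch 60 twice (at 0.0 s and 1.0 s) plus pitch 64 once.
mixed_samples, mixed_sequence = mix_sequences(
    [samples_a, samples_b], sample_rate, [seq_a, seq_b])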
Example #29
    def generate(self,
                 num_steps=128,
                 temperature=1.0,
                 steps_per_second_avail=False,
                 empty=False):
        """
        generates a song.
        """
        if hasattr(self, 'num_steps'):
            num_steps = self.num_steps
        if hasattr(self, 'temperature'):
            temperature = self.temperature

        input_sequence = self.sequence

        if empty:
            input_sequence = music_pb2.NoteSequence()
            input_sequence.tempos.add(qpm=80)

        qpm = input_sequence.tempos[0].qpm
        if steps_per_second_avail:
            steps_per_quarter = int(self.model.steps_per_second * 60 / qpm)
            seconds_per_step = 1 / self.model.steps_per_second
        else:
            seconds_per_step = 60.0 / qpm / self.model.steps_per_quarter
            steps_per_quarter = self.model.steps_per_quarter
        quantized_sequence = mm.quantize_note_sequence(input_sequence,
                                                       steps_per_quarter)

        last_end_time = (max(
            n.end_time
            for n in input_sequence.notes) if input_sequence.notes else 0)

        primer_sequence_steps = quantized_sequence.total_quantized_steps
        if primer_sequence_steps > num_steps:
            # The primer is longer than the requested number of steps, so
            # grow num_steps to four times the primer's length.
            num_steps = primer_sequence_steps * 4

        total_seconds = num_steps * seconds_per_step
        input_sequence.total_time = min(total_seconds,
                                        input_sequence.total_time)
        generator_options = generator_pb2.GeneratorOptions()

        generator_options.args['temperature'].float_value = temperature
        generate_section = generator_options.generate_sections.add(
            start_time=last_end_time + seconds_per_step,
            end_time=total_seconds)

        self.output_sequence = self.model.generate(input_sequence,
                                                   generator_options)

        request_dict = self.put_request_dict
        utils.generated_sequence_2_mp3(self.output_sequence,
                                       f"{self.unique_id}",
                                       use_salamander=True,
                                       request_dict=request_dict)
Example #30
def run():
    """
    Load the Transformer model specified by the flags and start sampling.
    :raises:
        ValueError: if required flags are missing or invalid.
    """
    if FLAGS.model_path is None:
        raise ValueError('A pre-trained Transformer model path is required.')

    if FLAGS.output_dir is None:
        raise ValueError('A MIDI output directory is required.')

    if FLAGS.decode_length <= 0:
        raise ValueError('Decode length must be > 0.')

    problem = utils.PianoPerformanceLanguageModelProblem()
    unconditional_encoders = problem.get_feature_encoders()
    primer_ns = music_pb2.NoteSequence()
    if FLAGS.primer_path is None:
        targets = []
    else:
        if FLAGS.max_primer_second <= 0:
            raise ValueError('Max primer second must be > 0.')

        primer_ns = utils.get_primer_ns(FLAGS.primer_path,
                                        FLAGS.max_primer_second)
        targets = unconditional_encoders['targets'].encode_note_sequence(
            primer_ns)

        # Remove the end token from the encoded primer.
        targets = targets[:-1]
        if len(targets) >= FLAGS.decode_length:
            raise ValueError(
                'Primer has at least as many events as the maximum sequence '
                'length: %d >= %d; aborting.' %
                (len(targets), FLAGS.decode_length))
    decode_length = FLAGS.decode_length - len(targets)

    # Set up HParams.
    hparams = trainer_lib.create_hparams(hparams_set=FLAGS.hparams_set)
    trainer_lib.add_problem_hparams(hparams, problem)
    hparams.num_hidden_layers = FLAGS.layers
    hparams.sampling_method = FLAGS.sample

    # Set up decoding HParams.
    decode_hparams = decoding.decode_hparams()
    decode_hparams.alpha = FLAGS.alpha
    decode_hparams.beam_size = FLAGS.beam_size

    # Create Estimator.
    utils.LOGGER.info('Loading model')
    run_config = trainer_lib.create_run_config(hparams)
    estimator = trainer_lib.create_estimator(FLAGS.model_name,
                                             hparams,
                                             run_config,
                                             decode_hparams=decode_hparams)

    generate(estimator, unconditional_encoders, decode_length, targets,
             primer_ns)