Example 1
0
    def testSquash(self):
        """Squashing into one octave transposes each melody to the target key."""
        cases = [
            # (input events, transpose_to_key, expected events)
            # Melody in C, transposed to C, and squashed to 1 octave.
            ([12 * 5, NO_EVENT, 12 * 5 + 2, NOTE_OFF, 12 * 6 + 4, NO_EVENT],
             0,
             [12 * 5, NO_EVENT, 12 * 5 + 2, NOTE_OFF, 12 * 5 + 4, NO_EVENT]),
            # Melody in D, transposed to C, and squashed to 1 octave.
            ([12 * 5 + 2, 12 * 5 + 4, 12 * 6 + 7, 12 * 6 + 6, 12 * 5 + 1],
             0,
             [12 * 5, 12 * 5 + 2, 12 * 5 + 5, 12 * 5 + 4, 12 * 5 + 11]),
            # Melody in D, transposed to E, and squashed to 1 octave.
            ([12 * 5 + 2, 12 * 5 + 4, 12 * 6 + 7, 12 * 6 + 6, 12 * 4 + 11],
             4,
             [12 * 5 + 4, 12 * 5 + 6, 12 * 5 + 9, 12 * 5 + 8, 12 * 5 + 1]),
        ]
        for input_events, target_key, expected_events in cases:
            melody = melodies_lib.Melody()
            melody.from_event_list(input_events)
            melody.squash(min_note=12 * 5, max_note=12 * 6,
                          transpose_to_key=target_key)
            self.assertEqual(expected_events, list(melody))
Example 2
0
    def testSquashCenterOctaves(self):
        """Squash re-centers melodies: low and high inputs meet in the middle."""
        # Both inputs squash to the same octave-5 result.
        expected = [
            12 * 5, NO_EVENT, 12 * 5 + 2, NOTE_OFF, 12 * 5 + 4, NO_EVENT,
            12 * 5 + 5, 12 * 6 + 2, 12 * 5 - 1, NOTE_OFF
        ]
        # Octave 4 input must move up an octave; octave 6 input must move down.
        for base_octave in (4, 6):
            input_events = [
                12 * base_octave, NO_EVENT, 12 * base_octave + 2, NOTE_OFF,
                12 * base_octave + 4, NO_EVENT, 12 * base_octave + 5,
                12 * (base_octave + 1) + 2, 12 * base_octave - 1, NOTE_OFF
            ]
            melody = melodies_lib.Melody()
            melody.from_event_list(input_events)
            melody.squash(min_note=12 * 4, max_note=12 * 7, transpose_to_key=0)
            self.assertEqual(expected, list(melody))
Example 3
0
    def testGetMajorKey(self):
        """get_major_key should identify the major key of each melody."""
        # D Major.
        d_major = [
            NO_EVENT, 12 * 2 + 2, 12 * 3 + 4, 12 * 5 + 1, 12 * 6 + 6,
            12 * 4 + 11, 12 * 3 + 9, 12 * 5 + 7, NOTE_OFF
        ]
        # C# Major with accidentals.
        c_sharp_major = [
            NO_EVENT, 12 * 2 + 1, 12 * 4 + 8, 12 * 5 + 5, 12 * 6 + 6,
            12 * 3 + 3, 12 * 2 + 11, 12 * 3 + 10, 12 * 5, 12 * 2 + 8,
            12 * 4 + 1, 12 * 3 + 5, 12 * 5 + 9, 12 * 4 + 3, NOTE_OFF
        ]
        # One note in C Major.
        single_note = [NO_EVENT, 12 * 2 + 11, NOTE_OFF]

        for events, expected_key in ((d_major, 2), (c_sharp_major, 1),
                                     (single_note, 0)):
            melody = melodies_lib.Melody()
            melody.from_event_list(events)
            self.assertEqual(expected_key, melody.get_major_key())
Example 4
0
    def testBasicOneHotEncoderTruncateNoteOff(self):
        """The trailing NOTE_OFF is dropped from inputs and used as final label."""
        steps_per_beat = 4
        transpose_to_key = 0
        min_note = 48
        max_note = 84
        num_classes = max_note - min_note + 2

        melody = melodies_lib.Melody()
        melody.from_event_list([
            NO_EVENT, 60, 62, 64, NO_EVENT, NOTE_OFF, 65, 67, NOTE_OFF, 69, 71,
            72, NO_EVENT, NOTE_OFF, 74, 76, NOTE_OFF
        ])
        # The melody shifted into the model's pitch range, without the final
        # NOTE_OFF (it is truncated from the inputs).
        squashed = [
            NO_EVENT, 12, 14, 16, NO_EVENT, NOTE_OFF, 17, 19, NOTE_OFF, 21, 23,
            24, NO_EVENT, NOTE_OFF, 26, 28
        ]
        # Events are offset by 2 so the special events occupy classes 0 and 1.
        expected_inputs = []
        for event in squashed:
            expected_inputs.append(one_hot(event + 2, num_classes))
        # Labels are the inputs shifted one step, ending on the truncated
        # NOTE_OFF.
        expected_labels = [event + 2 for event in squashed[1:]] + [NOTE_OFF + 2]
        expected_sequence_example = make_sequence_example(
            expected_inputs, expected_labels)

        sequence_example, _ = encoders.basic_one_hot_encoder(
            melody, steps_per_beat, min_note, max_note, transpose_to_key)
        self.assertEqual(expected_sequence_example, sequence_example)
Example 5
0
 def testFromNotesPolyphonic(self):
     """Overlapping notes raise PolyphonicMelodyException; melody stays empty."""
     polyphonic_sequence = music_pb2.NoteSequence()
     add_track(polyphonic_sequence, 0,
               [(12, 100, 1.0, 4.0), (19, 100, 0.95, 3.0)])
     melody = melodies_lib.Melody(steps_per_bar=16)
     with self.assertRaises(melodies_lib.PolyphonicMelodyException):
         melody.from_notes(polyphonic_sequence.notes, bpm=60.0)
     # Nothing should have been extracted.
     self.assertFalse(list(melody))
 def testFromNotesPolyphonicWithIgnorePolyphonicNotes(self):
   """With ignore_polyphonic_notes=True overlapping notes are dropped silently.

   The first note (pitch 12, 0.0-2.0s) is kept; the overlapping note
   (pitch 19) is ignored rather than raising PolyphonicMelodyException.
   """
   sequence = music_pb2.NoteSequence()
   add_track(sequence, 0, [(12, 100, 0, 2.0), (19, 100, 0, 3.0)])
   melody = melodies_lib.Melody(steps_per_bar=16)
   melody.from_notes(sequence.notes, bpm=60.0, ignore_polyphonic_notes=True)
   # Use the named constants instead of raw -2/-1, consistent with the other
   # tests in this file (NO_EVENT == -2, NOTE_OFF == -1): the note sounds for
   # 8 steps (2.0s at 4 steps/s) and is then released.
   expected = [12] + [NO_EVENT] * 7 + [NOTE_OFF]
   self.assertEqual(expected, list(melody))
Example 7
0
def classes_to_melody(model_output, reconstruction_data, min_note=48):
  """Convert a list of model softmax classes back to a Melody object.

  This method decodes sequence_to_melodies.basic_one_hot_encoder.

  Each model output is the index of the softmax class that is chosen.

  Args:
    model_output: List of integers. Each int is the chosen softmax class
        from the model output.
    reconstruction_data: basic_one_hot_encoder specific information
        needed to reconstruct the input Melody.
    min_note: Minimum pitch in model will be mapped to this MIDI pitch.

  Returns:
    A melodies_lib.Melody.
  """
  transpose_amount = reconstruction_data

  def _decode(cls):
    # The lowest class indices are special events (negative event values);
    # everything above is a pitch offset from min_note, with the encoder's
    # transposition undone.
    if cls < melodies_lib.NUM_SPECIAL_EVENTS:
      return cls - melodies_lib.NUM_SPECIAL_EVENTS
    return cls + min_note - transpose_amount

  melody = melodies_lib.Melody()
  melody.from_event_list([_decode(cls) for cls in model_output])
  return melody
 def testFromNotesTrimEmptyMeasures(self):
   """Leading empty measures are trimmed; offset within the bar is kept."""
   sequence = music_pb2.NoteSequence()
   add_track(sequence, 0, [(12, 100, 9.5, 9.75), (11, 100, 10.0, 10.25)])
   melody = melodies_lib.Melody(steps_per_bar=16)
   melody.from_notes(sequence.notes, bpm=60.0)
   # Six empty steps remain before the first note after whole empty
   # measures are trimmed.
   expected = [NO_EVENT] * 6 + [12, NOTE_OFF, 11, NOTE_OFF]
   self.assertEqual(expected, list(melody))
  def testGetNoteHistogram(self):
    """get_note_histogram counts pitch classes; special events are ignored."""
    cases = [
        ([NO_EVENT, NOTE_OFF, 12 * 2 + 1, 12 * 3, 12 * 5 + 11, 12 * 6 + 3,
          12 * 4 + 11],
         [1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2]),
        ([0, 1, NO_EVENT, NOTE_OFF, 12 * 2 + 1, 12 * 3, 12 * 6 + 3,
          12 * 5 + 11, NO_EVENT, 12 * 4 + 11, 12 * 7 + 1],
         [2, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2]),
    ]
    for events, expected in cases:
      melody = melodies_lib.Melody()
      melody.from_event_list(events)
      self.assertEqual(expected, list(melody.get_note_histogram()))

    # A freshly constructed melody has an all-zero histogram.
    melody = melodies_lib.Melody()
    self.assertEqual([0] * 12, list(melody.get_note_histogram()))
 def testSquashMaxNote(self):
   """Notes at or above max_note are wrapped down an octave by squash."""
   input_events = [12 * 5, 12 * 5 + 2, 12 * 5 + 4, 12 * 5 + 5, 12 * 5 + 11,
                   12 * 6, 12 * 6 + 1]
   melody = melodies_lib.Melody()
   melody.from_event_list(input_events)
   melody.squash(min_note=12 * 5, max_note=12 * 6, transpose_to_key=0)
   # The last two notes sit at/above C6 and drop an octave; the rest stay put.
   expected = [12 * 5, 12 * 5 + 2, 12 * 5 + 4, 12 * 5 + 5, 12 * 5 + 11,
               12 * 5, 12 * 5 + 1]
   self.assertEqual(expected, list(melody))
 def testFromNotesTimeOverlap(self):
   """Later-starting notes interrupt earlier ones; result stays monophonic."""
   sequence = music_pb2.NoteSequence()
   add_track(sequence, 0,
             [(12, 100, 1.0, 5.0), (11, 100, 3.25, 3.75), (13, 100, 2.0, 4.0)])
   melody = melodies_lib.Melody(steps_per_bar=16)
   melody.from_notes(sequence.notes, bpm=60.0)
   # 12 starts at step 4, is cut off by 13 at step 8, which is cut off by 11
   # at step 13; 11 then ends.
   expected = ([NO_EVENT] * 4 + [12] + [NO_EVENT] * 3 + [13] +
               [NO_EVENT] * 4 + [11, NO_EVENT, NOTE_OFF])
   self.assertEqual(expected, list(melody))
 def testFromNotes(self):
   """Basic NoteSequence-to-Melody conversion with interruptions and rests."""
   sequence = music_pb2.NoteSequence()
   track_notes = [(12, 100, 0.01, 10.0), (11, 100, 0.22, 0.50),
                  (40, 100, 2.50, 3.50), (55, 100, 4.0, 4.01),
                  (52, 100, 4.75, 5.0)]
   add_track(sequence, 0, track_notes)
   melody = melodies_lib.Melody(steps_per_bar=16)
   melody.from_notes(sequence.notes, bpm=60.0)
   expected = ([12, 11, NOTE_OFF] + [NO_EVENT] * 7 + [40] + [NO_EVENT] * 3 +
               [NOTE_OFF, NO_EVENT, 55, NOTE_OFF, NO_EVENT, 52, NOTE_OFF])
   self.assertEqual(expected, list(melody))
Example 13
0
  def testBasicOneHotEncoderOctaveSquash(self):
    """Pitches outside the model range are moved by octaves before encoding."""
    steps_per_beat = 4
    transpose_to_key = 0
    min_note = 48
    max_note = 84
    num_classes = max_note - min_note + 2

    melody = melodies_lib.Melody()
    melody.from_event_list(
        [NO_EVENT, 84, 86, 52, NO_EVENT, NOTE_OFF, 65, 67, NOTE_OFF, 69, 71, 72,
         NO_EVENT, NOTE_OFF, 38, 40, NOTE_OFF])
    # The melody after octave squashing, relative to min_note, with the
    # trailing NOTE_OFF truncated from the inputs.
    squashed = [NO_EVENT, 24, 26, 4, NO_EVENT, NOTE_OFF, 17, 19,
                NOTE_OFF, 21, 23, 24, NO_EVENT, NOTE_OFF, 2, 4]
    # Special events map to classes 0-1, hence the +2 offset.
    expected_inputs = []
    for event in squashed:
      expected_inputs.append(one_hot(event + 2, num_classes))
    expected_labels = [event + 2 for event in squashed[1:]] + [NOTE_OFF + 2]
    expected_sequence_example = make_sequence_example(expected_inputs,
                                                      expected_labels)

    sequence_example, _ = sequence_to_melodies.basic_one_hot_encoder(
        melody, steps_per_beat, min_note, max_note, transpose_to_key)
    self.assertEqual(expected_sequence_example, sequence_example)
Example 14
0
 def testSquashAllNotesOff(self):
     """Squashing a melody containing no pitched notes leaves it unchanged."""
     special_events = [NO_EVENT, NOTE_OFF, NO_EVENT, NO_EVENT]
     melody = melodies_lib.Melody()
     melody.from_event_list(special_events)
     # With nothing to transpose, squash must be a no-op.
     melody.squash(min_note=12 * 4, max_note=12 * 7, transpose_to_key=0)
     self.assertEqual(special_events, list(melody))
Example 15
0
def sampler_loop(graph, checkpoint_dir, primer, num_gen_steps):
    """Generate many melodies simultaneously given a primer.

    Generate melodies by sampling from model output and feeding it back into
    the model as input at every step.

    Args:
      graph: A tf.Graph instance containing the graph to sample from.
      checkpoint_dir: Directory to look for most recent model checkpoint in.
      primer: A Melody object.
      num_gen_steps: How many time steps to generate.

    Returns:
      List of generated melodies, each as a Melody object.
    """
    # Fetch the tensors that were stashed in the graph's collections when the
    # model graph was built.
    logits = graph.get_collection('logits')[0]
    softmax = graph.get_collection('softmax')[0]
    initial_state = graph.get_collection('initial_state')[0]
    final_state = graph.get_collection('final_state')[0]
    melody_sequence = graph.get_collection('melody_sequence')[0]
    lengths = graph.get_collection('lengths')[0]

    # The Saver must be created inside the graph it will restore into.
    with graph.as_default():
        saver = tf.train.Saver()

    # NOTE(review): this session is never closed; consider using it as a
    # context manager or closing it before returning.
    session = tf.Session(graph=graph)

    logging.info('Checkpoint dir: %s', checkpoint_dir)
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)

    saver.restore(session, checkpoint_file)

    # The batch size is baked into the graph via the static shape of logits.
    batch_size = logits.get_shape()[0].value

    # Convert primer Melody to model inputs.
    sequence_example, transpose_amount = encoders.basic_one_hot_encoder(primer)
    primer_input = [
        list(i.float_list.value)
        for i in sequence_example.feature_lists.feature_list['inputs'].feature
    ]

    # Run model over primer sequence. The same primer is tiled across the
    # whole batch so every batch row produces its own continuation; only the
    # final RNN state is kept.
    primer_input_batch = np.tile([primer_input], (batch_size, 1, 1))
    state, _ = session.run(
        [final_state, logits],
        feed_dict={
            initial_state: np.zeros(initial_state.get_shape().as_list()),
            melody_sequence: primer_input_batch,
            lengths: np.full(batch_size, len(primer), dtype=int)
        })

    # Sample from model repeatedly to generate melodies.
    generated_sequences = [list() for i in range(batch_size)]
    # NOTE(review): last_outputs is seeded with the melody_sequence *tensor*
    # repeated batch_size times, not with integer class indices, and
    # make_onehot is called on it for the first generation step. Verify this
    # is intentional — seeding from the primer's final events looks more
    # likely to be what was meant.
    last_outputs = [melody_sequence] * batch_size
    singleton_lengths = np.full(batch_size, 1, dtype=int)
    for i in range(num_gen_steps):
        # Feed the previous step's sampled classes back in as a batch of
        # one-hot, length-1 sequences, carrying the RNN state forward.
        input_batch = np.transpose(
            [make_onehot(last_outputs, basic_rnn_ops.NUM_CLASSES)], (1, 0, 2))
        state, batch_logits, batch_softmax = session.run(
            [final_state, logits, softmax],
            feed_dict={
                initial_state: state,
                melody_sequence: input_batch,
                lengths: singleton_lengths
            })
        # Sample one class per batch row from its softmax distribution.
        # (batch_logits is fetched but never used.)
        last_outputs = [
            np.random.choice(basic_rnn_ops.NUM_CLASSES, p=p_dist.flatten())
            for p_dist in batch_softmax
        ]
        for generated_seq, next_output in zip(generated_sequences,
                                              last_outputs):
            generated_seq.append(next_output)

    def decoder(event_list):
        # Map softmax class indices back to melody events: the first
        # NUM_SPECIAL_EVENTS classes become special (negative) events; the
        # rest become MIDI pitches offset by min_note (hard-coded 48 here),
        # undoing the encoder's transposition.
        return [
            e - melodies_lib.NUM_SPECIAL_EVENTS if
            e < melodies_lib.NUM_SPECIAL_EVENTS else e + 48 - transpose_amount
            for e in event_list
        ]

    # Prepend the primer to each decoded continuation and wrap as Melody.
    primer_event_list = list(primer)
    generated_melodies = []
    for seq in generated_sequences:
        melody = melodies_lib.Melody(steps_per_bar=primer.steps_per_bar)
        melody.from_event_list(primer_event_list + decoder(seq))
        generated_melodies.append(melody)

    return generated_melodies