Example #1
  def testBasicOneHotEncoderOctaveSquash(self):
    steps_per_beat = 4
    transpose_to_key = 0
    min_note = 48
    max_note = 84
    num_classes = max_note - min_note + 2

    melody = melodies_lib.Melody()
    melody.from_event_list(
        [NO_EVENT, 84, 86, 52, NO_EVENT, NOTE_OFF, 65, 67, NOTE_OFF, 69, 71, 72,
         NO_EVENT, NOTE_OFF, 38, 40, NOTE_OFF])
    transformed_melody = [NO_EVENT, 24, 26, 4, NO_EVENT, NOTE_OFF, 17, 19,
                          NOTE_OFF, 21, 23, 24, NO_EVENT, NOTE_OFF, 2, 4]
    expected_inputs = [one_hot(note + 2, num_classes)
                       for note in transformed_melody]
    expected_labels = ([note + 2 for note in transformed_melody[1:]] +
                       [NOTE_OFF + 2])
    expected_sequence_example = make_sequence_example(expected_inputs,
                                                      expected_labels)
    sequence_example, _ = sequence_to_melodies.basic_one_hot_encoder(
        melody, steps_per_beat, min_note, max_note, transpose_to_key)
    self.assertEqual(expected_sequence_example, sequence_example)
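
The octave squash exercised by this test transposes out-of-range pitches by whole octaves into [min_note, max_note): 84 and 86 drop to 72 and 74, while 38 and 40 rise to 50 and 52. A minimal sketch of that arithmetic, assuming the half-open range implied by the test data (the helper name is illustrative, not part of the module):

def squash(pitch, min_note=48, max_note=84):
  # Transposes pitch by whole octaves into [min_note, max_note).
  while pitch >= max_note:
    pitch -= 12
  while pitch < min_note:
    pitch += 12
  return pitch

assert squash(84) == 72  # max_note itself is squashed down an octave.
assert squash(86) == 74
assert squash(38) == 50  # Below the range: up an octave.
assert squash(52) == 52  # In-range pitches are untouched.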
Example #2
  def testBasicOneHotEncoderTransposeKey(self):
    steps_per_beat = 4
    transpose_to_key = 0
    min_note = 48
    max_note = 84
    num_classes = max_note - min_note + 2

    melody = melodies_lib.Melody()
    melody.from_event_list(
        [NO_EVENT, 61, 63, 65, NO_EVENT, NOTE_OFF, 66, 68, NOTE_OFF, 70, 72, 73,
         NO_EVENT, NOTE_OFF, 75, 77, NOTE_OFF])
    transformed_melody = [NO_EVENT, 12, 14, 16, NO_EVENT, NOTE_OFF, 17, 19,
                          NOTE_OFF, 21, 23, 24, NO_EVENT, NOTE_OFF, 26, 28]
    expected_inputs = [one_hot(note + 2, num_classes)
                       for note in transformed_melody]
    expected_labels = ([note + 2 for note in transformed_melody[1:]] +
                       [NOTE_OFF + 2])
    expected_sequence_example = make_sequence_example(expected_inputs,
                                                      expected_labels)
    sequence_example, _ = sequence_to_melodies.basic_one_hot_encoder(
        melody, steps_per_beat, min_note, max_note, transpose_to_key)
    self.assertEqual(expected_sequence_example, sequence_example)
Example #3
  def testBasicOneHotEncoder(self):
    steps_per_beat = 4
    transpose_to_key = 0
    min_note = 48
    max_note = 84
    num_classes = max_note - min_note + 2

    melody = melodies_lib.Melody()
    melody.from_event_list(
        [NO_EVENT, 60, 62, 64, NO_EVENT, NOTE_OFF, 65, 67, NOTE_OFF, 69, 71, 72,
         NO_EVENT, NOTE_OFF, 74, 76, 77, 79, NO_EVENT, NOTE_OFF])
    transformed_melody = [NO_EVENT, 12, 14, 16, NO_EVENT, NOTE_OFF, 17, 19,
                          NOTE_OFF, 21, 23, 24, NO_EVENT, NOTE_OFF, 26, 28,
                          29, 31, NO_EVENT, NOTE_OFF]
    expected_inputs = ([one_hot(note + 2, num_classes)
                        for note in transformed_melody] +
                       [one_hot(0, num_classes)] * 12)
    expected_labels = [note + 2 for note in transformed_melody[1:]] + [0] * 13
    expected_sequence_example = make_sequence_example(expected_inputs,
                                                      expected_labels)
    sequence_example, _ = sequence_to_melodies.basic_one_hot_encoder(
        melody, steps_per_beat, min_note, max_note, transpose_to_key)
    self.assertEqual(expected_sequence_example, sequence_example)
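
The note + 2 offset in these expectations encodes the class layout the tests assume: with NO_EVENT = -2 and NOTE_OFF = -1, adding 2 maps the two special events to classes 0 and 1, and a transposed pitch p lands on class p - min_note + 2, which is why num_classes = max_note - min_note + 2. A sketch of that mapping, with constant values inferred from the tests above (the helper name is illustrative):

NO_EVENT = -2
NOTE_OFF = -1

def event_to_class(event, min_note=48):
  # NO_EVENT -> 0, NOTE_OFF -> 1; pitches fill classes 2..num_classes - 1.
  if event < 0:
    return event + 2
  return event - min_note + 2

assert event_to_class(NO_EVENT) == 0
assert event_to_class(NOTE_OFF) == 1
assert event_to_class(60) == 14  # Middle C with min_note=48.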
Example #4
def sampler_loop(graph, decoder, checkpoint_dir, primer, num_gen_steps):
  """Generate many melodies simulatneously given a primer.

  Generate melodies by sampling from model output and feeding it back into
  the model as input at every step.

  Args:
    graph: A tf.Graph instance containing the graph to sample from.
    decoder: A function that converts model output and reconstruction data
        into a Melody object. It takes two inputs: a list of integers giving
        the softmax class chosen at each step, and the reconstruction data
        returned by the encoder. It returns a melodies_lib.Melody.
    checkpoint_dir: Directory in which to look for the most recent model
        checkpoint.
    primer: A Melody object.
    num_gen_steps: How many time steps to generate.

  Returns:
    List of generated melodies, each as a Melody object.
  """
  softmax = graph.get_collection('softmax')[0]
  initial_state = graph.get_collection('initial_state')[0]
  final_state = graph.get_collection('final_state')[0]
  melody_sequence = graph.get_collection('melody_sequence')[0]
  lengths = graph.get_collection('lengths')[0]

  with graph.as_default():
    saver = tf.train.Saver()

  session = tf.Session(graph=graph)

  logging.info('Checkpoint dir: %s', checkpoint_dir)
  checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)

  saver.restore(session, checkpoint_file)

  batch_size = softmax.get_shape()[0].value

  # Convert primer Melody to model inputs.
  sequence_example, encoder_information = (
      sequence_to_melodies.basic_one_hot_encoder(primer))
  primer_input = [
      list(i.float_list.value)
      for i in sequence_example.feature_lists.feature_list['inputs'].feature]

  # Run model over primer sequence.
  primer_input_batch = np.tile([primer_input], (batch_size, 1, 1))
  state = session.run(
      final_state,
      feed_dict={initial_state: np.zeros(initial_state.get_shape().as_list()),
                 melody_sequence: primer_input_batch,
                 lengths: np.full(batch_size, len(primer),
                                  dtype=int)})

  # Sample from model repeatedly to generate melodies.
  generated_sequences = [[] for _ in xrange(batch_size)]
  # Seed the sampling loop with the last primer event's class; the primer
  # inputs are one-hot vectors, so argmax recovers the class index.
  last_outputs = [np.argmax(primer_input[-1])] * batch_size
  singleton_lengths = np.full(batch_size, 1, dtype=int)
  for i in xrange(num_gen_steps):
    input_batch = np.transpose(
        [make_onehot(last_outputs, basic_rnn_ops.NUM_CLASSES)], (1, 0, 2))
    state, batch_softmax = session.run(
        [final_state, softmax],
        feed_dict={initial_state: state,
                   melody_sequence: input_batch,
                   lengths: singleton_lengths})
    last_outputs = [
        np.random.choice(basic_rnn_ops.NUM_CLASSES, p=p_dist.flatten())
        for p_dist in batch_softmax]
    for generated_seq, next_output in zip(generated_sequences, last_outputs):
      generated_seq.append(next_output)

  primer_event_list = list(primer)
  generated_melodies = []
  for seq in generated_sequences:
    melody = melodies_lib.Melody(steps_per_bar=primer.steps_per_bar)
    melody.from_event_list(
        primer_event_list + list(decoder(seq, encoder_information)))
    generated_melodies.append(melody)

  return generated_melodies
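
For reference, a hypothetical decoder satisfying the contract in the docstring. The inverse mapping mirrors the encoder behavior exercised in the tests above; treating the encoder's reconstruction data as a (min_note, transpose_amount) pair is an assumption for illustration only, as is the function name:

def example_decoder(softmax_classes, reconstruction_data):
  min_note, transpose_amount = reconstruction_data  # Assumed layout.
  events = []
  for cls in softmax_classes:
    if cls < 2:
      events.append(cls - 2)  # Classes 0 and 1 -> NO_EVENT, NOTE_OFF.
    else:
      # Undo the min_note offset and the key transposition.
      events.append(cls - 2 + min_note - transpose_amount)
  melody = melodies_lib.Melody()
  melody.from_event_list(events)
  return melody

melodies = sampler_loop(graph, example_decoder, '/tmp/checkpoints',
                        primer, num_gen_steps=64)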