Example #1
 def transform(self, quantized_sequence):
   return melodies_lib.extract_melodies(
       quantized_sequence,
       min_bars=self.min_bars,
       min_unique_pitches=self.min_unique_pitches,
       gap_bars=self.gap_bars,
       ignore_polyphonic_notes=self.ignore_polyphonic_notes)
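This `transform` is a thin wrapper around `melodies_lib.extract_melodies`. A hedged sketch of calling it directly (the `note_sequence` and `steps_per_quarter` names are assumptions; the quantization step follows the pattern in Examples #42 and #43 below):

quantized_sequence = sequences_lib.QuantizedSequence()
quantized_sequence.from_note_sequence(note_sequence, steps_per_quarter)
# Newer versions of extract_melodies return a (melodies, stats) pair;
# older versions on this page return only the melody list.
melodies, stats = melodies_lib.extract_melodies(
    quantized_sequence, min_bars=7, gap_bars=1.0, min_unique_pitches=5,
    ignore_polyphonic_notes=True)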
Example #2
  def testExtractMelodiesStatistics(self):
    self.quantized_sequence.steps_per_beat = 1
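    # Each note tuple below is (pitch, velocity, start_step, end_step).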
    testing_lib.add_quantized_track(
        self.quantized_sequence, 0,
        [(12, 100, 2, 4), (11, 1, 6, 7), (10, 100, 8, 10), (9, 100, 11, 14),
         (8, 100, 16, 40), (7, 100, 41, 42)])
    testing_lib.add_quantized_track(
        self.quantized_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 2, 8)])
    testing_lib.add_quantized_track(
        self.quantized_sequence, 2,
        [(12, 127, 0, 1)])
    testing_lib.add_quantized_track(
        self.quantized_sequence, 3,
        [(12, 127, 2, 4), (12, 50, 6, 8)])
    _, stats = melodies_lib.extract_melodies(
        self.quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=2,
        ignore_polyphonic_notes=False)

    stats_dict = dict([(stat.name, stat) for stat in stats])
    self.assertEqual(stats_dict['polyphonic_tracks_discarded'].count, 1)
    self.assertEqual(stats_dict['melodies_discarded_too_short'].count, 1)
    self.assertEqual(stats_dict['melodies_discarded_too_few_pitches'].count, 1)
    self.assertEqual(
        stats_dict['melody_lengths_in_bars'].counters,
        {float('-inf'): 0, 0: 1, 1: 0, 2: 1, 10: 1, 20: 0, 30: 0, 40: 0, 50: 0,
         100: 0, 200: 0, 500: 0})
Example #3
    def testExtractMelodiesSimple(self):
        self.quantized_sequence.steps_per_beat = 1
        testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                        [(12, 100, 2, 4), (11, 1, 6, 7)])
        testing_lib.add_quantized_track(self.quantized_sequence, 1,
                                        [(12, 127, 2, 4), (14, 50, 6, 8)])
        expected = [[
            NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11, NOTE_OFF
        ],
                    [
                        NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT,
                        14, NO_EVENT, NOTE_OFF
                    ]]
        melodies, _ = melodies_lib.extract_melodies(
            self.quantized_sequence,
            min_bars=1,
            gap_bars=1,
            min_unique_pitches=2,
            ignore_polyphonic_notes=True)

        self.assertEqual(2, len(melodies))
        self.assertTrue(isinstance(melodies[0], melodies_lib.MonophonicMelody))
        self.assertTrue(isinstance(melodies[1], melodies_lib.MonophonicMelody))

        melodies = sorted([list(melody) for melody in melodies])
        self.assertEqual(expected, melodies)
Example #4
 def testExtractLeadSheetFragmentsNoChords(self):
     self.quantized_sequence.steps_per_quarter = 1
     testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                     [(12, 100, 2, 4), (11, 1, 6, 11)])
     testing_lib.add_quantized_track(self.quantized_sequence, 1,
                                     [(12, 127, 2, 4), (14, 50, 6, 8),
                                      (50, 100, 33, 37), (52, 100, 34, 37)])
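     # Chord tuples are (chord figure, start_step).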
     testing_lib.add_quantized_chords(self.quantized_sequence,
                                      [('C', 2), ('G7', 6), (NO_CHORD, 10)])
     lead_sheets, stats = lead_sheets_lib.extract_lead_sheet_fragments(
         self.quantized_sequence,
         min_bars=1,
         gap_bars=2,
         min_unique_pitches=2,
         ignore_polyphonic_notes=True,
         require_chords=True)
     melodies, _ = melodies_lib.extract_melodies(
         self.quantized_sequence,
         min_bars=1,
         gap_bars=2,
         min_unique_pitches=2,
         ignore_polyphonic_notes=True)
     chord_progressions, _ = chords_lib.extract_chords_for_melodies(
         self.quantized_sequence, melodies)
     stats_dict = dict([(stat.name, stat) for stat in stats])
     # Last lead sheet should be rejected for having no chords.
     self.assertEqual(list(melodies[:2]),
                      list(lead_sheet.melody for lead_sheet in lead_sheets))
     self.assertEqual(list(chord_progressions[:2]),
                      list(lead_sheet.chords for lead_sheet in lead_sheets))
     self.assertEqual(stats_dict['empty_chord_progressions'].count, 1)
Example #5
 def testExtractLeadSheetFragments(self):
     self.quantized_sequence.steps_per_quarter = 1
     testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                     [(12, 100, 2, 4), (11, 1, 6, 11)])
     testing_lib.add_quantized_track(self.quantized_sequence, 1,
                                     [(12, 127, 2, 4), (14, 50, 6, 8),
                                      (50, 100, 33, 37), (52, 100, 34, 37)])
     testing_lib.add_quantized_chords(self.quantized_sequence,
                                      [('C', 2), ('G7', 6), ('Cmaj7', 33)])
     lead_sheets, _ = lead_sheets_lib.extract_lead_sheet_fragments(
         self.quantized_sequence,
         min_bars=1,
         gap_bars=2,
         min_unique_pitches=2,
         ignore_polyphonic_notes=True,
         require_chords=True)
     melodies, _ = melodies_lib.extract_melodies(
         self.quantized_sequence,
         min_bars=1,
         gap_bars=2,
         min_unique_pitches=2,
         ignore_polyphonic_notes=True)
     chord_progressions, _ = chords_lib.extract_chords_for_melodies(
         self.quantized_sequence, melodies)
     self.assertEqual(list(melodies),
                      list(lead_sheet.melody for lead_sheet in lead_sheets))
     self.assertEqual(list(chord_progressions),
                      list(lead_sheet.chords for lead_sheet in lead_sheets))
Example #6
  def testExtractMelodiesStatistics(self):
    self.quantized_sequence.steps_per_quarter = 1
    testing_lib.add_quantized_track(
        self.quantized_sequence, 0,
        [(12, 100, 2, 4), (11, 1, 6, 7), (10, 100, 8, 10), (9, 100, 11, 14),
         (8, 100, 16, 40), (7, 100, 41, 42)])
    testing_lib.add_quantized_track(
        self.quantized_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 2, 8)])
    testing_lib.add_quantized_track(
        self.quantized_sequence, 2,
        [(12, 127, 0, 1)])
    testing_lib.add_quantized_track(
        self.quantized_sequence, 3,
        [(12, 127, 2, 4), (12, 50, 6, 8)])
    _, stats = melodies_lib.extract_melodies(
        self.quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=2,
        ignore_polyphonic_notes=False)

    stats_dict = dict([(stat.name, stat) for stat in stats])
    self.assertEqual(stats_dict['polyphonic_tracks_discarded'].count, 1)
    self.assertEqual(stats_dict['melodies_discarded_too_short'].count, 1)
    self.assertEqual(stats_dict['melodies_discarded_too_few_pitches'].count, 1)
    self.assertEqual(
        stats_dict['melody_lengths_in_bars'].counters,
        {float('-inf'): 0, 0: 1, 1: 0, 2: 1, 10: 1, 20: 0, 30: 0, 40: 0, 50: 0,
         100: 0, 200: 0, 500: 0})
Example #7
    def testExtractMelodiesSimple(self):
        sequence = parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}""")
        add_track(sequence, 0, [(12, 100, 1.0, 2.0), (11, 1, 3.25, 3.75)])
        add_track(sequence, 1, [(12, 127, 1.0, 2.0), (14, 50, 3.0, 4.0),
                                (50, 0, 6.0, 7.0)])
        expected = [[
            NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11, NOTE_OFF
        ],
                    [
                        NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT,
                        14, NO_EVENT, NOTE_OFF
                    ]]
        melodies = melodies_lib.extract_melodies(sequence,
                                                 steps_per_beat=1,
                                                 min_bars=1,
                                                 min_unique_pitches=2)

        self.assertEqual(2, len(melodies))
        self.assertTrue(isinstance(melodies[0], melodies_lib.Melody))
        self.assertTrue(isinstance(melodies[1], melodies_lib.Melody))

        melodies = sorted([list(melody) for melody in melodies])
        self.assertEqual(expected, melodies)
Example #8
 def testExtractMelodiesPadEnd(self):
     self.quantized_sequence.steps_per_beat = 1
     testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                     [(12, 127, 2, 4), (14, 50, 6, 7)])
     testing_lib.add_quantized_track(self.quantized_sequence, 1,
                                     [(12, 127, 2, 4), (14, 50, 6, 8)])
     testing_lib.add_quantized_track(self.quantized_sequence, 2,
                                     [(12, 127, 2, 4), (14, 50, 6, 9)])
     expected = [[
         NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NOTE_OFF
     ], [
         NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT
     ],
                 [
                     NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT,
                     14, NO_EVENT, NO_EVENT, NOTE_OFF, NO_EVENT, NO_EVENT
                 ]]
     melodies, _ = melodies_lib.extract_melodies(
         self.quantized_sequence,
         min_bars=1,
         gap_bars=1,
         min_unique_pitches=2,
         ignore_polyphonic_notes=True,
         pad_end=True)
     melodies = [list(melody) for melody in melodies]
     self.assertEqual(expected, melodies)
Example #9
 def testExtractChordsForMelodiesCoincidentChords(self):
     self.quantized_sequence.steps_per_quarter = 1
     testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                     [(12, 100, 2, 4), (11, 1, 6, 11)])
     testing_lib.add_quantized_track(self.quantized_sequence, 1,
                                     [(12, 127, 2, 4), (14, 50, 6, 8),
                                      (50, 100, 33, 37), (52, 100, 34, 37)])
     testing_lib.add_quantized_chords(self.quantized_sequence,
                                      [('C', 2), ('G7', 6), ('E13', 8),
                                       ('Cmaj7', 8)])
     melodies, _ = melodies_lib.extract_melodies(
         self.quantized_sequence,
         min_bars=1,
         gap_bars=2,
         min_unique_pitches=2,
         ignore_polyphonic_notes=True)
     chord_progressions, stats = chords_lib.extract_chords_for_melodies(
         self.quantized_sequence, melodies)
     expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],
                 ['Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]
     stats_dict = dict([(stat.name, stat) for stat in stats])
     self.assertIsNone(chord_progressions[0])
     self.assertEqual(expected,
                      [list(chords) for chords in chord_progressions[1:]])
     self.assertEqual(stats_dict['coincident_chords'].count, 1)
Example #10
 def transform(self, quantized_sequence):
     return melodies_lib.extract_melodies(
         quantized_sequence,
         min_bars=self.min_bars,
         min_unique_pitches=self.min_unique_pitches,
         gap_bars=self.gap_bars,
         ignore_polyphonic_notes=self.ignore_polyphonic_notes)
Example #11
def run(melody_encoder_decoder, build_graph):
    """Generates melodies and saves them as MIDI files.

  Args:
    melody_encoder_decoder: A melodies_lib.MelodyEncoderDecoder object specific
        to your model.
    build_graph: A function that when called, returns the tf.Graph object for
        your model. The function will be passed the parameters:
        (mode, hparams_string, input_size, num_classes, sequence_example_file).
        For an example usage, see models/basic_rnn/basic_rnn_graph.py.
  """
    tf.logging.set_verbosity(tf.logging.INFO)

    if not FLAGS.run_dir:
        tf.logging.fatal("--run_dir required")
        return
    if not FLAGS.output_dir:
        tf.logging.fatal("--output_dir required")
        return

    FLAGS.run_dir = os.path.expanduser(FLAGS.run_dir)
    FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
    if FLAGS.primer_midi:
        FLAGS.primer_midi = os.path.expanduser(FLAGS.primer_midi)

    hparams = ast.literal_eval(FLAGS.hparams if FLAGS.hparams else "{}")
    hparams["batch_size"] = FLAGS.num_outputs
    hparams["dropout_keep_prob"] = 1.0
    hparams_string = repr(hparams)

    graph = build_graph(
        "generate", hparams_string, melody_encoder_decoder.input_size, melody_encoder_decoder.num_classes
    )

    train_dir = os.path.join(FLAGS.run_dir, "train")

    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)

    primer_melody = melodies_lib.Melody()
    bpm = melodies_lib.DEFAULT_BEATS_PER_MINUTE
    if FLAGS.primer_melody:
        primer_melody.from_event_list(ast.literal_eval(FLAGS.primer_melody))
    elif FLAGS.primer_midi:
        primer_sequence = midi_io.midi_file_to_sequence_proto(FLAGS.primer_midi)
        if primer_sequence.tempos:
            bpm = primer_sequence.tempos[0].bpm
        extracted_melodies = melodies_lib.extract_melodies(primer_sequence, min_bars=0, min_unique_pitches=1)
        if extracted_melodies:
            primer_melody = extracted_melodies[0]
        else:
            tf.logging.info(
                "No melodies were extracted from the MIDI file %s. " "Melodies will be generated from scratch.",
                FLAGS.primer_midi,
            )

    run_generate(graph, train_dir, FLAGS.output_dir, melody_encoder_decoder, primer_melody, FLAGS.num_steps, bpm)
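The docstring above pins down the `build_graph` contract. A minimal stub under those assumptions (purely illustrative; the real construction lives in models/basic_rnn/basic_rnn_graph.py):

def build_graph(mode, hparams_string, input_size, num_classes,
                sequence_example_file=None):
  # Hypothetical stub: a real implementation would parse hparams_string and
  # add model ops for the given mode; here we only return an empty tf.Graph.
  graph = tf.Graph()
  with graph.as_default():
    pass  # model construction would go here
  return graph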
Example #12
def run_conversion(melody_encoder_decoder,
                   note_sequences_file,
                   train_output,
                   eval_output=None,
                   eval_ratio=0.0):
    """Loop that converts NoteSequence protos to SequenceExample protos.

  Args:
    melody_encoder_decoder: A melodies_lib.MelodyEncoderDecoder object.
    note_sequences_file: String path pointing to TFRecord file of NoteSequence
        protos.
    train_output: String path to TFRecord file that training samples will be
        saved to.
    eval_output: If set, string path to TFRecord file that evaluation samples
        will be saved to. Omit this argument to not produce an eval set.
    eval_ratio: Fraction of input that will be saved to eval set. A random
        partition is chosen, so the actual train/eval ratio will vary.
  """
    reader = note_sequence_io.note_sequence_record_iterator(
        note_sequences_file)
    train_writer = tf.python_io.TFRecordWriter(train_output)
    eval_writer = (tf.python_io.TFRecordWriter(eval_output)
                   if eval_output else None)

    input_count = 0
    train_output_count = 0
    eval_output_count = 0
    tf.logging.info('Extracting melodies...')
    for sequence_data in reader:
        # Only extract melodies from 4/4 time music.
        if (not sequence_data.time_signatures or
                not (sequence_data.time_signatures[0].numerator == 4
                     and sequence_data.time_signatures[0].denominator == 4)):
            continue
        extracted_melodies = melodies_lib.extract_melodies(sequence_data)
        for melody in extracted_melodies:
            sequence_example = melody_encoder_decoder.encode(melody)
            serialized = sequence_example.SerializeToString()
            if eval_writer and random.random() < eval_ratio:
                eval_writer.write(serialized)
                eval_output_count += 1
            else:
                train_writer.write(serialized)
                train_output_count += 1
        input_count += 1
        if input_count % 10 == 0:
            tf.logging.info('Extracted %d melodies from %d sequences.',
                            eval_output_count + train_output_count,
                            input_count)

    tf.logging.info('Done. Extracted %d melodies from %d sequences.',
                    eval_output_count + train_output_count, input_count)
    tf.logging.info('Extracted %d melodies for training.', train_output_count)
    if eval_writer:
        tf.logging.info('Extracted %d melodies for evaluation.',
                        eval_output_count)
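A hedged usage sketch for the converter above (the paths are placeholders, and in practice melody_encoder_decoder is a model-specific melodies_lib.MelodyEncoderDecoder subclass):

run_conversion(melody_encoder_decoder,
               note_sequences_file='/tmp/notesequences.tfrecord',
               train_output='/tmp/training_melodies.tfrecord',
               eval_output='/tmp/eval_melodies.tfrecord',
               eval_ratio=0.1)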
Example #13
def extract_lead_sheet_fragments(quantized_sequence,
                                 min_bars=7,
                                 gap_bars=1.0,
                                 min_unique_pitches=5,
                                 ignore_polyphonic_notes=True,
                                 require_chords=False):
    """Extracts a list of lead sheet fragments from the given QuantizedSequence.

  This function first extracts melodies using melodies_lib.extract_melodies,
  then extracts the chords underlying each melody using
  chords_lib.extract_chords_for_melodies.

  Args:
    quantized_sequence: A sequences_lib.QuantizedSequence object.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies are
        discarded.
    gap_bars: A melody comes to an end when this number of bars (measures) of
        silence is encountered.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.
    ignore_polyphonic_notes: If True, melodies will be extracted from
        `quantized_sequence` tracks that contain polyphony (notes start at the
        same time). If False, tracks with polyphony will be ignored.
    require_chords: If True, only return lead sheets that have at least one
        chord other than NO_CHORD. If False, lead sheets with only melody will
        also be returned.

  Returns:
    A (lead_sheets, stats) tuple, where lead_sheets is a python list of
        LeadSheet instances and stats is a list of statistics about the
        extraction.

  Raises:
    NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
        (derived from its time signature) is not an integer number of time
        steps.
  """
    stats = dict([('empty_chord_progressions',
                   statistics.Counter('empty_chord_progressions'))])
    melodies, melody_stats = melodies_lib.extract_melodies(
        quantized_sequence,
        min_bars=min_bars,
        gap_bars=gap_bars,
        min_unique_pitches=min_unique_pitches,
        ignore_polyphonic_notes=ignore_polyphonic_notes)
    chord_progressions, chord_stats = chords_lib.extract_chords_for_melodies(
        quantized_sequence, melodies)
    lead_sheets = []
    for melody, chords in zip(melodies, chord_progressions):
        if chords is not None:
            if require_chords and all(chord == chords_lib.NO_CHORD
                                      for chord in chords):
                stats['empty_chord_progressions'].increment()
            else:
                lead_sheets.append(LeadSheet(melody, chords))
    return lead_sheets, stats.values() + melody_stats + chord_stats
Example #14
def run_conversion(encoder,
                   sequences_file,
                   train_output,
                   eval_output='',
                   eval_ratio=0.0):
    """Loop that converts NoteSequence protos to SequenceExample protos.

  Args:
    encoder: String name of encoder function from encoders.py to use.
    sequences_file: String path pointing to TFRecord file of NoteSequence
        protos.
    train_output: String path to TFRecord file that training samples will be
        saved to.
    eval_output: If set, string path to TFRecord file that evaluation samples
        will be saved to. Omit this argument to not produce an eval set.
    eval_ratio: Fraction of input that will be saved to eval set. A random
        partition is chosen, so the actual train/eval ratio will vary.
  """
    encoder_func = getattr(encoders, encoder)

    reader = tf.python_io.tf_record_iterator(sequences_file)
    train_writer = tf.python_io.TFRecordWriter(train_output)
    eval_writer = (tf.python_io.TFRecordWriter(eval_output)
                   if eval_output else None)

    input_count = 0
    train_output_count = 0
    eval_output_count = 0
    for buf in reader:
        sequence_data = music_pb2.NoteSequence()
        sequence_data.ParseFromString(buf)
        extracted_melodies = melodies_lib.extract_melodies(sequence_data)
        for melody in extracted_melodies:
            sequence_example, _ = encoder_func(melody)
            serialized = sequence_example.SerializeToString()
            if eval_writer and random.random() < eval_ratio:
                eval_writer.write(serialized)
                eval_output_count += 1
            else:
                train_writer.write(serialized)
                train_output_count += 1
        input_count += 1
        tf.logging.log_every_n(logging.INFO,
                               'Extracted %d melodies from %d sequences.', 500,
                               eval_output_count + train_output_count,
                               input_count)

    logging.info('Found %d sequences', input_count)
    logging.info('Extracted %d melodies for training.', train_output_count)
    if eval_writer:
        logging.info('Extracted %d melodies for evaluation.',
                     eval_output_count)
Example #15
def run_conversion(melody_encoder_decoder, note_sequences_file, train_output,
                   eval_output=None, eval_ratio=0.0):
  """Loop that converts NoteSequence protos to SequenceExample protos.

  Args:
    melody_encoder_decoder: A melodies_lib.MelodyEncoderDecoder object.
    note_sequences_file: String path pointing to TFRecord file of NoteSequence
        protos.
    train_output: String path to TFRecord file that training samples will be
        saved to.
    eval_output: If set, string path to TFRecord file that evaluation samples
        will be saved to. Omit this argument to not produce an eval set.
    eval_ratio: Fraction of input that will be saved to eval set. A random
        partition is chosen, so the actual train/eval ratio will vary.
  """
  reader = note_sequence_io.note_sequence_record_iterator(note_sequences_file)
  train_writer = tf.python_io.TFRecordWriter(train_output)
  eval_writer = (tf.python_io.TFRecordWriter(eval_output)
                 if eval_output else None)

  input_count = 0
  train_output_count = 0
  eval_output_count = 0
  tf.logging.info('Extracting melodies...')
  for sequence_data in reader:
    # Only extract melodies from 4/4 time music.
    if (not sequence_data.time_signatures or
        not (sequence_data.time_signatures[0].numerator == 4 and
             sequence_data.time_signatures[0].denominator == 4)):
      continue
    extracted_melodies = melodies_lib.extract_melodies(sequence_data)
    for melody in extracted_melodies:
      sequence_example = melody_encoder_decoder.encode(melody)
      serialized = sequence_example.SerializeToString()
      if eval_writer and random.random() < eval_ratio:
        eval_writer.write(serialized)
        eval_output_count += 1
      else:
        train_writer.write(serialized)
        train_output_count += 1
    input_count += 1
    if input_count % 10 == 0:
      tf.logging.info('Extracted %d melodies from %d sequences.',
                      eval_output_count + train_output_count,
                      input_count)

  tf.logging.info('Done. Extracted %d melodies from %d sequences.',
                  eval_output_count + train_output_count,
                  input_count)
  tf.logging.info('Extracted %d melodies for training.', train_output_count)
  if eval_writer:
    tf.logging.info('Extracted %d melodies for evaluation.', eval_output_count)
Example #16
 def testExtractMelodiesLateStart(self):
   sequence = parse_test_proto(music_pb2.NoteSequence, """
       time_signatures: {
         numerator: 4
         denominator: 4}""")
   add_track(sequence, 0, [(12, 100, 103.0, 103.5), (13, 100, 104.0, 105.0)])
   add_track(sequence, 1, [(12, 100, 100.0, 100.5), (13, 100, 101.0, 102.0)])
   expected = [[NO_EVENT, NO_EVENT, 12, NOTE_OFF, 13, NO_EVENT, NOTE_OFF],
               [12, NOTE_OFF, 13, NO_EVENT, NOTE_OFF]]
   melodies = melodies_lib.extract_melodies(sequence, steps_per_beat=1,
                                            min_bars=1, min_unique_pitches=2)
   melodies = sorted([list(melody) for melody in melodies])
   self.assertEqual(expected, melodies)
Example #18
 def transform(self, quantized_sequence):
     try:
         melodies, stats = melodies_lib.extract_melodies(
             quantized_sequence,
             min_bars=self.min_bars,
             min_unique_pitches=self.min_unique_pitches,
             gap_bars=self.gap_bars,
             ignore_polyphonic_notes=self.ignore_polyphonic_notes)
     except melodies_lib.NonIntegerStepsPerBarException as detail:
         tf.logging.warning('Skipped sequence: %s', detail)
         melodies = []
         stats = [statistics.Counter('non_integer_steps_per_bar', 1)]
     self._set_stats(stats)
     return melodies
Example #19
def run_conversion(encoder, sequences_file, train_output, eval_output="", eval_ratio=0.0):
    """Loop that converts NoteSequence protos to SequenceExample protos.

  Args:
    encoder: A function that converts Melody to SequenceExample which is fed
        into a model. The function takes one input of type melodies_lib.Melody
        and outputs a tuple (tf.train.SequenceExample, reconstruction_data)
        where reconstruction_data is any extra data that is needed to
        reconstruct a Melody from the given SequenceExample.
    sequences_file: String path pointing to TFRecord file of NoteSequence
        protos.
    train_output: String path to TFRecord file that training samples will be
        saved to.
    eval_output: If set, string path to TFRecord file that evaluation samples
        will be saved to. Omit this argument to not produce an eval set.
    eval_ratio: Fraction of input that will be saved to eval set. A random
        partition is chosen, so the actual train/eval ratio will vary.
  """

    reader = note_sequence_io.note_sequence_record_iterator(sequences_file)
    train_writer = tf.python_io.TFRecordWriter(train_output)
    eval_writer = tf.python_io.TFRecordWriter(eval_output) if eval_output else None

    input_count = 0
    train_output_count = 0
    eval_output_count = 0
    for sequence_data in reader:
        extracted_melodies = melodies_lib.extract_melodies(sequence_data)
        for melody in extracted_melodies:
            sequence_example, _ = encoder(melody)
            serialized = sequence_example.SerializeToString()
            if eval_writer and random.random() < eval_ratio:
                eval_writer.write(serialized)
                eval_output_count += 1
            else:
                train_writer.write(serialized)
                train_output_count += 1
        input_count += 1
        tf.logging.log_every_n(
            logging.INFO,
            "Extracted %d melodies from %d sequences.",
            500,
            eval_output_count + train_output_count,
            input_count,
        )

    logging.info("Found %d sequences", input_count)
    logging.info("Extracted %d melodies for training.", train_output_count)
    if eval_writer:
        logging.info("Extracted %d melodies for evaluation.", eval_output_count)
Example #20
 def transform(self, quantized_sequence):
   try:
     melodies, stats = melodies_lib.extract_melodies(
         quantized_sequence,
         min_bars=self.min_bars,
         min_unique_pitches=self.min_unique_pitches,
         gap_bars=self.gap_bars,
         ignore_polyphonic_notes=self.ignore_polyphonic_notes)
   except melodies_lib.NonIntegerStepsPerBarException as detail:
     tf.logging.warning('Skipped sequence: %s', detail)
     melodies = []
     stats = [statistics.Counter('non_integer_steps_per_bar', 1)]
   self._set_stats(stats)
   return melodies
Example #21
def run_conversion(encoder, sequences_file, train_output, eval_output='', eval_ratio=0.0):
  """Loop that converts NoteSequence protos to SequenceExample protos.

  Args:
    encoder: A function that converts Melody to SequenceExample which is fed
        into a model. The function takes one input of type melodies_lib.Melody
        and outputs a tuple (tf.train.SequenceExample, reconstruction_data)
        where reconstruction_data is any extra data that is needed to
        reconstruct a Melody from the given SequenceExample.
    sequences_file: String path pointing to TFRecord file of NoteSequence
        protos.
    train_output: String path to TFRecord file that training samples will be
        saved to.
    eval_output: If set, string path to TFRecord file that evaluation samples
        will be saved to. Omit this argument to not produce an eval set.
    eval_ratio: Fraction of input that will be saved to eval set. A random
        partition is chosen, so the actual train/eval ratio will vary.
  """

  reader = note_sequence_io.note_sequence_record_iterator(sequences_file)
  train_writer = tf.python_io.TFRecordWriter(train_output)
  eval_writer = (tf.python_io.TFRecordWriter(eval_output)
                 if eval_output else None)

  input_count = 0
  train_output_count = 0
  eval_output_count = 0
  for sequence_data in reader:
    extracted_melodies = melodies_lib.extract_melodies(sequence_data)
    for melody in extracted_melodies:
      sequence_example, _ = encoder(melody)
      serialized = sequence_example.SerializeToString()
      if eval_writer and random.random() < eval_ratio:
        eval_writer.write(serialized)
        eval_output_count += 1
      else:
        train_writer.write(serialized)
        train_output_count += 1
    input_count += 1
    tf.logging.log_every_n(logging.INFO, 
                           'Extracted %d melodies from %d sequences.',
                           500,
                           eval_output_count + train_output_count,
                           input_count)

  logging.info('Found %d sequences', input_count)
  logging.info('Extracted %d melodies for training.', train_output_count)
  if eval_writer:
    logging.info('Extracted %d melodies for evaluation.', eval_output_count)
Example #22
 def testExtractMelodiesMelodyTooShort(self):
   sequence = parse_test_proto(music_pb2.NoteSequence, """
       time_signatures: {
         numerator: 4
         denominator: 4}""")
   add_track(sequence, 0, [(12, 127, 1.0, 2.0), (14, 50, 3.0, 3.5)])
   add_track(sequence, 1, [(12, 127, 1.0, 2.0), (14, 50, 3.0, 4.0)])
   expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
                NO_EVENT, NOTE_OFF]]
   melodies = melodies_lib.extract_melodies(sequence,
                                            steps_per_beat=1,
                                            min_bars=2,
                                            min_unique_pitches=2)
   melodies = [list(melody) for melody in melodies]
   self.assertEqual(expected, melodies)
Example #23
 def testExtractMelodiesLateStart(self):
   self.quantized_sequence.steps_per_quarter = 1
   testing_lib.add_quantized_track(
       self.quantized_sequence, 0,
       [(12, 100, 102, 103), (13, 100, 104, 106)])
   testing_lib.add_quantized_track(
       self.quantized_sequence, 1,
       [(12, 100, 100, 101), (13, 100, 102, 105)])
   expected = [[NO_EVENT, NO_EVENT, 12, NOTE_OFF, 13, NO_EVENT],
               [12, NOTE_OFF, 13, NO_EVENT, NO_EVENT]]
   melodies, _ = melodies_lib.extract_melodies(
       self.quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   melodies = sorted([list(melody) for melody in melodies])
   self.assertEqual(expected, melodies)
Example #24
 def testExtractMelodiesLateStart(self):
   self.quantized_sequence.steps_per_beat = 1
   testing_lib.add_quantized_track(
       self.quantized_sequence, 0,
       [(12, 100, 102, 103), (13, 100, 104, 106)])
   testing_lib.add_quantized_track(
       self.quantized_sequence, 1,
       [(12, 100, 100, 101), (13, 100, 102, 104)])
   expected = [[NO_EVENT, NO_EVENT, 12, NOTE_OFF, 13, NO_EVENT, NOTE_OFF],
               [12, NOTE_OFF, 13, NO_EVENT, NOTE_OFF]]
   melodies, _ = melodies_lib.extract_melodies(
       self.quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   melodies = sorted([list(melody) for melody in melodies])
   self.assertEqual(expected, melodies)
Example #25
 def testExtractMelodiesMelodyTooShort(self):
   self.quantized_sequence.steps_per_beat = 1
   testing_lib.add_quantized_track(
       self.quantized_sequence, 0,
       [(12, 127, 2, 4), (14, 50, 6, 7)])
   testing_lib.add_quantized_track(
       self.quantized_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8)])
   expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
                NO_EVENT, NOTE_OFF]]
   melodies, _ = melodies_lib.extract_melodies(
       self.quantized_sequence, min_bars=2, gap_bars=1, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   melodies = [list(melody) for melody in melodies]
   self.assertEqual(expected, melodies)
Example #27
def run_conversion(encoder, sequences_file, train_output, eval_output='', eval_ratio=0.0):
  """Loop that converts NoteSequence protos to SequenceExample protos.

  Args:
    encoder: String name of encoder function from encoders.py to use.
    sequences_file: String path pointing to TFRecord file of NoteSequence
        protos.
    train_output: String path to TFRecord file that training samples will be
        saved to.
    eval_output: If set, string path to TFRecord file that evaluation samples
        will be saved to. Omit this argument to not produce an eval set.
    eval_ratio: Fraction of input that will be saved to eval set. A random
        partition is chosen, so the actual train/eval ratio will vary.
  """
  encoder_func = getattr(encoders, encoder)

  reader = tf.python_io.tf_record_iterator(sequences_file)
  train_writer = tf.python_io.TFRecordWriter(train_output)
  eval_writer = (tf.python_io.TFRecordWriter(eval_output)
                 if eval_output else None)

  input_count = 0
  train_output_count = 0
  eval_output_count = 0
  for buf in reader:
    sequence_data = music_pb2.NoteSequence()
    sequence_data.ParseFromString(buf)
    extracted_melodies = melodies_lib.extract_melodies(sequence_data)
    for melody in extracted_melodies:
      sequence_example, _ = encoder_func(melody)
      serialized = sequence_example.SerializeToString()
      if eval_writer and random.random() < eval_ratio:
        eval_writer.write(serialized)
        eval_output_count += 1
      else:
        train_writer.write(serialized)
        train_output_count += 1
    input_count += 1
    tf.logging.log_every_n(logging.INFO, 
                           'Extracted %d melodies from %d sequences.',
                           500,
                           eval_output_count + train_output_count,
                           input_count)

  logging.info('Found %d sequences', input_count)
  logging.info('Extracted %d melodies for training.', train_output_count)
  if eval_writer:
    logging.info('Extracted %d melodies for evaluation.', eval_output_count)
Example #28
 def testExtractMelodiesMelodyTooLongWithPad(self):
   self.quantized_sequence.steps_per_quarter = 1
   testing_lib.add_quantized_track(
       self.quantized_sequence, 0,
       [(12, 127, 2, 4), (14, 50, 6, 15)])
   testing_lib.add_quantized_track(
       self.quantized_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 18)])
   expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
                NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT]]
   melodies, _ = melodies_lib.extract_melodies(
       self.quantized_sequence, min_bars=1, max_steps_truncate=14,
       max_steps_discard=18, gap_bars=1, min_unique_pitches=2,
       ignore_polyphonic_notes=True, pad_end=True)
   melodies = [list(melody) for melody in melodies]
   self.assertEqual(expected, melodies)
Example #29
 def testExtractMelodiesTooFewPitches(self):
   # Test that extract_melodies discards melodies with too few pitches where
   # pitches are equivalent by octave.
   self.quantized_sequence.steps_per_beat = 1
   testing_lib.add_quantized_track(
       self.quantized_sequence, 0,
       [(12, 100, 0, 1), (13, 100, 1, 2), (18, 100, 2, 3),
        (24, 100, 3, 4), (25, 100, 4, 5)])
   testing_lib.add_quantized_track(
       self.quantized_sequence, 1,
       [(12, 100, 0, 1), (13, 100, 1, 2), (18, 100, 2, 3),
        (25, 100, 3, 4), (26, 100, 4, 5)])
   expected = [[12, 13, 18, 25, 26, NOTE_OFF]]
   melodies, _ = melodies_lib.extract_melodies(
       self.quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=4,
       ignore_polyphonic_notes=True)
   melodies = [list(melody) for melody in melodies]
   self.assertEqual(expected, melodies)
Example #30
 def testExtractMelodiesTooFewPitches(self):
   # Test that extract_melodies discards melodies with too few pitches where
   # pitches are equivalent by octave.
   self.quantized_sequence.steps_per_quarter = 1
   testing_lib.add_quantized_track(
       self.quantized_sequence, 0,
       [(12, 100, 0, 1), (13, 100, 1, 2), (18, 100, 2, 3),
        (24, 100, 3, 4), (25, 100, 4, 5)])
   testing_lib.add_quantized_track(
       self.quantized_sequence, 1,
       [(12, 100, 0, 1), (13, 100, 1, 2), (18, 100, 2, 3),
        (25, 100, 3, 4), (26, 100, 4, 5)])
   expected = [[12, 13, 18, 25, 26]]
   melodies, _ = melodies_lib.extract_melodies(
       self.quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=4,
       ignore_polyphonic_notes=True)
   melodies = [list(melody) for melody in melodies]
   self.assertEqual(expected, melodies)
Example #31
 def testExtractMelodiesRounding(self):
   sequence = parse_test_proto(music_pb2.NoteSequence, """
       time_signatures: {
         numerator: 4
         denominator: 4}
       tempos: {
         bpm: 60}""")
   add_track(
       sequence, 0,
       [(12, 100, 0.01, 0.24), (11, 100, 0.22, 0.55), (40, 100, 0.50, 0.75),
        (41, 100, 0.689, 1.18), (44, 100, 1.19, 1.69), (55, 100, 4.0, 4.01)])
   expected = [[12, 11, 40, 41, NOTE_OFF, 44, NO_EVENT, NOTE_OFF, NO_EVENT,
                NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT,
                NO_EVENT, 55, NOTE_OFF]]
   melodies = melodies_lib.extract_melodies(sequence, steps_per_beat=4,
                                            min_bars=1,
                                            min_unique_pitches=2)
   melodies = [list(melody) for melody in melodies]
   self.assertEqual(expected, melodies)
Example #33
 def testExtractMultipleMelodiesFromSameTrack(self):
   self.quantized_sequence.steps_per_beat = 1
   testing_lib.add_quantized_track(
       self.quantized_sequence, 0,
       [(12, 100, 2, 4), (11, 1, 6, 7)])
   testing_lib.add_quantized_track(
       self.quantized_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8),
        (50, 100, 33, 37), (52, 100, 34, 36)])
   expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11,
                NOTE_OFF],
               [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
                NO_EVENT, NOTE_OFF],
               [NO_EVENT, 50, 52, NO_EVENT, NOTE_OFF]]
   melodies, _ = melodies_lib.extract_melodies(
       self.quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   melodies = sorted([list(melody) for melody in melodies])
   self.assertEqual(expected, melodies)
Example #34
 def testExtractMultipleMelodiesFromSameTrack(self):
   self.quantized_sequence.steps_per_quarter = 1
   testing_lib.add_quantized_track(
       self.quantized_sequence, 0,
       [(12, 100, 2, 4), (11, 1, 6, 11)])
   testing_lib.add_quantized_track(
       self.quantized_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8),
        (50, 100, 33, 37), (52, 100, 34, 37)])
   expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11,
                NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT],
               [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
                NO_EVENT],
               [NO_EVENT, 50, 52, NO_EVENT, NO_EVENT]]
   melodies, _ = melodies_lib.extract_melodies(
       self.quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   melodies = sorted([list(melody) for melody in melodies])
   self.assertEqual(expected, melodies)
Example #35
 def testExtractMelodiesTooFewPitches(self):
   # Test that extract_melodies discards melodies with too few pitches where
   # pitches are equivalent by octave.
   sequence = parse_test_proto(music_pb2.NoteSequence, """
       time_signatures: {
         numerator: 4
         denominator: 4}""")
   add_track(sequence, 0, [(12, 100, 0.0, 0.5), (13, 100, 0.5, 1.0),
                           (18, 100, 1.0, 1.5), (24, 100, 1.5, 2.0),
                           (25, 100, 2.0, 2.5)])
   add_track(sequence, 1, [(12, 100, 0.0, 0.5), (13, 100, 0.5, 1.0),
                           (18, 100, 1.0, 1.5), (25, 100, 1.5, 2.0),
                           (26, 100, 2.0, 2.5)])
   expected = [[12, 13, 18, 25, 26, NOTE_OFF]]
   melodies = melodies_lib.extract_melodies(sequence,
                                            steps_per_beat=1,
                                            min_bars=1,
                                            min_unique_pitches=4)
   melodies = [list(melody) for melody in melodies]
   self.assertEqual(expected, melodies)
Example #37
def main(_):
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.INFO)
    root.addHandler(ch)

    if not os.path.isdir(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)

    primer_sequence = midi_io.midi_file_to_sequence_proto(FLAGS.primer_midi)
    bpm = primer_sequence.tempos[0].bpm if len(
        primer_sequence.tempos) else 120.0

    extracted_melodies = melodies_lib.extract_melodies(primer_sequence,
                                                       min_bars=1,
                                                       min_unique_pitches=1)

    if not extracted_melodies:
        logging.info('No melodies were extracted from MIDI file %s' %
                     FLAGS.primer_midi)
        return

    graph = make_graph(hparams_string=FLAGS.hparams)

    checkpoint_dir = os.path.join(FLAGS.experiment_run_dir, 'train')

    generated = []
    while len(generated) < FLAGS.num_outputs:
        generated.extend(
            sampler_loop(graph, checkpoint_dir, extracted_melodies[0],
                         FLAGS.num_steps))

    for i in range(FLAGS.num_outputs):
        sequence = generated[i].to_sequence(bpm=bpm)
        midi_io.sequence_proto_to_midi_file(
            sequence,
            os.path.join(FLAGS.output_dir, 'basic_rnn_sample_%d.mid' % i))

    logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs,
                 FLAGS.output_dir)
Example #38
  def testExtractMelodiesSimple(self):
    sequence = parse_test_proto(music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}""")
    add_track(sequence, 0, [(12, 100, 1.0, 2.0), (11, 1, 3.25, 3.75)])
    add_track(sequence, 1, [(12, 127, 1.0, 2.0), (14, 50, 3.0, 4.0),
                            (50, 0, 6.0, 7.0)])
    expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11,
                 NOTE_OFF],
                [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
                 NO_EVENT, NOTE_OFF]]
    melodies = melodies_lib.extract_melodies(sequence, steps_per_beat=1,
                                             min_bars=1, min_unique_pitches=2)

    self.assertEqual(2, len(melodies))
    self.assertTrue(isinstance(melodies[0], melodies_lib.Melody))
    self.assertTrue(isinstance(melodies[1], melodies_lib.Melody))

    melodies = sorted([list(melody) for melody in melodies])
    self.assertEqual(expected, melodies)
Example #39
def main(_):
  root = logging.getLogger()
  root.setLevel(logging.INFO)
  ch = logging.StreamHandler(sys.stdout)
  ch.setLevel(logging.INFO)
  root.addHandler(ch)

  if not os.path.isdir(FLAGS.output_dir):
    os.makedirs(FLAGS.output_dir)

  primer_sequence = midi_io.midi_file_to_sequence_proto(FLAGS.primer_midi)
  bpm = primer_sequence.tempos[0].bpm if len(primer_sequence.tempos) else 120.0

  extracted_melodies = melodies_lib.extract_melodies(primer_sequence,
                                                     min_bars=1,
                                                     min_unique_pitches=1)

  if not extracted_melodies:
    logging.info('No melodies were extracted from MIDI file %s'
                 % FLAGS.primer_midi)
    return

  graph = make_graph(hparams_string=FLAGS.hparams)

  checkpoint_dir = os.path.join(FLAGS.experiment_run_dir, 'train')
  
  generated = []
  while len(generated) < FLAGS.num_outputs:
    generated.extend(sampler_loop(graph, classes_to_melody,
                                  checkpoint_dir,
                                  extracted_melodies[0],
                                  FLAGS.num_steps))

  for i in xrange(FLAGS.num_outputs):
    sequence = generated[i].to_sequence(bpm=bpm)
    midi_io.sequence_proto_to_midi_file(
        sequence,
        os.path.join(FLAGS.output_dir, 'basic_rnn_sample_%d.mid' % i))

  logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs, FLAGS.output_dir)
Example #40
  def testExtractMelodiesSimple(self):
    self.quantized_sequence.steps_per_beat = 1
    testing_lib.add_quantized_track(
        self.quantized_sequence, 0,
        [(12, 100, 2, 4), (11, 1, 6, 7)])
    testing_lib.add_quantized_track(
        self.quantized_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 6, 8)])
    expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11,
                 NOTE_OFF],
                [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
                 NO_EVENT, NOTE_OFF]]
    melodies, _ = melodies_lib.extract_melodies(
        self.quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=2,
        ignore_polyphonic_notes=True)

    self.assertEqual(2, len(melodies))
    self.assertTrue(isinstance(melodies[0], melodies_lib.MonophonicMelody))
    self.assertTrue(isinstance(melodies[1], melodies_lib.MonophonicMelody))

    melodies = sorted([list(melody) for melody in melodies])
    self.assertEqual(expected, melodies)
Example #41
 def testExtractChordsForMelodies(self):
     self.quantized_sequence.steps_per_quarter = 1
     testing_lib.add_quantized_track(self.quantized_sequence, 0,
                                     [(12, 100, 2, 4), (11, 1, 6, 11)])
     testing_lib.add_quantized_track(self.quantized_sequence, 1,
                                     [(12, 127, 2, 4), (14, 50, 6, 8),
                                      (50, 100, 33, 37), (52, 100, 34, 37)])
     testing_lib.add_quantized_chords(self.quantized_sequence,
                                      [('C', 2), ('G7', 6), ('Cmaj7', 33)])
     melodies, _ = melodies_lib.extract_melodies(
         self.quantized_sequence,
         min_bars=1,
         gap_bars=2,
         min_unique_pitches=2,
         ignore_polyphonic_notes=True)
     chord_progressions, _ = chords_lib.extract_chords_for_melodies(
         self.quantized_sequence, melodies)
     expected = [[
         NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7', 'G7', 'G7',
         'G7'
     ], [NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],
                 ['G7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]
     self.assertEqual(expected,
                      [list(chords) for chords in chord_progressions])
Example #42
def run(melody_encoder_decoder, build_graph):
  """Generates melodies and saves them as MIDI files.

  Args:
    melody_encoder_decoder: A melodies_lib.MelodyEncoderDecoder object specific
        to your model.
    build_graph: A function that when called, returns the tf.Graph object for
        your model. The function will be passed the parameters:
        (mode, hparams_string, input_size, num_classes, sequence_example_file).
        For an example usage, see models/basic_rnn/basic_rnn_graph.py.
  """
  tf.logging.set_verbosity(tf.logging.INFO)

  if not FLAGS.run_dir:
    tf.logging.fatal('--run_dir required')
    return
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return

  FLAGS.run_dir = os.path.expanduser(FLAGS.run_dir)
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
  if FLAGS.primer_midi:
    FLAGS.primer_midi = os.path.expanduser(FLAGS.primer_midi)

  hparams = ast.literal_eval(FLAGS.hparams if FLAGS.hparams else '{}')
  hparams['batch_size'] = FLAGS.num_outputs
  hparams['dropout_keep_prob'] = 1.0
  hparams['temperature'] = FLAGS.temperature
  hparams_string = repr(hparams)

  graph = build_graph('generate',
                      hparams_string,
                      melody_encoder_decoder.input_size,
                      melody_encoder_decoder.num_classes)

  train_dir = os.path.join(FLAGS.run_dir, 'train')

  if not os.path.exists(FLAGS.output_dir):
    os.makedirs(FLAGS.output_dir)

  primer_melody = melodies_lib.MonophonicMelody()
  bpm = melodies_lib.DEFAULT_BEATS_PER_MINUTE
  if FLAGS.primer_melody:
    primer_melody.from_event_list(ast.literal_eval(FLAGS.primer_melody))
  elif FLAGS.primer_midi:
    primer_sequence = midi_io.midi_file_to_sequence_proto(FLAGS.primer_midi)
    quantized_sequence = sequences_lib.QuantizedSequence()
    quantized_sequence.from_note_sequence(primer_sequence,
                                          DEFAULT_STEPS_PER_BEAT)
    bpm = quantized_sequence.bpm
    extracted_melodies = melodies_lib.extract_melodies(
        quantized_sequence, min_bars=0, min_unique_pitches=1,
        gap_bars=float('inf'), ignore_polyphonic_notes=True)
    if extracted_melodies:
      primer_melody = extracted_melodies[0]
    else:
      tf.logging.info('No melodies were extracted from the MIDI file %s. '
                      'Melodies will be generated from scratch.',
                      FLAGS.primer_midi)

  run_generate(graph, train_dir, FLAGS.output_dir, melody_encoder_decoder,
               primer_melody, FLAGS.num_steps, bpm)
Example #43
    def _generate(self, generate_sequence_request):
        if len(generate_sequence_request.generator_options.generate_sections
               ) != 1:
            raise sequence_generator.SequenceGeneratorException(
                'This model supports only 1 generate_sections message, but got %s'
                % (len(generate_sequence_request.generator_options.
                       generate_sections)))

        generate_section = (
            generate_sequence_request.generator_options.generate_sections[0])
        primer_sequence = generate_sequence_request.input_sequence

        notes_by_end_time = sorted(primer_sequence.notes,
                                   key=lambda n: n.end_time)
        last_end_time = notes_by_end_time[
            -1].end_time if notes_by_end_time else 0
        if last_end_time > generate_section.start_time_seconds:
            raise sequence_generator.SequenceGeneratorException(
                'Got GenerateSection request for section that is before the end of '
                'the NoteSequence. This model can only extend sequences. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time_seconds,
                 notes_by_end_time[-1].end_time))

        # Quantize the priming sequence.
        quantized_sequence = sequences_lib.QuantizedSequence()
        quantized_sequence.from_note_sequence(primer_sequence,
                                              self._steps_per_beat)
        # Setting gap_bars to infinite ensures that the entire input will be used.
        extracted_melodies, _ = melodies_lib.extract_melodies(
            quantized_sequence,
            min_bars=0,
            min_unique_pitches=1,
            gap_bars=float('inf'),
            ignore_polyphonic_notes=True)
        assert len(extracted_melodies) <= 1

        bpm = (primer_sequence.tempos[0].bpm
               if primer_sequence and primer_sequence.tempos else
               melodies_lib.DEFAULT_BEATS_PER_MINUTE)
        start_step = self._seconds_to_steps(
            generate_section.start_time_seconds, bpm)
        end_step = self._seconds_to_steps(generate_section.end_time_seconds,
                                          bpm)

        if extracted_melodies and extracted_melodies[0]:
            melody = extracted_melodies[0]
        else:
            tf.logging.warn(
                'No melodies were extracted from the priming sequence. '
                'Melodies will be generated from scratch.')
            melody = melodies_lib.MonophonicMelody()
            melody.from_event_list([
                random.randint(self._melody_encoder_decoder.min_note,
                               self._melody_encoder_decoder.max_note)
            ])
            start_step += 1

        transpose_amount = melody.squash(
            self._melody_encoder_decoder.min_note,
            self._melody_encoder_decoder.max_note,
            self._melody_encoder_decoder.transpose_to_key)

        # Ensure that the melody extends up to the step we want to start generating.
        melody.set_length(start_step)

        inputs = self._session.graph.get_collection('inputs')[0]
        initial_state = self._session.graph.get_collection('initial_state')[0]
        final_state = self._session.graph.get_collection('final_state')[0]
        softmax = self._session.graph.get_collection('softmax')[0]

        final_state_ = None
        for i in range(end_step - len(melody)):
            if i == 0:
                inputs_ = self._melody_encoder_decoder.get_inputs_batch(
                    [melody], full_length=True)
                initial_state_ = self._session.run(initial_state)
            else:
                inputs_ = self._melody_encoder_decoder.get_inputs_batch(
                    [melody])
                initial_state_ = final_state_

            feed_dict = {inputs: inputs_, initial_state: initial_state_}
            final_state_, softmax_ = self._session.run([final_state, softmax],
                                                       feed_dict)
            self._melody_encoder_decoder.extend_melodies([melody], softmax_)

        melody.transpose(-transpose_amount)

        generate_response = generator_pb2.GenerateSequenceResponse()
        generate_response.generated_sequence.CopyFrom(
            melody.to_sequence(bpm=bpm))
        return generate_response
Example #44
  def _generate(self, generate_sequence_request):
    if len(generate_sequence_request.generator_options.generate_sections) != 1:
      raise sequence_generator.SequenceGeneratorException(
          'This model supports only 1 generate_sections message, but got %s' %
          (len(generate_sequence_request.generator_options.generate_sections)))

    generate_section = (
        generate_sequence_request.generator_options.generate_sections[0])
    primer_sequence = generate_sequence_request.input_sequence
    bpm = (primer_sequence.tempos[0].bpm if primer_sequence.tempos
           else melodies_lib.DEFAULT_BEATS_PER_MINUTE)

    notes_by_end_time = sorted(primer_sequence.notes, key=lambda n: n.end_time)
    last_end_time = notes_by_end_time[-1].end_time if notes_by_end_time else 0
    if last_end_time > generate_section.start_time_seconds:
      raise sequence_generator.SequenceGeneratorException(
          'Got GenerateSection request for section that is before the end of '
          'the NoteSequence. This model can only extend sequences. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time_seconds, notes_by_end_time[-1].end_time))

    # Quantize the priming sequence.
    quantized_sequence = sequences_lib.QuantizedSequence()
    quantized_sequence.from_note_sequence(
        primer_sequence, self._steps_per_beat)
    # Setting gap_bars to infinite ensures that the entire input will be used.
    extracted_melodies, _ = melodies_lib.extract_melodies(
        quantized_sequence, min_bars=0, min_unique_pitches=1,
        gap_bars=float('inf'), ignore_polyphonic_notes=True)
    assert len(extracted_melodies) <= 1

    if extracted_melodies and extracted_melodies[0].events:
      melody = extracted_melodies[0]
    else:
      tf.logging.warn('No melodies were extracted from the priming sequence. '
                      'Melodies will be generated from scratch.')
      melody = melodies_lib.MonophonicMelody()
      melody.events = [
          random.randint(self._melody_encoder_decoder.min_note,
                         self._melody_encoder_decoder.max_note)]

    transpose_amount = melody.squash(
        self._melody_encoder_decoder.min_note,
        self._melody_encoder_decoder.max_note,
        self._melody_encoder_decoder.transpose_to_key)

    start_step = self._seconds_to_steps(
        generate_section.start_time_seconds, bpm)
    end_step = self._seconds_to_steps(generate_section.end_time_seconds, bpm)

    # Ensure that the melody extends up to the step we want to start generating.
    melody.set_length(start_step)

    inputs = self._session.graph.get_collection('inputs')[0]
    initial_state = self._session.graph.get_collection('initial_state')[0]
    final_state = self._session.graph.get_collection('final_state')[0]
    softmax = self._session.graph.get_collection('softmax')[0]

    final_state_ = None
    for i in range(end_step - len(melody)):
      if i == 0:
        inputs_ = self._melody_encoder_decoder.get_inputs_batch(
            [melody], full_length=True)
        initial_state_ = self._session.run(initial_state)
      else:
        inputs_ = self._melody_encoder_decoder.get_inputs_batch([melody])
        initial_state_ = final_state_

      feed_dict = {inputs: inputs_, initial_state: initial_state_}
      final_state_, softmax_ = self._session.run(
          [final_state, softmax], feed_dict)
      self._melody_encoder_decoder.extend_melodies([melody], softmax_)

    melody.transpose(-transpose_amount)

    generate_response = generator_pb2.GenerateSequenceResponse()
    generate_response.generated_sequence.CopyFrom(melody.to_sequence(bpm=bpm))
    return generate_response
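Both `_generate` variants rely on a `_seconds_to_steps` helper that maps wall-clock time to quantized steps at the given tempo. A plausible sketch (an assumption, not the source):

def _seconds_to_steps(self, seconds, bpm):
  # Hypothetical conversion: seconds * beats-per-second * steps-per-beat.
  return int(seconds * (bpm / 60.0) * self._steps_per_beat)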