Beispiel #1
0
def test_melody_rnn_generation():
    """Synthesizing the primer writes primer.mid and synthesis.mid with expected note counts."""
    primer_midi = download(
        'https://s3.amazonaws.com/cadl/share/21150_Harry-Potter-and-the-Philosophers-Stone.mid')
    download('https://s3.amazonaws.com/cadl/models/basic_rnn.mag')

    magenta_utils.synthesize(primer_midi)

    # The primer written to disk should parse to exactly 14 notes.
    primer_path = 'primer.mid'
    assert os.path.exists(primer_path)
    primer_seq = midi_io.midi_file_to_sequence_proto(primer_path)
    assert len(primer_seq.notes) == 14

    # The generated synthesis should parse to exactly 243 notes.
    synthesis_path = 'synthesis.mid'
    assert os.path.exists(synthesis_path)
    synthesis_seq = midi_io.midi_file_to_sequence_proto(synthesis_path)
    assert len(synthesis_seq.notes) == 243
Beispiel #2
0
def test_convert_to_monophonic():
    """convert_to_monophonic leaves the primer with exactly 254 notes."""
    midi_path = download(
        'https://s3.amazonaws.com/cadl/share/21150_Harry-Potter-and-the-Philosophers-Stone.mid'
    )
    sequence = midi_io.midi_file_to_sequence_proto(midi_path)
    magenta_utils.convert_to_monophonic(sequence)
    assert len(sequence.notes) == 254
Beispiel #3
0
def test_harry_potter():
    """The downloaded primer parses to 282 notes and a single time signature."""
    midi_path = download(
        'https://s3.amazonaws.com/cadl/share/21150_Harry-Potter-and-the-Philosophers-Stone.mid'
    )
    sequence = midi_io.midi_file_to_sequence_proto(midi_path)
    assert len(sequence.notes) == 282
    assert len(sequence.time_signatures) == 1
Beispiel #4
0
def test_melody_rnn_generation():
    """Synthesis of the primer yields the expected primer/synthesis MIDI files."""
    primer_midi = download(
        'https://s3.amazonaws.com/cadl/share/21150_Harry-Potter-and-the-Philosophers-Stone.mid'
    )
    download('https://s3.amazonaws.com/cadl/models/basic_rnn.mag')

    magenta_utils.synthesize(primer_midi)

    # Each output file must exist and contain the expected number of notes.
    for out_path, expected_notes in (('primer.mid', 14), ('synthesis.mid', 243)):
        assert os.path.exists(out_path)
        sequence = midi_io.midi_file_to_sequence_proto(out_path)
        assert len(sequence.notes) == expected_notes
Beispiel #5
0
def midi_file_to_melody(midi_file,
                        steps_per_quarter=4,
                        qpm=None,
                        ignore_polyphonic_notes=True):
    """Loads a melody from a MIDI file.

  Args:
    midi_file: Absolute path to MIDI file.
    steps_per_quarter: Quantization of Melody. For example, 4 = 16th notes.
    qpm: Unused; kept for backward compatibility. Quantization reads tempo
        information from the sequence itself.
    ignore_polyphonic_notes: Only use the highest simultaneous note if True.

  Returns:
    A Melody object extracted from the MIDI file.
  """
    # NOTE(review): the original computed a local qpm fallback here but never
    # used it afterwards (quantize_note_sequence is called without it), so the
    # dead computation was removed. The parameter remains for callers.
    sequence = midi_io.midi_file_to_sequence_proto(midi_file)
    quantized_sequence = sequences_lib.quantize_note_sequence(
        sequence, steps_per_quarter=steps_per_quarter)
    melody = Melody()
    melody.from_quantized_sequence(
        quantized_sequence, ignore_polyphonic_notes=ignore_polyphonic_notes)
    return melody
Beispiel #6
0
def compare_directory(directory):
    """Compares each .abc tunebook in a directory to its abc2midi renderings.

    For every <name>.abc file, sibling files <name>1.mid, <name>2.mid, ... are
    collected, each tune is parsed, and its note count is compared against the
    corresponding MIDI rendering.

    Args:
      directory: Path containing .abc files and their numbered .mid renderings.

    Raises:
      ValueError: If the tune/midi counts disagree, or a tune's note count
          differs from its MIDI rendering.
    """
    files_in_dir = tf.gfile.ListDirectory(directory)
    for file_in_dir in files_in_dir:
        if not file_in_dir.endswith('.abc'):
            continue
        abc = os.path.join(directory, file_in_dir)

        # Collect the numbered MIDI renderings until one is missing.
        midis = {}
        ref_num = 1
        while True:
            midi = re.sub(r'\.abc$',
                          str(ref_num) + '.mid',
                          os.path.join(directory, file_in_dir))
            if not tf.gfile.Exists(midi):
                break
            midis[ref_num] = midi
            ref_num += 1

        print('parsing {}'.format(abc))
        tunes, exceptions = abc_parser.parse_tunebook_file(abc)
        if len(tunes) != len(midis) - len(exceptions):
            raise ValueError(
                'Different number of tunes and midis for {}'.format(abc))

        for tune in tunes.values():
            expanded_tune = sequences_lib.expand_section_groups(tune)
            midi_ns = midi_io.midi_file_to_sequence_proto(
                midis[tune.reference_number])
            # abc2midi adds a 1-tick delay to the start of every note, but we don't.
            tick_length = ((1 / (midi_ns.tempos[0].qpm / 60)) /
                           midi_ns.ticks_per_quarter)
            for note in midi_ns.notes:
                note.start_time -= tick_length
            # Fail loudly on mismatch. The original called pdb.set_trace()
            # here, which blocks forever in non-interactive runs.
            if len(midi_ns.notes) != len(expanded_tune.notes):
                raise ValueError(
                    'Note count mismatch for {}: midi has {} notes, tune has '
                    '{}'.format(abc, len(midi_ns.notes),
                                len(expanded_tune.notes)))
Beispiel #7
0
    def testIsDrumDetection(self):
        """Verify that is_drum instruments are properly tracked.

    self.midi_is_drum_filename is a MIDI file containing two tracks
    set to channel 9 (is_drum == True). Each contains one NoteOn. This
    test is designed to catch a bug where the second track would lose
    is_drum, remapping the drum track to an instrument track.
    """
        # Round-trip: parse the original file into a NoteSequence proto, write
        # it back out as MIDI, then compare channel-9 NoteOn counts between the
        # original and the round-tripped file.
        sequence_proto = midi_io.midi_file_to_sequence_proto(
            self.midi_is_drum_filename)
        with tempfile.NamedTemporaryFile(prefix='MidiDrumTest') as temp_file:
            midi_io.sequence_proto_to_midi_file(sequence_proto, temp_file.name)
            midi_data1 = mido.MidiFile(filename=self.midi_is_drum_filename)
            # Use the file object when writing to the tempfile
            # to avoid permission error.
            midi_data2 = mido.MidiFile(file=temp_file)

        # Count number of channel 9 Note Ons.
        channel_counts = [0, 0]
        for index, midi_data in enumerate([midi_data1, midi_data2]):
            for event in midi_data:
                # Exclude velocity-0 note_ons, which conventionally act as
                # note-offs in MIDI streams.
                if (event.type == 'note_on' and event.velocity > 0
                        and event.channel == 9):
                    channel_counts[index] += 1
        self.assertEqual(channel_counts, [2, 2])
Beispiel #8
0
def midi_file_to_drum_track(midi_file, steps_per_quarter=4, qpm=None):
  """Loads a drum track from a MIDI file.

  Args:
    midi_file: Absolute path to MIDI file.
    steps_per_quarter: Quantization of DrumTrack. For example, 4 = 16th notes.
    qpm: Tempo in quarters per a minute. If not set, tries to use the first
        tempo of the midi track and defaults to
        magenta.music.DEFAULT_QUARTERS_PER_MINUTE if fails.

  Returns:
    A DrumTrack object extracted from the MIDI file.
  """
  sequence = midi_io.midi_file_to_sequence_proto(midi_file)
  # Fall back to the sequence's first tempo, then to the library default.
  if qpm is None:
    if sequence.tempos:
      qpm = sequence.tempos[0].qpm
    else:
      qpm = constants.DEFAULT_QUARTERS_PER_MINUTE
  # Legacy quantization API: a QuantizedSequence is built in place, with qpm
  # assigned before from_note_sequence is called.
  # NOTE(review): newer sequences_lib versions replace this with
  # quantize_note_sequence -- confirm the pinned library version exposes it.
  quantized_sequence = sequences_lib.QuantizedSequence()
  quantized_sequence.qpm = qpm
  quantized_sequence.from_note_sequence(
      sequence, steps_per_quarter=steps_per_quarter)
  drum_track = DrumTrack()
  drum_track.from_quantized_sequence(quantized_sequence)
  return drum_track
Beispiel #9
0
def main(unused_argv):
    """Fine-aligns each .wav in FLAGS.input_dir with its same-named .mid.

    For every <name>.wav the matching <name>.mid is aligned via
    align_fine_lib.align_cpp and the result is written to FLAGS.output_dir
    under the same MIDI filename.
    """
    logging.set_verbosity(FLAGS.log)
    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    for input_file in sorted(os.listdir(FLAGS.input_dir)):
        if not input_file.endswith('.wav'):
            continue
        wav_filename = input_file
        # Swap only the suffix; str.replace would also rewrite a '.wav'
        # occurring earlier in the name (e.g. 'a.wav.take2.wav').
        midi_filename = input_file[:-len('.wav')] + '.mid'
        logging.info('Aligning %s to %s', midi_filename, wav_filename)

        samples = audio_io.load_audio(
            os.path.join(FLAGS.input_dir, wav_filename),
            align_fine_lib.SAMPLE_RATE)
        ns = midi_io.midi_file_to_sequence_proto(
            os.path.join(FLAGS.input_dir, midi_filename))

        aligned_ns, unused_stats = align_fine_lib.align_cpp(
            samples,
            align_fine_lib.SAMPLE_RATE,
            ns,
            align_fine_lib.CQT_HOP_LENGTH_FINE,
            sf2_path=FLAGS.sf2_path,
            penalty_mul=FLAGS.penalty_mul)

        midi_io.sequence_proto_to_midi_file(
            aligned_ns, os.path.join(FLAGS.output_dir, midi_filename))

    logging.info('Done')
Beispiel #10
0
def main(unused_argv):
  """Fine-aligns each .wav in FLAGS.input_dir with its same-named .mid.

  For every <name>.wav the matching <name>.mid is aligned via
  align_fine_lib.align_cpp and the result is written to FLAGS.output_dir
  under the same MIDI filename.
  """
  logging.set_verbosity(FLAGS.log)
  if not os.path.exists(FLAGS.output_dir):
    os.makedirs(FLAGS.output_dir)
  for input_file in sorted(os.listdir(FLAGS.input_dir)):
    if not input_file.endswith('.wav'):
      continue
    wav_filename = input_file
    # Swap only the suffix; str.replace would also rewrite a '.wav'
    # occurring earlier in the name (e.g. 'a.wav.take2.wav').
    midi_filename = input_file[:-len('.wav')] + '.mid'
    logging.info('Aligning %s to %s', midi_filename, wav_filename)

    samples = audio_io.load_audio(
        os.path.join(FLAGS.input_dir, wav_filename), align_fine_lib.SAMPLE_RATE)
    ns = midi_io.midi_file_to_sequence_proto(
        os.path.join(FLAGS.input_dir, midi_filename))

    aligned_ns, unused_stats = align_fine_lib.align_cpp(
        samples,
        align_fine_lib.SAMPLE_RATE,
        ns,
        align_fine_lib.CQT_HOP_LENGTH_FINE,
        sf2_path=FLAGS.sf2_path,
        penalty_mul=FLAGS.penalty_mul)

    midi_io.sequence_proto_to_midi_file(
        aligned_ns, os.path.join(FLAGS.output_dir, midi_filename))

  logging.info('Done')
Beispiel #11
0
  def testIsDrumDetection(self):
    """Verify that is_drum instruments are properly tracked.

    self.midi_is_drum_filename is a MIDI file containing two tracks
    set to channel 9 (is_drum == True). Each contains one NoteOn. This
    test is designed to catch a bug where the second track would lose
    is_drum, remapping the drum track to an instrument track.
    """
    # Round-trip: parse the original file into a NoteSequence proto, write it
    # back out as MIDI, then compare channel-9 NoteOn counts between the
    # original and the round-tripped file.
    sequence_proto = midi_io.midi_file_to_sequence_proto(
        self.midi_is_drum_filename)
    with tempfile.NamedTemporaryFile(prefix='MidiDrumTest') as temp_file:
      midi_io.sequence_proto_to_midi_file(sequence_proto, temp_file.name)
      midi_data1 = mido.MidiFile(filename=self.midi_is_drum_filename)
      # Use the file object when writing to the tempfile
      # to avoid permission error.
      midi_data2 = mido.MidiFile(file=temp_file)

    # Count number of channel 9 Note Ons.
    channel_counts = [0, 0]
    for index, midi_data in enumerate([midi_data1, midi_data2]):
      for event in midi_data:
        # Exclude velocity-0 note_ons, which conventionally act as note-offs.
        if (event.type == 'note_on' and
            event.velocity > 0 and event.channel == 9):
          channel_counts[index] += 1
    self.assertEqual(channel_counts, [2, 2])
Beispiel #12
0
def midi_file_to_melody(midi_file, steps_per_quarter=4, qpm=None,
                        ignore_polyphonic_notes=True):
  """Loads a melody from a MIDI file.

  Args:
    midi_file: Absolute path to MIDI file.
    steps_per_quarter: Quantization of Melody. For example, 4 = 16th notes.
    qpm: Unused; kept for backward compatibility. Quantization reads tempo
        information from the sequence itself.
    ignore_polyphonic_notes: Only use the highest simultaneous note if True.

  Returns:
    A Melody object extracted from the MIDI file.
  """
  # NOTE(review): the original computed a local qpm fallback here but never
  # used it afterwards (quantize_note_sequence is called without it), so the
  # dead computation was removed. The parameter remains for callers.
  sequence = midi_io.midi_file_to_sequence_proto(midi_file)
  quantized_sequence = sequences_lib.quantize_note_sequence(
      sequence, steps_per_quarter=steps_per_quarter)
  melody = Melody()
  melody.from_quantized_sequence(
      quantized_sequence, ignore_polyphonic_notes=ignore_polyphonic_notes)
  return melody
Beispiel #13
0
def midi_file_to_drum_track(midi_file, steps_per_quarter=4):
  """Extracts a DrumTrack from a MIDI file.

  Args:
    midi_file: Absolute path to MIDI file.
    steps_per_quarter: Quantization of DrumTrack. For example, 4 = 16th notes.

  Returns:
    A DrumTrack object extracted from the MIDI file.
  """
  note_sequence = midi_io.midi_file_to_sequence_proto(midi_file)
  quantized = sequences_lib.quantize_note_sequence(
      note_sequence, steps_per_quarter=steps_per_quarter)
  track = DrumTrack()
  track.from_quantized_sequence(quantized)
  return track
Beispiel #14
0
def midi_file_to_drum_track(midi_file, steps_per_quarter=4):
    """Reads a MIDI file and returns its drum content as a DrumTrack.

  Args:
    midi_file: Absolute path to MIDI file.
    steps_per_quarter: Quantization of DrumTrack. For example, 4 = 16th notes.

  Returns:
    A DrumTrack object extracted from the MIDI file.
  """
    # Parse, quantize, then lift the drum events into a DrumTrack.
    parsed = midi_io.midi_file_to_sequence_proto(midi_file)
    quantized = sequences_lib.quantize_note_sequence(
        parsed, steps_per_quarter=steps_per_quarter)
    result = DrumTrack()
    result.from_quantized_sequence(quantized)
    return result
Beispiel #15
0
    def compare_directory(self, directory):
        """Compares each .abc tunebook in `directory` to its abc2midi .mid files.

        For every <name>.abc, sibling files <name>1.mid, <name>2.mid, ... are
        collected and each parsed tune is compared note-for-note against the
        corresponding MIDI rendering.
        """
        self.maxDiff = None  # pylint: disable=invalid-name

        files_in_dir = tf.gfile.ListDirectory(directory)
        files_parsed = 0
        for file_in_dir in files_in_dir:
            if not file_in_dir.endswith('.abc'):
                continue
            abc = os.path.join(directory, file_in_dir)
            # Collect the numbered MIDI renderings until one is missing.
            midis = {}
            ref_num = 1
            while True:
                midi = re.sub(r'\.abc$',
                              str(ref_num) + '.mid',
                              os.path.join(directory, file_in_dir))
                if not tf.gfile.Exists(midi):
                    break
                midis[ref_num] = midi
                ref_num += 1

            print('parsing {}: {}'.format(files_parsed, abc))
            tunes, exceptions = abc_parser.parse_abc_tunebook_file(abc)
            files_parsed += 1
            self.assertEqual(len(tunes), len(midis) - len(exceptions))

            for tune in tunes.values():
                expanded_tune = sequences_lib.expand_section_groups(tune)
                midi_ns = midi_io.midi_file_to_sequence_proto(
                    midis[tune.reference_number])
                # abc2midi adds a 1-tick delay to the start of every note, but we don't.
                tick_length = ((1 / (midi_ns.tempos[0].qpm / 60)) /
                               midi_ns.ticks_per_quarter)
                for note in midi_ns.notes:
                    note.start_time -= tick_length
                    # For now, don't compare velocities.
                    note.velocity = 90
                # NOTE(review): pdb.set_trace() drops into the debugger on
                # mismatch before the assertion fires; this hangs
                # non-interactive CI runs -- confirm it is intentional.
                if len(midi_ns.notes) != len(expanded_tune.notes):
                    pdb.set_trace()
                    self.assertProtoEquals(midi_ns, expanded_tune)
                for midi_note, test_note in zip(midi_ns.notes,
                                                expanded_tune.notes):
                    try:
                        self.assertProtoEquals(midi_note, test_note)
                    except Exception as e:  # pylint: disable=broad-except
                        print(e)
                        pdb.set_trace()
                self.assertEqual(midi_ns.total_time, expanded_tune.total_time)
Beispiel #16
0
  def load_primer(self):
    """Loads default MIDI primer file.

    Also assigns the steps per bar of this file to be the model's defaults.
    """

    # Best-effort: missing primer logs a warning and leaves state untouched.
    if not os.path.exists(self.midi_primer):
      tf.logging.warn('ERROR! No such primer file exists! %s', self.midi_primer)
      return

    self.primer_sequence = midi_io.midi_file_to_sequence_proto(self.midi_primer)
    quantized_seq = sequences_lib.quantize_note_sequence(
        self.primer_sequence, steps_per_quarter=4)
    # min_bars=0 / min_unique_pitches=1 make extraction maximally permissive.
    extracted_melodies, _ = melody_pipelines.extract_melodies(
        quantized_seq, min_bars=0, min_unique_pitches=1)
    # NOTE(review): [0] raises IndexError if no melody was extracted --
    # confirm the primer file always yields at least one melody.
    self.primer = extracted_melodies[0]
    self.steps_per_bar = self.primer.steps_per_bar
Beispiel #17
0
  def compare_directory(self, directory):
    """Compares each .abc tunebook in `directory` to its abc2midi .mid files.

    For every <name>.abc, sibling files <name>1.mid, <name>2.mid, ... are
    collected and each parsed tune is compared note-for-note against the
    corresponding MIDI rendering.
    """
    self.maxDiff = None  # pylint: disable=invalid-name

    files_in_dir = tf.gfile.ListDirectory(directory)
    files_parsed = 0
    for file_in_dir in files_in_dir:
      if not file_in_dir.endswith('.abc'):
        continue
      abc = os.path.join(directory, file_in_dir)
      # Collect the numbered MIDI renderings until one is missing.
      midis = {}
      ref_num = 1
      while True:
        midi = re.sub(r'\.abc$', str(ref_num) + '.mid',
                      os.path.join(directory, file_in_dir))
        if not tf.gfile.Exists(midi):
          break
        midis[ref_num] = midi
        ref_num += 1

      print('parsing {}: {}'.format(files_parsed, abc))
      tunes, exceptions = abc_parser.parse_abc_tunebook_file(abc)
      files_parsed += 1
      self.assertEqual(len(tunes), len(midis) - len(exceptions))

      for tune in tunes.values():
        expanded_tune = sequences_lib.expand_section_groups(tune)
        midi_ns = midi_io.midi_file_to_sequence_proto(
            midis[tune.reference_number])
        # abc2midi adds a 1-tick delay to the start of every note, but we don't.
        tick_length = ((1 / (midi_ns.tempos[0].qpm / 60)) /
                       midi_ns.ticks_per_quarter)
        for note in midi_ns.notes:
          note.start_time -= tick_length
          # For now, don't compare velocities.
          note.velocity = 90
        # NOTE(review): pdb.set_trace() drops into the debugger on mismatch
        # before the assertion fires; this hangs non-interactive CI runs --
        # confirm it is intentional.
        if len(midi_ns.notes) != len(expanded_tune.notes):
          pdb.set_trace()
          self.assertProtoEquals(midi_ns, expanded_tune)
        for midi_note, test_note in zip(midi_ns.notes, expanded_tune.notes):
          try:
            self.assertProtoEquals(midi_note, test_note)
          except Exception as e:  # pylint: disable=broad-except
            print(e)
            pdb.set_trace()
        self.assertEqual(midi_ns.total_time, expanded_tune.total_time)
Beispiel #18
0
  def load_primer(self):
    """Loads default MIDI primer file.

    Also assigns the steps per bar of this file to be the model's defaults.
    """

    # Best-effort: missing primer logs a warning and leaves state untouched.
    if not os.path.exists(self.midi_primer):
      tf.logging.warn('ERROR! No such primer file exists! %s', self.midi_primer)
      return

    self.primer_sequence = midi_io.midi_file_to_sequence_proto(self.midi_primer)
    quantized_seq = sequences_lib.quantize_note_sequence(
        self.primer_sequence, steps_per_quarter=4)
    # min_bars=0 / min_unique_pitches=1 make extraction maximally permissive.
    extracted_melodies, _ = melodies_lib.extract_melodies(quantized_seq,
                                                          min_bars=0,
                                                          min_unique_pitches=1)
    # NOTE(review): [0] raises IndexError if no melody was extracted --
    # confirm the primer file always yields at least one melody.
    self.primer = extracted_melodies[0]
    self.steps_per_bar = self.primer.steps_per_bar
Beispiel #19
0
    def compareToAbc2midiAndMetadata(self, midi_path, expected_metadata,
                                     expected_expanded_metadata, test):
        """Compare parsing results to the abc2midi "reference" implementation.

        Args:
          midi_path: Path (relative to the test data directory) of the
              abc2midi rendering to compare against.
          expected_metadata: Expected section annotations/groups prior to
              section expansion.
          expected_expanded_metadata: Expected proto after expansion, with
              notes, total_time and time signatures excluded.
          test: The parsed sequence under test.
        """
        # Compare section annotations and groups before expanding.
        self.compareProtoList(expected_metadata.section_annotations,
                              test.section_annotations)
        self.compareProtoList(expected_metadata.section_groups,
                              test.section_groups)

        expanded_test = sequences_lib.expand_section_groups(test)

        abc2midi = midi_io.midi_file_to_sequence_proto(
            os.path.join(tf.resource_loader.get_data_files_path(), midi_path))

        # abc2midi adds a 1-tick delay to the start of every note, but we don't.
        # One tick in seconds = (seconds per quarter note) / (ticks per quarter).
        tick_length = ((1 / (abc2midi.tempos[0].qpm / 60)) /
                       abc2midi.ticks_per_quarter)

        for note in abc2midi.notes:
            note.start_time -= tick_length

        self.assertEqual(len(abc2midi.notes), len(expanded_test.notes))
        for exp_note, test_note in zip(abc2midi.notes, expanded_test.notes):
            # For now, don't compare velocities.
            exp_note.velocity = test_note.velocity
            self.assertProtoEquals(exp_note, test_note)
        self.assertEqual(abc2midi.total_time, expanded_test.total_time)

        self.compareProtoList(abc2midi.time_signatures,
                              expanded_test.time_signatures)

        # We've checked the notes and time signatures, now compare the rest of the
        # proto to the expected proto.
        expanded_test_copy = copy.deepcopy(expanded_test)
        del expanded_test_copy.notes[:]
        expanded_test_copy.ClearField('total_time')
        del expanded_test_copy.time_signatures[:]

        self.assertProtoEquals(expected_expanded_metadata, expanded_test_copy)
Beispiel #20
0
    def compareToAbc2midiAndMetadata(self, midi_path, expected_ns_metadata,
                                     test):
        """Compare parsing results to the abc2midi "reference" implementation.

        Args:
          midi_path: Path (relative to the test data directory) of the
              abc2midi rendering to compare against.
          expected_ns_metadata: Expected proto with notes and time signatures
              excluded.
          test: The parsed sequence under test. Mutated in place (notes and
              time signatures beyond the compared prefix are deleted).
        """
        abc2midi = midi_io.midi_file_to_sequence_proto(
            os.path.join(tf.resource_loader.get_data_files_path(), midi_path))

        # We don't yet support repeats, so just check the first 10 notes and only
        # the first time signature.
        del abc2midi.notes[10:]
        del test.notes[10:]
        del abc2midi.time_signatures[1:]
        del test.time_signatures[1:]

        # abc2midi adds a 1-tick delay to the start of every note, but we don't.
        # One tick in seconds = (seconds per quarter note) / (ticks per quarter).
        tick_length = ((1 / (abc2midi.tempos[0].qpm / 60)) /
                       abc2midi.ticks_per_quarter)

        for note in abc2midi.notes:
            note.start_time -= tick_length

        self.assertEqual(len(abc2midi.notes), len(test.notes))
        for exp_note, test_note in zip(abc2midi.notes, test.notes):
            # For now, don't compare velocities.
            exp_note.velocity = test_note.velocity
            self.assertProtoEquals(exp_note, test_note)

        self.assertEqual(len(abc2midi.time_signatures),
                         len(test.time_signatures))
        for exp_timesig, test_timesig in zip(abc2midi.time_signatures,
                                             test.time_signatures):
            self.assertProtoEquals(exp_timesig, test_timesig)

        # We've checked the notes and time signatures, now compare the rest of the
        # proto to the expected proto.
        test_copy = copy.deepcopy(test)
        del test_copy.notes[:]
        del test_copy.time_signatures[:]
        self.assertProtoEquals(expected_ns_metadata, test_copy)
Beispiel #21
0
  def compareToAbc2midiAndMetadata(self, midi_path, expected_metadata,
                                   expected_expanded_metadata, test):
    """Compare parsing results to the abc2midi "reference" implementation.

    Args:
      midi_path: Path (relative to the test data directory) of the abc2midi
          rendering to compare against.
      expected_metadata: Expected section annotations/groups prior to section
          expansion.
      expected_expanded_metadata: Expected proto after expansion, with notes,
          total_time and time signatures excluded.
      test: The parsed sequence under test.
    """
    # Compare section annotations and groups before expanding.
    self.compareProtoList(expected_metadata.section_annotations,
                          test.section_annotations)
    self.compareProtoList(expected_metadata.section_groups,
                          test.section_groups)

    expanded_test = sequences_lib.expand_section_groups(test)

    abc2midi = midi_io.midi_file_to_sequence_proto(
        os.path.join(tf.resource_loader.get_data_files_path(), midi_path))

    # abc2midi adds a 1-tick delay to the start of every note, but we don't.
    # One tick in seconds = (seconds per quarter note) / (ticks per quarter).
    tick_length = ((1 / (abc2midi.tempos[0].qpm / 60)) /
                   abc2midi.ticks_per_quarter)

    for note in abc2midi.notes:
      # For now, don't compare velocities.
      note.velocity = 90
      note.start_time -= tick_length

    self.compareProtoList(abc2midi.notes, expanded_test.notes)

    self.assertEqual(abc2midi.total_time, expanded_test.total_time)

    self.compareProtoList(abc2midi.time_signatures,
                          expanded_test.time_signatures)

    # We've checked the notes and time signatures, now compare the rest of the
    # proto to the expected proto.
    expanded_test_copy = copy.deepcopy(expanded_test)
    del expanded_test_copy.notes[:]
    expanded_test_copy.ClearField('total_time')
    del expanded_test_copy.time_signatures[:]

    self.assertProtoEquals(expected_expanded_metadata, expanded_test_copy)
Beispiel #22
0
def test_harry_potter():
    """The downloaded primer contains 282 notes and one time signature."""
    midi_path = download('https://s3.amazonaws.com/cadl/share/21150_Harry-Potter-and-the-Philosophers-Stone.mid')
    parsed = midi_io.midi_file_to_sequence_proto(midi_path)
    assert len(parsed.notes) == 282
    assert len(parsed.time_signatures) == 1
Beispiel #23
0
def test_convert_to_monophonic():
    """Monophonic conversion leaves the primer with exactly 254 notes."""
    midi_path = download('https://s3.amazonaws.com/cadl/share/21150_Harry-Potter-and-the-Philosophers-Stone.mid')
    parsed = midi_io.midi_file_to_sequence_proto(midi_path)
    magenta_utils.convert_to_monophonic(parsed)
    assert len(parsed.notes) == 254
def run_with_flags(melody_rnn_sequence_generator):
    """Generates melodies and saves them as MIDI files.

  Uses the options specified by the flags defined in this module. Intended to be
  called from the main function of one of the melody generator modules.

  Args:
    melody_rnn_sequence_generator: A MelodyRnnSequenceGenerator object specific
        to your model.
  """
    if not FLAGS.output_dir:
        tf.logging.fatal('--output_dir required')
        return

    FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
    if FLAGS.primer_midi:
        FLAGS.primer_midi = os.path.expanduser(FLAGS.primer_midi)

    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)

    # Build the priming sequence: --primer_melody (an event list literal) wins
    # over --primer_midi; with neither, generation starts from scratch.
    primer_sequence = None
    qpm = FLAGS.qpm if FLAGS.qpm else constants.DEFAULT_QUARTERS_PER_MINUTE
    if FLAGS.primer_melody:
        primer_melody = melodies_lib.MonophonicMelody()
        primer_melody.from_event_list(ast.literal_eval(FLAGS.primer_melody))
        primer_sequence = primer_melody.to_sequence(qpm=qpm)
    elif FLAGS.primer_midi:
        primer_sequence = midi_io.midi_file_to_sequence_proto(
            FLAGS.primer_midi)
        # Prefer the primer's own tempo over the flag/default when present.
        if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
            qpm = primer_sequence.tempos[0].qpm

    # Derive the total number of seconds to generate based on the QPM of the
    # priming sequence and the num_steps flag.
    total_seconds = _steps_to_seconds(FLAGS.num_steps, qpm)

    # Specify start/stop time for generation based on starting generation at the
    # end of the priming sequence and continuing until the sequence is num_steps
    # long.
    generate_request = generator_pb2.GenerateSequenceRequest()
    if primer_sequence:
        generate_request.input_sequence.CopyFrom(primer_sequence)
        generate_section = (
            generate_request.generator_options.generate_sections.add())
        # Set the start time to begin on the next step after the last note ends.
        notes_by_end_time = sorted(primer_sequence.notes,
                                   key=lambda n: n.end_time)
        last_end_time = notes_by_end_time[
            -1].end_time if notes_by_end_time else 0
        generate_section.start_time_seconds = last_end_time + _steps_to_seconds(
            1, qpm)
        generate_section.end_time_seconds = total_seconds

        # Bail out if the primer alone already covers the requested length.
        if generate_section.start_time_seconds >= generate_section.end_time_seconds:
            tf.logging.fatal(
                'Priming sequence is longer than the total number of steps '
                'requested: Priming sequence length: %s, Generation length '
                'requested: %s', generate_section.start_time_seconds,
                total_seconds)
            return
    else:
        # No primer: generate the full span and seed the tempo explicitly.
        generate_section = (
            generate_request.generator_options.generate_sections.add())
        generate_section.start_time_seconds = 0
        generate_section.end_time_seconds = total_seconds
        generate_request.input_sequence.tempos.add().qpm = qpm
    tf.logging.debug('generate_request: %s', generate_request)

    # Make the generate request num_outputs times and save the output as midi
    # files.
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    digits = len(str(FLAGS.num_outputs))
    for i in range(FLAGS.num_outputs):
        generate_response = melody_rnn_sequence_generator.generate(
            generate_request)

        midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
        midi_path = os.path.join(FLAGS.output_dir, midi_filename)
        midi_io.sequence_proto_to_midi_file(
            generate_response.generated_sequence, midi_path)

    tf.logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs,
                    FLAGS.output_dir)
Beispiel #25
0
def parse_midi_file(midi_file,
                    max_notes=float('Inf'),
                    max_time_signatures=1,
                    max_tempos=1,
                    ignore_polyphonic_notes=True,
                    convert_to_drums=False,
                    steps_per_quarter=16):
    """Parses a MIDI file into a quantized, melody-rendered sequence.

    Parameters
    ----------
    midi_file : str
        Absolute path to the MIDI file to parse.
    max_notes : float, optional
        Maximum number of notes to keep; trailing notes are dropped.
    max_time_signatures : int, optional
        Maximum number of time signatures to keep.
    max_tempos : int, optional
        Maximum number of tempos to keep.
    ignore_polyphonic_notes : bool, optional
        If True, collapse the sequence to monophonic and keep only the
        highest simultaneous note when extracting the melody.
    convert_to_drums : bool, optional
        If True, set every note's program to 10.
    steps_per_quarter : int, optional
        Quantization resolution (e.g. 4 = 16th notes).

    Returns
    -------
    tuple
        (seq, qpm): the extracted melody rendered back to a sequence, and
        the tempo in quarter notes per minute used for the rendering.
    """
    seq = midi_io.midi_file_to_sequence_proto(midi_file)

    # Trim trailing entries down to the configured maxima. Pop one at a time:
    # max_notes may be float('Inf'), which cannot be used as a slice bound.
    while len(seq.notes) > max_notes:
        seq.notes.pop()

    while len(seq.time_signatures) > max_time_signatures:
        seq.time_signatures.pop()

    while len(seq.tempos) > max_tempos:
        seq.tempos.pop()

    if convert_to_drums:
        # NOTE(review): this sets program 10 on each note but does not touch
        # any is_drum field -- confirm this is the intended drum conversion.
        for note in seq.notes:
            note.program = 10

    if ignore_polyphonic_notes:
        convert_to_monophonic(seq)

    seq = sequences_lib.quantize_note_sequence(
        seq, steps_per_quarter=steps_per_quarter)

    # Fall back to 120 QPM when the sequence carries no tempo information.
    qpm = seq.tempos[0].qpm if seq.tempos else 120

    melody = Melody()
    melody.from_quantized_sequence(
        seq, ignore_polyphonic_notes=ignore_polyphonic_notes)
    seq = melody.to_sequence(qpm=qpm)

    return seq, qpm