Example #1
    def to_midi(self, outputs, path_out_dir, filename='out.mid'):
        """
        outputs: one-hot encoded tensor (seq_len, vocab_size)
        """
        events = []
        for event_one_hot in outputs:  # for each event
            # Index of the maximum value in the one-hot vector.
            label = np.argmax(event_one_hot)
            events.append(
                self._encoder_decoder.class_index_to_event(label, events))

        print(events)

        mel_pred = note_seq.Melody(events)
        seq_pred = mel_pred.to_sequence()

        # os.path.join is safer than string concatenation here.
        path_out = os.path.join(path_out_dir, filename)
        midi_io.sequence_proto_to_midi_file(seq_pred, path_out)
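
A minimal usage sketch; `model`, its output shape, and the `converter` instance are hypothetical here, not part of the snippet above:

# Hypothetical usage: `model` returns a (seq_len, vocab_size) array of
# one-hot rows (or logits; np.argmax treats both the same).
outputs = model(primer_inputs)
converter.to_midi(outputs, path_out_dir='out', filename='pred.mid')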
Example #2
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise sequence_generator.SequenceGeneratorError(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise sequence_generator.SequenceGeneratorError(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    if input_sequence and input_sequence.tempos:
      qpm = input_sequence.tempos[0].qpm
    else:
      qpm = note_seq.DEFAULT_QUARTERS_PER_MINUTE
    steps_per_second = note_seq.steps_per_quarter_to_steps_per_second(
        self.steps_per_quarter, qpm)

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      input_section = generator_options.input_sections[0]
      primer_sequence = note_seq.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
      input_start_step = note_seq.quantize_to_step(
          input_section.start_time, steps_per_second, quantize_cutoff=0)
    else:
      primer_sequence = input_sequence
      input_start_step = 0

    if primer_sequence.notes:
      last_end_time = max(n.end_time for n in primer_sequence.notes)
    else:
      last_end_time = 0
    if last_end_time > generate_section.start_time:
      raise sequence_generator.SequenceGeneratorError(
          'Got GenerateSection request for section that is before the end of '
          'the NoteSequence. This model can only extend sequences. Requested '
          'start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming sequence.
    quantized_sequence = note_seq.quantize_note_sequence(
        primer_sequence, self.steps_per_quarter)
    # Setting gap_bars to infinite ensures that the entire input will be used.
    extracted_melodies, _ = melody_pipelines.extract_melodies(
        quantized_sequence, search_start_step=input_start_step, min_bars=0,
        min_unique_pitches=1, gap_bars=float('inf'),
        ignore_polyphonic_notes=True)
    assert len(extracted_melodies) <= 1

    start_step = note_seq.quantize_to_step(
        generate_section.start_time, steps_per_second, quantize_cutoff=0)
    # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
    # always rounds down. This avoids generating a sequence that ends at 5.0
    # seconds when the requested end time is 4.99.
    end_step = note_seq.quantize_to_step(
        generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

    if extracted_melodies and extracted_melodies[0]:
      melody = extracted_melodies[0]
    else:
      # If no melody could be extracted, create an empty melody that starts 1
      # step before the request start_step. This will result in 1 step of
      # silence when the melody is extended below.
      steps_per_bar = int(
          note_seq.steps_per_bar_in_quantized_sequence(quantized_sequence))
      melody = note_seq.Melody([],
                               start_step=max(0, start_step - 1),
                               steps_per_bar=steps_per_bar,
                               steps_per_quarter=self.steps_per_quarter)

    # Ensure that the melody extends up to the step we want to start generating.
    melody.set_length(start_step - melody.start_step)

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    generated_melody = self._model.generate_melody(
        end_step - melody.start_step, melody, **args)
    generated_sequence = generated_melody.to_sequence(qpm=qpm)
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
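
For orientation, `_generate` is normally reached through the public `generate()` wrapper. A hedged sketch of such a call, mirroring Example #4 below (the initialized `generator` is assumed):

# Sketch: assumes `generator` is an initialized MelodyRnnSequenceGenerator.
input_sequence = music_pb2.NoteSequence()
input_sequence.tempos.add().qpm = 120
options = generator_pb2.GeneratorOptions()
options.generate_sections.add(start_time=0, end_time=30)
options.args['temperature'].float_value = 1.0
sequence = generator.generate(input_sequence, options)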
Example #3
# OR, if using batches (NOT SURE HOW TO USE / IF NEEDED - no labels? See magenta.models.shared.events_rnn_model.py)
melody2 = melodies_lib.midi_file_to_melody(input_file2)
melody2.squash(
    min_note,
    max_note,
    transpose_to_key)

inputs_batch = mel_encoder.get_inputs_batch([melody1, melody2], full_length=True)

events = []
for label in labels:
    events.append(mel_encoder.class_index_to_event(label, events))

print(events)

mel_pred = note_seq.Melody(events)
seq_pred = mel_pred.to_sequence()
midi_io.sequence_proto_to_midi_file(seq_pred, out_file1_pred)



# events = [100, 100, 107, 111, NO_EVENT, 99, 112, NOTE_OFF, NO_EVENT]
# melody = melodies_lib.Melody(events)
# melody.squash(
#     self.min_note,
#     self.max_note,
#     self.transpose_to_key)
# inputs, labels = self.med.encode(melody)
# expected_inputs = [
#     [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
#     [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
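
One plausible way to obtain the `labels` questioned above, assuming a model forward pass that yields per-step logits over the event vocabulary (`model` is hypothetical, not a Magenta API):

logits = model(inputs_batch)         # hypothetical: (seq_len, vocab_size)
labels = np.argmax(logits, axis=-1)  # most likely class per step, as in Example #1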
Example #4
def run_with_flags(generator):
  """Generates melodies and saves them as MIDI files.

  Uses the options specified by the flags defined in this module.

  Args:
    generator: The MelodyRnnSequenceGenerator to use for generation.
  """
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)

  primer_midi = None
  if FLAGS.primer_midi:
    primer_midi = os.path.expanduser(FLAGS.primer_midi)

  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)

  primer_sequence = None
  qpm = FLAGS.qpm if FLAGS.qpm else note_seq.DEFAULT_QUARTERS_PER_MINUTE
  if FLAGS.primer_melody:
    primer_melody = note_seq.Melody(ast.literal_eval(FLAGS.primer_melody))
    primer_sequence = primer_melody.to_sequence(qpm=qpm)
  elif primer_midi:
    primer_sequence = note_seq.midi_file_to_sequence_proto(primer_midi)
    if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
      qpm = primer_sequence.tempos[0].qpm
  else:
    tf.logging.warning(
        'No priming sequence specified. Defaulting to a single middle C.')
    primer_melody = note_seq.Melody([60])
    primer_sequence = primer_melody.to_sequence(qpm=qpm)

  # Derive the total number of seconds to generate based on the QPM of the
  # priming sequence and the num_steps flag.
  seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
  total_seconds = FLAGS.num_steps * seconds_per_step

  # Specify start/stop time for generation based on starting generation at the
  # end of the priming sequence and continuing until the sequence is num_steps
  # long.
  generator_options = generator_pb2.GeneratorOptions()
  if primer_sequence:
    input_sequence = primer_sequence
    # Set the start time to begin on the next step after the last note ends.
    if primer_sequence.notes:
      last_end_time = max(n.end_time for n in primer_sequence.notes)
    else:
      last_end_time = 0
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    if generate_section.start_time >= generate_section.end_time:
      tf.logging.fatal(
          'Priming sequence is longer than the total number of steps '
          'requested: Priming sequence length: %s, Generation length '
          'requested: %s',
          generate_section.start_time, total_seconds)
      return
  else:
    input_sequence = music_pb2.NoteSequence()
    input_sequence.tempos.add().qpm = qpm
    generate_section = generator_options.generate_sections.add(
        start_time=0,
        end_time=total_seconds)
  generator_options.args['temperature'].float_value = FLAGS.temperature
  generator_options.args['beam_size'].int_value = FLAGS.beam_size
  generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
  generator_options.args[
      'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
  tf.logging.debug('input_sequence: %s', input_sequence)
  tf.logging.debug('generator_options: %s', generator_options)

  # Make the generate request num_outputs times and save the output as midi
  # files.
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
  digits = len(str(FLAGS.num_outputs))
  for i in range(FLAGS.num_outputs):
    generated_sequence = generator.generate(input_sequence, generator_options)

    midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
    midi_path = os.path.join(FLAGS.output_dir, midi_filename)
    note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

  tf.logging.info('Wrote %d MIDI files to %s',
                  FLAGS.num_outputs, FLAGS.output_dir)
Example #5
def melody_seq_to_midi(event_seq, midi_file_path, qpm):
    note_events = convert_to_note_events(event_seq)
    output_sequence = note_seq.Melody(note_events).to_sequence(qpm=qpm)
    note_seq.midi_io.note_sequence_to_midi_file(output_sequence,
                                                midi_file_path)
    print("wrote midi output to {}".format(midi_file_path))
Example #6
def run_with_flags(generator):
  """Generates polyphonic tracks and saves them as MIDI files.

  Uses the options specified by the flags defined in this module.

  Args:
    generator: The PolyphonyRnnSequenceGenerator to use for generation.
  """
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return
  output_dir = os.path.expanduser(FLAGS.output_dir)

  primer_midi = None
  if FLAGS.primer_midi:
    primer_midi = os.path.expanduser(FLAGS.primer_midi)

  if not tf.gfile.Exists(output_dir):
    tf.gfile.MakeDirs(output_dir)

  primer_sequence = None
  qpm = FLAGS.qpm if FLAGS.qpm else note_seq.DEFAULT_QUARTERS_PER_MINUTE
  if FLAGS.primer_pitches:
    primer_sequence = music_pb2.NoteSequence()
    primer_sequence.tempos.add().qpm = qpm
    primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ
    for pitch in ast.literal_eval(FLAGS.primer_pitches):
      note = primer_sequence.notes.add()
      note.start_time = 0
      note.end_time = 60.0 / qpm
      note.pitch = pitch
      note.velocity = 100
    primer_sequence.total_time = primer_sequence.notes[-1].end_time
  elif FLAGS.primer_melody:
    primer_melody = note_seq.Melody(ast.literal_eval(FLAGS.primer_melody))
    primer_sequence = primer_melody.to_sequence(qpm=qpm)
  elif primer_midi:
    primer_sequence = note_seq.midi_file_to_sequence_proto(primer_midi)
    if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
      qpm = primer_sequence.tempos[0].qpm
  else:
    tf.logging.warning(
        'No priming sequence specified. Defaulting to empty sequence.')
    primer_sequence = music_pb2.NoteSequence()
    primer_sequence.tempos.add().qpm = qpm
    primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ

  # Derive the total number of seconds to generate.
  seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
  generate_end_time = FLAGS.num_steps * seconds_per_step

  # Specify start/stop time for generation based on starting generation at the
  # end of the priming sequence and continuing until the sequence is num_steps
  # long.
  generator_options = generator_pb2.GeneratorOptions()
  # Set the start time to begin when the last note ends.
  generate_section = generator_options.generate_sections.add(
      start_time=primer_sequence.total_time,
      end_time=generate_end_time)

  if generate_section.start_time >= generate_section.end_time:
    tf.logging.fatal(
        'Priming sequence is longer than the total number of steps '
        'requested: Priming sequence length: %s, Total length '
        'requested: %s',
        generate_section.start_time, generate_end_time)
    return

  generator_options.args['temperature'].float_value = FLAGS.temperature
  generator_options.args['beam_size'].int_value = FLAGS.beam_size
  generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
  generator_options.args[
      'steps_per_iteration'].int_value = FLAGS.steps_per_iteration

  generator_options.args['condition_on_primer'].bool_value = (
      FLAGS.condition_on_primer)
  generator_options.args['no_inject_primer_during_generation'].bool_value = (
      not FLAGS.inject_primer_during_generation)

  tf.logging.debug('primer_sequence: %s', primer_sequence)
  tf.logging.debug('generator_options: %s', generator_options)

  # Make the generate request num_outputs times and save the output as midi
  # files.
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
  digits = len(str(FLAGS.num_outputs))
  for i in range(FLAGS.num_outputs):
    generated_sequence = generator.generate(primer_sequence, generator_options)

    midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
    midi_path = os.path.join(output_dir, midi_filename)
    note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

  tf.logging.info('Wrote %d MIDI files to %s',
                  FLAGS.num_outputs, output_dir)
Example #7
    def run(self):
        """Generates polyphonic tracks and saves them as MIDI files.

        Uses the options specified by MusicGeneratorSettings and the
        generator stored on this instance.
        """
        primer_melody = self._get_primer_melody()

        output_dir = os.path.expanduser(MusicGeneratorSettings.output_dir)

        if not tf.gfile.Exists(output_dir):
            tf.gfile.MakeDirs(output_dir)

        for i in os.listdir(output_dir):
            os.remove(os.path.join(output_dir, i))

        qpm = MusicGeneratorSettings.qpm

        primer_melody = note_seq.Melody(ast.literal_eval(primer_melody))
        primer_sequence = primer_melody.to_sequence(qpm=qpm)

        # Derive the total number of seconds to generate.
        seconds_per_step = 60.0 / qpm / self.generator.steps_per_quarter
        generate_end_time = MusicGeneratorSettings.num_steps * seconds_per_step

        # Specify start/stop time for generation based on starting generation at the
        # end of the priming sequence and continuing until the sequence is num_steps
        # long.
        generator_options = generator_pb2.GeneratorOptions()
        # Set the start time to begin when the last note ends.
        generator_options.generate_sections.add(
            start_time=primer_sequence.total_time, end_time=generate_end_time)

        generator_options.args[
            'temperature'].float_value = MusicGeneratorSettings.temperature
        generator_options.args[
            'beam_size'].int_value = MusicGeneratorSettings.beam_size
        generator_options.args[
            'branch_factor'].int_value = MusicGeneratorSettings.branch_factor
        generator_options.args[
            'steps_per_iteration'].int_value = MusicGeneratorSettings.steps_per_iteration
        generator_options.args[
            'condition_on_primer'].bool_value = MusicGeneratorSettings.condition_on_primer
        generator_options.args[
            'no_inject_primer_during_generation'].bool_value = not MusicGeneratorSettings.inject_primer_during_generation

        # Make the generate request num_outputs times and save the output as midi
        # files.
        digits = len(str(MusicGeneratorSettings.num_outputs))
        for i in range(MusicGeneratorSettings.num_outputs):
            generated_sequence = self.generator.generate(
                primer_sequence, generator_options)
            midi_filename = str(i + 1).zfill(digits) + ".mid"
            midi_path = os.path.join(output_dir, midi_filename)
            note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

            if i == 0:
                threading.Thread(target=self.player.play).start()
            elif i == 1:
                self.player.enqueue(midi_path)

        tf.logging.info('Wrote %d MIDI files to %s',
                        MusicGeneratorSettings.num_outputs, output_dir)
Example #8
if melody == 'Upload your own!':
  # Extract the melody line from the uploaded MIDI file. (Reconstructed
  # opening: the original snippet begins mid-expression; names follow the
  # Piano Transformer colab this fragment appears to come from.)
  melody_ns = note_seq.midi_file_to_note_sequence(uploaded_mid)
  melody_instrument = note_seq.infer_melody_for_sequence(melody_ns)
  notes = [note for note in melody_ns.notes
           if note.instrument == melody_instrument]
  del melody_ns.notes[:]
  melody_ns.notes.extend(
      sorted(notes, key=lambda note: note.start_time))
  for i in range(len(melody_ns.notes) - 1):
    melody_ns.notes[i].end_time = melody_ns.notes[i + 1].start_time
  inputs = melody_conditioned_encoders['inputs'].encode_note_sequence(
      melody_ns)
else:
  # Use one of the provided melodies.
  events = [event + 12 if event != note_seq.MELODY_NO_EVENT else event
            for e in melodies[melody]
            for event in [e] + event_padding]
  inputs = melody_conditioned_encoders['inputs'].encode(
      ' '.join(str(e) for e in events))
  melody_ns = note_seq.Melody(events).to_sequence(qpm=150)

# Play and plot the melody.
note_seq.play_sequence(
    melody_ns,
    synth=note_seq.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
note_seq.plot_sequence(melody_ns)

#@title Generate Accompaniment for Melody
#@markdown Generate a piano performance consisting of the chosen
#@markdown melody plus accompaniment.

# Generate sample events.
decode_length = 4096
sample_ids = next(melody_conditioned_samples)['outputs']
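
The sampled ids are then decoded back into a NoteSequence, as Example #11 does with the colab's own `decode` helper (sketched here under that assumption):

midi_filename = decode(
    sample_ids, encoder=melody_conditioned_encoders['targets'])
accompaniment_ns = note_seq.midi_file_to_note_sequence(midi_filename)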
Example #9
def run_with_flags(generator):
    """Generates performance tracks and saves them as MIDI files.

    Uses the options specified by the flags defined in this module.

    Args:
      generator: The PerformanceRnnSequenceGenerator to use for generation.
    """
    if not FLAGS.output_dir:
        tf.logging.fatal('--output_dir required')
        return
    output_dir = os.path.expanduser(FLAGS.output_dir)

    primer_midi = None
    if FLAGS.primer_midi:
        primer_midi = os.path.expanduser(FLAGS.primer_midi)

    if not tf.gfile.Exists(output_dir):
        tf.gfile.MakeDirs(output_dir)

    primer_sequence = None
    if FLAGS.primer_pitches:
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ
        for pitch in ast.literal_eval(FLAGS.primer_pitches):
            note = primer_sequence.notes.add()
            note.start_time = 0
            note.end_time = 60.0 / note_seq.DEFAULT_QUARTERS_PER_MINUTE
            note.pitch = pitch
            note.velocity = 100
            primer_sequence.total_time = note.end_time
    elif FLAGS.primer_melody:
        primer_melody = note_seq.Melody(ast.literal_eval(FLAGS.primer_melody))
        primer_sequence = primer_melody.to_sequence()
    elif primer_midi:
        primer_sequence = note_seq.midi_file_to_sequence_proto(primer_midi)
    else:
        tf.logging.warning(
            'No priming sequence specified. Defaulting to empty sequence.')
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ

    # Derive the total number of seconds to generate.
    seconds_per_step = 1.0 / generator.steps_per_second
    generate_end_time = FLAGS.num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at the
    # end of the priming sequence and continuing until the sequence is num_steps
    # long.
    generator_options = generator_pb2.GeneratorOptions()
    # Set the start time to begin when the last note ends.
    generate_section = generator_options.generate_sections.add(
        start_time=primer_sequence.total_time, end_time=generate_end_time)

    if generate_section.start_time >= generate_section.end_time:
        tf.logging.fatal(
            'Priming sequence is longer than the total number of steps '
            'requested: Priming sequence length: %s, Total length '
            'requested: %s', generate_section.start_time, generate_end_time)
        return

    for control_cls in note_seq.all_performance_control_signals:
        if FLAGS[control_cls.name].value is not None and (
                generator.control_signals is None
                or not any(control.name == control_cls.name
                           for control in generator.control_signals)):
            tf.logging.warning(
                'Control signal requested via flag, but generator is not set up to '
                'condition on this control signal. Request will be ignored: %s = %s',
                control_cls.name, FLAGS[control_cls.name].value)

    if (FLAGS.disable_conditioning is not None
            and not generator.optional_conditioning):
        tf.logging.warning(
            'Disable conditioning flag set, but generator is not set up for '
            'optional conditioning. Requested disable conditioning flag will be '
            'ignored: %s', FLAGS.disable_conditioning)

    if generator.control_signals:
        for control in generator.control_signals:
            if FLAGS[control.name].value is not None:
                generator_options.args[control.name].string_value = (
                    FLAGS[control.name].value)
    if FLAGS.disable_conditioning is not None:
        generator_options.args['disable_conditioning'].string_value = (
            FLAGS.disable_conditioning)

    generator_options.args['temperature'].float_value = FLAGS.temperature
    generator_options.args['beam_size'].int_value = FLAGS.beam_size
    generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
    generator_options.args[
        'steps_per_iteration'].int_value = FLAGS.steps_per_iteration

    tf.logging.debug('primer_sequence: %s', primer_sequence)
    tf.logging.debug('generator_options: %s', generator_options)

    # Make the generate request num_outputs times and save the output as midi
    # files.
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    digits = len(str(FLAGS.num_outputs))
    for i in range(FLAGS.num_outputs):
        generated_sequence = generator.generate(primer_sequence,
                                                generator_options)

        midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
        midi_path = os.path.join(output_dir, midi_filename)
        note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

    tf.logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs, output_dir)
Example #10
def events_to_melody(events, path_out=None):
    seq = note_seq.Melody(events)
    if path_out is not None:
        midi_io.sequence_proto_to_midi_file(seq.to_sequence(), path_out)
    return seq
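
For instance (note_seq melody event semantics: -2 sustains the previous event, -1 is a note-off):

mel = events_to_melody([60, -2, -2, 64, -1], path_out='out.mid')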
Example #11
def generate_midi(midi_input):
    # `inputs` and `decode_length` live at module scope (the source colab
    # initializes them as `inputs = []` and `decode_length = 0`); declare
    # them global so the assignments below are seen by input_generator.
    global inputs, decode_length

    # Create input generator.
    def input_generator():
        while True:
            yield {
                'inputs': np.array([[inputs]], dtype=np.int32),
                'targets': np.zeros([1, 0], dtype=np.int32),
                'decode_length': np.array(decode_length, dtype=np.int32)
            }


    # Start the Estimator, loading from the specified checkpoint.
    input_fn = decoding.make_input_fn_from_generator(input_generator())
    melody_conditioned_samples = estimator.predict(
        input_fn, checkpoint_path=ckpt_path)

    # "Burn" one.
    _ = next(melody_conditioned_samples)

    #@title Choose Melody
    #@markdown Here you can choose a melody to be accompanied by the
    #@markdown model.  We have provided a few, or you can upload a
    #@markdown MIDI file; if your MIDI file is polyphonic, the notes
    #@markdown with highest pitch will be used as the melody.

    # Tokens to insert between melody events.

    event_padding = 2 * [note_seq.MELODY_NO_EVENT]
    events = [event + 12 if event != note_seq.MELODY_NO_EVENT else event
              for e in midi_input
              for event in [e] + event_padding]
    inputs = melody_conditioned_encoders['inputs'].encode(
        ' '.join(str(e) for e in events))
    melody_ns = note_seq.Melody(events).to_sequence(qpm=150)


    targets = []
    decode_length = 4096
    # decode_length = np.random.randint(len(inputs)*3,len(inputs)*5)
    # print(((decode_length) - len(inputs))/len(inputs))
    sample_ids = next(melody_conditioned_samples)['outputs']

    # Decode to NoteSequence.
    midi_filename = decode(
        sample_ids,
        encoder=melody_conditioned_encoders['targets'])
    accompaniment_ns = note_seq.midi_file_to_note_sequence(midi_filename)

    # Play and plot the melody.
    note_seq.play_sequence(
        melody_ns,
        synth=note_seq.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
    note_seq.plot_sequence(melody_ns)
Example #12
def generate(bundle_name: str,
             sequence_generator,
             generator_id: str,
             primer_melody: str,
             qpm: float = DEFAULT_QUARTERS_PER_MINUTE,
             total_length_steps: int = 64,
             temperature: float = 1.0,
             beam_size: int = 1,
             branch_factor: int = 1,
             steps_per_iteration: int = 128) -> NoteSequence:
    """Generates and returns a new sequence given the sequence generator.

    Uses the bundle name to download the bundle into the "bundles" directory
    if it doesn't already exist, then uses the sequence generator and the
    generator id to get the generator. Parameters can be provided for the
    generation phase. The MIDI and plot files are written to disk in the
    "output" folder, with the filename pattern
    "<generator_name>_<generator_id>_<date_time>" and "mid" or "html" as the
    extension respectively.

    :param bundle_name: The bundle name to be downloaded and generated with.

    :param sequence_generator: The sequence generator module, which is the
        Python module in the corresponding models subfolder.

    :param generator_id: The id of the generator configuration, which is the
        model's configuration.

    :param primer_melody: The primer melody as a string holding a Python list
        of events (parsed with ast.literal_eval).

    :param qpm: The QPM for the generated sequence. If a primer is provided,
        the primer QPM will be used and this parameter ignored.

    :param total_length_steps: The total length of the sequence, which
        contains the added length of the primer and the generated sequence
        together. This value needs to be bigger than the primer length in
        bars.

    :param temperature: The temperature value for the generation algorithm;
        less than 1 is less random (closer to the primer), greater than 1 is
        more random.

    :param beam_size: The beam size for the generation algorithm; a bigger
        beam size means the algorithm keeps more candidate sequences at each
        iteration, meaning a less random sequence at the cost of more time.

    :param branch_factor: The branch factor for the generation algorithm;
        a bigger branch factor means more new candidates are branched from
        each kept sequence at each iteration, at the cost of more time.

    :param steps_per_iteration: The number of steps the generation algorithm
        generates at each iteration; more steps per iteration means fewer
        iterations in total because more steps get generated each time.

    :return: The generated NoteSequence.
    """

    # Download the bundle from the Magenta website; a bundle (.mag file) is a
    # trained model used by Magenta.
    mm.notebook_utils.download_bundle(bundle_name, "bundles")
    bundle = sequence_generator_bundle.read_bundle_file(
        os.path.join("bundles", bundle_name))

    # Initialize the generator from the generator id; it needs to match the
    # bundle downloaded above, and selects the model's configuration.
    generator_map = sequence_generator.get_generator_map()
    generator = generator_map[generator_id](checkpoint=None, bundle=bundle)
    generator.initialize()

    # # Gets the primer sequence that is fed into the model for the generator,
    # # which will generate a sequence based on this one.
    # # If no primer sequence is given, the primer sequence is initialized
    # # to an empty note sequence
    # if primer_filename:
    #   primer_sequence = mm.midi_io.midi_file_to_note_sequence(
    #     os.path.join("primers", primer_filename))
    # else:
    #   primer_sequence = NoteSequence()

    # cheated to just take in the sequence directly as the list
    # inspired by: https://github.com/magenta/magenta/blob/master/magenta/models/melody_rnn/melody_rnn_generate.py
    primer_melody_ns = note_seq.Melody(ast.literal_eval(primer_melody))
    primer_sequence = primer_melody_ns.to_sequence(qpm=qpm)

    # Get the QPM from the primer sequence; if the primer has no tempo, keep
    # the qpm parameter, which defaults to Magenta's default.
    if primer_sequence.tempos:
        if len(primer_sequence.tempos) > 1:
            raise Exception("No support for multiple tempos")
        qpm = primer_sequence.tempos[0].qpm

    # # Calculates the seconds per 1 step, which changes depending on the QPM value
    # # (steps per quarter in generators are mostly 4)
    # seconds_per_step = 60.0 / qpm / getattr(generator, "steps_per_quarter", 4)

    # # Calculates the primer sequence length in steps and time by taking the
    # # total time (which is the end of the last note) and finding the next step
    # # start time.
    # primer_sequence_length_steps = math.ceil(primer_sequence.total_time
    #                                          / seconds_per_step)
    # primer_sequence_length_time = primer_sequence_length_steps * seconds_per_step

    # # Calculates the start and the end of the primer sequence.
    # # We add a negative delta to the end, because if we don't some generators
    # # won't start the generation right at the beginning of the bar, they will
    # # start at the next step, meaning we'll have a small gap between the primer
    # # and the generated sequence.
    # primer_end_adjust = (0.00001 if primer_sequence_length_time > 0 else 0)
    # primer_start_time = 0
    # primer_end_time = (primer_start_time
    #                    + primer_sequence_length_time
    #                    - primer_end_adjust)

    # # Calculates the generation time by taking the total time and subtracting
    # # the primer time. The resulting generation time needs to be bigger than zero.
    # generation_length_steps = total_length_steps - primer_sequence_length_steps
    # if generation_length_steps <= 0:
    #   raise Exception("Total length in steps too small "
    #                   + "(" + str(total_length_steps) + ")"
    #                   + ", needs to be at least one bar bigger than primer "
    #                   + "(" + str(primer_sequence_length_steps) + ")")
    # generation_length_time = generation_length_steps * seconds_per_step

    # # Calculates the generate start and end time, the start time will contain
    # # the previously added negative delta from the primer end time.
    # # We remove the generation end time delta to end the generation
    # # on the last bar.
    # generation_start_time = primer_end_time
    # generation_end_time = (generation_start_time
    #                        + generation_length_time
    #                        + primer_end_adjust)

    # # # Showtime
    # # print(f"Primer time: [{primer_start_time}, {primer_end_time}]")
    # # print(f"Generation time: [{generation_start_time}, {generation_end_time}]")

    # # Pass the given parameters, the generator options are common for all models
    # generator_options = GeneratorOptions()
    # generator_options.args['temperature'].float_value = temperature
    # generator_options.args['beam_size'].int_value = beam_size
    # generator_options.args['branch_factor'].int_value = branch_factor
    # generator_options.args['steps_per_iteration'].int_value = steps_per_iteration
    # generator_options.generate_sections.add(
    #   start_time=generation_start_time,
    #   end_time=generation_end_time)

    # Derive the total number of seconds to generate based on the QPM of the
    # priming sequence and the num_steps flag.
    seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
    total_seconds = total_length_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at the
    # end of the priming sequence and continuing until the sequence is num_steps
    # long.
    generator_options = GeneratorOptions()
    if primer_sequence:
        input_sequence = primer_sequence
        # Set the start time to begin on the next step after the last note ends.
        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0
        generate_section = generator_options.generate_sections.add(
            start_time=last_end_time + seconds_per_step,
            end_time=total_seconds)

        if generate_section.start_time >= generate_section.end_time:
            tf.logging.fatal(
                'Priming sequence is longer than the total number of steps '
                'requested: Priming sequence length: %s, Generation length '
                'requested: %s', generate_section.start_time, total_seconds)
            return
    else:
        input_sequence = NoteSequence()
        input_sequence.tempos.add().qpm = qpm
        generate_section = generator_options.generate_sections.add(
            start_time=0, end_time=total_seconds)
    generator_options.args['temperature'].float_value = temperature
    generator_options.args['beam_size'].int_value = beam_size
    generator_options.args['branch_factor'].int_value = branch_factor
    generator_options.args[
        'steps_per_iteration'].int_value = steps_per_iteration
    # tf.logging.debug('input_sequence: %s', input_sequence)
    # tf.logging.debug('generator_options: %s', generator_options)

    # Generate the sequence.
    sequence = generator.generate(primer_sequence, generator_options)

    # # Writes the resulting midi file to the output directory
    # date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    # generator_name = str(generator.__class__).split(".")[2]
    # midi_filename = "%s_%s_%s.mid" % (generator_name, generator_id,
    #                                   date_and_time)
    # midi_path = os.path.join("output", midi_filename)
    # mm.midi_io.note_sequence_to_midi_file(sequence, midi_path)
    # print(f"Generated midi file: {os.path.abspath(midi_path)}")

    # # Writes the resulting plot file to the output directory
    # date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    # generator_name = str(generator.__class__).split(".")[2]
    # plot_filename = "%s_%s_%s.html" % (generator_name, generator_id,
    #                                    date_and_time)
    # plot_path = os.path.join("output", plot_filename)
    # pretty_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence)
    # plotter = Plotter()
    # plotter.save(pretty_midi, plot_path)
    # print(f"Generated plot file: {os.path.abspath(plot_path)}")

    return sequence
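
A hedged call sketch, assuming Magenta's melody_rnn module and its published basic_rnn bundle:

from magenta.models.melody_rnn import melody_rnn_sequence_generator

sequence = generate(
    bundle_name='basic_rnn.mag',
    sequence_generator=melody_rnn_sequence_generator,
    generator_id='basic_rnn',
    primer_melody='[60, -2, 60, -2]',
    total_length_steps=64,
    temperature=1.1)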