Example #1
0
def generatestuff(input_sequence):
    """Continue ``input_sequence`` with the module-level melody RNN model.

    Relies on the module-level ``melody_rnn`` generator and the global
    ``temperature`` setting. On a model error, the MIDI state is reset via
    ``resetMidiFile()`` and ``None`` is returned implicitly.
    """
    global temperature

    num_steps = 128  # change this for shorter or longer sequences

    # Generation begins one step after the final note of the primer ends.
    if input_sequence.notes:
        last_end_time = max(note.end_time for note in input_sequence.notes)
    else:
        last_end_time = 0

    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    try:
        # Ask the model to continue the sequence.
        return melody_rnn.generate(input_sequence, generator_options)
    except magenta.models.shared.events_rnn_model.EventSequenceRnnModelError:
        print('caught something')
        print(' lower num_steps' + str(total_seconds))
        resetMidiFile()
Example #2
0
def continueByModel(input, model):
    """Continue the given NoteSequence using a melody-RNN model bundle.

    Downloads the ``.mag`` bundle for ``model`` into ./content/ when it is
    not already cached, builds and initializes the corresponding sequence
    generator, then asks it to continue ``input`` for 128 steps at
    temperature 10.0.
    """
    # Fetch the pre-trained bundle on first use.
    bundle_path = f'./content/{model}.mag'
    if not path.isfile(bundle_path):
        print(
            f'Downloading {model} bundle. This will take less than a minute...'
        )
        notebook_utils.download_bundle(f'{model}.mag', './content/')

    # Build and initialize the sequence generator from the bundle.
    bundle = sequence_generator_bundle.read_bundle_file(bundle_path)
    melody_rnn = melody_rnn_sequence_generator.get_generator_map()[model](
        checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    input_sequence = input  # NOTE: the parameter shadows the builtin `input`
    num_steps = 128  # change this for shorter or longer sequences
    temperature = 10.0  # higher temperature -> more random output

    # Generation begins one step after the last primer note ends.
    if input_sequence.notes:
        last_end_time = max(note.end_time for note in input_sequence.notes)
    else:
        last_end_time = 0
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step, end_time=total_seconds)

    # Ask the model to continue the sequence.
    return melody_rnn.generate(input_sequence, generator_options)
    def __call__(self, input_sequence):
        """
        Continues a music sequence

        Args:
            input_sequence: initial sequence to continue
                type: NoteSequence object
        Returns:
            NotesSequence object of continued music
        """
        num_steps = 128  # shorter or longer sequences
        temperature = 1.0  # the higher the temperature the more random the sequence.

        # Set the start time to begin on the next step after the last note ends.
        last_end_time = (max(
            n.end_time
            for n in input_sequence.notes) if input_sequence.notes else 0)
        qpm = input_sequence.tempos[0].qpm
        # Fix: use the instance's generator. The original read a bare
        # `melody_rnn`, which is undefined in this method's scope; the
        # generate call below already uses self.melody_rnn.
        seconds_per_step = 60.0 / qpm / self.melody_rnn.steps_per_quarter
        total_seconds = num_steps * seconds_per_step

        generator_options = generator_pb2.GeneratorOptions()
        generator_options.args['temperature'].float_value = temperature
        generator_options.generate_sections.add(start_time=last_end_time +
                                                seconds_per_step,
                                                end_time=total_seconds)

        # Ask the model to continue the sequence.
        sequence = self.melody_rnn.generate(input_sequence, generator_options)

        # Audition the result immediately, then hand it back to the caller.
        note_seq.play_sequence(sequence, synth=note_seq.fluidsynth)
        return sequence
Example #4
0
  def _generate(self, input_sequence, zero_time, response_start_time,
                response_end_time):
    """Generates a response sequence with the currently-selected generator.

    Args:
      input_sequence: The NoteSequence to use as a generation seed.
      zero_time: The float time in seconds to treat as the start of the input.
      response_start_time: The float time in seconds for the start of
          generation.
      response_end_time: The float time in seconds for the end of generation.

    Returns:
      The generated NoteSequence.
    """
    # Work in a time base where the input begins at 0; this simplifies the
    # section bookkeeping below.
    start = response_start_time - zero_time
    end = response_end_time - zero_time

    options = generator_pb2.GeneratorOptions()
    options.input_sections.add(start_time=0, end_time=start)
    options.generate_sections.add(start_time=start, end_time=end)

    # Apply the current temperature setting.
    options.args['temperature'].float_value = self._temperature

    tf.logging.info(
        "Generating sequence using '%s' generator.",
        self._sequence_generator.details.id)
    tf.logging.debug('Generator Details: %s', self._sequence_generator.details)
    tf.logging.debug('Bundle Details: %s',
                     self._sequence_generator.bundle_details)
    tf.logging.debug('Generator Options: %s', options)

    # Generate against the zero-based input, trim to the requested window,
    # then shift the result back to wall-clock time.
    shifted_input = adjust_sequence_times(input_sequence, -zero_time)
    response = self._sequence_generator.generate(shifted_input, options)
    response = note_seq.trim_note_sequence(response, start, end)
    return adjust_sequence_times(response, zero_time)
Example #5
0
def generate_midi(pitches, start_times, durations, tempo, length):
    """Continue the given notes with the module-level generator.

    Builds a primer NoteSequence from the parallel pitch/start/duration
    lists, asks ``generator`` to continue it until ``length`` seconds, and
    returns the predicted notes as parallel lists.
    """
    qpm = tempo / 2
    primer_sequence = make_notes_sequence(pitches, start_times, durations, qpm)

    generator_options = generator_pb2.GeneratorOptions()
    # Generation begins one step after the final primer note ends.
    if primer_sequence.notes:
        last_end_time = max(note.end_time for note in primer_sequence.notes)
    else:
        last_end_time = 0

    generator_options.generate_sections.add(
        start_time=last_end_time + _steps_to_seconds(1, qpm),
        end_time=length)

    # generate the output sequence
    generated_sequence = generator.generate(primer_sequence, generator_options)

    # Flatten the generated notes into parallel result lists.
    notes = generated_sequence.notes
    return {
        "pitches": [note.pitch for note in notes],
        "start_times": [note.start_time for note in notes],
        "durations": [note.end_time - note.start_time for note in notes],
    }
def generate_drums(
    bundle_dir: str = 'bundles',
    target_dir: str = 'output',
    generation_length: int = 1,
    temperature: float = 1.1,
    input_sequence: NoteSequence = None,
    start_with_primer: bool = False,
) -> None:
    """Generates a drums midi file

    Generates a midi file of drums percussion, using the pre-trained
    drum_kit_rnn magenta model, and writes it to
    ``<target_dir>/drums_rnn.mid``.

    Args:
        bundle_dir: The directory where the bundle will be downloaded to.
        target_dir: The directory the generated MIDI file is written to.
        generation_length: Duration in bars of the generated sequence.
        temperature: The degree of randomness of the generation.
        input_sequence: Optional primer sequence, to "influence" the
            generation.
        start_with_primer: True for primer sequence to play before the
            generated one, False to not play the primer sequence.

    Note:
        Primer must be at least one-bar-long if start_with_primer is True

    """
    # Load (downloading if needed) the drum_kit_rnn bundle and build the
    # generator from it.
    generator = get_drums_generator(bundle=get_drums_bundle(
        bundle_dir=bundle_dir))
    # Compute the generation window; the helper accounts for whether the
    # primer plays first.
    time = get_generation_times(generation_length=generation_length,
                                start_with_primer=start_with_primer)

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(start_time=time['start'],
                                            end_time=time['end'])

    sequence = generator.generate(input_sequence, generator_options)
    note_sequence_to_midi_file(sequence,
                               os.path.join(target_dir, 'drums_rnn.mid'))
Example #7
0
  def generate(self, primer_sequence):
      """Continue ``primer_sequence`` using the module-level ``generator``.

      The tempo comes from the server state (halved to get qpm) and the
      generated sequence extends to ``max_buffer`` seconds.
      """
      qpm = self.server_state['tempo'] / 2
      length = self.server_state['max_buffer']

      options = generator_pb2.GeneratorOptions()
      # Generation begins one step after the last primer note ends.
      if primer_sequence.notes:
          last_end_time = max(n.end_time for n in primer_sequence.notes)
      else:
          last_end_time = 0

      options.generate_sections.add(
          start_time=last_end_time + _steps_to_seconds(1, qpm),
          end_time=length)

      # NOTE(review): `generator` here is a module-level name, not
      # self.generator — confirm that is intentional.
      return generator.generate(primer_sequence, options)
Example #8
0
def run_with_flags(generator):
  """Generates melodies and saves them as MIDI files.

  Uses the options specified by the flags defined in this module.

  Args:
    generator: The MelodyRnnSequenceGenerator to use for generation.
  """
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)

  primer_midi = None
  if FLAGS.primer_midi:
    primer_midi = os.path.expanduser(FLAGS.primer_midi)

  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)

  # Choose the priming sequence: an explicit melody flag wins, then a primer
  # MIDI file (which may also override qpm), else a single middle C.
  primer_sequence = None
  qpm = FLAGS.qpm if FLAGS.qpm else note_seq.DEFAULT_QUARTERS_PER_MINUTE
  if FLAGS.primer_melody:
    primer_melody = note_seq.Melody(ast.literal_eval(FLAGS.primer_melody))
    primer_sequence = primer_melody.to_sequence(qpm=qpm)
  elif primer_midi:
    primer_sequence = note_seq.midi_file_to_sequence_proto(primer_midi)
    if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
      qpm = primer_sequence.tempos[0].qpm
  else:
    tf.logging.warning(
        'No priming sequence specified. Defaulting to a single middle C.')
    primer_melody = note_seq.Melody([60])
    primer_sequence = primer_melody.to_sequence(qpm=qpm)

  # Derive the total number of seconds to generate based on the QPM of the
  # priming sequence and the num_steps flag.
  seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
  total_seconds = FLAGS.num_steps * seconds_per_step

  # Specify start/stop time for generation based on starting generation at the
  # end of the priming sequence and continuing until the sequence is num_steps
  # long.
  generator_options = generator_pb2.GeneratorOptions()
  if primer_sequence:
    input_sequence = primer_sequence
    # Set the start time to begin on the next step after the last note ends.
    if primer_sequence.notes:
      last_end_time = max(n.end_time for n in primer_sequence.notes)
    else:
      last_end_time = 0
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    # Fail fast when the primer already covers the requested length; a
    # non-positive generation window cannot be satisfied.
    if generate_section.start_time >= generate_section.end_time:
      tf.logging.fatal(
          'Priming sequence is longer than the total number of steps '
          'requested: Priming sequence length: %s, Generation length '
          'requested: %s',
          generate_section.start_time, total_seconds)
      return
  else:
    # No primer: generate from time 0 on an empty sequence at the chosen qpm.
    input_sequence = music_pb2.NoteSequence()
    input_sequence.tempos.add().qpm = qpm
    generate_section = generator_options.generate_sections.add(
        start_time=0,
        end_time=total_seconds)
  # Forward the sampling/beam-search flags to the generator.
  generator_options.args['temperature'].float_value = FLAGS.temperature
  generator_options.args['beam_size'].int_value = FLAGS.beam_size
  generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
  generator_options.args[
      'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
  tf.logging.debug('input_sequence: %s', input_sequence)
  tf.logging.debug('generator_options: %s', generator_options)

  # Make the generate request num_outputs times and save the output as midi
  # files.
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
  digits = len(str(FLAGS.num_outputs))
  for i in range(FLAGS.num_outputs):
    generated_sequence = generator.generate(input_sequence, generator_options)

    midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
    midi_path = os.path.join(FLAGS.output_dir, midi_filename)
    note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

  tf.logging.info('Wrote %d MIDI files to %s',
                  FLAGS.num_outputs, FLAGS.output_dir)
def listen_and_extend(chunk_duration,
                      min_volume,
                      min_rest,
                      rest_threshold,
                      mel_min=4,
                      rest_max=3,
                      sampling_rate=44100):
    """Record a melody from the microphone and extend it with basic_rnn.

    Records microphone audio in fixed-size chunks, converts it into a MIDI
    melody (condensing octaves, smoothing repeats, removing errors, and
    inserting rests), saves plots and MIDI files of each stage, then asks
    the pre-trained basic_rnn melody model to continue the melody.

    Args:
        chunk_duration: Length in seconds of each recorded audio chunk.
        min_volume: Amplitude threshold below which audio is ignored.
        min_rest: Minimum gap length in seconds that counts as a rest.
        rest_threshold: Amplitude threshold passed to find_rests.
        mel_min: NOTE(review): unused in this function body — confirm intent.
        rest_max: NOTE(review): unused in this function body — confirm intent.
        sampling_rate: Audio sampling rate in Hz.

    Returns:
        A pretty_midi.PrettyMIDI of the extended melody (also written to
        Output/ext_out.mid, with a plot saved to Output/ext_plotted.html).
    """
    chunksize = int(chunk_duration * sampling_rate)
    # Notes must be slightly longer than one chunk to survive processing.
    min_note_size = float(chunk_duration * 1.05)

    p = pyaudio.PyAudio()  # Initialize PyAudio object

    print(f"Recording audio in {chunk_duration} second chunks.")
    input("Press enter to proceed.")

    # Open stream with standard parameters
    stream = p.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=sampling_rate,
                    input=True,
                    frames_per_buffer=chunksize)

    # Run 4 processing steps: condense octaves, smooth repeats, remove errors, add rests
    pre_seq, full_raw = find_melody(chunksize, chunk_duration, sampling_rate,
                                    min_volume, stream)
    oct_seq = condense_octaves(copy.deepcopy(pre_seq))

    # Repeat process_MIDI until it reports a stable result (flag in res[1]).
    res = process_MIDI(copy.deepcopy(oct_seq), min_note_size)
    while not res[1]:
        res = process_MIDI(res[0], min_note_size)
    final_seq = res[0]

    # Convert rest boundaries from sample indices to seconds, then keep only
    # rests longer than min_rest.
    samp_rest = find_rests(full_raw, rest_threshold)
    sec_rests = [(round(tup[0] / sampling_rate,
                        2), round(tup[1] / sampling_rate, 2))
                 for tup in samp_rest]
    sec_rests = [tup for tup in sec_rests if tup[1] - tup[0] > min_rest]

    rest_seq = []
    for note in final_seq:
        rest_seq = note.add_rests(sec_rests, rest_seq)

    # Cleanup
    stream.stop_stream()
    stream.close()
    p.terminate()

    # Plots the waveform and saves the result
    plt.plot(full_raw)
    plt.axhline(min_volume, color='r')
    plt.axhline(-min_volume, color='r')
    plt.title("Raw Microphone Input")
    plt.savefig("Output/Waveform.png")

    # Save MIDI plots and MIDI files
    save_sequence(pre_seq, 'pre')
    save_sequence(oct_seq, 'oct')
    save_sequence(final_seq, 'post')
    rest_mel = save_sequence(rest_seq, 'rest')

    # Initialize Model
    bundle = sequence_generator_bundle.read_bundle_file('Src/basic_rnn.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    # Model Parameters
    end_time = (max(note.end_time for note in rest_mel.notes))
    qpm = rest_mel.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    # Number of model steps spanning the recorded melody's duration.
    steps = ((rest_mel.total_time * qpm * melody_rnn.steps_per_quarter) / 60)
    total = steps * seconds_per_step
    tmp = 1.0  # generation temperature; higher = more random

    # Initialize Generator
    gen_options = generator_pb2.GeneratorOptions()
    gen_options.args['temperature'].float_value = tmp
    gen_section = gen_options.generate_sections.add(start_time=end_time +
                                                    seconds_per_step,
                                                    end_time=total)

    out = melody_rnn.generate(rest_mel, gen_options)

    # Persist and visualize the extended melody.
    note_seq.sequence_proto_to_midi_file(out, 'Output/ext_out.mid')
    ext = pretty_midi.PrettyMIDI('Output/ext_out.mid')
    visual_midi.Plotter().save(ext, 'Output/ext_plotted.html')

    return ext
Example #10
0
# Give the melody (`mel`, defined earlier in the file) a tempo of 90 qpm.
mel.tempos.add(qpm=90)

#  Convert note_seq to MIDI for storage and playback
note_seq.sequence_proto_to_midi_file(mel, 'Input/in.mid')

# Import Dependencies
from magenta.models.melody_rnn import melody_rnn_sequence_generator
from magenta.models.shared import sequence_generator_bundle
from note_seq.protobuf import generator_pb2
from note_seq.protobuf import music_pb2

# Initialize Model
bundle = sequence_generator_bundle.read_bundle_file('Src/basic_rnn.mag')  # Loads model for use
generator_map = melody_rnn_sequence_generator.get_generator_map()
melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
melody_rnn.initialize()

# Model Parameters
# NOTE(review): `steps` is not used below; the generation window is fixed
# to 8-16 seconds in the generate_sections call.
steps = 16
tmp = 1.0  # Measure of the generation's "temperature". Higher = More scattered/random

# Initialize Generator
gen_options = generator_pb2.GeneratorOptions()
gen_options.args['temperature'].float_value = tmp
# Generate between t=8s and t=16s.
gen_section = gen_options.generate_sections.add(start_time=8, end_time=16)

# Ask the model to continue the melody and write the result to MIDI.
out = melody_rnn.generate(mel, gen_options)

note_seq.sequence_proto_to_midi_file(out, 'Output/out.mid')
def run_with_flags(generator):
  """Generates polyphonic tracks and saves them as MIDI files.

  Uses the options specified by the flags defined in this module.

  Args:
    generator: The PolyphonyRnnSequenceGenerator to use for generation.
  """
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return
  output_dir = os.path.expanduser(FLAGS.output_dir)

  primer_midi = None
  if FLAGS.primer_midi:
    primer_midi = os.path.expanduser(FLAGS.primer_midi)

  if not tf.gfile.Exists(output_dir):
    tf.gfile.MakeDirs(output_dir)

  # Choose the priming sequence: explicit pitches (a simultaneous chord)
  # win, then a primer melody, then a primer MIDI file (which may override
  # qpm), else an empty sequence.
  primer_sequence = None
  qpm = FLAGS.qpm if FLAGS.qpm else note_seq.DEFAULT_QUARTERS_PER_MINUTE
  if FLAGS.primer_pitches:
    primer_sequence = music_pb2.NoteSequence()
    primer_sequence.tempos.add().qpm = qpm
    primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ
    # All primer pitches start together and last one quarter note.
    for pitch in ast.literal_eval(FLAGS.primer_pitches):
      note = primer_sequence.notes.add()
      note.start_time = 0
      note.end_time = 60.0 / qpm
      note.pitch = pitch
      note.velocity = 100
    primer_sequence.total_time = primer_sequence.notes[-1].end_time
  elif FLAGS.primer_melody:
    primer_melody = note_seq.Melody(ast.literal_eval(FLAGS.primer_melody))
    primer_sequence = primer_melody.to_sequence(qpm=qpm)
  elif primer_midi:
    primer_sequence = note_seq.midi_file_to_sequence_proto(primer_midi)
    if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
      qpm = primer_sequence.tempos[0].qpm
  else:
    tf.logging.warning(
        'No priming sequence specified. Defaulting to empty sequence.')
    primer_sequence = music_pb2.NoteSequence()
    primer_sequence.tempos.add().qpm = qpm
    primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ

  # Derive the total number of seconds to generate.
  seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
  generate_end_time = FLAGS.num_steps * seconds_per_step

  # Specify start/stop time for generation based on starting generation at the
  # end of the priming sequence and continuing until the sequence is num_steps
  # long.
  generator_options = generator_pb2.GeneratorOptions()
  # Set the start time to begin when the last note ends.
  generate_section = generator_options.generate_sections.add(
      start_time=primer_sequence.total_time,
      end_time=generate_end_time)

  # Fail fast when the primer already covers the requested length.
  if generate_section.start_time >= generate_section.end_time:
    tf.logging.fatal(
        'Priming sequence is longer than the total number of steps '
        'requested: Priming sequence length: %s, Total length '
        'requested: %s',
        generate_section.start_time, generate_end_time)
    return

  # Forward the sampling/beam-search flags to the generator.
  generator_options.args['temperature'].float_value = FLAGS.temperature
  generator_options.args['beam_size'].int_value = FLAGS.beam_size
  generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
  generator_options.args[
      'steps_per_iteration'].int_value = FLAGS.steps_per_iteration

  # Polyphony-specific primer-handling options.
  generator_options.args['condition_on_primer'].bool_value = (
      FLAGS.condition_on_primer)
  generator_options.args['no_inject_primer_during_generation'].bool_value = (
      not FLAGS.inject_primer_during_generation)

  tf.logging.debug('primer_sequence: %s', primer_sequence)
  tf.logging.debug('generator_options: %s', generator_options)

  # Make the generate request num_outputs times and save the output as midi
  # files.
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
  digits = len(str(FLAGS.num_outputs))
  for i in range(FLAGS.num_outputs):
    generated_sequence = generator.generate(primer_sequence, generator_options)

    midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
    midi_path = os.path.join(output_dir, midi_filename)
    note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

  tf.logging.info('Wrote %d MIDI files to %s',
                  FLAGS.num_outputs, output_dir)
Example #12
0
    def run(self):
        """Generates polyphonic tracks and saves them as MIDI files.

        Uses the options specified by MusicGeneratorSettings. Clears the
        output directory, generates num_outputs continuations of the primer
        melody with self.generator, writes each as a MIDI file, and hands
        the first files to self.player for playback.
        """
        primer_melody = self._get_primer_melody()

        output_dir = os.path.expanduser(MusicGeneratorSettings.output_dir)

        if not tf.gfile.Exists(output_dir):
            tf.gfile.MakeDirs(output_dir)

        # Remove any files left over from a previous run.
        for i in os.listdir(output_dir):
            os.remove(os.path.join(output_dir, i))

        primer_sequence = None
        qpm = MusicGeneratorSettings.qpm

        # The primer is supplied as a Python-literal melody string.
        primer_melody = note_seq.Melody(ast.literal_eval(primer_melody))
        primer_sequence = primer_melody.to_sequence(qpm=qpm)

        # Derive the total number of seconds to generate.
        seconds_per_step = 60.0 / qpm / self.generator.steps_per_quarter
        generate_end_time = MusicGeneratorSettings.num_steps * seconds_per_step

        # Specify start/stop time for generation based on starting generation at the
        # end of the priming sequence and continuing until the sequence is num_steps
        # long.
        generator_options = generator_pb2.GeneratorOptions()
        # Set the start time to begin when the last note ends.
        generator_options.generate_sections.add(
            start_time=primer_sequence.total_time, end_time=generate_end_time)

        # Forward the sampling/beam-search and primer-handling settings.
        generator_options.args[
            'temperature'].float_value = MusicGeneratorSettings.temperature
        generator_options.args[
            'beam_size'].int_value = MusicGeneratorSettings.beam_size
        generator_options.args[
            'branch_factor'].int_value = MusicGeneratorSettings.branch_factor
        generator_options.args[
            'steps_per_iteration'].int_value = MusicGeneratorSettings.steps_per_iteration
        generator_options.args[
            'condition_on_primer'].bool_value = MusicGeneratorSettings.condition_on_primer
        generator_options.args[
            'no_inject_primer_during_generation'].bool_value = not MusicGeneratorSettings.inject_primer_during_generation

        # Make the generate request num_outputs times and save the output as midi
        # files.
        digits = len(str(MusicGeneratorSettings.num_outputs))
        for i in range(MusicGeneratorSettings.num_outputs):
            generated_sequence = self.generator.generate(
                primer_sequence, generator_options)
            midi_filename = str(i + 1).zfill(digits) + ".mid"
            midi_path = os.path.join(output_dir, midi_filename)
            note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

            # Start playing the first file right away and queue the second,
            # so playback runs while later files are still generating.
            if i == 0:
                threading.Thread(target=self.player.play).start()
            elif i == 1:
                self.player.enqueue(midi_path)

        tf.logging.info('Wrote %d MIDI files to %s',
                        MusicGeneratorSettings.num_outputs, output_dir)
def run_with_flags(generator):
    """Generates performance tracks and saves them as MIDI files.

    Uses the options specified by the flags defined in this module.

    Args:
      generator: The PerformanceRnnSequenceGenerator to use for generation.
    """
    if not FLAGS.output_dir:
        tf.logging.fatal('--output_dir required')
        return
    output_dir = os.path.expanduser(FLAGS.output_dir)

    primer_midi = None
    if FLAGS.primer_midi:
        primer_midi = os.path.expanduser(FLAGS.primer_midi)

    if not tf.gfile.Exists(output_dir):
        tf.gfile.MakeDirs(output_dir)

    # Choose the priming sequence: explicit pitches win, then a primer
    # melody, then a primer MIDI file, else an empty sequence.
    primer_sequence = None
    if FLAGS.primer_pitches:
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ
        for pitch in ast.literal_eval(FLAGS.primer_pitches):
            note = primer_sequence.notes.add()
            note.start_time = 0
            note.end_time = 60.0 / note_seq.DEFAULT_QUARTERS_PER_MINUTE
            note.pitch = pitch
            note.velocity = 100
            # Runs inside the loop, but every note shares the same end_time,
            # so the final total_time is the same as setting it once after.
            primer_sequence.total_time = note.end_time
    elif FLAGS.primer_melody:
        primer_melody = note_seq.Melody(ast.literal_eval(FLAGS.primer_melody))
        primer_sequence = primer_melody.to_sequence()
    elif primer_midi:
        primer_sequence = note_seq.midi_file_to_sequence_proto(primer_midi)
    else:
        tf.logging.warning(
            'No priming sequence specified. Defaulting to empty sequence.')
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ

    # Derive the total number of seconds to generate.
    seconds_per_step = 1.0 / generator.steps_per_second
    generate_end_time = FLAGS.num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at the
    # end of the priming sequence and continuing until the sequence is num_steps
    # long.
    generator_options = generator_pb2.GeneratorOptions()
    # Set the start time to begin when the last note ends.
    generate_section = generator_options.generate_sections.add(
        start_time=primer_sequence.total_time, end_time=generate_end_time)

    # Fail fast when the primer already covers the requested length.
    if generate_section.start_time >= generate_section.end_time:
        tf.logging.fatal(
            'Priming sequence is longer than the total number of steps '
            'requested: Priming sequence length: %s, Total length '
            'requested: %s', generate_section.start_time, generate_end_time)
        return

    # Warn about control-signal flags the generator cannot honor.
    for control_cls in note_seq.all_performance_control_signals:
        if FLAGS[control_cls.name].value is not None and (
                generator.control_signals is None
                or not any(control.name == control_cls.name
                           for control in generator.control_signals)):
            tf.logging.warning(
                'Control signal requested via flag, but generator is not set up to '
                'condition on this control signal. Request will be ignored: %s = %s',
                control_cls.name, FLAGS[control_cls.name].value)

    if (FLAGS.disable_conditioning is not None
            and not generator.optional_conditioning):
        tf.logging.warning(
            'Disable conditioning flag set, but generator is not set up for '
            'optional conditioning. Requested disable conditioning flag will be '
            'ignored: %s', FLAGS.disable_conditioning)

    # Forward the control signals the generator supports, plus conditioning.
    if generator.control_signals:
        for control in generator.control_signals:
            if FLAGS[control.name].value is not None:
                generator_options.args[control.name].string_value = (
                    FLAGS[control.name].value)
    if FLAGS.disable_conditioning is not None:
        generator_options.args['disable_conditioning'].string_value = (
            FLAGS.disable_conditioning)

    # Forward the sampling/beam-search flags to the generator.
    generator_options.args['temperature'].float_value = FLAGS.temperature
    generator_options.args['beam_size'].int_value = FLAGS.beam_size
    generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
    generator_options.args[
        'steps_per_iteration'].int_value = FLAGS.steps_per_iteration

    tf.logging.debug('primer_sequence: %s', primer_sequence)
    tf.logging.debug('generator_options: %s', generator_options)

    # Make the generate request num_outputs times and save the output as midi
    # files.
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    digits = len(str(FLAGS.num_outputs))
    for i in range(FLAGS.num_outputs):
        generated_sequence = generator.generate(primer_sequence,
                                                generator_options)

        midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
        midi_path = os.path.join(output_dir, midi_filename)
        note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

    tf.logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs, output_dir)