def get_bundle(bundle_name: str,
               bundle_dir: str = 'bundles') -> GeneratorBundle:
    """Downloads and reads the specified Magenta bundle.

    First looks for the bundle by name in bundle_dir. If it is not found
    there, it is downloaded and then read. Bundle files downloaded by this
    function keep their official filenames.

    Args:
      bundle_name: Name of the Magenta pre-trained bundle,
        e.g. 'attention_rnn.mag'.
      bundle_dir: Target directory for downloaded bundle files
        (default: 'bundles').

    Returns:
      GeneratorBundle: The parsed bundle.
    """
    from magenta.models.shared import sequence_generator_bundle

    bundle_file = os.path.join(bundle_dir, bundle_name)
    if not os.path.isfile(bundle_file):
        download_bundle(bundle_name, bundle_dir)
    bundle = sequence_generator_bundle.read_bundle_file(
        bundle_file=bundle_file)
    return bundle
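
# Hypothetical usage sketch for get_bundle(): load a bundle and build its
# generator, mirroring the generator setup used in the examples below.
# The bundle and generator names here are illustrative.
from magenta.models.melody_rnn import melody_rnn_sequence_generator

attention_bundle = get_bundle('attention_rnn.mag')
generator_map = melody_rnn_sequence_generator.get_generator_map()
attention_generator = generator_map['attention_rnn'](checkpoint=None,
                                                     bundle=attention_bundle)
attention_generator.initialize()
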
def melody_rnn(input_sequence):
    # Initialize the model.
    print("Initializing Melody RNN...")
    bundle = sequence_generator_bundle.read_bundle_file(
        '/content/basic_rnn.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    # Model options. Change these to get different generated sequences!
    # (The original Colab overwrote input_sequence with a hard-coded melody
    # here; this version continues the sequence passed in as the argument.)
    num_steps = 128  # change this for shorter or longer sequences
    temperature = 1.0  # the higher the temperature, the more random the sequence.

    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(
        n.end_time
        for n in input_sequence.notes) if input_sequence.notes else 0)
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step, end_time=total_seconds)

    # Ask the model to continue the sequence.
    return melody_rnn.generate(input_sequence, generator_options)
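
# Hypothetical usage sketch for melody_rnn(): build a short two-note primer
# NoteSequence (in the same style as the twinkle_twinkle example below) and
# ask the model to continue it.
from note_seq.protobuf import music_pb2

primer = music_pb2.NoteSequence()
primer.notes.add(pitch=60, start_time=0.0, end_time=0.5, velocity=80)
primer.notes.add(pitch=62, start_time=0.5, end_time=1.0, velocity=80)
primer.tempos.add(qpm=60)
primer.total_time = 1.0

continued_sequence = melody_rnn(primer)
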
def get_drums_bundle(bundle_dir: str = 'bundles'):
    bundle_file = os.path.join(bundle_dir, DRUM_KIT_RNN_BUNDLE_NAME)
    if not os.path.isfile(bundle_file):
        download_bundle(DRUM_KIT_RNN_BUNDLE_NAME, bundle_dir)
    bundle = sequence_generator_bundle.read_bundle_file(
        bundle_file=bundle_file)
    return bundle
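
# Hypothetical usage sketch: build the drum kit generator from the bundle.
# Using 'drum_kit' as the generator id is an assumption based on the
# drum_kit_rnn bundle; the drums_rnn generator map parallels the melody_rnn
# one used above.
from magenta.models.drums_rnn import drums_rnn_sequence_generator

drums_bundle = get_drums_bundle()
drums_generator_map = drums_rnn_sequence_generator.get_generator_map()
drums_rnn = drums_generator_map['drum_kit'](checkpoint=None,
                                            bundle=drums_bundle)
drums_rnn.initialize()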
Example #4
def continueByModel(input, model):
    # Download the model bundle if it is not present locally.
    if not (path.isfile(f'./content/{model}.mag')):
        print(
            f'Downloading {model} bundle. This will take less than a minute...'
        )
        notebook_utils.download_bundle(f'{model}.mag', './content/')
    # Initialize the generator from the bundle.
    bundle = sequence_generator_bundle.read_bundle_file(
        f'./content/{model}.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map[model](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    # Set up the generation parameters.
    input_sequence = input
    num_steps = 128  # change this for shorter or longer sequences
    temperature = 10.0  # the higher the temperature the more random the sequence.

    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(
        n.end_time
        for n in input_sequence.notes) if input_sequence.notes else 0)
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step, end_time=total_seconds)

    # Ask the model to continue the sequence.
    return melody_rnn.generate(input_sequence, generator_options)
Example #5
def init_melody_rnn(model_name):
    print("initializing generator...")
    bundle = sequence_generator_bundle.read_bundle_file('./content/' +
                                                        model_name + '.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map[model_name](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()
    print('🎉initializing done!')
    return melody_rnn
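
# Hypothetical usage sketch: the model name doubles as the bundle filename
# stem and the generator map key (e.g. ./content/basic_rnn.mag -> 'basic_rnn').
basic_rnn = init_melody_rnn('basic_rnn')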
Example #6
def setup_model():
    # Initialize the model.
    print("Initializing attention Melody RNN...")
    bundle = sequence_generator_bundle.read_bundle_file(
        'content/attention_rnn.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    app.melody_rnn = generator_map['attention_rnn'](checkpoint=None,
                                                    bundle=bundle)
    app.melody_rnn.initialize()
Example #7
def initialisation_polyphony(model_name):
    print("initializing generator...")
    bundle = sequence_generator_bundle.read_bundle_file('./content/' +
                                                        model_name +
                                                        '_rnn.mag')
    generator_map = polyphony_sequence_generator.get_generator_map()
    polyphony_rnn = generator_map[model_name](checkpoint=None, bundle=bundle)
    polyphony_rnn.initialize()
    print('🎉initializing done!')
    return polyphony_rnn
Example #8
def main(_):
    bundle_file = FLAGS.bundle_path
    checkpoint_file = FLAGS.checkpoint_path
    metagraph_filename = checkpoint_file + '.meta'

    bundle = sequence_generator_bundle.read_bundle_file(bundle_file)

    with tf.gfile.Open(checkpoint_file, 'wb') as f:
        f.write(bundle.checkpoint_file[0])

    with tf.gfile.Open(metagraph_filename, 'wb') as f:
        f.write(bundle.metagraph_file)
Example #9
def get_bundle():
    """Returns a generator_pb2.GeneratorBundle object based read from bundle_file.

    Returns:
      Either a generator_pb2.GeneratorBundle or None if the bundle_file flag is
      not set or the save_generator_bundle flag is set.
    """
    if FLAGS.save_generator_bundle:
        return None
    if FLAGS.bundle_file is None:
        return None
    bundle_file = os.path.expanduser(FLAGS.bundle_file)
    return sequence_generator_bundle.read_bundle_file(bundle_file)
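
# The flags referenced above are typically defined at module level, for
# example along these lines (a sketch assuming TF1-style flags; the help
# strings are illustrative):
import tensorflow.compat.v1 as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_boolean(
    'save_generator_bundle', False,
    'If True, save a generator bundle instead of generating.')
tf.app.flags.DEFINE_string(
    'bundle_file', None,
    'Path to the bundle file to read the generator from.')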
Example #10
    def __init__(self, state={}, checkpoint='attention_rnn'):
        config = magenta.models.melody_rnn.melody_rnn_model.default_configs[
            checkpoint]
        bundle_file = sequence_generator_bundle.read_bundle_file(
            os.path.abspath('model/' + BUNDLE_NAME + '.mag'))
        steps_per_quarter = 4
        self.model = MelodyRnnSequenceGenerator(
            model=melody_rnn_model.MelodyRnnModel(config),
            details=config.details,
            steps_per_quarter=steps_per_quarter,
            bundle=bundle_file)
        self.realtime_ready = True
        # self.temperature = 1.2
        self.server_state = state
        self.server_state['history'] = [NoteSequence()]
Example #11
    def load_1(self, bundle_name):
        bundle_name = str(bundle_name)
        config = magenta.models.melody_rnn.melody_rnn_model.default_configs[
            bundle_name]
        bundle_file = read_bundle_file(
            os.path.join(self._canvas_dir, bundle_name + '.mag'))
        steps_per_quarter = 4

        self.generator = melody_rnn_sequence_generator.MelodyRnnSequenceGenerator(
            model=melody_rnn_model.MelodyRnnModel(config),
            details=config.details,
            steps_per_quarter=steps_per_quarter,
            bundle=bundle_file)

        self._outlet(1, "loaded")
Example #12
    def initialize(self, name, sequence_generator, extra_name=None):
        """
        Initializes the standard model.
        """
        print(f"Initializing {name}...")
        bundle = sequence_generator_bundle.read_bundle_file(
            os.path.join(os.getcwd(), "models", f"{self.model_name}.mag"))

        generator_map = sequence_generator.get_generator_map()
        if extra_name is not None:
            self.model_name = extra_name
        self.model = generator_map[self.model_name](checkpoint=None,
                                                    bundle=bundle)

        self.model.initialize()
    def __init__(self):
        """
        Loads and initializes the rnn
        """
        print('Downloading model bundle. This will take less than a minute...')
        note_seq.notebook_utils.download_bundle('basic_rnn.mag', './content/')

        # Initialize the model.
        print("Initializing Melody RNN...")
        bundle = sequence_generator_bundle.read_bundle_file(
            './content/basic_rnn.mag')
        generator_map = melody_rnn_sequence_generator.get_generator_map()
        self.melody_rnn = generator_map['basic_rnn'](checkpoint=None,
                                                     bundle=bundle)
        self.melody_rnn.initialize()

        print('🎉 Done!')
Example #14
    def __init__(self, bundle_path: str):
        """Initialize model from bundle.

        bundle_path (str): Path to the MelodyRnnSequenceGenerator to use for generation.
        """
        bundle_file = os.path.expanduser(bundle_path)
        bundle = sequence_generator_bundle.read_bundle_file(bundle_file)

        config_id = bundle.generator_details.id
        config = melody_rnn_model.default_configs[config_id]

        self.generator = melody_rnn_sequence_generator.MelodyRnnSequenceGenerator(
            model=melody_rnn_model.MelodyRnnModel(config),
            details=config.details,
            steps_per_quarter=config.steps_per_quarter,
            checkpoint=None,
            bundle=bundle)
Example #15
def _load_generator_from_bundle_file(bundle_file):
    """Returns initialized generator from bundle file path or None if fails."""
    try:
        bundle = sequence_generator_bundle.read_bundle_file(bundle_file)
    except sequence_generator_bundle.GeneratorBundleParseError:
        print('Failed to parse bundle file: %s' % bundle_file)
        return None

    generator_id = bundle.generator_details.id
    if generator_id not in _GENERATOR_MAP:
        print("Unrecognized SequenceGenerator ID '%s' in bundle file: %s" %
              (generator_id, FLAGS.bundle_file))
        return None

    generator = _GENERATOR_MAP[generator_id](checkpoint=None, bundle=bundle)
    generator.initialize()
    print("Loaded '%s' generator bundle from file '%s'." %
          (bundle.generator_details.id, bundle_file))
    return generator
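
# _GENERATOR_MAP is not defined in this snippet; a plausible sketch is a dict
# keyed by generator id that merges the per-model generator maps, e.g.:
from magenta.models.drums_rnn import drums_rnn_sequence_generator
from magenta.models.melody_rnn import melody_rnn_sequence_generator

_GENERATOR_MAP = dict(melody_rnn_sequence_generator.get_generator_map())
_GENERATOR_MAP.update(drums_rnn_sequence_generator.get_generator_map())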
Example #16
    def __init__(self, led):
        self.button = Button()
        self.player = Player(MusicGeneratorSettings.output_dir)
        self.led = led

        bundle_file = os.path.expanduser(MusicGeneratorSettings.bundle_file)
        bundle = sequence_generator_bundle.read_bundle_file(bundle_file)
        tf.logging.set_verbosity(MusicGeneratorSettings.log)

        config_id = bundle.generator_details.id
        config = polyphony_model.default_configs[config_id]
        config.hparams.parse(MusicGeneratorSettings.hparams)

        # Having too large of a batch size will slow generation down unnecessarily.
        config.hparams.batch_size = min(
            config.hparams.batch_size, MusicGeneratorSettings.beam_size *
            MusicGeneratorSettings.branch_factor)

        self.generator = polyphony_sequence_generator.PolyphonyRnnSequenceGenerator(
            model=polyphony_model.PolyphonyRnnModel(config),
            details=config.details,
            steps_per_quarter=config.steps_per_quarter,
            checkpoint=None,
            bundle=bundle)
def generate(bundle_name: str,
             sequence_generator,
             generator_id: str,
             primer_melody: str,
             qpm: float = DEFAULT_QUARTERS_PER_MINUTE,
             total_length_steps: int = 64,
             temperature: float = 1.0,
             beam_size: int = 1,
             branch_factor: int = 1,
             steps_per_iteration: int = 128) -> NoteSequence:
    """Generates and returns a new sequence given the sequence generator.
  Uses the bundle name to download the bundle in the "bundles" directory if it
  doesn't already exist, then uses the sequence generator and the generator id
  to get the generator. Parameters can be provided for the generation phase.
  The MIDI and plot files are written to disk in the "output" folder, with the
  filename pattern "<generator_name>_<generator_id>_<date_time>" with "mid" or
  "html" as extension respectively.
      :param bundle_name: The bundle name to be downloaded and generated with.

      :param sequence_generator: The sequence generator module, which is the
      python module in the corresponding models subfolder.

      :param generator_id: The id of the generator configuration, this is the
      model's configuration.

      :param primer_filename: The filename for the primer, which will be taken
      from the "primers" directory. If left empty, and empty note sequence will
      be used.

      :param qpm: The QPM for the generated sequence. If a primer is provided,
      the primer QPM will be used and this parameter ignored.

      :param total_length_steps: The total length of the sequence, which
      contains the added length of the primer and the generated sequence
      together. This value need to be bigger than the primer length in bars.

      :param temperature: The temperature value for the generation algorithm,
      lesser than 1 is less random (closer to the primer), bigger than 1 is
      more random

      :param beam_size: The beam size for the generation algorithm, a bigger
      branch size means the generation algorithm will generate more sequence
      each iteration, meaning a less random sequence at the cost of more time.

      :param branch_factor: The branch factor for the generation algorithm,
      a bigger branch factor means the generation algorithm will keep more
      sequence candidates at each iteration, meaning a less random sequence
      at the cost of more time.

      :param steps_per_iteration: The number of steps the generation algorithm
      generates at each iteration, a bigger steps per iteration meaning there
      are less iterations in total because more steps gets generated each time.

      :returns The generated NoteSequence
  """

    # Downloads the bundle from the Magenta website; a bundle (.mag file) is a
    # trained model used by Magenta.
    mm.notebook_utils.download_bundle(bundle_name, "bundles")
    bundle = sequence_generator_bundle.read_bundle_file(
        os.path.join("bundles", bundle_name))

    # Initializes the generator from the generator id; it needs to match the
    # bundle downloaded above and selects the model's configuration.
    generator_map = sequence_generator.get_generator_map()
    generator = generator_map[generator_id](checkpoint=None, bundle=bundle)
    generator.initialize()

    # # Gets the primer sequence that is fed into the model for the generator,
    # # which will generate a sequence based on this one.
    # # If no primer sequence is given, the primer sequence is initialized
    # # to an empty note sequence
    # if primer_filename:
    #   primer_sequence = mm.midi_io.midi_file_to_note_sequence(
    #     os.path.join("primers", primer_filename))
    # else:
    #   primer_sequence = NoteSequence()

    # Instead of a primer file, take the primer melody directly as a literal
    # list string, as in:
    # https://github.com/magenta/magenta/blob/master/magenta/models/melody_rnn/melody_rnn_generate.py
    primer_melody_ns = note_seq.Melody(ast.literal_eval(primer_melody))
    primer_sequence = primer_melody_ns.to_sequence(qpm=qpm)

    # Gets the QPM from the primer sequence; if the primer has no tempo, the
    # qpm parameter (defaulting to Magenta's default) is used instead.
    if primer_sequence.tempos:
        if len(primer_sequence.tempos) > 1:
            raise Exception("No support for multiple tempos")
        qpm = primer_sequence.tempos[0].qpm

    # # Calculates the seconds per 1 step, which changes depending on the QPM value
    # # (steps per quarter in generators are mostly 4)
    # seconds_per_step = 60.0 / qpm / getattr(generator, "steps_per_quarter", 4)

    # # Calculates the primer sequence length in steps and time by taking the
    # # total time (which is the end of the last note) and finding the next step
    # # start time.
    # primer_sequence_length_steps = math.ceil(primer_sequence.total_time
    #                                          / seconds_per_step)
    # primer_sequence_length_time = primer_sequence_length_steps * seconds_per_step

    # # Calculates the start and the end of the primer sequence.
    # # We add a negative delta to the end, because if we don't some generators
    # # won't start the generation right at the beginning of the bar, they will
    # # start at the next step, meaning we'll have a small gap between the primer
    # # and the generated sequence.
    # primer_end_adjust = (0.00001 if primer_sequence_length_time > 0 else 0)
    # primer_start_time = 0
    # primer_end_time = (primer_start_time
    #                    + primer_sequence_length_time
    #                    - primer_end_adjust)

    # # Calculates the generation time by taking the total time and subtracting
    # # the primer time. The resulting generation time needs to be greater than zero.
    # generation_length_steps = total_length_steps - primer_sequence_length_steps
    # if generation_length_steps <= 0:
    #   raise Exception("Total length in steps too small "
    #                   + "(" + str(total_length_steps) + ")"
    #                   + ", needs to be at least one bar bigger than primer "
    #                   + "(" + str(primer_sequence_length_steps) + ")")
    # generation_length_time = generation_length_steps * seconds_per_step

    # # Calculates the generate start and end time, the start time will contain
    # # the previously added negative delta from the primer end time.
    # # We remove the generation end time delta to end the generation
    # # on the last bar.
    # generation_start_time = primer_end_time
    # generation_end_time = (generation_start_time
    #                        + generation_length_time
    #                        + primer_end_adjust)

    # # # Showtime
    # # print(f"Primer time: [{primer_start_time}, {primer_end_time}]")
    # # print(f"Generation time: [{generation_start_time}, {generation_end_time}]")

    # # Pass the given parameters, the generator options are common for all models
    # generator_options = GeneratorOptions()
    # generator_options.args['temperature'].float_value = temperature
    # generator_options.args['beam_size'].int_value = beam_size
    # generator_options.args['branch_factor'].int_value = branch_factor
    # generator_options.args['steps_per_iteration'].int_value = steps_per_iteration
    # generator_options.generate_sections.add(
    #   start_time=generation_start_time,
    #   end_time=generation_end_time)

    # Derive the total number of seconds to generate based on the QPM of the
    # priming sequence and total_length_steps.
    seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
    total_seconds = total_length_steps * seconds_per_step

    # Specify start/stop times for generation: start at the end of the priming
    # sequence and continue until the sequence is total_length_steps long.
    generator_options = GeneratorOptions()
    if primer_sequence:
        input_sequence = primer_sequence
        # Set the start time to begin on the next step after the last note ends.
        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0
        generate_section = generator_options.generate_sections.add(
            start_time=last_end_time + seconds_per_step,
            end_time=total_seconds)

        if generate_section.start_time >= generate_section.end_time:
            tf.logging.fatal(
                'Priming sequence is longer than the total number of steps '
                'requested: Priming sequence length: %s, Generation length '
                'requested: %s', generate_section.start_time, total_seconds)
            return
    else:
        input_sequence = NoteSequence()
        input_sequence.tempos.add().qpm = qpm
        generate_section = generator_options.generate_sections.add(
            start_time=0, end_time=total_seconds)
    generator_options.args['temperature'].float_value = temperature
    generator_options.args['beam_size'].int_value = beam_size
    generator_options.args['branch_factor'].int_value = branch_factor
    generator_options.args[
        'steps_per_iteration'].int_value = steps_per_iteration
    # tf.logging.debug('input_sequence: %s', input_sequence)
    # tf.logging.debug('generator_options: %s', generator_options)

    # Generates the sequence from the primer and the generator options.
    sequence = generator.generate(input_sequence, generator_options)

    # # Writes the resulting midi file to the output directory
    # date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    # generator_name = str(generator.__class__).split(".")[2]
    # midi_filename = "%s_%s_%s.mid" % (generator_name, generator_id,
    #                                   date_and_time)
    # midi_path = os.path.join("output", midi_filename)
    # mm.midi_io.note_sequence_to_midi_file(sequence, midi_path)
    # print(f"Generated midi file: {os.path.abspath(midi_path)}")

    # # Writes the resulting plot file to the output directory
    # date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    # generator_name = str(generator.__class__).split(".")[2]
    # plot_filename = "%s_%s_%s.html" % (generator_name, generator_id,
    #                                    date_and_time)
    # plot_path = os.path.join("output", plot_filename)
    # pretty_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence)
    # plotter = Plotter()
    # plotter.save(pretty_midi, plot_path)
    # print(f"Generated plot file: {os.path.abspath(plot_path)}")

    return sequence
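
# Hypothetical call of generate(): continue a short literal-list primer with
# the attention_rnn model (module, bundle, and primer values are illustrative).
from magenta.models.melody_rnn import melody_rnn_sequence_generator

sequence = generate(
    bundle_name='attention_rnn.mag',
    sequence_generator=melody_rnn_sequence_generator,
    generator_id='attention_rnn',
    primer_melody='[60, -2, 60, -2, 67, -2, 67, -2]',
    total_length_steps=64,
    temperature=1.1)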
Example #18

import os

import magenta
from magenta import music as mm
from magenta.models.melody_rnn import melody_rnn_model
from magenta.models.melody_rnn import melody_rnn_sequence_generator
from magenta.models.shared import sequence_generator_bundle
from note_seq.protobuf import generator_pb2
from midiutil import MIDIFile

BUNDLE_NAME = 'attention_rnn'
config = melody_rnn_model.default_configs[BUNDLE_NAME]
bundle_file = sequence_generator_bundle.read_bundle_file(
    os.path.abspath('model/' + BUNDLE_NAME + '.mag'))
steps_per_quarter = 4

generator = melody_rnn_sequence_generator.MelodyRnnSequenceGenerator(
      model=melody_rnn_model.MelodyRnnModel(config),
      details=config.details,
      steps_per_quarter=steps_per_quarter,
      bundle=bundle_file)

def _steps_to_seconds(steps, qpm):
    return steps * 60.0 / qpm / steps_per_quarter
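
# For example, with steps_per_quarter = 4 and qpm = 120, one bar of 4/4
# (16 steps) lasts _steps_to_seconds(16, 120) = 16 * 60.0 / 120 / 4 = 2.0 s.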


def make_midi(pitches, start_times, durations, qpm, midi_path):
    track    = 0
    channel  = 0
def listen_and_extend(chunk_duration,
                      min_volume,
                      min_rest,
                      rest_threshold,
                      mel_min=4,
                      rest_max=3,
                      sampling_rate=44100):
    chunksize = int(chunk_duration * sampling_rate)
    min_note_size = float(chunk_duration * 1.05)

    p = pyaudio.PyAudio()  # Initialize PyAudio object

    print(f"Recording audio in {chunk_duration} second chunks.")
    input("Press enter to proceed.")

    # Open stream with standard parameters
    stream = p.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=sampling_rate,
                    input=True,
                    frames_per_buffer=chunksize)

    # Run 4 processing steps: condense octaves, smooth repeats, remove errors, add rests
    pre_seq, full_raw = find_melody(chunksize, chunk_duration, sampling_rate,
                                    min_volume, stream)
    oct_seq = condense_octaves(copy.deepcopy(pre_seq))

    res = process_MIDI(copy.deepcopy(oct_seq), min_note_size)
    while not res[1]:
        res = process_MIDI(res[0], min_note_size)
    final_seq = res[0]

    samp_rest = find_rests(full_raw, rest_threshold)
    sec_rests = [(round(tup[0] / sampling_rate,
                        2), round(tup[1] / sampling_rate, 2))
                 for tup in samp_rest]
    sec_rests = [tup for tup in sec_rests if tup[1] - tup[0] > min_rest]

    rest_seq = []
    for note in final_seq:
        rest_seq = note.add_rests(sec_rests, rest_seq)

    # Cleanup
    stream.stop_stream()
    stream.close()
    p.terminate()

    # Plots the waveform and saves the result
    plt.plot(full_raw)
    plt.axhline(min_volume, color='r')
    plt.axhline(-min_volume, color='r')
    plt.title("Raw Microphone Input")
    plt.savefig("Output/Waveform.png")

    # Save MIDI plots and MIDI files
    save_sequence(pre_seq, 'pre')
    save_sequence(oct_seq, 'oct')
    save_sequence(final_seq, 'post')
    rest_mel = save_sequence(rest_seq, 'rest')

    # Initialize Model
    bundle = sequence_generator_bundle.read_bundle_file('Src/basic_rnn.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    # Model Parameters
    end_time = (max(note.end_time for note in rest_mel.notes))
    qpm = rest_mel.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    steps = ((rest_mel.total_time * qpm * melody_rnn.steps_per_quarter) / 60)
    total = steps * seconds_per_step
    tmp = 1.0

    # Initialize Generator
    gen_options = generator_pb2.GeneratorOptions()
    gen_options.args['temperature'].float_value = tmp
    gen_section = gen_options.generate_sections.add(start_time=end_time +
                                                    seconds_per_step,
                                                    end_time=total)

    out = melody_rnn.generate(rest_mel, gen_options)

    note_seq.sequence_proto_to_midi_file(out, 'Output/ext_out.mid')
    ext = pretty_midi.PrettyMIDI('Output/ext_out.mid')
    visual_midi.Plotter().save(ext, 'Output/ext_plotted.html')

    return ext
Example #20
def run_training(build_graph_fn,
                 train_dir,
                 num_training_steps=None,
                 summary_frequency=10,
                 save_checkpoint_secs=60,
                 checkpoints_to_keep=10,
                 keep_checkpoint_every_n_hours=1,
                 master='',
                 task=0,
                 num_ps_tasks=0,
                 warm_start_bundle_file=None):
    """Runs the training loop.

  Args:
    build_graph_fn: A function that builds the graph ops.
    train_dir: The path to the directory where checkpoints and summary events
        will be written to.
    num_training_steps: The number of steps to train for before exiting.
    summary_frequency: The number of steps between each summary. A summary is
        when graph values from the last step are logged to the console and
        written to disk.
    save_checkpoint_secs: The frequency at which to save checkpoints, in
        seconds.
    checkpoints_to_keep: The number of most recent checkpoints to keep in
       `train_dir`. Keeps all if set to 0.
    keep_checkpoint_every_n_hours: Keep a checkpoint every N hours, even if it
        results in more checkpoints than checkpoints_to_keep.
    master: URL of the Tensorflow master.
    task: Task number for this worker.
    num_ps_tasks: Number of parameter server tasks.
    warm_start_bundle_file: Path to a sequence generator bundle file that will
        be used to initialize the model weights for fine-tuning.
  """
    with tf.Graph().as_default():
        with tf.device(tf.train.replica_device_setter(num_ps_tasks)):
            build_graph_fn()

            global_step = tf.train.get_or_create_global_step()
            loss = tf.get_collection('loss')[0]
            perplexity = tf.get_collection('metrics/perplexity')[0]
            accuracy = tf.get_collection('metrics/accuracy')[0]
            train_op = tf.get_collection('train_op')[0]

            logging_dict = {
                'Global Step': global_step,
                'Loss': loss,
                'Perplexity': perplexity,
                'Accuracy': accuracy
            }
            hooks = [
                tf.train.NanTensorHook(loss),
                tf.train.LoggingTensorHook(logging_dict,
                                           every_n_iter=summary_frequency),
                tf.train.StepCounterHook(output_dir=train_dir,
                                         every_n_steps=summary_frequency)
            ]
            if num_training_steps:
                hooks.append(tf.train.StopAtStepHook(num_training_steps))

            with tempfile.TemporaryDirectory() as tempdir:
                if warm_start_bundle_file:
                    # We are fine-tuning from a pretrained bundle. Unpack the bundle and
                    # save its checkpoint to a temporary directory.
                    warm_start_bundle_file = os.path.expanduser(
                        warm_start_bundle_file)
                    bundle = sequence_generator_bundle.read_bundle_file(
                        warm_start_bundle_file)
                    checkpoint_filename = os.path.join(tempdir, 'model.ckpt')
                    with tf.gfile.Open(checkpoint_filename, 'wb') as f:
                        # For now, we support only 1 checkpoint file.
                        f.write(bundle.checkpoint_file[0])
                    variables_to_restore = tf_slim.get_variables_to_restore(
                        exclude=['global_step', '.*Adam.*', 'beta.*_power'])
                    init_op, init_feed_dict = tf_slim.assign_from_checkpoint(
                        checkpoint_filename, variables_to_restore)
                    init_fn = lambda scaffold, sess: sess.run(
                        init_op, init_feed_dict)
                else:
                    init_fn = None

                scaffold = tf.train.Scaffold(
                    init_fn=init_fn,
                    saver=tf.train.Saver(max_to_keep=checkpoints_to_keep,
                                         keep_checkpoint_every_n_hours=
                                         keep_checkpoint_every_n_hours))

                tf.logging.info('Starting training loop...')
                tf_slim.training.train(
                    train_op=train_op,
                    logdir=train_dir,
                    scaffold=scaffold,
                    hooks=hooks,
                    save_checkpoint_secs=save_checkpoint_secs,
                    save_summaries_steps=summary_frequency,
                    master=master,
                    is_chief=task == 0)
                tf.logging.info('Training complete.')
Example #21
import time

import mido
from pyfiglet import Figlet

global f
f = Figlet(font='slant')


import magenta
from magenta import music
# Import dependencies.
from magenta.models.melody_rnn import melody_rnn_sequence_generator
from magenta.models.shared import sequence_generator_bundle
from note_seq.protobuf import generator_pb2
from note_seq.protobuf import music_pb2

# Initialize the model.
print("Initializing Melody RNN...")
bundle = sequence_generator_bundle.read_bundle_file('basic_rnn.mag')
generator_map = melody_rnn_sequence_generator.get_generator_map()
melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
melody_rnn.initialize()
global temperature
temperature = 1.0

print(' Done!')

inport = mido.open_input('Axoloti Core:Axoloti Core MIDI 1 20:0')
# inport = mido.open_input('Midi Through:Midi Through Port-0 14:0')
outport = mido.open_output('Axoloti Core:Axoloti Core MIDI 1 20:0')
# outport = mido.open_output('Midi Through:Midi Through Port-0 14:0')
lasttime = time.time()

global mid
Example #22
    mel.notes.add(pitch=note[0], start_time=note[1], end_time=note[2],
                  velocity=80)

mel.tempos.add(qpm=90)

#  Convert note_seq to MIDI for storage and playback
note_seq.sequence_proto_to_midi_file(mel, 'Input/in.mid')

# Import Dependencies
from magenta.models.melody_rnn import melody_rnn_sequence_generator
from magenta.models.shared import sequence_generator_bundle
from note_seq.protobuf import generator_pb2
from note_seq.protobuf import music_pb2

# Initialize Model
bundle = sequence_generator_bundle.read_bundle_file('Src/basic_rnn.mag')  # Loads model for use
generator_map = melody_rnn_sequence_generator.get_generator_map()
melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
melody_rnn.initialize()

# Model Parameters
steps = 16
tmp = 1.0  # Measure of the generation's "temperature". Higher = More scattered/random

# Initialize Generator
gen_options = generator_pb2.GeneratorOptions()
gen_options.args['temperature'].float_value = tmp
gen_section = gen_options.generate_sections.add(start_time=8, end_time=16)

out = melody_rnn.generate(mel, gen_options)
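
# A possible follow-up (mirroring the other examples in this collection):
# write the generated continuation to a hypothetical output path.
note_seq.sequence_proto_to_midi_file(out, 'Output/out.mid')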
Example #23
twinkle_twinkle.notes.add(pitch=69, start_time=2.5, end_time=3.0, velocity=80)
twinkle_twinkle.notes.add(pitch=67, start_time=3.0, end_time=4.0, velocity=80)
twinkle_twinkle.notes.add(pitch=65, start_time=4.0, end_time=4.5, velocity=80)
twinkle_twinkle.notes.add(pitch=65, start_time=4.5, end_time=5.0, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.0, end_time=5.5, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.5, end_time=6.0, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.0, end_time=6.5, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.5, end_time=7.0, velocity=80)
twinkle_twinkle.notes.add(pitch=60, start_time=7.0, end_time=8.0, velocity=80)
twinkle_twinkle.total_time = 8

twinkle_twinkle.tempos.add(qpm=60)

# Initialize the model.
print("Initializing attention Melody RNN...")
bundle = sequence_generator_bundle.read_bundle_file(
    'content/attention_rnn.mag')
generator_map = melody_rnn_sequence_generator.get_generator_map()
melody_rnn = generator_map['attention_rnn'](checkpoint=None, bundle=bundle)
melody_rnn.initialize()

# Model options. Change these to get different generated sequences!

input_sequence = twinkle_twinkle  # change this to teapot if you want
num_steps = 128  # change this for shorter or longer sequences
temperature = 1.0  # the higher the temperature the more random the sequence.

# Set the start time to begin on the next step after the last note ends.
last_end_time = (max(
    n.end_time for n in input_sequence.notes) if input_sequence.notes else 0)
qpm = input_sequence.tempos[0].qpm
seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter