Example #1
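# Helper extracted from a MusicVAE generation script: `config`, `FLAGS`,
# `date_and_time`, `mm` (magenta.music), `os`, and `sys` are assumed to be
# defined by the enclosing script (see Example #20 for the full context).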
 def _check_extract_examples(input_ns, path, input_number):
   """Make sure each input returns exactly one example from the converter."""
   tensors = config.data_converter.to_tensors(input_ns).outputs
   if not tensors:
     print(
         'MusicVAE configs have very specific input requirements. Could not '
         'extract any valid inputs from `%s`. Try another MIDI file.' % path)
     sys.exit()
   elif len(tensors) > 1:
     basename = os.path.join(
         FLAGS.output_dir,
         '%s_input%d-extractions_%s-*-of-%03d.mid' %
         (FLAGS.config, input_number, date_and_time, len(tensors)))
     for i, ns in enumerate(config.data_converter.to_notesequences(tensors)):
       mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))
     print(
         '%d valid inputs extracted from `%s`. Outputting these potential '
         'inputs as `%s`. Call script again with one of these instead.' %
         (len(tensors), path, basename))
     sys.exit()
Example #2
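# Assumes `mm` (magenta.music), a loaded MusicVAE model `music_vae`, a
# `twinkle_twinkle` NoteSequence, and `num_steps` are defined in earlier cells.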
babyshark = mm.midi_file_to_note_sequence('./mid/babyshark.mid')
babyshark = mm.extract_subsequence(babyshark, 0, 8)

babyshark.ticks_per_quarter = 0
babyshark.time_signatures.pop()
babyshark.key_signatures.pop()
babyshark.tempos.pop()
babyshark.tempos.add(qpm=60)

for note in babyshark.notes:
    if note.pitch < 60:
        note.pitch = 60
    note.instrument = 0
    note.is_drum = False

# This gives us a list of sequences.
note_sequences = music_vae.interpolate(twinkle_twinkle,
                                       babyshark,
                                       num_steps=num_steps,
                                       length=8)

# Concatenate them into one long sequence, with the start and
# end sequences at each end.
interp_seq = mm.sequences_lib.concatenate_sequences(note_sequences)

mm.play_sequence(interp_seq, synth=mm.fluidsynth)
mm.plot_sequence(interp_seq)

mm.sequence_proto_to_midi_file(interp_seq, 'twinkle_shark.mid')
Example #3
import magenta.music as mm

babyshark = mm.midi_file_to_note_sequence('./mid/babyshark_full.mid')

# Take an 8.5-second window starting at 14 seconds, then keep the first 8 seconds.

babyshark = mm.extract_subsequence(babyshark, 14, 14 + 8.5)
babyshark = mm.extract_subsequence(babyshark, 0, 8)

babyshark.ticks_per_quarter = 0
babyshark.time_signatures.pop()
babyshark.key_signatures.pop()
babyshark.tempos.pop()
babyshark.tempos.add(qpm=60)

for note in babyshark.notes:
    if note.pitch < 60:
        note.pitch = 60
    note.instrument = 0
    note.is_drum = False

mm.sequence_proto_to_midi_file(babyshark, './mid/babyshark.mid')
Example #4
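# Fragment of a transformer-based performance-generation notebook: `decode`,
# `unconditional_encoders`, `SAMPLE_RATE`, `SF2_PATH`, `mm`, and the colab
# `files` helper are assumed to be defined in earlier cells.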
# Decode to NoteSequence.
midi_filename = decode(
    sample_ids,
    encoder=unconditional_encoders['targets'])
unconditional_ns = mm.midi_file_to_note_sequence(midi_filename)

# Play and plot.
mm.play_sequence(
    unconditional_ns,
    synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(unconditional_ns)

#@title Download Performance as MIDI
#@markdown Download generated performance as MIDI (optional).

mm.sequence_proto_to_midi_file(
    unconditional_ns, '/tmp/unconditional.mid')
files.download('/tmp/unconditional.mid')

#@title Choose Priming Sequence
#@markdown Here you can choose a priming sequence to be continued
#@markdown by the model.  We have provided a few, or you can
#@markdown upload your own MIDI file.
#@markdown
#@markdown Set `max_primer_seconds` below to trim the primer to a
#@markdown fixed number of seconds (this will have no effect if
#@markdown the primer is already shorter than `max_primer_seconds`).

filenames = {
    'C major arpeggio': '/content/primers/c_major_arpeggio.mid',
    'C major scale': '/content/primers/c_major_scale.mid',
    'Clair de Lune': '/content/primers/clair_de_lune.mid',
Example #5
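# Assumes the magenta generator modules (melody_rnn_sequence_generator,
# sequence_generator_bundle, notebook_utils, generator_pb2, music_pb2),
# Bokeh's output_file/show, and the constants BUNDLE_NAME, BUNDLE_DIR,
# MODEL_NAME, TEMPERATURE, MIDI_FILE and PLOT_FILE are defined elsewhere.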
# Download the pre-trained bundle file and read it from disk.
notebook_utils.download_bundle(BUNDLE_NAME, BUNDLE_DIR)
bundle = sequence_generator_bundle.read_bundle_file(
    os.path.join(BUNDLE_DIR, BUNDLE_NAME))

# Build the melody RNN generator from the bundle and initialize it.
generator_map = melody_rnn_sequence_generator.get_generator_map()
generator = generator_map[MODEL_NAME](checkpoint=None, bundle=bundle)
generator.initialize()

# Set generation options; temperature controls the randomness of sampling.
generator_options = generator_pb2.GeneratorOptions()
generator_options.args["temperature"].float_value = TEMPERATURE

# Add the section to generate (0 to 30 seconds) to the options; the returned
# handle is not referenced again.
generate_section = generator_options.generate_sections.add(start_time=0,
                                                           end_time=30)

# Generate a sequence from an empty primer using the options above.
sequence = generator.generate(music_pb2.NoteSequence(), generator_options)

# Write the sequence to a MIDI file and also convert it to a PrettyMIDI object.
midi_file = sequence_proto_to_midi_file(sequence, MIDI_FILE)
midi_file_pretty = sequence_proto_to_pretty_midi(sequence)
# https://stackoverflow.com/questions/6030087/play-midi-files-in-python

# Build a Bokeh plot of the sequence and write it to PLOT_FILE.
plot = plot_sequence(sequence, False)
output_file(PLOT_FILE)
show(plot)
Example #6
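# Flask view function: ChordForm, app, render_template, url_for, mm, np,
# configs, TrainedModel, secrets, and the helpers/constants (BATCH_SIZE,
# Z_SIZE, TOTAL_STEPS, slerp, chord_encoding, trim_sequences,
# fix_instruments_for_concatenation, concatenate_sequences) are assumed to be
# defined elsewhere; fluidsynth and lame must be installed.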
def gen_chords():
    form = ChordForm()
    print("Chords CHOICES from form:" + str(form.chord_1.choices))
    form.chord_2.choices = list(form.chord_1.choices)
    form.chord_3.choices = list(form.chord_1.choices)
    form.chord_4.choices = list(form.chord_1.choices)
    if form.validate_on_submit():
        chord_1 = form.chord_1.data
        chord_2 = form.chord_2.data
        chord_3 = form.chord_3.data
        chord_4 = form.chord_4.data
        chords = [chord_1, chord_2, chord_3, chord_4]

        num_bars = form.num_bars.data
        temperature = form.temperature.data

        config = configs.CONFIG_MAP['hier-multiperf_vel_1bar_med_chords']
        weight_name = 'model_chords_fb64.ckpt'
        model = TrainedModel(config,
                             batch_size=BATCH_SIZE,
                             checkpoint_dir_or_path=os.path.join(
                                 app.root_path, 'content', weight_name))

        z1 = np.random.normal(size=[Z_SIZE])
        z2 = np.random.normal(size=[Z_SIZE])
        z = np.array([slerp(z1, z2, t) for t in np.linspace(0, 1, num_bars)])

        seqs = [
            model.decode(length=TOTAL_STEPS,
                         z=z[i:i + 1, :],
                         temperature=temperature,
                         c_input=chord_encoding(chords[i % 4]))[0]
            for i in range(num_bars)
        ]

        trim_sequences(seqs)
        fix_instruments_for_concatenation(seqs)
        prog_interp_ns = concatenate_sequences(seqs)

        f_ext = '.mid'
        random_hex = secrets.token_hex(8)
        music_fn = random_hex + f_ext
        music_mp3 = random_hex + ".mp3"

        #Save music to disk
        mm.sequence_proto_to_midi_file(prog_interp_ns, music_fn)

        ### Move audio to a specified path
        source_path = "/home/pratheesh/Flask_Blog/" + music_mp3
        destination_path = os.path.join(app.root_path, 'static/music',
                                        music_mp3)

        cmd_to_wav = "fluidsynth -F " + random_hex + ".wav /usr/share/sounds/sf2/FluidR3_GM.sf2 " + music_fn
        print(cmd_to_wav)
        os.system(cmd_to_wav)
        cmd_to_mp3 = "lame --preset standard " + random_hex + ".wav " + random_hex + ".mp3"
        print(cmd_to_mp3)
        os.system(cmd_to_mp3)
        #shutil.move(source_path, destination_path)
        #os.replace(source_path, destination_path)
        print("moving file")
        os.rename(source_path, destination_path)
        os.remove(music_fn)
        os.remove(random_hex + ".wav")

        music_file = url_for('static', filename='music/' + music_mp3)

        return render_template('music_output.html', music_file=music_file)

    return render_template('gen_chords.html', form=form)
Example #7
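# Assumes `mv_gen` wraps a MusicVAE generation script and exposes
# `main_program`, and that `mm`, `music_pb2`, `os`, and `subprocess` are
# imported; timidity is required for the MIDI-to-WAV conversion at the end.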
def make_music(extracted_bpm):
    BUNDLE_DIR = '/Users/engrbundle/Desktop/MusicGeneration/MusicVAE/'
    MODEL_DIR = 'hierdec-trio_16bar.tar'

    flags = {
        "output_dir": "/Users/engrbundle/Desktop/TeamBeatIt/BeatIt/magentabackend/output",
        "config": "hierdec-trio_16bar",
        "checkpoint_file": "/Users/engrbundle/Desktop/TeamBeatIt/BeatIt/server/flaskTests/hierdec-trio_16bar.tar",
        "mode": "sample",
        # "input_midi_1":
        # "input_midi_2":
        "num_outputs": 1,
        "max_batch_size": 2,
        "temperature": 1.4,
    }

    sequence_list = mv_gen.main_program(os.path.join(BUNDLE_DIR, MODEL_DIR),
                                        flags)
    sequence = sequence_list[0]

    ratio = 120 / extracted_bpm

    #Base QPM: 120
    sequence.tempos[0].qpm = int(round(120 * ratio))

    # qpm / 60 * (number of measures = 16)
    # 120 quarter notes per minute
    # = 30 whole notes per minute
    # = 0.5 whole notes per second, i.e. one whole note every 2 seconds
    # so 16 whole notes take 32 seconds

    # 90 / 60 * 16 = 24 seconds

    print("Total time ", sequence.total_time)
    print(dir(sequence))
    test = "C Bb F G C Bb F G"
    # test = "C B7 Em C B7 Em C B7 C" sad

    # Create backing chord progression from flags.
    raw_chords = test.split()

    #Base steps_per_chord = 32
    steps_per_chord = int(round(32 * ratio))
    repeated_chords = [
        chord for chord in raw_chords for _ in range(steps_per_chord)
    ]
    backing_chords = mm.ChordProgression(repeated_chords)

    print(dir(sequence))
    total_seconds = sequence.total_time

    CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL

    # Add the backing chords to the input sequence.
    #Base backing_qpm = 60
    chord_qpm = int(round(60 * ratio))
    chord_sequence = backing_chords.to_sequence(sequence_start_time=0.0,
                                                qpm=chord_qpm)
    for text_annotation in chord_sequence.text_annotations:
        if text_annotation.annotation_type == CHORD_SYMBOL:
            chord = sequence.text_annotations.add()
            chord.CopyFrom(text_annotation)
    # input_sequence.total_time = len(backing_chords) * seconds_per_step

    for note in sequence.notes:
        if note.is_drum and note.program == 0:  #Changing to new drum
            note.is_drum = True
            note.instrument = 2
            if note.pitch == 51:
                note.velocity = 0
        elif note.program == 0 and note.instrument == 0 and not note.is_drum:  #Changing from piano to space void
            note.velocity = 0
        elif note.instrument == 1 and note.program == 33:
            note.velocity = 0
        elif note.program == 87:
            print("This is a fifth")

    CHORD_VELOCITY = 50
    renderer = mm.BasicChordRenderer(velocity=CHORD_VELOCITY)
    renderer.render(sequence)

    mm.sequence_proto_to_midi_file(
        sequence,
        '/Users/engrbundle/Desktop/TeamBeatIt/BeatIt/server/musicvae.mid')
    print("New sequence was made")
    test = subprocess.Popen(
        ["timidity", "musicvae.mid", "-Ow", "-o", "musicvae.wav"],
        stdout=subprocess.PIPE)
    output = test.communicate()[0]
    print("Was run!!")

    print("Midi file has been generated")
Example #8
 def seq_to_midi_file(self, seq, output_file):
     melody = mm.Melody(events=seq.tolist())
     note_sequence = melody.to_sequence(qpm=80.0)
     mm.sequence_proto_to_midi_file(note_sequence, output_file)
     return seq
Example #9
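# Excerpt from a MusicVAE generation script: `config`, `FLAGS`,
# `checkpoint_dir_or_path`, `input_1`, `date_and_time`, `TrainedModel`, `mm`,
# `os`, and `logging` are assumed to come from the surrounding script
# (compare Example #20).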
model = TrainedModel(config,
                     batch_size=min(FLAGS.max_batch_size, FLAGS.num_outputs),
                     checkpoint_dir_or_path=checkpoint_dir_or_path)
"""
Encodes a collection of NoteSequences into latent vectors.
    Args:
      note_sequences: A collection of NoteSequence objects to encode.
      assert_same_length: Whether to raise an AssertionError if all of the
        extracted sequences are not the same length.
    Returns:
      The encoded `z`, `mu`, and `sigma` values. (as tuple)
"""
logging.info('Encoding...')
#_, mu, _ = model.encode([input_1, input_2])
z, mu, sigma = model.encode([input_1])
#z = np.array([ # z = collection of latent vectors to decode
#    _slerp(mu[0], mu[1], t) for t in np.linspace(0, 1, FLAGS.num_outputs)]) #Spherical linear interpolation

results = model.decode(length=config.hparams.max_seq_len,
                       z=z,
                       temperature=FLAGS.temperature)

basename = os.path.join(
    FLAGS.output_dir, '%s_%s_%s-*-of-%03d.mid' %
    (FLAGS.config, FLAGS.mode, date_and_time, FLAGS.num_outputs))
logging.info('Outputting %d files as `%s`...', FLAGS.num_outputs, basename)
for i, ns in enumerate(results):
    mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))

logging.info('Done.')
Example #10
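# MidiMe-style sampling: `FLAGS`, `tf`, `mm`, `time`, `os`, `logging`,
# `TrainedModel`, `MidiMeConfig`, `update`, `trimSilence`, `getChunks`, and
# `train_model` are assumed to be imported or defined elsewhere.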
def run(config_map):
    '''
    Load model params, save config file and start trainer.

    Args:
        config_map: Dictionary mapping configuration name to Config object.

    Raises:
        ValueError: if required flags are missing or invalid.
    '''
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')

    if (FLAGS.run_dir is None) == (FLAGS.checkpoint_file is None):
        raise ValueError(
            'Exactly one of `--run_dir` or `--checkpoint_file` must be specified.'
        )
    if FLAGS.output_dir is None:
        raise ValueError('`--output_dir` is required.')
    tf.gfile.MakeDirs(FLAGS.output_dir)

    if FLAGS.example_midi_dir is None:
        raise ValueError('example_midi_dir is required.')

    if FLAGS.config not in config_map:
        raise ValueError('Invalid config name: %s' % FLAGS.config)
    config = config_map[FLAGS.config]
    config.data_converter.max_tensors_per_item = None

    config_midime = MidiMeConfig
    if FLAGS.config_midime is not None:
        update(config_midime, FLAGS.config_midime)

    logging.info('Loading model...')
    if FLAGS.run_dir:
        checkpoint_dir_or_path = os.path.expanduser(
            os.path.join(FLAGS.run_dir, 'train'))
    else:
        checkpoint_dir_or_path = os.path.expanduser(FLAGS.checkpoint_file)
    model = TrainedModel(config,
                         batch_size=min(FLAGS.max_batch_size,
                                        FLAGS.num_outputs),
                         checkpoint_dir_or_path=checkpoint_dir_or_path)

    example_sequences = []

    example_midi_dir = os.path.expanduser(FLAGS.example_midi_dir)
    files_in_dir = tf.gfile.ListDirectory(os.path.join(example_midi_dir))
    for file_in_dir in files_in_dir:
        full_file_path = os.path.join(example_midi_dir, file_in_dir)
        try:
            example_sequences.append(
                mm.midi_file_to_note_sequence(full_file_path))
        except Exception:
            raise ValueError('Could not parse MIDI file: %s' % full_file_path)

    trimSilence(example_sequences)
    for i in example_sequences:
        i.tempos[0].time = 0
        del i.tempos[1:]

    chunks = getChunks(example_sequences, config.hparams.max_seq_len)

    latent = model.encode(chunks)[0]
    midime = train_model(latent, config_midime)

    s = midime.sample(FLAGS.num_outputs)
    samples = model.decode(s, config.hparams.max_seq_len)

    basename = os.path.join(
        FLAGS.output_dir, '%s_%s-*-of-%03d.mid' %
        (FLAGS.config, date_and_time, FLAGS.num_outputs))
    logging.info('Outputting %d files as `%s`...', FLAGS.num_outputs, basename)

    for i, ns in enumerate(samples):
        mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))

    logging.info('Done.')
Example #11
def work_on_midi():
    sequence_from_midi = mm.midi_file_to_note_sequence("/Users/wushaobo/Downloads/POP1.mid")

    gen_sequence = generate_sequence(sequence_from_midi)

    mm.sequence_proto_to_midi_file(gen_sequence, "./output_generated_sequence.mid")
Example #12
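# Assumes the `teapot` and `twinkle_twinkle` NoteSequences, a loaded
# `music_vae` model, `mm`, and the colab `files` helper are defined in earlier
# cells (the teapot sequence itself is built in Example #19).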
# Interpolate between the Teapot melody and Twinkle Twinkle
# using the MusicVAE model.

# The number of sequences to generate, including the start and end ones.
num_steps = 8

# Interpolate; this returns a list of sequences, each `length` steps long.
note_sequences = music_vae.interpolate(
    teapot,
    twinkle_twinkle,
    num_steps=num_steps,
    length=32)

# Concatenate into one long sequence, with the start and
# end sequences at each end.
interp_seq = mm.sequences_lib.concatenate_sequences(note_sequences)

# This is a colab utility method that plays the NoteSequence
# For the fully composed AI music via the Music VAE model
mm.play_sequence(interp_seq, synth=mm.fluidsynth)

# This is a colab utility method that visualizes the NoteSequence
# For the fully composed AI music via the Music VAE model
mm.plot_sequence(interp_seq)

# This creates a file called `interp_seq_sample_output.mid`, containing the interpolated sequence.
mm.sequence_proto_to_midi_file(interp_seq, 'interp_seq_sample_output.mid')

# This is a colab utility method to download that file. In your Python script, you 
# would just write it to disk.
files.download('interp_seq_sample_output.mid')
Example #13
 def sequence2mid(self):
     if hasattr(self, 'sequence'):
         mm.sequence_proto_to_midi_file(self.sequence, 'model.mid')
     else:
         print("No sequence exists.")
Example #14
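# Same generation routine as Example #20, reformatted with 4-space indentation
# and using the converter's from_tensors() method instead of to_notesequences().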
def run(config_map):
    """Load model params, save config file and start trainer.

    Args:
      config_map: Dictionary mapping configuration name to Config object.

    Raises:
      ValueError: if required flags are missing or invalid.
    """
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')

    if (FLAGS.run_dir is None) == (FLAGS.checkpoint_file is None):
        raise ValueError(
            'Exactly one of `--run_dir` or `--checkpoint_file` must be specified.'
        )
    if FLAGS.output_dir is None:
        raise ValueError('`--output_dir` is required.')
    tf.gfile.MakeDirs(FLAGS.output_dir)
    if FLAGS.mode != 'sample' and FLAGS.mode != 'interpolate':
        raise ValueError('Invalid value for `--mode`: %s' % FLAGS.mode)

    if FLAGS.config not in config_map:
        raise ValueError('Invalid config name: %s' % FLAGS.config)
    config = config_map[FLAGS.config]
    config.data_converter.max_tensors_per_item = None

    if FLAGS.mode == 'interpolate':
        if FLAGS.input_midi_1 is None or FLAGS.input_midi_2 is None:
            raise ValueError(
                '`--input_midi_1` and `--input_midi_2` must be specified in '
                '`interpolate` mode.')
        input_midi_1 = os.path.expanduser(FLAGS.input_midi_1)
        input_midi_2 = os.path.expanduser(FLAGS.input_midi_2)
        if not os.path.exists(input_midi_1):
            raise ValueError('Input MIDI 1 not found: %s' % FLAGS.input_midi_1)
        if not os.path.exists(input_midi_2):
            raise ValueError('Input MIDI 2 not found: %s' % FLAGS.input_midi_2)
        input_1 = mm.midi_file_to_note_sequence(input_midi_1)
        input_2 = mm.midi_file_to_note_sequence(input_midi_2)

        def _check_extract_examples(input_ns, path, input_number):
            """Make sure each input returns exactly one example from the converter."""
            tensors = config.data_converter.to_tensors(input_ns).outputs
            if not tensors:
                print(
                    'MusicVAE configs have very specific input requirements. Could not '
                    'extract any valid inputs from `%s`. Try another MIDI file.'
                    % path)
                sys.exit()
            elif len(tensors) > 1:
                basename = os.path.join(
                    FLAGS.output_dir,
                    '%s_input%d-extractions_%s-*-of-%03d.mid' %
                    (FLAGS.config, input_number, date_and_time, len(tensors)))
                for i, ns in enumerate(
                        config.data_converter.from_tensors(tensors)):
                    mm.sequence_proto_to_midi_file(
                        ns, basename.replace('*', '%03d' % i))
                print(
                    '%d valid inputs extracted from `%s`. Outputting these potential '
                    'inputs as `%s`. Call script again with one of these instead.'
                    % (len(tensors), path, basename))
                sys.exit()

        logging.info(
            'Attempting to extract examples from input MIDIs using config `%s`...',
            FLAGS.config)
        _check_extract_examples(input_1, FLAGS.input_midi_1, 1)
        _check_extract_examples(input_2, FLAGS.input_midi_2, 2)

    logging.info('Loading model...')
    if FLAGS.run_dir:
        checkpoint_dir_or_path = os.path.expanduser(
            os.path.join(FLAGS.run_dir, 'train'))
    else:
        checkpoint_dir_or_path = os.path.expanduser(FLAGS.checkpoint_file)
    model = TrainedModel(config,
                         batch_size=min(FLAGS.max_batch_size,
                                        FLAGS.num_outputs),
                         checkpoint_dir_or_path=checkpoint_dir_or_path)

    if FLAGS.mode == 'interpolate':
        logging.info('Interpolating...')
        _, mu, _ = model.encode([input_1, input_2])
        z = np.array([
            _slerp(mu[0], mu[1], t)
            for t in np.linspace(0, 1, FLAGS.num_outputs)
        ])
        results = model.decode(length=config.hparams.max_seq_len,
                               z=z,
                               temperature=FLAGS.temperature)
    elif FLAGS.mode == 'sample':
        logging.info('Sampling...')
        results = model.sample(n=FLAGS.num_outputs,
                               length=config.hparams.max_seq_len,
                               temperature=FLAGS.temperature)

    basename = os.path.join(
        FLAGS.output_dir, '%s_%s_%s-*-of-%03d.mid' %
        (FLAGS.config, FLAGS.mode, date_and_time, FLAGS.num_outputs))
    logging.info('Outputting %d files as `%s`...', FLAGS.num_outputs, basename)
    for i, ns in enumerate(results):
        mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))

    logging.info('Done.')
Example #15
 def save_midi(self, audio_tensor, path):
     note_seq = self._config.data_converter.to_items([audio_tensor])[0]
     music.sequence_proto_to_midi_file(note_seq, path)
Example #16
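# The first block below is the tail of a sequence-generation helper:
# `input_sequence`, `rnn_model`, `generator_options`, `num_steps`,
# `drums_rnn_sequence_generator`, `build_note_sequence`, `generate_sequence`,
# and `mm` are assumed to be defined elsewhere in the script.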
    last_end_time = max(
        n.end_time
        for n in input_sequence.notes) if input_sequence.notes else 0
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / rnn_model.steps_per_quarter
    start_time = last_end_time + seconds_per_step
    generation_seconds = num_steps * seconds_per_step

    generator_options.generate_sections.add(
        start_time=start_time,
        end_time=start_time + generation_seconds)

    sequence = rnn_model.generate(input_sequence, generator_options)

    return sequence


def _init_generator():
    bundle_file = mm.sequence_generator_bundle.read_bundle_file(
        './lib/drum_kit_rnn.mag')
    generator_map = drums_rnn_sequence_generator.get_generator_map()
    generator = generator_map['drum_kit'](bundle=bundle_file)

    return generator


random_sequence = build_note_sequence()
gen_sequence = generate_sequence(random_sequence)
mm.sequence_proto_to_midi_file(gen_sequence, "./output_generated_sequence.mid")
Example #17
def note_sequence_to_midi_file(note_sequence, path):
    """
    Save <note_sequence> to .midi file at <path>
    """
    create_dir_if_not_exists(path)
    mm.sequence_proto_to_midi_file(note_sequence, path)
Example #18
def download(note_sequence, filename):
    mm.sequence_proto_to_midi_file(note_sequence, filename)
Example #19
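# Assumes `mm` (magenta.music) and `music_pb2` were already imported in an
# earlier cell (music_pb2 is re-imported below).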
teapot = music_pb2.NoteSequence()
teapot.notes.add(pitch=69, start_time=0, end_time=0.5, velocity=80)
teapot.notes.add(pitch=71, start_time=0.5, end_time=1, velocity=80)
teapot.notes.add(pitch=73, start_time=1, end_time=1.5, velocity=80)
teapot.notes.add(pitch=74, start_time=1.5, end_time=2, velocity=80)
teapot.notes.add(pitch=76, start_time=2, end_time=2.5, velocity=80)
teapot.notes.add(pitch=81, start_time=3, end_time=4, velocity=80)
teapot.notes.add(pitch=78, start_time=4, end_time=5, velocity=80)
teapot.notes.add(pitch=81, start_time=5, end_time=6, velocity=80)
teapot.notes.add(pitch=76, start_time=6, end_time=8, velocity=80)
teapot.total_time = 8

teapot.tempos.add(qpm=60)

mm.sequence_proto_to_midi_file(teapot, 'drums_sample_output.mid')

# Import dependencies.
from magenta.models.melody_rnn import melody_rnn_sequence_generator
from magenta.protobuf import generator_pb2
from magenta.protobuf import music_pb2

# Initialize the model.
mm.notebook_utils.download_bundle('basic_rnn.mag', '/content/')
bundle = mm.sequence_generator_bundle.read_bundle_file(
    '/content/basic_rnn.mag')

generator_map = melody_rnn_sequence_generator.get_generator_map()
melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
melody_rnn.initialize()
print("INITIALIZATION COMPLETE")
Example #20
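# Full run() from a MusicVAE generation script: `FLAGS`, `mm`, `np`, `os`,
# `sys`, `time`, `tf`, `logging`, `TrainedModel`, and `_slerp` are assumed to
# be defined at module level.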
def run(config_map):
  """Load model params, save config file and start trainer.

  Args:
    config_map: Dictionary mapping configuration name to Config object.

  Raises:
    ValueError: if required flags are missing or invalid.
  """
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')

  if (FLAGS.run_dir is None) == (FLAGS.checkpoint_file is None):
    raise ValueError(
        'Exactly one of `--run_dir` or `--checkpoint_file` must be specified.')
  if FLAGS.output_dir is None:
    raise ValueError('`--output_dir` is required.')
  tf.gfile.MakeDirs(FLAGS.output_dir)
  if FLAGS.mode != 'sample' and FLAGS.mode != 'interpolate':
    raise ValueError('Invalid value for `--mode`: %s' % FLAGS.mode)

  if FLAGS.config not in config_map:
    raise ValueError('Invalid config name: %s' % FLAGS.config)
  config = config_map[FLAGS.config]
  config.data_converter.max_tensors_per_item = None

  if FLAGS.mode == 'interpolate':
    if FLAGS.input_midi_1 is None or FLAGS.input_midi_2 is None:
      raise ValueError(
          '`--input_midi_1` and `--input_midi_2` must be specified in '
          '`interpolate` mode.')
    input_midi_1 = os.path.expanduser(FLAGS.input_midi_1)
    input_midi_2 = os.path.expanduser(FLAGS.input_midi_2)
    if not os.path.exists(input_midi_1):
      raise ValueError('Input MIDI 1 not found: %s' % FLAGS.input_midi_1)
    if not os.path.exists(input_midi_2):
      raise ValueError('Input MIDI 2 not found: %s' % FLAGS.input_midi_2)
    input_1 = mm.midi_file_to_note_sequence(input_midi_1)
    input_2 = mm.midi_file_to_note_sequence(input_midi_2)

    def _check_extract_examples(input_ns, path, input_number):
      """Make sure each input returns exactly one example from the converter."""
      tensors = config.data_converter.to_tensors(input_ns).outputs
      if not tensors:
        print(
            'MusicVAE configs have very specific input requirements. Could not '
            'extract any valid inputs from `%s`. Try another MIDI file.' % path)
        sys.exit()
      elif len(tensors) > 1:
        basename = os.path.join(
            FLAGS.output_dir,
            '%s_input%d-extractions_%s-*-of-%03d.mid' %
            (FLAGS.config, input_number, date_and_time, len(tensors)))
        for i, ns in enumerate(config.data_converter.to_notesequences(tensors)):
          mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))
        print(
            '%d valid inputs extracted from `%s`. Outputting these potential '
            'inputs as `%s`. Call script again with one of these instead.' %
            (len(tensors), path, basename))
        sys.exit()
    logging.info(
        'Attempting to extract examples from input MIDIs using config `%s`...',
        FLAGS.config)
    _check_extract_examples(input_1, FLAGS.input_midi_1, 1)
    _check_extract_examples(input_2, FLAGS.input_midi_2, 2)

  logging.info('Loading model...')
  if FLAGS.run_dir:
    checkpoint_dir_or_path = os.path.expanduser(
        os.path.join(FLAGS.run_dir, 'train'))
  else:
    checkpoint_dir_or_path = os.path.expanduser(FLAGS.checkpoint_file)
  model = TrainedModel(
      config, batch_size=min(FLAGS.max_batch_size, FLAGS.num_outputs),
      checkpoint_dir_or_path=checkpoint_dir_or_path)

  if FLAGS.mode == 'interpolate':
    logging.info('Interpolating...')
    _, mu, _ = model.encode([input_1, input_2])
    z = np.array([
        _slerp(mu[0], mu[1], t) for t in np.linspace(0, 1, FLAGS.num_outputs)])
    results = model.decode(
        length=config.hparams.max_seq_len,
        z=z,
        temperature=FLAGS.temperature)
  elif FLAGS.mode == 'sample':
    logging.info('Sampling...')
    results = model.sample(
        n=FLAGS.num_outputs,
        length=config.hparams.max_seq_len,
        temperature=FLAGS.temperature)

  basename = os.path.join(
      FLAGS.output_dir,
      '%s_%s_%s-*-of-%03d.mid' %
      (FLAGS.config, FLAGS.mode, date_and_time, FLAGS.num_outputs))
  logging.info('Outputting %d files as `%s`...', FLAGS.num_outputs, basename)
  for i, ns in enumerate(results):
    mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))

  logging.info('Done.')
Example #21
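# Assumes `mm`, `np` (numpy), `os`, `pickle`, a config module `cfg` providing
# PITCH_NUM and INDEX_DICT, and the helpers `audio2frame` and `parse_sequence`
# are defined elsewhere.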
def preprocess_samples(samples, frame_size, frame_time, spectrogram, prefix,
                       data_dir):
    num = 0
    for audio, sequence in samples:
        num += 1
        frames, frame_num = audio2frame(audio, frame_size, spectrogram)
        mm.sequence_proto_to_midi_file(
            sequence, os.path.join(data_dir, '%s_%i.mid' % (prefix, num)))

        # Calculate how many notes occur for each pitch in this sample.
        statistics = np.zeros((cfg.PITCH_NUM), dtype=int)
        for note in sequence.notes:
            statistics[cfg.INDEX_DICT[note.pitch]] += 1

        # Sort the notes by start_time and end_time to do a sweep
        notes = [[note.start_time, note.end_time, cfg.INDEX_DICT[note.pitch]]
                 for note in sequence.notes]
        notes_dict = dict(zip(list(range(len(notes))), notes))
        start_dict = {}
        end_dict = {}

        for i in notes_dict:
            start_time = notes_dict[i][0]
            end_time = notes_dict[i][1]
            start_dict.setdefault(start_time, set()).add(i)
            end_dict.setdefault(end_time, set()).add(i)

        start_times = sorted(start_dict.keys())
        end_times = sorted(end_dict.keys())

        # Generate activation and onset matrix
        last_time = end_times[-1]
        t, l, r = 0, 0, 0
        status = set()
        activation = np.zeros((frame_num, cfg.PITCH_NUM), dtype=int)
        onset = np.zeros((frame_num, cfg.PITCH_NUM), dtype=int)
        for i in range(frame_num):
            st = i * frame_time / 1000.0
            ed = (i + 1) * frame_time / 1000.0
            while (l < len(start_times)
                   and st <= start_times[l] < ed):
                note_list = start_dict[start_times[l]]
                status.update(note_list)
                l += 1
                for j in note_list:
                    onset[i, notes_dict[j][2]] = 1

            for j in status:
                activation[i, notes_dict[j][2]] = 1

            while (r < len(end_times)
                   and st <= end_times[r] < ed):
                note_list = end_dict[end_times[r]]
                status = status - note_list
                r += 1
        '''  
        if frame_num > 200:
            utils.plot_matrix(onset.T)
            assert(False)
        '''
        # Save the sample to a file
        sample = {
            'Audio': audio,
            'Frames': frames,
            'Activation': activation,
            'Onset': onset,
            'Sequence': parse_sequence(sequence),
            'Statistics': statistics
        }
        with open(os.path.join(data_dir, '%s_%i.pickle' % (prefix, num)),
                  'wb') as f:
            pickle.dump(sample, f)