Code example #1
from magenta.models.music_vae import configs
from magenta.models.music_vae.trained_model import TrainedModel
from note_seq import note_sequence_to_midi_file


def music_vae_sample(model_id, model_config, num):
    # Load the pre-trained MusicVAE checkpoint bundle ('<model_id>.tar').
    music_vae = TrainedModel(configs.CONFIG_MAP[model_config],
                             batch_size=4,
                             checkpoint_dir_or_path=model_id + '.tar')

    # Sample `num` new sequences from the prior.
    generated_sequences = music_vae.sample(n=num, length=80, temperature=1.0)

    # Write each generated NoteSequence out as a MIDI file.
    for cnt, ns in enumerate(generated_sequences, start=1):
        note_sequence_to_midi_file(
            ns, 'vae_sample_' + model_id + '_%d.mid' % cnt)
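
# A minimal usage sketch (an assumption, not part of the original snippet):
# it presumes the pre-trained 'cat-mel_2bar_big.tar' bundle has already been
# downloaded into the working directory, and reuses the same string for both
# the model_id (file name stem) and the config key.
music_vae_sample('cat-mel_2bar_big', 'cat-mel_2bar_big', num=4)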
Code example #2
import os

import magenta.music as music
from magenta.models.music_vae import configs
from magenta.models.music_vae.trained_model import TrainedModel


def main():
    # Load the trained model from a local checkpoint.
    music_vae = TrainedModel(
        configs.CONFIG_MAP['cat-mel_2bar_big'],
        batch_size=4,
        checkpoint_dir_or_path='checkpoints/mel_2bar_big.ckpt')

    # Generate some sequences by sampling from the prior.
    generated_sequences = music_vae.sample(n=10, length=80, temperature=1.0)

    # Save the sequences to MIDI files; create the output directory first
    # so the writes don't fail on a fresh run.
    os.makedirs('output', exist_ok=True)
    for n, sequence in enumerate(generated_sequences):
        music.sequence_proto_to_midi_file(
            sequence, os.path.join('output',
                                   str(n) + '.mid'))
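
# Entry-point guard (added for completeness; not in the original snippet)
# so the script actually runs main() when executed directly.
if __name__ == '__main__':
    main()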
Code example #3
# 'teapot' is a NoteSequence (music_pb2.NoteSequence) built earlier in the
# notebook; this snippet picks up as its last note is added.
teapot.notes.add(pitch=76, start_time=6, end_time=8, velocity=80)
teapot.total_time = 8

teapot.tempos.add(qpm=60)

#mm.plot_sequence(teapot)
#mm.play_sequence(teapot,synth=mm.synthesize)
# Initialize the model.
print("Initializing Music VAE...")
music_vae = TrainedModel(configs.CONFIG_MAP['cat-mel_2bar_big'],
                         batch_size=4,
                         checkpoint_dir_or_path='mel_2bar_big.ckpt')

print('🎉 Done!')

generated_sequences = music_vae.sample(n=2, length=80, temperature=1.0)

for ns in generated_sequences:
    # print(ns)
    # Visualize and audition each sample (works inside a notebook).
    mm.plot_sequence(ns)
    mm.play_sequence(ns, synth=mm.fluidsynth)

# We're going to interpolate between the Twinkle Twinkle Little Star
# NoteSequence we defined in the first section, and one of the generated
# sequences from the previous VAE example.

# How many sequences, including the start and end ones, to generate.
num_steps = 8

# This gives us a list of sequences. The original call was cut off here, so
# the end sequence and keyword arguments are filled in as assumptions: per
# the comment above, the endpoint is taken from generated_sequences, and
# 32 steps matches this 2-bar melody model.
note_sequences = music_vae.interpolate(twinkle_twinkle,
                                       generated_sequences[0],
                                       num_steps=num_steps,
                                       length=32)
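
# A follow-on sketch (an assumed continuation, not part of the snippet):
# stitch the interpolation steps into one long NoteSequence so the whole
# morph can be played or saved as a single piece.
interp_seq = mm.sequences_lib.concatenate_sequences(note_sequences)
mm.play_sequence(interp_seq, synth=mm.fluidsynth)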
Code example #4
        # Tail end of a loop from the previous cell: route notes to the
        # drum channel (MIDI channel 10, i.e. instrument index 9).
        note.instrument = 9

"""# Chord-Conditioned Model"""

#@title Load Checkpoint

config = configs.CONFIG_MAP['hier-multiperf_vel_1bar_med_chords']
model = TrainedModel(
    config, batch_size=BATCH_SIZE,
    checkpoint_dir_or_path='/content/model_chords_fb64.ckpt')

#@title Same Chord, Random Styles

chord = 'C' #@param {type:"string"}
temperature = 0.2 #@param {type:"slider", min:0.01, max:1.5, step:0.01}
# chord_encoding, trim_sequences, and play are helper functions defined
# earlier in this notebook; BATCH_SIZE and TOTAL_STEPS are its globals.
seqs = model.sample(n=BATCH_SIZE, length=TOTAL_STEPS, temperature=temperature,
                    c_input=chord_encoding(chord))

trim_sequences(seqs)
play(seqs)

#@title Same Style, Chord Progression

chord_1 = 'C' #@param {type:"string"}
chord_2 = 'Caug' #@param {type:"string"}
chord_3 = 'Am' #@param {type:"string"}
chord_4 = 'E' #@param {type:"string"}
chords = [chord_1, chord_2, chord_3, chord_4]

temperature = 0.2 #@param {type:"slider", min:0.01, max:1.5, step:0.01}
z = np.random.normal(size=[1, Z_SIZE])
# The original list comprehension was cut off; this completion is an
# assumption: decode the same latent vector z once per chord, so the style
# stays fixed while the harmony changes.
seqs = [
    model.decode(length=TOTAL_STEPS, z=z, c_input=chord_encoding(c))[0]
    for c in chords
]
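
# Follow-on sketch mirroring the previous cell (an assumed continuation,
# since the snippet is cut off here): trim the decoded bars and audition
# the four-chord progression.
trim_sequences(seqs)
play(seqs)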
Code example #5
download(drums_interp, '%s_interp.mid' % drums_interp_model)

"""# 2-Bar Melody Model

The pre-trained model consists of a single-layer bidirectional LSTM encoder with 2048 nodes in each direction, a 3-layer LSTM decoder with 2048 nodes in each layer, and a latent code Z with 512 dimensions. The model was given 0 free bits, and had its beta value annealed at an exponential rate of 0.99999 from 0 to 0.43 over 200k steps. It was trained with scheduled sampling using an inverse sigmoid schedule and a rate of 1000. The final accuracy is 0.95 and the KL divergence is 58 bits.
"""

#@title Load the pre-trained model.
mel_2bar_config = configs.CONFIG_MAP['cat-mel_2bar_big']
mel_2bar = TrainedModel(mel_2bar_config, batch_size=4,
                        checkpoint_dir_or_path='/content/checkpoints/mel_2bar_big.ckpt')

"""## Generate Samples"""

#@title Generate 4 samples from the prior.
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
mel_2_samples = mel_2bar.sample(n=4, length=32, temperature=temperature)
for ns in mel_2_samples:
  play(ns)

#@title Optionally download samples.
for i, ns in enumerate(mel_2_samples):
  download(ns, 'mel_2bar_sample_%d.mid' % i)

"""## Generate Interpolations"""

#@title Option 1: Use example MIDI files for interpolation endpoints.
input_mel_midi_data = [
    tf.gfile.Open(fn, 'rb').read()  # read raw MIDI bytes, not text
    for fn in sorted(tf.gfile.Glob('/content/midi/mel_2bar*.mid'))]
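
# Sketch of the likely next step (assumed; mm is magenta.music as imported
# earlier in the notebook): parse the raw MIDI bytes into NoteSequences so
# they can serve as interpolation endpoints.
input_mel_seqs = [mm.midi_to_sequence_proto(m) for m in input_mel_midi_data]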

#@title Option 2: Upload your own MIDI files to use for interpolation endpoints instead of those provided.