Code Example #1
def main(saved_weights_path):
    # This function takes as input the path to the weights of the network
    # First we build the model and get the parameters of the network
    x, cost, generate, W, bh, bv, x, lr, Wuh, Wuv, Wvu, Wuu, bu, u0 = rnn_rbm.rnnrbm()

    tvars = [W, Wuh, Wuv, Wvu, Wuu, bh, bv, bu, u0]

    # We use this saver object to restore the weights of the model
    saver = tf.train.Saver(tvars)

    song_primer = midi_manipulation.get_song(primer_song)
    print(song_primer)

    with tf.Session() as sess:
        init = tf.initialize_all_variables()
        sess.run(init)
        # Load the saved weights of the network
        saver.restore(sess, saved_weights_path)
        # We generate num songs
        for i in tqdm(range(num)):
            # Prime the network with song primer and generate an original song
            generated_music = sess.run(generate(300), feed_dict={x: song_primer})
            # The new song will be saved here
            new_song_path = "music_outputs/{}_{}".format(i, primer_song.split("/")[-1])
            midi_manipulation.write_song(new_song_path, generated_music)
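This snippet relies on module-level names that are not shown (primer_song, num, and the imports). A minimal sketch of the surrounding module, with hypothetical values for those names, might look like this:

import sys

import tensorflow as tf  # TF 1.x style, matching the session/saver API above
from tqdm import tqdm

import midi_manipulation
import rnn_rbm

# Hypothetical module-level configuration assumed by main() above
primer_song = "Pop_Music_Midi/some_primer.mid"  # path to the priming MIDI file (placeholder)
num = 10                                        # number of songs to generate (assumed)

if __name__ == "__main__":
    # Expects the checkpoint path as the first command-line argument
    main(sys.argv[1])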
Code Example #2
def main(saved_weights_path, primer):
    # First, build model then get pointers to params
    neural_net = lstm_rbm.LSTMNet()

    tvars = neural_net.training_vars()
    x = neural_net.x

    saver = tf.train.Saver(tvars)

    song_primer = midi_manipulation.get_song(primer)

    with tf.Session() as sess:
        init = tf.initialize_all_variables()
        sess.run(init)
        # Load the saved weights of the network
        saver.restore(sess, saved_weights_path)
        # We generate num songs
        for i in tqdm(range(num)):
            # Prime the network with song primer and generate an original song
            generated_music = sess.run(neural_net.generate(300),
                                       feed_dict={x: song_primer})
            # The new song will be saved here
            new_song_path = "music_outputs/{}_{}".format(i, primer.split("/")[-1])
            midi_manipulation.write_song(new_song_path, generated_music)
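This variant wraps the model in a class instead of unpacking a tuple of parameters. The snippet only touches a small surface of that class; a minimal sketch of the interface it assumes (not the actual lstm_rbm implementation, just the members main() relies on) would be:

import tensorflow as tf

class LSTMNet(object):
    """Hypothetical interface implied by the snippet above."""

    def __init__(self):
        # Placeholder for the primer song; shape (timesteps, notes) is assumed
        self.x = tf.placeholder(tf.float32, [None, None], name="x")
        # ... LSTM and RBM weights/biases would be created here ...

    def training_vars(self):
        # Return the list of tf.Variable objects to pass to tf.train.Saver
        raise NotImplementedError

    def generate(self, num_timesteps):
        # Return an op that samples num_timesteps of new music from the model
        raise NotImplementedError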
Code Example #3
def main(saved_weights_path):
    # This function takes as input the path to the weights of the network
    # First we build the model and get the parameters of the network
    x, cost, generate, W, bh, bv, x, lr, Wuh, Wuv, Wvu, Wuu, bu, u0 = rnn_rbm.rnnrbm()

    tvars = [W, Wuh, Wuv, Wvu, Wuu, bh, bv, bu, u0]

    # We use this saver object to restore the weights of the model
    saver = tf.train.Saver(tvars)

    summ = tf.summary.merge_all()

    song_primer = midi_manipulation.get_song(primer_song)

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        writer = tf.summary.FileWriter(
            "/home/ubuntu/cyberbach/tensorpy/tensorboardlog")
        writer.add_graph(sess.graph)
        sess.run(init)
        # load the saved weights of the network
        saver.restore(sess, saved_weights_path)
        # We generate num songs
        for i in tqdm(range(num)):
            # Prime the network with song primer and generate an original song
            generated_music = sess.run(generate(300),
                                       feed_dict={x: song_primer})
            # The new song will be saved here
            #new_song_path = "music_outputs/{}_{}".format(i, primer_song.split("/")[-1])
            new_song_path = "music_outputs/newsong"
            midi_manipulation.write_song(new_song_path, generated_music)
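Note that this version creates a merged summary op (summ) and a FileWriter but never runs the summaries during generation; only the graph is exported for TensorBoard. If per-step summaries were wanted, a minimal sketch (assuming at least one tf.summary op was registered when the graph was built, otherwise merge_all() returns None) would be:

# Hypothetical addition inside the generation loop of the snippet above
s = sess.run(summ, feed_dict={x: song_primer})  # evaluate the merged summaries
writer.add_summary(s, global_step=i)            # write them for TensorBoard
writer.flush()

The exported graph can then be inspected with tensorboard --logdir /home/ubuntu/cyberbach/tensorpy/tensorboardlog.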
Code Example #4
def main(saved_weights_path, songList):
    # Primer
    songList = songList.split(',')
    # The path to the song to use to prime the network
    primer_song = songList[0]
    song_primer = midi_manipulation.get_song(primer_song)

    # This function takes as input the path to the weights of the network
    # First we build the model and get the parameters of the network
    x, cost, generate, W, bh, bv, x, lr, Wuh, Wuv, Wvu, Wuu, bu, u0 = rnn_rbm.rnnrbm()

    tvars = [W, Wuh, Wuv, Wvu, Wuu, bh, bv, bu, u0]

    # We use this saver object to restore the weights of the model
    saver = tf.train.Saver(tvars)

    # check folder existence
    directory = (sys.path[0] + '/music_outputs')
    if not os.path.exists(directory):
        os.makedirs(directory)

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        print(saved_weights_path)
        # Load the saved weights of the network
        saver.restore(sess, saved_weights_path)
        # We generate num songs
        for i in tqdm(range(num)):
            # Prime the network with song primer and generate an original song
            generated_music = sess.run(generate(300), feed_dict={x: song_primer})
            # Write the new song into the music_outputs folder created above
            new_song_path = os.path.join(directory, "Song-{}".format(i))
            midi_manipulation.write_song(new_song_path, generated_music)
Code Example #5
def main(saved_weights_path, target_dir, kval):
    if os.path.isdir(target_dir):
        songsList = os.listdir(target_dir)
        randomSong = random.choice(songsList)
        # The path to the song to use to prime the network
        primer_song = os.path.join(target_dir, randomSong)
    else:
        # A specific song was given
        primer_song = target_dir

    print('Primer Song = ', primer_song)

    # This function takes as input the path to the weights of the network
    # First we build the model and get the parameters of the network
    x, _, _, cost, generate, W, bh, bv, lr, Wuh, Wuv, Wvu, Wuu, bu, u0 = rnn_rbm.rnnrbm()

    tvars = [W, Wuh, Wuv, Wvu, Wuu, bh, bv, bu, u0]

    # We use this saver object to restore the weights of the model
    saver = tf.train.Saver(tvars)

    song_primer = midi_manipulation.get_song(primer_song)

    with tf.Session() as sess:
        init = tf.initialize_all_variables()
        sess.run(init)
        # Load the saved weights of the network
        saver.restore(sess, saved_weights_path)
        # Generate songs
        # Prime the network with song primer and generate an original song
        generated_music = sess.run(generate(300, k_in=kval),
                                   feed_dict={x: song_primer})

        saved_weight_name = saved_weights_path.split('/')[-1].split('.')[0]
        primer_song_name = primer_song.split('/')[-1].split('.')[0]

        new_song_path = "music_outputs/Name={}_K={}_{}".format(
            saved_weight_name, kval,
            primer_song_name)  #The new song will be saved here
        midi_manipulation.write_song(new_song_path, generated_music)
Code Example #6
def main(saved_weights_path):
    # This function takes as input the path to the weights of the network
    # First we build the model and get the parameters of the network
    x, cost, generate, reconstruction, W, Wuh, Wuv, Wvu, Wuu, bh, bv, bu, lr, u0 = rnn_rbm.rnnrbm()

    params = [W, Wuh, Wuv, Wvu, Wuu, bh, bv, bu, u0]

    # We use this saver object to restore the weights of the model
    saver = tf.train.Saver(params)

    # primer_song is just one song, not a batch. It's of dimension 3
    song_primer = midi_manipulation.get_song(primer_song)
    #output folder
    output_folder = "music_outputs_generate"
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        # Load the saved weights of the network
        saver.restore(sess, saved_weights_path)
        # We generate num_songs songs
        for i in tqdm(range(num_songs)):
            # Prime the network with song primer and generate an original song
            generated_music = sess.run(generate(300), feed_dict={x: song_primer})
            # The new song will be saved here
            new_song_path = "{}/{}_{}".format(output_folder, i, primer_song.split("/")[-1])
            midi_manipulation.write_song(new_song_path, generated_music)
Code Example #7
File: test.py  Project: danieljyj/music_jiang
# -*- coding: utf-8 -*-
"""
Created on Mon Aug  5 16:45:46 2019

@author: p102380
"""
import numpy as np
import midi
import glob
from tqdm import tqdm
import midi_manipulation

files = glob.glob('{}/*.mid*'.format('Pop_Music_Midi'))

for f in tqdm(files):
    try:
        song = midi_manipulation.get_song(f)
        if np.shape(song)[0] < 20:
            print(np.shape(song))
            print(f)
    except Exception as e:
        print(f, e)
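This script only reports MIDI files whose piano-roll representation is shorter than 20 timesteps. If the goal is also to build a cleaned training list in the same pass, a small extension along these lines could be used (same assumptions as above: get_song returns an array whose first axis is time):

# Hypothetical extension: keep only the songs that are long enough to train on
MIN_TIMESTEPS = 20  # threshold used in the check above

good_songs = []
for f in tqdm(files):
    try:
        song = midi_manipulation.get_song(f)
    except Exception as e:
        print(f, e)
        continue
    if np.shape(song)[0] >= MIN_TIMESTEPS:
        good_songs.append(np.array(song))

print("kept {} of {} files".format(len(good_songs), len(files)))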
Code Example #8
def main(num_epochs, k_test):
    #num_epochs = 100 # 100! (9, 19, 29, ... 99 [10 Checkpoints.])
    target_dir = 'Train_DATA'
    #First, we build the model and get pointers to the model parameters
    songs = midi_manipulation.get_songs(target_dir)  #Load the songs

    #######################
    primer_song = [
        'You Belong With Me - Verse.midi', 'Someone Like You - Chorus.midi',
        'Pompeii - Bridge.midi'
    ]
    primer_song = [os.path.join(target_dir, p) for p in primer_song]
    song_primer = [midi_manipulation.get_song(p) for p in primer_song]
    #######################

    #ipdb.set_trace()
    print('Doing K as:', k_test)
    x, out1, out2, cost, generate, W, bh, bv, lr, Wuh, Wuv, Wvu, Wuu, bu, u0 = rnn_rbm.rnnrbm(k_test)

    #The trainable variables include the weights and biases of the RNN and the RBM, as well as the initial state of the RNN
    tvars = [W, Wuh, Wuv, Wvu, Wuu, bh, bv, bu, u0]
    # opt_func = tf.train.AdamOptimizer(learning_rate=lr)
    # grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), 1)
    # updt = opt_func.apply_gradients(zip(grads, tvars))

    # The learning rate of the optimizer is a parameter that we set on a schedule during training
    opt_func = tf.train.GradientDescentOptimizer(learning_rate=lr)
    gvs = opt_func.compute_gradients(cost, tvars)
    # We use gradient clipping to prevent gradients from blowing up during training
    gvs = [(tf.clip_by_value(grad, -10., 10.), var) for grad, var in gvs]
    # The update step applies the clipped gradients to the model parameters
    updt = opt_func.apply_gradients(gvs)

    # We use this saver object to restore the weights of the model and save the weights every few epochs
    saver = tf.train.Saver(tvars, max_to_keep=None)

    loss_print_dir = 'k{}_lossprint.csv'.format(k_test)
    Loss_Print_pipe = open(loss_print_dir, 'w')

    def Generate_Music(k_test, epoch):
        for i in tqdm(range(3)):
            # Prime the network with song primer and generate an original song
            generated_music = sess.run(generate(300), feed_dict={x: song_primer[i]})
            # The new song will be saved here
            new_song_path = "music_outputs/k{}_e{}_{}".format(
                k_test, epoch, primer_song[i].split('/')[-1].split('.')[0])
            midi_manipulation.write_song(new_song_path, generated_music)

    with tf.Session() as sess:
        init = tf.initialize_all_variables()
        sess.run(init)
        #os.system("python weight_initializations.py Train_DATA")

        # Here we load the initial weights of the model that we created with weight_initializations.py
        saver.restore(sess, saved_weights_path)

        print("First, we print these songs as they are. Natural Baby!")
        for i in range(3):
            original_song_path = "music_outputs/{}".format(
                primer_song[i].split('/')[-1].split('.')[0])
            midi_manipulation.write_song(original_song_path, song_primer[i])

        # We run through all of the songs n_epoch times
        print("starting")

        for epoch in range(num_epochs):
            costs = []
            start = time.time()
            for s_ind, song in enumerate(songs):
                for i in range(0, len(song), batch_size):
                    tr_x = song[i:i + batch_size]
                    #alpha = min(0.01, 0.1/float(i)+0.001) #We decrease the learning rate according to a schedule.
                    alpha = 0.01
                    _, out_1, out_2, C = sess.run([updt, out1, out2, cost],
                                                  feed_dict={
                                                      x: tr_x,
                                                      lr: alpha
                                                  })
                    costs.append(C)
            # Print the progress at the end of each epoch
            out_1 = np.mean(out_1)
            out_2 = np.mean(out_2)
            if not Loss_Print_pipe.closed:
                Loss_Print_pipe.write("{},{},{},{},{}\n".format(
                    epoch, out_1, out_2, np.mean(costs),
                    time.time() - start))
            #ipdb.set_trace()
            print("epoch: {} out1: {} out2: {} cost: {} time: {}".format(
                epoch, out_1, out_2, np.mean(costs),
                time.time() - start))
            print()
            #Here we save the weights of the model every few epochs
            if (epoch + 1) % epochs_to_save == 0:
                saver.save(
                    sess, "parameter_checkpoints/k{}_epoch_{}.ckpt".format(
                        k_test, epoch))
                Generate_Music(k_test, epoch)
    Loss_Print_pipe.close()  # Close the exporting pipe
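Like the earlier snippets, this training loop depends on module-level names that are not shown here (batch_size, epochs_to_save, saved_weights_path, the imports, and the music_outputs / parameter_checkpoints folders). A minimal sketch of that surrounding setup, with assumed values, would be:

import os
import sys
import time

import numpy as np
import tensorflow as tf  # TF 1.x, matching the session/saver API used above
from tqdm import tqdm

import midi_manipulation
import rnn_rbm

# Hypothetical module-level settings assumed by main(num_epochs, k_test)
batch_size = 100       # timesteps per training batch (assumed)
epochs_to_save = 10    # checkpoint/generate every 10 epochs, matching the "9, 19, ..., 99" note above
saved_weights_path = "parameter_checkpoints/initialized.ckpt"  # produced by weight_initializations.py (assumed)

if __name__ == "__main__":
    os.makedirs("music_outputs", exist_ok=True)
    os.makedirs("parameter_checkpoints", exist_ok=True)
    # Expects the number of epochs and the CD-k value on the command line
    main(int(sys.argv[1]), int(sys.argv[2]))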