Example 1
    def predict(self,
                cache,
                pre_trained_model,
                n=1,
                saveto='NewSong',
                step=319,
                conservativity=1):
        # This function only runs prediction from a pre-trained model
        with tf.Session() as sess:
            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())

            print("Load the model from: {}".format(pre_trained_model))
            saver.restore(sess, 'model/{}'.format(pre_trained_model))
            for _ in range(n):
                xIpt, xOpt = map(np.array, data.getPieceSegment(cache))
                new_state_matrix = sess.run(
                    self.new_song,
                    feed_dict={
                        self.predict_seed: xIpt[0],
                        self.step_to_sumulate: [step],
                        self.conservativity: [conservativity]
                    })
                newsong = np.concatenate(
                    (np.expand_dims(xOpt[0], 0), new_state_matrix))

                songname = str(time.time()) + '.mid'
                if not os.path.exists(os.path.join(saveto, pre_trained_model)):
                    os.makedirs(os.path.join(saveto, pre_trained_model))
                noteStateMatrixToMidi(
                    newsong,
                    name=os.path.join(saveto, pre_trained_model, songname))
                print('New song {} saved to \'{}\''.format(
                    songname, os.path.join(saveto, pre_trained_model)))
Example 2
def main():
    input_array = np.load('predict.npy')
    print(input_array.shape)
    # convert the numpy array of shape (101, 156, 1) to (101, 78, 2)
    result = np.zeros((time_step + 1, 78, 2))
    for j in range(time_step + 1):
        i = 0
        while i < 156:
            if input_array[j, i] == 1:
                result[j, i // 2, 0] = 1
                if input_array[j, i + 1] == 1:
                    result[j, i // 2, 1] = input_array[j, i + 1]
            i = i + 2
    noteStateMatrixToMidi(result, name="ex6")  # transform back to midi-file
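The element-wise loop above can also be written as a few vectorized numpy operations. Below is a minimal sketch of the same (101, 156, 1) to (101, 78, 2) conversion, assuming input_array holds only 0/1 values as the loop does: the reshape pairs each (play, articulate) column, and the final multiplication keeps an articulation bit only where the corresponding play bit is set.

import numpy as np

input_array = np.load('predict.npy')                  # same input as above
T = input_array.shape[0]                              # time_step + 1
result = input_array.reshape(T, 78, 2).astype(float)  # pair up (play, articulate) columns
result[..., 1] *= result[..., 0]                      # drop articulation where no note is played
noteStateMatrixToMidi(result, name="ex6_vectorized")  # same helper as above; output name is illustrative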
Example 3
def midiForXML(xmlFile, midiDestFile):
  #parse xml file into document tree
  tree = xml.etree.ElementTree.parse(xmlFile).getroot()
  tempo = getTempoForSong(tree)
  #We're no longer using a default tempo; that was never really a good idea,
  #since the various tempos can differ by an order of magnitude. Instead, we
  #return a code to indicate success or failure.
  #if tempo == None:
  #  tempo = 120
  if tempo is None:
    return 1
  else:
    stateMatrix = stateMatrixForSong(tree, 0)[1]
    midi_to_statematrix.noteStateMatrixToMidi(stateMatrix, name=midiDestFile)
    return 0    
Example 4
def midiForXML(xmlFile, midiDestFile):
  #parse xml file into document tree
  tree = xml.etree.ElementTree.parse(xmlFile).getroot()
  tempo = getTempoForSong(tree)
  #We're no longer using a default tempo; that was never really a good idea,
  #since the various tempos can differ by an order of magnitude. Instead, we
  #return a code to indicate success or failure.
  #if tempo == None:
  #  tempo = 120
  if tempo is None:
    return 1
  else:
    stateMatrix = stateMatrixForSong(tree, 0)[1]
    midi_to_statematrix.noteStateMatrixToMidi(stateMatrix, name=midiDestFile)
    return 0    
Example 5
def train_statematrix_net(net,
                          batch_size=128,
                          dropout=.5,
                          output_rate=100,
                          output_length=128,
                          total_epochs=5000,
                          path='net_output/'):
    """
    Trains a neural network, taking time steps from state matrices as input
    and output.
    """
    statematrices = readxml.createStateMatrices()
    batches = []
    for song in statematrices.values():
        matrix = song[1]
        while len(matrix) > 0:
            batch = []
            for i in range(batch_size):
                if len(matrix) == 0:
                    break
                # flatten each time step (78 notes x 2 values) into a
                # 156-vector, rescaling 0/1 to -19/1
                batch.append([[n * 20 - 19 for l in matrix.pop() for n in l]])
            # zip() is a one-shot iterator in Python 3; materialize it so the
            # batches can be reused on every epoch below
            batches.append(list(zip(batch[:-1], batch[1:])))

    if not os.path.exists(path):
        os.makedirs(path)

    print('\nTraining network:')
    for i in range(0, total_epochs, output_rate):
        last = [[-1] * 156]
        statematrix = []
        for j in range(output_length):
            new = net.run(last)
            statematrix.append(
                estimate_statematrix_from_output(new[0], last[0]))
            last = new
        midi_to_statematrix.noteStateMatrixToMidi(
            statematrix, name=(path + 'example{0}'.format(i)))
        print('\tfile example{0}.mid created'.format(i))
        net.save(path + 'weights{0}'.format(i))
        print('\tweights saved in weights{0}'.format(i))

        for j in range(output_rate):
            print('\t\t' + str(i + j))
            for batch in batches:
                net.train(batch, 1, .1, dropout, .5)
                net.reset()
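A minimal, hedged usage sketch for train_statematrix_net. The constructor below is hypothetical; the only assumption taken from the code above is that net exposes run, train, save and reset methods.

net = build_recurrent_net()  # hypothetical constructor, not shown in these examples
train_statematrix_net(net,
                      batch_size=128,
                      dropout=.5,
                      output_rate=100,
                      output_length=128,
                      total_epochs=5000,
                      path='net_output/')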
Example 6
def train_statematrix_net(net, batch_size=128, dropout=.5, output_rate=100,
                          output_length=128, total_epochs=5000,
                          path='net_output/'):
    """
    Trains a neural network, taking time steps from state matrices as input
    and output.
    """
    statematrices = readxml.createStateMatrices()
    batches = []
    for song in statematrices.values():
        matrix = song[1]
        while len(matrix) > 0:
            batch = []
            for i in xrange(batch_size):
                if len(matrix) == 0:
                    break
                batch.append([[n * 20 - 19 for l in matrix.pop() for n in l]])
            batches.append(zip(batch[:-1], batch[1:]))

    if not os.path.exists(path):
        os.makedirs(path)

    print('\nTraining network:')
    for i in xrange(0, total_epochs, output_rate):
        last = [[-1] * 156]
        statematrix = []
        for j in xrange(output_length):
            new = net.run(last)
            statematrix.append(estimate_statematrix_from_output(new[0], last[0]))
            last = new
        midi_to_statematrix.noteStateMatrixToMidi(
            statematrix, name=(path + 'example{0}'.format(i)))
        print('\tfile example{0}.mid created'.format(i))
        net.save(path + 'weights{0}'.format(i))
        print('\tweights saved in weights{0}'.format(i))
        
        for j in xrange(output_rate):
            print('\t\t' + str(i + j))
            for batch in batches:
                net.train(batch, 1, .1, dropout, .5)
                net.reset()
Example 7
    def train(self,
              cache,
              batch_size=32,
              predict_freq=100,
              show_freq=10,
              save_freq=500,
              max_epoch=10000,
              saveto='NewSong',
              step=319,
              conservativity=1,
              pre_trained_model=None):
        '''
        This is the train function for the biaxial_model. It also generates predictions while training.
        Args:
            cache: dict containing all the note state matrices used as the training set;
            batch_size: int, how many samples are in one batch;
            predict_freq: int, a new song is generated every predict_freq steps;
            save_freq: int, how often the model is saved;
            show_freq: int, how often the loss is printed;
            max_epoch: int, the maximum number of training steps;
            saveto: str, the directory where newly generated songs are saved;
            step: int, the length of each generated song; one step is one step of the time model;
            conservativity: controls how conservative the number of notes in a generated song is;
            pre_trained_model: str, name of a saved model used to resume training.
        '''

        cur_model_name = 'biaxial_rnn_{}'.format(int(time.time()))
        batch_generator = data.generate_batch(cache, batch_size)
        val_batch_generator = data.generate_val_batch(cache, batch_size)

        loss_log = []
        min_loss = np.inf
        with tf.Session() as sess:
            merge = tf.summary.merge_all()
            writer = tf.summary.FileWriter("log/{}".format(cur_model_name),
                                           sess.graph)

            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())

            if pre_trained_model is not None:
                try:
                    print("Load the model from: {}".format(pre_trained_model))
                    saver.restore(sess, 'model/{}'.format(pre_trained_model))
                    #writer = tf.summary.FileWriterCache.get('log/{}'.format(pre_trained_model))
                except Exception:
                    print("Load model Failed!")
                    pass
            if self.new_trainer is not None:
                optimizer = self.optimizer2
            else:
                optimizer = self.optimizer

            for i in range(max_epoch):
                X_train, y_train = next(batch_generator)
                _, loss, merge_result = sess.run(
                    (optimizer, self.loss, merge),
                    feed_dict={
                        self.input_mat: X_train,
                        self.output_mat: y_train
                    })

                loss_log.append(loss)
                pickle.dump(
                    loss_log,
                    open('model/' + cur_model_name + '_loss_log.pkl', 'wb'))

                if i % show_freq == 0:
                    print('Step {0}: loss is {1}'.format(i, loss))
                if (i + 1) % 10 == 0:
                    writer.add_summary(merge_result, i)

                # generate a new song
                if (i + 1) % predict_freq == 0:
                    xIpt, xOpt = map(np.array, data.getPieceSegment(cache))
                    new_state_matrix = sess.run(
                        self.new_song,
                        feed_dict={
                            self.predict_seed: xIpt[0],
                            self.step_to_sumulate: [step],
                            self.conservativity: [conservativity]
                        })
                    newsong = np.concatenate(
                        (np.expand_dims(xOpt[0], 0), new_state_matrix))

                    songname = str(time.time()) + '.mid'
                    if not os.path.exists(os.path.join(saveto, cur_model_name)):
                        os.makedirs(os.path.join(saveto, cur_model_name))
                    noteStateMatrixToMidi(
                        newsong,
                        name=os.path.join(saveto, cur_model_name, songname))
                    print('New song {} saved to \'{}\''.format(
                        songname, os.path.join(saveto, cur_model_name)))

                # save the models for restoring training
                if (i + 1) % save_freq == 0:
                    if not os.path.exists('model/'):
                        os.makedirs('model/')
                    saver.save(sess, 'model/{}'.format(cur_model_name))
                    print('{} Saved'.format(cur_model_name))

                    # Get validation data and validate
                    xIpt_val, xOpt_val = next(val_batch_generator)
                    val_loss = sess.run(
                        self.loss,
                        feed_dict={
                            self.input_mat: xIpt_val,
                            self.output_mat: xOpt_val
                        })

                    print('Validation loss of {} achieved at step {}'.format(
                        val_loss, i))
                    if val_loss < min_loss:
                        min_loss = val_loss

                        # Save the best model
                        saver.save(
                            sess, 'model/{}_{}'.format('best', cur_model_name))
                        print('{}_{} Saved'.format('best', cur_model_name))
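A hedged usage sketch tying together the train method above and the predict method from Example 1. Both are methods of a biaxial RNN model class that is not shown here, so the class name, its constructor and the cache-building helper are assumptions; only the method signatures come from the snippets themselves.

model = biaxial_model()               # hypothetical constructor; real arguments not shown in these examples
cache = data.build_cache('music/')    # hypothetical helper producing the 'cache' dict of note state matrices
model.train(cache, batch_size=32, max_epoch=10000, saveto='NewSong')
model.predict(cache, pre_trained_model='biaxial_rnn_1500000000', n=2, step=319)  # placeholder model name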
Example 8
def write_song(path, song):
    midi_to_statematrix.noteStateMatrixToMidi(song, name=path)
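Since write_song is only a thin wrapper, here is a minimal usage sketch. It assumes the (time_steps, 78, 2) play/articulation layout that the other examples build; whether the path needs an explicit '.mid' extension differs between the repos these snippets come from, so the output name is purely illustrative.

import numpy as np

song = np.zeros((64, 78, 2), dtype=int)  # 64 time steps, 78 pitches, (play, articulate)
song[:16, 40, 0] = 1                     # hold one pitch for the first 16 steps
song[0, 40, 1] = 1                       # articulate it at the onset
write_song('./midi_output_test/example_song', song)  # illustrative output path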
Example 9
        transpositions[toks[1]] = int(toks[2])
      elif toks[0] == 'slow':
        if not len(toks) == 2:
          continue
        slow = int(toks[1])
      elif toks[0] == 'speed':
        if not len(toks) == 2:
          continue
        speed = int(toks[1])
      elif toks[0] == 'start-time':
        if not len(toks) == 2:
          continue
        startTime = float(toks[1])

    #parse xml file into document tree
    tree = xml.etree.ElementTree.parse(mxlfile).getroot()
    if getTempoForSong(tree) is None:
      print("File {} has no tempo!!!".format(mxlfile))
    else:
      stateMatrices[origFilename] = stateMatrixForSong(tree)

  return stateMatrices

if __name__ == "__main__":
  stateMatrices = createStateMatrices()
  print("{0} songs total.".format(len(stateMatrices)))
  for k in stateMatrices.keys():
    midi_to_statematrix.noteStateMatrixToMidi(stateMatrices[k][1], name='./midi_output_test/{}'.format(k))
    

Example 10
def write_song(path, song):
    midi_to_statematrix.noteStateMatrixToMidi(song, name=path)
Example 11
      elif toks[0] == 'slow':
        if not len(toks) == 2:
          continue
        slow = int(toks[1])
      elif toks[0] == 'speed':
        if not len(toks) == 2:
          continue
        speed = int(toks[1])
      elif toks[0] == 'start-time':
        if not len(toks) == 2:
          continue
        startTime = float(toks[1])

    #parse xml file into document tree
    tree = xml.etree.ElementTree.parse(mxlfile).getroot()
    if getTempoForSong(tree) is None:
      print "File {} has no tempo!!!".format(mxlfile)
    else:
      stateMatrices[origFilename] = stateMatrixForSong(tree)

  return stateMatrices

if __name__ == "__main__":
  stateMatrices = createStateMatrices()
  print "{0} songs total.".format(len(stateMatrices))
  #print "Pwd: " + os.getcwd()
  for k in stateMatrices.keys():
    midi_to_statematrix.noteStateMatrixToMidi(stateMatrices[k][1], name='./midi_output_test/{}'.format(k))
    

Example 12
updt = [
    W.assign_add(W_adder),
    bv.assign_add(bv_adder),
    bh.assign_add(bh_adder)
]

with tf.Session() as sess:

    init = tf.global_variables_initializer()
    sess.run(init)
    for epoch in tqdm(range(num_epochs)):
        for song in songs:
            song = np.array(song)
            song = song[:int(
                np.floor(song.shape[0] / num_timesteps) * num_timesteps)]
            # use integer division so the reshape dimensions are ints
            song = np.reshape(
                song,
                [song.shape[0] // num_timesteps, song.shape[1] * num_timesteps])

            for i in range(1, len(song), batch_size):
                tr_x = song[i:i + batch_size]
                sess.run(updt, feed_dict={x: tr_x})

    sample = gibbs_sample(1).eval(session=sess,
                                  feed_dict={x: np.zeros((50, n_visible))})
    for i in range(sample.shape[0]):
        if not any(sample[i, :]):
            continue
        S = np.reshape(sample[i, :], (num_timesteps, 2 * note_range))
        midi_to_statematrix.noteStateMatrixToMidi(S, "out_{}".format(i))