Example #1
def generate(initIn, r, dt, generate_function, filename, show=True):
    '''Generate a sample sequence, plot the resulting piano-roll and save
    it as a MIDI file.

    filename : string
        A MIDI file will be created at this location.
    show : boolean
        If True, a piano-roll of the generated sequence will be shown.'''

    piano_roll = numpy.round(generate_function(initIn))
    for repeat in range(1):
        piano_roll = numpy.concatenate(
            (piano_roll, numpy.round(generate_function(piano_roll))), axis=0)
    midiwrite(filename, piano_roll, r, dt)
    if show:
        extent = (0, dt * len(piano_roll)) + r
        pylab.figure()
        pylab.imshow(piano_roll.T,
                     origin='lower',
                     aspect='auto',
                     interpolation='nearest',
                     cmap=pylab.cm.gray_r,
                     extent=extent)
        pylab.xlabel('time (s)')
        pylab.ylabel('MIDI note number')
        pylab.title('generated piano-roll')
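Every example on this page ultimately calls the same helper, midiwrite(filename, piano_roll, r, dt) from midi.utils (the module used in the deeplearning.net RNN-RBM tutorial that several of these snippets build on). A minimal, self-contained sketch of that call, with hypothetical demo values:

import numpy
from midi.utils import midiwrite

piano_roll = numpy.zeros((100, 88))   # (time steps, pitches); 88 columns match r=(21, 109)
piano_roll[::4, 48] = 1               # press one key every fourth step
midiwrite('demo.mid', piano_roll,
          r=(21, 109),                # MIDI note range covered by the columns
          dt=0.3)                     # seconds per time step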
Example #2
        def generate(self, filename, show=True):
            '''Generate a sample sequence, plot the resulting piano-roll and save
        it as a MIDI file.

        filename : string
            A MIDI file will be created at this location.
        show : boolean
            If True, a piano-roll of the generated sequence will be shown.'''

            piano_roll = self.generate_function()
            print "Sample generated!"
            midiwrite(filename, piano_roll, self.r, self.dt)
            if show:
                extent = (0, self.dt * len(piano_roll)) + self.r
                pylab.figure()
                pylab.imshow(piano_roll.T,
                             origin='lower',
                             aspect='auto',
                             interpolation='nearest',
                             cmap=pylab.cm.gray_r,
                             extent=extent)
                pylab.xlabel('time (s)')
                pylab.ylabel('MIDI note number (corresponds to piano keys)')
                pylab.title('generated piano-roll')
                pylab.savefig('Piano_Roll_' + str(filename))
Example #3
def write_to_midi_file(sample, file_path):
    """
        write midi list to midi format file
    """
    sample = frequency2key(sample)
    sample = convert_key_to_midi_list(sample)
    midiwrite(file_path, sample, R, DT)
def ideal_predict(model, data):
	inp = data[0, :, :]
	inp = np.expand_dims(inp, axis=0)
	out = model.predict(inp)
	print out.shape
	print np.max(out)
	midiwrite('./test.midi', np.round(out[0, :, :]), r=(12, 109), dt=64)
Example #5
    def generate(self, init_chord, file_name, LS=False, chord_name=None, chord_file=None, state_file=None, n_steps=80, r=(21, 109)):
        if (LS):
            Lsystem = LSystem(chord_name, init_chord, chord_file, state_file, r)
        init_sequence = numpy.zeros((1, n_steps + 1, self.maxFeatures))
        init_sequence[:, 0, init_chord - self.r[0]] = 1
        for i in numpy.arange(n_steps):
            probs = self.rnnModel.predict_proba(init_sequence)[:, i, :]
            for j in numpy.arange(len(init_sequence)):
                if (LS):
                    ind = Lsystem.getMaxProbs(probs[j, 0:(self.maxFeatures - 1)], True)
                else:
                    ind = maxProbs(probs[j, 0:(self.maxFeatures - 1)])
                init_sequence[j, i + 1, ind] = 1

        generate_sq = [sq[:, 0:(self.maxFeatures - 1)].nonzero()[1] for sq in init_sequence]
        print(generate_sq[0] + self.r[0])
        if (LS):
            print(Lsystem.cur_chord)
            print(Lsystem.cur_state)
            print(Lsystem.cur_opes)
        midiwrite(file_name, init_sequence[0, :, 0:(self.maxFeatures - 1)], self.r, self.dt)
        extent = (0, self.dt * len(init_sequence[0, :, 0:(self.maxFeatures - 1)])) + self.r
        pylab.figure()
        pylab.imshow(init_sequence[0, :, 0:(self.maxFeatures - 1)].T, origin='lower', aspect='auto',
                     interpolation='nearest', cmap=pylab.cm.gray_r, extent=extent)
        pylab.xlabel('time (s)')
        pylab.ylabel('MIDI note number')
        pylab.title('generated piano-roll')
Example #6
def pr2midi(pianoroll, i=1):
    filename = './Test/piano_rolls/pianoroll_' + str(i) + '.mid'
    print(pianoroll.shape)
    utils.midiwrite(
        filename, pianoroll.T, (32, 93), .1
    )  # 3rd arg is the start and end notes; 4th arg is the time between samples
    i += 1
Example #7
 def generate_sample(self, length=200, filename="sample.mid"):
     piece = self.valid[0][2]
     piece.shape
     for i in range(length - self.seq_length):
         seq = numpy.array(piece[-self.seq_length:])
         #print seq.shape
         new = self.generate_function(seq)
         sample = numpy.random.random(88)
         new_n = new > sample
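         # Comparing the predicted probabilities `new` against uniform noise
         # `sample` ~ U(0, 1) is an independent Bernoulli draw per note, so
         # new_n is a random 0/1 piano-roll frame.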
         #while numpy.sum(new_n) == 0:
         #    new += 0.001
         #    new_n = new > sample
         
         #print new
         piece = numpy.vstack([piece, new_n])
         #print piece.shape
     piano_roll = piece
     midiwrite(filename, piano_roll, self.r, self.dt)
     if self.show:
         extent = (0, self.dt * len(piano_roll)) + self.r
         pylab.figure()
         pylab.imshow(
             piano_roll.T, origin='lower', aspect='auto',
             interpolation='nearest', cmap=pylab.cm.gray_r,
             extent=extent)
         pylab.xlabel('time (s)')
         pylab.ylabel('MIDI note number')
         pylab.title('generated piano-roll')
         pylab.savefig('piano_roll')
         pylab.show()
Example #8
    def train(self, files, batch_size=100, num_epochs=200):

        assert len(files) > 0, 'Training set is empty!' \
                               ' (did you download the data files?)'
        dataset = [midiread(f, self.r,
                            self.dt).piano_roll.astype(theano.config.floatX)
                   for f in files]

        def accuracy (v, v_sample):
            accs = []
            t, n = v.shape
            for time in range(t):
                tp = 0 # true positive
                fp = 0 # false positive
                fn = 0 # false negative
                for note in range(n):
                    if v[time][note] == 1 and v_sample[time][note] == 1:
                        tp += 1.
                    if v[time][note] == 0 and v_sample[time][note] == 1:
                        fp += 1.
                    if v[time][note] == 1 and v_sample[time][note] == 0:
                        fn += 1.
                if tp + fp + fn != 0:
                    a = tp / (tp + fp + fn)
                else:
                    a = 0
                accs.append(a)

            acc = numpy.mean(accs)
            return acc

        try:
            print ('lstm_rbm, dataset=Nottingham, lr=%f, epoch=%i' %(self.lr, num_epochs))

            for epoch in xrange(num_epochs):
                numpy.random.shuffle(dataset)
                costs = []
                accs = []

                for s, sequence in enumerate(dataset):
                    for i in xrange(0, len(sequence), batch_size):
                        v = sequence[i:i + batch_size]

                        (cost, v_sample) = self.train_function(v)
                        costs.append(cost)

                        acc = accuracy(v, v_sample)
                        accs.append(acc)

                p = 'Epoch %i/%i    LL %f   ACC %f  time %s' % (epoch + 1, num_epochs, numpy.mean(costs), numpy.mean(accs), datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
                print (p)
                if (epoch%100 == 0 or epoch==num_epochs-1):
                    piano_roll = self.generate_function()
                    midiwrite('sample/lstm_rbm%i.mid' %(epoch), piano_roll, self.r, self.dt)
                sys.stdout.flush()

        except KeyboardInterrupt:
            print 'Interrupted by user.'
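The nested accuracy() above is the frame-level score tp / (tp + fp + fn), averaged over time, computed with explicit loops. A vectorized numpy equivalent, given here only as a sketch and assuming v and v_sample are binary arrays of shape (time, notes):

import numpy

def accuracy_vectorized(v, v_sample):
    v = numpy.asarray(v)
    v_sample = numpy.asarray(v_sample)
    tp = numpy.sum((v == 1) & (v_sample == 1), axis=1).astype(float)  # true positives per frame
    fp = numpy.sum((v == 0) & (v_sample == 1), axis=1).astype(float)  # false positives per frame
    fn = numpy.sum((v == 1) & (v_sample == 0), axis=1).astype(float)  # false negatives per frame
    denom = tp + fp + fn
    # empty frames score 0, exactly as in the loop version above
    return float(numpy.mean(tp / numpy.maximum(denom, 1.0)))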
Example #9
    def generate(self, filename, show=True):

        piano_roll = self.generate_function()
        midiwrite(filename, piano_roll, self.r, self.dt)
        if show:
            extent = (0, self.dt * len(piano_roll)) + self.r
            pylab.figure()
            pylab.imshow(piano_roll.T, origin='lower', aspect='auto',
                         interpolation='nearest', cmap=pylab.cm.gray_r,
                         extent=extent)
            pylab.xlabel('time (s)')
            pylab.ylabel('MIDI note number')
            pylab.title('generated piano-roll')
Example #10
    def generate(self,
                 init_chord,
                 file_name,
                 LS=False,
                 chord_name=None,
                 chord_file=None,
                 state_file=None,
                 n_steps=80,
                 r=(21, 109)):
        if (LS):
            Lsystem = LSystem(chord_name, init_chord, chord_file, state_file,
                              r)
        init_sequence = numpy.zeros((1, n_steps + 1, self.maxFeatures))
        init_sequence[:, 0, init_chord - self.r[0]] = 1
        for i in numpy.arange(n_steps):
            probs = self.rnnModel.predict_proba(init_sequence)[:, i, :]
            for j in numpy.arange(len(init_sequence)):
                if (LS):
                    ind = Lsystem.getMaxProbs(
                        probs[j, 0:(self.maxFeatures - 1)], True)
                else:
                    ind = maxProbs(probs[j, 0:(self.maxFeatures - 1)])
                init_sequence[j, i + 1, ind] = 1

        generate_sq = [
            sq[:, 0:(self.maxFeatures - 1)].nonzero()[1]
            for sq in init_sequence
        ]
        print(generate_sq[0] + self.r[0])
        if (LS):
            print(Lsystem.cur_chord)
            print(Lsystem.cur_state)
            print(Lsystem.cur_opes)
        midiwrite(file_name, init_sequence[0, :, 0:(self.maxFeatures - 1)],
                  self.r, self.dt)
        extent = (0, self.dt *
                  len(init_sequence[0, :, 0:(self.maxFeatures - 1)])) + self.r
        pylab.figure()
        pylab.imshow(init_sequence[0, :, 0:(self.maxFeatures - 1)].T,
                     origin='lower',
                     aspect='auto',
                     interpolation='nearest',
                     cmap=pylab.cm.gray_r,
                     extent=extent)
        pylab.xlabel('time (s)')
        pylab.ylabel('MIDI note number')
        pylab.title('generated piano-roll')
Example #11
    def label_from_file(self, rootLoc, fileLoc, learn_rate, n_iters, threshold):
        """
        Given an xml file at fileLoc, harmonizes the melody in the xml file by
        doing gradient descent on the top hidden layer of the network. This
        gives us an estimate of the top-layer activations that might generate
        the melody. We then run the network forwards to get the entire harmony
        from the top-level activations that we estimate.
        """
        noteReader = myparser.LegatoNoteAdder(64)
        myparser.read(fileLoc, noteReader.handle)
        snippet = noteReader.mtx
        mask = melody_blocker(snippet)

        linear_snippet = snippet.reshape([88*64])
        linear_mask = mask.reshape([88*64])
        in_data = numpy.zeros([10, 88*64])
        x_mask = numpy.zeros([10, 88*64])
        for i in range(10):
            in_data[i, :] = linear_snippet
            x_mask[i, :] = linear_mask


        # Do gradient descent to estimate the activations on layer 1.
        new_vals = theano.shared(
            value=numpy.random.sample([10, self.layer_sizes[-1]]),
        )
        f = theano.function(
            inputs=[],
            updates=[(self.isolated_reverse_input, new_vals)],
        )
        f()
        trainer = self.label(in_data, x_mask, learn_rate)
        for i in range(n_iters):
            print trainer()

        # Then, generate using it.
        result = dbn.sample(self.isolated_reverse_input, rootLoc=rootLoc, save=False,
            threshold=threshold)
        # Add the melody back onto the snippet.
        final = result * (1.0 - mask)
        final = final + snippet
        final[final > 0.5] = 1
        midiwrite(path.join(rootLoc, 'test.midi'), final.T, r=(12, 109), dt=64)
        return final
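The masking arithmetic at the end of label_from_file is easier to see with toy arrays. A small sketch with hypothetical shapes (melody_blocker and dbn.sample belong to the surrounding project and are not reproduced here):

import numpy

melody = numpy.zeros((88, 64))           # original melody piano-roll (pitch x time)
melody[60, :] = 1
mask = numpy.zeros((88, 64))             # 1 where the melody must be kept untouched
mask[55:65, :] = 1
harmony = numpy.random.random((88, 64))  # stand-in for the network's generated output
final = harmony * (1.0 - mask) + melody  # harmony outside the mask, melody inside
final[final > 0.5] = 1                   # binarize before writing the MIDI file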
Example #12
    def generate(self, filename, show=True):
        '''Generate a sample sequence, plot the resulting piano-roll and save
        it as a MIDI file.
        filename : string
            A MIDI file will be created at this location.
        show : boolean
            If True, a piano-roll of the generated sequence will be shown.'''

        piano_roll = self.generate_function()
        midiwrite(filename, piano_roll, self.r, self.dt)
        if show:
            extent = (0, self.dt * len(piano_roll)) + self.r
            pylab.figure()
            pylab.imshow(piano_roll.T, origin='lower', aspect='auto',
                         interpolation='nearest', cmap=pylab.cm.gray_r,
                         extent=extent)
            pylab.xlabel('time (s)')
            pylab.ylabel('MIDI note number')
            pylab.title('generated piano-roll')
Example #13
 def sample(self, top_level=None, rootLoc='./', save=True, threshold=0.5,
         filename='test.midi'):
     """
     Generates a sample from the trained neural net.  top_level is a 10 x
     [size of top layer] matrix whose rows contain values for the top
     layer.  Most of the time, I only use the first row, but you can only
     process data in increments of batch_size.
     """
     if top_level is None:
         top_level_size = self.layer_sizes[-1]
         top_level = numpy.random.randint(2, size=[10, top_level_size])\
             .astype(dtype=NUMPY_DTYPE)
     output = self.generate(top_level)
     output = output.reshape([10, 88*64])
     firstIm = output[0, :].reshape([88, 64])
     # Makes a little picture of the piano roll.
     outIm = Image.fromarray((firstIm*255).astype('uint8'))
     outIm.save(path.join(rootLoc, 'test.png'))
     if threshold is not None:
         firstIm[firstIm > threshold] = 1
         firstIm[firstIm <= threshold] = 0
     if save:
         midiwrite(path.join(rootLoc, filename), firstIm.T, r=(12, 109), dt=64)
     return firstIm
Example #14
def make_note(mid_path, model):
    temperature = 0.8
    probroll = []
    sampleroll = []
    pressedroll = []
    maxlen = 100
    notes = 88
    x = np.zeros((1, maxlen, notes))
    for i in range(music_length_second):
        predi = model.predict(x, verbose=0)[0]
        predi[predi < 0] = 1E-20  # Avoid negative probabilities
        preds = np.log(predi) / temperature
        exp_preds = np.exp(preds)
        exp_preds = exp_preds * ((1.**2 + (predi.sum() / exp_preds.sum())**3)**
                                 0.5)  # root mean square normalization

        exp_preds[exp_preds > 1] = 0.99  # Ensure no probability over 1.
        keypressed = np.random.binomial(1, exp_preds)
        sampleroll.append(predi)
        probroll.append(exp_preds)
        pressedroll.append(keypressed)
        x[0, :-1] = x[0, 1:]  # Roll one forward
        x[0, -1] = keypressed  # insert new prediction
    midiwrite(mid_path, pressedroll)
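The temperature handling in make_note (log, divide by temperature, exp, then an ad-hoc normalization and clipping) can also be done with the standard Bernoulli temperature form. A sketch of that alternative, not the author's method; probs is assumed to be the vector of per-note on-probabilities returned by the model:

import numpy as np

def bernoulli_temperature_sample(probs, temperature=0.8, eps=1e-8):
    # Sharpen (temperature < 1) or flatten (temperature > 1) each note's
    # on-probability, renormalizing against the corresponding off-probability.
    p_on = np.clip(probs, eps, 1.0) ** (1.0 / temperature)
    p_off = np.clip(1.0 - probs, eps, 1.0) ** (1.0 / temperature)
    scaled = p_on / (p_on + p_off)
    return np.random.binomial(1, scaled)  # 0/1 vector of pressed keys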
Example #15
# Compatible with Python 2.7, not 3.x
# Recursively load midi files, extract piano rolls and save as *.mid (file) and *.npy (matrix)

load_root = './MIDI_Data/'
save_root = './MIDI_Data_PianoRolls/'

for dirpath, dirs, files in os.walk(load_root):
    for name in files:
        if name.endswith('.mid'):
            print dirpath, name
            print dirpath.replace(load_root, save_root), name

            load_dirpath = dirpath
            save_dirpath = dirpath.replace(load_root, save_root)

            load_filepath = os.path.join(load_dirpath, name)
            save_filepath = os.path.join(save_dirpath, name)

            # Read MIDI file
            piano_roll = midiread(load_filepath).piano_roll

            if not os.path.exists(save_dirpath):
                os.makedirs(save_dirpath)

            # Save the piano roll as MIDI
            midiwrite(save_filepath, piano_roll=piano_roll)

            # Save the piano roll as *.npy file
            np.save(save_filepath.replace('.mid', '.npy'), piano_roll)
Example #16
		if time == length:
			break
			
			
		sumOfZero = 0
		sumOfOne = 0
		
		for note in range(88) : 
			for noteFrom in range(88) : 
				valueFrom = int (generated[time-1][noteFrom])
				
				sumOfZero = sumOfZero + count[note][0][noteFrom][valueFrom]
				sumOfOne = sumOfOne + count[note][1][noteFrom][valueFrom]
			
			ratio = sumOfOne / sumOfZero
			
			
			if (ratio/5) > rd.random(): # using the original ratio directly produces too many notes
				generated[time][note] = 1
	
		#print sum(generated[time])
		
	
	midiwrite("output-bi.mid", generated)
	
	
	
	
	
	
Example #17
			
		sumOfZero = 0
		sumOfOne = 0
		
		for note in range(88) : 
			for noteFrom in range(88) : 		
				valueFrom = int (generated[time-1][noteFrom])		
				
				for notePreFrom in range(88) : 
					valuePreFrom = int (generated[time-2][notePreFrom])		
					
					sumOfZero = sumOfZero + count[note][0][noteFrom][valueFrom][notePreFrom][valuePreFrom]
					sumOfOne = sumOfOne + count[note][1][noteFrom][valueFrom][notePreFrom][valuePreFrom]
			
			ratio = sumOfOne / sumOfZero
			
			# print ratio
			
			if (ratio/4) > rd.random(): # using the original ratio directly produces too many notes
				generated[time][note] = 1 # 
	
		# print sum(generated[time]) #print number of pressed note
		
	
	midiwrite("output-tri.mid", generated) # fileout
	
	
	
	
	
Example #18

		exp_preds[exp_preds > 1] = 0.99  # Ensure no probability over 1.
		#rand = np.random.randn(predi.shape[0])*2
		#probas = exp_preds*rand
		#preds = exp_preds / np.sum(exp_preds)
		#probas = np.random.multinomial(10, preds, 1)
		#keypressed = probas > 0.50
		# use a binomial distribution with probabilities exp_preds
		keypressed = np.random.binomial(1, exp_preds)
		sampleroll.append(predi)
		probroll.append(exp_preds)
		pressedroll.append(keypressed)
		#print(keypressed)
		#Take x one forward
		x[0,:-1] = x[0,1:] #Roll one forward
		x[0,-1] = keypressed #insert new prediction


	#plt.matshow(sampleroll)
	#plt.title('Raw Predicted')
	#plt.matshow(probroll)
	#plt.title('Propability of keypress')
	#plt.matshow(pressedroll)
	#plt.title('New Keypresses')
	##plt.matshow(np.array(pianoroll) > 0.25)
	#plt.show()

	#Go from the Pianoroll to a midi file which can be played.
	midiwrite('2x128_temp%s.mid'%temperature, pressedroll , r, dt)
Example #19
    def train(self, files, batch_size=100, num_epochs=200):

        assert len(files) > 0, 'Training set is empty!' \
                               ' (did you download the data files?)'
        dataset = [
            midiread(f, self.r,
                     self.dt).piano_roll.astype(theano.config.floatX)
            for f in files
        ]

        def accuracy(v, v_sample):
            accs = []
            t, n = v.shape
            for time in range(t):
                tp = 0  # true positive
                fp = 0  # false positive
                fn = 0  # false negative
                for note in range(n):
                    if v[time][note] == 1 and v_sample[time][note] == 1:
                        tp += 1.
                    if v[time][note] == 0 and v_sample[time][note] == 1:
                        fp += 1.
                    if v[time][note] == 1 and v_sample[time][note] == 0:
                        fn += 1.
                if tp + fp + fn != 0:
                    a = tp / (tp + fp + fn)
                else:
                    a = 0
                accs.append(a)

            acc = numpy.mean(accs)
            return acc

        def sampling(sample):  # 01
            s = T.matrix()
            b = rng.binomial(size=s.shape,
                             n=1,
                             p=s,
                             dtype=theano.config.floatX)
            fun = theano.function(inputs=[s], outputs=[b])

            return fun(sample)[0]
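        # Note: sampling() above rebuilds and recompiles a Theano graph on every
        # call; a cheaper equivalent is plain numpy, e.g.
        #     numpy.random.binomial(1, sample).astype(theano.config.floatX)
        # assuming `sample` holds probabilities in [0, 1].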

        try:
            print('rnn, dataset=Nottingham, lr=%f, epoch=%i' %
                  (self.lr, num_epochs))

            for epoch in range(num_epochs):
                numpy.random.shuffle(dataset)
                costs = []
                monitors = []
                accs = []

                for s, sequence in enumerate(dataset):
                    for i in range(0, len(sequence), batch_size):
                        if i + batch_size + 1 >= len(sequence):
                            break

                        v = sequence[i:i + batch_size]
                        targets = sequence[i + 1:i + batch_size + 1]

                        (cost, monitor,
                         sample) = self.train_function(v, targets)
                        costs.append(cost)
                        monitors.append(monitor)

                        sample = sampling(sample)
                        acc = accuracy(v, sample)
                        accs.append(acc)

                p = 'Epoch %i/%i  LL %f   ACC %f  Cost %f   time %s' \
                    % (epoch + 1, num_epochs, numpy.mean(monitors), numpy.mean(accs), numpy.mean(costs),
                       datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
                print(p)
                if (epoch % 100 == 0 or epoch == num_epochs - 1):
                    piano_roll = sampling(self.generate_function()[0])
                    midiwrite('sample/rnn_%i.mid' % (epoch), piano_roll,
                              self.r, self.dt)
                sys.stdout.flush()

        except KeyboardInterrupt:
            print('Interrupted by user.')
Example #20
from midi.utils import midiwrite

import sys
# print "This is the name of the script: ", sys.argv[0]
# print "Number of arguments: ", len(sys.argv)
# print "The arguments are: " , str(sys.argv)

if len(sys.argv) != 2:
    print 'song ID as arg'
else:
    print 'Song ID:', int(sys.argv[1])

    song_num = int(sys.argv[1])
    results_dir = 'Results'

    new_song = np.load('{}/input_song_{}.npz'.format(results_dir, song_num))
    new_song = new_song[new_song.files[0]]

    print(new_song.shape)
    midiwrite('{}/input_song_{}.mid'.format(results_dir, song_num), new_song.T,
              (32, 93), 0.25)

    new_song = np.load('{}/output_song_{}.npz'.format(results_dir, song_num))
    new_song = new_song[new_song.files[0]]

    print(new_song.shape)
    midiwrite('{}/output_song_{}.mid'.format(results_dir, song_num),
              new_song.T, (32, 93), 0.25)

    print '\n\nMiDi Generated...'
Example #21
                    for sequence in data_batch
                ], 0)
                train_result = sess.run(
                    [cost, train_op],
                    feed_dict={
                        x: x_batch,
                        state: numpy.zeros([batch_size, n_hidden]),
                        output: numpy.zeros([batch_size, n_hidden])
                    })
                total_cost.append(train_result[0])

            if len(total_cost) > 0:
                loss_per_check += sum(total_cost) / float(len(total_cost))

        if epoch % check_epochs == 0:
            print 'epoch %d, epoch_cost: %f' % (epoch, loss_per_check)
            if len(losses) > 2 and loss_per_check > max(losses):
                lr = sess.run(learning_rate_decay_op)
                print 'learning rate decay to %f' % lr
            losses.append(loss_per_check)
            loss_per_check = 0.0

    sequence = sess.run(generator, feed_dict={x: x_batch})
    for index in xrange(len(sequence)):
        sequence[index] = sequence[index].reshape((n_output, ))
    print '#############################'
    for t_v in sequence[::5]:
        print numpy.sum(t_v), t_v

midiwrite(generation_path + filename, sequence, r, dt)

Example #22
          generated = []
          # for note in seed:
          #   generated.append(note)

          # Now add separator note
          # generated.append(np.ones_like(seed[0]))
          generated.append(seed[0])

          for i in range(GEN_LENGTH):
            # np.newaxis is needed to make predict work for some reason...
            # pred_probs = model.predict(seed[np.newaxis, :], verbose=0)
            pred_probs = model.predict(seed_batch, verbose=0)

            next_note = np.random.binomial(n=1, p=pred_probs[0], size=pred_probs[0].shape)

            ### append to MIDI
            generated.append(next_note)
            # Update the input
            # seed = np.vstack((seed[1:], next_note))
            seed_batch[0][0] = next_note

          ### output MIDI
          sample_filename = save_dir + "lstm_e10_composition_{}.mid".format(iteration)
          print "Saving sample to {}".format(sample_filename)
          midiwrite(sample_filename, generated, MIDI_RANGE, DT)

          # Clean states again for training
          model.reset_states()
Example #23
    def train(self, files, batch_size=128, num_epochs=200):
        def downsampling(sample):
            # only keep the notes at even index positions (0, 2, 4, ...)
            downsample = []
            for i, s in enumerate(sample):
                if i % 2 == 0:
                    downsample.append(s)
            return downsample

        def upsampling(sample, length):
            '''
            double each note in sample
                sample: the melody to be upsampled
                length: the length of the original melody M, sample=Downsampling(M)
            '''
            upsample = []
            for s in sample:
                upsample.append(s)
                if (len(upsample) >= length):
                    # the upsampled melody cannot be longer than the original melody
                    break
                upsample.append(s)
            return upsample
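        # Worked example of the two helpers above (illustration only):
        #   downsampling([f0, f1, f2, f3])   -> [f0, f2]
        #   upsampling([f0, f2], length=4)   -> [f0, f0, f2, f2]
        # i.e. every kept frame is repeated once, truncated to the original length.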

        def sampling(sample):
            # convert each step to a one-hot (0/1) vector
            ixes = []
            for i, s in enumerate(sample):
                n_step = []
                for n in xrange(20):
                    ix = np.random.choice(range(88), p=s)
                    n_step.append(ix)
                count = Counter(n_step)
                ix = count.most_common(1)[0][0]
                x = np.zeros((88, ))
                x[ix] = 1
                ixes.append(x)

            return ixes

        def accuracy(v, v_sample):
            # ACC
            accs = []
            t = len(v)
            n = len(v[0])
            for time in range(t):
                tp = 0  # true positive
                fp = 0  # false positive
                fn = 0  # false negative
                for note in range(n):
                    if v[time][note] == 1 and v_sample[time][note] == 1:
                        tp += 1.
                    if v[time][note] == 0 and v_sample[time][note] == 1:
                        fp += 1.
                    if v[time][note] == 1 and v_sample[time][note] == 0:
                        fn += 1.
                if tp + fp + fn != 0:
                    a = tp / (tp + fp + fn)
                else:
                    a = 0
                accs.append(a)

            acc = numpy.mean(accs)
            return acc

        def generate():
            # sampling procedure, generating music

            # layer 4
            generate_sharp_4 = self.generate_4()[0]
            generate_sharp_4 = sampling(generate_sharp_4)

            # layer 3
            upsample_3 = upsampling(generate_sharp_4,
                                    2 * len(generate_sharp_4) + 1)
            generate_sharp_3 = self.generate_3(upsample_3, upsample_3)[0]
            generate_sharp_3 = sampling(generate_sharp_3)

            # layer 2
            upsample_2 = upsampling(generate_sharp_3,
                                    2 * len(generate_sharp_3) + 1)
            generate_sharp_2 = self.generate_2(upsample_2, upsample_2)[0]
            generate_sharp_2 = sampling(generate_sharp_2)

            # layer 1
            upsample_1 = upsampling(generate_sharp_2,
                                    2 * len(generate_sharp_2) + 1)
            generate_sharp_1 = self.generate_1(upsample_1, upsample_1)[0]
            generate_sharp_1 = sampling(generate_sharp_1)

            # layer 0
            upsample_0 = upsampling(generate_sharp_1,
                                    2 * len(generate_sharp_1) + 1)
            generate_sharp_0 = self.generate_0(upsample_0, upsample_0)[0]
            generate_sharp_0 = sampling(generate_sharp_0)

            return generate_sharp_0

        # load data
        dataset = [
            midiread(f, self.r,
                     self.dt).piano_roll.astype(theano.config.floatX)
            for f in files
        ]
        print('pyramid_mono, dataset=nottingham_mono, lr=%f, epoch=%i' %
              (self.lr, num_epochs))

        for epoch in range(num_epochs):
            numpy.random.shuffle(dataset)
            monitors_0 = []
            accs_0 = []
            monitors_1 = []
            accs_1 = []
            monitors_2 = []
            accs_2 = []
            monitors_3 = []
            accs_3 = []
            monitors_4 = []
            accs_4 = []

            for s, sequence in enumerate(dataset):
                for i in range(0, len(sequence), batch_size):

                    batch_music = sequence[i:i + batch_size]  # 128

                    # layer 0
                    downsample_0 = downsampling(batch_music)  # 64
                    upsample_0 = upsampling(downsample_0,
                                            len(batch_music))  # 128

                    # layer 1
                    downsample_1 = downsampling(downsample_0)  # 32
                    upsample_1 = upsampling(downsample_1,
                                            len(downsample_0))  # 64

                    # layer 2
                    downsample_2 = downsampling(downsample_1)  # 16
                    upsample_2 = upsampling(downsample_2,
                                            len(downsample_1))  # 32

                    # layer 3
                    downsample_3 = downsampling(downsample_2)  # 8
                    upsample_3 = upsampling(downsample_3,
                                            len(downsample_2))  # 16

                    # layer 0
                    sharp_0, monitor_0 = self.sharp_fun_0(
                        batch_music, upsample_0)
                    monitors_0.append(monitor_0)
                    accs_0.append(accuracy(batch_music, sampling(sharp_0)))

                    # layer 1
                    sharp_1, monitor_1 = self.sharp_fun_1(
                        downsample_0, upsample_1)
                    monitors_1.append(monitor_1)
                    accs_1.append(accuracy(downsample_0, sampling(sharp_1)))

                    # layer 2
                    sharp_2, monitor_2 = self.sharp_fun_2(
                        downsample_1, upsample_2)
                    monitors_2.append(monitor_2)
                    accs_2.append(accuracy(downsample_1, sampling(sharp_2)))

                    # layer 3
                    sharp_3, monitor_3 = self.sharp_fun_3(
                        downsample_2, upsample_3)
                    monitors_3.append(monitor_3)
                    accs_3.append(accuracy(downsample_2, sampling(sharp_3)))

                    # layer 4
                    if (len(downsample_3) == 1):
                        sharp_4, monitor_4 = self.sharp_fun_4(
                            downsample_3, downsample_3)
                        accs_4.append(accuracy(downsample_3,
                                               sampling(sharp_4)))
                    else:
                        sharp_4, monitor_4 = self.sharp_fun_4(
                            downsample_3[1:],
                            downsample_3[:len(downsample_3) - 1])
                        accs_4.append(
                            accuracy(downsample_3[1:], sampling(sharp_4)))
                    monitors_4.append(monitor_4)


            p = 'Epoch %i/%i    layer0:LL %f ACC %f   layer1:LL %f ACC %f  layer2:LL %f ACC %f  layer3:LL %f ACC %f  ' \
                'layer4:LL %f ACC %f  time %s' % \
                (epoch + 1, num_epochs, numpy.mean(monitors_0), numpy.mean(accs_0), numpy.mean(monitors_1), numpy.mean(accs_1),
                 numpy.mean(monitors_2), numpy.mean(accs_2), numpy.mean(monitors_3), numpy.mean(accs_3), numpy.mean(monitors_4),
                 numpy.mean(accs_4),   datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
            print(p)

            if (epoch % 50 == 0 or epoch == num_epochs - 1):

                piano_roll = generate()
                midiwrite('sample/pyramid_mono_%i.mid' % (epoch), piano_roll,
                          self.r, self.dt)
Example #24
def sample_and_save(model, length=64):
	x = sample(model, length)
	midiwrite('./test.midi', x, r=(12, 109), dt=64)
Example #25
 def generate(self, filename, show=True):
     piano_roll = self.generate_function()
     midiwrite(filename, piano_roll, self.r, self.dt)
Example #26
        model = RnnRbm()
        model = train_rnnrbm(model, dataset)

        cur = sys.getrecursionlimit()
        sys.setrecursionlimit(40000)
        with open('saved_rnndbn.pkl', mode='w') as f:
            pickle.dump(model, f, -1)
        sys.setrecursionlimit(cur)

    with open('saved_rnndbn.pkl', mode='r') as f:
        model = pickle.load(f)

    import matplotlib
    matplotlib.use('Agg')
    sample_files = ["sample1.mid", "sample2.mid"]
    for s in sample_files:
        piano_roll = model.generate()
        midiwrite(s, piano_roll, key_range, dt)
        extent = (0, dt * len(piano_roll)) + key_range
        import matplotlib.pyplot as plt
        plt.imshow(piano_roll.T,
                   origin='lower',
                   aspect='auto',
                   interpolation='nearest',
                   cmap="gray_r",
                   extent=extent)
        plt.xlabel('time (s)')
        plt.ylabel('MIDI note number')
        plt.title('generated piano-roll')
        plt.savefig(s[:-4] + '.png')
Example #27
# expt/chkpt-synthetic/DMM_lr-0_0008-dh-200-ds-100-nl-relu-bs-20-ep-20-rs-600-rd-0_1-infm-R-tl-2-el-2-ar-2_0-use_p-approx-rc-lstm-uid-final.h5

# =============================================

params, zvec = DMM_evaluate.sample(dmm_reloaded, T=200)
bin_prob = params[0]
print 'Samples: ', bin_prob.shape

# ===========================================

#Parameters for music sampling taken from http://deeplearning.net/tutorial/rnnrbm.html
rval = (21, 109)
dt = 0.3
SAVEDIR = './samples/'
createIfAbsent(SAVEDIR)
print 'Saving wav...'
for idx, sample in enumerate(bin_prob):
    piano_roll = (sample > 0.5) * 1.
    filename = SAVEDIR + DATASET + '-' + str(idx) + '.mid'
    midiwrite(filename, piano_roll, rval, dt)
    print idx,

print 'Converting...'
print os.system('cd ' + SAVEDIR + ';timidity -Ow1S ' + DATASET +
                '*.mid;cd ../')
print '\nFiles\n', ', '.join(os.listdir(SAVEDIR))

# ================================================

import IPython
IPython.display.Audio('./samples/jsb-0.wav')
Example #28
 def generate(self, session, filename='001.mid'):
     sequence = session.run(self.vg)
     midiwrite(self.generation_path + filename, sequence, self.r, self.dt)
Example #29
            # for note in seed:
            #   generated.append(note)

            # Now add separator note
            # generated.append(np.ones_like(seed[0]))
            generated.append(seed[0])

            for i in range(GEN_LENGTH):
                # np.newaxis is needed to make predict work for some reason...
                # pred_probs = model.predict(seed[np.newaxis, :], verbose=0)
                pred_probs = model.predict(seed_batch, verbose=0)

                next_note = np.random.binomial(n=1,
                                               p=pred_probs[0],
                                               size=pred_probs[0].shape)

                ### append to MIDI
                generated.append(next_note)
                # Update the input
                # seed = np.vstack((seed[1:], next_note))
                seed_batch[0][0] = next_note

            ### output MIDI
            sample_filename = save_dir + "lstm_e30_composition_{}.mid".format(
                iteration)
            print "Saving sample to {}".format(sample_filename)
            midiwrite(sample_filename, generated, MIDI_RANGE, DT)

            # Clean states again for training
            model.reset_states()
Example #30
def main():

    #--- import data ---#
    sizeOfMiniBatch = 5 #how many tunes per miniBatch
    noOfEpoch = 100 
    noOfEpochPerMB = 2
    lengthOfMB = 100
    sparseParam = np.float32(0.01) #increases with no. of cells
    path = './Piano-midi.de/train-individual/hpps'
    #path = './Piano-midi.de/train'
    files = os.listdir(path)
    assert len(files) > 0, 'Training set is empty!' \
                               ' (did you download the data files?)'
    #pitch range is from 21 to 109
    dataset = [midiread((path + "/" + f), (21, 109),0.3).piano_roll.astype(theano.config.floatX) for f in files]
                  
    #check the length (number of time steps) of each tune:
    print(str([np.array(dataset[n]).shape[0] for n in np.arange(np.array(dataset).shape[0])]))

    # set "silent" to zero in 1-hot format
    for k in np.arange(np.array(dataset).shape[0]):
        for n in np.arange(0,np.array(dataset[k]).shape[0],1):
            if np.sum(dataset[k][n], dtype=theano.config.floatX) == 0 :
                dataset[k][n][0] = np.float32(1.0)
                

    #--- training with data ---#
    
    myRNN4Music = RNN4Music(h1_length=176, h2_length=176, h3_length=176, io_length=88, R1=np.float32(0.001), R2=np.float32(0.001), R3=np.float32(0.001), Rout=np.float32(0.001)) 
    
    #myRNN4Music.loadParameters('120_120_120_0_001_xEn_150epoch_hpps')
    #myRNN4Music.loadParameters('120_120_120_0_001_sqr_150epoch_hpps')
    myRNN4Music.loadParameters('176_176_176_0_001_xEn_L1_0_01_100epoch_hpps')
    #myRNN4Music.saveParameters('176_176_176_0_001_xEn_L1_0_1_300epoch_hpps')
    
    #myRNN4Music.train(dataset, noOfEpochPerMB, noOfEpoch, sizeOfMiniBatch, lengthOfMB, sparseParam)
    #myRNN4Music.saveParameters('176_176_176_0_001_xEn_L1_0_01_100epoch_hpps')
    #myRNN4Music.train(dataset, noOfEpochPerMB, noOfEpoch, sizeOfMiniBatch, lengthOfMB, sparseParam)
    #myRNN4Music.saveParameters('176_176_176_0_001_xEn_L1_0_01_200epoch_hpps')
    #myRNN4Music.train(dataset, noOfEpochPerMB, noOfEpoch, sizeOfMiniBatch, lengthOfMB, sparseParam)
    #myRNN4Music.saveParameters('176_176_176_0_001_xEn_L1_0_01_300epoch_hpps')
    #myRNN4Music.train(dataset, noOfEpochPerMB, noOfEpoch, sizeOfMiniBatch, lengthOfMB, sparseParam)
    #myRNN4Music.saveParameters('176_176_176_0_001_xEn_L1_0_01_400epoch_hpps')




    #--- plot some generated tunes ---#

    for baseSample in np.array([0, 20, 15, 31]):
        exampleLength = 50
        myRNN4Music.resetStates()
        generatedTune = myRNN4Music.genMusic(np.float32(dataset[baseSample][0:exampleLength]), 300)
        midiwrite('176_176_176_0_001_sqr_hpps150_' + str(baseSample) + '.mid', generatedTune[0], (21, 109),0.3)
        #generatedTune[0] is the tune, generatedTune[1] is the probability at each iteration
        
        #plot generated probability
        plt.figure(0 + baseSample*100)
        plt.imshow(np.array(generatedTune[1][0:50,25:65]), origin = 'lower', extent=[25,65,0,50], aspect=0.5,
                        interpolation = 'nearest', cmap='gist_stern_r')
        plt.title('probability of generated midi note piano-roll')
        plt.xlabel('midi note')
        plt.ylabel('sample number (time steps)')
        plt.colorbar()##
        #plot leading example for generation
        plt.figure(1 + baseSample*100)
        plt.imshow(np.transpose(dataset[baseSample]), origin='lower', aspect='auto',
                                 interpolation='nearest', cmap=pylab.cm.gray_r)
        plt.colorbar()
        plt.title('original piano-roll')
        plt.xlabel('sample number (time steps)')
        plt.ylabel('midi note')

        #plot generated tune
        plt.figure(2 + baseSample*100)
        plt.imshow(np.transpose(np.array(generatedTune[0][0:500])), origin='lower', aspect='auto',
                                 interpolation='nearest', cmap=pylab.cm.gray_r)
        plt.colorbar()
        plt.title('generated piano-roll')
        plt.xlabel('sample number (time steps)')
        plt.ylabel('midi note')
    plt.show()