# Example #1
# 0
def demo_random_new(argv):
    """Write a 1000-note random song to new_song.mid.

    Each note is a random pitch in [60, 100) with a random velocity;
    about 20% of notes start immediately (delta time 0), the rest after
    a random delay.
    """
    song = Song()
    trk = song.add_track()

    # header = MidiFile('AutumnL.mid')

    for _ in range(1000):
        pitch = np.random.randint(60, 100)

        # Key press.
        trk.append(Message(
            'note_on',
            note=pitch,
            velocity=np.random.randint(30, 70),
            time=0 if np.random.rand() > 0.8 else np.random.randint(100, 300),
        ))

        # Key release: note_on with zero velocity after a random duration.
        trk.append(Message(
            'note_on',
            note=pitch,
            velocity=0,
            time=np.random.randint(100, 300),
        ))

    song.save('new_song.mid')
def get_openning(LEN, mode='borrow'):
    """Return an opening sequence of LEN one-hot note vectors.

    Parameters
    ----------
    LEN : int
        Number of timesteps to return.
    mode : str
        'borrow' -- take the first LEN frames from a reference MIDI file;
        'random' -- draw LEN random binary vectors (uses the global `dim`).

    Raises
    ------
    NotImplementedError
        If `mode` is not one of the supported values.
    """
    if mode == 'borrow':
        # Borrow the opening from an existing song.
        # midi = MidiFile("songs/bach_846.mid")
        midi = Song(
            "datasets/easymusicnotes/level6/anniversary-song-glen-miller-waltz-piano-level-6.mid"
        )
        hots = midi.encode_onehot()
        return [hots[i] for i in range(LEN)]
    elif mode == 'random':
        # Random binary vectors with ~30% of the entries active.
        # return [np.random.rand(dim,) for _ in range(LEN)]
        return [np.random.binomial(1, 0.3, (dim, )) for _ in range(LEN)]
    else:
        # BUG FIX: `raise NotImplemented` raises the NotImplemented
        # singleton, which is not an exception (TypeError at runtime);
        # the proper exception class is NotImplementedError.
        raise NotImplementedError("unknown mode: {!r}".format(mode))
# Example #3
# 0
def demo_copy(argv):
    """Copy only the note_on messages of AutumnL.mid into a new file."""
    out_song = Song()
    out_track = out_song.add_track()

    src = MidiFile('playground/AutumnL.mid')
    # Keep note events only; drop everything else.
    Song._copy(src, out_track, filter_f=lambda x: x.type == 'note_on')
    out_song.save('playground/AutumnL_copy.mid')
# Example #4
# 0
def demo_align(argv):
    # Demo: align a MIDI file's note events to a beat grid and print the
    # result as an animated '_'/'x' piano roll in the terminal.
    # argv: optional; argv[0] is the MIDI file path (defaults to bach_846).
    if len(argv) == 0:
        source = MidiFile('songs/bach_846.mid')
    else:
        source = MidiFile(argv[0])
    # source = MidiFile('songs/AutumnL.mid')
    # source = MidiFile('datasets/easymusicnotes/level6/anniversary-song-glen-miller-waltz-piano-level-6.mid')
    # Absolute (cumulative) event times for note events, measured in beats.
    msgs, times = Song._get_absolute_time(
        source,
        filter_f=lambda x: x.type in ['note_on', 'note_off'],
        unit='beat')

    for msg, t in zip(msgs, times):
        print t, msg

    # Quantize events onto a one-hot grid at quarter-beat resolution.
    hots = Song._get_hots(msgs, times, resolution=0.25)
    print hots.shape

    # Render each timestep ('x' = active note, '_' = silence); sleep to
    # animate the scroll.
    for hoti in hots:
        hoti = ''.join([('_' if char == 0 else 'x') for char in hoti])
        print hoti
        sleep(0.2)
from midiwrapper import Song
from keras.models import load_model
import numpy as np

if __name__ == '__main__':
    # Sample one song from a trained GAN generator and write it as MIDI.
    model = load_model('./temp/gan2.h5')
    model.summary()
    bs, seq_len, note_dim, _ = model.output_shape

    # Latent-code size expected by the generator input.
    code_dim = model.input_shape[-1]

    def code_generator(n_codes):
        # Latent codes drawn uniformly from [-1, 1).
        return np.random.uniform(-1., 1., size=(n_codes, code_dim))

    song = Song()
    track = song.add_track()
    # Predict a single sample and drop the batch/channel axes.
    notes = model.predict(code_generator(1))[0, :, :, 0]

    song._compose(track, notes, deltat=100, threshold=1.)
    song.save_as('gan2.mid')
# Example #6
# 0
    # NOTE(review): fragment of a training-script body; `args`, `seq_len`,
    # `os`, `shutil`, `Song` and `OneHotEncoder` are defined outside this
    # excerpt -- confirm against the full file.
    code_dim = args.code_dim
    # note_dim = args.note_dim
    niter = args.niter
    nbatch = args.nbatch
    # NOTE(review): name is misspelled ('iterval'); keep in sync with users.
    vis_iterval = args.vis_interval
    work_dir = args.work_dir
    vis_dir = os.path.join(args.work_dir, 'vis')

    # env setup: start every run from a clean working directory.
    if os.path.exists(work_dir):
        shutil.rmtree(work_dir)
    os.mkdir(work_dir)
    os.mkdir(vis_dir)

    # data preparation
    data = Song.load_from_dir("../../datasets/easymusicnotes/",
                              encoder=OneHotEncoder())
    # data = Song.load_from_dir("./datasets/e-comp/", encoder=AllInOneEncoder())
    # data = np.load('../../datasets/e-comp-allinone.npz')['data']
    note_dim = data[0].shape[-1]
    # Shortest song length -- sanity check that seq_len fits every song.
    print min(map(lambda x: x.shape[0], data))

    def data_generator(bs):
        # Sample `bs` random windows of length seq_len from random songs.
        indices = np.random.randint(data.shape[0], size=(bs, ))
        x = []
        for ind_i in indices:
            start = np.random.randint(data[ind_i].shape[0] - seq_len)
            x.append(data[ind_i][start:start + seq_len])
        x = np.array(x).astype('float')
        # tanh generators expect inputs in [-1, 1] rather than [0, 1].
        if args.activation == 'tanh':
            x = x * 2 - 1
        return x
# Example #7
# 0
    # NOTE(review): fragment of a GAN training script; `dis`, `gendis`,
    # `gen`, `dis_opt`, `code_generator`, `data_generator`, `vis_dir`,
    # `niter` and `nbatch` are defined outside this excerpt.
    shape = dis.get_input_shape_at(0)[1:]
    gen_input, real_input = Input(shape), Input(shape)
    # Two-headed discriminator: scores a generated and a real batch jointly.
    dis2batch = Model([gen_input, real_input],
                      [dis(gen_input), dis(real_input)])
    dis.trainable = True
    dis2batch.compile(optimizer=dis_opt,
                      loss='binary_crossentropy',
                      metrics=['binary_accuracy'])

    gen_trainner = gendis
    dis_trainner = dis2batch

    # Visualization grid: gh rows x gw columns of sample songs.
    gh = 2
    gw = 7
    imsave('{}/real.png'.format(vis_dir),
           Song.grid_vis_songs(data_generator(gh*gw)[:, :, :, 0], gh=gh, gw=gw))
    # Fixed latent codes so visualized fakes are comparable across iterations.
    vis_Z = code_generator(gh*gw)
    fake_pool = gen.predict(vis_Z)
    for iteration in range(0, niter):
        print 'iteration', iteration
        Z = code_generator(nbatch)
        gen_img = gen.predict(Z)

        # Experience replay: mix the fresh fakes into a shuffled pool and
        # train on a sample of the pool rather than only the newest fakes.
        fake_pool = np.vstack([fake_pool[:nbatch], gen_img])
        shuffle_indices = np.random.permutation(len(fake_pool))
        fake_pool = fake_pool[shuffle_indices]
        gen_img = fake_pool[:nbatch]

        real_img = data_generator(nbatch)
        # Labels: 0 for generated, 1 for real.
        gen_y = np.zeros((nbatch, 1))
        real_y = np.ones((nbatch, 1))
from midiwrapper import Song
import numpy as np

from keras.models import load_model


if __name__ == '__main__':
    # Continue a song with a trained "memorize" RNN: seed with the first
    # LEN frames of a reference file, then iteratively predict new frames.
    # NOTE(review): the generation loop is truncated in this excerpt.
    FILE = 'datasets/easymusicnotes/level6/anniversary-song-glen-miller-waltz-piano-level-6.mid'
    SONG_LEN = 1000   # number of frames to generate
    LEN = 20          # seed / context window length
    dim = 128         # per-frame vector size (MIDI pitch range)
    THRESHOLD = 0.85  # activation threshold (used past this excerpt)

    # One-hot piano roll at quarter-beat resolution, note on/off events only.
    hots = Song(FILE).\
        encode_onehot(
                    {'filter_f': lambda x: x.type in ['note_on', 'note_off'],
                     'unit': 'beat'},
                    {'resolution': 0.25})
    print hots.shape
    model = load_model('temp/memorize.h5')

    mid = Song()
    track = mid.add_track()

    # Seed the context with the opening of the reference song.
    # NOTE(review): the hard-coded 20 duplicates LEN -- keep in sync.
    seq = [hots[i] for i in range(20)]
#   seq = [np.random.binomial(1, 0.3, (dim,)) for _ in range(LEN)]
    notes = []  # deepcopy(seq)
    # Per-pitch counter of consecutive active frames.
    accumulate = np.zeros((dim,)).astype('int')
    for _ in range(SONG_LEN):
        note = model.predict(np.array([seq]))[0]
        seq.pop(0)
# Example #9
# 0
    # NOTE(review): fragment of a GAN training loop; `gendis`, `dis`, `gen`,
    # `gen_opt`, `dis_opt`, `code_generator`, `data_generator`, `vis_dir`,
    # `niter`, `nbatch` and `args` are defined outside this excerpt.
    gendis.compile(optimizer=gen_opt, loss='binary_crossentropy')

    shape = dis.get_input_shape_at(0)[1:]
    gen_input, real_input = Input(shape), Input(shape)
    # Two-headed discriminator scoring generated and real batches jointly.
    dis2batch = Model([gen_input, real_input],
                      [dis(gen_input), dis(real_input)])
    dis.trainable = True
    dis2batch.compile(optimizer=dis_opt,
                      loss='binary_crossentropy',
                      metrics=['binary_accuracy'])

    gen_trainner = gendis
    dis_trainner = dis2batch

    imsave('{}/real.png'.format(vis_dir),
           Song.grid_vis_songs(data_generator(25)))
    # Fixed latent codes for comparable visualizations across iterations.
    vis_Z = code_generator(25)
    for iteration in range(0, niter):
        print 'iteration', iteration
        Z = code_generator(nbatch)
        gen_img = gen.predict(Z)

        # Train the discriminator every k-th iteration (and always during
        # the initial boost phase); otherwise fall through to the else
        # branch (generator step, continued past this excerpt).
        if iteration < args.boost or iteration % args.k == 0:
            real_img = data_generator(nbatch)
            gen_y = np.zeros((nbatch, 1))
            real_y = np.ones((nbatch, 1))
            d_loss = dis_trainner.train_on_batch([gen_img, real_img],
                                                 [gen_y, real_y])
            print('\tDiscriminator:\t{}'.format(d_loss))
        else:
            y = np.ones((nbatch, 1))
        # NOTE(review): orphaned tail of a get_openning-style helper; the
        # leading `if mode == 'borrow':` branch is missing from this excerpt.
        hots = midi.encode_onehot()
        return [hots[i] for i in range(LEN)]
    elif mode == 'random':
        # random binary vectors with ~30% of entries active
        # return [np.random.rand(dim,) for _ in range(LEN)]
        return [np.random.binomial(1, 0.3, (dim, )) for _ in range(LEN)]
    else:
        # NOTE(review): `raise NotImplemented` raises a non-exception and
        # fails with TypeError -- should be NotImplementedError.
        raise NotImplemented


if __name__ == '__main__':
    # Generate a song with a trained simple RNN, silencing notes that
    # sustain for too many consecutive frames.
    # NOTE(review): the loop body is truncated in this excerpt.
    SONG_LEN = 500    # number of frames to generate
    THRESHOLD = 0.50  # activation threshold for a note to count as "on"
    MAX_SUSTAIN = 4   # max consecutive frames a note may stay on

    mid = Song()
    track = mid.add_track()

    model = load_model('temp/simple_rnn.h5')
    _, LEN, dim = model.input_shape

    seq = get_openning(LEN, mode='borrow')
    notes = []  # deepcopy(seq)
    # Per-pitch counter of consecutive frames above THRESHOLD.
    accumulate = np.zeros((dim, )).astype('int')
    for _ in range(SONG_LEN):
        note = model.predict(np.array([seq]))[0][-1]
        seq.pop(0)

        # sustain too long: reset the counter when a pitch drops below
        # threshold, then silence pitches held >= MAX_SUSTAIN frames.
        accumulate = accumulate * (note >= THRESHOLD) + (note >= THRESHOLD)
        note[accumulate >= MAX_SUSTAIN] = 0.
from keras.layers import Input, LSTM, Dense, Activation
from keras.models import Model
from keras.optimizers import RMSprop

if __name__ == '__main__':
    # Build a training set of one-hot piano rolls from every MIDI file
    # under DIR. NOTE(review): the sampling / model-building part is
    # truncated at the end of this excerpt.
    DIR = 'datasets/easymusicnotes/'
    LEN = 100  # length of input
    N = 10000  # number of training sequences

    # preparing files: collect every file path under DIR recursively.
    print 'Reading files ...'
    filelist = []
    for root, _, files in os.walk(DIR):
        for name in files:
            filelist.append(os.path.join(root, name))
    midis = [Song(filename) for filename in filelist]
    data = []
    for ind, midi in enumerate(midis):
        print '\t[{:02d}/{:02d}] Handling'.\
            format(ind, len(midis)), filelist[ind], '...'
        # One-hot roll at quarter-beat resolution, note events only.
        hots = midi.encode_onehot(
            {
                'filter_f': lambda x: x.type in ['note_on', 'note_off'],
                'unit': 'beat'
            }, {'resolution': 0.25})
        data.append(hots)
        print '\t', hots.shape
    data = np.array(data)

    # sample training data
    print 'Sampling ...'
# Example #12
# 0
        # NOTE(review): orphaned tail of a get_openning-style helper using
        # AllInOneEncoder; the leading `if mode == ...:` branch is missing
        # from this excerpt.
        hots = AllInOneEncoder().encode(midi.midi)
        # hots = midi.encode_onehot()
        return [hots[i] for i in range(LEN)]
    elif mode == 'random':
        # random binary vectors with ~20% of entries active
        # return [np.random.rand(dim,) for _ in range(LEN)]
        return [np.random.binomial(1, 0.2, (dim, )) for _ in range(LEN)]
    else:
        # NOTE(review): should be NotImplementedError -- `raise
        # NotImplemented` fails with TypeError at runtime.
        raise NotImplemented


if __name__ == '__main__':
    # Set up a stateful RNN (batch 1, one timestep per call) for song
    # generation. NOTE(review): the generation loop is missing from this
    # excerpt; `define_model` is defined elsewhere in the file.
    SONG_LEN = 1000
    THRESHOLD = 1.00

    mid = Song()
    track = mid.add_track()

    # model = load_model('temp/simple_rnn.h5', compile=False)
    # Rebuild the network as stateful and load the trained weights into it.
    model = define_model((1, 1, 363), stateful=True)
    model.load_weights('temp/simple_rnn.h5')
    model = Model(model.input, model.layers[-2].output)  # before softmax
    _, LEN, dim = model.input_shape

    # avoid too long sustain: per-pitch sustain counters.
    sustain = np.zeros((128))
    current_t = 0.

    # Fixed seed for reproducible generation.
    # np.random.seed( sum(map(ord, 'wuxintong')) )  # 32)
    np.random.seed(32)  # 32)

# testing
if __name__ == '__main__':
    # Round-trip check: encode a MIDI file with AllInOneEncoder, decode it
    # back to messages, and save the result for comparison by ear.
    from mido import MidiFile
    midi = MidiFile('./datasets/e-comp/2002chan01.mid')
    encoder = AllInOneEncoder()
    code = encoder.encode(midi)
    msg = encoder.decode(code)

    print code.dtype
    print code.shape
    # print msg[-10:]

    # Rebuild a playable file from the decoded messages.
    from midiwrapper import Song
    s = Song()
    track = s.add_track()
    for msgi in msg:
        track.append(msgi)
    s.save_as("decode.mid")

#   import os
#   from tqdm import tqdm
#   dirpath = './datasets/e-comp/'
#   tmppath = './datasets/e-comp-allinone/'
#   encoder = AllInOneEncoder()
#   filelist = []
#   for root, _, files in os.walk(dirpath):
#       for name in files:
#           filelist.append(os.path.join(root, name))
#   for filename in tqdm(filelist):
from midiwrapper import Song
import numpy as np

from keras.layers import Input, LSTM, Dense, Activation
from keras.models import Model
from keras.optimizers import RMSprop, Adam

if __name__ == '__main__':
    # Build (context, next-frame) training pairs from one song for a
    # next-step prediction model. NOTE(review): model construction is
    # truncated at the end of this excerpt.
    FILE = 'datasets/easymusicnotes/level6/anniversary-song-glen-miller-waltz-piano-level-6.mid'
    LEN = 20   # context window length
    dim = 128  # per-frame vector size (MIDI pitch range)

    # One-hot roll at quarter-beat resolution, note on/off events only.
    hots = Song(FILE).\
        encode_onehot(
                    {'filter_f': lambda x: x.type in ['note_on', 'note_off'],
                     'unit': 'beat'},
                    {'resolution': 0.25})
    print hots.shape

    # Sliding window: x = LEN consecutive frames, y = the frame after them.
    # NOTE(review): the range starts at LEN, skipping the first window --
    # looks intentional only if the opening is reserved as a seed; confirm.
    x = []
    y = []
    for i in range(LEN, len(hots)-LEN-1):
        x.append(hots[i:i+LEN])
        y.append(hots[i+LEN])
    train_x = np.array(x)
    train_y = np.array(y)

    print train_x.shape
    print train_y.shape

    # Build models