Code example #1 (score: 0)
File: interact.py — project: MultiTrickFox/Wave_Gen
def main():
    """Generate .wav files from a trained model.

    Loads the model (prompting for a valid path until one loads), runs it
    over the held-out data split, and writes one audio file per sequence
    to ``{config.output_file}{i}.wav``.
    """
    import config
    # Hoisted out of the generation loop: the original re-imported
    # `respond_to` on every iteration.
    from model import load_model, respond_to

    model = load_model()
    while not model:
        config.model_path = input('valid model: ')
        model = load_model()

    # Data backend depends on whether Fourier-domain features are used.
    if config.do_fourier:
        import data_fourier as data
    else:
        import data_direct as data

    d = data.load_data(with_meta=True)
    d, _ = data.split_data(d)

    # Shuffling is intentionally disabled: take the first hm_wav_gen sequences.
    d = d[:config.hm_wav_gen]

    for i, (seq, meta) in enumerate(d):

        # Free-run the model past the seed sequence for hm_extra_steps steps.
        _, seq = respond_to(model, [seq], training_run=False,
                            extra_steps=config.hm_extra_steps)
        seq = seq.detach()
        if config.use_gpu:
            seq = seq.cpu()
        seq = seq.numpy()

        seq = data.data_to_audio(seq, meta)

        data.write(f'{config.output_file}{i}.wav', config.sample_rate, seq)
Code example #2 (score: 0)
File: interact.py — project: MultiTrickFox/Wave_Gen3
def main():
    """Generate .wav files from a trained model (Wave_Gen3 variant).

    Loads the model (prompting for a valid path until one loads), runs it
    over the held-out data split, and writes one audio file per sequence.
    """
    import config
    # Hoisted out of the generation loop: the original re-imported
    # `respond_to`, `data_to_audio` and `write` on every iteration.
    from model import load_model, respond_to
    from data import load_data, split_data, data_to_audio, write

    model = load_model()
    while not model:
        config.model_path = input('valid model: ')
        model = load_model()

    d = load_data(with_meta=True)
    d, _ = split_data(d)

    # NOTE(review): slicing by `hm_output_file` looks odd — the sibling
    # projects use a sample-count setting (`hm_wav_gen`) here. Confirm
    # this config name is intended before changing it.
    d = d[:config.hm_output_file]

    for i, (seq, meta) in enumerate(d):

        # Seed with the first hm_extra_steps frames, then free-run for
        # hm_extra_steps further steps.
        _, seq = respond_to(model, [seq[:config.hm_extra_steps]],
                            training_run=False,
                            extra_steps=config.hm_extra_steps)
        seq = seq.detach()
        if config.use_gpu:
            seq = seq.cpu()
        seq = seq.numpy()

        seq = data_to_audio(seq, meta)
        write(f'{config.output_file}{i}.wav', config.sample_rate, seq)
Code example #3 (score: 0)
File: interact.py — project: MultiTrickFox/Wave_Gen2
def main():
    """Generate output files from a trained model (Wave_Gen2 variant).

    Loads the model (prompting for a valid path until one loads), runs it
    over the first hm_wav_gen sequences, and writes one output file each.
    """
    import config
    # Hoisted out of the generation loop: the original re-imported
    # `respond_to` on every iteration. The unused `split_data` import
    # (its call site is commented out) was dropped.
    from model import load_model, respond_to
    from data import load_data, file_output

    model = load_model()
    while not model:
        config.model_path = input('valid model: ')
        model = load_model()

    # Frame-level data unless running the attention-only model.
    d = load_data(frames=not config.attention_only)

    for i, seq in enumerate(d[:config.hm_wav_gen]):

        _, seq = respond_to(model, [seq],
                            training_run=False,
                            extra_steps=config.hm_extra_steps)
        seq = seq.detach()
        if config.use_gpu:
            seq = seq.cpu()
        seq = seq.numpy()

        file_output(f'{config.output_file}{i}', seq)
Code example #4 (score: 0)
def main():
    """Generate a note sequence from a trained model and display it as MIDI.

    Loads the '_final' checkpoint (prompting for a valid path until one
    loads), responds to a hand-picked seed sequence, converts each output
    timestep to note names, and shows the resulting MIDI.
    """
    import config
    # Hoisted out of the loops: the original re-imported these per iteration.
    from model import load_model, respond_to
    from data import load_data, split_data, note_reverse_dict, convert_to_midi

    model = load_model(config.model_path + '_final')
    while not model:
        # NOTE(review): the retry path loads WITHOUT the '_final' suffix —
        # confirm this asymmetry is intended.
        config.model_path = input('valid model: ')
        model = load_model()

    d = load_data()
    d, _ = split_data(d)

    d = [d[8]]  # hand-picked sample; previously [8,10,13,14]
    config.polyphony = True

    for i, (seq) in enumerate(d):

        seq = respond_to(model, seq[:1])
        seq = [t.detach() for t in seq]
        if config.use_gpu:
            seq = [t.cpu() for t in seq]
        seq = [t.numpy() for t in seq]

        seq_converted = []
        for timestep in seq:
            if config.act_fn == 't':
                # Map tanh output from [-1, 1] into [0, 1].
                timestep = (timestep + 1) / 2
            if config.polyphony:
                # BUG FIX: the inner loops previously rebound `i`, shadowing
                # the outer enumerate index; renamed to `k`.
                notes = []
                for k, e in enumerate(timestep[0]):
                    if e > config.pick_threshold:
                        # Last output unit encodes a rest ('R'); all others
                        # encode note (pitch-class + octave).
                        notes.append(
                            note_reverse_dict[k % 12] + str(
                                int(k / 12) + config.min_octave
                            ) if k != config.out_size - 1 else 'R')
                t_converted = ','.join(notes) if notes else 'R'
            else:
                k = timestep[0].argmax()
                t_converted = note_reverse_dict[k % 12] + str(
                    int(k / 12) + config.min_octave)
            seq_converted.append(t_converted)
        convert_to_midi(seq_converted).show()
Code example #5 (score: 0)
File: train.py — project: MultiTrickFox/QRNN
def nograd_loss(args):
    """Accumulate the sequence loss over one datapoint, without gradients.

    `args` is a (model, datapoint) pair; the datapoint is an iterable of
    (input, label) windows fed through the model with carried-over states.
    Returns the summed loss.
    """
    model, datapoint = args

    total = 0
    states = None

    with no_grad():
        for inp, lbl in datapoint:
            out, states = respond_to(model, inp, states)
            total += sequence_loss(lbl, out, do_grad=False)

    return total
Code example #6 (score: 0)
File: train.py — project: MultiTrickFox/QRNN
def grad_loss(args):
    """Accumulate loss and summed gradients over one datapoint.

    `args` is a (model, datapoint) pair; the datapoint is an iterable of
    (input, label) windows. Returns (grads, loss) where `grads` is one
    summed gradient tensor per model parameter, flattened across layers.
    """
    model, datapoint = args

    # One zero tensor per parameter, in layer order.
    grads = [zeros(p.size())
             for layer in model
             for p in layer._asdict().values()]

    total = 0
    states = None

    for inp, lbl in datapoint:
        out, states = respond_to(model, inp, states)

        # Truncated BPTT: cut the graph between windows.
        states = [s.detach() for s in states]

        total += sequence_loss(lbl, out)
        grads = [g + dg for g, dg in zip(grads, collect_grads(model))]

    return grads, total
Code example #7 (score: 0)
def main(disp_text=True):
    """Train the model, tracking per-epoch train/dev losses, then plot them.

    Args:
        disp_text: when True, print progress messages to stdout.

    Returns:
        (model, [data_losss, dev_losss]) — the trained model and the
        per-epoch loss histories.
    """

    # Create a fresh model, or load one (creating it only if loading fails).
    if config.fresh_model:
        config.all_losses = []
        save_model(make_model())
        model = load_model()
        if disp_text: print('created model.', end=' ')
    else:
        model = load_model()
        if not model:
            save_model(make_model())
            model = load_model()
            if disp_text: print('created model.', end=' ')
        else:
            if disp_text: print('loaded model.', end=' ')

    data = load_data()
    data, data_dev = split_data(data)
    # from random import choice
    # from torch import randn
    # data = [[randn(config.in_size) for _ in range(choice(range(config.max_seq_len//2,config.max_seq_len)))] for _ in range(40)]
    # data_dev = []
    # for d in data: print(len(d))
    # Optionally truncate every sequence to the configured maximum length.
    if config.max_seq_len: data = [d[:config.max_seq_len] for d in data]

    # Resolve batch size: falsy or oversized -> one batch of everything;
    # a fraction in (0, 1) -> that fraction of the dataset.
    if not config.batch_size or config.batch_size >= len(data):
        config.batch_size = len(data)
        one_batch = True
    elif config.batch_size < 1:
        config.batch_size = int(len(data) * config.batch_size)
        one_batch = False
    else:
        one_batch = False

    if disp_text:
        print(
            f'hm data: {len(data)}, hm dev: {len(data_dev)}, bs: {config.batch_size}, lr: {config.learning_rate}, \ntraining started @ {now()}'
        )

    # Initial (pre-training) losses; the train loss is only measured when
    # actually batching, and only if no history exists yet.
    data_losss, dev_losss = [], []
    if not one_batch:
        if not config.all_losses:
            config.all_losses.append(dev_loss(model, data))
        data_losss.append(config.all_losses[-1])
    if config.dev_ratio:
        dev_losss.append(dev_loss(model, data_dev))

    if data_losss or dev_losss:
        if disp_text:
            print(
                f'initial loss(es): {data_losss[-1] if data_losss else ""} {dev_losss[-1] if dev_losss else ""}'
            )

    for ep in range(config.hm_epochs):

        loss = 0

        for i, batch in enumerate(batchify_data(data)):

            loss += respond_to(model, batch)

            # One optimizer step per batch.
            sgd(model) if config.optimizer == 'sgd' else adaptive_sgd(model)

        loss /= len(data)

        # When batching, re-measure on the full training set so the logged
        # loss reflects the post-update model.
        if not one_batch: loss = dev_loss(model, data)
        data_losss.append(loss)
        config.all_losses.append(loss)
        if config.dev_ratio: dev_losss.append(dev_loss(model, data_dev))

        if disp_text:
            print(
                f'epoch {ep}, loss {loss}, dev loss {dev_losss[-1] if config.dev_ratio else ""}, completed @ {now()}',
                flush=True)
        # Periodic checkpointing every ckp_per_ep epochs.
        if config.ckp_per_ep and ((ep + 1) % config.ckp_per_ep == 0):
            save_model(model, config.model_path + f'_ckp{ep}')

    # Single-batch runs only get their train loss measured once, at the end.
    if one_batch: data_losss.append(dev_loss(model, data))

    if disp_text:
        print(
            f'training ended @ {now()} \nfinal losses: {data_losss[-1]}, {dev_losss[-1] if config.dev_ratio else ""}',
            flush=True)
    show(plot(data_losss))
    if config.dev_ratio:
        show(plot(dev_losss))
    if not config.fresh_model: show(plot(config.all_losses))

    return model, [data_losss, dev_losss]
Code example #8 (score: 0)
def dev_loss(model, batch):
    """Mean per-sequence loss of `model` on `batch`, computed without gradients."""
    with no_grad():
        total, _ = respond_to(model, batch, training_run=False)
    return total / len(batch)
Code example #9 (score: 0)
File: train.py — project: MultiTrickFox/Wave_Gen2
def dev_loss(model, batch):
    """Loss of `model` on `batch`, normalized by total timestep count, no gradients."""
    with no_grad():
        total, _ = respond_to(model, batch, training_run=False)
    hm_timesteps = sum(len(sequence) for sequence in batch)
    return total / hm_timesteps
Code example #10 (score: 0)
File: train.py — project: MultiTrickFox/Wave_Gen2
def main():
    """Train the Wave_Gen2 model, tracking and plotting train/dev losses.

    Returns:
        (model, [data_losss, dev_losss]) — the trained model and the
        per-epoch loss histories.
    """

    # Attention-only mode swaps in an alternative model implementation.
    if config.attention_only:
        from model2 import make_model_higher, respond_to
    else: from model import make_model_higher, respond_to

    # Create a fresh model, or load one (creating it only if loading fails).
    if config.fresh_model:
        save_model(make_model_higher())
        model = load_model()
        print('created model.',end=' ')
    else:
        model = load_model()
        if not model:
            save_model(make_model_higher())
            model = load_model()
            print('created model.',end=' ')
        else:
            print('loaded model.',end=' ')
    print(f'info: {config.creation_info}')

    data = load_data(frames=not config.attention_only)
    data, data_dev = split_data(data)

    # Resolve batch size: falsy or oversized -> one batch of everything;
    # a fraction in (0, 1) -> that fraction of the dataset.
    if not config.batch_size or config.batch_size >= len(data):
        config.batch_size = len(data)
        one_batch = True
    elif config.batch_size < 1:
        config.batch_size = int(len(data)*config.batch_size)
        one_batch = False
    else: one_batch = False

    print(f'hm data: {len(data)}, hm dev: {len(data_dev)}, bs: {config.batch_size}, lr: {config.learning_rate}, \ntraining started @ {now()}')

    # Initial (pre-training) losses.
    data_losss, dev_losss = [], []
    if config.batch_size != len(data):
        data_losss.append(dev_loss(model, data))
    if config.dev_ratio:
        dev_losss.append(dev_loss(model, data_dev))

    if data_losss or dev_losss:
        print(f'initial loss(es): {data_losss[-1] if data_losss else ""} {dev_losss[-1] if dev_losss else ""}')

    for ep in range(config.hm_epochs):

        loss = 0

        for i, batch in enumerate(batchify_data(data, do_shuffle=not one_batch)):

            # print(f'\tbatch {i}, started @ {now()}', flush=True)

            # Total timestep count; used to normalize the optimizer step.
            batch_size = sum(len(sequence) for sequence in batch)

            loss += respond_to(model, batch)
            sgd(model, batch_size=batch_size) if config.optimizer == 'sgd' else \
                adaptive_sgd(model, batch_size=batch_size)

        # loss /= sum(len(sequence) for sequence in data)
        # When batching, re-measure on the full training set so the logged
        # loss reflects the post-update model.
        if not one_batch: loss = dev_loss(model, data)
        data_losss.append(loss)
        if config.dev_ratio:
            dev_losss.append(dev_loss(model, data_dev))

        print(f'epoch {ep}, loss {loss}, dev loss {dev_losss[-1] if config.dev_ratio else ""}, completed @ {now()}', flush=True)
        # Periodic checkpointing every ckp_per_ep epochs.
        if config.ckp_per_ep and ((ep+1)%config.ckp_per_ep==0):
                save_model(model,config.model_path+f'_ckp{ep}')

    # data_losss.append(dev_loss(model, data))
    # if config.dev_ratio:
    #     dev_losss.append(dev_loss(model, data_dev))

    print(f'training ended @ {now()} \nfinal losses: {data_losss[-1]}, {dev_losss[-1] if config.dev_ratio else ""}', flush=True)
    show(plot(data_losss))
    if config.dev_ratio:
        show(plot(dev_losss))

    # if input(f'Save model as {config.model_path}? (y/n): ').lower() == 'y':
    #     save_model(load_model(), config.model_path + '_prev')
    #     save_model(model)

    return model, [data_losss, dev_losss]
Code example #11 (score: 0)
File: train.py — project: MultiTrickFox/QRNN
def main(model=None):
    """Train a QRNN model end to end, then optionally save it.

    Args:
        model: an already-built model to continue training; when None, a
            model is loaded from disk (or freshly created if loading fails).

    Returns:
        (model, [data_losss, dev_losss]) — the trained model and the loss
        histories (each ends with one extra post-training measurement).
    """

    print(f'readying model & data @ {now()}')

    # Load preprocessed data, running preprocessing on first use.
    data = load_data()
    if not data:
        save_data(preprocess())
        data = load_data()

    # Load or create a model only when the caller didn't pass one in.
    if not model:
        if not config.fresh_model:
            model = load_model()
        if not model:
            model = make_model()
            save_model(model)
            model = load_model()
            print('created ',end='')
        else: print('loaded ',end='')
        print(f'model: {describe_model(model)}')

    print(f'total files: {len(data)}, ',end='')

    data, data_dev = split_dataset(data)

    # Clamp batch size to the dataset; -1 means "match the dev-set size".
    if config.batch_size > len(data):
        config.batch_size = len(data)
    elif config.batch_size == -1:
        config.batch_size = len(data_dev)

    print(f'train: {len(data)}, dev: {len(data_dev)}, batch size: {config.batch_size}')

    print(f'hm train: {sum(len(datapoint) for datapoint in data)}, '
          f'hm dev: {sum(len(datapoint) for datapoint in data_dev)}, '
          f'learning rate: {config.learning_rate}, '
          f'optimizer: {config.optimizer}, '
          f'\ntraining for {config.hm_epochs} epochs.. ',end='\n')

    # Single-batch mode also applies to combined+parallel training, where
    # the whole dataset is processed as one batch; shuffling is pointless then.
    one_batch = (config.batch_size == len(data)) or (config.train_combined and config.train_parallel)
    config.shuffle_epoch &= not one_batch
    # Overlapping windows: each input is counted once per sliding position.
    window_slide_multiplier = config.hm_bars_grouped//config.hm_bars_slide
    if config.ckp_save_epochs == -1: config.ckp_save_epochs = range(config.hm_epochs)

    data_losss, dev_losss = [], []

    if config.initialize_loss:

        # Measure pre-training losses (train loss skipped in one-batch mode).
        print(f'initializing losses @ {now()}', flush=True)
        if not one_batch:
            data_losss.append(dev_loss(model,data))
        dev_losss.append(dev_loss(model,data_dev))
        print(f'initial losses: {data_losss, dev_losss}')

    print(f'training started @ {now()}', flush=True)

    for ep in range(config.hm_epochs):

        loss = 0

        # Fast path: the entire dataset as one parallel, combined batch.
        if config.train_parallel and config.train_combined:
            l, g = process_data_onebatch(model, data)
            loss += l
            give_grads(model, g)
            # Normalize the step by the total (window-weighted) input length.
            batch_size = sum(sum(len(inp) * window_slide_multiplier for inp, lbl in datapoint) for datapoint in data)
            sgd(model, batch_size=batch_size) if config.optimizer == 'sgd' else adaptive_sgd(model, ep, batch_size=batch_size)

        else:
            for i,batch in enumerate(batchify(data)):

                if config.disp_batches:
                    print(f'\tbatch {i}, {sum(len(datapoint) for datapoint in batch)}', end='', flush=True)

                batch_size = sum(sum(len(inp)*window_slide_multiplier for inp,lbl in datapoint) for datapoint in batch)

                # Three training strategies: worker-parallel, combined-graph,
                # or plain sequential over datapoints.
                if config.train_parallel:
                    l,g = process_batch_parallel(model,batch)
                    loss += l
                    give_grads(model,g)

                elif config.train_combined:
                    loss += process_batch_combined(model, batch)

                else:
                    for j,datapoint in enumerate(batch):
                        states = None
                        for k,(inp,lbl) in enumerate(datapoint):
                            out, states = respond_to(model, inp, states)
                            # Truncated BPTT: cut the graph between windows.
                            states = [state.detach() for state in states]
                            loss += sequence_loss(lbl,out)

                sgd(model,batch_size=batch_size) if config.optimizer == 'sgd' else adaptive_sgd(model,ep,batch_size=batch_size)

                if config.disp_batches:
                    print(f', completed @ {now()}' ,flush=True)

        # Normalize the epoch loss by the total (window-weighted) input length.
        loss /= sum(sum(len(inp)*window_slide_multiplier for inp,lbl in datapoint) for datapoint in data)

        data_losss.append(loss)
        dev_losss.append(dev_loss(model,data_dev))

        print(f'epoch {ep}, loss {loss}, dev loss {dev_losss[-1]}, completed @ {now()}', flush=True)

        if ep in config.ckp_save_epochs:
            save_model(model,f'{config.model_save_path}_ckp{ep}')

    # One final post-training measurement on both splits.
    data_losss.append(dev_loss(model,data))
    dev_losss.append(dev_loss(model,data_dev))

    print(f'final losses: {[data_losss[-1],dev_losss[-1]]}')

    print(f'training ended @ {now()}', flush=True)

    plot(data_losss)
    show()
    plot(dev_losss)
    show()

    # Keep the previous checkpoint around as '_prev' before overwriting.
    if config.overwrite_model or input(f'Save model as {config.model_save_path}? (y/n): ').lower() == 'y':
        save_model(load_model(),config.model_save_path+'_prev')
        save_model(model)

    return model, [data_losss, dev_losss]