Example #1
import os

import torch

# Module paths follow the samplernn-pytorch layout these snippets come from;
# adjust the imports if your checkout differs.
from model import SampleRNN
from trainer.plugins import GeneratorPlugin


def main(exp, frame_sizes, generate_from, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  generate_from=generate_from,
                  **params)
    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        nb_classes=params['nb_classes'],
        weight_norm=params['weight_norm'],
    )
    #    model = SampleRNN([16, 4], 2, 1024, True, 256, True)
    print('Loading saved model from ' + params['generate_from'])
    checkpoint = torch.load(params['generate_from'])
    # Strip the 6-character "model." prefix that the keys pick up from being
    # saved through the Predictor wrapper
    temporary_dict = {}
    for k, v in checkpoint.items():
        temporary_dict[k[6:]] = v
    checkpoint = temporary_dict
    model.load_state_dict(checkpoint)
    if not os.path.exists(params['generate_to']):
        os.mkdir(params['generate_to'])
    print(params['cond'])
    generator = GeneratorPlugin(params['generate_to'], params['n_samples'],
                                params['sample_length'], params['sample_rate'],
                                params['nb_classes'], params['cond'])
    generator.register_generate(model.cuda(), params['cuda'])
    generator.epoch(exp)
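Example #1 strips the checkpoint's "model." key prefix positionally with k[6:]; the later examples do the same with k.replace("model.", ""). A minimal sketch of that idiom as a reusable helper (strip_prefix is a hypothetical name, not part of any of these repos):

from collections import OrderedDict


def strip_prefix(state_dict, prefix="model."):
    """Drop a wrapper prefix from every checkpoint key.

    Checkpoints saved through the Predictor wrapper store parameters under
    keys like "model.<layer>"; the bare SampleRNN expects the same keys
    without that prefix. (Sketch only; strip_prefix is hypothetical.)
    """
    return OrderedDict(
        (k[len(prefix):] if k.startswith(prefix) else k, v)
        for k, v in state_dict.items()
    )


# Usage: model.load_state_dict(strip_prefix(torch.load(checkpoint_path)))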
Example #2
# Load pretrained model
model.load_state_dict(new_pretrained_state)

# Generate Plugin
num_samples = 1  # params['n_samples']
sample_length = params['sample_length']
sample_rate = params['sample_rate']
sampling_temperature = params['sampling_temperature']

# Override from our options
sample_length = sample_rate * int(options.length)

print("Number samples: {}, sample_length: {}, sample_rate: {}".format(num_samples, sample_length, sample_rate))
print("Generating %d seconds of audio" % (sample_length / sample_rate))
generator = GeneratorPlugin(GENERATED_PATH, num_samples, sample_length, sample_rate, sampling_temperature)

# Call new register function to accept the trained model and the cuda setting
generator.register_generate(model.cuda(), params['cuda'])

# Generate new audio
# TODO: check if we already have generated audio and increment the file name
generator.epoch(OUTPUT_NAME)
GENERATED_FILEPATH = GENERATED_PATH + "ep" + OUTPUT_NAME + "-s1.wav"
print("Saved audio to %s " % GENERATED_FILEPATH)

if options.output:
    print("Moving to %s" % options.output)
    os.rename(GENERATED_FILEPATH, options.output)
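Two details of this snippet are worth spelling out: sample_length counts raw audio samples, so sample_rate * int(options.length) requests options.length seconds of audio, and sample_length / sample_rate recovers that duration (e.g. 4 seconds at 16000 Hz is 64000 samples). And judging from how GENERATED_FILEPATH is built, generator.epoch(label) appears to write its output as ep<label>-s<n>.wav inside GENERATED_PATH, one file per requested sample, which is why the "-s1" suffix matches num_samples = 1.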

Example #3

model = SampleRNN(frame_sizes=params['frame_sizes'],
                  n_rnn=params['n_rnn'],
                  dim=params['dim'],
                  learn_h0=params['learn_h0'],
                  q_levels=params['q_levels'],
                  weight_norm=params['weight_norm'])

# Delete "model." from key names since loading the checkpoint automatically attaches it to the key names
pretrained_state = torch.load(PRETRAINED_PATH)
new_pretrained_state = OrderedDict()

for k, v in pretrained_state.items():
    layer_name = k.replace("model.", "")
    new_pretrained_state[layer_name] = v
    # print("k: {}, layer_name: {}, v: {}".format(k, layer_name, np.shape(v)))

# Load pretrained model
model.load_state_dict(new_pretrained_state)

# Generate Plugin
num_samples = 2  # params['n_samples']
sample_length = params['sample_length']
sample_rate = params['sample_rate']
print("Number samples: {}, sample_length: {}, sample_rate: {}".format(
    num_samples, sample_length, sample_rate))
generator = GeneratorPlugin(GENERATED_PATH, num_samples, sample_length,
                            sample_rate)

# Call new register function to accept the trained model and the cuda setting
generator.register_generate(model.cuda(), params['cuda'])

# Generate new audio
generator.epoch('Test2')
Example #4

# Get initial samples from one test file and check whether generation reproduces it
audio_filename = dataset_filenames[0]
from librosa.core import load
sr = params['sample_rate']
seq, sr = load(audio_filename, sr=sr, mono=True)
print("Sample rate: {}".format(sr))

# Generate Plugin
num_samples = 6  # params['n_samples']

initial_seq_size = 64 * 100  # presumably has to be a multiple of rnn.n_frame_samples
initial_seq = None
if initial_seq_size > 1:
    init = utils.linear_quantize(torch.from_numpy(seq[0:initial_seq_size]),
                                 params['q_levels'])
    # init = seq[0:initial_seq_size]
    init = np.tile(init, (num_samples, 1))
    initial_seq = torch.LongTensor(init)
    # initial_seq = utils.linear_quantize(initial_seq, params['q_levels'])

sample_length = params['sample_length']
sample_rate = params['sample_rate']
print("Number samples: {}, sample_length: {}, sample_rate: {}".format(
    num_samples, sample_length, sample_rate))
generator = GeneratorPlugin(GENERATED_PATH, num_samples, sample_length,
                            sample_rate)

# Overload the register function to accept the trained model and the cuda setting
generator.register_generate(model.cuda(), params['cuda'])

# Generate new audio, seeded with the quantized snippet from the test file
generator.epoch('Test19_{}'.format(initial_seq_size), initial_seed=initial_seq)
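The seeding above relies on utils.linear_quantize to turn the float waveform loaded by librosa into the integer sample classes SampleRNN consumes. The repo's implementation is not shown here; a minimal sketch of the idea, assuming input already normalized to [-1, 1] (the real helper may normalize per sequence instead):

import torch


def linear_quantize(samples, q_levels):
    """Uniformly bin a float waveform in [-1, 1] into {0, ..., q_levels - 1}.

    Sketch only: utils.linear_quantize in the actual project may differ in
    how it normalizes before binning.
    """
    samples = (samples + 1.0) / 2.0    # [-1, 1] -> [0, 1]
    samples = samples.clamp(0.0, 1.0)  # guard against clipped input
    return (samples * (q_levels - 1)).round().long()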
Example #5
import cProfile
import io
import os
import pstats
import time

import torch

# Repo-local names (SampleRNN, Predictor, Trainer, the plugins, and the
# setup helpers below) come from the samplernn-pytorch project modules.


def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints']))
    trainer.register_plugin(
        GeneratorPlugin(os.path.join(results_path,
                                     'samples'), params['n_samples'],
                        params['sample_length'], params['sample_rate']))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(results_path,
                    iteration_fields=[
                        'training_loss', ('training_loss', 'running_avg'),
                        'time'
                    ],
                    epoch_fields=['validation_loss', 'test_loss', 'time'],
                    plots={
                        'loss': {
                            'x': 'iteration',
                            'ys': [
                                'training_loss',
                                ('training_loss', 'running_avg'),
                                'validation_loss',
                                'test_loss',
                            ],
                            'log_y': True
                        }
                    }))

    init_comet(params, trainer)

    generateAudio = GeneratorPlugin(os.path.join(results_path, 'samples'),
                                    params['n_samples'],
                                    params['sample_length'],
                                    params['sample_rate'])

    pr = cProfile.Profile()

    generateAudio.register(trainer)
    start = time.time()

    pr.enable()
    generateAudio.epoch(30)
    pr.disable()

    end = time.time()

    s = io.StringIO()
    sortby = 'cumulative'
    ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
    ps.print_stats()
    print(s.getvalue())

    print("Time taken: ", end - start)
Example #6
for k, v in pretrained_state.items():
    layer_name = k.replace("model.", "")
    new_pretrained_state[layer_name] = v
    print("k: {}, layer_name: {}, v: {}".format(k, layer_name, np.shape(v)))

# Load pretrained model
model.load_state_dict(new_pretrained_state)
model = model.cuda()

# Generate Plugin
num_samples = 4  # params['n_samples']
sample_length = params['sample_length']
sample_rate = params['sample_rate']
print("Number samples: {}, sample_length: {}, sample_rate: {}".format(
    num_samples, sample_length, sample_rate))
generator = GeneratorPlugin(GENERATED_PATH, num_samples, sample_length,
                            sample_rate)

# Call new register function to accept the trained model and the cuda setting
generator.register_generate(model, params['cuda'])

# Generate new audio
# Condition generation on an explicit hidden state (hidden_cnn)
# hidden_cnn = torch.zeros(params['n_rnn'], num_samples, params['dim']).contiguous().cuda()
hidden_cnn = torch.tensor(
    np.ones([params['n_rnn'], num_samples,
             params['dim']])).contiguous().float().cuda()
# hidden_cnn = torch.LongTensor(params['n_rnn'], num_samples, params['dim']).fill_(0.)
# hidden_cnn = torch.tensor(np.zeros([params['n_rnn'], num_samples, params['dim']])).long()
generator.epoch('Test2', hidden=hidden_cnn)
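The shape of hidden_cnn follows PyTorch's RNN hidden-state convention of (num_layers, batch, hidden_size), here (n_rnn, num_samples, dim), and it has to be a float tensor on the same device as the model, which is presumably why the commented-out LongTensor variants were abandoned. The same tensor can be built without the NumPy round-trip:

# Equivalent construction: float32 and contiguous by default, allocated on the GPU
hidden_cnn = torch.ones(params['n_rnn'], num_samples, params['dim'],
                        device='cuda')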