Beispiel #1
0
def test_run():
    """End-to-end smoke test of the char-RNN pipeline on a tiny corpus.

    Trains a minimal 2-unit LSTM for a single epoch on the Gettysburg
    text, draws a sample from the trained model as raw indices, and
    checks a deterministic checksum of the sampled output.
    """
    cfg = Config(
        cuda=False,
        data_dir=get_data_dir("gettysburg"),
        save_dir=None,
        rnn_size=2,
        rnn_model="LSTM",
        num_layers=2,
        batch_size=8,
        seq_length=4,
        num_epochs=1,
        save_every=None,
        grad_clip=5.,
        learning_rate=0.002,
        decay_rate=0.97,
        keep_prob=1.0,
        sampling_mode="weighted",
    )
    dataset, model, losses = train_model(cfg, verbose=False)
    # text=False makes sample_model return numeric output we can checksum
    sampled = sample_model(
        config=cfg,
        dataset=dataset,
        model=model,
        text=False,
    )
    hash_result = np.sum(sampled)
    assert hash_result == 18989, hash_result
Beispiel #2
0
def sample():
    """Persist a submitted opinion and render the confirmation page.

    Reads ``title`` and ``opinion`` from the POSTed form, stores a new
    ``sample_model`` row keyed by the title, and renders ``sample.html``
    echoing the submitted values.
    """
    title = request.form['title']      # form field: record title / id
    opinion = request.form['opinion']  # form field: free-text opinion

    session = Session()
    try:
        record = sample_model(id=title)  # create the ORM instance
        session.add(record)              # stage the insert
        session.commit()                 # flush to the database
    finally:
        # Close the session even if add/commit raises, so the
        # connection is always released (original leaked on error).
        session.close()
    return render_template('sample.html', title=title, opinion=opinion)
Beispiel #3
0
def runtime_pixelsnail_sampler(folder_name, model,
                               dataset_name, run_num, epoch, batch_size=16,
                               condition=None, image_size=None,
                               device='cuda', temperature=1.0):
    """Sample a batch from a PixelSNAIL model and save it as an image grid.

    Args:
        folder_name: run folder used to build the output path.
        model: the PixelSNAIL model to sample from (switched to eval mode).
        dataset_name, run_num, epoch: identifiers forwarded to
            get_runtime_sampler_path to locate the output file.
        batch_size: number of samples to draw (also the grid row width).
        condition: optional conditioning input forwarded to sample_model.
        image_size: [height, width] of the sampled codemap; defaults to
            [32, 32] when None.
        device: torch device string used for sampling.
        temperature: softmax temperature for sampling.
    """
    # Fix: the original used a mutable default argument image_size=[32, 32];
    # use a None sentinel and fall back to the same value.
    if image_size is None:
        image_size = [32, 32]
    model.eval()
    row = sample_model(model, image_size=image_size, condition=condition,
                             batch_size=batch_size, device=device, temperature=temperature)
    path = get_runtime_sampler_path(folder_name, dataset_name, run_num, epoch)
    utils.save_image(
        torch.cat(row, 0),
        path,
        nrow=batch_size,
        normalize=True,
        range=(-1, 1),
    )
Beispiel #4
0
def make_sample(model_vqvae, model_top, model_middle, model_bottom, file_path, batch=16, device='cuda', temp=1.0):
    """Sample hierarchical codemaps, decode them, and save the image grid.

    Draws a top-level codemap first, then conditions each lower level on
    the level above it. With a middle prior the resolutions are
    32 -> 64 -> 128; without one, the bottom prior (64x64) conditions
    directly on the top sample. The decoded image is clamped to [-1, 1]
    and written to ``file_path``.
    """
    # Accumulate codemaps coarsest-first; each new level conditions on
    # the most recently sampled one.
    codes = [sample_model(model_top, device, batch, [32, 32], temp)]

    if model_middle is not None:
        codes.append(sample_model(
            model_middle, device, batch, [64, 64], temp, condition=codes[-1]
        ))
        codes.append(sample_model(
            model_bottom, device, batch, [128, 128], temp, condition=codes[-1]
        ))
    else:
        codes.append(sample_model(
            model_bottom, device, batch, [64, 64], temp, condition=codes[-1]
        ))

    decoded_sample = model_vqvae.decode_code(*codes).clamp(-1, 1)

    save_image(decoded_sample, file_path,
               normalize=True, range=(-1, 1))
Beispiel #5
0
def top_conditioned_sample():
    """Sample from the bottom prior given the incoming top codemap"""
    # Flask endpoint: parses codemaps and sampling parameters from the
    # request, samples bottom codes conditioned on the top code for each
    # pitch in [min_pitch, max_pitch), decodes the pair to spectrograms,
    # converts each to audio, and returns the results as a zip of wavs.
    #
    # All heavy objects are module-level globals that must be initialized
    # before this endpoint is called; fail fast if any is missing.
    global vqvae
    assert vqvae is not None
    global DEVICE
    assert DEVICE is not None
    global transformer_bottom
    assert transformer_bottom is not None
    global label_encoders_per_modality
    assert label_encoders_per_modality is not None
    global spectrograms_helper
    assert spectrograms_helper is not None

    # Debug switch: when True, skip sampling entirely and echo the
    # incoming codes back after a short artificial delay.
    BYPASS = False

    top_code, bottom_code = parse_codes(request)
    global_instrument_family_str = str(
        request.args.get('instrument_family_str'))
    # NOTE(review): these raise if the query params are absent — the
    # client is expected to always supply min_pitch/max_pitch.
    min_pitch = int(request.args.get('min_pitch'))
    max_pitch = int(request.args.get('max_pitch'))

    if not BYPASS:
        temperature = float(request.args.get('temperature'))
        # top-p / top-k default to 0 (disabled) when absent from the query
        top_p = float(request.args.get('top_p') or 0.0)
        top_k = int(request.args.get('top_k') or 0)

        # Class-conditioning tensors for the requested pitch range and
        # instrument family.
        class_conditioning_tensors_bottom = make_conditioning_tensors(
            {
                'pitch': (min_pitch, max_pitch),
                'instrument_family_str': global_instrument_family_str
            }, label_encoders_per_modality)

        # repeat the top codemap for all bottom samples
        # (one sample per pitch; the range is half-open so max_pitch
        # itself is excluded)
        num_samples = max_pitch - min_pitch
        top_code = top_code.expand(num_samples, -1, -1)

        bottom_code = sample_model(
            transformer_bottom,
            DEVICE,
            num_samples,
            transformer_bottom.shape,
            temperature,
            condition=top_code,
            class_conditioning=class_conditioning_tensors_bottom,
            top_p_sampling_p=top_p,
            top_k_sampling_k=top_k)
    else:
        import time
        num_samples = 1
        top_code = top_code.expand(num_samples, -1, -1)
        bottom_code = bottom_code.expand(num_samples, -1, -1)
        time.sleep(2)  # simulate sampling latency for the client

    # Decode the (top, bottom) codemaps back to log-mel spectrogram + IF
    # (per the variable name — presumably instantaneous frequency).
    logmelspectrogram_and_IF = vqvae.decode_code(top_code, bottom_code)

    # Write one wav per pitch into a single zip archive for download.
    zip_path = upload_directory + 'samples.zip'
    with ZipFile(zip_path, 'w') as zf:
        for pitch, sample in zip(range(min_pitch, max_pitch),
                                 logmelspectrogram_and_IF):
            # convert to audio and write to file
            audio = spectrograms_helper.to_audio(sample.unsqueeze(0))[0]
            audio_path = write_audio_to_file(
                audio, f'-{global_instrument_family_str}-{pitch}')
            zf.write(audio_path,
                     arcname=f'{global_instrument_family_str}-{pitch}.wav')

    return flask.send_file(
        zip_path,
        mimetype="application/zip",
        cache_timeout=-1  # disable cache
    )
Beispiel #6
0
def train_model(config,
                verbose=True,
                seed=1,
                sample_prime_text="The ",
                sample_length=50):
    """Train a character-level RNN and return (dataset, model, losses).

    Args:
        config: experiment Config (epochs, learning rate, clipping,
            cuda flag, save_dir/save_every, decay_rate, ...).
        verbose: if True, print per-epoch status and a sample generation.
        seed: random seed forwarded to set_seed for reproducibility.
        sample_prime_text: priming text for the per-epoch sample output.
        sample_length: number of characters sampled each verbose epoch.

    Returns:
        dataset: the loaded dataset object.
        model: the trained CharRNNModel.
        losses: list of summed batch losses, one entry per epoch.
    """

    set_seed(config, seed=seed)
    dataset, dataloader = load_data(config)

    model = CharRNNModel(config)
    if config.cuda:
        model = model.cuda()

    # Setup training: cross-entropy over the vocabulary, Adam optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(
        model.parameters(),
        lr=config.learning_rate,
    )
    losses = []

    # Training Loop
    model.train()
    print("START TRAINING for {} Epochs".format(config.num_epochs))
    for epoch in range(config.num_epochs):
        batch_loss = 0
        for x_batch, y_batch in dataloader:

            # Pre-process inputs (move to GPU when config.cuda is set)
            x_var = maybe_cuda_var(x_batch, cuda=config.cuda)
            y_var = maybe_cuda_var(y_batch, cuda=config.cuda)

            # Forward pass with a fresh hidden state, then gradient step
            model.zero_grad()
            output, hidden = model(
                x=x_var,
                hidden=new_hidden(model, batch_size=config.batch_size),
            )
            loss = criterion(
                output.contiguous().view(-1, config.vocab_size),
                y_var.view(-1),
            )
            loss.backward()
            # NOTE(review): clip_grad_norm is the pre-0.4 PyTorch API
            # (renamed clip_grad_norm_ later); left unchanged because the
            # rest of this file targets that era (see loss extraction below).
            torch.nn.utils.clip_grad_norm(model.parameters(), config.grad_clip)
            optimizer.step()

            # Record loss. NOTE(review): indexing a 0-dim tensor this way
            # only works on pre-0.4 PyTorch; newer versions use loss.item().
            loss_value = loss.data.cpu().numpy()[0]
            batch_loss += loss_value
        losses.append(batch_loss)

        # Print status
        maybe_print("Epoch={}/{}: Loss={}".format(
            epoch,
            config.num_epochs,
            batch_loss,
        ),
                    verbose=verbose)

        if verbose:
            print("Sample: ")
            print(
                indent_text(
                    sample_model(
                        config=config,
                        dataset=dataset,
                        model=model,
                        prime_text=sample_prime_text,
                        length=sample_length,
                    )))

        if config.save_every and epoch % config.save_every == 0:
            # Bug fix: the original had a trailing comma here that made
            # save_path a 1-tuple, and the log message lacked a {}
            # placeholder so the path was never printed.
            save_path = os.path.join(config.save_dir,
                                     "{:04d}.ckpt".format(epoch))
            maybe_print("Saving to {}".format(save_path), verbose=verbose)
            torch.save(model, save_path)

        # Decay Learning Rate once per epoch
        for param_group in optimizer.param_groups:
            param_group['lr'] *= config.decay_rate

    print("DONE TRAINING for {} Epochs".format(config.num_epochs))
    return dataset, model, losses
Beispiel #7
0
import config as char_rnn_config
import train as char_rnn_train
import sample as char_rnn_sample

# Demo script: train the char-RNN on tiny-shakespeare, then print a sample.
config = char_rnn_config.Config(
    cuda=False, data_dir="data/tinyshakespeare", save_dir=None,
    rnn_size=128, rnn_model="LSTM", num_layers=2,
    batch_size=64, seq_length=50, num_epochs=50,
    save_every=False, grad_clip=5., learning_rate=0.002,
    decay_rate=0.97, keep_prob=1.0, sampling_mode="weighted",
)

dataset, model, losses = char_rnn_train.train_model(config)

# Generate text from the trained model and show it.
sampled_text = char_rnn_sample.sample_model(
    config=config, dataset=dataset, model=model)
print(sampled_text)