Code Example #1
import wandb
from tqdm import trange

# Names such as wandb_kwargs, alpha, beta, and the model/loss helpers are
# module-level definitions elsewhere in the project.
def train_autoencoder(epochs: int = 250, batch_size: int = 64, num_workers: int = 4, track: bool = False, plot_images: int = 0):
    """Perform the training loop for the cVAE; if plot_images > 0, write a sample grid every plot_images epochs."""
    if track:
        wandb.login()  # reads the API key from the environment (WANDB_API_KEY); never hard-code secrets
        wandb.init(**wandb_kwargs)
    encoder = ConditionalEncoder().cuda()
    decoder = ConditionalDecoder().cuda()
    classifier = ImageClassifier().cuda()
    classifier.load()
    classifier.eval()
    classifier.use_label_hierarchy()
    make_data_loader = MakeDataLoader(augmented=True)
    data_loader = make_data_loader.get_data_loader_train(batch_size=batch_size,
                                                         shuffle=True,
                                                         num_workers=num_workers)
    for epoch in trange(epochs, desc="epochs"):
        for images, labels in data_loader:
            images, labels = images.cuda(), labels.cuda()
            images = images * 2 - 1  # rescale (0,1) to (-1,1)

            latent_mu, latent_sigma = encoder(images, labels)
            latent = gaussian_sampler(latent_mu, latent_sigma)  # reparameterization trick
            generated_images = decoder(latent, labels)
            generated_labels = classifier(generated_images)

            decoder.zero_grad()
            encoder.zero_grad()
            loss_recon_ = loss_reconstruction(images, generated_images)
            loss_kl_ = alpha * loss_kl([latent_mu, latent_sigma])      # weighted KL regularizer
            loss_class_ = beta * loss_class(labels, generated_labels)  # weighted classifier-consistency term
            loss = loss_recon_ + loss_kl_ + loss_class_
            loss.backward()

            encoder.optimizer.step()
            decoder.optimizer.step()
        encoder.save()
        decoder.save()
        if track:
            log = {
                "loss reconstruction" : loss_recon_.item(),
                "loss KL": loss_kl_.item(),
                "loss class": loss_class_.item(),
                "loss": loss.item()
            }
            wandb.log(log)
        if plot_images and epoch % plot_images == 0:  # write a sample grid every plot_images epochs
            width = min(8, len(generated_images))
            write_generated_galaxy_images_iteration(iteration=epoch,
                                                    images=generated_images,
                                                    width=width,
                                                    height=len(generated_images) // width,
                                                    file_prefix="generated_cVAE")
    if track:
        wandb.finish()
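
The helpers gaussian_sampler and loss_kl are defined elsewhere in the project and are not shown on this page. A minimal sketch of the standard reparameterization trick and the analytic Gaussian KL term, assuming latent_sigma holds standard deviations (an assumption, not confirmed by the excerpt):

import torch

def gaussian_sampler(mu: torch.Tensor, sigma: torch.Tensor) -> torch.Tensor:
    """Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I)."""
    eps = torch.randn_like(sigma)
    return mu + sigma * eps

def loss_kl(latent: list) -> torch.Tensor:
    """Analytic KL( N(mu, sigma^2) || N(0, 1) ), summed over latent dims, averaged over the batch."""
    mu, sigma = latent
    kl = 0.5 * torch.sum(mu**2 + sigma**2 - 2 * torch.log(sigma) - 1, dim=1)
    return kl.mean()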
Code Example #2
def train_classifier_on_hyperparameters(learning_rate_init=learning_rate_init,
                                        gamma=gamma,
                                        seed_parameter=seed_parameter,
                                        track=track):
    """Train the classifier with the given hyperparameters; defaults come from module-level settings."""
    hyperparameter_dict = {
        "lr_init": learning_rate_init,
        "lr_gamma": gamma,
        "seed_parameter": seed_parameter,
    }
    wandb_kwargs.update({"config": hyperparameter_dict})
    wandb_kwargs["name"] = f"lr {learning_rate_init:.3f}, gamma{gamma:.4f}"

    make_data_loader = MakeDataLoader(N_sample=N_sample)
    classifier = Classifier(
        seed=seed_parameter,
        gamma=gamma,
        sample_variance_threshold=sample_variance_threshold,
        optimizer=optimizer,
        optimizer_kwargs=optimizer_kwargs,
        learning_rate_init=learning_rate_init,
        weight_loss_sample_variance=weight_loss_sample_variance,
        evaluation_steps=evaluation_steps,
        considered_groups=considered_groups,
        N_batches_test=N_batches_test,
    ).to(device)

    # Load weights before wrapping in DataParallel, which would hide custom
    # methods such as .load() behind the wrapper.
    if reload:
        classifier.load()
        classifier.use_label_hierarchy()

    if N_gpus > 1 and device.type == "cuda":
        classifier = torch.nn.DataParallel(classifier)

    # Uncomment to trace NaN/Inf gradients while debugging:
    # with torch.autograd.detect_anomaly():
    if track:
        train_classifier_tracked(classifier,
                                 make_data_loader,
                                 epochs=epochs,
                                 save=True,
                                 batch_size=batch_size,
                                 wandb_kwargs=wandb_kwargs,
                                 track=True)
    else:
        train_classifier(classifier,
                         make_data_loader,
                         epochs=epochs,
                         save=True,
                         batch_size=batch_size)
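
A hypothetical driver for a small grid search over this function; the grid values below are illustrative, not from the project:

# Hypothetical sweep; learning rates and gammas are illustrative values only.
for lr in (1e-3, 3e-4, 1e-4):
    for g in (0.95, 0.99):
        train_classifier_on_hyperparameters(learning_rate_init=lr,
                                            gamma=g,
                                            seed_parameter=42,
                                            track=True)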
Code Example #3
File: training.py | Project: shackste/galaxy-generator
    "name" : f"lr_G {lr_generator}, lr_D {lr_discriminator}",  # bottom level identifier, label of graph in UI
    "config" : hyperparameter_dict, # dictionary of used hyperparameters
}

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")  # uncomment to force CPU

generator = Generator(latent_dim=latent_dim, labels_dim=labels_dim, G_lr=lr_generator).to(device)
discriminator = Discriminator(labels_dim=labels_dim, D_lr=lr_discriminator).to(device)

if reload:
    generator.load()
    # discriminator.load()
if conditional:
    classifier = ImageClassifier().to(device)
    classifier.load()  # use pretrained classifier
    classifier.eval()  # the classifier is not trained here; keep it in evaluation mode
    classifier.use_label_hierarchy()

loss_class = torch.nn.MSELoss()


def train_discriminator(images: torch.Tensor,
                        labels: torch.Tensor,
                        optimizer_step: bool = True):
    latent = generate_latent(labels.shape[0], latent_dim, sigma=False)
    labels_fake = generate_labels(labels.shape[0])
    if not conditional:
        labels_fake[:] = 0  # unconditional setup: blank out the labels
    generated_images = generator(latent, labels_fake)
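    # --- Sketch of a typical continuation; the excerpt ends above. ---
    # Assumed API (not the project's actual code): the discriminator returns
    # sigmoid scores and carries its own optimizer, as encoder/decoder do in
    # Code Example #1.
    discriminator.zero_grad()
    prediction_real = discriminator(images, labels)
    prediction_fake = discriminator(generated_images.detach(), labels_fake)  # detach so the generator gets no gradient
    loss_real = torch.nn.functional.binary_cross_entropy(prediction_real, torch.ones_like(prediction_real))
    loss_fake = torch.nn.functional.binary_cross_entropy(prediction_fake, torch.zeros_like(prediction_fake))
    loss = loss_real + loss_fake
    loss.backward()
    if optimizer_step:
        discriminator.optimizer.step()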
Code Example #4
import torch

from image_classifier import ImageClassifier
from dataset import MakeDataLoader

classifier = ImageClassifier()
classifier.load()
classifier.eval()
classifier.use_label_hierarchy()
make_data_loader = MakeDataLoader()
data_loader = make_data_loader.get_data_loader_valid(batch_size=64,
                                                     shuffle=True,
                                                     num_workers=4)
with torch.no_grad():  # inference only; no gradients needed
    for images, labels in data_loader:
        predicted_labels = classifier(images)
        mse = torch.mean((labels - predicted_labels) ** 2)
        print("rmse", torch.sqrt(mse))
        print("L1", torch.mean(torch.abs(labels - predicted_labels)))
        break  # inspect a single validation batch
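
To average the same metrics over the whole validation set rather than a single batch, a sketch along these lines would work:

squared_error_sum, abs_error_sum, n = 0.0, 0.0, 0
with torch.no_grad():
    for images, labels in data_loader:
        predicted_labels = classifier(images)
        squared_error_sum += torch.sum((labels - predicted_labels) ** 2).item()
        abs_error_sum += torch.sum(torch.abs(labels - predicted_labels)).item()
        n += labels.numel()
print("rmse", (squared_error_sum / n) ** 0.5)
print("L1", abs_error_sum / n)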