def genXzbyModel(sigma, g, N, model: CVAEModel, samples=10000):
    # broadcast the scalar conditions sigma and g to (samples, 1) tensors on the GPU
    sigma = torch.full(size=(samples, 1),
                       fill_value=sigma,
                       dtype=torch.float32).to(torch.device('cuda:0'))
    g = torch.full(size=(samples, 1), fill_value=g,
                   dtype=torch.float32).to(torch.device('cuda:0'))
    # condition the decoder on [sigma, g, log N], draw samples (not just the mean),
    # keep the first output component and clamp it to the valid range [-1, 1]
    return torch.clamp(
        model.sampleDecoder(torch.cat([sigma, g, torch.log(N)], dim=1),
                            onlyMu=False)[:, 0], -1, 1)
Example 2
def genNbyModel(sigma, g, model: CVAEModel, samples=10000):
    # broadcast the scalar conditions sigma and g to (samples, 1) tensors on the GPU
    sigma = torch.full(size=(samples, 1),
                       fill_value=sigma,
                       dtype=torch.float32).to(torch.device('cuda:0'))
    g = torch.full(size=(samples, 1), fill_value=g,
                   dtype=torch.float32).to(torch.device('cuda:0'))
    # the decoder predicts log N; clamp it at 0 so that N >= 1
    logN = torch.clamp_min(model.sampleDecoder(torch.cat([sigma, g], dim=1)), 0)
    # map back to N and round to an integer count (biased upwards by 0.49)
    return torch.round(torch.exp(logN) + 0.49)
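
A minimal usage sketch (not part of the original snippets) chaining the two generators: genNbyModel draws a batch of counts N for a given (sigma, g) condition, and genXzbyModel then conditions on those counts through log N. The sigma and g values are hypothetical, and lenModel and pathModel are assumed to be two separately trained CVAEModel instances whose condition dimensions match the concatenations above (2 and 3 respectively, as in lenGenFactory and pathGenFactory further down).

sigma_val, g_val = 0.5, 0.9  # hypothetical condition values
N = genNbyModel(sigma_val, g_val, lenModel, samples=10000)        # counts N, assumed shape (10000, 1)
Xz = genXzbyModel(sigma_val, g_val, N, pathModel, samples=10000)  # samples clamped to [-1, 1]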
Example 3
def create_initial_state(model: CVAEModel, epochs=400):
    '''
    Creates the initial training state for a specific model.
    The optimizer used is Adamax with a starting lr of 0.01.
    The scheduler is a custom waving scheduler whose learning rate decays
    exponentially to 0.005 of its starting value over the given number of epochs.
    '''
    optimizer = torch.optim.Adamax(model.parameters(), lr=0.01)
    # choose gamma so that gamma ** epochs == 0.005
    gamma = np.exp(np.log(0.005) / epochs)
    scheduler = WavingScheduler(optimizer, 400, gamma=gamma)
    return TrainerState(model, optimizer, scheduler)
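
As a quick check of the decay schedule (assuming the scheduler multiplies the learning rate by gamma once per epoch, as PyTorch's ExponentialLR does):

import numpy as np

gamma = np.exp(np.log(0.005) / 400)  # ~0.9868 per epoch
print(0.01 * gamma ** 400)           # ~5.0e-05: the lr ends at 0.005 of its starting value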
Example 4
    def advance(self, data: DataManager, batch_size=1024):
        '''
        Executes one training epoch and returns the average loss over its batches.
        '''
        self.model.train()  # set in train mode

        loss_accum, KL_accum, batches = 0.0, 0.0, 0

        for c, x in data.get_batches(batch_size):  # data shuffled in batches
            self.optimizer.zero_grad()
            latent_mu, latent_logVar, z, x_mu, x_logVar = self.model(c, x)

            # batch averages of the reconstruction log-likelihood and the KL divergence
            posterior_ll = torch.mean(
                CVAEModel.posterior_LogLikelihood(x, x_mu, x_logVar), dim=0)
            kl_div = torch.mean(
                CVAEModel.KL_divergence(latent_mu, latent_logVar), dim=0)

            # maximize the ELBO, i.e. minimize its negative
            elbo = posterior_ll - kl_div
            loss = -elbo

            loss_accum += loss.item()
            KL_accum += kl_div.item()
            batches += 1

            # perform optimization
            loss.backward()

            self.optimizer.step()

        # record the learning rate used for this epoch, then advance the scheduler
        self.lrs.append(self.optimizer.param_groups[0]['lr'])

        # self.scheduler.step(loss_accum / batches)
        self.scheduler.step()

        self.loss_history.append(loss_accum / batches)
        self.kl_history.append(KL_accum / batches)

        return loss_accum / batches  # loss evaluation average
# Factories for the three CVAE variants; only the first two CVAEModel dimension arguments differ.
def scatGenFactory(depth, width, activation, latent):
    return CVAEModel(7, 6, latent, width, depth, activation)


def pathGenFactory(depth, width, activation, latent):
    return CVAEModel(3, 3, latent, width, depth, activation)


def lenGenFactory(depth, width, activation, latent):
    return CVAEModel(2, 1, latent, width, depth, activation)
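
A hedged end-to-end sketch of how the snippets above could fit together, assuming the advance method shown in example 4 belongs to TrainerState and that a DataManager instance is already available; the hyper-parameter values and the activation choice are hypothetical.

# hypothetical hyper-parameters; the activation argument is assumed to be a torch.nn module class
model = lenGenFactory(depth=4, width=128, activation=torch.nn.ELU, latent=2)
model = model.to(torch.device('cuda:0'))  # the generator helpers above expect CUDA tensors
state = create_initial_state(model, epochs=400)

data = ...  # a DataManager instance; its construction is not shown in these snippets

for epoch in range(400):
    avg_loss = state.advance(data, batch_size=1024)
    print(f'epoch {epoch}: avg loss {avg_loss:.4f}')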