Example #1
    # running history of the losses collected during training
    losses = {
        'total': [],
        'kl': [],
        'bce': [],
        'dis': [],
        'gen': [],
        'test_bce': [],
        'class': [],
        'test_class': []
    }
    Ns = len(trainLoader) * opts.batchSize  # number of training samples
    Nb = len(trainLoader)  # number of batches per epoch
    ####### Start Training #######
    for e in range(opts.maxEpochs):
        # put the CVAE and the discriminator into training mode
        cvae.train()
        dis.train()

        # per-epoch loss accumulators
        epochLoss = 0
        epochLoss_kl = 0
        epochLoss_bce = 0
        epochLoss_dis = 0
        epochLoss_gen = 0
        epochLoss_class = 0

        TIME = time()  # start the epoch timer

        for i, data in enumerate(trainLoader, 0):

            x, y = data
            if cvae.useCUDA:
                x, y = x.cuda(), y.cuda()  # assumed body: move the batch to the GPU
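The excerpt ends inside the batch loop. As a minimal sketch (not the original code), the per-epoch accumulators above would typically be averaged over the Nb batches and appended to the losses history once the epoch finishes; the 'test_bce' and 'test_class' entries would be filled by a separate evaluation pass:

        # hypothetical epoch-end bookkeeping, assuming the accumulators hold
        # summed per-batch losses for this epoch
        losses['total'].append(epochLoss / Nb)
        losses['kl'].append(epochLoss_kl / Nb)
        losses['bce'].append(epochLoss_bce / Nb)
        losses['dis'].append(epochLoss_dis / Nb)
        losses['gen'].append(epochLoss_gen / Nb)
        losses['class'].append(epochLoss_class / Nb)
        print('Epoch %d done in %.2f s' % (e, time() - TIME))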
Example #2
    download=True,
    transform=transforms.Compose(
        [transforms.ToTensor()]
    )
)


train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size)

model = CVAE(input_size, hidden_size, latent_size, num_of_classes).to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)

for epoch in tqdm.tqdm(range(n_epochs)):
  
  model.train()
  train_loss = 0
  
  for x, y in train_dataloader:
    
    # flatten the images and move the batch to the target device
    x = x.view(-1, input_size).to(device)
    # one-hot encode the labels so the model can be conditioned on the class
    y = utils.y_to_onehot(y, batch_size, num_of_classes).to(device)

    optimizer.zero_grad()
    # forward pass: reconstruction parameters and latent posterior parameters
    x_mu, x_logvar, z, z_mu, z_logvar = model(x, y)
    loss = model.loss_calc(x, x_mu, z_mu, z_logvar)
    loss.backward()
    train_loss += loss.item()
    optimizer.step()
    
  model.eval()
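model.loss_calc is not shown in the snippet. A minimal, self-contained sketch of a standard CVAE objective with the same argument order (binary cross-entropy reconstruction term plus the KL divergence of the latent posterior against a standard normal prior) could look like the following; only the call above is from the source, everything else is an assumption:

import torch
import torch.nn.functional as F

def loss_calc(x, x_mu, z_mu, z_logvar):
    # Reconstruction term: binary cross-entropy between the decoded mean x_mu
    # (expected to lie in [0, 1]) and the flattened input x, summed over the batch.
    bce = F.binary_cross_entropy(x_mu, x, reduction='sum')
    # KL divergence between q(z | x, y) = N(z_mu, exp(z_logvar)) and the prior N(0, I).
    kld = -0.5 * torch.sum(1 + z_logvar - z_mu.pow(2) - z_logvar.exp())
    return bce + kld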
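Likewise, utils.y_to_onehot is only referenced, not shown; it presumably turns integer class labels into one-hot vectors for conditioning. A sketch matching the call signature used above (the body is an assumption):

import torch

def y_to_onehot(y, batch_size, num_of_classes):
    # One-hot encode integer labels y of shape (batch_size,).
    # Assumes every batch is full (e.g. drop_last=True or a dataset size
    # divisible by batch_size), since the output always has batch_size rows.
    onehot = torch.zeros(batch_size, num_of_classes)
    onehot.scatter_(1, y.view(-1, 1), 1.0)
    return onehot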