Example No. 1
def train_discriminator(discriminator, loss, optimizer, realdata, fakedata):
    size = realdata.size(0)
    optimizer.zero_grad()  # reset gradients

    # Discriminator loss: (1/m) * sum_{i=1..m} [ log D(x^(i)) + log(1 - D(G(z^(i)))) ]

    # 1. Train on real data (1st term of the loss above): D(x^(i))
    pred_real = discriminator(realdata)
    err_real = loss(pred_real, to_device(ones_target(size), device))  # real data has target 1
    err_real.backward()

    # 2. Train on fake data (2nd term of the loss above): D(G(z^(i)))
    pred_fake = discriminator(fakedata)
    err_fake = loss(pred_fake, to_device(zeros_target(size), device))  # fake data has target 0
    err_fake.backward()

    # 3. Update weights with the accumulated gradients
    optimizer.step()

    # 4. Return error and predictions for real and fake inputs
    return err_real + err_fake, pred_real, pred_fake
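
# For reference, minimal sketches of the target helpers this function relies on.
# In the repo they live in the project's helper module; the (size, 1) shape is an
# assumption based on how BCELoss is applied to the discriminator output above.
import torch

def ones_target(size):
    # labels for real data: a column of 1s
    return torch.ones(size, 1)

def zeros_target(size):
    # labels for fake data: a column of 0s
    return torch.zeros(size, 1)

def to_device(data, device):
    # move a tensor (or model) to the given device
    return data.to(device)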
Example No. 2
def train_generator(discriminator, loss, optimizer, fakedata):
    size = fakedata.size(0)
    optimizer.zero_grad()  # reset gradients

    # Original generator loss: (1/m) * sum_{i=1..m} log(1 - D(G(z^(i))))

    # 1. Score the fake data (generated outside this function): D(G(z))
    prediction = discriminator(fakedata)

    # 2. Calculate error and backpropagate.
    # Instead of minimizing log(1 - D(G(z))), maximize log(D(G(z)))
    # for stronger gradients early in training (the non-saturating loss).
    error = loss(prediction, to_device(generator_target(size), device))
    error.backward()

    # 3. Update weights with gradients
    optimizer.step()

    # 4. Return error
    return error
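
# A sketch of how the two routines above could be wired together for one epoch;
# noise() and the batch format are assumptions, and trainset, d_optimizer and
# g_optimizer are the objects instantiated in Example No. 4.
for real_batch in trainset:
    n = real_batch.size(0)

    # Train the discriminator on real data and on *detached* fake data,
    # so this step sends no gradients into the generator
    fake_data = generator(to_device(noise(n), device)).detach()
    d_error, d_pred_real, d_pred_fake = train_discriminator(
        discriminator, loss, d_optimizer, real_batch, fake_data)

    # Train the generator on fresh, non-detached fake data so gradients
    # flow from the discriminator's verdict back into the generator
    fake_data = generator(to_device(noise(n), device))
    g_error = train_generator(discriminator, loss, g_optimizer, fake_data)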
Example No. 3
def tb_update(update_frequency, epoch, total_epochs, batch_num, trainset_len,
              generator, logger, n_test_samples, d_error, g_error, d_pred_real,
              d_pred_fake):
    # Generate noise samples for evaluating the generator
    test_noise = to_device(noise(n_test_samples), device)

    if batch_num % update_frequency == 0:
        generator.eval()  # switch to inference mode for sampling
        test_samples = vectors_to_samples(generator(test_noise)).data
        generator.train()

        logger.log_images(test_samples.cpu(), n_test_samples, epoch,
                          batch_num, trainset_len)

        # Display status logs
        logger.display_status(epoch, total_epochs, batch_num, trainset_len,
                              d_error, g_error, d_pred_real, d_pred_fake)
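
# tb_update depends on a noise() helper from the project; a minimal sketch,
# assuming a flat standard-normal latent vector (NOISE_DIM is hypothetical here
# and would have to match the generator's input size):
import torch

NOISE_DIM = 100

def noise(n):
    # n latent vectors drawn from N(0, 1)
    return torch.randn(n, NOISE_DIM)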
Example No. 4
import torch
from torch.nn import BCELoss
from torch.optim import Adam
from torch.utils.data import DataLoader

import dataset  # project-local modules
import functional
import model

# Parameters
DATASET_NAME = 'EPiano'
SEQ_LENGTH = 600
BATCH_SIZE = 100
D_LR = 0.00008
G_LR = 0.00001
NUM_EPOCHS = 100
N_TEST_SAMPLES = 2
UPDATE_FREQUENCY = 50
SAVE_FREQUENCY = 60000

# Instantiate dataset, discriminator & generator
trainset = functional.DeviceDataLoader(
    DataLoader(dataset.EpianoDataset(SEQ_LENGTH), BATCH_SIZE, shuffle=True),
    functional.device)  # wrapped for GPU
discriminator = functional.to_device(model.Discriminator(SEQ_LENGTH),
                                     functional.device)  # wrapped for GPU
generator = functional.to_device(model.Generator(SEQ_LENGTH),
                                 functional.device)  # wrapped for GPU
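
# DeviceDataLoader comes from the project's functional module; a minimal sketch
# of the usual pattern (an assumption, not the repo's exact implementation):
class DeviceDataLoader:
    """Wrap a DataLoader and move each batch to a device as it is yielded."""

    def __init__(self, dl, device):
        self.dl = dl
        self.device = device

    def __iter__(self):
        for batch in self.dl:
            yield functional.to_device(batch, self.device)

    def __len__(self):
        return len(self.dl)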

# Instantiate optimizer and loss
d_optimizer = Adam(discriminator.parameters(), lr=D_LR)
g_optimizer = Adam(generator.parameters(), lr=G_LR)
loss = BCELoss()
'''
Binary Cross Entropy Loss:
L = {l_1, l_2, ..., l_N}^T,  l_i = -w_i * [ y_i * log(v_i) + (1 - y_i) * log(1 - v_i) ]
The mean reduction computes sum(L) / N.
Because we don't need per-element weights, set w_i = 1 for all i.
'''
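
# A quick numeric check of the formula against PyTorch (arbitrary values):
v = torch.tensor([0.9, 0.2])    # predictions in (0, 1)
y = torch.tensor([1.0, 0.0])    # targets
manual = -(y * torch.log(v) + (1 - y) * torch.log(1 - v)).mean()
print(manual)                   # tensor(0.1643)
print(loss(v, y))               # same value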

# Instantiate logger and create new save directory