Example #1
    return gradient_penalty


if __name__ == "__main__":
    MAX_EPOCH = 100
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")

    netG = Generator().to(device)
    netD = discriminator().to(device)
    optimizerD = optim.RMSprop(netD.parameters(), lr=1e-5)
    optimizerG = optim.RMSprop(netG.parameters(), lr=1e-5)
    config = clstm_config()
    train, test = get_dataloader(config.batch_size)

    #----------------------------- train
    for epoch in range(MAX_EPOCH):
        print(f'====== epoch {epoch} ======')
        netD.train()
        netG.train()

        for x, y in train:
            # -------- train D

            for parm in netD.parameters():
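Example #1 is truncated at both ends: the `return gradient_penalty` fragment at the top is the tail of a gradient-penalty helper whose body was cut off, and the final `for parm in netD.parameters():` loop has lost its body. For reference, below is a minimal self-contained sketch of the standard WGAN-GP gradient penalty; the helper name `compute_gradient_penalty` and the interpolation details are assumptions, not recovered from the original.

import torch

def compute_gradient_penalty(netD, real, fake, device):
    '''Standard WGAN-GP penalty: mean((||grad_x D(x_hat)||_2 - 1)^2).

    A generic sketch, not the original (truncated) helper. The lambda
    scaling (10 in the WGAN-GP paper) is left to the caller.
    '''
    # Random interpolation factor, broadcast over all non-batch dims.
    alpha = torch.rand(real.size(0), *([1] * (real.dim() - 1)), device=device)
    x_hat = (alpha * real + (1 - alpha) * fake).requires_grad_(True)

    d_out = netD(x_hat)
    grads = torch.autograd.grad(outputs=d_out, inputs=x_hat,
                                grad_outputs=torch.ones_like(d_out),
                                create_graph=True, retain_graph=True)[0]
    # Penalize deviation of the per-sample gradient norm from 1.
    gradient_penalty = ((grads.view(grads.size(0), -1)
                         .norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty

The truncated parameter loop is ambiguous from this excerpt alone: in WGAN-GP scripts it usually re-enables discriminator gradients (`parm.requires_grad = True`) before the D step, while in the weight-clipping WGAN variant it clamps each parameter instead; the original body cannot be recovered here.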
Example #2
import sys

import numpy as np
import tensorflow as tf  # TF 1.x API (tf.Session, tf.train.exponential_decay)

# Project-specific names (Generator, Discriminator, DataLoader, the *_config
# helpers and RANDOM_SEED) are defined elsewhere in the original source.

def display_progression_epoch(j, id_max):
    '''Print the progression through the current epoch as a percentage.'''
    batch_progression = int((j / id_max) * 100)
    # The carriage return rewrites the same console line on each call.
    sys.stdout.write(str(batch_progression) + ' % epoch\r')
    sys.stdout.flush()
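A typical call site (hypothetical; `num_batches` is not defined in this excerpt) invokes it once per batch:

for j in range(num_batches):
    display_progression_epoch(j, num_batches)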

def create_logdir(method, weight, rd):
    """Return the directory path for saving training logs, weights, biases, etc."""
    return "gan/train_logs/kdd/{}/{}/{}".format(weight, method, rd)

gen_config = generator_config()
dis_config = discriminator_config()
train_config = gan_training_config()
data_config = clstm_config()
data_loader = DataLoader(data_config)
rng = np.random.RandomState(RANDOM_SEED)

with tf.Graph().as_default():
    with tf.Session() as sess:
        generator = Generator(gen_config)
        discriminator = Discriminator(dis_config)
        generator.build()

        # Pretrain the clstm classifier inside the generator
        # Train procedure
        global_step = tf.Variable(0, name='global_step', trainable=False)
        # Learning rate decay
        starter_learning_rate = train_config.pretrain_learning_rate
        learning_rate = tf.train.exponential_decay(starter_learning_rate,