Example #1
0
def models_init():
    """Construct the discriminator and generator and initialize their weights.

    Returns:
        tuple: ``(netD, netG)`` — the discriminator and generator, each
        passed through ``apply_weights`` before being returned.
    """
    discriminator = Discriminator()
    generator = Generator()

    # Apply the shared weight-initialization scheme to both networks.
    for network in (discriminator, generator):
        apply_weights(network)

    return discriminator, generator
Example #2
0
# Preprocessing pipeline: resize to `image_size`, convert to a tensor,
# then normalize the single channel from [0, 1] to [-1, 1].
my_transforms = transforms.Compose([
    transforms.Resize(image_size),
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (0.5, )),
])

# MNIST training split, downloaded to ./dataset/ on first run.
dataset = datasets.MNIST(root="dataset/",
                         train=True,
                         transform=my_transforms,
                         download=True)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Prefer GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Create discriminator and generator and move them to the chosen device.
netD = Discriminator(channels_img, features_d).to(device)
netG = Generator(channels_noise, channels_img, features_g).to(device)

# Set up one Adam optimizer each for D and G.
optimizerD = optim.Adam(
    netD.parameters(), lr=lr,
    betas=(0.5, 0.999))  # beta1 lowered to 0.5 (common DCGAN setting)
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(0.5, 0.999))

# Put both networks in training mode (enables dropout/batch-norm updates).
netG.train()
netD.train()

criterion = nn.BCELoss(
)  # pred and target must have matching shape; flattened with reshape(-1) during training

# Target label value used for real samples.
real_label = 1
Example #3
0
# Dimensionality of the latent noise vector.
noise_dim = 2

# Random Initialization: fix seeds so runs are reproducible.
torch.manual_seed(1)
np.random.seed(1)

# Dataloader over paired .npy files.
# NOTE(review): assumes train_input.npy holds actions and train_output.npy
# holds states — confirm against SyntheticDataset's constructor.
action_dir = '../data/train_input.npy'
state_dir = '../data/train_output.npy'
dataset = SyntheticDataset(action_dir, state_dir)
loader = data.DataLoader(dataset, batch_size=10)

# Models: encoder/decoder pair plus a discriminator.
encoder = Encoder()
decoder = Decoder()
discriminator = Discriminator()
# Initialize all weights from N(0.0, 0.02), the usual GAN convention.
decoder.weight_init(mean=0.0, std=0.02)
encoder.weight_init(mean=0.0, std=0.02)
discriminator.weight_init(mean=0.0, std=0.02)

# Loss functions: L1, mean-squared error, and binary cross-entropy.
l1, mse, bce = nn.L1Loss(), nn.MSELoss(), nn.BCELoss()

# Optimizer: a single Adam instance updates both encoder and decoder
# (the "generator" side) via two parameter groups; beta1 = 0.5.
G_optimizer = optim.Adam([{
    'params': encoder.parameters()
}, {
    'params': decoder.parameters()
}],
                         lr=lr_rate,
                         betas=(0.5, 0.999))
Example #4
0
# Preprocessing pipeline: resize to `image_size`, convert to a tensor,
# then normalize the single channel from [0, 1] to [-1, 1].
my_transforms = transforms.Compose([
    transforms.Resize(image_size),
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (0.5, )),
])

# MNIST training split, downloaded to ./dataset/ on first run.
dataset = datasets.MNIST(root="dataset/",
                         train=True,
                         transform=my_transforms,
                         download=True)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Prefer GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Create discriminator and generator and move them to the chosen device.
netD = Discriminator(channels_img, features_d).to(device)
netG = Generator(channels_noise, channels_img, features_g).to(device)

# One Adam optimizer each for D and G; beta1 = 0.5 is the DCGAN setting.
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(0.5, 0.999))

# Put both networks in training mode.
netG.train()
netD.train()

# Binary cross-entropy loss on the discriminator's sigmoid outputs.
criterion = nn.BCELoss()

# Target label values for real and fake samples.
real_label = 1
fake_label = 0

# Fixed noise batch (64 vectors) reused to visualize generator progress.
fixed_noise = torch.randn(64, channels_noise, 1, 1).to(device)
Example #5
0
# Dimensionality of the latent noise vector.
noise_dim = 2

# Random Initialization: fix seeds so runs are reproducible.
torch.manual_seed(1)
np.random.seed(1)

# Dataloader over paired .npy files.
# NOTE(review): assumes train_input.npy holds actions and train_output.npy
# holds states — confirm against SyntheticDataset's constructor.
action_dir = '../data/train_input.npy'
state_dir = '../data/train_output.npy'
dataset = SyntheticDataset(action_dir, state_dir)
loader = data.DataLoader(dataset, batch_size=10)

# Models: encoder/decoder pair plus a discriminator.
encoder = Encoder()
decoder = Decoder()
discriminator = Discriminator()
# Initialize all weights from N(0.0, 0.02), the usual GAN convention.
decoder.weight_init(mean=0.0, std=0.02)
encoder.weight_init(mean=0.0, std=0.02)
discriminator.weight_init(mean=0.0, std=0.02)

# Loss functions: L1, mean-squared error, and binary cross-entropy.
l1, mse, bce = nn.L1Loss(), nn.MSELoss(), nn.BCELoss()

# Optimizers: one Adam updates encoder+decoder jointly (generator side),
# a second Adam updates the discriminator; beta1 = 0.5 for both.
G_optimizer = optim.Adam([{'params': encoder.parameters()}, {'params': decoder.parameters()}], lr=lr_rate, betas=(0.5, 0.999))
D_optimizer = optim.Adam(discriminator.parameters(), lr=lr_rate, betas=(0.5, 0.999))

# Train Networks: global step counter across epochs.
step = 0
for epoch in range(num_epochs):
    for i, inputs in enumerate(loader):
Example #6
0
        print(tmp)


# Hyperparameters
EPOCHS = 100000  # number of training epochs
lr = 0.0001  # Adam learning rate for both networks
batch_size = 128
input_dim = 3  # dimensionality of the discriminator's input samples
latent_dim = 5  # size of the noise vector consumed by the generator

# Choosing device: prefer GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using {device}.')

# Creating the Discriminator and Generator on the chosen device.
netD = Discriminator(input_dim).to(device)
netG = Generator(latent_dim).to(device)

# Setting the optimizers (default Adam betas here, no beta1 override).
optimD = optim.Adam(netD.parameters(), lr=lr)
optimG = optim.Adam(netG.parameters(), lr=lr)

# Setting the loss function: binary cross-entropy.
criterion = nn.BCELoss().to(device)

# Training is gated off by default; set to True to run the loop below.
training = False
if training:
    netD.train()
    netG.train()
    half_batch = batch_size // 2  # half of a batch (its use is not shown in this excerpt)
Example #7
0
# Random Initialization: fix seeds so runs are reproducible.
torch.manual_seed(1)
np.random.seed(1)

# Dataloader over paired .npy files.
# NOTE(review): assumes train_input.npy holds actions and train_output.npy
# holds states — confirm against SyntheticDataset's constructor.
action_dir = '../data/train_input.npy'
state_dir = '../data/train_output.npy'
dataset = SyntheticDataset(action_dir, state_dir)
loader = data.DataLoader(dataset, batch_size=10)

# Models: a variational encoder, a generator, and two discriminators —
# presumably one per branch (VAE path vs. LR path, as the names suggest);
# all weights drawn from N(0.0, 0.02).
encoder = VEncoder()
encoder.weight_init(mean=0.0, std=0.02)
generator = Generator()
generator.weight_init(mean=0.0, std=0.02)
D_VAE = Discriminator()
D_VAE.weight_init(mean=0.0, std=0.02)
D_LR = Discriminator()
D_LR.weight_init(mean=0.0, std=0.02)

# Loss functions: L1, mean-squared error, and binary cross-entropy.
l1, mse, bce = nn.L1Loss(), nn.MSELoss(), nn.BCELoss()

# Optimizers: a separate Adam per network, beta1 = 0.5 for each.
optimizer_E = torch.optim.Adam(encoder.parameters(),
                               lr=lr_rate,
                               betas=(0.5, 0.999))
optimizer_G = torch.optim.Adam(generator.parameters(),
                               lr=lr_rate,
                               betas=(0.5, 0.999))
optimizer_D_VAE = torch.optim.Adam(D_VAE.parameters(),