import numpy as np
import torch
import matplotlib.pyplot as plt

# Dimension of latent vector
zx = 4
zy = 4

maxIter = 100
# Geometric sequence of inflation coefficients: alpha[0] = 2**100, ..., alpha[-1] = 2.
# Their inverses sum to 1 - 2**(-maxIter) ~= 1, a schedule typical of
# ES-MDA-style iterative updates.
alpha = 2.0 ** np.arange(maxIter, 0, -1)
# print("alpha")
# print(alpha)
# print(alpha.shape)

# Prepare GAN generator
device = torch.device("cpu")
netG = netG(1, 1, 64, 5, 1)  # args: (nc, nz, ngf, gfs, ngpu); cf. Example #7
netG.load_state_dict(torch.load('netG_epoch_5.pth', map_location=device))
netG.to(device)
netG.eval()
torch.set_grad_enabled(False)

# Z is the observed heads through time
Z = np.loadtxt('ts_ref_gan.txt')
Z = Z.reshape(Z.shape[0], 1)

# Prepare latent_k_array: nr realizations of uniform noise in [-1, 1]
nr = 100  # number of realizations (value assumed here; cf. Example #2)
latent_k_array = torch.rand(nr, 1, zx, zy, device=device) * 2 - 1
print("latent_k_array")
print(latent_k_array)
print(latent_k_array.shape)
plt.matshow(latent_k_array[0][0])
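# The excerpt stops before the latent vectors are pushed through the
# generator. A minimal sketch of that forward pass, assuming this netG
# takes only the latent tensor (the conditional examples below pass a
# condition tensor as well):
k_fields = netG(latent_k_array).cpu().numpy().squeeze()
print(k_fields.shape)  # expected: (nr, H, W)
plt.matshow(k_fields[0])  # inspect the first realization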
Example #2
import matplotlib.pyplot as plt

nrow = 129
ncol = 129

nr = 100  # the number of realizations

# Dimension of latent vector
zx = 5
zy = 5
prior_weight = 2
LearningRate = 0.1

# Prepare GAN generator
device = torch.device("cpu")
netG = netG(1, 1, 64, 5, 1)  # args: (nc, nz, ngf, gfs, ngpu)
netG.load_state_dict(torch.load('netG_epoch_10.pth', map_location=device))
netG.to(device)
netG.eval()
torch.set_grad_enabled(False)

netD = netD(1, 64, 5, 1)  # args: (nc, ndf, dfs, ngpu)
netD.load_state_dict(torch.load('netD_epoch_10.pth', map_location=device))
netD.to(device)
netD.eval()
torch.set_grad_enabled(False)

Sample_Point = 10
Reference_k = np.loadtxt('Ref_ln_K.txt')
mask_k = np.zeros((nrow, ncol))
Sample = np.zeros((Sample_Point, 2))
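# The rest of this example is truncated in the source. prior_weight and
# LearningRate above suggest a gradient-based inversion in the latent space.
# A minimal sketch of such a loop, assuming an unconditional netG forward
# call that yields an nrow x ncol field, with the discriminator score used
# as a plausibility prior; sample_rows and sample_cols are hypothetical
# index arrays for the Sample_Point locations.
torch.set_grad_enabled(True)  # re-enable gradients for the latent variable
z = (torch.rand(1, 1, zx, zy, device=device) * 2 - 1).detach().requires_grad_(True)
optimizer = torch.optim.Adam([z], lr=LearningRate)
ref = torch.as_tensor(Reference_k, dtype=torch.float32, device=device)

for it in range(200):  # iteration count is an assumption
    optimizer.zero_grad()
    k_hat = netG(z).view(nrow, ncol)
    # Data misfit at the sampled locations
    misfit = ((k_hat[sample_rows, sample_cols] - ref[sample_rows, sample_cols]) ** 2).sum()
    # High discriminator score = plausible field, so minimize its negative
    prior = -netD(k_hat.view(1, 1, nrow, ncol)).mean()
    loss = misfit + prior_weight * prior
    loss.backward()
    optimizer.step()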
Example #3

# Tail of a truncated call; its opening lines are missing from this excerpt:
#                                    mirror=False,
#                                    batch_size=batch_size,
#                                    n_channel=nc)


# custom weights initialization called on netG and netD
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


netG = netG(nc, nz, ngf, gfs, ngpu)

netG.apply(weights_init)
if opt.netG != '':
    netG.load_state_dict(torch.load(opt.netG))
print(netG)

netD = netD(nc, ndf, dfs, ngpu=1)
netD.apply(weights_init)
if opt.netD != '':
    netD.load_state_dict(torch.load(opt.netD))
print(netD)

criterion = nn.BCELoss()

# Optimizers
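# The optimizer definitions are cut off here. A minimal sketch of the
# standard DCGAN-style choice; lr=0.0002 and betas=(0.5, 0.999) are the
# conventional defaults, not values taken from this excerpt.
import torch.optim as optim

optimizerD = optim.Adam(netD.parameters(), lr=0.0002, betas=(0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=0.0002, betas=(0.5, 0.999))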
Example #4
        # print("real_cpu.shape")
        # print(real_cpu.shape)
        # print("output.shape")
        # print(output.shape)
        # print("label.shape")
        # print(label.shape)
        # print()

        errD_real = criterion(output, label)
        errD_real.backward()
        D_x = output.mean().item()

        # train with fake
        noise = torch.rand(batch_size, nz, zx, zy, device=device)*2-1
        noise_condition, condition_mask = generate_condition(data)
        fake = netG(noise, noise_condition)
        # print("Ran generator")
        # print("noise.shape")
        # print(noise.shape)
        # print("noise_condition.shape")
        # print(noise_condition.shape)
        # print("fake.shape")
        # print(fake.shape)
        # print()

        # label.fill_(fake_label)
        output = netD(fake.detach())
        label = torch.full_like(output, fake_label, device=device)  # fake labels shaped to match the discriminator output
        # print("look here again")
        # print(fake.shape)
        # print(output.shape)
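        # The fragment ends before the fake-side loss. Assumed continuation,
        # following the standard DCGAN discriminator update (optimizerD as
        # sketched after "# Optimizers" above):
        errD_fake = criterion(output, label)
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        errD = errD_real + errD_fake
        optimizerD.step()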
Example #5

# noise = torch.rand(batch_size, nz, zx, zy, device=device)*2-1
noise = torch.rand(batch_size, 1, npx, npy, device=device) * 2 - 1
condition = generate_condition(reference_k_array)
# input_matrix.to(device)
print("noise matrix:")
print(noise)
print(noise.shape)
print()

# Turn off gradient calculation
torch.set_grad_enabled(False)

# First run of loop (prepares array for stacking)
# forward run the model
noise = torch.rand(batch_size, 1, npx, npy, device=device) * 2 - 1
output = netG(noise, condition)
print("Output matrix:")
print(output)
print(output.shape)
print()

output = output.cpu()
numpy_output = output.numpy()
numpy_output = numpy_output.squeeze()
print("numpy_output:")
print(numpy_output)
print(numpy_output.shape)
print()
plt.matshow(numpy_output)
# plt.show()
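# The "first run of loop" comment above implies further realizations are
# generated and stacked; a minimal sketch of that loop (assumed, not shown
# in the source; nr is the desired number of realizations):
import numpy as np

realizations = [numpy_output]
for _ in range(nr - 1):
    noise = torch.rand(batch_size, 1, npx, npy, device=device) * 2 - 1
    realizations.append(netG(noise, condition).cpu().numpy().squeeze())
stacked = np.stack(realizations)  # shape: (nr, H, W)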
        # print("real_cpu.shape")
        # print(real_cpu.shape)
        # print("output.shape")
        # print(output.shape)
        # print("label.shape")
        # print(label.shape)
        # print()

        errD_real = criterion(output, label)
        errD_real.backward()
        D_x = output.mean().item()

        # train with fake
        noise = torch.rand(batch_size, nz, zx, zy, device=device) * 2 - 1
        noise_condition, condition_mask = generate_condition(data)
        fake = netG(noise, noise_condition)
        # print("Ran generator")
        # print("noise.shape")
        # print(noise.shape)
        # print("noise_condition.shape")
        # print(noise_condition.shape)
        # print("fake.shape")
        # print(fake.shape)
        # print()

        # label.fill_(fake_label)
        output = netD(fake.detach())
        label = torch.full_like(
            output, fake_label,
            device=device)  # forcing label to match output count
        # print("look here again")
Example #7
nz = 1      # number of non-spatial dimensions in latent space z
ngf = 64    # initial number of filters for the generator
gfs = 5     # kernel size for the generator
ngpu = 1    # number of GPUs to use

# Input batch parameters
batch_size = 1
zx = 4
zy = 4

# File directory
outf = './train_data'
epoch = 5

# Load model
netG = netG(nc, nz, ngf, gfs, ngpu)
netG.load_state_dict(torch.load('%s/netG_epoch_%d.pth' % (outf, epoch), map_location=device))
netG.to(device)
netG.eval()
print(netG)
print()

# Generate input noise
noise = torch.rand(batch_size, nz, zx, zy, device=device) * 2 - 1
# input_matrix.to(device)
print("noise matrix:")
print(noise)
print(noise.shape)
print()

# Turn off gradient calculation
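# The excerpt cuts off after this comment. Mirroring the inference example
# above, the continuation presumably disables gradients and forward-runs
# the generator (assuming this netG takes only the noise tensor):
torch.set_grad_enabled(False)
output = netG(noise)
print("Output matrix:")
print(output)
print(output.shape)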