Example #1
0
def translate_z(zs, model):
    """Decode a batch of latent codes and save the result as one image file.

    Args:
        zs: Latent codes of shape ``(batch, latent_dim)`` — accepted as a
            ``np.ndarray`` (converted to a float tensor) or a ``torch.Tensor``.
        model: Trained model exposing a ``decode(zs)`` method whose output can
            be reshaped to ``(batch, 1, img_size, img_size)``.

    Side effects:
        Writes ``results/translation_z/<run-time>/z_translation.png``.
        NOTE(review): image size comes from the module-level ``args`` —
        assumes ``args.img_size`` matches the decoder output; confirm.
    """
    if isinstance(zs, np.ndarray):
        zs = torch.from_numpy(zs).float()
    running_time = get_current_time()
    save_path_prefix = 'results/translation_z/' + running_time + '/'
    sample = model.decode(zs).cpu()
    # exist_ok=True removes the check-then-create race of the original
    # `if not os.path.exists(...)` guard (safe if another process creates it).
    os.makedirs(save_path_prefix, exist_ok=True)
    save_image(sample.view(zs.shape[0], 1, args.img_size, args.img_size),
               save_path_prefix + 'z_translation' + '.png')
Example #2
0
    def __init__(self, G, FE, D, Q, to_inverse, is_debugging=False):
        """Set up the trainer: move the four networks to the best device
        and prepare a timestamped results directory.

        Args:
            G: Generator network (any module with ``.to(device)``).
            FE: Shared front-end / feature-extractor network.
            D: Discriminator network.
            Q: Latent-code recognition network.
            to_inverse: Identifier baked into the results path
                (``./results/<to_inverse>/<timestamp>``).
            is_debugging: When True, skip creating the results directory
                so debug runs leave no artifacts on disk.
        """
        has_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if has_cuda else "cpu")
        self.G = G.to(self.device)
        self.FE = FE.to(self.device)
        self.D = D.to(self.device)
        self.Q = Q.to(self.device)
        self.batch_size = 100
        self.current_time = gm.get_current_time()
        self.to_inverse = to_inverse
        self.saving_path = './results/' + str(self.to_inverse) + '/' + self.current_time
        self.is_debugging = is_debugging
        # exist_ok=True replaces the racy `not os.path.exists(...)` check;
        # the debugging guard is preserved (debug runs create nothing).
        if not self.is_debugging:
            os.makedirs(self.saving_path, exist_ok=True)
        self.transform_pad = 4
        self.img_size = 28
Example #3
0
# Decoder output bias (hidden -> X), initialized to zeros; trained via the
# `params` list handed to the optimizer below.  Presumably X_dim is the
# flattened input dimensionality — defined earlier in the file.
bhx = Variable(torch.zeros(X_dim), requires_grad=True)


def P(z):
    """Decoder: map latent codes ``z`` to reconstructions ``X`` in (0, 1).

    One hidden layer with ReLU, then a sigmoid output layer; the bias rows
    are tiled across the batch before being added.
    """
    batch = z.size(0)
    hidden_pre = z @ Wzh + bzh.repeat(batch, 1)
    h = nn.relu(hidden_pre)
    out_pre = h @ Whx + bhx.repeat(h.size(0), 1)
    return nn.sigmoid(out_pre)


# =============================== TRAINING ====================================

# Every learnable weight/bias of the model (encoder Q and decoder P use
# these module-level tensors directly), collected for the optimizer.
params = [Wxh, bxh, Whz_mu, bhz_mu, Whz_var, bhz_var, Wzh, bzh, Whx, bhx]

# Adam over the raw parameter list — manual parameters, no nn.Module.
# `lr` is presumably a module-level hyperparameter defined above.
solver = optim.Adam(params, lr=lr)

# Run identifier (name suggests a timestamp) used to tag this run's outputs.
running_time = get_current_time()

for it in range(100000):
    X, _ = mnist.train.next_batch(mb_size, shuffle=True)
    print('X: ', X.shape)
    # plot_a_numpy_array(X[0])
    X = Variable(torch.from_numpy(X))

    # Forward
    z_mu, z_var = Q(X)
    z = sample_z(z_mu, z_var)
    X_sample = P(z)

    # Loss
    recon_loss = nn.binary_cross_entropy(X_sample, X,
                                         size_average=False) / mb_size