# Download a pretrained DCGAN from the PyTorch GAN zoo via torch.hub.
# NOTE(review): `torch` must be imported in a part of the file not shown here.
use_gpu = False  # keep the generator on CPU (see the .cpu() call further down)
gan_model = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub',
                           'DCGAN',
                           pretrained=True,
                           useGPU=use_gpu)

from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable, DeterministicVariable
import brancher.functions as BF

#import brancher.config as cfg
#cfg.set_device('gpu')
#print(cfg.device)

# Pull out the generator sub-network and pin it to CPU.
Gmodel = gan_model.netG.cpu()
# Wrap the generator as a Brancher function usable inside a probabilistic model.
# NOTE(review): wrapping via a lambda hides Gmodel's nn.Module identity (and
# therefore its parameters) from BrancherFunction — confirm this is intended
# before simplifying to BF.BrancherFunction(Gmodel).
decoder = BF.BrancherFunction(lambda x: Gmodel(x))

# Experiment configuration.
n_itr = 2000       # optimization iterations per repetition
image_size = 64    # side length of the generated images
N_rep = 5          # independent repetitions of the experiment

# One loss curve per inference method, accumulated across repetitions.
loss_list1, loss_list2, loss_list3 = [], [], []

# Image collections: ground truth, noisy observations, and one
# reconstruction list per inference method.
imagesGT, imagesNoise = [], []
images1, images2, images3 = [], [], []
for rep in range(N_rep):
    h_size = 120  # dimensionality of the random projection matrix below
    # NOTE(review): this statement is truncated in the source — the
    # DeterministicVariable call is cut off mid-argument-list at a paste seam.
    W1 = DeterministicVariable(np.random.normal(0., 0.2, (h_size, h_size)),
# ----- 示例#2 (Example #2 — scrape artifact separating pasted snippets) -----
        # NOTE(review): orphaned constructor body — the enclosing class header
        # and the `def __init__(...)` line are missing from the source.
        super(DecoderArchitecture, self).__init__()
        self.l1 = nn.Linear(latent_size, hidden_size)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(hidden_size, image_size) # Latent mean output
        self.l3 = nn.Linear(hidden_size, image_size) # Latent log sd output
        self.softplus = nn.Softplus()

    def __call__(self, x):
        """Map a latent code to the parameters of a Normal output distribution.

        Returns a dict with "mean" and a strictly positive "sd" (softplus of
        the raw log-sd head, floored at 0.01 for numerical stability).
        """
        hidden = self.relu(self.l1(x))
        return {
            "mean": self.l2(hidden),
            "sd": self.softplus(self.l3(hidden)) + 0.01,
        }


# Initialize encoder and decoders
# NOTE(review): EncoderArchitecture, latent_size and dataset come from parts
# of the file not shown here.
encoder = BF.BrancherFunction(EncoderArchitecture(image_size=image_size, latent_size=latent_size))
decoder = BF.BrancherFunction(DecoderArchitecture(latent_size=latent_size, image_size=image_size))

# Generative model: z ~ N(0, I); x ~ N(decoder(z)["mean"], decoder(z)["sd"])
z = NormalVariable(np.zeros((latent_size,)), np.ones((latent_size,)), name="z")
decoder_output = decoder(z)
x = NormalVariable(decoder_output["mean"], decoder_output["sd"], name="x")
model = ProbabilisticModel([x, z])

# Amortized variational distribution: q(z|x) parameterized by the encoder,
# with x drawn in minibatches of 50 from the dataset. The shared names
# ("x", "z") tie the variational variables to the generative ones.
Qx = EmpiricalVariable(dataset, batch_size=50, name="x", is_observed=True)
encoder_output = encoder(Qx)
Qz = NormalVariable(encoder_output["mean"], encoder_output["sd"], name="z")
model.set_posterior_model(ProbabilisticModel([Qx, Qz]))

# Joint-contrastive inference
# ----- 示例#3 (Example #3 — scrape artifact separating pasted snippets) -----
        # NOTE(review): orphaned fragment — the enclosing method definition is
        # missing from the source.
        return output_logits


# Experiment configuration.
N_repetitions = 5   # independent repetitions of the experiment
num_itr = 3000      # optimization iterations per repetition
N_ELBO = 10         # ELBO samples per evaluation
N_ELBO_ITR = 20     # iterations between ELBO evaluations
b_size = 200        # minibatch size

# One loss curve per inference method.
loss_list1, loss_list2, loss_list3 = [], [], []

for rep in range(N_repetitions):
    # # Initialize encoder and decoders
    # Fresh encoder/decoder pairs per repetition so runs are independent.
    # NOTE(review): the architecture classes and latent sizes are defined in
    # parts of the file not shown here, and the loop body is truncated at a
    # paste seam — the model construction that should follow is missing.
    encoder1 = BF.BrancherFunction(
        EncoderArchitecture1(image_size=image_size, latent_size3=latent_size3))
    encoder2 = BF.BrancherFunction(
        EncoderArchitecture2(latent_size2=latent_size2,
                             latent_size3=latent_size3))
    encoder3 = BF.BrancherFunction(
        EncoderArchitecture3(latent_size1=latent_size1,
                             latent_size2=latent_size2))

    decoder1 = BF.BrancherFunction(
        DecoderArchitecture1(latent_size1=latent_size1,
                             latent_size2=latent_size2))
    decoder2 = BF.BrancherFunction(
        DecoderArchitecture2(latent_size2=latent_size2,
                             latent_size3=latent_size3))
    decoder3 = BF.BrancherFunction(
        DecoderArchitecture3(latent_size3=latent_size3, image_size=image_size))
        # NOTE(review): orphaned fragment of a decoder `__call__` body — the
        # enclosing definition is missing from the source.
        output_mean = self.l1(x)
        return {"mean": output_mean}

# Experiment configuration.
N_repetitions = 15  # independent repetitions of the experiment
num_itr = 3000      # optimization iterations per repetition
N_ELBO = 10         # ELBO samples per evaluation
N_ELBO_ITR = 20     # iterations between ELBO evaluations

# One loss curve per inference method.
loss_list1, loss_list2, loss_list3 = [], [], []

for rep in range(N_repetitions):
    print("Repetition: {}".format(rep))
    # Initialize encoder and decoders
    # Fresh modules per repetition so runs are independent.
    encoder1 = BF.BrancherFunction(EncoderArchitecture1(image_size=image_size, latent_size3=latent_size3))
    encoder2 = BF.BrancherFunction(EncoderArchitecture2(latent_size2=latent_size2, latent_size3=latent_size3))
    encoder3 = BF.BrancherFunction(EncoderArchitecture3(latent_size1=latent_size1, latent_size2=latent_size2))

    decoder1 = BF.BrancherFunction(DecoderArchitecture1(latent_size1=latent_size1, latent_size2=latent_size2))
    decoder2 = BF.BrancherFunction(DecoderArchitecture2(latent_size2=latent_size2, latent_size3=latent_size3))
    decoder3 = BF.BrancherFunction(DecoderArchitecture3(latent_size3=latent_size3, image_size=image_size))

    # Generative model: hierarchical latents z1 -> z2 (-> z3, presumably),
    # with per-level prior standard deviations.
    z1sd = 1.5 #1
    z2sd = 0.25 #0.25
    z3sd = 0.15
    z1 = NormalVariable(np.zeros((latent_size1,)), z1sd*np.ones((latent_size1,)), name="z1")
    decoder_output1 = DeterministicVariable(decoder1(z1), name="decoder_output1")
    z2 = NormalVariable(BF.relu(decoder_output1["mean"]), z2sd*np.ones((latent_size2,)), name="z2")
    # NOTE(review): the loop body is truncated at a paste seam — the z3 level
    # (where z3sd would be used) is missing from the source.
    decoder_output2 = DeterministicVariable(decoder2(z2), name="decoder_output2")
        # NOTE(review): orphaned constructor fragment — the enclosing class
        # header and the `def __init__(...)` line are missing from the source.
        self.fc1 = nn.Linear(7, n_hidden)  # 7 scalar inputs: r, w0, w1, mx, my, sx, sy (see forward)
        self.fc2 = nn.Linear(n_hidden, 2)  # first output head (2 values)
        self.fc3 = nn.Linear(n_hidden, 2)  # second output head (2 values, made positive via softplus)

    def forward(self, r, w0, w1, mx, my, sx, sy):
        """Compute updated belief parameters from the 7 scalar inputs.

        Concatenates the inputs along dim 1, applies one hidden ReLU layer,
        and produces a 2-wide mean head (fc2) and a 2-wide positive sd head
        (fc3 + softplus). Returns the four columns
        (new mx, new my, new sx, new sy) as (batch, 1) tensors, matching
        `bayesian_update`, which indexes out[0]..out[3].

        Bug fixes vs. original: undefined `s` in torch.cat (typo for `sy`);
        fc3 previously consumed fc2's 2-wide output although it expects
        n_hidden inputs (shape error at runtime); the return referenced an
        undefined `y`.
        """
        h = F.relu(self.fc1(torch.cat((r, w0, w1, mx, my, sx, sy), 1)))
        mean = self.fc2(h)                # location parameters
        sd = F.softplus(self.fc3(h))      # scale parameters, strictly positive
        # Slice with 0:1 / 1:2 to keep the (batch, 1) shape of the inputs.
        return mean[:, 0:1], mean[:, 1:2], sd[:, 0:1], sd[:, 1:2]


# Instantiate the amortized inference network.
# NOTE(review): the InferenceNet class definition is not fully visible here.
net = InferenceNet()

# Variational bayesian update
# Wrap the network so it can operate on Brancher variables.
brancher_net = BF.BrancherFunction(net)


def bayesian_update(r, w0, w1, mx, my, sx, sy):
    """Run the inference net and return its first four outputs as a list
    (the updated belief parameters)."""
    out = brancher_net(r, w0, w1, mx, my, sx, sy)
    return [out[i] for i in range(4)]


# Model
T = 20  # number of time steps in the sequential model

sigma = 0.1  # fixed noise scale
# Time series of belief parameters, seeded with their t=0 values; the lists
# presumably grow by one entry per time step further down the file.
# NOTE(review): sx/sy start at 0. — if these are standard deviations they are
# presumably transformed to positive values downstream (cf. softplus in the
# inference net); confirm.
mx = [DeterministicVariable(0., "mx_0")]
my = [DeterministicVariable(0., "my_0")]
sx = [DeterministicVariable(0., "sx_0")]
sy = [DeterministicVariable(0., "sy_0")]
        # NOTE(review): orphaned method tail — the enclosing definition is
        # missing from the source.
        #output_log_sd = self.l3(h0)
        return {"mean": output_mean}


class DecoderArchitecture2(nn.Module):
    """Single-layer linear decoder from the second-level latent to image space."""

    def __init__(self, latent_size2, image_size):
        super(DecoderArchitecture2, self).__init__()
        # One affine map: latent code -> flattened image.
        self.l1 = nn.Linear(latent_size2, image_size)

    def __call__(self, x):
        """Return the output distribution parameters as a {"mean": ...} dict."""
        return {"mean": self.l1(x)}


# Initialize encoder and decoders
# NOTE(review): the encoder/decoder architecture classes and the latent sizes
# are defined in parts of the file not shown here.
encoder1 = BF.BrancherFunction(
    EncoderArchitecture1(image_size=image_size, latent_size2=latent_size2))
encoder2 = BF.BrancherFunction(
    EncoderArchitecture2(latent_size1=latent_size1, latent_size2=latent_size2))
decoder1 = BF.BrancherFunction(
    DecoderArchitecture1(latent_size1=latent_size1, latent_size2=latent_size2))
decoder2 = BF.BrancherFunction(
    DecoderArchitecture2(latent_size2=latent_size2, image_size=image_size))

# Generative model: two-level latent hierarchy z1 -> z2 with per-level prior
# standard deviations.
z1sd = 1.
z2sd = 0.5  #0.01
z1 = NormalVariable(np.zeros((latent_size1, )),
                    z1sd * np.ones((latent_size1, )),
                    name="z1")
decoder_output1 = DeterministicVariable(decoder1(z1), name="decoder_output1")
# NOTE(review): corrupted span — the NormalVariable call below is truncated at
# a paste seam and runs into an orphaned fragment of a network __init__ (a
# Conv2d-style call is cut off; only its padding argument and the following
# layer definitions remain).
z2 = NormalVariable(BF.relu(decoder_output1["mean"]),
                                  padding=1)
        self.f1 = torch.nn.ReLU()
        self.l2 = torch.nn.Linear(in_features=image_size**2 * out_channels,
                                  out_features=10)

    def __call__(self, x):
        """Apply the conv + activation feature extractor and return class logits."""
        features = self.f1(self.l1(x))
        # Flatten every non-batch dimension before the linear classifier head.
        shape = features.shape
        flat = features.view((shape[0], np.prod(shape[1:])))
        return self.l2(flat)


network = PytorchNetwork()

# Wrap the network so it can be used inside a Brancher probabilistic model.
brancher_network = BF.BrancherFunction(network)

# Data sampling model
# A single shared index variable slices both inputs and labels so each
# minibatch stays aligned.
# NOTE(review): RandomIndices, Empirical, input_variable, output_labels and
# dataset_size come from parts of the file not shown here.
minibatch_size = 4
minibatch_indices = RandomIndices(dataset_size=dataset_size,
                                  batch_size=minibatch_size,
                                  name="indices",
                                  is_observed=True)
x = Empirical(input_variable,
              indices=minibatch_indices,
              name="x",
              is_observed=True)
labels = Empirical(output_labels,
                   indices=minibatch_indices,
                   name="labels",
                   is_observed=True)