Example #1
def make_inference_net():
    shared = torch.nn.Sequential(torch.nn.Linear(dim_x, 512), torch.nn.ReLU())
    return NormalNet(mu_net=torch.nn.Sequential(shared,
                                                torch.nn.Linear(512, dim_z)),
                     sigma_net=torch.nn.Sequential(
                         shared, torch.nn.Linear(512, dim_z),
                         Lambda(lambda x: torch.exp(0.5 * x))))
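These snippets lean on two helpers, Lambda and NormalNet, whose definitions are not shown. Below is a minimal sketch of what they plausibly look like, assuming Lambda's extra_args hook matches its use in Example #4 and that NormalNet simply stores its two heads; the actual implementations may differ:

import torch

class Lambda(torch.nn.Module):
    """Wrap a plain function so it can live inside an nn.Sequential."""
    def __init__(self, fn, extra_args=()):
        super().__init__()
        self.fn = fn
        self.extra_args = extra_args

    def forward(self, x):
        return self.fn(x, *self.extra_args)

class NormalNet(torch.nn.Module):
    """A pair of networks producing the mean and std dev of a diagonal Gaussian."""
    def __init__(self, mu_net, sigma_net):
        super().__init__()
        self.mu_net = mu_net
        self.sigma_net = sigma_net

    def forward(self, x):
        return self.mu_net(x), self.sigma_net(x)

Note that shared sits inside both Sequentials above; PyTorch registers the module once, so the trunk's weights are shared between the two heads and parameters() does not double-count them.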
Example #2
 def make_almost_generative_net(self, dim_x):
     return NormalNet(
         torch.nn.Linear(self.generative_net_input_dim, dim_x),
         torch.nn.Sequential(
             torch.nn.Linear(self.generative_net_input_dim, dim_x),
             Lambda(lambda x: torch.exp(
                 0.5 * x + self.generative_sigma_adjustment))))
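A pattern that recurs throughout these examples: the final Linear layer is read as predicting the log variance, and Lambda(lambda x: torch.exp(0.5 * x)) turns that into a standard deviation, since exp(0.5 * log(sigma^2)) = sigma. A quick numerical sanity check:

log_var = torch.tensor(2.0)
sigma = torch.exp(0.5 * log_var)
assert torch.isclose(sigma, torch.exp(log_var).sqrt())  # sigma == sqrt(variance)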
Example #3
  def make_almost_inference_net(self, dim_x):
    ndf = self.inference_net_num_filters
    model = torch.nn.Sequential(
      # input is 1 x 64 x 64
      torch.nn.Conv2d(1, ndf, 4, 2, 1, bias=False),
      torch.nn.LeakyReLU(0.2, inplace=True),
      # state size. (ndf) x 32 x 32
      torch.nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
      torch.nn.BatchNorm2d(ndf * 2),
      torch.nn.LeakyReLU(0.2, inplace=True),
      # state size. (ndf*2) x 16 x 16
      torch.nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
      torch.nn.BatchNorm2d(ndf * 4),
      torch.nn.LeakyReLU(0.2, inplace=True),
      # state size. (ndf*4) x 8 x 8

      # Flatten the filter channels
      Lambda(lambda x: x.view(x.size(0), -1)),

      torch.nn.Linear((ndf * 4) * 8 * 8, self.inference_net_output_dim),
      torch.nn.LeakyReLU(0.2, inplace=True)

      # This is the rest of the DCGAN discriminator
      # torch.nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
      # torch.nn.BatchNorm2d(ndf * 8),
      # torch.nn.LeakyReLU(0.2, inplace=True),
      # # state size. (ndf*8) x 4 x 4
      # torch.nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
      # torch.nn.Sigmoid()
    )

    return (model.cuda() if self.use_gpu else model)
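A quick shape check for this trunk, as a sketch; it assumes inference_net_num_filters = 64 and inference_net_output_dim = 128, with model being the Sequential built above:

x = torch.randn(8, 1, 64, 64)  # batch of 8 single-channel 64x64 images
h = model(x)                   # 64x64 -> 32x32 -> 16x16 -> 8x8, flatten, linear
print(h.shape)                 # torch.Size([8, 128])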
Example #4
  def make_almost_generative_net(self, dim_x):
    # We learn a std dev for each pixel which is not a function of the input.
    # Note that this Variable is NOT going to show up in `net.parameters()` and
    # therefore it is implicitly free from the ridge penalty/p(theta) prior.
    init_log_sigma = torch.log(1e-2 * torch.ones(1, 1, 64, 64))

    # See https://github.com/pytorch/examples/blob/master/dcgan/main.py#L107
    dim_in = self.generative_net_input_dim
    ngf = self.generative_net_num_filters
    model = torch.nn.Sequential(
      Lambda(lambda x: x.view(-1, dim_in, 1, 1)),
      torch.nn.ConvTranspose2d( dim_in, ngf * 8, 4, 1, 0, bias=False),
      torch.nn.BatchNorm2d(ngf * 8),
      torch.nn.ReLU(inplace=True),
      # state size. (ngf*8) x 4 x 4
      torch.nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
      torch.nn.BatchNorm2d(ngf * 4),
      torch.nn.ReLU(inplace=True),
      # state size. (ngf*4) x 8 x 8
      torch.nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
      torch.nn.BatchNorm2d(ngf * 2),
      torch.nn.ReLU(inplace=True),
      # state size. (ngf*2) x 16 x 16
      torch.nn.ConvTranspose2d(ngf * 2,     ngf, 4, 2, 1, bias=False),
      torch.nn.BatchNorm2d(ngf),
      torch.nn.ReLU(inplace=True),
      # state size. (ngf) x 32 x 32
      torch.nn.ConvTranspose2d(    ngf,       1, 4, 2, 1, bias=False),
      # state size. 1 x 64 x 64
      torch.nn.Tanh()
    )

    if self.use_gpu:
      model = model.cuda()
      init_log_sigma = init_log_sigma.cuda()

    log_sigma = Variable(init_log_sigma, requires_grad=True)
    return NormalNet(
      model,
      Lambda(
        lambda x, log_sigma: torch.exp(log_sigma.expand(x.size(0), -1, -1, -1)) + 1e-3,
        extra_args=(log_sigma,)
      )
    )
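Because log_sigma enters through extra_args, the sigma net ignores the values of its input and uses it only to read off the batch size: every example in the batch gets the same learned 1 x 64 x 64 map of standard deviations, floored at 1e-3. A shape sketch, where net stands for the NormalNet returned above and NormalNet is assumed to store its positional arguments as mu_net and sigma_net (as in Example #1):

z = torch.randn(8, dim_in)  # dim_in = generative_net_input_dim
mean = net.mu_net(z)        # (8, 1, 64, 64), squashed into [-1, 1] by the Tanh
std = net.sigma_net(z)      # (8, 1, 64, 64); identical across the batch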
Example #5
 def make_almost_generative_net(self, dim_x):
     return NormalNet(
         torch.nn.Sequential(
             torch.nn.ReLU(),
             torch.nn.Linear(self.generative_net_input_dim, dim_x)),
         torch.nn.Sequential(
             torch.nn.ReLU(),
             torch.nn.Linear(self.generative_net_input_dim, dim_x),
             # Learn the log variance
             Lambda(lambda x: torch.exp(0.5 * x))))
Example #6
 def make_generative_net(self, dim_x):
     hidden_size = 16
     shared = torch.nn.Sequential(torch.nn.Linear(self.dim_z, hidden_size),
                                  torch.nn.ReLU())
     return NormalNet(
         torch.nn.Sequential(shared, torch.nn.Linear(hidden_size, dim_x)),
         torch.nn.Sequential(
             shared,
             torch.nn.Linear(hidden_size, dim_x),
             # Learn the log variance
             Lambda(lambda x: torch.exp(0.5 * x))))
Example #7
def make_inference_net(shared):
    # return NormalNet(
    #   mu_net=torch.nn.Sequential(
    #     shared,
    #     torch.nn.Linear(dim_shared, dim_z)
    #   ),
    #   sigma_net=torch.nn.Sequential(
    #     shared,
    #     torch.nn.Linear(dim_shared, dim_z),
    #     Lambda(lambda x: torch.exp(0.5 * x))
    #   )
    # )

    return Normal_MeanPrecisionNet(
        torch.nn.Sequential(shared, torch.nn.Linear(dim_shared, dim_z)),
        torch.nn.Sequential(shared, torch.nn.Linear(dim_shared, dim_z),
                            Lambda(lambda x: torch.exp(0.5 * x))))
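The active branch parameterizes the Gaussian by mean and precision instead of mean and standard deviation (the commented-out block is the mean/std version). A hypothetical sampling sketch, assuming the second network's output is read as a precision lam, so that sigma = lam ** -0.5; the attribute names are illustrative, not taken from the source:

mu = net.mu_net(x)
lam = net.precision_net(x)                   # precision = 1 / variance
z = mu + lam.rsqrt() * torch.randn_like(mu)  # reparameterized sample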
Example #8
def make_generative_net():
    return BernoulliNet(rate_net=torch.nn.Sequential(
        torch.nn.Linear(dim_z, 512), torch.nn.ReLU(),
        torch.nn.Linear(512, dim_x), Lambda(torch.sigmoid)))
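For context, a minimal sketch of how an inference net and this Bernoulli decoder might be wired together in a VAE-style forward pass; dim_x, dim_z and the constructors come from the snippets above, and it assumes NormalNet and BernoulliNet store their heads under the keyword names used here (mu_net, sigma_net, rate_net):

q = make_inference_net()   # Example #1: x -> q(z | x)
p = make_generative_net()  # this example: z -> p(x | z)

x = torch.rand(32, dim_x)              # a batch of inputs
mu, sigma = q.mu_net(x), q.sigma_net(x)
z = mu + sigma * torch.randn_like(mu)  # reparameterization trick
rate = p.rate_net(z)                   # Bernoulli means in (0, 1)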