Example #1
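All four snippets below assume the standard PyTorch imports and an external xavier_init helper that is not shown. A minimal, hypothetical stand-in consistent with the shapes used here (a Glorot-style Gaussian draw scaled by fan-in) could be:

import math

import torch
import torch.nn as nn
import torch.nn.functional as F


def xavier_init(size):
    # Hypothetical stand-in for the snippets' xavier_init helper:
    # a Gaussian draw scaled by the first ("fan-in") dimension.
    fan_in = size[0]
    return torch.randn(*size) * math.sqrt(2.0 / fan_in)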
def __init__(self, learning_rate, hidden_dims, label=''):
    super().__init__()
    self.label = label
    self.learning_rate = learning_rate
    self.hidden_dims = hidden_dims
    self.weight_std = 1e-3
    # Fixed (non-trainable) standard deviations for the scale-mixture prior.
    self.sigma_1_prior = torch.tensor([1e-3], requires_grad=False)
    self.sigma_2_prior = torch.tensor([1e-7], requires_grad=False)
    self.prior_weight = 0.5
    self.num_samples = 1

    # Fully connected layers: each weight matrix is parameterized by a mean
    # (mu) and a pre-softplus standard deviation (rho). `noise_dim` is
    # assumed to be defined at module scope.
    self.W1_mu = nn.Parameter(xavier_init((noise_dim, self.hidden_dims[0])).float())
    self.W1_rho = nn.Parameter(xavier_init((noise_dim, self.hidden_dims[0])).float())
    self.W2_mu = nn.Parameter(xavier_init((self.hidden_dims[0], 128 * 7 * 7)).float())
    self.W2_rho = nn.Parameter(xavier_init((self.hidden_dims[0], 128 * 7 * 7)).float())
    # self.W3_mu = nn.Parameter(xavier_init((self.hidden_dims[1], 32**2 * 3)).float())
    # self.W3_rho = nn.Parameter(xavier_init((self.hidden_dims[1], 32**2 * 3)).float())

    # Deterministic transposed-convolution layers (7x7 -> 14x14 -> 28x28).
    self.deconv1 = nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1)
    self.deconv2 = nn.ConvTranspose2d(64, 1, 4, stride=2, padding=1)

    # Batch normalization.
    self.batch_norm1 = nn.BatchNorm1d(self.hidden_dims[0])
    self.batch_norm2 = nn.BatchNorm1d(128 * 7 * 7)
    self.batch_norm3 = nn.BatchNorm2d(64)
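In Bayes-by-backprop style networks, mu/rho pairs like these are consumed by sampling concrete weights with the reparameterization trick. A minimal sketch of such a sampler (a hypothetical helper, assuming softplus maps rho to a positive standard deviation):

def sample_weight(mu, rho):
    # w = mu + softplus(rho) * eps, with eps ~ N(0, I); softplus keeps sigma > 0.
    sigma = F.softplus(rho)
    eps = torch.randn_like(mu)
    return mu + sigma * eps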
def __init__(self, learning_rate, hidden_dims, label=''):
    super().__init__()
    self.label = label
    self.learning_rate = learning_rate
    self.hidden_dims = hidden_dims
    self.weight_std = 1e-4
    # Fixed (non-trainable) standard deviations for the scale-mixture prior.
    self.sigma_1_prior = torch.tensor([1e-3], requires_grad=False)
    self.sigma_2_prior = torch.tensor([1e-7], requires_grad=False)
    self.prior_weight = 0.5
    self.num_samples = 3

    # Fully connected layers (mu/rho pairs as above).
    self.W1_mu = nn.Parameter(xavier_init((noise_dim, self.hidden_dims[0])).float())
    self.W1_rho = nn.Parameter(xavier_init((noise_dim, self.hidden_dims[0])).float())
    self.W2_mu = nn.Parameter(xavier_init((self.hidden_dims[0], 128 * 7 * 7)).float())
    self.W2_rho = nn.Parameter(xavier_init((self.hidden_dims[0], 128 * 7 * 7)).float())
    # self.W3_mu = nn.Parameter(xavier_init((self.hidden_dims[1], 32**2 * 3)).float())
    # self.W3_rho = nn.Parameter(xavier_init((self.hidden_dims[1], 32**2 * 3)).float())

    # Bayesian transposed-convolution kernels, also parameterized as mu/rho
    # pairs; shape is (in_channels, out_channels, kH, kW).
    self.W1_deconv_mu = nn.Parameter(xavier_init((128, 64, 6, 6)).float())
    self.W1_deconv_rho = nn.Parameter(xavier_init((128, 64, 6, 6)).float())
    self.W2_deconv_mu = nn.Parameter(xavier_init((64, 3, 4, 4)).float())
    self.W2_deconv_rho = nn.Parameter(xavier_init((64, 3, 4, 4)).float())
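A plausible forward pass for this variant samples the deconvolution kernels per call and applies them functionally. The stride and padding values below are assumptions, chosen so that 7x7 feature maps reach 32x32 RGB outputs given the kernel sizes above; sample_weight is the hypothetical helper sketched earlier:

def forward(self, z):
    # z: (batch, noise_dim) latent noise.
    h = F.relu(z @ sample_weight(self.W1_mu, self.W1_rho))
    h = F.relu(h @ sample_weight(self.W2_mu, self.W2_rho))
    h = h.view(-1, 128, 7, 7)
    w1 = sample_weight(self.W1_deconv_mu, self.W1_deconv_rho)
    h = F.relu(F.conv_transpose2d(h, w1, stride=2, padding=1))          # -> (batch, 64, 16, 16)
    w2 = sample_weight(self.W2_deconv_mu, self.W2_deconv_rho)
    return torch.tanh(F.conv_transpose2d(h, w2, stride=2, padding=1))   # -> (batch, 3, 32, 32)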
def __init__(self, learning_rate, hidden_dims, label=''):
    super().__init__()
    self.label = label
    self.learning_rate = learning_rate
    self.hidden_dims = hidden_dims
    self.weight_std = 1e-3
    # Fixed (non-trainable) standard deviations for the scale-mixture prior.
    self.sigma_1_prior = torch.tensor([1e-3], requires_grad=False)
    self.sigma_2_prior = torch.tensor([1e-7], requires_grad=False)
    self.prior_weight = 0.5
    self.num_samples = 3

    # Convolutional layers: Bayesian kernels parameterized as mu/rho pairs;
    # shape is (out_channels, in_channels, kH, kW).
    self.W1_conv_mu = nn.Parameter(xavier_init((32, 1, 5, 5)).float())
    self.W1_conv_rho = nn.Parameter(xavier_init((32, 1, 5, 5)).float())
    self.W2_conv_mu = nn.Parameter(xavier_init((64, 32, 5, 5)).float())
    self.W2_conv_rho = nn.Parameter(xavier_init((64, 32, 5, 5)).float())

    # Linear layers.
    self.W1_mu = nn.Parameter(xavier_init((self.hidden_dims[0], self.hidden_dims[1])).float())
    self.W1_rho = nn.Parameter(xavier_init((self.hidden_dims[0], self.hidden_dims[1])).float())
    # self.W2_mu = nn.Parameter(xavier_init((self.hidden_dims[0], self.hidden_dims[1])).float())
    # self.W2_rho = nn.Parameter(xavier_init((self.hidden_dims[0], self.hidden_dims[1])).float())
    self.W3_mu = nn.Parameter(xavier_init((self.hidden_dims[1], 1)).float())
    self.W3_rho = nn.Parameter(xavier_init((self.hidden_dims[1], 1)).float())
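This variant looks like a discriminator or binary classifier over 1-channel 28x28 inputs. A hedged forward-pass sketch, reusing the hypothetical sample_weight helper; the padding and max-pooling choices are assumptions, and hidden_dims[0] is assumed to equal the flattened conv output size 64 * 7 * 7:

def forward(self, x):
    # x: (batch, 1, 28, 28) grayscale images.
    w1 = sample_weight(self.W1_conv_mu, self.W1_conv_rho)
    h = F.max_pool2d(F.relu(F.conv2d(x, w1, padding=2)), 2)   # -> (batch, 32, 14, 14)
    w2 = sample_weight(self.W2_conv_mu, self.W2_conv_rho)
    h = F.max_pool2d(F.relu(F.conv2d(h, w2, padding=2)), 2)   # -> (batch, 64, 7, 7)
    h = h.view(h.size(0), -1)                                 # -> (batch, 64 * 7 * 7)
    h = F.relu(h @ sample_weight(self.W1_mu, self.W1_rho))
    return torch.sigmoid(h @ sample_weight(self.W3_mu, self.W3_rho))   # single probability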
def __init__(self, learning_rate, hidden_dims, label=''):
    super().__init__()
    self.label = label
    self.learning_rate = learning_rate
    self.hidden_dims = hidden_dims
    self.weight_std = 1e-3
    # Fixed (non-trainable) standard deviations for the scale-mixture prior.
    self.sigma_1_prior = torch.tensor([1e-3], requires_grad=False)
    self.sigma_2_prior = torch.tensor([1e-7], requires_grad=False)
    self.prior_weight = 0.5
    self.num_samples = 2

    # Fully connected layers (mu/rho pairs); the output is a flattened
    # 28x28 image.
    self.W1_mu = nn.Parameter(xavier_init((noise_dim, self.hidden_dims[0])).float())
    self.W1_rho = nn.Parameter(xavier_init((noise_dim, self.hidden_dims[0])).float())
    self.W2_mu = nn.Parameter(xavier_init((self.hidden_dims[0], self.hidden_dims[1])).float())
    self.W2_rho = nn.Parameter(xavier_init((self.hidden_dims[0], self.hidden_dims[1])).float())
    self.W3_mu = nn.Parameter(xavier_init((self.hidden_dims[1], 28**2)).float())
    self.W3_rho = nn.Parameter(xavier_init((self.hidden_dims[1], 28**2)).float())

    # Per-layer biases, also mu/rho pairs, initialized to zero.
    self.b1_mu = nn.Parameter(torch.zeros(self.hidden_dims[0]))
    self.b1_rho = nn.Parameter(torch.zeros(self.hidden_dims[0]))
    self.b2_mu = nn.Parameter(torch.zeros(self.hidden_dims[1]))
    self.b2_rho = nn.Parameter(torch.zeros(self.hidden_dims[1]))
    self.b3_mu = nn.Parameter(torch.zeros(28**2))
    self.b3_rho = nn.Parameter(torch.zeros(28**2))
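The sigma_1_prior, sigma_2_prior, and prior_weight fields shared by all four snippets suggest the two-component scale-mixture Gaussian prior of Blundell et al.'s "Weight Uncertainty in Neural Networks". A sketch of its log density (hypothetical method name; a small epsilon is added for numerical stability):

def log_mixture_prior(self, w):
    # log p(w) for p(w) = pi * N(w; 0, sigma_1^2) + (1 - pi) * N(w; 0, sigma_2^2),
    # summed over all entries of w.
    def gaussian_pdf(w, sigma):
        return torch.exp(-w ** 2 / (2 * sigma ** 2)) / (sigma * math.sqrt(2 * math.pi))
    pi = self.prior_weight
    mix = (pi * gaussian_pdf(w, self.sigma_1_prior)
           + (1 - pi) * gaussian_pdf(w, self.sigma_2_prior))
    return torch.log(mix + 1e-20).sum()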