Example #1
    def __init__(self, n_classes):
        super().__init__()
        self.conv_1 = BayesianConv2d(in_channels=3,
                                     out_channels=64,
                                     kernel_size=(3, 3),
                                     stride=1,
                                     padding=2)
        self.conv_2 = BayesianConv2d(in_channels=64,
                                     out_channels=192,
                                     kernel_size=(3, 3),
                                     padding=2)
        self.conv_3 = BayesianConv2d(in_channels=192,
                                     out_channels=384,
                                     kernel_size=(3, 3),
                                     padding=1)
        self.conv_4 = BayesianConv2d(in_channels=384,
                                     out_channels=256,
                                     kernel_size=(3, 3),
                                     padding=1)
        self.conv_5 = BayesianConv2d(in_channels=256,
                                     out_channels=256,
                                     kernel_size=(3, 3),
                                     padding=1)
        self.mx_pl = nn.MaxPool2d(kernel_size=3, stride=2)

        # fully connected layers
        self.fc_1 = BayesianLinear(in_features=4096, out_features=512)
        self.fc_2 = BayesianLinear(in_features=512, out_features=256)
        self.fc_3 = BayesianLinear(in_features=256, out_features=n_classes)
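The snippet stops at the layer definitions. One plausible forward pass, as a sketch only: the pooling placement, the ReLU activations, and the input resolution (which must leave 4096 features after flattening, to match fc_1) are assumptions, not part of the original.

    def forward(self, x):
        # assumes: import torch.nn.functional as F
        x = self.mx_pl(F.relu(self.conv_1(x)))   # pool after conv_1, conv_2
        x = self.mx_pl(F.relu(self.conv_2(x)))   # and conv_5, AlexNet-style
        x = F.relu(self.conv_3(x))
        x = F.relu(self.conv_4(x))
        x = self.mx_pl(F.relu(self.conv_5(x)))
        x = x.view(x.size(0), -1)                # must flatten to 4096 features
        x = F.relu(self.fc_1(x))
        x = F.relu(self.fc_2(x))
        return self.fc_3(x)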
Example #2
 def __init__(self):
     super().__init__()
     self.conv1 = BayesianConv2d(1, 6, (5, 5))
     self.conv2 = BayesianConv2d(6, 16, (5, 5))
     self.fc1 = BayesianLinear(256, 120)
     self.fc2 = BayesianLinear(120, 84)
     self.fc3 = BayesianLinear(84, 10)
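This is the classic LeNet-5 layout; with a 1x28x28 MNIST input the convolution/pooling arithmetic gives 16*4*4 = 256 features, matching fc1. A forward sketch under that assumption:

 def forward(self, x):
     # assumes: import torch.nn.functional as F
     # 28 -> conv 5x5 -> 24 -> pool/2 -> 12 -> conv 5x5 -> 8 -> pool/2 -> 4
     x = F.max_pool2d(F.relu(self.conv1(x)), 2)
     x = F.max_pool2d(F.relu(self.conv2(x)), 2)
     x = x.view(x.size(0), -1)   # 16 * 4 * 4 = 256
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     return self.fc3(x)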
Example #3
 def __init__(self, input_dim, output_dim):
     super().__init__()
     # Bayesian three-layer fully connected regressor
     self.blinear1 = BayesianLinear(input_dim, 1024)
     self.blinear2 = BayesianLinear(1024, 1024)
     self.blinear3 = BayesianLinear(1024, output_dim)
     self.sigmoid1 = nn.Sigmoid()

     # deterministic counterpart of the same architecture
     self.linear1 = nn.Linear(input_dim, 1024, bias=True)
     self.linear2 = nn.Linear(1024, 1024, bias=True)
     self.linear3 = nn.Linear(1024, output_dim, bias=True)
     self.lsig1   = nn.Sigmoid()
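None of these __init__ snippets show training. With blitz, a module like the one above is typically decorated with @variational_estimator, which adds a sample_elbo() method combining the sampled fit cost with the KL complexity cost. A minimal sketch; the class name BayesianRegressor, the dimensions, and the data are hypothetical:

import torch
import torch.nn as nn
import torch.optim as optim

# hypothetical: assumes the class above is named BayesianRegressor and is
# decorated with blitz's @variational_estimator
x, y = torch.randn(128, 8), torch.randn(128, 1)
model = BayesianRegressor(input_dim=8, output_dim=1)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.MSELoss()

for step in range(1000):
    optimizer.zero_grad()
    # ELBO loss: average fit cost over sample_nbr weight samples + KL term
    loss = model.sample_elbo(inputs=x, labels=y,
                             criterion=criterion, sample_nbr=3)
    loss.backward()
    optimizer.step()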
Example #4
 def __init__(self, input_dim):
     super().__init__()

     self.blinear1 = BayesianLinear(input_dim, 24)
     self.blinear2 = BayesianLinear(24, 24)
     self.blinear3 = BayesianLinear(24, 24)
     self.blinear4 = BayesianLinear(24, 1)
Example #5
 def __init__(self, input_dim, output_dim):
     super().__init__()
     self.blinear1 = BayesianLinear(input_dim, 100)
     self.blinear2 = BayesianLinear(100, 20)
     self.blinear3 = BayesianLinear(20, 10)
     self.blinear4 = BayesianLinear(10, 5)
     self.blinear5 = BayesianLinear(5, output_dim)
Example #6

    def __init__(self, input_dim, output_dim, blayer1, blayer2):

        super().__init__()

        # two Bayesian hidden layers, then the Bayesian output layer
        self.blinear1 = BayesianLinear(input_dim, blayer1)
        self.blinear2 = BayesianLinear(blayer1, blayer2)
        self.blinearOutput = BayesianLinear(blayer2, output_dim)
Example #7
 def __init__(self):
     super().__init__()
     self.layer = nn.Sequential(
         BayesianLinear(30, 10),
         nn.ReLU(),
         BayesianLinear(10, 5),
         nn.ReLU(),
         BayesianLinear(5, 1)
     )
Example #8
 def _make_dense_layers(self, i_dim, o_dim, cfg):
     layers = []
     for x in cfg:
         if x == 'D':
             layers += [nn.Dropout(p=0.5)]
         elif x == 'C':
             layers += [BayesianLinear(i_dim, o_dim)]
         else:
             layers += [BayesianLinear(i_dim, x),
                        nn.ReLU()]
             i_dim = x
     return nn.Sequential(*layers)
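The cfg list drives construction: an integer entry adds a hidden BayesianLinear plus ReLU and updates the running input width, 'D' inserts dropout, and 'C' adds a layer from the current width to o_dim. A hypothetical cfg, for illustration only, as it might be used inside the model's __init__:

 # hypothetical: builds 1024 -> 512 -> 256 -> 10 with dropout between blocks
 cfg = [512, 'D', 256, 'D', 'C']
 classifier = self._make_dense_layers(i_dim=1024, o_dim=10, cfg=cfg)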
Example #9
 def __init__(self, top):
     # best_topo maps a topology key to (input_size, n_hidden1, n_hidden2, output_size)
     input_size, n_hidden1, n_hidden2, output_size = best_topo[top]
     super(MLP_Bayesian, self).__init__()
     self.input_size = input_size
     self.network = nn.Sequential(
         BayesianLinear(input_size, n_hidden1),
         nn.SELU(),  # also works well with nn.ReLU() or nn.ELU()
         BayesianLinear(n_hidden1, n_hidden2),
         nn.SELU(),
         BayesianLinear(n_hidden2, output_size),
     )
Example #10
 def __init__(self,
              n_links,
              hidden_size=20,
              prior_pi=1.0,
              prior_sigma_1=1.0,
              prior_sigma_2=0.01):
     super(BRNN, self).__init__()
     self.hidden_size = hidden_size
     self.lstm_1 = BayesianLSTM(n_links,
                                hidden_size,
                                prior_pi=prior_pi,
                                prior_sigma_1=prior_sigma_1,
                                prior_sigma_2=prior_sigma_2,
                                posterior_rho_init=1.0,
                                peephole=False)
     self.lstm_2 = BayesianLSTM(hidden_size,
                                hidden_size,
                                prior_pi=prior_pi,
                                prior_sigma_1=prior_sigma_1,
                                prior_sigma_2=prior_sigma_2,
                                posterior_rho_init=1.0,
                                peephole=False)
     self.linear = BayesianLinear(hidden_size,
                                  n_links,
                                  prior_pi=prior_pi,
                                  prior_sigma_1=prior_sigma_1,
                                  prior_sigma_2=prior_sigma_2,
                                  posterior_rho_init=3.0)
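The BRNN snippet likewise omits its forward pass. A minimal sketch, assuming a batch-first (batch, seq_len, n_links) input and a last-timestep readout; blitz's BayesianLSTM returns the output sequence together with the hidden states:

 def forward(self, x):
     x_, _ = self.lstm_1(x)    # (batch, seq_len, hidden_size)
     x_, _ = self.lstm_2(x_)
     x_ = x_[:, -1, :]         # keep only the last timestep
     return self.linear(x_)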
Example #11

    def test_any_prior_on_layer(self):
        l = BayesianLinear(7, 5, prior_dist=torch.distributions.studentT.StudentT(1, 1))
        t = torch.ones(3, 7)
        _ = l(t)

        # self-equality fails for NaN, so this is a smoke check that the
        # log-prior under a custom prior distribution is a valid number
        self.assertEqual(l.log_prior, l.log_prior)
Example #12
 def build_model(self): 
     self.blinear1 = BayesianLinear(self.input_dim, 80)
     self.blinear2 = BayesianLinear(80, 60)
     self.blinear3 = BayesianLinear(60, 50)
     self.blinear4 = BayesianLinear(50, 40)
     self.blinear5 = BayesianLinear(40, 20)
     self.blinear6 = BayesianLinear(20, 20)
     self.blinear7 = BayesianLinear(20, 20)
     self.blinear8 = BayesianLinear(20, self.output_dim)
Example #13
    def __init__(self, features, out_nodes=10):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            BayesianLinear(512 * 7 * 7, 512),
            nn.ReLU(True),
            BayesianLinear(512, 512),
            nn.ReLU(True),
            BayesianLinear(512, out_nodes),
        )

        for m in self.modules():
            if isinstance(m, BayesianConv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight_mu.data.normal_(0, math.sqrt(2. / n))
                m.bias_mu.data.zero_()
Example #14
 def __init__(self):
     super().__init__()
     self.blinear1 = BayesianLinear(10, 512)
     self.bconv = BayesianConv2d(3,
                                 3,
                                 kernel_size=(3, 3),
                                 padding=1,
                                 bias=True)
     self.blstm = BayesianLSTM(10, 2)
Example #15
    def test_kl_divergence_bayesian_linear_module(self):
        blinear = BayesianLinear(10, 10)
        to_feed = torch.ones((1, 10))
        predicted = blinear(to_feed)

        complexity_cost = blinear.log_variational_posterior - blinear.log_prior
        kl_complexity_cost = kl_divergence_from_nn(blinear)

        self.assertEqual((complexity_cost == kl_complexity_cost).all(),
                         torch.tensor(True))
Example #16
    def __init__(self, input_dim, hidden_dim, linear_dim, sequence_length,
                 output_dim):
        '''
        input_dim: input dimension (number of stops + dimension of temporal features)
        hidden_dim: hidden dimension
        linear_dim: linear layer dimension
        sequence_length: sequence length (default: 24*14 hours, i.e. two weeks)
        output_dim: output dimension (number of stops, default: 10)
        '''

        super(LSTM_Net, self).__init__()

        # define network constants
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.linear_dim = linear_dim
        self.sequence_length = sequence_length
        self.output_dim = output_dim

        # bayesian LSTM layer
        self.lstm1 = BayesianLSTM(in_features=input_dim,
                                  out_features=hidden_dim,
                                  bias=True,
                                  prior_sigma_1=1.0,
                                  prior_sigma_2=4.0,
                                  prior_pi=0.5,
                                  posterior_mu_init=0,
                                  posterior_rho_init=-0.5)

        # bayesian linear layer
        self.blinear1 = BayesianLinear(
            in_features=hidden_dim,
            out_features=linear_dim,
            bias=True,
            prior_sigma_1=1.0,
            prior_sigma_2=0.5,
            prior_pi=0.5,
            posterior_mu_init=0,
            posterior_rho_init=-0.5,
        )

        # linear layer
        self.linear1 = nn.Linear(in_features=linear_dim,
                                 out_features=output_dim,
                                 bias=True)

        # dropout function
        self.dropout = nn.Dropout(p=0.5)
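Here too, only the layers are defined. A forward sketch under assumptions: batch-first input of shape (batch, sequence_length, input_dim), a last-timestep readout, ReLU on the Bayesian linear layer, and dropout before the deterministic head:

    def forward(self, x):
        # assumes: import torch.nn.functional as F
        x_, _ = self.lstm1(x)    # (batch, sequence_length, hidden_dim)
        x_ = x_[:, -1, :]        # last timestep only
        x_ = self.dropout(F.relu(self.blinear1(x_)))
        return self.linear1(x_)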
Example #17
import warnings

def convert_layer_to_bayesian(layer):
    if isinstance(layer, torch.nn.Linear):
        new_layer = BayesianLinear(layer.in_features, layer.out_features)
    elif isinstance(layer, nn.Embedding):
        new_layer = BayesianEmbedding(layer.num_embeddings,
                                      layer.embedding_dim)
    elif isinstance(layer, nn.Conv1d):
        new_layer = BayesianConv1d(
            layer.in_channels,
            layer.out_channels,
            kernel_size=layer.kernel_size[0],
            groups=layer.groups,
            padding=layer.padding,
            dilation=layer.dilation,
        )
    else:
        warnings.warn(
            f"Could not find correct type for conversion of layer {layer} with type {type(layer)}"
        )
        new_layer = layer

    return new_layer
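A hypothetical usage sketch (the model and loop are illustrative, not from the original): replace every direct child of a network with its Bayesian counterpart. Note that named_children() is shallow, so nested submodules would need a recursive variant.

import torch.nn as nn

# hypothetical model for illustration
model = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 2))
for name, child in model.named_children():
    # nn.Module.__setattr__ re-registers the new module under the same name
    setattr(model, name, convert_layer_to_bayesian(child))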
Example #18

 def __init__(self, input_dim, output_dim):
     super().__init__()
     self.blinear1 = BayesianLinear(input_dim, 512)
     self.blinear2 = BayesianLinear(512, output_dim)
Example #19
 def __init__(self):
     super().__init__()
     self.blinear = BayesianLinear(10, 10)
Example #20
 def __init__(self):
     super().__init__()
     self.nn = nn.Sequential(BayesianLinear(10, 7),
                             BayesianLinear(7, 5))