Example #1
    def __init__(self):
        super().__init__()
        self.conv1 = BayesianConv2d(1, 6, (5, 5))
        self.conv2 = BayesianConv2d(6, 16, (5, 5))
        self.fc1 = BayesianLinear(256, 120)
        self.fc2 = BayesianLinear(120, 84)
        self.fc3 = BayesianLinear(84, 10)
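This constructor builds a LeNet-style classifier out of blitz's Bayesian layers. A minimal forward pass completing the snippet is sketched below; the pooling steps, the flatten, and the assumption of 28x28 single-channel inputs (so that 16 x 4 x 4 = 256 matches fc1's in_features) are not from the original, and `import torch.nn.functional as F` is assumed.

    # Hedged sketch, not part of the original example.
    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)  # 1x28x28 -> 6x12x12
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # 6x12x12 -> 16x4x4
        x = x.view(x.size(0), -1)                   # flatten to 256 features
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)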
Example #2
    def __init__(self, n_classes):
        super().__init__()
        self.conv_1 = BayesianConv2d(in_channels=3,
                                     out_channels=64,
                                     kernel_size=(3, 3),
                                     stride=1,
                                     padding=2)
        self.conv_2 = BayesianConv2d(in_channels=64,
                                     out_channels=192,
                                     kernel_size=(3, 3),
                                     padding=2)
        self.conv_3 = BayesianConv2d(in_channels=192,
                                     out_channels=384,
                                     kernel_size=(3, 3),
                                     padding=1)
        self.conv_4 = BayesianConv2d(in_channels=384,
                                     out_channels=256,
                                     kernel_size=(3, 3),
                                     padding=1)
        self.conv_5 = BayesianConv2d(in_channels=256,
                                     out_channels=256,
                                     kernel_size=(3, 3),
                                     padding=1)
        self.mx_pl = nn.MaxPool2d(kernel_size=3, stride=2)

        # fully connected layers
        self.fc_1 = BayesianLinear(in_features=4096, out_features=512)
        self.fc_2 = BayesianLinear(in_features=512, out_features=256)
        self.fc_3 = BayesianLinear(in_features=256, out_features=n_classes)
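In blitz, a network like this is typically decorated with @variational_estimator, which adds a sample_elbo method combining the task loss with the KL complexity cost. A hedged training-step sketch; `model`, `datapoints`, and `labels` are assumed names, not from the original:

    # Hedged sketch: one training step for a @variational_estimator model.
    from blitz.utils import variational_estimator  # decorator applied to the class

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    criterion = torch.nn.CrossEntropyLoss()

    optimizer.zero_grad()
    loss = model.sample_elbo(inputs=datapoints,
                             labels=labels,
                             criterion=criterion,
                             sample_nbr=3)  # Monte Carlo ELBO samples
    loss.backward()
    optimizer.step()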
Example #3
    def __init__(self):
        super().__init__()
        self.blinear1 = BayesianLinear(10, 512)
        self.bconv = BayesianConv2d(3,
                                    3,
                                    kernel_size=(3, 3),
                                    padding=1,
                                    bias=True)
        self.blstm = BayesianLSTM(10, 2)
Example #4
    def test_inheritance(self):

        # check that BayesianConv2d inherits from both nn.Module and BayesianModule
        bconv = BayesianConv2d(in_channels=3,
                               out_channels=3,
                               kernel_size=(3, 3),
                               bias=False)

        self.assertEqual(isinstance(bconv, (nn.Module)), True)
        self.assertEqual(isinstance(bconv, (BayesianModule)), True)
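For reference, the imports this test relies on; the module path for BayesianModule follows the blitz package layout and is an assumption here:

    # Assumed imports for the inheritance test.
    import torch.nn as nn
    from blitz.modules import BayesianConv2d
    from blitz.modules.base_bayesian_module import BayesianModule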
Example #5
    def _make_conv_layers(self, in_channels, cfg):
        layers = []
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                layers += [BayesianConv2d(in_channels, x, kernel_size=(3, 3), padding=1),
                           nn.BatchNorm2d(x),
                           nn.ReLU()]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)
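The cfg list follows the usual VGG convention: integers give output channel counts and 'M' inserts a max-pooling layer. A hedged usage sketch inside the owning module's __init__; the VGG-16 configuration and the attribute name `features` are assumptions:

    # Hedged sketch: building the feature extractor from a VGG-16 cfg.
    def __init__(self):
        super().__init__()
        cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
               512, 512, 512, 'M', 512, 512, 512, 'M']
        self.features = self._make_conv_layers(in_channels=3, cfg=cfg)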
Example #6
    def test_kl_divergence_bayesian_conv2d_module(self):
        bconv = BayesianConv2d(in_channels=3,
                               out_channels=3,
                               kernel_size=(3, 3))

        to_feed = torch.ones((1, 3, 25, 25))
        predicted = bconv(to_feed)

        complexity_cost = bconv.log_variational_posterior - bconv.log_prior
        kl_complexity_cost = kl_divergence_from_nn(bconv)

        self.assertEqual((complexity_cost == kl_complexity_cost).all(),
                         torch.tensor(True))
        pass
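kl_divergence_from_nn sums log_variational_posterior - log_prior over every Bayesian layer in a model, which is why it equals the single layer's complexity cost above. A hedged sketch of using it as the complexity term of an ELBO-style loss; `model`, `inputs`, `labels`, `criterion`, and `kl_weight` are assumed names:

    # Hedged sketch: full-network complexity cost as a loss term.
    from blitz.losses import kl_divergence_from_nn

    nll = criterion(model(inputs), labels)  # data-fitting term (forward pass samples weights)
    kl = kl_divergence_from_nn(model)       # summed over all Bayesian layers
    loss = nll + kl_weight * kl             # kl_weight scales the complexity cost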
Example #7
    def test_kl_divergence(self):
        # create the module and sample weights, then
        # check that the KL divergence between the prior and the posterior can be computed
        bconv = BayesianConv2d(in_channels=3,
                               out_channels=3,
                               kernel_size=(3, 3))

        to_feed = torch.ones((1, 3, 25, 25))
        predicted = bconv(to_feed)

        complexity_cost = bconv.log_variational_posterior - bconv.log_prior
        # a tensor only differs from itself at NaN entries, so this asserts
        # that the complexity cost was computed without producing NaNs
        self.assertEqual((complexity_cost == complexity_cost).all(),
                         torch.tensor(True))
        pass
Example #8
    def test_variational_inference(self):
        # create the module and check that repeated forward passes differ (weights are resampled)
        bconv = BayesianConv2d(in_channels=3,
                               out_channels=3,
                               kernel_size=(3, 3))

        conv = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=(3, 3))

        to_feed = torch.ones((1, 3, 25, 25))

        self.assertEqual((bconv(to_feed) != bconv(to_feed)).any(),
                         torch.tensor(True))
        self.assertEqual((conv(to_feed) == conv(to_feed)).all(),
                         torch.tensor(True))
        pass
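Since each forward pass draws fresh weights, predictions are typically formed by averaging several stochastic passes, with the spread across samples serving as an uncertainty estimate. A minimal sketch reusing `bconv` and `to_feed` from the test above:

    # Hedged sketch: Monte Carlo prediction with a stochastic layer.
    samples = torch.stack([bconv(to_feed) for _ in range(10)])
    mean_prediction = samples.mean(dim=0)  # point estimate
    uncertainty = samples.std(dim=0)       # per-output spread across samples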
Example #9
    def test_weights_shape(self):
        # check that the output shape matches that of a standard nn.Conv2d
        bconv = BayesianConv2d(in_channels=3,
                               out_channels=3,
                               kernel_size=(3, 3))

        conv = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=(3, 3))

        to_feed = torch.ones((1, 3, 25, 25))

        infer1 = bconv(to_feed)
        infer2 = conv(to_feed)

        self.assertEqual(infer1.shape, infer2.shape)
        pass
Example #10
    def __init__(self, num_graph, in_feature, out_feature):
        super(GraphConvolution, self).__init__()
        self.num_graph = num_graph
        self.in_feature = in_feature
        self.out_feature = out_feature

        # num_joint is assumed to be defined at module scope (number of graph nodes)
        self.mask = nn.Parameter(torch.ones(num_graph, num_joint, num_joint))

        self.gcn_list = nn.ModuleList([
            BayesianConv2d(self.in_feature,
                           self.out_feature,
                           kernel_size=(1, 1)) for i in range(self.num_graph)
        ])

        self.bn = nn.BatchNorm2d(out_feature)
        self.act = nn.ReLU()
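The constructor suggests one 1x1 Bayesian convolution per adjacency matrix, with a learnable mask over each graph. A purely hypothetical forward sketch under that reading; the input layout (N, C, T, V), the `adj` argument, and the sum aggregation are assumptions, not from the original:

    # Hypothetical sketch, NOT taken from the original example.
    # Assumes x: (N, C, T, V) features and adj: (num_graph, V, V) adjacencies.
    def forward(self, x, adj):
        out = 0
        for i, gcn in enumerate(self.gcn_list):
            a = adj[i] * self.mask[i]                # masked adjacency for graph i
            y = torch.einsum('nctv,vw->nctw', x, a)  # propagate features along the graph
            out = out + gcn(y)                       # 1x1 conv = per-node linear map
        return self.act(self.bn(out))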
Example #11
    def test_weights_shape_cuda(self):
        # check that the output shape matches a standard nn.Conv2d, on GPU when available
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        bconv = BayesianConv2d(in_channels=3,
                               out_channels=3,
                               kernel_size=(3, 3)).to(device)

        conv = nn.Conv2d(in_channels=3, out_channels=3,
                         kernel_size=(3, 3)).to(device)

        to_feed = torch.ones((1, 3, 25, 25)).to(device)

        infer1 = bconv(to_feed)
        infer2 = conv(to_feed)

        self.assertEqual(infer1.shape, infer2.shape)
        pass
Example #12
    def test_freeze_module(self):
        # create the module, then freeze it and
        # check that repeated forward passes stay equal
        bconv = BayesianConv2d(in_channels=3,
                               out_channels=3,
                               kernel_size=(3, 3),
                               bias=False)

        to_feed = torch.ones((1, 3, 25, 25))

        self.assertEqual((bconv(to_feed) != bconv(to_feed)).any(),
                         torch.tensor(True))

        frozen_feedforward = bconv.forward_frozen(to_feed)
        bconv.freeze = True
        self.assertEqual((bconv.forward(to_feed) == frozen_feedforward).all(),
                         torch.tensor(True))
        pass
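Freezing makes a Bayesian layer deterministic by using the learned posterior means instead of sampling. For whole models, blitz's @variational_estimator decorator also adds freeze_() and unfreeze_() helpers; relying on them here is an assumption about the setup:

    # Hedged sketch: toggling determinism on a decorated model.
    model.freeze_()                # layers use posterior means, no sampling
    deterministic_out = model(x)   # repeated calls now return identical outputs
    model.unfreeze_()              # stochastic weight sampling restored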
Example #13
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = BayesianConv2d(in_channels,
                                    v,
                                    kernel_size=(3, 3),
                                    padding=1,
                                    bias=True)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
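As in Example #5, integers in cfg are channel counts and 'M' marks pooling; the batch_norm flag switches between conv-BN-ReLU and conv-ReLU blocks. A hedged usage sketch; pairing this helper with the VGG-11 configuration is an assumption:

    # Hedged sketch: VGG-11 style feature extractor with batch norm.
    cfg_vgg11 = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
    features = make_layers(cfg_vgg11, batch_norm=True)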
Example #14
    def test_init_bayesian_layer(self):
        # create a Bayesian layer and check that construction succeeds
        module = BayesianConv2d(3, 10, (3, 3))
        pass