def wip_experiment_simultaneous_average_weights_mixture_model_with_uncertainty():
    num_tasks = 2
    weights_dir = "checkpoints/MNIST/bayesian/splitted/2-tasks/"

    loaders1, loaders2 = get_splitmnist_dataloaders(num_tasks)
    net1, net2 = get_splitmnist_models(num_tasks, True, weights_dir)
    net1.cuda()
    net2.cuda()
    net_mix = get_mixture_model(num_tasks, weights_dir, include_last_layer=False)
    net_mix.cuda()

    # Create two task-specific final (fc3) layers, one per split-MNIST task
    fc3_1 = BBBLinear(84, 5, name='fc3_1')  # hard-coded for the LeNet head: 84 -> 5
    weights_1 = torch.load(weights_dir + "model_lenet_2.1.pt")
    fc3_1.W = torch.nn.Parameter(weights_1['fc3.W'])
    fc3_1.log_alpha = torch.nn.Parameter(weights_1['fc3.log_alpha'])

    fc3_2 = BBBLinear(84, 5, name='fc3_2')  # hard-coded for the LeNet head: 84 -> 5
    weights_2 = torch.load(weights_dir + "model_lenet_2.2.pt")
    fc3_2.W = torch.nn.Parameter(weights_2['fc3.W'])
    fc3_2.log_alpha = torch.nn.Parameter(weights_2['fc3.log_alpha'])

    fc3_1, fc3_2 = fc3_1.cuda(), fc3_2.cuda()

    print("Model-1, Loader-1:", calculate_accuracy(net1, loaders1[1]))
    print("Model-2, Loader-2:", calculate_accuracy(net2, loaders2[1]))
    print("Model-Mix, Loader-1:", predict_using_epistemic_uncertainty_with_mixture_model(net_mix, fc3_1, fc3_2, loaders1[1]))
    print("Model-Mix, Loader-2:", predict_using_epistemic_uncertainty_with_mixture_model(net_mix, fc3_1, fc3_2, loaders2[1]))
Example #2
    def __init__(self, outputs, inputs):
        super(BBB3Conv3FC, self).__init__()

        self.num_classes = outputs

        self.conv1 = BBBConv2d(inputs, 32, 5, alpha_shape=(1,1), padding=2, bias=False, name='conv1')
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv2 = BBBConv2d(32, 64, 5, alpha_shape=(1,1), padding=2, bias=False, name='conv2')
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv3 = BBBConv2d(64, 128, 5, alpha_shape=(1,1), padding=1, bias=False, name='conv3')
        self.soft3 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.flatten = FlattenLayer(2 * 2 * 128)
        self.fc1 = BBBLinear(2 * 2 * 128, 1000, alpha_shape=(1,1), bias=False, name='fc1')
        self.soft5 = nn.Softplus()

        self.fc2 = BBBLinear(1000, 1000, alpha_shape=(1,1), bias=False, name='fc2')
        self.soft6 = nn.Softplus()

        self.fc3 = BBBLinear(1000, outputs, alpha_shape=(1,1), bias=False, name='fc3')
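
    # A minimal forward() consistent with the layers above; the snippet omits
    # it, so the strictly sequential ordering here is an assumption. The
    # 2 * 2 * 128 flatten size works out for 32x32 inputs.
    def forward(self, x):
        x = self.pool1(self.soft1(self.conv1(x)))
        x = self.pool2(self.soft2(self.conv2(x)))
        x = self.pool3(self.soft3(self.conv3(x)))
        x = self.flatten(x)
        x = self.soft5(self.fc1(x))
        x = self.soft6(self.fc2(x))
        return self.fc3(x)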
Example #3
    def __init__(self, outputs, inputs):
        super(BBBLeNet, self).__init__()

        self.num_classes = outputs

        self.conv1 = BBBConv2d(inputs,
                               6,
                               5,
                               alpha_shape=(1, 1),
                               padding=0,
                               bias=False)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = BBBConv2d(6,
                               16,
                               5,
                               alpha_shape=(1, 1),
                               padding=0,
                               bias=False)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.flatten = FlattenLayer(5 * 5 * 16)
        self.fc1 = BBBLinear(5 * 5 * 16, 120, alpha_shape=(1, 1), bias=False)
        self.soft3 = nn.Softplus()

        self.fc2 = BBBLinear(120, 84, alpha_shape=(1, 1), bias=False)
        self.soft4 = nn.Softplus()

        self.fc3 = BBBLinear(84, outputs, alpha_shape=(1, 1), bias=False)
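
# Hedged usage sketch: BBB layers resample weights on every call, so repeated
# forward passes give a Monte Carlo estimate of the predictive distribution.
# BBBLeNet(outputs=10, inputs=1) is an illustrative instantiation, and it
# assumes the class defines the usual sequential forward() (omitted above).
import torch
import torch.nn.functional as F

model = BBBLeNet(outputs=10, inputs=1)
x = torch.randn(4, 1, 32, 32)  # the 5 * 5 * 16 flatten implies 32x32 inputs
samples = torch.stack([F.softmax(model(x), dim=1) for _ in range(20)])
mean_probs = samples.mean(dim=0)            # predictive mean
epistemic = samples.var(dim=0).sum(dim=1)   # per-example uncertainty proxy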
Example #4
    def __init__(self, outputs, inputs, init_log_noise):
        super(BBB3Conv3FC_1D, self).__init__()

        self.outputs = outputs

        self.conv1 = BBBConv1d(inputs,
                               32,
                               5,
                               alpha_shape=(1, 1),
                               padding=2,
                               bias=False,
                               name='conv1')
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool1d(kernel_size=3, stride=2)

        self.conv2 = BBBConv1d(32,
                               64,
                               5,
                               alpha_shape=(1, 1),
                               padding=2,
                               bias=False,
                               name='conv2')
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool1d(kernel_size=3, stride=2)

        self.conv3 = BBBConv1d(64,
                               128,
                               5,
                               alpha_shape=(1, 1),
                               padding=1,
                               bias=False,
                               name='conv3')
        self.soft3 = nn.Softplus()
        self.pool3 = nn.MaxPool1d(kernel_size=3, stride=2)

        # shape at this point: [500, 128, 14] = (batch, channels, length)

        self.flatten = FlattenLayer(14 * 128)
        self.fc1 = BBBLinear(14 * 128,
                             1000,
                             alpha_shape=(1, 1),
                             bias=False,
                             name='fc1')
        self.soft5 = nn.Softplus()

        self.fc2 = BBBLinear(1000,
                             1000,
                             alpha_shape=(1, 1),
                             bias=False,
                             name='fc2')
        self.soft6 = nn.Softplus()

        self.fc3 = BBBLinear(1000,
                             outputs,
                             alpha_shape=(1, 1),
                             bias=False,
                             name='fc3')

        self.log_noise = nn.Parameter(torch.cuda.FloatTensor([init_log_noise]))
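
    # Why init_log_noise: for 1-D regression the model learns a homoscedastic
    # observation noise. A standard Gaussian negative log-likelihood built on
    # it might look like this (a sketch; the repo's loss is not shown here):
    def gaussian_nll(self, pred, target):
        sigma2 = torch.exp(2 * self.log_noise)  # sigma^2, with log_noise = log(sigma)
        return (0.5 * torch.log(2 * torch.pi * sigma2)
                + (target - pred) ** 2 / (2 * sigma2)).sum()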
Example #5
    def __init__(self, outputs, inputs):
        super(BBBAlexNet, self).__init__()

        self.num_classes = outputs

        self.conv1 = BBBConv2d(inputs, 64, 11, alpha_shape=(1,1), stride=4, padding=5, bias=False, name='conv1')
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = BBBConv2d(64, 192, 5, alpha_shape=(1,1), padding=2, bias=False, name='conv2')
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv3 = BBBConv2d(192, 384, 3, alpha_shape=(1,1), padding=1, bias=False, name='conv3')
        self.soft3 = nn.Softplus()

        self.conv4 = BBBConv2d(384, 256, 3, alpha_shape=(1,1), padding=1, bias=False, name='conv4')
        self.soft4 = nn.Softplus()

        self.conv5 = BBBConv2d(256, 128, 3, alpha_shape=(1,1), padding=1, bias=False, name='conv5')
        self.soft5 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.flatten = FlattenLayer(1 * 1 * 128)
        self.classifier = BBBLinear(1 * 1 * 128, outputs, alpha_shape=(1,1), bias=False, name='classifier')
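
# Shape check (an inference, not stated above): the 1 * 1 * 128 flatten size
# assumes 32x32 inputs, e.g. CIFAR, per the standard conv/pool output-size formula.
def out_size(n, k, s=1, p=0):
    return (n + 2 * p - k) // s + 1

n = out_size(32, 11, s=4, p=5)  # conv1 -> 8
n = out_size(n, 2, s=2)         # pool1 -> 4
n = out_size(n, 5, p=2)         # conv2 -> 4
n = out_size(n, 2, s=2)         # pool2 -> 2
n = out_size(n, 3, p=1)         # conv3 (conv4, conv5 also preserve size) -> 2
n = out_size(n, 2, s=2)         # pool3 -> 1
assert n == 1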
Example #6
    def test_linear(self):
        batch_size = np.random.randint(1, 256)
        batch = torch.randn((batch_size, 128))

        layer = BBBLinear(128, 64, alpha_shape=(1, 1), bias=False)
        batch = layer(batch)

        assert batch.shape[0] == batch_size
        assert batch.shape[1] == 64
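
    # A companion check for the conv layer, assuming BBBConv2d follows the
    # same calling convention used throughout these examples (this test is
    # not part of the original suite):
    def test_conv(self):
        batch_size = np.random.randint(1, 64)
        batch = torch.randn((batch_size, 3, 32, 32))

        layer = BBBConv2d(3, 16, 3, alpha_shape=(1, 1), padding=1, bias=False)
        batch = layer(batch)

        assert batch.shape == (batch_size, 16, 32, 32)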
Example #7
    def __init__(self, outputs, inputs, init_log_noise):
        super(BBB3Liner, self).__init__()

        self.outputs = outputs

        self.fc1 = BBBLinear(inputs,
                             100,
                             alpha_shape=(1, 1),
                             bias=False,
                             name='fc1')
        self.soft5 = nn.Softplus()

        # self.fc2 = BBBLinear(100, 100, alpha_shape=(1,1), bias=False, name='fc2')
        # self.soft6 = nn.Softplus()

        self.fc3 = BBBLinear(100,
                             outputs,
                             alpha_shape=(1, 1),
                             bias=False,
                             name='fc3')

        self.log_noise = nn.Parameter(torch.cuda.FloatTensor([init_log_noise]))
Example #8
    def __init__(self, outputs, inputs):
        super(BBB3Conv3FC, self).__init__()

        self.num_classes = outputs

        self.conv1 = BBBConv2d(inputs,
                               16,
                               3,
                               alpha_shape=(1, 1),
                               stride=1,
                               padding=1,
                               bias=True,
                               name='conv1')
        self.bn1 = nn.BatchNorm2d(16)
        self.activate1 = nn.PReLU()
        self.dropout1 = nn.Dropout2d(p=0.25)

        self.conv2 = BBBConv2d(16,
                               16,
                               3,
                               alpha_shape=(1, 1),
                               stride=1,
                               padding=1,
                               bias=True,
                               name='conv2')
        self.bn2 = nn.BatchNorm2d(16)
        self.activate2 = nn.PReLU()
        self.dropout2 = nn.Dropout2d(p=0.25)

        self.conv3 = BBBConv2d(16,
                               32,
                               3,
                               alpha_shape=(1, 1),
                               stride=1,
                               padding=1,
                               bias=True,
                               name='conv3')
        self.bn3 = nn.BatchNorm2d(32)
        self.activate3 = nn.PReLU()
        self.dropout3 = nn.Dropout2d(p=0.25)

        self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2)

        self.conv4 = BBBConv2d(33,
                               32,
                               3,
                               alpha_shape=(1, 1),
                               stride=1,
                               padding=1,
                               bias=True,
                               name='conv4')
        self.bn4 = nn.BatchNorm2d(32)
        self.activate4 = nn.PReLU()
        self.dropout4 = nn.Dropout2d(p=0.25)

        self.conv5 = BBBConv2d(32,
                               32,
                               3,
                               alpha_shape=(1, 1),
                               stride=1,
                               padding=1,
                               bias=True,
                               name='conv5')
        self.bn5 = nn.BatchNorm2d(32)
        self.activate5 = nn.PReLU()
        self.dropout5 = nn.Dropout2d(p=0.25)

        self.conv6 = BBBConv2d(32,
                               64,
                               3,
                               alpha_shape=(1, 1),
                               stride=1,
                               padding=1,
                               bias=True,
                               name='conv6')
        self.bn6 = nn.BatchNorm2d(64)
        self.activate6 = nn.PReLU()
        self.dropout6 = nn.Dropout2d(p=0.25)

        self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)

        self.flatten = FlattenLayer(8 * 5 * 97)

        self.fc1 = BBBLinear(8 * 5 * 97,
                             512,
                             alpha_shape=(1, 1),
                             bias=True,
                             name='fc1')
        self.fc1_bn = nn.BatchNorm1d(512)
        self.fc1_activate = nn.PReLU()
        self.fc1_dropout = nn.Dropout(p=0.50)

        self.fc2 = BBBLinear(512,
                             256,
                             alpha_shape=(1, 1),
                             bias=True,
                             name='fc2')
        self.fc2_bn = nn.BatchNorm1d(256)
        self.fc2_activate = nn.Softplus()
        self.fc2_dropout = nn.Dropout(p=0.50)
        #
        # self.fc3 = BBBLinear(256, 128, alpha_shape=(1, 1), bias=True, name='fc3')
        # self.fc3_bn = nn.BatchNorm1d(128)
        # self.fc3_activate = nn.Softplus()
        # self.fc3_dropout = nn.Dropout(p=0.50)
        #
        # self.fc4 = BBBLinear(128, 64, alpha_shape=(1, 1), bias=True, name='fc4')
        # self.fc4_bn = nn.BatchNorm1d(64)
        # self.fc4_activate = nn.Softplus()
        # self.fc4_dropout = nn.Dropout(p=0.50)

        self.fc5 = BBBLinear(256,
                             outputs,
                             alpha_shape=(1, 1),
                             bias=True,
                             name='fc5')
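
# Shape inference (not stated in the snippet): the two stride-2 average pools
# quarter each spatial dimension, so FlattenLayer(8 * 5 * 97) implies 32x20
# inputs. The channel counts 33 = 32 + 1 into conv4 and 97 = 64 + 32 + 1 at
# the flatten (vs. conv6's 64 outputs) suggest the omitted forward()
# concatenates the 1-channel input and intermediate maps along dim=1.
assert (8 * 2**2, 5 * 2**2) == (32, 20) and 32 + 1 == 33 and 64 + 32 + 1 == 97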
Example #9
    def __init__(self, outputs, inputs):
        super(BBB3Conv3FC, self).__init__()

        self.num_classes = outputs

        self.conv1 = BBBConv2d(inputs, 32, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv1')
        self.bn1 = nn.BatchNorm2d(32)
        self.activate1 = nn.ELU()
        self.drop1 = nn.Dropout2d(0.25)

        self.conv2 = BBBConv2d(32, 64, 5, alpha_shape=(1, 1), stride=1, padding=2, bias=True, name='conv2')
        self.bn2 = nn.BatchNorm2d(64)
        self.activate2 = nn.ELU()
        self.drop2 = nn.Dropout2d(0.25)

        self.pool1 = BBBConv2d(64, 64, 2, alpha_shape=(1, 1), stride=2, padding=0, bias=True, name='pool1')
        self.pool1_bn = nn.BatchNorm2d(64)
        self.pool1_activate = nn.ELU()
        self.pool1_drop = nn.Dropout2d(0.25)

        self.conv4 = BBBConv2d(64, 128, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv4')
        self.bn4 = nn.BatchNorm2d(128)
        self.activate4 = nn.ELU()
        self.drop4 = nn.Dropout2d(0.25)

        self.conv5 = BBBConv2d(128, 256, 5, alpha_shape=(1, 1), stride=1, padding=2, bias=True, name='conv5')
        self.bn5 = nn.BatchNorm2d(256)
        self.activate5 = nn.ELU()
        self.drop5 = nn.Dropout2d(0.25)

        self.pool2 = BBBConv2d(256, 256, 2, alpha_shape=(1, 1), stride=2, padding=0, bias=True, name='pool2')
        self.pool2_bn = nn.BatchNorm2d(256)
        self.pool2_activate = nn.ELU()
        self.pool2_drop = nn.Dropout2d(0.25)

        self.conv7 = BBBConv2d(256, 384, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv7')
        self.bn7 = nn.BatchNorm2d(384)
        self.activate7 = nn.ELU()
        self.drop7 = nn.Dropout2d(0.25)

        self.conv8 = BBBConv2d(384, 512, 5, alpha_shape=(1, 1), stride=1, padding=2, bias=True, name='conv8')
        self.bn8 = nn.BatchNorm2d(512)
        self.activate8 = nn.ELU()
        self.drop8 = nn.Dropout2d(0.25)

        self.pool3 = BBBConv2d(512, 512, 2, alpha_shape=(1, 1), stride=2, padding=0, bias=True, name='pool3')
        self.pool3_bn = nn.BatchNorm2d(512)
        self.pool3_activate = nn.ELU()
        self.pool3_drop = nn.Dropout2d(0.25)

        self.conv10 = BBBConv2d(512, 640, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv10')
        self.bn10 = nn.BatchNorm2d(640)
        self.activate10 = nn.ELU()
        self.drop10 = nn.Dropout2d(0.25)

        self.conv11 = BBBConv2d(640, 768, 5, alpha_shape=(1, 1), stride=1, padding=2, bias=True, name='conv11')
        self.bn11 = nn.BatchNorm2d(768)
        self.activate11 = nn.ELU()
        self.drop11 = nn.Dropout2d(0.25)

        self.pool4 = BBBConv2d(768, 768, 2, alpha_shape=(1, 1), stride=2, padding=0, bias=True, name='pool4')
        self.pool4_bn = nn.BatchNorm2d(768)
        self.pool4_activate = nn.ELU()
        self.pool4_drop = nn.Dropout2d(0.25)

        self.flatten = FlattenLayer(2 * 8 * 768)

        self.fc1 = BBBLinear(2 * 8 * 768, 512, alpha_shape=(1, 1), bias=True, name='fc1')
        self.fc1_bn = nn.BatchNorm1d(512)
        self.fc1_activate = nn.ELU()
        self.fc1_drop = nn.Dropout(0.50)

        self.fc2 = BBBLinear(512, 256, alpha_shape=(1, 1), bias=True, name='fc2')
        self.fc2_bn = nn.BatchNorm1d(256)
        self.fc2_activate = nn.ELU()
        self.fc2_drop = nn.Dropout(0.50)

        self.fc3 = BBBLinear(256, 64, alpha_shape=(1, 1), bias=True, name='fc3')
        self.fc3_bn = nn.BatchNorm1d(64)
        self.fc3_activate = nn.ELU()
        self.fc3_drop = nn.Dropout(0.50)

        self.fc4 = BBBLinear(64, outputs, alpha_shape=(1, 1), bias=True, name='fc4')
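
# This variant learns its own downsampling: each stride-2 BBBConv2d "pool"
# halves both spatial dimensions just like the max pools it replaces. Working
# backwards from FlattenLayer(2 * 8 * 768), four halvings imply 32x128 inputs
# (an inference from the shapes, not stated above).
assert (2 * 2**4, 8 * 2**4) == (32, 128)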
Example #10
    def __init__(self, outputs, inputs):
        super(BBB3Conv3FC, self).__init__()

        self.num_classes = outputs

        self.layer1 = nn.Sequential(
            BBBConv2d(inputs,
                      16,
                      3,
                      alpha_shape=(1, 1),
                      stride=1,
                      padding=1,
                      bias=True,
                      name='conv1'), nn.BatchNorm2d(16), nn.ReLU(),
            nn.Dropout2d(p=0.25))

        self.layer2 = nn.Sequential(
            BBBConv2d(16,
                      32,
                      3,
                      alpha_shape=(1, 1),
                      stride=1,
                      padding=1,
                      bias=True,
                      name='conv1_1'), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Dropout2d(p=0.25))

        self.layer3 = nn.Sequential(nn.AvgPool2d(kernel_size=2, stride=2))

        self.layer4 = nn.Sequential(
            BBBConv2d(48,
                      64,
                      3,
                      alpha_shape=(1, 1),
                      stride=1,
                      padding=1,
                      bias=True,
                      name='conv2'), nn.BatchNorm2d(64), nn.ReLU(),
            nn.Dropout2d(p=0.25))

        self.layer5 = nn.Sequential(
            BBBConv2d(64,
                      128,
                      3,
                      alpha_shape=(1, 1),
                      stride=1,
                      padding=1,
                      bias=True,
                      name='conv2_1'), nn.BatchNorm2d(128), nn.ReLU(),
            nn.Dropout2d(p=0.25))

        self.layer6 = nn.Sequential(nn.AvgPool2d(kernel_size=2, stride=2))

        self.layer7 = nn.Sequential(
            BBBConv2d(192,
                      256,
                      3,
                      alpha_shape=(1, 1),
                      stride=1,
                      padding=1,
                      bias=True,
                      name='conv3'), nn.BatchNorm2d(256), nn.ReLU(),
            nn.Dropout2d(p=0.25))

        self.layer8 = nn.Sequential(
            BBBConv2d(256,
                      512,
                      3,
                      alpha_shape=(1, 1),
                      stride=1,
                      padding=1,
                      bias=True,
                      name='conv3_1'), nn.BatchNorm2d(512), nn.ReLU(),
            nn.Dropout2d(p=0.25))

        self.layer9 = nn.Sequential(nn.AvgPool2d(kernel_size=2, stride=2))

        self.layer10 = nn.Sequential(
            BBBConv2d(768,
                      512,
                      3,
                      alpha_shape=(1, 1),
                      stride=1,
                      padding=1,
                      bias=True,
                      name='conv4'), nn.BatchNorm2d(512), nn.ReLU(),
            nn.Dropout2d(p=0.25))

        self.layer11 = nn.Sequential(
            BBBConv2d(512,
                      512,
                      3,
                      alpha_shape=(1, 1),
                      stride=1,
                      padding=1,
                      bias=True,
                      name='conv4_1'), nn.BatchNorm2d(512), nn.ReLU(),
            nn.Dropout2d(p=0.25))

        self.layer12 = nn.Sequential(nn.AvgPool2d(kernel_size=2, stride=2))

        self.layer13 = nn.Sequential(
            FlattenLayer(2 * 8 * 1024),
            BBBLinear(2 * 8 * 1024,
                      512,
                      alpha_shape=(1, 1),
                      bias=True,
                      name='fc1'), nn.BatchNorm1d(512), nn.ReLU(),
            nn.Dropout(p=0.25),
            BBBLinear(512, 256, alpha_shape=(1, 1), bias=True, name='fc2'),
            nn.BatchNorm1d(256), nn.ReLU(), nn.Dropout(p=0.25),
            BBBLinear(256, outputs, alpha_shape=(1, 1), bias=True, name='fc3'))
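
    # The in-channel counts (48 = 16 + 32, 192 = 64 + 128, 768 = 256 + 512,
    # and 1024 = 512 + 512 at the flatten) imply the omitted forward()
    # concatenates each pair of blocks before pooling. A hedged
    # reconstruction, not the author's code:
    def forward(self, x):
        a = self.layer1(x)
        b = self.layer2(a)
        x = self.layer3(torch.cat([a, b], dim=1))   # 16 + 32 = 48
        a = self.layer4(x)
        b = self.layer5(a)
        x = self.layer6(torch.cat([a, b], dim=1))   # 64 + 128 = 192
        a = self.layer7(x)
        b = self.layer8(a)
        x = self.layer9(torch.cat([a, b], dim=1))   # 256 + 512 = 768
        a = self.layer10(x)
        b = self.layer11(a)
        x = self.layer12(torch.cat([a, b], dim=1))  # 512 + 512 = 1024
        return self.layer13(x)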
Example #11
    def __init__(self, outputs, inputs):
        super(BBB3Conv3FC, self).__init__()

        self.num_classes = outputs

        self.conv1 = BBBConv2d(inputs, 16, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv1')
        self.bn1 = nn.BatchNorm2d(16)
        self.activate1 = nn.PReLU()
        self.dropout1 = nn.Dropout2d(p=0.25)

        self.conv2 = BBBConv2d(16, 32, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv2')
        self.bn2 = nn.BatchNorm2d(32)
        self.activate2 = nn.PReLU()
        self.dropout2 = nn.Dropout2d(p=0.25)

        self.conv2_1 = BBBConv2d(32, 32, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv2_1')
        self.bn2_1 = nn.BatchNorm2d(32)
        self.activate2_1 = nn.PReLU()
        self.dropout2_1 = nn.Dropout2d(p=0.25)

        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv3 = BBBConv2d(32, 64, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv3')
        self.bn3 = nn.BatchNorm2d(64)
        self.activate3 = nn.PReLU()
        self.dropout3 = nn.Dropout2d(p=0.25)

        self.conv4 = BBBConv2d(64, 128, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv4')
        self.bn4 = nn.BatchNorm2d(128)
        self.activate4 = nn.PReLU()
        self.dropout4 = nn.Dropout2d(p=0.25)

        self.conv4_1 = BBBConv2d(128, 128, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv4_1')
        self.bn4_1 = nn.BatchNorm2d(128)
        self.activate4_1 = nn.PReLU()
        self.dropout4_1 = nn.Dropout2d(p=0.25)

        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv5 = BBBConv2d(128, 256, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv5')
        self.bn5 = nn.BatchNorm2d(256)
        self.activate5 = nn.PReLU()
        self.dropout5 = nn.Dropout2d(p=0.25)

        self.conv6 = BBBConv2d(256, 512, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv6')
        self.bn6 = nn.BatchNorm2d(512)
        self.activate6 = nn.PReLU()
        self.dropout6 = nn.Dropout2d(p=0.25)

        self.conv6_1 = BBBConv2d(512, 512, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv6_1')
        self.bn6_1 = nn.BatchNorm2d(512)
        self.activate6_1 = nn.PReLU()
        self.dropout6_1 = nn.Dropout2d(p=0.25)

        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv7 = BBBConv2d(512, 512, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv7')
        self.bn7 = nn.BatchNorm2d(512)
        self.activate7 = nn.PReLU()
        self.dropout7 = nn.Dropout2d(p=0.25)

        self.conv8 = BBBConv2d(512, 512, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv8')
        self.bn8 = nn.BatchNorm2d(512)
        self.activate8 = nn.PReLU()
        self.dropout8 = nn.Dropout2d(p=0.25)

        self.conv8_1 = BBBConv2d(512, 512, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv8_1')
        self.bn8_1 = nn.BatchNorm2d(512)
        self.activate8_1 = nn.PReLU()
        self.dropout8_1 = nn.Dropout2d(p=0.25)

        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv9 = BBBConv2d(512, 512, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv9')
        self.bn9 = nn.BatchNorm2d(512)
        self.activate9 = nn.PReLU()
        self.dropout9 = nn.Dropout2d(p=0.25)

        self.conv10 = BBBConv2d(512, 512, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv10')
        self.bn10 = nn.BatchNorm2d(512)
        self.activate10 = nn.PReLU()
        self.dropout10 = nn.Dropout2d(p=0.25)

        self.conv10_1 = BBBConv2d(512, 512, 3, alpha_shape=(1, 1), stride=1, padding=1, bias=True, name='conv10_1')
        self.bn10_1 = nn.BatchNorm2d(512)
        self.activate10_1 = nn.PReLU()
        self.dropout10_1 = nn.Dropout2d(p=0.25)

        self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.flatten = FlattenLayer(2 * 2 * 512)

        self.fc1 = BBBLinear(2 * 2 * 512, 1024, alpha_shape=(1, 1), bias=True, name='fc1')
        self.fc1_bn = nn.BatchNorm1d(1024)
        self.fc1_activate = nn.PReLU()
        self.fc1_dropout = nn.Dropout(p=0.25)

        self.fc2 = BBBLinear(1024, 512, alpha_shape=(1, 1), bias=True, name='fc2')
        self.fc2_bn = nn.BatchNorm1d(512)
        self.fc2_activate = nn.PReLU()
        self.fc2_dropout = nn.Dropout(p=0.25)

        self.fc3 = BBBLinear(512, 256, alpha_shape=(1, 1), bias=True, name='fc3')
        self.fc3_bn = nn.BatchNorm1d(256)
        self.fc3_activate = nn.PReLU()
        self.fc3_dropout = nn.Dropout(p=0.25)

        self.fc4 = BBBLinear(256, outputs, alpha_shape=(1, 1), bias=True, name='fc4')
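
    # The snippet again omits forward(); since the modules above are declared
    # in execution order, a minimal (assumed) forward can simply chain the
    # children. The 2 * 2 * 512 flatten implies 64x64 inputs (five halvings).
    def forward(self, x):
        for module in self.children():
            x = module(x)
        return x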