Code example #1
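All of the examples below assume the same surrounding imports. The Bayesian building blocks (BBBConv2d, BBBConv1d, BBBLinearFactorial, FlattenLayer, Flatten, and the ResNet helpers BasicBlock, Bottleneck, conv1x1) are project-specific; the source does not show their import paths, so they are left as a placeholder comment:

import math

import torch
import torch.nn as nn
import torch.nn.functional as F

# BBBConv2d, BBBConv1d, BBBLinearFactorial, FlattenLayer, Flatten,
# BasicBlock, Bottleneck, conv1x1: project-specific modules; their
# import paths are not shown in the source and are left unresolved here.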
class BBB3Conv3FC(nn.Module):
    def __init__(self, outputs, inputs):
        super(BBB3Conv3FC, self).__init__()
        self.conv1 = BBBConv2d(inputs, 32, 5, stride=1, padding=2)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv2 = BBBConv2d(32, 64, 5, stride=1, padding=2)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv3 = BBBConv2d(64, 128, 5, stride=1, padding=1)
        self.soft3 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.flatten = FlattenLayer(2 * 2 * 128)
        self.fc1 = BBBLinearFactorial(2 * 2 * 128, 1000)
        self.soft5 = nn.Softplus()

        self.fc2 = BBBLinearFactorial(1000, 1000)
        self.soft6 = nn.Softplus()

        self.fc3 = BBBLinearFactorial(1000, outputs)

        layers = [
            self.conv1, self.soft1, self.pool1, self.conv2, self.soft2,
            self.pool2, self.conv3, self.soft3, self.pool3, self.flatten,
            self.fc1, self.soft5, self.fc2, self.soft6, self.fc3
        ]

        self.layers = nn.ModuleList(layers)
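Every model in this collection only builds an nn.ModuleList; the loop that consumes it appears later, in the _ClassifierD and _BayesianAlexNetD examples. A minimal sketch of that pattern, assuming the convprobforward/fcprobforward interface those examples use:

    def probforward(self, x):
        'Sketch only; mirrors the forward passes shown in later examples.'
        kl = 0
        for layer in self.layers:
            if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
                x, _kl = layer.convprobforward(x)  # Bayesian conv: output plus its KL term
                kl += _kl
            elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward):
                x, _kl = layer.fcprobforward(x)    # Bayesian linear: output plus its KL term
                kl += _kl
            else:
                x = layer(x)                       # deterministic layer (activation, pooling, ...)
        return x, kl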
Code example #2
class _BayesianLeNetD(nn.Module):
    def __init__(self, outputs, inputs):
        super(_BayesianLeNetD, self).__init__()

        self.outputs = outputs
        # Due to the conv-layer change we have to assume the log-variance initial values.
        self.q_logvar_init = 0.05
        self.p_logvar_init = math.log(0.05)

        self.conv1 = BBBConv2d(self.q_logvar_init, self.p_logvar_init, inputs, 6, 5, stride=1)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = BBBConv2d(self.q_logvar_init, self.p_logvar_init, 6, 16, 5, stride=1)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.flatten = FlattenLayer(5 * 5 * 16)
        self.fc1 = BBBLinearFactorial(self.q_logvar_init, self.p_logvar_init, 5 * 5 * 16, 120)
        self.soft3 = nn.Softplus()

        self.fc2 = BBBLinearFactorial(self.q_logvar_init, self.p_logvar_init, 120, 84)
        self.soft4 = nn.Softplus()

        self.fc3 = BBBLinearFactorial(self.q_logvar_init, self.p_logvar_init, 84, outputs)

        layers = [self.conv1, self.soft1, self.pool1, self.conv2, self.soft2, self.pool2,
                  self.flatten, self.fc1, self.soft3, self.fc2, self.soft4, self.fc3]

        # Not sure if this is right; test drive.
        self.prob = nn.Sigmoid()
        if outputs == 1:
            layers.append(self.prob)

        self.layers = nn.ModuleList(layers)
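FlattenLayer(5 * 5 * 16) pins down the expected input resolution: two valid 5x5 convolutions and two 2x2 max-poolings shrink a 32x32 image, the classic LeNet setting (an assumption here), to 5x5. A quick check of that arithmetic with a hypothetical helper:

def conv_out(size, kernel, stride=1, padding=0):
    # standard output-size formula for convolution and pooling
    return (size + 2 * padding - kernel) // stride + 1

s = 32                        # assumed 32x32 input
s = conv_out(s, 5)            # conv1, 5x5 valid -> 28
s = conv_out(s, 2, stride=2)  # pool1 -> 14
s = conv_out(s, 5)            # conv2, 5x5 valid -> 10
s = conv_out(s, 2, stride=2)  # pool2 -> 5
assert s * s * 16 == 5 * 5 * 16   # matches FlattenLayer(5 * 5 * 16)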
Code example #3
class Hybrid_5Conv3FC(nn.Module):
    def __init__(self, outputs, inputs):
        super(Hybrid_5Conv3FC, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(inputs, 96, kernel_size=11, stride=4),
            nn.ReLU(inplace=True),
            # nn.Dropout(p=0.5),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            # nn.Dropout(p=0.5),
            nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.flatten = FlattenLayer(6 * 6 * 256)
        self.fc1 = BBBLinearFactorial(6 * 6 * 256, 4096)
        self.dropout1 = nn.Dropout(p=0.5)
        self.fc2 = BBBLinearFactorial(4096, 4096)
        self.dropout2 = nn.Dropout(p=0.5)
        self.fc3 = BBBLinearFactorial(4096, outputs)

        layers = [
            self.flatten, self.fc1, self.dropout1, self.fc2, self.dropout2,
            self.fc3
        ]

        self.layers = nn.ModuleList(layers)
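The Hybrid_* models split the network: a deterministic nn.Sequential trunk in self.features, and only the flattening and Bayesian fully connected layers in self.layers. The source omits the matching forward pass; a sketch of what the split implies:

    def probforward(self, x):
        kl = 0
        x = self.features(x)           # deterministic trunk, contributes no KL
        for layer in self.layers:      # FlattenLayer, Bayesian FC layers, dropout
            if hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward):
                x, _kl = layer.fcprobforward(x)
                kl += _kl
            else:
                x = layer(x)
        return x, kl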
Code example #4
class BBBLeNet(nn.Module):
    def __init__(self, outputs, inputs):
        super(BBBLeNet, self).__init__()
        self.conv1 = BBBConv2d(inputs, 6, 5, stride=1)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = BBBConv2d(6, 16, 5, stride=1)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.flatten = FlattenLayer(5 * 5 * 16)
        self.fc1 = BBBLinearFactorial(5 * 5 * 16, 120)
        self.soft3 = nn.Softplus()

        self.fc2 = BBBLinearFactorial(120, 84)
        self.soft4 = nn.Softplus()

        self.fc3 = BBBLinearFactorial(84, outputs)

        layers = [
            self.conv1, self.soft1, self.pool1, self.conv2, self.soft2,
            self.pool2, self.flatten, self.fc1, self.soft3, self.fc2,
            self.soft4, self.fc3
        ]

        self.layers = nn.ModuleList(layers)
Code example #5
class _ClassifierD(nn.Module):
    '''
    The ACGAN part: this module performs the classification.
    '''
    def __init__(self, outputs, inputs):
        super(_ClassifierD, self).__init__()
        self.outputs = outputs
        # Due to the conv-layer change we have to assume the log-variance initial values.
        self.q_logvar_init = 0.05
        self.p_logvar_init = math.log(0.05)

        self.conv1 = BBBConv2d(self.q_logvar_init, self.p_logvar_init, inputs, 6, 5, stride=1)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = BBBConv2d(self.q_logvar_init, self.p_logvar_init, 6, 16, 5, stride=1)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.flatten = FlattenLayer(5 * 5 * 16)
        self.fc1 = BBBLinearFactorial(self.q_logvar_init, self.p_logvar_init, 5 * 5 * 16, 120)
        self.soft3 = nn.Softplus()

        self.fc2 = BBBLinearFactorial(self.q_logvar_init, self.p_logvar_init, 120, 84)
        self.soft4 = nn.Softplus()

        # self.fc3 = BBBLinearFactorial(self.q_logvar_init, self.p_logvar_init, 84, outputs)

        self.fcA = BBBLinearFactorial(self.q_logvar_init, self.p_logvar_init, 84, 1)
        self.fcB = BBBLinearFactorial(self.q_logvar_init, self.p_logvar_init, 84, 1)
        layers = [self.conv1, self.soft1, self.pool1, self.conv2, self.soft2, self.pool2,
                  self.flatten, self.fc1, self.soft3, self.fc2, self.soft4]

        # Not sure if this is right; test drive.
        self.prob = nn.Sigmoid()
        if outputs == 1:
            layers.append(self.prob)

        self.layers = nn.ModuleList(layers)

    def forward(self, x):  # elsewhere this method is named probforward
        'Forward pass with Bayesian weights'
        kl = 0
        for layer in self.layers:
            if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
                x, _kl = layer.convprobforward(x)
                kl += _kl

            elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward):
                x, _kl = layer.fcprobforward(x)
                kl += _kl
            else:
                x = layer(x)
        #logits = x
        #return logits, kl
        logitsA, klA = self.fcA.fcprobforward(x)
        logitsB, klB = self.fcB.fcprobforward(x)
        return logitsA, logitsB, klA + kl, klB + kl
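_ClassifierD returns two single-unit heads, with the shared trunk KL folded into both klA and klB. A hypothetical training step for this ACGAN-style discriminator; the head roles (fcA scoring real/fake, fcB the auxiliary label) and the KL weight beta are assumptions the source never confirms:

bce = nn.BCEWithLogitsLoss()

def d_step(netD, x, src_labels, aux_labels, beta=1.0):
    logitsA, logitsB, klA, klB = netD(x)
    # klA and klB each already contain the shared trunk KL, so their sum
    # counts the trunk twice; beta can absorb that scaling.
    return (bce(logitsA, src_labels) + bce(logitsB, aux_labels)
            + beta * (klA + klB))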
Code example #6
class Hybrid_6Conv3FC(nn.Module):
    def __init__(self, outputs, inputs):
        super(Hybrid_6Conv3FC, self).__init__()

        self.features = nn.Sequential(
            # Conv Layer block 1
            nn.Conv2d(in_channels=inputs,
                      out_channels=32,
                      kernel_size=3,
                      padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32,
                      out_channels=64,
                      kernel_size=3,
                      padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Conv Layer block 2
            nn.Conv2d(in_channels=64,
                      out_channels=128,
                      kernel_size=3,
                      padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128,
                      out_channels=128,
                      kernel_size=3,
                      padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Conv Layer block 3
            nn.Conv2d(in_channels=128,
                      out_channels=256,
                      kernel_size=3,
                      padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256,
                      out_channels=256,
                      kernel_size=3,
                      padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        self.flatten = FlattenLayer(4 * 4 * 256)
        self.fc1 = BBBLinearFactorial(4 * 4 * 256, 1024)
        self.soft1 = nn.Softplus()
        self.fc2 = BBBLinearFactorial(1024, 512)
        self.soft2 = nn.Softplus()
        self.dropout2 = nn.Dropout(p=0.1)
        self.fc3 = BBBLinearFactorial(512, outputs)

        layers = [
            self.flatten, self.fc1, self.soft1, self.fc2, self.soft2,
            self.dropout2, self.fc3
        ]
        self.layers = nn.ModuleList(layers)
Code example #7
class BBBAlexNet(nn.Module):
    def __init__(self, outputs, inputs):
        super(BBBAlexNet, self).__init__()
        # self.conv1 = BBBConv2d(inputs, 64, kernel_size=11, stride=4, padding=5)
        # self.soft1 = nn.Softplus()
        # self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        #
        # self.conv2 = BBBConv2d(64, 192, kernel_size=5, padding=2)
        # self.soft2 = nn.Softplus()
        # self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        #
        # self.conv3 = BBBConv2d(192, 384, kernel_size=3, padding=1)
        # self.soft3 = nn.Softplus()
        #
        # self.conv4 = BBBConv2d(384, 256, kernel_size=3, padding=1)
        # self.soft4 = nn.Softplus()
        #
        # self.conv5 = BBBConv2d(256, 128, kernel_size=3, padding=1)
        # self.soft5 = nn.Softplus()
        # self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        #
        # self.flatten = FlattenLayer(1 * 1 * 128)
        # self.fc1 = BBBLinearFactorial(1* 1 * 128, outputs)

        self.conv1 = BBBConv2d(inputs, 96, kernel_size=11, stride=4)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv2 = BBBConv2d(96, 256, kernel_size=5, stride=1, padding=2)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv3 = BBBConv2d(256, 384, kernel_size=3, stride=1, padding=1)
        self.soft3 = nn.Softplus()

        self.conv4 = BBBConv2d(384, 384, kernel_size=3, stride=1, padding=1)
        self.soft4 = nn.Softplus()

        self.conv5 = BBBConv2d(384, 256, kernel_size=3, stride=1, padding=1)
        self.soft5 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.flatten = FlattenLayer(6 * 6 * 256)
        self.fc1 = BBBLinearFactorial(6 * 6 * 256, 4096)
        self.dropout1 = nn.Dropout(p=0.5)
        self.fc2 = BBBLinearFactorial(4096, 4096)
        self.dropout2 = nn.Dropout(p=0.5)
        self.fc3 = BBBLinearFactorial(4096, outputs)

        layers = [
            self.conv1, self.soft1, self.pool1, self.conv2, self.soft2,
            self.pool2, self.conv3, self.soft3, self.conv4, self.soft4,
            self.conv5, self.soft5, self.pool3, self.flatten, self.fc1,
            self.dropout1, self.fc2, self.dropout2, self.fc3
        ]

        self.layers = nn.ModuleList(layers)
Code example #8
class BBBELUN2(nn.Module):
    def __init__(self, outputs, inputs):
        super(BBBELUN2, self).__init__()
        self.conv1 = BBBConv2d(inputs, 96, 6, stride=1)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = BBBConv2d(96, 512, 3, stride=1)
        self.soft2 = nn.Softplus()
        self.conv3 = BBBConv2d(512, 512, 3, stride=1)
        self.soft3 = nn.Softplus()
        self.conv4 = BBBConv2d(512, 512, 3, stride=1)
        self.soft4 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv5 = BBBConv2d(512, 768, 3, stride=1)
        self.soft5 = nn.Softplus()
        self.conv6 = BBBConv2d(768, 768, 3, stride=1)
        self.soft6 = nn.Softplus()
        self.conv7 = BBBConv2d(768, 768, 2, stride=1)
        self.soft7 = nn.Softplus()
        self.conv8 = BBBConv2d(768, 768, 2, stride=1)
        self.soft8 = nn.Softplus()
        self.conv9 = BBBConv2d(768, 768, 1, stride=1)
        self.soft9 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv10 = BBBConv2d(768, 1024, 3, stride=1)
        self.soft10 = nn.Softplus()
        self.conv11 = BBBConv2d(1024, 1024, 3, stride=1)
        self.soft11 = nn.Softplus()
        self.conv12 = BBBConv2d(1024, 1024, 3, stride=1)
        self.soft12 = nn.Softplus()
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.flatten = FlattenLayer(8 * 8 * 1024)
        self.fc1 = BBBLinearFactorial(8 * 8 * 1024, 4096)
        self.soft13 = nn.Softplus()

        self.fc2 = BBBLinearFactorial(4096, 4096)
        self.soft14 = nn.Softplus()

        self.fc3 = BBBLinearFactorial(4096, outputs)

        layers = [
            self.conv1, self.soft1, self.pool1, self.conv2, self.soft2,
            self.conv3, self.soft3, self.conv4, self.soft4, self.pool2,
            self.conv5, self.soft5, self.conv6, self.soft6, self.conv7,
            self.soft7, self.conv8, self.soft8, self.conv9, self.soft9,
            self.pool3, self.conv10, self.soft10, self.conv11, self.soft11,
            self.conv12, self.soft12, self.pool4, self.flatten, self.fc1,
            self.soft13, self.fc2, self.soft14, self.fc3
        ]

        self.layers = nn.ModuleList(layers)
Code example #9
class Hybrid_timeseries_1Conv2FC(nn.Module):
    def __init__(self, outputs, inputs):
        super(Hybrid_timeseries_1Conv2FC, self).__init__()

        self.features = nn.Sequential(
            nn.Conv1d(inputs, 64, kernel_size=11, stride=4, padding=5),
            nn.ReLU(inplace=True),
        )

        self.flatten = FlattenLayer(1 * 1 * 64)
        self.fc1 = BBBLinearFactorial(1 * 1 * 64, 512)
        self.fc2 = BBBLinearFactorial(512, outputs)

        layers = [self.flatten, self.fc1, self.fc2]

        self.layers = nn.ModuleList(layers)
Code example #10
class Hybrid_5Conv1FC(nn.Module):
    def __init__(self, outputs, inputs):
        super(Hybrid_5Conv1FC, self).__init__()

        self.features = nn.Sequential(
            nn.Conv2d(inputs, 64, kernel_size=11, stride=4, padding=5),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.fc1 = BBBLinearFactorial(256, outputs)

        layers = [self.fc1]
        self.layers = nn.ModuleList(layers)
Code example #11
class BBBAlexNet_5Conv1FC(nn.Module):
    def __init__(self, outputs, inputs):
        super(BBBAlexNet_5Conv1FC, self).__init__()
        self.conv1 = BBBConv2d(inputs, 64, kernel_size=11, stride=4, padding=5)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = BBBConv2d(64, 192, kernel_size=5, padding=2)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv3 = BBBConv2d(192, 384, kernel_size=3, padding=1)
        self.soft3 = nn.Softplus()

        self.conv4 = BBBConv2d(384, 256, kernel_size=3, padding=1)
        self.soft4 = nn.Softplus()

        self.conv5 = BBBConv2d(256, 256, kernel_size=3, padding=1)
        self.soft5 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.flatten = FlattenLayer(1 * 1 * 256)
        self.fc1 = BBBLinearFactorial(1 * 1 * 256, outputs)

        layers = [
            self.conv1, self.soft1, self.pool1, self.conv2, self.soft2,
            self.pool2, self.conv3, self.soft3, self.conv4, self.soft4,
            self.conv5, self.soft5, self.pool3, self.flatten, self.fc1
        ]

        self.layers = nn.ModuleList(layers)
Code example #12
class BBBAlexNetTimeSeries_1Conv2FC(nn.Module):
    def __init__(self, outputs, inputs):
        super(BBBAlexNetTimeSeries_1Conv2FC, self).__init__()

        self.conv1 = BBBConv1d(inputs, 64, kernel_size=11, stride=4, padding=5)
        self.soft1 = nn.Softplus()

        #self.conv2 = BBBConv1d(64, 192, kernel_size=5, padding=2)
        #self.soft2 = nn.Softplus()

        self.flatten = FlattenLayer(1 * 1 * 64)
        self.fc1 = BBBLinearFactorial(1 * 1 * 64, 512)
        #self.fc2 = BBBLinearFactorial(4096, 4096)
        self.fc3 = BBBLinearFactorial(512, outputs)

        layers = [self.conv1, self.soft1, self.flatten, self.fc1, self.fc3]

        self.layers = nn.ModuleList(layers)
Code example #13
class BBBAlexNet_6Conv3FC(nn.Module):
    def __init__(self, outputs, inputs):
        super(BBBAlexNet_6Conv3FC, self).__init__()

        self.conv1 = BBBConv2d(inputs, 32, kernel_size=3, stride=1, padding=1)
        self.norm1 = nn.BatchNorm2d(32)
        self.soft1 = nn.Softplus()
        self.conv2 = BBBConv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.soft2 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv3 = BBBConv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.norm2 = nn.BatchNorm2d(128)
        self.soft3 = nn.Softplus()
        self.conv4 = BBBConv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.soft4 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.dropout1 = nn.Dropout(p=0.05)

        self.conv5 = BBBConv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.norm3 = nn.BatchNorm2d(256)
        self.soft5 = nn.Softplus()
        self.conv6 = BBBConv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.soft6 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.dropout2 = nn.Dropout(p=0.1)
        self.flatten = FlattenLayer(4 * 4 * 256)
        self.fc1 = BBBLinearFactorial(4 * 4 * 256, 1024)
        self.soft7 = nn.Softplus()
        self.fc2 = BBBLinearFactorial(1024, 512)
        self.soft8 = nn.Softplus()
        self.dropout3 = nn.Dropout(p=0.1)
        self.fc3 = BBBLinearFactorial(512, outputs)
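        # Note: norm1, norm2, norm3, dropout1, and dropout2 are defined above
        # but never added to the layers list below.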

        layers = [
            self.conv1, self.soft1, self.conv2, self.soft2, self.pool1,
            self.conv3, self.soft3, self.conv4, self.soft4, self.pool2,
            self.conv5, self.soft5, self.conv6, self.soft6, self.pool3,
            self.flatten, self.fc1, self.soft7, self.fc2, self.soft8,
            self.dropout3, self.fc3
        ]

        self.layers = nn.ModuleList(layers)
Code example #14
class BBBCNN1(nn.Module):
    def __init__(self, outputs, inputs):
        super(BBBCNN1, self).__init__()
        self.conv1 = BBBConv2d(inputs, 92, 3, stride=1)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=1)

        self.conv2 = BBBConv2d(92, 384, 1, stride=1)
        self.soft2 = nn.Softplus()
        self.conv3 = BBBConv2d(384, 384, 2, stride=1)
        self.soft3 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=1)

        self.conv4 = BBBConv2d(384, 640, 2, stride=1)
        self.soft4 = nn.Softplus()
        self.conv5 = BBBConv2d(640, 640, 2, stride=1)
        self.soft5 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=1)

        self.conv6 = BBBConv2d(640, 640, 1, stride=1)
        self.soft6 = nn.Softplus()
        self.conv7 = BBBConv2d(640, 768, 2, stride=1)
        self.soft7 = nn.Softplus()
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=1)

        self.conv8 = BBBConv2d(768, 768, 2, stride=1)
        self.soft8 = nn.Softplus()
        self.conv9 = BBBConv2d(768, 768, 2, stride=1)
        self.soft9 = nn.Softplus()
        self.pool5 = nn.MaxPool2d(kernel_size=2, stride=1)

        self.conv10 = BBBConv2d(768, 768, 1, stride=1)
        self.soft10 = nn.Softplus()
        self.conv11 = BBBConv2d(768, 640, 2, stride=1)
        self.soft11 = nn.Softplus()
        self.conv12 = BBBConv2d(640, 384, 2, stride=1)
        self.soft12 = nn.Softplus()
        self.pool6 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.flatten = FlattenLayer(8 * 8 * 384)
        self.fc1 = BBBLinearFactorial(8 * 8 * 384, outputs)

        layers = [
            self.conv1, self.soft1, self.pool1, self.conv2, self.soft2,
            self.conv3, self.soft3, self.pool2, self.conv4, self.soft4,
            self.conv5, self.soft5, self.pool3, self.conv6, self.soft6,
            self.conv7, self.soft7, self.pool4, self.conv8, self.soft8,
            self.conv9, self.soft9, self.pool5, self.conv10, self.soft10,
            self.conv11, self.soft11, self.conv12, self.soft12, self.pool6,
            self.flatten, self.fc1
        ]

        self.layers = nn.ModuleList(layers)
Code example #15
class BBB4Conv3FC(nn.Module):
    def __init__(self, outputs, inputs):
        super(BBB4Conv3FC, self).__init__()
        self.conv1 = BBBConv2d(inputs, 32, kernel_size=5, stride=1, padding=0)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv2 = BBBConv2d(32, 64, kernel_size=5, stride=1, padding=0)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv3 = BBBConv2d(64, 128, kernel_size=5, stride=1, padding=0)
        self.soft3 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv4 = BBBConv2d(128, 256, kernel_size=5, stride=1, padding=0)
        self.soft4 = nn.Softplus()
        self.pool4 = nn.MaxPool2d(kernel_size=3, stride=2)

        # self.conv5 = BBBConv2d(256, 512, kernel_size=5, stride=1, padding=0)
        # self.soft5 = nn.Softplus()
        # self.pool5 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.flatten = FlattenLayer(3 * 3 * 256)
        self.fc1 = BBBLinearFactorial(3 * 3 * 256, 1000)
        self.soft6 = nn.Softplus()

        self.fc2 = BBBLinearFactorial(1000, 1000)
        self.soft7 = nn.Softplus()

        self.fc3 = BBBLinearFactorial(1000, outputs)

        layers = [
            self.conv1, self.soft1, self.pool1, self.conv2, self.soft2,
            self.pool2, self.conv3, self.soft3, self.pool3, self.conv4,
            self.soft4, self.pool4, self.flatten, self.fc1, self.soft6,
            self.fc2, self.soft7, self.fc3
        ]

        self.layers = nn.ModuleList(layers)
Code example #16
class _BayesianAlexNetD(nn.Module):
    '''The architecture of AlexNet with Bayesian Layers'''

    def __init__(self, outputs, inputs):
        super(_BayesianAlexNetD, self).__init__()

        self.q_logvar_init = 0.05
        self.p_logvar_init = math.log(0.05)

        self.classifier = BBBLinearFactorial(self.q_logvar_init, self.p_logvar_init, 1 * 1 * 128, outputs)

        self.conv1 = BBBConv2d(self.q_logvar_init, self.p_logvar_init, inputs, 64, kernel_size=11, stride=4, padding=5)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = BBBConv2d(self.q_logvar_init, self.p_logvar_init, 64, 192, kernel_size=5, padding=2)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv3 = BBBConv2d(self.q_logvar_init, self.p_logvar_init, 192, 384, kernel_size=3, padding=1)
        self.soft3 = nn.Softplus()

        self.conv4 = BBBConv2d(self.q_logvar_init, self.p_logvar_init, 384, 256, kernel_size=3, padding=1)
        self.soft4 = nn.Softplus()

        self.conv5 = BBBConv2d(self.q_logvar_init, self.p_logvar_init, 256, 128, kernel_size=3, padding=1)
        self.soft5 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        # self.flatten = FlattenLayer(1 * 1 * 128)
        # self.fc1 = BBBLinearFactorial(q_logvar_init, N, p_logvar_init, 1* 1 * 128, outputs)


        layers = [self.conv1, self.soft1, self.pool1, self.conv2, self.soft2, self.pool2, self.conv3, self.soft3,
                  self.conv4, self.soft4, self.conv5, self.soft5, self.pool3]

        self.layers = nn.ModuleList(layers)

    def forward(self, x):
        kl = 0
        for layer in self.layers:
            if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
                x, _kl = layer.convprobforward(x)
                kl += _kl
            else:
                x = layer(x)
        x = x.view(x.size(0), -1)
        x, _kl = self.classifier.fcprobforward(x)
        kl += _kl
        logits = x
        return logits, kl
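With forward returning (logits, kl), the usual Bayes-by-Backprop objective is a likelihood term plus a weighted KL. A sketch of one training step; dividing the KL by the number of minibatches is one common weighting, not something this source fixes:

def elbo_step(model, x, y, optimizer, num_batches):
    optimizer.zero_grad()
    logits, kl = model(x)
    loss = F.cross_entropy(logits, y) + kl / num_batches  # negative ELBO estimate
    loss.backward()
    optimizer.step()
    return loss.item()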
Code example #17
class FlowTestNet(nn.Module):
    def __init__(self, outputs, inputs):
        super(FlowTestNet, self).__init__()
        flow = False
        self.q_logvar_init = 0.05
        self.p_logvar_init = math.log(0.05)
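        # n = 3072 = 3 * 32 * 32, i.e. a flattened 3-channel 32x32 image
        # (our reading of the constant; the source does not say)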
        n = 3072
        self.classifier1 = BBBLinearFactorial(self.q_logvar_init,
                                              self.p_logvar_init,
                                              n,
                                              n,
                                              flow=flow)
        self.classifier2 = BBBLinearFactorial(self.q_logvar_init,
                                              self.p_logvar_init,
                                              n,
                                              n,
                                              flow=flow)
        self.classifier3 = BBBLinearFactorial(self.q_logvar_init,
                                              self.p_logvar_init,
                                              n,
                                              n,
                                              flow=flow)
        self.classifier4 = BBBLinearFactorial(self.q_logvar_init,
                                              self.p_logvar_init,
                                              n,
                                              n,
                                              flow=flow)
        self.classifier5 = BBBLinearFactorial(self.q_logvar_init,
                                              self.p_logvar_init,
                                              n,
                                              n,
                                              flow=flow)

        self.classifier6 = BBBLinearFactorial(self.q_logvar_init,
                                              self.p_logvar_init,
                                              n,
                                              outputs,
                                              flow=flow)
        self.flatten_layer = Flatten()

        # self.flatten = FlattenLayer(1 * 1 * 128)
        # self.fc1 = BBBLinearFactorial(q_logvar_init, N, p_logvar_init, 1* 1 * 128, outputs)

        layers = [
            self.flatten_layer, self.classifier1, self.classifier2,
            self.classifier3, self.classifier4, self.classifier5,
            self.classifier6
        ]

        self.layers = nn.ModuleList(layers)
Code example #18
class BBBELUN1(nn.Module):
    def __init__(self, outputs, inputs):
        super(BBBELUN1, self).__init__()
        self.conv1 = BBBConv2d(inputs, 384, 3, stride=1)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=1)

        self.conv2 = BBBConv2d(384, 384, 1, stride=1)
        self.soft2 = nn.Softplus()
        self.conv3 = BBBConv2d(384, 384, 2, stride=1)
        self.soft3 = nn.Softplus()
        self.conv4 = BBBConv2d(384, 640, 2, stride=1)
        self.soft4 = nn.Softplus()
        self.conv5 = BBBConv2d(640, 640, 2, stride=1)
        self.soft5 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=1)

        self.conv6 = BBBConv2d(640, 640, 1, stride=1)
        self.soft6 = nn.Softplus()
        self.conv7 = BBBConv2d(640, 768, 2, stride=1)
        self.soft7 = nn.Softplus()
        self.conv8 = BBBConv2d(768, 768, 2, stride=1)
        self.soft8 = nn.Softplus()
        self.conv9 = BBBConv2d(768, 768, 2, stride=1)
        self.soft9 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=1)

        self.conv10 = BBBConv2d(768, 768, 1, stride=1)
        self.soft10 = nn.Softplus()
        self.conv11 = BBBConv2d(768, 896, 2, stride=1)
        self.soft11 = nn.Softplus()
        self.conv12 = BBBConv2d(896, 896, 2, stride=1)
        self.soft12 = nn.Softplus()
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=1)

        self.conv13 = BBBConv2d(896, 896, 3, stride=1)
        self.soft13 = nn.Softplus()
        self.conv14 = BBBConv2d(896, 1024, 2, stride=1)
        self.soft14 = nn.Softplus()
        self.conv15 = BBBConv2d(1024, 1024, 2, stride=1)
        self.soft15 = nn.Softplus()
        self.pool5 = nn.MaxPool2d(kernel_size=2, stride=1)

        self.conv16 = BBBConv2d(1024, 1024, 1, stride=1)
        self.soft16 = nn.Softplus()
        self.conv17 = BBBConv2d(1024, 1152, 2, stride=1)
        self.soft17 = nn.Softplus()
        self.pool6 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv18 = BBBConv2d(1152, 1152, 2, stride=1)
        self.soft18 = nn.Softplus()
        self.pool7 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.flatten = FlattenLayer(2 * 2 * 1152)
        self.fc1 = BBBLinearFactorial(2 * 2 * 1152, outputs)

        layers = [
            self.conv1, self.soft1, self.pool1, self.conv2, self.soft2,
            self.conv3, self.soft3, self.conv4, self.soft4, self.conv5,
            self.soft5, self.pool2, self.conv6, self.soft6, self.conv7,
            self.soft7, self.conv8, self.soft8, self.conv9, self.soft9,
            self.pool3, self.conv10, self.soft10, self.conv11, self.soft11,
            self.conv12, self.soft12, self.pool4, self.conv13, self.soft13,
            self.conv14, self.soft14, self.conv15, self.soft15, self.pool5,
            self.conv16, self.soft16, self.conv17, self.soft17, self.pool6,
            self.conv18, self.soft18, self.pool7, self.flatten, self.fc1
        ]

        self.layers = nn.ModuleList(layers)
Code example #19
class ResNet(nn.Module):
    def __init__(self,
                 block,
                 layers,
                 input_channels,
                 num_classes=10,
                 zero_init_residual=False,
                 groups=1,
                 width_per_group=64,
                 replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        self.q_logvar_init = 0.05
        self.p_logvar_init = math.log(0.05)

        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(
                                 replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = BBBConv2d(self.q_logvar_init,
                               self.p_logvar_init,
                               input_channels,
                               self.inplanes,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.drop = nn.Dropout(p=0.5)
        self.classifier = BBBLinearFactorial(self.q_logvar_init,
                                             self.p_logvar_init,
                                             512 * block.expansion,
                                             num_classes,
                                             flow=False)
        print(block.expansion)

        layers2 = [
            self.conv1, self.bn1, self.relu, self.maxpool, self.layer1,
            self.layer2, self.layer3, self.layer4, self.avgpool
        ]

        self.layers2 = nn.ModuleList(layers2)

        for m in self.modules():
            if isinstance(m, BBBConv2d):
                m.reset_parameters()
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample, self.groups,
                  self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      groups=self.groups,
                      base_width=self.base_width,
                      dilation=self.dilation,
                      norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def probforward(self, x, dropout=False):
        loss = 0
        out, kl = self.conv1.probforward(x)
        out = self.relu(self.bn1(out))
        loss += kl

        out, kl = self.pf(out, self.layer1)
        loss += kl
        out, kl = self.pf(out, self.layer2)
        loss += kl
        out, kl = self.pf(out, self.layer3)
        loss += kl
        out, kl = self.pf(out, self.layer4)
        loss += kl
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        if dropout:
            out = self.drop(out)  # dropout on the pooled features, not the raw input
        x, _kl = self.classifier.probforward(out)
        loss += _kl               # count the classifier's KL exactly once
        logits = x
        return logits, loss

    def pf(self, x, layer):
        kl = 0

        for l in layer:
            if hasattr(l, 'probforward') and callable(l.probforward):
                x, _kl = l.probforward(x)
                kl += _kl
            else:
                x = l(x)
        return x, kl
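A construction sketch, assuming BasicBlock and conv1x1 mirror their torchvision counterparts; [2, 2, 2, 2] is the ResNet-18 layout, and the 224x224 input squares with the fixed F.avg_pool2d(out, 4) at the end of probforward:

model = ResNet(BasicBlock, [2, 2, 2, 2], input_channels=3, num_classes=10)
x = torch.randn(2, 3, 224, 224)    # yields 7x7 maps before the 4x4 average pool
logits, kl = model.probforward(x)  # logits: (2, 10); kl: summed KL of all Bayesian layers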
Code example #20
class BBBSqueezeNet(nn.Module):
    def __init__(self, outputs, inputs):
        super(BBBSqueezeNet, self).__init__()

        self.conv1 = BBBConv2d(inputs, 64, kernel_size=3, stride=2)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)

        # Fire module 1
        self.squeeze1 = BBBConv2d(64, 16, kernel_size=1)
        self.squeeze_activation1 = nn.Softplus()
        self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3, padding=1)
        self.expand3x3_activation1 = nn.Softplus()

        # Fire module 2
        self.squeeze2 = BBBConv2d(128, 16, kernel_size=1)
        self.squeeze_activation2 = nn.Softplus()
        self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3, padding=1)
        self.expand3x3_activation2 = nn.Softplus()

        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)

        # Fire module 3
        self.squeeze3 = BBBConv2d(128, 32, kernel_size=1)
        self.squeeze_activation3 = nn.Softplus()
        self.expand3x3_3 = BBBConv2d(32, 256, kernel_size=3, padding=1)
        self.expand3x3_activation3 = nn.Softplus()

        # Fire module 4
        self.squeeze4 = BBBConv2d(256, 32, kernel_size=1)
        self.squeeze_activation4 = nn.Softplus()
        self.expand3x3_4 = BBBConv2d(32, 256, kernel_size=3, padding=1)
        self.expand3x3_activation4 = nn.Softplus()

        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)

        # Fire module 5
        self.squeeze5 = BBBConv2d(256, 48, kernel_size=1)
        self.squeeze_activation5 = nn.Softplus()
        self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3, padding=1)
        self.expand3x3_activation5 = nn.Softplus()

        # Fire module 6
        self.squeeze6 = BBBConv2d(384, 48, kernel_size=1)
        self.squeeze_activation6 = nn.Softplus()
        self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3, padding=1)
        self.expand3x3_activation6 = nn.Softplus()

        # Fire module 7
        self.squeeze7 = BBBConv2d(384, 64, kernel_size=1)
        self.squeeze_activation7 = nn.Softplus()
        self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3, padding=1)
        self.expand3x3_activation7 = nn.Softplus()

        # Fire module 8
        self.squeeze8 = BBBConv2d(512, 64, kernel_size=1)
        self.squeeze_activation8 = nn.Softplus()
        self.expand3x3_8 = BBBConv2d(64, 512, kernel_size=3, padding=1)
        self.expand3x3_activation8 = nn.Softplus()

        self.drop1 = nn.Dropout(p=0.5)
        self.conv2 = BBBConv2d(512, outputs, kernel_size=1)
        self.soft2 = nn.Softplus()
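        # Note: FlattenLayer(13 * 13 * 100) only matches conv2's output if
        # outputs == 100 (and the map is 13x13 at this depth); for other
        # output counts these sizes look inconsistent.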
        self.flatten = FlattenLayer(13 * 13 * 100)
        self.fc1 = BBBLinearFactorial(13 * 13 * 100, outputs)

        layers = [
            self.conv1, self.soft1, self.pool1, self.squeeze1,
            self.squeeze_activation1, self.expand3x3_1,
            self.expand3x3_activation1, self.squeeze2,
            self.squeeze_activation2, self.expand3x3_2,
            self.expand3x3_activation2, self.pool2, self.squeeze3,
            self.squeeze_activation3, self.expand3x3_3,
            self.expand3x3_activation3, self.squeeze4,
            self.squeeze_activation4, self.expand3x3_4,
            self.expand3x3_activation4, self.pool3, self.squeeze5,
            self.squeeze_activation5, self.expand3x3_5,
            self.expand3x3_activation5, self.squeeze6,
            self.squeeze_activation6, self.expand3x3_6,
            self.expand3x3_activation6, self.squeeze7,
            self.squeeze_activation7, self.expand3x3_7,
            self.expand3x3_activation7, self.squeeze8,
            self.squeeze_activation8, self.expand3x3_8,
            self.expand3x3_activation8, self.drop1, self.conv2, self.soft2,
            self.flatten, self.fc1
        ]

        self.layers = nn.ModuleList(layers)