    def __init__(self, num_classes, batch_norm, cls_spectral_norm):
        super().__init__()

        def activation():
            return nn.LeakyReLU()

        if batch_norm:

            def bn_2d(dim):
                return nn.BatchNorm2d(dim)

            def bn_1d(dim):
                return nn.BatchNorm1d(dim)
        else:

            def bn_2d(dim):
                return Identity(dim)

            def bn_1d(dim):
                return Identity(dim)

        self.feat_ext = nn.Sequential(
            nn.Conv2d(3, 128, 3, padding=1),
            bn_2d(128),
            activation(),
            nn.MaxPool2d(2, stride=2, padding=0),
            #
            nn.Conv2d(128, 256, 3, padding=1),
            bn_2d(256),
            activation(),
            nn.MaxPool2d(2, stride=2, padding=0),
            #
            nn.Conv2d(256, 512, 3, padding=1),
            bn_2d(512),
            activation(),
            nn.MaxPool2d(2, stride=2, padding=0),
            #
            nn.Conv2d(512, 256, 3, padding=1),
            bn_2d(256),
            activation(),
            nn.MaxPool2d(2, stride=2, padding=0),
            #
            nn.Conv2d(256, 128, 3, padding=1),
            bn_2d(128),
            activation(),
            nn.MaxPool2d(2, stride=2, padding=0),
            #
            mynn.LinearView())

        cls = nn.Linear(128, num_classes)
        if cls_spectral_norm:
            nn.utils.spectral_norm(cls)

        self.cls = nn.Sequential(cls)
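
# The Identity and mynn.LinearView helpers used throughout these examples are
# referenced but never defined. A minimal sketch consistent with their call
# sites (Identity is invoked both with and without a dim argument; LinearView
# feeds flattened features to the linear classifier) could look like this:
import torch.nn as nn


class Identity(nn.Module):
    """No-op placeholder; accepts and ignores any constructor arguments."""

    def __init__(self, *args):
        super().__init__()

    def forward(self, x):
        return x


class LinearView(nn.Module):
    """Flattens (N, C, H, W) feature maps into (N, C*H*W) vectors."""

    def forward(self, x):
        return x.view(x.size(0), -1)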
Example #2
    def __init__(self,
                 num_classes=10,
                 activation=nn.LeakyReLU(0.1),
                 batchnorm=nn.BatchNorm2d):
        super().__init__()

        def weight_norm(module):
            return module

        self.use_affine = True

        self.activation = activation

        self.feat_ext = nn.Sequential(
            weight_norm(nn.Conv2d(3, 128, 3, padding=1)),
            batchnorm(128, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(128, 128, 3, padding=1)),
            batchnorm(128, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(128, 128, 3, padding=1)),
            batchnorm(128, affine=self.use_affine),
            activation,
            nn.MaxPool2d(2, stride=2, padding=0),
            nn.Dropout(0.5),
            #
            weight_norm(nn.Conv2d(128, 256, 3, padding=1)),
            batchnorm(256, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(256, 256, 3, padding=1)),
            batchnorm(256, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(256, 256, 3, padding=1)),
            batchnorm(256, affine=self.use_affine),
            activation,
            nn.MaxPool2d(2, stride=2, padding=0),
            nn.Dropout(0.5),
            #
            weight_norm(nn.Conv2d(256, 512, 3, padding=0)),
            batchnorm(512, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(512, 256, 1, padding=0)),
            batchnorm(256, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(256, 128, 1, padding=0)),
            batchnorm(128, affine=self.use_affine),
            activation,
            nn.AvgPool2d(6, stride=2, padding=0),
            mynn.LinearView())

        self.cls = nn.Sequential(nn.Linear(128, num_classes))
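
# A quick shape check for the model above, assuming 32x32 inputs (e.g.
# CIFAR-10): the two max-pool stages reduce 32 -> 16 -> 8, the padding-free
# 3x3 conv gives 6x6 maps, and AvgPool2d(6) collapses them to 1x1, so
# feat_ext emits 128-dim vectors. check_shapes is a hypothetical helper,
# not part of the original examples:
import torch


def check_shapes(model, num_classes=10):
    x = torch.randn(2, 3, 32, 32)
    feats = model.feat_ext(x)           # (2, 128) after LinearView
    assert feats.shape == (2, 128)
    assert model.cls(feats).shape == (2, num_classes)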
Example #3
    def __init__(self,
                 num_classes=10,
                 activation=nn.LeakyReLU(0.1),
                 batchnorm=nn.BatchNorm2d):
        super().__init__()

        def weight_norm(module):
            return module

        self.use_affine = True

        self.activation = activation

        # Block 1
        self.conv1a = weight_norm(nn.Conv2d(3, 128, 3, padding=1))
        self.bn1a = batchnorm(128, affine=self.use_affine)
        self.conv1b = weight_norm(nn.Conv2d(128, 128, 3, padding=1))
        self.bn1b = batchnorm(128, affine=self.use_affine)
        self.conv1c = weight_norm(nn.Conv2d(128, 128, 3, padding=1))
        self.bn1c = batchnorm(128, affine=self.use_affine)
        self.mp1 = nn.MaxPool2d(2, stride=2, padding=0)
        self.drop1 = nn.Dropout(0.5)

        # Block 2
        self.conv2a = weight_norm(nn.Conv2d(128, 256, 3, padding=1))
        self.bn2a = batchnorm(256, affine=self.use_affine)
        self.conv2b = weight_norm(nn.Conv2d(256, 256, 3, padding=1))
        self.bn2b = batchnorm(256, affine=self.use_affine)
        self.conv2c = weight_norm(nn.Conv2d(256, 256, 3, padding=1))
        self.bn2c = batchnorm(256, affine=self.use_affine)
        self.mp2 = nn.MaxPool2d(2, stride=2, padding=0)
        self.drop2 = nn.Dropout(0.5)

        # Block 3
        self.conv3a = weight_norm(nn.Conv2d(256, 512, 3, padding=0))
        self.bn3a = batchnorm(512, affine=self.use_affine)
        self.conv3b = weight_norm(nn.Conv2d(512, 256, 1, padding=0))
        self.bn3b = batchnorm(256, affine=self.use_affine)
        self.conv3c = weight_norm(nn.Conv2d(256, 128, 1, padding=0))
        self.bn3c = batchnorm(128, affine=self.use_affine)
        self.ap3 = nn.AvgPool2d(6, stride=2, padding=0)

        self.lv = mynn.LinearView()

        self.cls = nn.Sequential(nn.Linear(128, num_classes))
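
    # This example registers its layers individually but omits forward(); a
    # sketch of the pass implied by the layer names (conv -> batchnorm ->
    # activation at each step, pooling and dropout between blocks) might be:
    def forward(self, x):
        for conv, bn in ((self.conv1a, self.bn1a), (self.conv1b, self.bn1b),
                         (self.conv1c, self.bn1c)):
            x = self.activation(bn(conv(x)))
        x = self.drop1(self.mp1(x))
        for conv, bn in ((self.conv2a, self.bn2a), (self.conv2b, self.bn2b),
                         (self.conv2c, self.bn2c)):
            x = self.activation(bn(conv(x)))
        x = self.drop2(self.mp2(x))
        for conv, bn in ((self.conv3a, self.bn3a), (self.conv3b, self.bn3b),
                         (self.conv3c, self.bn3c)):
            x = self.activation(bn(conv(x)))
        return self.cls(self.lv(self.ap3(x)))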
Example #4
def cnn13_feat_ext(activation, batchnorm, bn_affine, latent_dim=None):
    return nn.Sequential(
        nn.Conv2d(3, 128, 3, padding=1),
        batchnorm(128, affine=bn_affine),
        activation,
        nn.Conv2d(128, 128, 3, padding=1),
        batchnorm(128, affine=bn_affine),
        activation,
        nn.Conv2d(128, 128, 3, padding=1),
        batchnorm(128, affine=bn_affine),
        activation,
        nn.MaxPool2d(2, stride=2, padding=0),
        nn.Dropout(0.5),
        #
        nn.Conv2d(128, 256, 3, padding=1),
        batchnorm(256, affine=bn_affine),
        activation,
        nn.Conv2d(256, 256, 3, padding=1),
        batchnorm(256, affine=bn_affine),
        activation,
        nn.Conv2d(256, 256, 3, padding=1),
        batchnorm(256, affine=bn_affine),
        activation,
        nn.MaxPool2d(2, stride=2, padding=0),
        nn.Dropout(0.5),
        #
        nn.Conv2d(256, 512, 3, padding=0),
        batchnorm(512, affine=bn_affine),
        activation,
        nn.Conv2d(512, 256, 1, padding=0),
        batchnorm(256, affine=bn_affine),
        activation,
        nn.Conv2d(256, 128, 1, padding=0),
        batchnorm(128, affine=bn_affine),
        activation,
        nn.AvgPool2d(6, stride=2, padding=0),
        mynn.LinearView(),
        Identity() if latent_dim is None else nn.Sequential(
            nn.Linear(128, latent_dim),
            nn.BatchNorm1d(latent_dim),
            # activation
        ))
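
# A minimal call sketch for cnn13_feat_ext; the argument values here are
# illustrative assumptions, and the usual `import torch.nn as nn` is assumed:
feat_ext = cnn13_feat_ext(activation=nn.LeakyReLU(0.1),
                          batchnorm=nn.BatchNorm2d,
                          bn_affine=True,
                          latent_dim=64)
# On 32x32 inputs this yields (N, 64) latent vectors; with latent_dim=None
# the trailing Identity() passes the raw (N, 128) features through instead.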
Example #5
    def __init__(self, block, num_blocks, activation, batchnorm):
        super().__init__()
        self.in_planes = 64

        self.activation = activation
        self.batchnorm = batchnorm

        conv1 = nn.Conv2d(3,
                          64,
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias=False)
        bn1 = self.batchnorm(64)
        layer1 = self._make_layer(block, 64, num_blocks[0], 1, activation,
                                  batchnorm)
        layer2 = self._make_layer(block, 128, num_blocks[1], 2, activation,
                                  batchnorm)
        layer3 = self._make_layer(block, 256, num_blocks[2], 2, activation,
                                  batchnorm)
        layer4 = self._make_layer(block, 512, num_blocks[3], 2, activation,
                                  batchnorm)

        feat_dim = 2

        self.m = nn.Sequential(
            conv1,
            bn1,
            self.activation(),
            layer1,
            layer2,
            layer3,
            layer4,
            nn.AvgPool2d(4),
            mynn.LinearView(),
        )
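
    # _make_layer is referenced above but not shown; a sketch following the
    # standard CIFAR ResNet recipe (the first block of each stage applies the
    # stride, in_planes tracks the block's expansion factor) would be the
    # following, assuming the block constructor takes
    # (in_planes, planes, stride, activation, batchnorm):
    def _make_layer(self, block, planes, num_blocks, stride, activation,
                    batchnorm):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for s in strides:
            layers.append(block(self.in_planes, planes, s, activation,
                                batchnorm))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)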
Example #6
    def __init__(self, num_classes: int, batch_norm: bool, drop_out: bool,
                 cls_spectral_norm: bool, final_bn: bool):
        super().__init__()

        def activation():
            return nn.LeakyReLU(0.1)

        if drop_out:

            def dropout(p):
                return nn.Dropout(p)
        else:

            def dropout(p):
                return Identity()

        bn_affine = True

        if batch_norm:

            def bn_2d(dim):
                return nn.BatchNorm2d(dim, affine=bn_affine)

            def bn_1d(dim):
                return nn.BatchNorm1d(dim, affine=bn_affine)
        else:

            def bn_2d(dim):
                return Identity(dim)

            def bn_1d(dim):
                return Identity(dim)

        self.feat_ext = nn.Sequential(
            nn.Conv2d(3, 128, 3, padding=1),
            bn_2d(128),
            activation(),
            nn.Conv2d(128, 128, 3, padding=1),
            bn_2d(128),
            activation(),
            nn.Conv2d(128, 128, 3, padding=1),
            bn_2d(128),
            activation(),
            nn.MaxPool2d(2, stride=2, padding=0),
            dropout(0.5),
            #
            nn.Conv2d(128, 256, 3, padding=1),
            bn_2d(256),
            activation(),
            nn.Conv2d(256, 256, 3, padding=1),
            bn_2d(256),
            activation(),
            nn.Conv2d(256, 256, 3, padding=1),
            bn_2d(256),
            activation(),
            nn.MaxPool2d(2, stride=2, padding=0),
            dropout(0.5),
            #
            nn.Conv2d(256, 512, 3, padding=0),
            bn_2d(512),
            activation(),
            nn.Conv2d(512, 256, 1, padding=0),
            bn_2d(256),
            activation(),
            nn.Conv2d(256, 128, 1, padding=0),
            bn_2d(128) if final_bn else Identity(),
            activation(),
            nn.AvgPool2d(6, stride=2, padding=0),
            mynn.LinearView(),
        )

        cls = nn.Linear(128, num_classes)
        if cls_spectral_norm:
            nn.utils.spectral_norm(cls)

        self.cls = nn.Sequential(cls)
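
# Note: nn.utils.spectral_norm registers its reparameterization hooks on the
# module in place and returns the same object, so the bare call above works;
# the more explicit idiom keeps the return value:
#     cls = nn.utils.spectral_norm(cls)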
Example #7
    def __init__(self,
                 num_classes=10,
                 activation=nn.LeakyReLU(0.1),
                 batchnorm=nn.BatchNorm2d,
                 num_augmentations=None):
        super().__init__()

        def weight_norm(module):
            return module

        self.num_augmentations = num_augmentations
        assert self.num_augmentations is not None

        self.use_affine = True

        self.activation = activation

        self.beta = torch.distributions.Beta(torch.tensor([2.]),
                                             torch.tensor([5.]))

        # self.augmenter = nn.GRU(128, 128, 2, batch_first=True)
        # self.augmenter = nn.ModuleList([
        #                     nn.Sequential(
        #                         nn.Linear(128,10,bias=False),
        #                         nn.LeakyReLU(0.1),
        #                         nn.Linear(10,128,bias=False)) for i in range(self.num_augmentations-1)])

        # self.logvar_map = nn.Linear(128, 128)

        self.feat_ext = nn.Sequential(
            weight_norm(nn.Conv2d(3, 128, 3, padding=1)),
            batchnorm(128, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(128, 128, 3, padding=1)),
            batchnorm(128, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(128, 128, 3, padding=1)),
            batchnorm(128, affine=self.use_affine),
            activation,
            nn.MaxPool2d(2, stride=2, padding=0),
            nn.Dropout(0.5),
            #
            weight_norm(nn.Conv2d(128, 256, 3, padding=1)),
            batchnorm(256, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(256, 256, 3, padding=1)),
            batchnorm(256, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(256, 256, 3, padding=1)),
            batchnorm(256, affine=self.use_affine),
            activation,
            nn.MaxPool2d(2, stride=2, padding=0),
            nn.Dropout(0.5),
            #
            weight_norm(nn.Conv2d(256, 512, 3, padding=0)),
            batchnorm(512, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(512, 256, 1, padding=0)),
            batchnorm(256, affine=self.use_affine),
            activation,
            weight_norm(nn.Conv2d(256, 128, 1, padding=0)),
            batchnorm(128, affine=self.use_affine),
            activation,
            nn.AvgPool2d(6, stride=2, padding=0),
            mynn.LinearView())

        self.cls = nn.Sequential(nn.Linear(128, num_classes))
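
        # The Beta(2, 5) distribution registered above suggests sampled
        # per-augmentation mixing coefficients, but its consumer is not shown
        # in this snippet. A hedged sketch of drawing one coefficient per
        # augmentation from inside a method of this class:
        #     lam = self.beta.sample((self.num_augmentations,)).squeeze(-1)
        # (.sample((n,)) returns shape (n, 1) for these single-element
        # concentration tensors, hence the squeeze to shape (n,).)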