Example #1
    def __init__(self, in_channels: int, num_classes: int) -> None:
        super().__init__()
        self.head = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            tnn.Reshape(in_channels),
        )
        self.emb = nn.Embedding(num_classes, in_channels)
        self.discr = nn.Linear(in_channels, 1)
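For reference, a head shaped like this is normally paired with the projection-discriminator scoring rule, in which a class embedding is dotted with the pooled features. A minimal forward sketch (hypothetical, not part of the listed example; it assumes the attributes defined above and a pooled feature vector of shape (batch, in_channels)):

    def forward(self, x, y):
        feats = self.head(x)            # pool and flatten to (batch, in_channels)
        uncond = self.discr(feats)      # unconditional realness logit
        # class-conditional projection: dot product of embedding and features
        proj = (self.emb(y) * feats).sum(dim=1, keepdim=True)
        return uncond + proj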
Example #2
    def __init__(self, feat_extractor, feature_size, num_classes):
        super(ProjectionDiscr, self).__init__()
        self.bone = feat_extractor
        self.head = nn.Sequential(
            nn.AdaptiveMaxPool2d(1),
            tnn.Reshape(feature_size),
        )
        self.emb = nn.Embedding(num_classes, feature_size)
        self.discr = nn.Linear(feature_size, 1)
Example #3
    def __init__(self, feat_extractor, feature_size, num_classes):
        super(Classifier1, self).__init__()
        self.bone = feat_extractor

        self.head = nn.Sequential(
            nn.AdaptiveMaxPool2d(1),
            tnn.Reshape(feature_size),
            #nn.Dropout(0.5),
            kaiming(nn.Linear(feature_size, num_classes)),
            nn.BatchNorm1d(num_classes))
Example #4
    def set_pool_size(self, size: int) -> 'ClassificationHead':
        """
        Average pool to spatial size :code:`size` rather than 1. Recreate the
        first Linear to accommodate the change.
        """
        self.pool = nn.AdaptiveAvgPool2d(size)
        self.reshape = tnn.Reshape(size * size * self.in_channels)
        self.linear1 = kaiming(
            nn.Linear(size * size * self.in_channels,
                      self.linear1.out_features))
        return self
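Since the method returns self, it composes fluently. A hypothetical usage sketch, assuming head is a ClassificationHead with in_channels=512:

    # pooling to 2x2 quadruples the flattened size, so linear1 is rebuilt
    # with 2 * 2 * 512 = 2048 input features (out_features is preserved)
    head = head.set_pool_size(2)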
Example #5
    def __init__(self, feat_extractor, feature_size, num_classes):
        super(Classifier, self).__init__()
        self.bone = feat_extractor

        self.head = nn.Sequential(
            nn.AdaptiveMaxPool2d(1),
            tnn.Reshape(feature_size),
            kaiming(nn.Linear(feature_size, feature_size)),
            nn.ReLU(inplace=True),
            xavier(nn.Linear(feature_size, num_classes)),
        )
Example #6
    def __init__(self, in_ch, num_classes, B=0):

        def ch(ch):
            # width scaling: grow channel counts by 1.1**B,
            # rounded down to a multiple of 8
            return int(ch * 1.1**B) // 8 * 8

        def n_layers(d):
            # depth scaling: grow the number of blocks per stage by 1.2**B
            return int(math.ceil(d * 1.2**B))

        def r():
            # resolution scaling: grow the target input side by 1.15**B
            return int(224 * 1.15**B)

        super(EfficientNet, self).__init__(
            # Stage 1
            # nn.UpsamplingBilinear2d(size=(r(), r())),
            tu.kaiming(tnn.Conv3x3(in_ch, ch(32), stride=2, bias=False)),
            nn.BatchNorm2d(ch(32)),
            tnn.HardSwish(),

            # Stage 2
            MBConv(ch(32), ch(16), 3, mul_factor=1),
            *[
                MBConv(ch(16), ch(16), 3, mul_factor=1)
                for _ in range(n_layers(1) - 1)
            ],

            # Stage 3
            MBConv(ch(16), ch(24), 3, stride=2),
            *[MBConv(ch(24), ch(24), 3) for _ in range(n_layers(2) - 1)],

            # Stage 4
            MBConv(ch(24), ch(40), 5, stride=2),
            *[MBConv(ch(40), ch(40), 5) for _ in range(n_layers(2) - 1)],

            # Stage 5
            MBConv(ch(40), ch(80), 3, stride=2),
            *[MBConv(ch(80), ch(80), 3) for _ in range(n_layers(3) - 1)],

            # Stage 6
            MBConv(ch(80), ch(112), 5),
            *[MBConv(ch(112), ch(112), 5) for _ in range(n_layers(3) - 1)],

            # Stage 7
            MBConv(ch(112), ch(192), 5, stride=2),
            *[MBConv(ch(192), ch(192), 5) for _ in range(n_layers(4) - 1)],

            # Stage 8
            MBConv(ch(192), ch(320), 3),
            *[MBConv(ch(320), ch(320), 3) for _ in range(n_layers(1) - 1)],
            tu.kaiming(tnn.Conv1x1(ch(320), ch(1280), bias=False)),
            nn.BatchNorm2d(ch(1280)),
            tnn.HardSwish(),
            nn.AdaptiveAvgPool2d(1),
            tnn.Reshape(-1),
            tu.xavier(nn.Linear(ch(1280), num_classes)))
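The three helpers implement EfficientNet-style compound scaling from the coefficient B: width by 1.1**B, depth by 1.2**B, and input resolution by 1.15**B. A quick worked example of the values they produce for B=1 (assuming import math, as the code above already does):

    B = 1
    int(32 * 1.1**B) // 8 * 8     # ch(32) -> 32 (35.2 truncates to 35, rounds down to 32)
    int(math.ceil(2 * 1.2**B))    # n_layers(2) -> 3 (one extra MBConv in stages 3 and 4)
    int(224 * 1.15**B)            # r() -> 257 (target input side length)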
Example #7
    def to_two_layers(self, hidden_channels: int) -> 'ClassificationHead':
        """
        Set the classifier architecture to avgpool-flatten-linear1-relu-linear2.
        """
        self._modules = OrderedDict([
            ('pool', nn.AdaptiveAvgPool2d(1)),
            ('reshape', tnn.Reshape(self.in_channels)),
            ('linear1', kaiming(nn.Linear(self.in_channels, hidden_channels))),
            ('relu1', nn.ReLU(True)),
            ('linear2', kaiming(nn.Linear(hidden_channels, self.num_classes))),
        ])
        return self
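A hypothetical usage sketch, assuming head is a ClassificationHead module applied to a (N, in_channels, H, W) feature map:

    logits = head.to_two_layers(hidden_channels=256)(features)   # (N, num_classes)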
Example #8
    def to_resnet_style(self) -> 'ClassificationHead':
        """
        Set the classifier architecture to avgpool-flatten-linear.
        """
        self._modules = OrderedDict([
            ('pool', nn.AdaptiveAvgPool2d(1)),
            ('reshape', tnn.Reshape(self.in_channels)),
            ('linear1', kaiming(nn.Linear(self.in_channels,
                                          self.num_classes))),
        ])

        return self
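Because each of these reconfiguration methods returns self, the same head can later be switched back to the single-linear form in place (hypothetical head instance):

    head.to_resnet_style()   # back to avgpool-flatten-linear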
Example #9
def VggGeneratorDebug(in_noise=32, out_ch=3):
    """
    A not-so-small VGG-style image GAN generator for testing purposes

    Args:
        in_noise (int): dimension of the input noise
        out_ch (int): number of output channels (3 for RGB images)

    Returns:
        an nn.Sequential generator
    """
    return nn.Sequential(
        kaiming(nn.Linear(in_noise, 128 * 16)), tnn.Reshape(128, 4, 4),
        nn.LeakyReLU(0.2, inplace=True),
        VggBNBone([128, 'U', 64, 'U', 32, 'U', 16], in_ch=128),
        xavier(tnn.Conv1x1(16, out_ch)), nn.Sigmoid())
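A minimal smoke test, assuming torch is imported and that each 'U' entry in VggBNBone upsamples by a factor of 2 (so the 4x4 base reaches 32x32):

    G = VggGeneratorDebug()
    z = torch.randn(8, 32)    # batch of 8 noise vectors, in_noise=32
    imgs = G(z)               # (8, 3, 32, 32), values in [0, 1] from the Sigmoid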
Example #10
    def __init__(self, in_noise, out_ch, side_ch=1):
        super(VggImg2ImgGeneratorDebug, self).__init__()

        def make_block(in_ch, out_ch, **kwargs):
            return tnn.Conv2dNormReLU(
                in_ch,
                out_ch,
                norm=lambda out: tnn.Spade2d(out, side_ch, 64),
                **kwargs)

        self.net = tnn.CondSeq(
            kaiming(nn.Linear(in_noise, 128 * 16)), tnn.Reshape(128, 4, 4),
            nn.LeakyReLU(0.2, inplace=True),
            VggBNBone([128, 'U', 64, 'U', 32, 'U', 16],
                      in_ch=128,
                      block=make_block), xavier(tnn.Conv1x1(16, out_ch)),
            nn.Sigmoid())
Example #11
    def __init__(self, in_noise, out_ch, num_classes):
        super(VggClassCondGeneratorDebug, self).__init__()

        def make_block(in_ch, out_ch, **kwargs):
            return tnn.Conv2dNormReLU(
                in_ch,
                out_ch,
                norm=lambda out: tnn.ConditionalBN2d(out, 64),
                **kwargs)

        self.emb = nn.Embedding(num_classes, 64)
        self.net = tnn.CondSeq(
            kaiming(nn.Linear(in_noise, 128 * 16)), tnn.Reshape(128, 4, 4),
            nn.LeakyReLU(0.2, inplace=True),
            VggBNBone([128, 'U', 64, 'U', 32, 'U', 16],
                      in_ch=128,
                      block=make_block), xavier(tnn.Conv1x1(16, out_ch)),
            nn.Sigmoid())
Example #12
    def to_vgg_style(self, hidden_channels: int) -> 'ClassificationHead':
        """
        Set the classifier architecture to
        avgpool-flatten-linear1-relu-dropout-linear2-relu-dropout-linear3, as
        in the original VGG classifier.
        """
        self._modules = OrderedDict([
            ('pool', nn.AdaptiveAvgPool2d(7)),
            ('reshape', tnn.Reshape(7 * 7 * self.in_channels)),
            ('linear1',
             kaiming(nn.Linear(7 * 7 * self.in_channels, hidden_channels))),
            ('relu1', nn.ReLU(True)),
            ('dropout1', nn.Dropout(0.5)),
            ('linear2', kaiming(nn.Linear(hidden_channels, hidden_channels))),
            ('relu2', nn.ReLU(True)),
            ('dropout2', nn.Dropout(0.5)),
            ('linear3', kaiming(nn.Linear(hidden_channels, self.num_classes))),
        ])

        return self
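Note that this head pools to 7x7 rather than 1x1, so linear1 dominates the parameter count. For the classic VGG configuration of in_channels=512 and hidden_channels=4096:

    7 * 7 * 512 * 4096   # = 102,760,448 weights in linear1 alone, matching VGG's original fc1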