Example #1
    def __init__(self, num_classes=10):
        super(MNISTMSNConvNet, self).__init__()
        self.main = nn.Sequential(
            SNConv2d(1, 16, kernel_size=5, stride=1, padding=2),
            MeanSpectralNorm(16), nn.LeakyReLU(0.1, inplace=True),
            SNConv2d(16, 32, kernel_size=3, stride=1, padding=2),
            MeanSpectralNorm(32), nn.LeakyReLU(0.1, inplace=True),
            SNConv2d(32, 32, kernel_size=4, stride=2, padding=2),
            MeanSpectralNorm(32), nn.LeakyReLU(0.1, inplace=True))
        # nn.MaxPool2d(kernel_size=2, stride=2))
        self.fc = SNLinear(16 * 16 * 32, num_classes)
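The snippet shows only the constructor. For a 28 x 28 MNIST input the conv stack gives 28 -> 28 (k=5, s=1, p=2), 28 -> 30 (k=3, s=1, p=2), 30 -> 16 (k=4, s=2, p=2), which matches the SNLinear(16 * 16 * 32, num_classes) head. A minimal forward sketch consistent with that (assumed, not part of the source):

    def forward(self, x):
        # Hypothetical forward pass, not shown in the source snippet.
        out = self.main(x)               # -> (batch, 32, 16, 16)
        out = out.view(out.size(0), -1)  # -> (batch, 16 * 16 * 32)
        return self.fc(out)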
Example #2
    def __init__(self, nc, ndf):
        super(_MSNnetD, self).__init__()

        self.main = nn.Sequential(
            # input is (nc) x 32 x 32
            SNConv2d(nc, ndf, 3, 1, 1, bias=True),
            nn.LeakyReLU(0.1, inplace=True),
            SNConv2d(ndf, ndf, 4, 2, 1, bias=True),
            nn.LeakyReLU(0.1, inplace=True),
            # state size. (ndf) x 16 x 16
            SNConv2d(ndf, ndf * 2, 3, 1, 1, bias=True),
            nn.LeakyReLU(0.1, inplace=True),
            SNConv2d(ndf * 2, ndf * 2, 4, 2, 1, bias=True),
            MeanSpectralNorm(ndf * 2),
            nn.LeakyReLU(0.1, inplace=True),
            # state size. (ndf*2) x 8 x 8
            SNConv2d(ndf * 2, ndf * 4, 3, 1, 1, bias=True),
            nn.LeakyReLU(0.1, inplace=True),
            SNConv2d(ndf * 4, ndf * 4, 4, 2, 1, bias=True),
            MeanSpectralNorm(ndf * 4),
            nn.LeakyReLU(0.1, inplace=True),
            # state size. (ndf*4) x 4 x 4
            SNConv2d(ndf * 4, ndf * 8, 3, 1, 1, bias=True),
            MeanSpectralNorm(ndf * 8),
            nn.LeakyReLU(0.1, inplace=True),
            # state size. (ndf*8) x 4 x 4
            #SNConv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            #nn.Sigmoid()
        )
        self.snlinear = SNLinear(ndf * 8 * 4 * 4, 1)
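No forward method is shown; a sketch consistent with the (ndf*8) x 4 x 4 output of the conv stack and the SNLinear(ndf * 8 * 4 * 4, 1) head (an assumption, since the source shows only __init__):

    def forward(self, x):
        # Hypothetical forward pass: flatten the 4 x 4 feature map
        # before the spectrally normalized linear output.
        out = self.main(x)
        return self.snlinear(out.view(out.size(0), -1))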
Example #3
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        super(_SNDenseLayer, self).__init__()
        self.add_module("relu1", nn.ReLU(inplace=True))
        self.add_module(
            "conv1",
            SNConv2d(num_input_features,
                     bn_size * growth_rate,
                     kernel_size=1,
                     stride=1,
                     bias=False))
        self.add_module("relu2", nn.ReLU(inplace=True))
        self.add_module(
            "conv2",
            SNConv2d(bn_size * growth_rate,
                     growth_rate,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     bias=False))
        self.drop_rate = drop_rate
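The constructor builds the layer purely with add_module calls, which suggests an nn.Sequential subclass in the style of torchvision's _DenseLayer. Under that assumption (and with torch and torch.nn.functional as F imported), a matching forward sketch would concatenate the input with the new features:

    def forward(self, x):
        # Hypothetical forward: drop_rate is stored above but only
        # used here. Standard DenseNet layers return torch.cat of
        # the input and the newly computed features.
        new_features = super(_SNDenseLayer, self).forward(x)
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate,
                                     training=self.training)
        return torch.cat([x, new_features], 1)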
Example #4
    def __init__(self, num_input_features, num_output_features):
        super(_SNTransition, self).__init__()
        self.add_module("relu", nn.ReLU(inplace=True))
        self.add_module(
            "conv",
            SNConv2d(num_input_features,
                     num_output_features,
                     kernel_size=1,
                     stride=1,
                     bias=False))
        self.add_module("pool", nn.AvgPool2d(kernel_size=2, stride=2))
Example #5
def make_layers(cfg, norm='BN'):
    layers = []
    in_channels = 3
    for i, v in enumerate(cfg):
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            padding = v[1] if isinstance(v, tuple) else 1
            out_channels = v[0] if isinstance(v, tuple) else v
            conv2d = nn.Conv2d(in_channels,
                               out_channels,
                               kernel_size=3,
                               padding=padding)
            if norm == 'BN':
                layers += [
                    conv2d,
                    nn.BatchNorm2d(out_channels),
                    nn.ReLU(),
                    nn.Dropout(0.3)
                ]
            elif norm == 'SN':
                SNconv2d = SNConv2d(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    padding=padding)
                layers += [SNconv2d, nn.LeakyReLU(0.1, inplace=True)]
            elif norm == 'MSN':
                SNconv2d = SNConv2d(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    padding=padding)
                layers += [
                    SNconv2d,
                    MeanSpectralNorm(out_channels),
                    nn.LeakyReLU(0.1, inplace=True)
                ]
            else:
                layers += [conv2d, nn.ReLU(), nn.Dropout(0.3)]
            in_channels = out_channels
    return nn.Sequential(*layers)
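The cfg format follows the VGG convention: an integer is an output channel count, 'M' inserts a 2x2 max pool, and a tuple (channels, padding) overrides the default padding of 1. A usage sketch with a hypothetical cfg (not from the source):

    cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 'M']
    features = make_layers(cfg, norm='MSN')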
Example #6
    def __init__(
        self,
        growth_rate=12,
        block_config=(16, 16, 16),
        compression=0.8,
        num_init_features=24,
        bn_size=4,
        drop_rate=0,
        avgpool_size=8,
        num_classes=10,
    ):

        super(MSNDenseNet, self).__init__()
        assert 0 < compression <= 1, "compression of densenet should be between 0 and 1"
        self.avgpool_size = avgpool_size

        # First convolution
        self.features = nn.Sequential(
            OrderedDict([("conv0",
                          SNConv2d(3,
                                   num_init_features,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1,
                                   bias=False))]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
            )
            self.features.add_module("denseblock%d" % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _MSNTransition(num_input_features=num_features,
                                       num_output_features=int(num_features *
                                                               compression))
                self.features.add_module("transition%d" % (i + 1), trans)
                num_features = int(num_features * compression)

        # Final normalization (MeanSpectralNorm in place of batch norm)
        self.features.add_module("norm_final", MeanSpectralNorm(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)
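Only the constructor is shown; a forward sketch in the style of torchvision's DenseNet (assumed, with torch.nn.functional as F imported) would apply the final MSN layer inside self.features, then ReLU, average pooling, and the linear head:

    def forward(self, x):
        # Hypothetical forward pass, mirroring torchvision's DenseNet.
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.avg_pool2d(out, kernel_size=self.avgpool_size)
        return self.classifier(out.view(out.size(0), -1))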
Example #7
    def __init__(self,
                 nc,
                 ndf,
                 data_size,
                 num_classes,
                 num_hidden_layers=8,
                 spectral_norm=True):
        super(ConvNet, self).__init__()
        model_dict = [
            {'mult': 1, 'kernel': 3, 'stride': 1, 'padding': 1},
            {'mult': 1, 'kernel': 4, 'stride': 2, 'padding': 1},
            {'mult': 2, 'kernel': 3, 'stride': 1, 'padding': 1},
            {'mult': 2, 'kernel': 3, 'stride': 1, 'padding': 1},
            {'mult': 2, 'kernel': 4, 'stride': 2, 'padding': 1},
            {'mult': 4, 'kernel': 3, 'stride': 1, 'padding': 1},
            {'mult': 4, 'kernel': 4, 'stride': 2, 'padding': 1},
            {'mult': 8, 'kernel': 3, 'stride': 1, 'padding': 1},
        ]
        modules = []
        out_size = 0
        for i in range(num_hidden_layers):
            if i == 0:
                in_size = nc
                H = data_size
            else:
                in_size = ndf * model_dict[i - 1]['mult']
            out_size = ndf * model_dict[i]['mult']
            if spectral_norm:
                modules.append(
                    SNConv2d(in_size,
                             out_size,
                             model_dict[i]['kernel'],
                             model_dict[i]['stride'],
                             model_dict[i]['padding'],
                             bias=True))
            else:
                modules.append(
                    nn.Conv2d(in_size,
                              out_size,
                              model_dict[i]['kernel'],
                              model_dict[i]['stride'],
                              model_dict[i]['padding'],
                              bias=True))
            modules.append(nn.LeakyReLU(0.1, inplace=True))

            # Standard conv output size: floor((H + 2p - k) / s) + 1;
            # floor is assumed imported from math (from math import floor).
            H = floor((H + 2 * (model_dict[i]['padding']) -
                       (model_dict[i]['kernel'] - 1) - 1) /
                      model_dict[i]['stride'] + 1)
        self.main = nn.Sequential(*modules)

        self.snlinear = SNLinear(out_size * H * H, num_classes)
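With the default num_hidden_layers=8, the strides (1, 2, 1, 1, 2, 1, 2, 1) halve the input three times, so a 32 x 32 image ends at H = 4 and the head is SNLinear(ndf * 8 * 4 * 4, num_classes). A hypothetical instantiation (parameter values assumed):

    # e.g. CIFAR-10-sized inputs: 3 x 32 x 32, ten classes.
    net = ConvNet(nc=3, ndf=64, data_size=32, num_classes=10)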