Example #1
    def __init__(self, nc, ngf, ngpu, ref_batch):
        super(GeneratorNetVBN, self).__init__()
        self.ngpu = ngpu
        self.ref_batch = ref_batch
        nz = 100

        self.net = nn.Sequential(
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0),
            nnrd.VBN2d(ngf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ngf * 8, ngf * 8, 3, 1, 1),
            nnrd.VBN2d(ngf * 8),
            nn.LeakyReLU(0.2, inplace=True),

            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1),
            nnrd.VBN2d(ngf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1),
            nnrd.VBN2d(ngf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1),
            nnrd.VBN2d(ngf),
            nn.LeakyReLU(0.2, inplace=True),

            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )
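Only __init__ is shown above; the forward pass is not part of the snippet. A minimal usage sketch, assuming forward() simply applies self.net and that nnrd.VBN2d handles the reference batch internally (the values of nc and ngf and the reference-batch shape below are likewise assumptions, not taken from the source):

import torch

# Hypothetical settings: 3-channel output images, ngf=64 feature maps, and a
# reference batch of latent vectors for the virtual batch normalization.
ref_batch = torch.randn(32, 100, 1, 1)
netG = GeneratorNetVBN(nc=3, ngf=64, ngpu=1, ref_batch=ref_batch)

# A (batch, nz, 1, 1) latent tensor should come out as (batch, nc, 64, 64),
# matching the "state size" comments, if forward() just runs self.net.
z = torch.randn(16, 100, 1, 1)
fake = netG(z)
print(fake.shape)  # expected: torch.Size([16, 3, 64, 64])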
Example #2
    def removeBatchNormLayers(self):
        layers = []

        i = 1
        for layer in self.net.children():
            names = []
            for name, module in layer.named_children():
                names.append(name)
            if 'conv' + str(i) in names and 'bn' + str(i) in names:
                layer = nnrd.Layer(layer[0], layer[2], layer[3])
                layers.append(layer)
            else:
                layers.append(layer)
            i += 1

        self.net = nnrd.RelevanceNetAlternate(*layers)
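Note that the BatchNorm removal relies on positional indexing: a matched layer is assumed to be ordered as (conv, bn, relu, dropout), so layer[0], layer[2] and layer[3] keep the convolution, activation and dropout while the BatchNorm at index 1 is silently dropped. Layers whose children are not named 'conv<i>' and 'bn<i>' are appended unchanged.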
Example #3
    def __init__(self, ngpu):
        super(DiscriminatorNet, self).__init__()
        self.ngpu = ngpu

        # All attributes are automatically assigned to the module's parameter list
        n_features = 784
        n_out = 1
        self.net = nnrd.RelevanceNet(
            nnrd.Layer(nnrd.FirstLinear(n_features, 1024), nnrd.ReLu(),
                       nnrd.Dropout(0.3)),
            nnrd.Layer(nnrd.NextLinear(1024, 1024), nnrd.ReLu(),
                       nnrd.Dropout(0.3)),
            nnrd.Layer(
                nnrd.NextLinear(1024, 512),
                nnrd.ReLu(),
                nnrd.Dropout(0.3),
            ),
            nnrd.Layer(nnrd.NextLinear(512, 512), nnrd.ReLu(),
                       nnrd.Dropout(0.3)),
            nnrd.Layer(
                nnrd.NextLinear(512, 256),
                nnrd.ReLu(),
                nnrd.Dropout(0.3),
            ), nnrd.Layer(nnrd.NextLinear(256, n_out), nn.Sigmoid()))
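As with the generator above, only __init__ is shown. A minimal usage sketch, assuming forward() applies self.net to a flattened 28x28 image (784 = 28 * 28 is the only hint in the snippet, so the input size and data set are assumptions):

import torch

netD = DiscriminatorNet(ngpu=1)

# Batch of 16 flattened images (assumed 28x28 = 784 features per sample).
x = torch.randn(16, 784)
p_real = netD(x)          # assuming forward() simply runs self.net
print(p_real.shape)       # expected: torch.Size([16, 1]), values in (0, 1)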
Example #4
    def __init__(self, nc, ndf, alpha, ngpu=1):
        super(DiscriminatorNet, self).__init__()

        self.ngpu = ngpu
        self.net = nnrd.RelevanceNet(
            nnrd.Layer(
                nnrd.FirstConvolution(nc, ndf, 4, 2, 1),
                nnrd.ReLu(),
            ),
            # state size. (ndf) x 32 x 32
            nnrd.Layer(
                nnrd.NextConvolution(ndf, ndf * 2, 4, '1', 2, 1, alpha=alpha),
                nnrd.BatchNorm2d(ndf * 2),
                nnrd.ReLu(),
            ),
            # state size. (ndf*2) x 16 x 16
            nnrd.Layer(
                nnrd.NextConvolution(ndf * 2,
                                     ndf * 4,
                                     4,
                                     '2',
                                     2,
                                     1,
                                     alpha=alpha),
                nnrd.BatchNorm2d(ndf * 4),
                nnrd.ReLu(),
            ),
            # state size. (ndf*4) x 8 x 8
            nnrd.Layer(
                nnrd.NextConvolution(ndf * 4,
                                     ndf * 8,
                                     4,
                                     '3',
                                     2,
                                     1,
                                     alpha=alpha),
                nnrd.BatchNorm2d(ndf * 8),
                nnrd.ReLu(),
            ),
            # state size. (ndf*8) x 4 x 4
            nnrd.Layer(nnrd.NextConvolution(ndf * 8, 1, 4, '4', 1, 0),
                       nn.Sigmoid()))
Example #5
    def __init__(self, nc, ndf, alpha, ngpu=1):
        super(DiscriminatorNetLessCheckerboardToCanonical, self).__init__()

        self.relevance = None
        self.ngpu = ngpu

        self.net = nnrd.RelevanceNetAlternate(
            nnrd.Layer(
                OrderedDict([
                    ('conv1',
                     nnrd.FirstConvolution(in_channels=nc,
                                           out_channels=ndf,
                                           kernel_size=3,
                                           stride=1,
                                           padding=0)),
                    ('relu1', nnrd.ReLu()),
                ])),
            nnrd.Layer(
                OrderedDict([
                    ('conv2',
                     nnrd.NextConvolution(in_channels=ndf,
                                          out_channels=ndf,
                                          kernel_size=4,
                                          name='0',
                                          stride=2,
                                          padding=1,
                                          alpha=alpha)),
                    ('bn2', nnrd.BatchNorm2d(ndf)),
                    ('relu2', nnrd.ReLu()),
                    ('dropout2', nnrd.Dropout(0.3)),
                ])),
            # state size. (ndf) x 32 x 32
            nnrd.Layer(
                OrderedDict([
                    ('conv3',
                     nnrd.NextConvolution(in_channels=ndf,
                                          out_channels=ndf * 2,
                                          kernel_size=4,
                                          name='1',
                                          stride=2,
                                          padding=1,
                                          alpha=alpha)),
                    ('bn3', nnrd.BatchNorm2d(ndf * 2)),
                    ('relu3', nnrd.ReLu()),
                    ('dropout3', nnrd.Dropout(0.3)),
                ])),
            # state size. (ndf*2) x 16 x 16
            nnrd.Layer(
                OrderedDict([
                    ('conv4',
                     nnrd.NextConvolution(in_channels=ndf * 2,
                                          out_channels=ndf * 4,
                                          kernel_size=4,
                                          name='2',
                                          stride=2,
                                          padding=1,
                                          alpha=alpha)),
                    ('bn4', nnrd.BatchNorm2d(ndf * 4)),
                    ('relu4', nnrd.ReLu()),
                    ('dropout4', nnrd.Dropout(0.3)),
                ])),
            # state size. (ndf*4) x 8 x 8
            nnrd.Layer(
                OrderedDict([
                    ('conv5',
                     nnrd.NextConvolutionEps(in_channels=ndf * 4,
                                             out_channels=ndf * 8,
                                             kernel_size=4,
                                             name='3',
                                             stride=2,
                                             padding=1,
                                             epsilon=0.01)),
                    ('bn5', nnrd.BatchNorm2d(ndf * 8)),
                    ('relu5', nnrd.ReLu()),
                    ('dropout5', nnrd.Dropout(0.3)),
                ])),
        )

        self.lastConvolution = nnrd.LastConvolutionEps(in_channels=ndf * 8,
                                                       out_channels=1,
                                                       kernel_size=4,
                                                       name='4',
                                                       stride=1,
                                                       padding=0,
                                                       epsilon=0.01)

        self.sigmoid = nn.Sigmoid()
        self.lastReLU = nnrd.ReLu()
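Only __init__ is shown here, so how self.net, self.lastConvolution, self.sigmoid and self.lastReLU are combined is not part of the snippet. A sketch of one plausible forward pass, purely as an illustration of how the pieces could fit together (the actual class may wire them differently, e.g. keep self.lastReLU for the relevance propagation path rather than the forward pass):

    def forward(self, x):
        # Assumed wiring, not taken from the source: shared feature extractor,
        # then the separately stored last convolution, then the sigmoid that
        # turns the score into a real/fake probability.
        features = self.net(x)
        out = self.lastConvolution(features)
        return self.sigmoid(out)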
Example #6
    def __init__(self, nc, ndf, alpha, beta, ngpu=1):
        super(DiscriminatorNetLessCheckerboardAlternate, self).__init__()

        self.relevance = None
        self.ngpu = ngpu

        self.net = nnrd.RelevanceNetAlternate(
            nnrd.Layer(
                nnrd.FirstConvolution(in_channels=nc,
                                      out_channels=ndf,
                                      kernel_size=5,
                                      stride=1,
                                      padding=0),
                nnrd.ReLu(),
            ),
            nnrd.Layer(
                nnrd.NextConvolution(in_channels=ndf,
                                     out_channels=ndf,
                                     kernel_size=4,
                                     name='0',
                                     stride=2,
                                     padding=1,
                                     alpha=alpha),
                nnrd.BatchNorm2d(ndf),
                nnrd.ReLu(),
                nnrd.Dropout(0.3),
            ),
            # state size. (ndf) x 32 x 32
            nnrd.Layer(
                nnrd.NextConvolution(in_channels=ndf,
                                     out_channels=ndf * 2,
                                     kernel_size=4,
                                     name='1',
                                     stride=2,
                                     padding=1,
                                     alpha=alpha),
                nnrd.BatchNorm2d(ndf * 2),
                nnrd.ReLu(),
                nnrd.Dropout(0.3),
            ),
            # state size. (ndf*2) x 16 x 16
            nnrd.Layer(
                nnrd.NextConvolution(in_channels=ndf * 2,
                                     out_channels=ndf * 4,
                                     kernel_size=4,
                                     name='2',
                                     stride=2,
                                     padding=1,
                                     alpha=alpha),
                nnrd.BatchNorm2d(ndf * 4),
                nnrd.ReLu(),
                nnrd.Dropout(0.3),
            ),
            # state size. (ndf*4) x 8 x 8
            nnrd.Layer(
                nnrd.NextConvolutionEps(in_channels=ndf * 4,
                                        out_channels=ndf * 8,
                                        kernel_size=4,
                                        name='3',
                                        stride=2,
                                        padding=1),
                nnrd.BatchNorm2d(ndf * 8),
                nnrd.ReLu(),
                nnrd.Dropout(0.3),
            ),
            # state size. (ndf*8) x 4 x 4
            nnrd.Layer(
                nnrd.LastConvolutionEps(in_channels=ndf * 8,
                                        out_channels=1,
                                        kernel_size=4,
                                        name='4',
                                        stride=1,
                                        padding=0), ))
        self.sigmoid = nn.Sigmoid()
        self.lastReLU = nnrd.ReLu()
Example #7
    def __init__(self, nc, ndf, alpha, ngpu=1):
        super(SmoothingLayerDiscriminator, self).__init__()

        self.relevance = None
        self.ngpu = ngpu

        self.net = nnrd.RelevanceNetAlternate(
            nnrd.Layer(
                OrderedDict([
                    ('conv1',
                     nnrd.FirstConvolution(in_channels=nc,
                                           out_channels=nc,
                                           kernel_size=3,
                                           stride=1,
                                           padding=0)),
                    ('relu1', nnrd.ReLu()),
                ])),
            nnrd.Layer(
                OrderedDict([
                    ('conv2',
                     nnrd.NextConvolution(in_channels=nc,
                                          out_channels=ndf,
                                          kernel_size=4,
                                          name='0',
                                          stride=2,
                                          padding=1,
                                          alpha=alpha)),
                    ('bn2', nnrd.BatchNorm2d(ndf)),
                    ('relu2', nnrd.ReLu()),
                    ('dropout2', nnrd.Dropout(0.3)),
                ])),
            # state size. (ndf) x 32 x 32
            nnrd.Layer(
                OrderedDict([
                    ('conv3',
                     nnrd.NextConvolution(in_channels=ndf,
                                          out_channels=ndf * 2,
                                          kernel_size=4,
                                          name='1',
                                          stride=2,
                                          padding=1,
                                          alpha=alpha)),
                    ('bn3', nnrd.BatchNorm2d(ndf * 2)),
                    ('relu3', nnrd.ReLu()),
                    ('dropout3', nnrd.Dropout(0.3)),
                ])),
            # state size. (ndf*2) x 16 x 16
            nnrd.Layer(
                OrderedDict([
                    ('conv4',
                     nnrd.NextConvolution(in_channels=ndf * 2,
                                          out_channels=ndf * 4,
                                          kernel_size=4,
                                          name='2',
                                          stride=2,
                                          padding=1,
                                          alpha=alpha)),
                    ('bn4', nnrd.BatchNorm2d(ndf * 4)),
                    ('relu4', nnrd.ReLu()),
                    ('dropout4', nnrd.Dropout(0.3)),
                ])),
            # state size. (ndf*4) x 8 x 8
            nnrd.Layer(
                OrderedDict([
                    ('conv5',
                     nnrd.NextConvolutionEps(in_channels=ndf * 4,
                                             out_channels=ndf * 8,
                                             kernel_size=4,
                                             name='3',
                                             stride=2,
                                             padding=1,
                                             epsilon=0.01)),
                    ('bn5', nnrd.BatchNorm2d(ndf * 8)),
                    ('relu5', nnrd.ReLu()),
                    ('dropout5', nnrd.Dropout(0.3)),
                ])),
        )

        self.lastConvolution = nnrd.LastConvolutionEps(in_channels=ndf * 8,
                                                       out_channels=1,
                                                       kernel_size=4,
                                                       name='4',
                                                       stride=1,
                                                       padding=0,
                                                       epsilon=0.01)

        self.sigmoid = nn.Sigmoid()
        self.lastReLU = nnrd.ReLu()

        # Do not update weights in smoothing layer
        for parameter in self.net[0][0].parameters():
            parameter.requires_grad = False
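Because the smoothing convolution's parameters have requires_grad set to False, a typical training setup would pass only the trainable parameters to the optimizer. A minimal sketch (the constructor arguments and optimizer hyperparameters are assumptions, not taken from the source):

import torch.optim as optim

netD = SmoothingLayerDiscriminator(nc=3, ndf=64, alpha=1.0)

# Only parameters that still require gradients, i.e. everything except the
# frozen smoothing convolution, are handed to the optimizer.
optimizer = optim.Adam(
    (p for p in netD.parameters() if p.requires_grad),
    lr=2e-4, betas=(0.5, 0.999))  # typical DCGAN values, assumed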