Example #1
    def __init__(self, d=128):
        super(DiscriminatorNet, self).__init__()

        n_out = 1
        self.net = RelevanceNet(
            Layer(  # Input Layer
                FirstConvolution(1, d, 4, stride=2, padding=1),
                PropReLu(),
            ),
            Layer(
                NextConvolution(d, 2 * d, 4, stride=2, padding=1),
                BatchNorm2d(2 * d),
                PropReLu(),
            ),
            Layer(
                NextConvolution(2 * d, 4 * d, 4, stride=2, padding=1),
                BatchNorm2d(4 * d),
                PropReLu(),
            ),
            Layer(
                NextConvolution(4 * d, 8 * d, 4, stride=2, padding=1),
                BatchNorm2d(8 * d),
                PropReLu(),
            ),
            Layer(  # Output Layer
                NextConvolution(8 * d, 1, 4, stride=1, padding=0),
                FlattenLayer(),
                nn.Sigmoid()))

        self.optimizer = optim.Adam(self.parameters(), lr=0.0002)
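
As a quick sanity check on this architecture: with a 64x64 single-channel input (an assumption, the snippet does not state the image size), the four stride-2 convolutions halve the spatial size down to 4x4 and the final 4x4 convolution with stride 1 and no padding collapses it to a single sigmoid score per image. A minimal standalone sketch of that arithmetic:

def conv_out(size, kernel=4, stride=2, padding=1):
    # standard convolution output size: floor((size + 2*padding - kernel) / stride) + 1
    return (size + 2 * padding - kernel) // stride + 1

size = 64                                    # assumed input resolution
for _ in range(4):                           # the four stride-2 convolutions: 64 -> 32 -> 16 -> 8 -> 4
    size = conv_out(size)
size = conv_out(size, stride=1, padding=0)   # final 4x4 convolution: 4 -> 1
print(size)                                  # 1, i.e. one score per image after FlattenLayer and Sigmoid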

Example #2
    def __init__(self, d, nc):
        super(MNISTDiscriminatorNet, self).__init__(d, nc)

        self.loss = nn.BCELoss()

        self.net = RelevanceNet(
            Layer(  # Input Layer
                FirstConvolution(nc, d, 4, stride=2, padding=1),
                PropReLu(),
            ),
            Layer(
                NextConvolution(d, 2 * d, 4, stride=2, padding=1, alpha=1),
                BatchNorm2d(2 * d),
                PropReLu(),
            ),
            Layer(
                NextConvolution(2 * d, 4 * d, 4, stride=2, padding=1, alpha=1),
                BatchNorm2d(4 * d),
                PropReLu(),
            ),
            Layer(
                NextConvolution(4 * d, 8 * d, 4, stride=2, padding=1, alpha=1),
                BatchNorm2d(8 * d),
                PropReLu(),
            ),
            Layer(  # We take relevance here
                NextConvolution(8 * d, 1, 4, stride=1, padding=0, alpha=1)),
            Layer(  # Output Layer
                nn.Sigmoid()))

Example #3
    def __init__(self):
        super(DiscriminatorNet, self).__init__()

        n_out = 1

        self.net = RelevanceNet(
            Layer(  # Input Layer
                FirstConvolution(1, 128, 4, stride=2, padding=1),
                PropReLu(),
                # Pooling(2),
                Dropout(0.3)
            ),
            Layer(
                NextConvolution(128, 256, 4, stride=2, padding=1),
                PropReLu(),
                # Pooling(2),
                Dropout(0.3)
            ),
            Layer(
                NextConvolution(256, 1024, 4, stride=2, padding=1),
                PropReLu(),
                # Pooling(2),
                Dropout(0.3)
            ),
            Layer(
                NextConvolution(1024, 1, 4, stride=1, padding=0),
                PropReLu(),
                # Pooling(2),
                Dropout(0.3)
            ),
            Layer(  # Output Layer
                LastLinear(25, n_out),
                nn.Sigmoid()
            )
        )


        self.optimizer = optim.Adam(self.parameters(), lr=0.0002)
        self.loss = nn.BCELoss()
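
The 25 in LastLinear(25, n_out) follows from the convolution arithmetic, again assuming a 64x64 single-channel input: three stride-2 convolutions give an 8x8 map, and the final 4x4 convolution with stride 1 and no padding gives a 5x5 map with one channel, i.e. 25 values flattened into the linear layer. A short standalone check:

def conv_out(size, kernel, stride, padding):
    # standard convolution output size: floor((size + 2*padding - kernel) / stride) + 1
    return (size + 2 * padding - kernel) // stride + 1

size = 64                       # assumed input resolution
for _ in range(3):              # three stride-2 convolutions: 64 -> 32 -> 16 -> 8
    size = conv_out(size, 4, 2, 1)
size = conv_out(size, 4, 1, 0)  # last convolution: 8 -> 5
print(size * size)              # 25, matching LastLinear(25, n_out)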

Example #4
class DiscriminatorNet(nn.Module):
    """
    Three hidden-layer discriminative neural network
    """
    def __init__(self, d=128):
        super(DiscriminatorNet, self).__init__()

        n_out = 1
        self.net = RelevanceNet(
            Layer(  # Input Layer
                FirstConvolution(1, d, 4, stride=2, padding=1),
                PropReLu(),
            ),
            Layer(
                NextConvolution(d, 2 * d, 4, stride=2, padding=1),
                BatchNorm2d(2 * d),
                PropReLu(),
            ),
            Layer(
                NextConvolution(2 * d, 4 * d, 4, stride=2, padding=1),
                BatchNorm2d(4 * d),
                PropReLu(),
            ),
            Layer(
                NextConvolution(4 * d, 8 * d, 4, stride=2, padding=1),
                BatchNorm2d(8 * d),
                PropReLu(),
            ),
            Layer(  # Output Layer
                NextConvolution(8 * d, 1, 4, stride=1, padding=0),
                FlattenLayer(),
                nn.Sigmoid()))

        self.optimizer = optim.Adam(self.parameters(), lr=0.0002)

    def forward(self, x):
        return self.net(x)

    def relprop(self, R):
        return self.net.relprop(R)

    def weight_init(self, mean, std):
        for m in self.net.modules():
            if isinstance(m, FirstConvolution) or isinstance(
                    m, NextConvolution):
                m.weight.data.normal_(mean, std)
                # m.bias.data.fill_(0)

    def training_iteration(self, real_data, fake_data, optimizer):
        N = real_data.size(0)

        # Reset gradients
        optimizer.zero_grad()

        # 1.1 Train on real data
        prediction_real = self.forward(real_data)
        error_real = loss(prediction_real, discriminator_target(N))
        # error_real.backward()

        # 1.2 Train on fake data
        predictions_fake = self.forward(fake_data)
        error_fake = loss(predictions_fake, generator_target(N))
        # error_fake.backward()

        training_loss = loss(prediction_real - predictions_fake,
                             discriminator_target(N))
        training_loss.backward()

        # 1.3 update weights
        optimizer.step()

        return error_fake + error_real, prediction_real, predictions_fake
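
relprop delegates to RelevanceNet.relprop which, by the usual layer-wise relevance propagation convention, takes the relevance assigned to the output and redistributes it back through the layers until it reaches the input. Assuming this repository follows that convention (the snippet does not show RelevanceNet itself), a relevance heat-map for a single image might be obtained like this:

import torch

discriminator = DiscriminatorNet(d=128)
discriminator.weight_init(mean=0.0, std=0.02)

image = torch.randn(1, 1, 64, 64)         # assumed 64x64 single-channel input
score = discriminator(image)              # forward pass; LRP implementations usually cache activations here
relevance = discriminator.relprop(score)  # propagate the output score back to the input
print(relevance.shape)                    # expected torch.Size([1, 1, 64, 64]) if relprop mirrors the forward pass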

Example #5
    def __init__(self, ndf, nc):
        super(CIFARDiscriminatorNet, self).__init__(ndf, nc)

        self.loss = nn.BCELoss()

        net = RelevanceNet()

        net.add_module('conv0',
                       FirstConvolution(nc, ndf, stride=2, kernel_size=5))
        net.add_module('relu0', PropReLu())

        net.add_module('conv1',
                       NextConvolution(ndf, ndf * 2, stride=2, kernel_size=5))
        net.add_module('bn1', BatchNorm2d(ndf * 2))
        net.add_module('relu1', PropReLu())

        net.add_module(
            'conv2', NextConvolution(ndf * 2, ndf * 4, stride=2,
                                     kernel_size=5))
        net.add_module('bn2', BatchNorm2d(ndf * 4))
        net.add_module('relu2', PropReLu())

        net.add_module(
            'conv3', NextConvolution(ndf * 4, ndf * 8, stride=2,
                                     kernel_size=5))
        net.add_module('bn3', BatchNorm2d(ndf * 8))
        net.add_module('relu3', PropReLu())

        net.add_module(
            'conv4', NextConvolution(ndf * 8,
                                     ndf * 16,
                                     stride=2,
                                     kernel_size=5))
        net.add_module('bn4', BatchNorm2d(ndf * 16))
        net.add_module('relu4', PropReLu())

        net.add_module('Flatten', FlattenToLinearLayer())
        net.add_module('lastlinear', NextLinear(ndf * 16, 1))

        net.add_module('sigmoid', nn.Sigmoid())

        self.net = net

Example #6
    def __init__(self, isize, nc, ndf, ngpu, n_extra_layers=0):
        super(WGANDiscriminatorNet, self).__init__(ndf, nc)
        self.ngpu = ngpu
        assert isize % 16 == 0, "isize has to be a multiple of 16"

        main = RelevanceNet()
        main.cuda()
        main.add_module('initial-conv{0}-{1}'.format(nc, ndf),
                        FirstConvolution(nc, ndf, 4, 2, 1))
        main.add_module('initial-relu{0}'.format(ndf), PropReLu(inplace=True))
        csize, cndf = isize // 2, ndf

        # Extra layers
        for t in range(n_extra_layers):
            main.add_module('extra-layers-{0}-{1}-conv'.format(t, cndf),
                            NextConvolution(cndf, cndf, 3, 1, 1))
            main.add_module('extra-layers-{0}-{1}-batchnorm'.format(t, cndf),
                            BatchNorm2d(cndf))
            main.add_module('extra-layers-{0}-{1}-relu'.format(t, cndf),
                            PropReLu(inplace=True))

        while csize > 4:
            in_feat = cndf
            out_feat = cndf * 2
            main.add_module(
                'pyramid-{0}-{1}-conv'.format(in_feat, out_feat),
                NextConvolution(in_feat, out_feat, 4, 2, 1, alpha=2.0))
            main.add_module('pyramid-{0}-batchnorm'.format(out_feat),
                            BatchNorm2d(out_feat))
            main.add_module('pyramid-{0}-relu'.format(out_feat),
                            PropReLu(inplace=True))
            cndf = cndf * 2
            csize = csize // 2

        # We take relevance here
        # state size. K x 4 x 4
        # Global average to single output
        main.add_module('final-{0}-{1}-conv'.format(cndf, 1),
                        NextConvolution(cndf, 1, 4, 1, 0, alpha=2.0))
        self.main = main
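
To make the loop above concrete: with isize=64 and ndf=64 (assumed values, any isize that is a multiple of 16 works), the initial convolution gives csize=32, and each pyramid layer then doubles the channel count while halving the spatial size until csize reaches 4, so the final 4x4 convolution maps a 512x4x4 feature map to a single output. A standalone sketch of that progression:

isize, ndf = 64, 64            # assumed values; isize must be a multiple of 16
csize, cndf = isize // 2, ndf  # after the initial stride-2 convolution
pyramid = []
while csize > 4:
    pyramid.append((cndf, cndf * 2))
    cndf *= 2
    csize //= 2
print(pyramid)                 # [(64, 128), (128, 256), (256, 512)]
print(cndf, csize)             # 512 4 -> the final convolution maps 512 x 4 x 4 to one value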

Example #7
class DiscriminatorNet(nn.Module):
    """
    Three hidden-layer discriminative neural network
    """

    def __init__(self):
        super(DiscriminatorNet, self).__init__()

        n_out = 1

        self.net = RelevanceNet(
            Layer(  # Input Layer
                FirstConvolution(1, 128, 4, stride=2, padding=1),
                PropReLu(),
                # Pooling(2),
                Dropout(0.3)
            ),
            Layer(
                NextConvolution(128, 256, 4, stride=2, padding=1),
                PropReLu(),
                # Pooling(2),
                Dropout(0.3)
            ),
            Layer(
                NextConvolution(256, 1024, 4, stride=2, padding=1),
                PropReLu(),
                # Pooling(2),
                Dropout(0.3)
            ),
            Layer(
                NextConvolution(1024, 1, 4, stride=1, padding=0),
                PropReLu(),
                # Pooling(2),
                Dropout(0.3)
            ),
            Layer(  # Output Layer
                LastLinear(25, n_out),
                nn.Sigmoid()
            )
        )


        self.optimizer = optim.Adam(self.parameters(), lr=0.0002)
        self.loss = nn.BCELoss()

    def forward(self, x):
        return self.net(x)

    def relprop(self, R):
        return self.net.relprop(R)

    def weight_init(self, mean, std):
        for m in self.net.modules():
            if isinstance(m, FirstConvolution) or isinstance(m, NextConvolution):
                m.weight.data.normal_(mean, std)
                m.bias.data.fill_(0)


    def training_iteration(self, real_data, fake_data, optimizer):
        N = real_data.size(0)

        # Reset gradients
        optimizer.zero_grad()

        # 1.1 Train on real data
        prediction_real = self.forward(real_data)
        # Calculate error & backpropagation
        error_real = loss(prediction_real, discriminator_target(N))
        # error_real.backward()
        # 1.2 Train on fake data
        predictions_fake = self.forward(fake_data)
        # Calculate error & backprop
        error_fake = loss(predictions_fake, generator_target(N))
        # error_fake.backward()
        training_loss = error_real + error_fake
        training_loss.backward()

        # 1.3 update weights
        optimizer.step()

        return error_fake + error_real, prediction_real, predictions_fake
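
loss, discriminator_target and generator_target are referenced but not shown in the snippet; they appear to be a module-level criterion plus helpers that build the real/fake label tensors. A hedged sketch of one discriminator step, with hypothetical stand-ins for those helpers and for the generator and data loader:

import torch
import torch.nn as nn

loss = nn.BCELoss()               # stand-in for the module-level criterion

def discriminator_target(n):      # stand-in: labels for real samples
    return torch.ones(n, 1)

def generator_target(n):          # stand-in: labels for generated samples
    return torch.zeros(n, 1)

discriminator = DiscriminatorNet()
discriminator.weight_init(mean=0.0, std=0.02)

real_batch = next(iter(dataloader))                 # assumed DataLoader yielding 64x64 single-channel images
noise = torch.randn(real_batch.size(0), 100, 1, 1)  # assumed latent dimension of 100
fake_batch = generator(noise).detach()              # assumed generator network

d_error, pred_real, pred_fake = discriminator.training_iteration(
    real_batch, fake_batch, discriminator.optimizer)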