def __init__(self, nc, ndf, alpha, beta, ngpu=1):
    """Assemble the VBN discriminator as a relevance-propagating conv stack.

    Args:
        nc: number of channels in the input image.
        ndf: base width (feature-map count) of the discriminator.
        alpha: LRP alpha value forwarded to the inner ``NextConvolution`` layers.
        beta: accepted for signature compatibility; not referenced in this
            constructor — NOTE(review): confirm whether it was meant to be used.
        ngpu: number of GPUs the caller intends to run the forward pass on.
    """
    super(DiscriminatorNetVBN, self).__init__()
    self.ngpu = ngpu
    # Keyword-argument names follow the signatures used elsewhere in this
    # file for the same nnrd layer classes.
    self.net = nnrd.RelevanceNet(
        nnrd.Layer(
            nnrd.FirstConvolution(in_channels=nc, out_channels=ndf,
                                  kernel_size=5, stride=1, padding=2),
            nnrd.ReLu(),
        ),
        nnrd.Layer(
            nnrd.NextConvolution(in_channels=ndf, out_channels=ndf,
                                 kernel_size=4, name='0', stride=2,
                                 padding=1, alpha=alpha),
            nnrd.BatchNorm2d(ndf),
            nnrd.ReLu(),
        ),
        # state size. (ndf) x 32 x 32
        nnrd.Layer(
            nnrd.NextConvolution(in_channels=ndf, out_channels=ndf * 2,
                                 kernel_size=4, name='1', stride=2,
                                 padding=1, alpha=alpha),
            nnrd.BatchNorm2d(ndf * 2),
            nnrd.ReLu(),
        ),
        # state size. (ndf*2) x 16 x 16
        nnrd.Layer(
            nnrd.NextConvolution(in_channels=ndf * 2, out_channels=ndf * 4,
                                 kernel_size=4, name='2', stride=2,
                                 padding=1, alpha=alpha),
            nnrd.BatchNorm2d(ndf * 4),
            nnrd.ReLu(),
        ),
        # state size. (ndf*4) x 8 x 8
        nnrd.Layer(
            nnrd.NextConvolution(in_channels=ndf * 4, out_channels=ndf * 8,
                                 kernel_size=4, name='3', stride=2,
                                 padding=1, alpha=alpha),
            nnrd.BatchNorm2d(ndf * 8),
            nnrd.ReLu(),
        ),
        # state size. (ndf*8) x 4 x 4
        # Final layer collapses to a single-channel score; no alpha is passed
        # here, matching the original construction.
        nnrd.Layer(
            nnrd.NextConvolution(in_channels=ndf * 8, out_channels=1,
                                 kernel_size=4, name='4', stride=1, padding=0),
            nn.Sigmoid(),
        ),
    )
def __init__(self, ngpu):
    """Build the fully-connected MNIST-sized discriminator (784 -> 1).

    Args:
        ngpu: number of GPUs the caller intends to run the forward pass on.
    """
    super(DiscriminatorNet, self).__init__()
    # All attributes are automatically assigned to the modules parameter list.
    self.ngpu = ngpu
    n_features = 784
    n_out = 1

    def _hidden(in_dim, out_dim):
        # Repeated hidden-layer pattern: linear -> ReLU -> dropout(0.3).
        return nnrd.Layer(
            nnrd.NextLinear(in_dim, out_dim),
            nnrd.ReLu(),
            nnrd.Dropout(0.3),
        )

    self.net = nnrd.RelevanceNet(
        nnrd.Layer(
            nnrd.FirstLinear(n_features, 1024),
            nnrd.ReLu(),
            nnrd.Dropout(0.3),
        ),
        _hidden(1024, 1024),
        _hidden(1024, 512),
        _hidden(512, 512),
        _hidden(512, 256),
        nnrd.Layer(nnrd.NextLinear(256, n_out), nn.Sigmoid()),
    )
def __init__(self, nc, ndf, alpha, ngpu=1):
    """Build the less-checkerboard discriminator with named (OrderedDict) layers.

    The OrderedDict keys become the submodule names, so they appear in the
    model's state_dict; renaming any key changes checkpoint compatibility.
    The final convolution, sigmoid, and ReLU are kept as separate attributes
    rather than inside ``self.net``.

    Args:
        nc: number of channels in the input image.
        ndf: base width (feature-map count) of the discriminator.
        alpha: LRP alpha value forwarded to the ``NextConvolution`` layers.
        ngpu: number of GPUs the caller intends to run the forward pass on.
    """
    super(DiscriminatorNetLessCheckerboardToCanonical, self).__init__()
    self.relevance = None  # populated later by the relevance-propagation pass
    self.ngpu = ngpu
    self.net = nnrd.RelevanceNetAlternate(
        nnrd.Layer(
            OrderedDict([
                ('conv1', nnrd.FirstConvolution(in_channels=nc, out_channels=ndf, kernel_size=3, stride=1, padding=0)),
                ('relu1', nnrd.ReLu()),
            ])),
        nnrd.Layer(
            OrderedDict([
                ('conv2', nnrd.NextConvolution(in_channels=ndf, out_channels=ndf, kernel_size=4, name='0', stride=2, padding=1, alpha=alpha)),
                ('bn2', nnrd.BatchNorm2d(ndf)),
                ('relu2', nnrd.ReLu()),
                # NOTE(review): 'dropou2' looks like a typo for 'dropout2', but
                # renaming it would change state_dict keys and break existing
                # checkpoints — confirm before fixing.
                ('dropou2', nnrd.Dropout(0.3)),
            ])),
        # state size. (ndf) x 32 x 32
        nnrd.Layer(
            OrderedDict([
                ('conv3', nnrd.NextConvolution(in_channels=ndf, out_channels=ndf * 2, kernel_size=4, name='1', stride=2, padding=1, alpha=alpha)),
                ('bn3', nnrd.BatchNorm2d(ndf * 2)),
                ('relu3', nnrd.ReLu()),
                ('dropout3', nnrd.Dropout(0.3)),
            ])),
        # state size. (ndf*2) x 16 x 16
        nnrd.Layer(
            OrderedDict([
                ('conv4', nnrd.NextConvolution(in_channels=ndf * 2, out_channels=ndf * 4, kernel_size=4, name='2', stride=2, padding=1, alpha=alpha)),
                ('bn4', nnrd.BatchNorm2d(ndf * 4)),
                ('relu4', nnrd.ReLu()),
                ('dropout4', nnrd.Dropout(0.3)),
            ])),
        # state size. (ndf*2) x 16 x 16
        # state size. (ndf*4) x 8 x 8
        nnrd.Layer(
            OrderedDict([
                # Epsilon-rule convolution for the deepest block.
                ('conv5', nnrd.NextConvolutionEps(in_channels=ndf * 4, out_channels=ndf * 8, kernel_size=4, name='3', stride=2, padding=1, epsilon=0.01)),
                ('bn5', nnrd.BatchNorm2d(ndf * 8)),
                ('relu5', nnrd.ReLu()),
                ('dropout5', nnrd.Dropout(0.3)),
            ])),
    )
    # Head kept outside self.net: epsilon-rule 1-channel convolution plus
    # separate sigmoid/ReLU modules.
    self.lastConvolution = nnrd.LastConvolutionEps(in_channels=ndf * 8, out_channels=1, kernel_size=4, name='4', stride=1, padding=0, epsilon=0.01)
    self.sigmoid = nn.Sigmoid()
    self.lastReLU = nnrd.ReLu()
def __init__(self, nc, ndf, alpha, beta, ngpu=1):
    """Build the alternate less-checkerboard discriminator.

    Layers are constructed as named intermediates and then handed to the
    relevance net; the final convolution sits inside ``self.net`` while the
    sigmoid and an extra ReLU are separate attributes.

    Args:
        nc: number of channels in the input image.
        ndf: base width (feature-map count) of the discriminator.
        alpha: LRP alpha value forwarded to the ``NextConvolution`` layers.
        beta: accepted for signature compatibility; not referenced in this
            constructor — NOTE(review): confirm whether it was meant to be used.
        ngpu: number of GPUs the caller intends to run the forward pass on.
    """
    super(DiscriminatorNetLessCheckerboardAlternate, self).__init__()
    self.relevance = None  # populated later by the relevance-propagation pass
    self.ngpu = ngpu

    stem = nnrd.Layer(
        nnrd.FirstConvolution(in_channels=nc, out_channels=ndf,
                              kernel_size=5, stride=1, padding=0),
        nnrd.ReLu(),
    )
    down1 = nnrd.Layer(
        nnrd.NextConvolution(in_channels=ndf, out_channels=ndf,
                             kernel_size=4, name='0', stride=2,
                             padding=1, alpha=alpha),
        nnrd.BatchNorm2d(ndf),
        nnrd.ReLu(),
        nnrd.Dropout(0.3),
    )
    # state size. (ndf) x 32 x 32
    down2 = nnrd.Layer(
        nnrd.NextConvolution(in_channels=ndf, out_channels=ndf * 2,
                             kernel_size=4, name='1', stride=2,
                             padding=1, alpha=alpha),
        nnrd.BatchNorm2d(ndf * 2),
        nnrd.ReLu(),
        nnrd.Dropout(0.3),
    )
    # state size. (ndf*2) x 16 x 16
    down3 = nnrd.Layer(
        nnrd.NextConvolution(in_channels=ndf * 2, out_channels=ndf * 4,
                             kernel_size=4, name='2', stride=2,
                             padding=1, alpha=alpha),
        nnrd.BatchNorm2d(ndf * 4),
        nnrd.ReLu(),
        nnrd.Dropout(0.3),
    )
    # state size. (ndf*4) x 8 x 8
    # NOTE(review): unlike the sibling constructors, no explicit epsilon is
    # passed to the *Eps layers here — presumably their default applies;
    # confirm against the nnrd definitions.
    down4 = nnrd.Layer(
        nnrd.NextConvolutionEps(in_channels=ndf * 4, out_channels=ndf * 8,
                                kernel_size=4, name='3', stride=2, padding=1),
        nnrd.BatchNorm2d(ndf * 8),
        nnrd.ReLu(),
        nnrd.Dropout(0.3),
    )
    # state size. (ndf*8) x 4 x 4
    head = nnrd.Layer(
        nnrd.LastConvolutionEps(in_channels=ndf * 8, out_channels=1,
                                kernel_size=4, name='4', stride=1, padding=0),
    )

    self.net = nnrd.RelevanceNetAlternate(stem, down1, down2, down3, down4, head)
    self.sigmoid = nn.Sigmoid()
    self.lastReLU = nnrd.ReLu()
def __init__(self, nc, ndf, alpha, ngpu=1):
    """Build a discriminator whose first (nc -> nc) convolution acts as a
    frozen smoothing layer.

    The OrderedDict keys become the submodule names and thus state_dict keys.
    After construction, the parameters of the first convolution are frozen
    (``requires_grad = False``) so the smoothing layer is never trained.

    Args:
        nc: number of channels in the input image (smoothing layer keeps it).
        ndf: base width (feature-map count) of the discriminator.
        alpha: LRP alpha value forwarded to the ``NextConvolution`` layers.
        ngpu: number of GPUs the caller intends to run the forward pass on.
    """
    super(SmoothingLayerDiscriminator, self).__init__()
    self.relevance = None  # populated later by the relevance-propagation pass
    self.ngpu = ngpu
    self.net = nnrd.RelevanceNetAlternate(
        # Smoothing layer: channel-preserving 3x3 convolution; frozen below.
        nnrd.Layer(
            OrderedDict([
                ('conv1', nnrd.FirstConvolution(in_channels=nc, out_channels=nc, kernel_size=3, stride=1, padding=0)),
                ('relu1', nnrd.ReLu()),
            ])),
        nnrd.Layer(
            OrderedDict([
                ('conv2', nnrd.NextConvolution(in_channels=nc, out_channels=ndf, kernel_size=4, name='0', stride=2, padding=1, alpha=alpha)),
                ('bn2', nnrd.BatchNorm2d(ndf)),
                ('relu2', nnrd.ReLu()),
                # NOTE(review): 'dropou2' looks like a typo for 'dropout2', but
                # renaming it would change state_dict keys and break existing
                # checkpoints — confirm before fixing.
                ('dropou2', nnrd.Dropout(0.3)),
            ])),
        # state size. (ndf) x 32 x 32
        nnrd.Layer(
            OrderedDict([
                ('conv3', nnrd.NextConvolution(in_channels=ndf, out_channels=ndf * 2, kernel_size=4, name='1', stride=2, padding=1, alpha=alpha)),
                ('bn3', nnrd.BatchNorm2d(ndf * 2)),
                ('relu3', nnrd.ReLu()),
                ('dropout3', nnrd.Dropout(0.3)),
            ])),
        # state size. (ndf*2) x 16 x 16
        nnrd.Layer(
            OrderedDict([
                ('conv4', nnrd.NextConvolution(in_channels=ndf * 2, out_channels=ndf * 4, kernel_size=4, name='2', stride=2, padding=1, alpha=alpha)),
                ('bn4', nnrd.BatchNorm2d(ndf * 4)),
                ('relu4', nnrd.ReLu()),
                ('dropout4', nnrd.Dropout(0.3)),
            ])),
        # state size. (ndf*2) x 16 x 16
        # state size. (ndf*4) x 8 x 8
        nnrd.Layer(
            OrderedDict([
                # Epsilon-rule convolution for the deepest block.
                ('conv5', nnrd.NextConvolutionEps(in_channels=ndf * 4, out_channels=ndf * 8, kernel_size=4, name='3', stride=2, padding=1, epsilon=0.01)),
                ('bn5', nnrd.BatchNorm2d(ndf * 8)),
                ('relu5', nnrd.ReLu()),
                ('dropout5', nnrd.Dropout(0.3)),
            ])),
    )
    # Head kept outside self.net: epsilon-rule 1-channel convolution plus
    # separate sigmoid/ReLU modules.
    self.lastConvolution = nnrd.LastConvolutionEps(in_channels=ndf * 8, out_channels=1, kernel_size=4, name='4', stride=1, padding=0, epsilon=0.01)
    self.sigmoid = nn.Sigmoid()
    self.lastReLU = nnrd.ReLu()
    # Do not update weights in smoothing layer
    # (self.net[0][0] indexes the first Layer's first submodule, i.e. conv1).
    for parameter in self.net[0][0].parameters():
        parameter.requires_grad = False