예제 #1
0
    def __init__(self,
                 input_channels=1,
                 verbose=False,
                 cshape=(128, 7, 7),
                 w_distance=True):
        """Build a conv + linear discriminator / critic.

        Args:
            input_channels: number of channels of the input image.
            verbose: if True, print the assembled model.
            cshape: (C, H, W) feature-map shape produced by the conv
                stack; its product sizes the first linear layer.
            w_distance: if True (WGAN critic) the raw score is returned;
                otherwise a final Sigmoid squashes it to a probability.
        """
        super(Discriminator, self).__init__()

        self.channels = input_channels
        self.cshape = cshape
        # Flattened length of the conv feature map feeding the linear head.
        self.iels = int(np.prod(self.cshape))
        self.lshape = (self.iels, )
        self.verbose = verbose

        self.model = nn.Sequential(
            nn.Conv2d(self.channels, 64, 4, stride=2, padding=1),
            nn.LeakyReLU(0.02, inplace=True),
            nn.Conv2d(64, 128, 4, stride=2, padding=1),
            nn.LeakyReLU(0.02, inplace=True),
            Reshape(self.lshape),
            nn.Linear(self.iels, 1024),
            nn.LeakyReLU(0.02, inplace=True),
            nn.Linear(1024, 1),
        )

        # Idiomatic truthiness test instead of the `is False` identity
        # check (which silently ignored falsy non-bool values like 0).
        if not w_distance:
            self.model = nn.Sequential(self.model, nn.Sigmoid())

        init_weights(self)

        if self.verbose:
            print(self.model)
예제 #2
0
    def __init__(self,
                 input_channels=1,
                 output_channels=64,
                 cshape=(128, 7, 7),
                 verbose=False):
        """Convolutional encoder with two linear output heads.

        A shared conv trunk maps an image to a 1024-d feature vector;
        the `mu` and `con` heads each project it to `output_channels`
        values.
        """
        super(Encoder, self).__init__()

        self.input_channels = input_channels
        self.output_channels = output_channels
        self.cshape = cshape
        self.verbose = verbose
        # Flattened length of the conv feature map.
        flat = int(np.prod(cshape))
        self.iels = flat
        self.lshape = (flat, )

        trunk = [
            nn.Conv2d(self.input_channels, 64, 4, stride=2, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.Conv2d(64, 128, 4, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            Reshape(self.lshape),
            nn.Linear(flat, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(True),
        ]
        self.model = nn.Sequential(*trunk)

        self.mu = nn.Linear(1024, self.output_channels)
        self.con = nn.Linear(1024, self.output_channels)

        init_weights(self)

        if self.verbose:
            print(self.model)
예제 #3
0
    def __init__(self,
                 latent_dim=50,
                 x_shape=(1, 28, 28),
                 cshape=(128, 7, 7),
                 verbose=False):
        """Deconvolutional generator: latent vector -> image in [0, 1].

        A linear stem expands the latent code to a (C, H, W) feature
        map, then two transposed convolutions upsample it to `x_shape`.
        """
        super(Generator, self).__init__()

        self.latent_dim = latent_dim
        self.ishape = cshape
        self.iels = int(np.prod(self.ishape))
        self.x_shape = x_shape
        self.output_channels = x_shape[0]
        self.verbose = verbose

        # Linear stem: latent -> 1024 -> flattened map -> (C, H, W).
        stem = nn.Sequential(
            nn.Linear(self.latent_dim, 1024),
            nn.ReLU(True),
            nn.Linear(1024, self.iels),
            nn.BatchNorm1d(self.iels),
            nn.ReLU(True),
            Reshape(self.ishape),
        )

        # Upsampling head; the Sigmoid keeps pixel values in [0, 1].
        self.model = nn.Sequential(
            stem,
            nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.ConvTranspose2d(64,
                               self.output_channels,
                               4,
                               stride=2,
                               padding=1),
            nn.Sigmoid(),
        )

        init_weights(self)

        if self.verbose:
            print(self.model)
예제 #4
0
    def __init__(self, latent_dim=50, input_dim=784, inter_dims=None, verbose=False):
        """MLP generator: latent vector -> flat image in [0, 1].

        Args:
            latent_dim: size of the latent input vector.
            input_dim: size of the generated (flattened) output.
            inter_dims: hidden-layer widths, consumed widest-first
                (indexed from the end); defaults to [500, 500, 2000].
            verbose: if True, print the assembled model.
        """
        super(Generator, self).__init__()

        # Avoid the mutable-default-argument pitfall: build a fresh
        # list on every call instead of sharing one across instances.
        if inter_dims is None:
            inter_dims = [500, 500, 2000]

        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.inter_dims = inter_dims
        self.verbose = verbose

        self.model = nn.Sequential(
            *block(self.latent_dim, self.inter_dims[-1]),
            *block(self.inter_dims[-1], self.inter_dims[-2]),
            *block(self.inter_dims[-2], self.inter_dims[-3]),
            nn.Linear(self.inter_dims[-3], self.input_dim),
            nn.Sigmoid()
        )

        init_weights(self)

        if self.verbose:
            print(self.model)
예제 #5
0
    def __init__(self, input_dim=784, inter_dims=None, latent_dim=10, verbose=False):
        """MLP encoder producing `mu` and `con` latent heads.

        Args:
            input_dim: size of the flattened input.
            inter_dims: hidden-layer widths, applied in order;
                defaults to [500, 500, 2000].
            latent_dim: size of each linear output head.
            verbose: if True, print the assembled model.
        """
        super(Encoder, self).__init__()

        # Avoid the mutable-default-argument pitfall: build a fresh
        # list on every call instead of sharing one across instances.
        if inter_dims is None:
            inter_dims = [500, 500, 2000]

        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.inter_dims = inter_dims
        self.verbose = verbose

        self.model = nn.Sequential(
            *block(self.input_dim, self.inter_dims[0]),
            *block(self.inter_dims[0], self.inter_dims[1]),
            *block(self.inter_dims[1], self.inter_dims[2]),
        )

        self.mu = nn.Linear(self.inter_dims[-1], self.latent_dim)
        self.con = nn.Linear(self.inter_dims[-1], self.latent_dim)

        init_weights(self)

        if self.verbose:
            print(self.model)
예제 #6
0
    def __init__(self, verbose=False, input_dim=784, inter_dims=None):
        """MLP discriminator: flat input -> probability in (0, 1).

        Args:
            verbose: if True, print the assembled model.
            input_dim: size of the flattened input.
            inter_dims: hidden-layer widths, applied in order;
                defaults to [500, 500, 2000].
        """
        super(Discriminator, self).__init__()

        # Avoid the mutable-default-argument pitfall: build a fresh
        # list on every call instead of sharing one across instances.
        if inter_dims is None:
            inter_dims = [500, 500, 2000]

        self.input_dim = input_dim
        self.inter_dims = inter_dims
        self.verbose = verbose

        self.model = nn.Sequential(
            nn.Linear(self.input_dim, self.inter_dims[0]),
            nn.LeakyReLU(0.02, True),
            nn.Linear(self.inter_dims[0], self.inter_dims[1]),
            nn.LeakyReLU(0.02, True),
            nn.Linear(self.inter_dims[1], self.inter_dims[2]),
            nn.LeakyReLU(0.02, True),

            nn.Linear(self.inter_dims[2], 1),
            nn.Sigmoid()
        )

        init_weights(self)

        if self.verbose:
            print(self.model)
예제 #7
0
    def __init__(self, input_dim=784, inter_dims=None, r=9, latent_dim=10, verbose=False):
        """MLP encoder with `mu`, `con` and scalar `v` output heads.

        Args:
            input_dim: size of the flattened input.
            inter_dims: hidden-layer widths, applied in order;
                defaults to [500, 500, 2000].
            r: extra hyperparameter stored on the instance (unused here).
            latent_dim: size of the `mu` / `con` heads.
            verbose: if True, print the assembled model.
        """
        super(Encoder_SMM, self).__init__()

        # Avoid the mutable-default-argument pitfall: build a fresh
        # list on every call instead of sharing one across instances.
        if inter_dims is None:
            inter_dims = [500, 500, 2000]

        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.inter_dims = inter_dims
        self.r = r
        self.verbose = verbose

        self.model = nn.Sequential(
            *block(self.input_dim, self.inter_dims[0]),
            *block(self.inter_dims[0], self.inter_dims[1]),
            *block(self.inter_dims[1], self.inter_dims[2]),
        )

        self.mu = nn.Linear(self.inter_dims[-1], self.latent_dim)
        self.con = nn.Linear(self.inter_dims[-1], self.latent_dim)
        self.v = nn.Linear(self.inter_dims[-1], 1)

        init_weights(self)
        # Re-initialize `v` after init_weights so its small positive
        # uniform weights are not overwritten.
        self.v.weight.data.uniform_(0.01, 0.03)
        if self.verbose:
            print(self.model)