Code example #1
File: celeb_fenchel.py Project: zepx/cvb
    def __init__(self, nc, ndf, latent_dim, out_dim = None):
        super(Encoder, self).__init__()
        self.latent_dim = latent_dim
        if out_dim is None:
            self.out_dim = latent_dim
        else:
            self.out_dim = out_dim
        
        self.x2h = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 5, 2, 2, bias=False),
            nn.ReLU(inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 5, 2, 2, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.ReLU(inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 5, 2, 2, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.ReLU(inplace=True),
            nn.Conv2d(ndf * 4, ndf * 4, 5, 2, 2, bias=False),
            nn.ReLU(inplace=True),
        )

        self.z2h = nn.Sequential(
            # nn.Conv2d(self.latent_dim, 4 * ndf, 1),
            # nn.ReLU(inplace=True),
            # nn.Conv2d(4 * ndf, 4 * ndf, 1),
            # nn.ReLU(inplace=True),
            # cmd_args is a module-level argument object defined elsewhere in the project
            nn.Linear(cmd_args.latent_dim, ndf * 4 * 4 * 4)
        )
        self.fc2 = nn.Linear(ndf * 4 * 4 * 4 * 2, 400)
        self.fc3 = nn.Linear(400, self.out_dim)

        weights_init(self)
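
Only the constructor is shown above; the Encoder's forward pass is not part of this snippet. The lines below are a hypothetical usage sketch, assuming the class and the project helpers (weights_init, cmd_args) are in scope and that cmd_args.latent_dim matches the latent_dim passed to the constructor.

import torch

# Hypothetical usage of the Encoder above (not taken from the project).
# Assumes 64x64 RGB inputs and cmd_args.latent_dim == 100.
enc = Encoder(nc=3, ndf=64, latent_dim=100)
x = torch.randn(8, 3, 64, 64)              # batch of images
z = torch.randn(8, 100)                    # batch of latent codes
h_x = enc.x2h(x).view(x.size(0), -1)       # -> (8, 64 * 4 * 4 * 4)
h_z = enc.z2h(z)                           # -> (8, 64 * 4 * 4 * 4)
h = torch.cat([h_x, h_z], dim=1)           # width matches fc2's input size
out = enc.fc3(torch.relu(enc.fc2(h)))      # -> (8, out_dim)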
Code example #2
    def __init__(self, input_dim, latent_dim):
        super(Nu, self).__init__()
        self.latent_dim = latent_dim
        self.input_dim = input_dim

        nc = 1
        ndf = 16
        self.x_main = nn.Sequential(
            nn.Conv2d(nc, ndf, 5, 2, 2, bias=True),
            nn.Softplus(),
            nn.Conv2d(ndf, ndf * 2, 5, 2, 2, bias=True),
            nn.Softplus(),
            nn.Conv2d(ndf * 2, ndf * 2, 5, 2, 2, bias=True),
            nn.Softplus(),
            Flatten(),
            nn.Linear(ndf * 2 * 4 * 4, 300),
            nn.Softplus(),
        )

        self.z_main = nn.Sequential(
            nn.Linear(latent_dim, 300),
            nn.Softplus(),
        )

        self.joint = nn.Sequential(
            nn.Linear(300 * 2, 300),
            nn.Softplus(),
            nn.Linear(300, 1),
        )

        weights_init(self)
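
The Nu critic's forward method is likewise not shown. Judging from the layer sizes, it scores an (x, z) pair by concatenating the two 300-dimensional feature vectors; the sketch below is an assumption along those lines, using MNIST-sized 28x28 inputs and assuming Nu and the Flatten helper from the project are in scope.

import torch

# Hypothetical usage of the Nu critic above (forward is not shown in the snippet).
nu = Nu(input_dim=784, latent_dim=20)
x = torch.randn(16, 1, 28, 28)                 # MNIST-sized images
z = torch.randn(16, 20)
hx = nu.x_main(x)                              # -> (16, 300)
hz = nu.z_main(z)                              # -> (16, 300)
score = nu.joint(torch.cat([hx, hz], dim=1))   # -> (16, 1), one scalar per pair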
Code example #3
    def __init__(self, latent_dim):
        super(MyMnistCnnEncoder, self).__init__()
        self.latent_dim = latent_dim
        nc = 1
        ndf = 16

        self.main = MySequential(
            MyConv2d(nc, ndf, 5, 2, 2, bias=True),
            #nn.BatchNorm2d(ndf),
            MySoftplus(),
            MyConv2d(ndf, ndf * 2, 5, 2, 2, bias=True),
            #nn.BatchNorm2d(ndf * 2),
            MySoftplus(),
            MyConv2d(ndf * 2, ndf * 2, 5, 2, 2, bias=True),
            #nn.BatchNorm2d(ndf * 2),
            MySoftplus(),
        )
        self.fc1 = MySequential(
            MyLinear(ndf * 2 * 4 * 4, 300),
            MySoftplus(),
        )
        #self.fc1_bnorm = nn.BatchNorm1d(300)
        self.fc2 = MyLinear(300, latent_dim)
        self.fc3 = MyLinear(300, latent_dim)
        weights_init(self)
Code example #4
File: toy_common.py Project: zepx/cvb
    def __init__(self):
        super(Decoder, self).__init__()

        self.fc1 = nn.Linear(2, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 4)
        weights_init(self)
Code example #5
File: nonparam_fenchel.py Project: zepx/cvb
    def __init__(self):
        super(Nu, self).__init__()

        self.fc1 = nn.Linear(4 + 2, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 1)

        weights_init(self)
Code example #6
    def __init__(self, latent_dim, input_dim, act_out=F.sigmoid):
        super(FCDecoder, self).__init__()
        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.act_out = act_out

        self.fc1 = nn.Linear(latent_dim, 400)
        self.fc2 = nn.Linear(400, input_dim)
        weights_init(self)
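
A plausible decode path for FCDecoder is fc1, a nonlinearity, fc2, then act_out, but the actual forward is not included in the snippet. A minimal hypothetical sketch under that assumption, with MNIST-like dimensions:

import torch

# Hypothetical usage of FCDecoder above; the real forward method is not shown.
dec = FCDecoder(latent_dim=20, input_dim=784)
z = torch.randn(32, 20)
h = torch.relu(dec.fc1(z))         # (32, 400)
x_recon = dec.act_out(dec.fc2(h))  # (32, 784), values in (0, 1) with sigmoid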
Code example #7
    def __init__(self, input_dim, latent_dim):
        super(MyMnistFCEncoder, self).__init__()
        self.latent_dim = latent_dim
        self.input_dim = input_dim        

        self.fc1 = MyLinear(input_dim, 400)
        self.fc2 = MyLinear(400, latent_dim)
        self.fc3 = MyLinear(400, latent_dim)
        weights_init(self)
Code example #8
    def __init__(self, latent_dim, input_dim, act_out=F.sigmoid):
        super(FC3LayerDecoder, self).__init__()
        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.act_out = act_out

        self.fc1 = nn.Linear(latent_dim, 256)
        self.fc2 = nn.Linear(256, 512)
        self.fc3 = nn.Linear(512, input_dim)
        weights_init(self)
Code example #9
File: nonparam_fenchel.py Project: zepx/cvb
    def __init__(self):
        super(Encoder, self).__init__()

        self.fc1 = nn.Linear(4 + 2, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)

        self.fc4 = nn.Linear(64, 2)

        weights_init(self)
Code example #10
    def __init__(self, input_dim, latent_dim):
        super(FCGaussianEncoder, self).__init__()
        self.latent_dim = latent_dim
        self.input_dim = input_dim

        self.fc1 = nn.Linear(input_dim, 400)

        self.fc2 = nn.Linear(400, latent_dim)
        self.fc3 = nn.Linear(400, latent_dim)

        weights_init(self)
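
The two heads fc2 and fc3 of FCGaussianEncoder read most naturally as the mean and log-variance of a diagonal Gaussian q(z|x); that interpretation is an assumption, since the forward method is not shown. A minimal reparameterization sketch under that assumption:

import torch

# Hypothetical usage of FCGaussianEncoder above; the forward method and the
# exact meaning of fc2/fc3 are assumptions, not shown in the snippet.
enc = FCGaussianEncoder(input_dim=784, latent_dim=20)
x = torch.randn(32, 784)
h = torch.relu(enc.fc1(x))
mu, logvar = enc.fc2(h), enc.fc3(h)
z = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)   # reparameterization trick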
Code example #11
File: mnist_common.py Project: zepx/cvb
    def __init__(self, latent_dim):
        super(AvbEncoder, self).__init__()
        self.latent_dim = latent_dim
        nc = 1
        ndf = 16
        # self.transform = transform.CenterCrop((64, 64))

        self.main = nn.Sequential(
            nn.Conv2d(nc, ndf, 5, 2, 2, bias=True),
            #nn.BatchNorm2d(ndf),
            nn.Softplus(),
            nn.Conv2d(ndf, ndf * 2, 5, 2, 2, bias=True),
            #nn.BatchNorm2d(ndf * 2),
            nn.Softplus(),
            nn.Conv2d(ndf * 2, ndf * 2, 5, 2, 2, bias=True),
            #nn.BatchNorm2d(ndf * 2),
            nn.Softplus(),
        )
        self.fc1 = nn.Linear(ndf * 2 * 4 * 4, 300)
        #self.fc1_bnorm = nn.BatchNorm1d(300)
        self.fc2 = nn.Linear(300, latent_dim)
        self.fc3 = nn.Linear(300, latent_dim)
        weights_init(self)
Code example #12
File: celeb_parametric_opt.py Project: zepx/cvb
    def __init__(self, nc, ndf, latent_dim):
        super(MyCelebEncoder, self).__init__()
        self.latent_dim = latent_dim

        self.main = MySequential(
            # input is (nc) x 64 x 64
            MyConv2d(nc, ndf, 5, 2, 2, bias=False),
            MyReLU(inplace=True),
            # state size. (ndf) x 32 x 32
            MyConv2d(ndf, ndf * 2, 5, 2, 2, bias=False),
            MyBatchNorm2d(ndf * 2),
            MyReLU(inplace=True),
            # state size. (ndf*2) x 16 x 16
            MyConv2d(ndf * 2, ndf * 4, 5, 2, 2, bias=False),
            MyBatchNorm2d(ndf * 4),
            MyReLU(inplace=True),
            MyConv2d(ndf * 4, ndf * 4, 5, 2, 2, bias=False),
            MyBatchNorm2d(ndf * 4),
            MyReLU(inplace=True),
        )

        self.fc2 = MyLinear(ndf * 4 * 4 * 4, latent_dim)
        self.fc3 = MyLinear(ndf * 4 * 4 * 4, latent_dim)
        weights_init(self)