Example #1
    def pre_train(self):
        # Dedicated Adam optimizer and MSE loss for the generator's embedding layer.
        embedding_optim_cfg = OptimConfig('Adam', lr=1e-2)
        self.embedding_optim = embedding_optim_cfg.get(
            self.gen.embedding.parameters())
        embedding_loss_cfg = LossConfig('MSE')
        self.embedding_loss = embedding_loss_cfg.get()
        # Generator optimizer over every parameter except SampleMatrix instances.
        self.gen_optim = self.goptim_cfg.get(
            filter(lambda x: type(x) != SampleMatrix, self.gen.parameters()))
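The snippets on this page rely on small config wrappers (OptimConfig, LossConfig, SamplerConfig, ModelConfig) whose definitions are not shown. A minimal sketch of how OptimConfig and LossConfig could behave, assuming they are thin factories over torch.optim and torch.nn (an assumption, not the repository's actual code):

import torch.nn as nn
import torch.optim as optim


class OptimConfig:
    # Stores an optimizer name plus keyword arguments; get(params) builds it.
    def __init__(self, name, **kwargs):
        self.name = name
        self.kwargs = kwargs

    def get(self, params):
        return getattr(optim, self.name)(params, **self.kwargs)


class LossConfig:
    # Stores a criterion name; get() builds it, e.g. 'MSE' -> nn.MSELoss().
    def __init__(self, name, **kwargs):
        self.name = name
        self.kwargs = kwargs

    def get(self):
        return getattr(nn, self.name + 'Loss')(**self.kwargs)

Under this sketch, OptimConfig('Adam', lr=1e-2).get(self.gen.embedding.parameters()) returns a torch.optim.Adam instance and LossConfig('MSE').get() returns nn.MSELoss(), matching the calls in the examples.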
Example #2
    def pre_train(self):
        embedding_optim_cfg = OptimConfig('Adam', lr=1e-3)
        self.embedding_optim = embedding_optim_cfg.get(
            self.gen.embedding.parameters())
        embedding_loss_cfg = LossConfig('MSE')
        self.embedding_loss = embedding_loss_cfg.get()
        self.gen_optim = goptim_cfg.get(
            filter(lambda x: type(x) != SampleMatrix, self.gen.parameters()))

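        # Register the scalar metrics tracked by the logger during training.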
        logger.create_scalar('js')
        logger.create_scalar('coverage')
        logger.create_scalar('coverage_HQ')
        logger.create_scalar('confidence')
        logger.create_scalar('ratio')
Example #3
    def vis_gen(self, postfix):
        # Sample a batch of generated images and save them as a 16-column grid
        # in the logger's image directory. Assumes th is torch and that
        # make_grid / save_image come from torchvision.utils.
        self.eval_mode()
        x_generated = self.sampling(128, 1)
        img = make_grid(th.Tensor(x_generated), nrow=16)
        save_image(img, os.path.join(logger.img_dir,
                                     'gen_{0:05d}.jpg'.format(postfix)))


if __name__ == '__main__':
    MODE = 10
    LATENT_DIM = 100
    IMAGE_SIZE = 64
    DATASET_LEN = 20000
    PRELOAD_LEN = 20000
    EPOCHS = 50000
    device = 'cuda'
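    # Binary cross-entropy adversarial losses for generator and discriminator.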
    gloss_cfg = LossConfig('BCE')
    dloss_cfg = LossConfig('BCE')

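    # 'Onehot' latent sampler sized by PRELOAD_LEN; random_sampling=False keeps the codes fixed.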
    sampler_cfg = SamplerConfig(
        name='Onehot',
        out_shape=PRELOAD_LEN,
        latent_dim=LATENT_DIM,
        random_sampling=False
    )
    goptim_cfg = OptimConfig('Adam', lr=0.0002)
    doptim_cfg = OptimConfig('Adam', lr=0.0002)

    dataset_cfg = DatasetConfig('CelebA', dataset_len=DATASET_LEN, image_size=IMAGE_SIZE, preload_len=PRELOAD_LEN)
    loader_cfg = LoaderConfig('naive', batch_size=128, shuffle=True)

    gen_cfg = ModelConfig(
Example #4
    args = parser.parse_args()

    device = 'cuda'
    GAN_EPOCHS = 50000
    DECREASE_LR_EPOCHS = 25000

    IMG_SIZE = 256
    BATCH_SIZE = 1
    label_len = int(args.label_len)

    EXP_NAME = 'refocus_final_' + str(label_len)
    # logger = SummaryWriter('./log/' + EXP_NAME)

    LR = 2e-4

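    # MSE adversarial losses (least-squares GAN style objective).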
    gloss_cfg = LossConfig('MSE')
    dloss_cfg = LossConfig('MSE')

    goptim_cfg = OptimConfig('Adam', lr=LR, beta=(0.5, 0.999))
    doptim_cfg = OptimConfig('Adam', lr=LR, beta=(0.5, 0.999))

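    # FlowerFull dataset at IMG_SIZE resolution, evaluation setup (train=False, batch_size=1, no shuffle).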
    dataset_cfg = DatasetConfig('FlowerFull',
                                size=IMG_SIZE,
                                num=label_len,
                                train=False)
    loader_cfg = LoaderConfig('naive', batch_size=1, shuffle=False)

    # gen_cfg = ModelConfig('ResNetGen', input_nc=3, output_nc=3, ngf=64, n_blocks=6)
    gen_cfg = ModelConfig('UNetGen',
                          input_nc=3,
                          output_nc=3,
Example #5
        # Scatter plot of the sampler modes, saved to the logger's image directory.
        fig = plt.figure()
        plt.scatter(sampler_modes[:, 0], sampler_modes[:, 1], s=1)
        plt.savefig(
            os.path.join(logger.img_dir,
                         'sampler_modes_{0:05d}.jpg'.format(postfix)))
        plt.close()


if __name__ == '__main__':
    dataset_len = 2000
    mode = 10
    latent_dim = 10

    device = 'cuda'
    gloss_cfg = LossConfig('BCE')
    dloss_cfg = LossConfig('BCE')

    sampler_loss_cfg = LossConfig('MSE')
    sampler_optim_cfg = OptimConfig('Adam', lr=0.001)
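    # 'Learning' sampler trained with its own MSE loss and Adam optimizer.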
    sampler_cfg = SamplerConfig(
        name='Learning',
        out_shape=latent_dim,
        data_num=dataset_len,
        batch_size=2000,
        epoch=50,
        alpha=0,
        loss_cfg=sampler_loss_cfg,
        optim_cfg=sampler_optim_cfg,
    )