Example #1
    def __init__(self, args, sess=None):
        self.session(sess)
        self.netE = nets.Encoder()
        self.netG = nets.Generator()
        self.train = tf.placeholder(tf.bool)
        self.build_network(args.nsf, args.npx, args.batch_size)

        if sess is None and not args.check:
            self.initialize()

        variables_to_restore = tf.trainable_variables() + tf.moving_average_variables()
        super(SurfaceToStructure, self).__init__(variables_to_restore)
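
The constructor hands its variable list to a superclass that is not shown in the listing. A minimal sketch of such a base class in TF1 style, wrapping a tf.train.Saver over exactly the variables it receives (the class name and method signatures here are assumptions, not the original code):

import tensorflow as tf

class BaseModel(object):
    """Hypothetical base: checkpoints only the variables it is given."""

    def __init__(self, variables):
        # The Saver saves/restores this subset and nothing else, which
        # is why the subclass curates the list so carefully.
        self.saver = tf.train.Saver(variables)

    def save(self, sess, path, step=None):
        self.saver.save(sess, path, global_step=step)

    def restore(self, sess, path):
        self.saver.restore(sess, path)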
Example #2

    def __init__(self, args, sess=None):
        self.session(sess)
        self.G_losses = []
        self.D_losses = []
        self.xgs = []
        self.netE = nets.Encoder()
        self.netG = nets.Generator()
        self.netD = nets.Discriminator()
        self.train_G = tf.placeholder(tf.bool)
        self.train_D = tf.placeholder(tf.bool)
        opt = tf.train.AdamOptimizer(args.learning_rate, 0.5)
        G_tower_grads = []
        D_tower_grads = []
        n_disc = len(args.nsf_disc)

        for i in range(args.n_gpus):
            gpu_name = '/gpu:{0}'.format(i + 3)  # towers run on GPUs 3 .. 3 + n_gpus - 1
            with tf.device(gpu_name):
                print(gpu_name)
                batch_size_per_gpu = args.batch_size // args.n_gpus  # integer division
                self.build_network(args, batch_size_per_gpu, i)

                G_grads = opt.compute_gradients(self.G_losses[-1], var_list=self.E_vars + self.G_vars)
                G_tower_grads.append(G_grads)

                D_grads = [opt.compute_gradients(self.D_losses[-1][k], var_list=self.D_vars[k])
                           for k in range(n_disc)]  # k avoids shadowing the GPU index i
                D_tower_grads.append(D_grads)

        self.optG = opt.apply_gradients(average_gradients(G_tower_grads))
        self.G_loss = tf.reduce_mean(self.G_losses)
        self.xg = tf.concat(self.xgs, 0)

        self.optD = []
        self.D_loss = []
        for i in range(n_disc):
            grads = []
            losses = []
            for j in range(args.n_gpus):
                grads.append(D_tower_grads[j][i])
                losses.append(self.D_losses[j][i])
            self.optD.append(opt.apply_gradients(average_gradients(grads)))
            self.D_loss.append(tf.reduce_mean(losses))

        if sess is None and not args.check:
            self.initialize()

        ma_vars = tf.moving_average_variables()
        BN_vars = [var for var in ma_vars if var.name.startswith('E') or var.name.startswith('G')]
        variables_to_save = self.E_vars + self.G_vars + BN_vars
        super(SurfaceToStructure, self).__init__(variables_to_save)
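
Example #2 leans on an average_gradients helper that the listing does not include. A minimal sketch in the classic TF1 multi-tower style, assuming each element of tower_grads is the list of (gradient, variable) pairs returned by opt.compute_gradients() on one GPU:

import tensorflow as tf

def average_gradients(tower_grads):
    # zip(*tower_grads) regroups the k-th (grad, var) pair of every tower
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, 0), 0)
        # variables are shared across towers, so any tower's handle works
        _, var = grad_and_vars[0]
        average_grads.append((grad, var))
    return average_grads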
Example #3
    def __init__(self, strategy, restore):

        self.strategy = strategy
        self.z_dim = Config.latent_dim
        self.global_batchsize = Config.global_batchsize
        self.batchsize_per_replica = int(self.global_batchsize /
                                         self.strategy.num_replicas_in_sync)

        self.gen_model = nets.Generator()
        self.disc_model = nets.Discriminator()
        self.gen_optimizer = tf.keras.optimizers.Adam(
            learning_rate=Config.gen_lr,
            beta_1=Config.beta1,
            beta_2=Config.beta2)
        self.disc_optimizer = tf.keras.optimizers.Adam(
            learning_rate=Config.disc_lr,
            beta_1=Config.beta1,
            beta_2=Config.beta2)
        self.train_writer = tf.summary.create_file_writer(Config.summaryDir +
                                                          'train')

        self.ckpt = tf.train.Checkpoint(
            step=tf.Variable(0),
            generator_optimizer=self.gen_optimizer,
            generator_model=self.gen_model,
            discriminator_optimizer=self.disc_optimizer,
            discriminator_model=self.disc_model)

        self.ckpt_manager = tf.train.CheckpointManager(self.ckpt,
                                                       Config.modelDir,
                                                       max_to_keep=3)

        self.global_step = 0

        if restore:
            latest_ckpt = tf.train.latest_checkpoint(Config.modelDir)
            if not latest_ckpt:
                raise Exception('No saved model found in: ' + Config.modelDir)
            self.ckpt.restore(latest_ckpt)
            # a path like '.../ckpt-300' yields 300, the number of
            # batches trained before this checkpoint was written
            self.global_step = int(latest_ckpt.split('-')[-1])
            print("Restored saved model from latest checkpoint")
Example #4
import os

import numpy as np
import torch
from torch.backends import cudnn

# data, nets and utils are project modules; args comes from the
# script's (elided) argparse setup
if not os.path.exists(args.dataroot):
    os.makedirs(args.dataroot)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.device = device 

np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
# note: benchmark=True lets cuDNN autotune kernels, which can
# reintroduce nondeterminism despite the flag above
cudnn.benchmark = True


dat = data.load_data(args.dataset, args.dataroot, args.batchSize,
                     device=device, imgsize=args.imageSize,
                     Ntrain=args.Ntrain, Ntest=args.Ntest)

#### defining generator
netG = nets.Generator(args.imageSize, args.nz, args.ngf, dat['nc']).to(device)
netG2 = nets.Generator(args.imageSize, args.nz, args.ngf, dat['nc']).to(device)
if args.model == 'presgan':
    # learnable per-pixel noise scale used by PresGAN
    log_sigma = torch.tensor([args.logsigma_init] * (args.imageSize * args.imageSize),
                             device=device, requires_grad=True)
print('{} Generator: {}'.format(args.model.upper(), netG))

#### defining discriminator
netD = nets.Discriminator(args.imageSize, args.ndf, dat['nc']).to(device) 
print('{} Discriminator: {}'.format(args.model.upper(), netD))

#### initialize weights
netG.apply(utils.weights_init)
netG2.apply(utils.weights_init)
if args.ckptG != '':
    netG.load_state_dict(torch.load(args.ckptG))
    netG2.load_state_dict(torch.load(args.ckptG))
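
utils.weights_init is not shown in the listing. The name conventionally refers to the standard DCGAN initializer (Normal(0, 0.02) for convolutions, Normal(1, 0.02) for batch-norm scales); a sketch of that helper, offered as an assumption about what utils provides:

import torch.nn as nn

def weights_init(m):
    # netG.apply(weights_init) calls this once per submodule
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0.0)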
Example #5
def init_params():
    # noise_vec_size, d_lr and g_lr are module-level globals; the 2 is
    # presumably the discriminator's input dimension
    discriminator = nets.Discriminator(2)
    generator = nets.Generator(noise_vec_size)
    disc_optim = torch.optim.Adam(discriminator.parameters(), lr=d_lr)
    gen_optim = torch.optim.Adam(generator.parameters(), lr=g_lr)
    return discriminator, generator, disc_optim, gen_optim, torch.nn.BCELoss()
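
A sketch of how these pieces wire into one vanilla-GAN update; the batch size, the 2-D real data, and a discriminator ending in a sigmoid with output shape (batch, 1) are assumptions, not part of the snippet:

import torch

discriminator, generator, disc_optim, gen_optim, criterion = init_params()

real_batch = torch.randn(64, 2)           # stand-in for a real 2-D data batch
noise = torch.randn(64, noise_vec_size)   # noise_vec_size is the snippet's global
fake_batch = generator(noise)

# discriminator step: push real toward 1, fake toward 0
disc_optim.zero_grad()
d_loss = (criterion(discriminator(real_batch), torch.ones(64, 1)) +
          criterion(discriminator(fake_batch.detach()), torch.zeros(64, 1)))
d_loss.backward()
disc_optim.step()

# generator step: make the discriminator call fakes real
gen_optim.zero_grad()
g_loss = criterion(discriminator(fake_batch), torch.ones(64, 1))
g_loss.backward()
gen_optim.step()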