def __init__(self, sess, args, datasets):
    """Store the TF session, hyper-parameters, and dataset handles.

    Args:
        sess: an open TensorFlow session used for all graph execution.
        args: parsed command-line/config namespace; every hyper-parameter
            read below comes from it.
        datasets: dataset container; must expose ``aq_train_data``.
    """
    self.sess = sess
    self.isbatch_normal = args.isBatch_normal
    self.isNormal = args.isNormal
    self.checkpoint_dir = args.checkpoint_dir
    self.result_dir = args.result_dir
    self.log_dir = args.log_dir
    self.dataset_name = args.dataset_name
    self.run_type = args.run_type  # (original assigned this twice; duplicate removed)
    self.lr = args.lr
    self.epoch = args.epoch
    self.batch_size = args.batch_size
    self.n_inputs = args.n_inputs  # MNIST data input (img shape: 28*28)
    self.n_steps = args.n_steps  # time steps
    self.missing_rate = args.missing_rate
    self.n_hidden_units = args.n_hidden_units  # neurons in hidden layer
    self.n_classes = args.n_classes  # MNIST classes (0-9 digits)
    self.result_path = args.result_path
    self.model_path = args.model_path
    self.pretrain_epoch = args.pretrain_epoch
    self.isSlicing = args.isSlicing
    self.g_loss_lambda = args.g_loss_lambda
    # NOTE(review): assumes ``model_name`` is defined as a class attribute on
    # the enclosing class — confirm; otherwise this line raises AttributeError.
    self.model_name += "_" + str(args.missing_rate)
    self.datasets = datasets
    self.z_dim = args.z_dim  # dimension of noise-vector
    self.use_grui = args.use_grui
    print(self.n_inputs)

    # WGAN_GP parameters
    self.lambd = 0.25  # The higher value, the more stable, but the slower convergence
    self.disc_iters = args.disc_iters  # The number of critic iterations for one-step of generator

    # train
    self.learning_rate = args.lr
    self.beta1 = args.beta1

    # Pick the GRUI cell implementation matching the installed TF 1.x version.
    # BUG FIX: the original instantiated MyGRUCell14 (in the "1.4" branch) and
    # MyGRUCell12 (in the "1.2" branch) for one of the three cells while the
    # sibling cells in the same branch — and the other model's __init__ —
    # used MyGRUCell4 / MyGRUCell2. Made consistent; revert if those variant
    # classes really exist in mygru_cell and are intentional.
    if "1.5" in tf.__version__ or "1.7" in tf.__version__:
        self.grui_cell_g1 = mygru_cell.MyGRUCell15(self.n_hidden_units)
        self.grui_cell_g2 = mygru_cell.MyGRUCell15(self.n_hidden_units)
        self.grui_cell_d = mygru_cell.MyGRUCell15(self.n_hidden_units)
    elif "1.4" in tf.__version__:
        self.grui_cell_g1 = mygru_cell.MyGRUCell4(self.n_hidden_units)
        self.grui_cell_g2 = mygru_cell.MyGRUCell4(self.n_hidden_units)
        self.grui_cell_d = mygru_cell.MyGRUCell4(self.n_hidden_units)
    elif "1.2" in tf.__version__:
        self.grui_cell_d = mygru_cell.MyGRUCell2(self.n_hidden_units)
        self.grui_cell_g1 = mygru_cell.MyGRUCell2(self.n_hidden_units)
        self.grui_cell_g2 = mygru_cell.MyGRUCell2(self.n_hidden_units)

    # test
    self.sample_num = 64  # number of generated images to be saved
    # The factor 48 presumably means 48 time steps per sample of the
    # air-quality data — verify against the dataset layout.
    self.num_batches = len(
        datasets.aq_train_data) // (self.batch_size * 48)
def __init__(self, sess, args, datasets):
    """Keep references to the session, the config namespace, and the data.

    Args:
        sess: TensorFlow session shared by the whole model.
        args: namespace holding every hyper-parameter consumed below.
        datasets: dataset container; must expose ``m``.
    """
    self.sess = sess
    self.datasets = datasets

    # Bookkeeping paths.
    self.checkpoint_dir = args.checkpoint_dir
    self.log_dir = args.log_dir

    # Optimisation hyper-parameters.
    self.lr = args.lr
    self.learning_rate = args.lr
    self.beta1 = args.beta1
    self.epoch = args.epoch
    self.batch_size = args.batch_size
    self.pretrain_epoch = args.pretrain_epoch
    self.impute_iter = args.impute_iter
    self.g_loss_lambda = args.g_loss_lambda

    # Network dimensions.
    self.n_inputs = args.n_inputs  # MNIST data input (img shape: 28*28)
    self.n_steps = args.n_steps
    self.n_hidden_units = args.n_hidden_units  # neurons in hidden layer
    self.z_dim = args.z_dim  # dimension of noise-vector

    # Missing-data rate, folded into the model name so checkpoints from
    # different rates do not collide.
    self.missing_rate = args.missing_rate
    self.model_name += str(self.missing_rate)

    # WGAN_GP parameter
    self.disc_iters = args.disc_iters  # The number of critic iterations for one-step of generator

    # Select the GRU-D cell class matching the installed TF 1.x version;
    # with no match the cell attributes are simply not set (as before).
    version = tf.__version__
    if "1.5" in version or "1.7" in version:
        cell_cls = mygru_cell.MyGRUCell15
    elif "1.4" in version:
        cell_cls = mygru_cell.MyGRUCell4
    elif "1.2" in version:
        cell_cls = mygru_cell.MyGRUCell2
    else:
        cell_cls = None
    if cell_cls is not None:
        self.grud_cell_d = cell_cls(self.n_hidden_units)
        self.grud_cell_g = cell_cls(self.n_hidden_units)

    # test
    self.sample_num = 64  # number of generated images to be saved
    self.num_batches = len(datasets.m) // self.batch_size