def main():
    """Entry point: seed RNGs, build a BEGAN_CS model, then train or test it."""
    args = parse_args()
    if args is None:
        exit()

    # Fixed seed so runs are reproducible.
    rng_seed = 124
    tf.set_random_seed(rng_seed)
    np.random.seed(rng_seed)

    session_config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=session_config) as session:
        gan = BEGAN_CS(session,
                       epoch=args.epoch,
                       batch_size=args.batch_size,
                       z_dim=args.z_dim,
                       dataset_name=args.dataset,
                       checkpoint_dir=args.checkpoint_dir,
                       result_dir=args.result_dir,
                       log_dir=args.log_dir,
                       g_lr=args.g_lr,
                       d_lr=args.d_lr)

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        if args.train:
            gan.train()
            print(" [*] Training finished!")
        else:
            gan.test()
            print(" [*] Testing finished!")
def main():
    """Entry point: build the requested TGAN variant and run training."""
    args = parse_args()
    if args is None:
        exit()

    # Map the CLI choice onto the model class; anything else is rejected.
    variants = {'TGAN_64': TGAN_64, 'TGAN_128': TGAN_128}

    # open session
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        if args.gan_type not in variants:
            raise Exception("[!] There is no option for " + args.gan_type)
        gan = variants[args.gan_type](session, args)

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        gan.train()
        gan.train_check()
def main():
    """Entry point: build a GAN from CLI arguments and train it.

    Fixes a latent crash: an unrecognised ``--gan_type`` previously left
    ``gan`` as None and failed later with AttributeError on
    ``gan.build_model()``; it now raises a clear error immediately,
    matching the sibling entry points.
    """
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    # open session
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        gan = None
        if args.gan_type == 'GAN':
            gan = GAN(sess,
                      epoch=args.epoch,
                      batch_size=args.batch_size,
                      z_dim=args.z_dim,
                      dataset_name=args.dataset,
                      checkpoint_dir=args.checkpoint_dir,
                      result_dir=args.result_dir,
                      log_dir=args.log_dir,
                      g_from_scratch=args.g_from_scratch)
        if gan is None:
            # Fail fast with a clear message instead of AttributeError below.
            raise Exception("[!] There is no option for " + args.gan_type)

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        gan.train()
        print(" [*] Training finished!")
def main():
    """Entry point: build the selected autoencoder-GAN, train and visualise it."""
    args = parse_args()
    if args is None:
        exit()

    # Both variants take identical constructor arguments.
    model_classes = {'AE_GAN': AE_GAN, 'CAE_CGAN': CAE_CGAN}

    # open session
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        if args.gan_type not in model_classes:
            raise Exception("[!] There is no option for " + args.gan_type)
        gan = model_classes[args.gan_type](
            session,
            epoch=args.epoch,
            batch_size=args.batch_size,
            z_dim=args.z_dim,
            dataset_name=args.dataset,
            checkpoint_dir=args.checkpoint_dir,
            result_dir=args.result_dir,
            log_dir=args.log_dir)

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        gan.train()
        gan.train_check()

        # visualize learned generator at the last trained epoch
        gan.visualize_results(args.epoch - 1)
        print(" [*] Testing finished!")
def main(_):
    """Entry point: train a PatchGAN or visualise a trained checkpoint."""
    pp.pprint(flags.FLAGS.__flags)

    # Create output directories on first run.
    for directory in (FLAGS.checkpoint_dir, FLAGS.sample_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Let TF grab GPU memory on demand instead of reserving it all.
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as session:
        patchGan = PatchGAN(
            session,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            y_dim=10,
            z_dim=FLAGS.z_dim,
            checkpoint_dir=FLAGS.checkpoint_dir)
        show_all_variables()

        if FLAGS.train:
            patchGan.train(FLAGS)
        else:
            # Test mode requires an existing checkpoint.
            if not patchGan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        OPTION = 1
        visualize(session, patchGan, FLAGS, OPTION)
def main(_):
    """Entry point: train a DCGAN or visualise an existing checkpoint."""
    # Square images by default: missing widths fall back to the heights.
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    for path in (FLAGS.checkpoint_dir, FLAGS.sample_dir):
        if not os.path.exists(path):
            os.makedirs(path)

    # Prevent tensorflow from allocating the totality of a GPU memory
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True

    with tf.Session(config=session_config) as session:
        dcgan = DCGAN(session, flags=FLAGS)
        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")
            visualize(session, dcgan, FLAGS)
def main():
    """Entry point: build a GAN from CLI arguments and train it.

    Fixes a latent crash: an unrecognised ``--gan_type`` previously left
    ``gan`` as None and failed later with AttributeError on
    ``gan.build_model()``; it now raises a clear error immediately,
    matching the sibling entry points.
    """
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    # open session
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        gan = None
        if args.gan_type == 'GAN':
            gan = GAN(sess,
                      epoch=args.epoch,
                      batch_size=args.batch_size,
                      z_dim=args.z_dim,
                      dataset_name=args.dataset,
                      checkpoint_dir=args.checkpoint_dir,
                      result_dir=args.result_dir,
                      log_dir=args.log_dir,
                      g_from_scratch=args.g_from_scratch)
        if gan is None:
            # Fail fast with a clear message instead of AttributeError below.
            raise Exception("[!] There is no option for " + args.gan_type)

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        gan.train()
        print(" [*] Training finished!")
def __init__(self, config):
    """Wire up data loading, model construction and the TF session.

    Args:
        config: parsed run configuration; ``config.task`` selects the
            data loader and must start with 'binpacking'.

    Raises:
        Exception: if the task is not recognised.
    """
    self.config = config

    # Mirror frequently-used config fields as attributes.
    for field in ('task', 'model_dir', 'gpu_memory_fraction',
                  'init_first_decoder_input', 'log_step', 'max_step',
                  'num_log_samples', 'checkpoint_secs'):
        setattr(self, field, getattr(config, field))

    if not config.task.lower().startswith('binpacking'):
        raise Exception("[!] Unknown task: {}".format(config.task))
    # Load train and test data
    self.data_loader = BinPackingDataLoader(config)

    # Build model based on data and config
    self.model = Model(
        config,
        orders=self.data_loader.o,
        inputs=self.data_loader.x,
        baselines=self.data_loader.b,
        enc_seq_length=self.data_loader.seq_length,
        dec_seq_length=self.data_loader.seq_length,
    )

    self.build_session()
    show_all_variables()
def __init__(self, config, rng):
    """Set up the TSP trainer: config mirror, data loader, model, session.

    Args:
        config: parsed run configuration; ``config.task`` must start
            with 'tsp'.
        rng: random number generator handed to the data loader.

    Raises:
        Exception: if the task is not recognised.
    """
    self.config = config
    self.rng = rng

    # Mirror frequently-used config fields as attributes.
    for field in ('task', 'model_dir', 'gpu_memory_fraction', 'log_step',
                  'max_step', 'num_log_samples', 'checkpoint_secs'):
        setattr(self, field, getattr(config, field))

    if not config.task.lower().startswith('tsp'):
        raise Exception("[!] Unknown task: {}".format(config.task))
    self.data_loader = TSPDataLoader(config, rng=self.rng)

    self.model = Model(
        config,
        inputs=self.data_loader.x,
        labels=self.data_loader.y,
        enc_seq_length=self.data_loader.seq_length,
        dec_seq_length=self.data_loader.seq_length,
        mask=self.data_loader.mask)

    self.build_session()
    show_all_variables()
def main():
    """Entry point: train a TripleGAN and visualise the final generator."""
    args = parse_args()
    if args is None:
        exit()

    # open session
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        gan = TripleGAN(
            session,
            epoch=args.epoch,
            batch_size=args.batch_size,
            unlabel_batch_size=args.unlabel_batch_size,
            z_dim=args.z_dim,
            dataset_name=args.dataset,
            n=args.n,
            gan_lr=args.gan_lr,
            cla_lr=args.cla_lr,
            checkpoint_dir=args.checkpoint_dir,
            result_dir=args.result_dir,
            log_dir=args.log_dir)

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        gan.train()
        print(" [*] Training finished!")

        # sample the generator at the last trained epoch
        gan.visualize_results(args.epoch - 1)
        print(" [*] Testing finished!")
def run():
    """Entry point: train WDMAN on the source/target dataset pair."""
    print(FLAGS.dataset_name_s)
    print(FLAGS.dataset_name_t)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)

    # Pin the visible GPU to the one requested on the command line.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.cuda)

    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True

    with tf.Session(config=session_config) as session:
        model = WDMAN(
            session,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            y_dim=FLAGS.y_dim,
            dataset_name_s=FLAGS.dataset_name_s,
            dataset_name_t=FLAGS.dataset_name_t,
            input_fname_pattern=FLAGS.input_fname_pattern,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            data_dir=FLAGS.data_dir,
            x_type=FLAGS.x_type)
        show_all_variables()

        if FLAGS.train:
            model.train(FLAGS)
        else:
            if not model.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")
def main(_):
    """Entry point: construct the model and trainer, then train or load-check."""
    pp.pprint(flags.FLAGS.__flags)

    # Square images by default: missing widths fall back to the heights.
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as session:
        model = Model(input_width=FLAGS.input_width,
                      input_height=FLAGS.input_height,
                      output_width=FLAGS.output_width,
                      output_height=FLAGS.output_height,
                      batch_size=FLAGS.batch_size,
                      dataset_name=FLAGS.dataset_name,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      crop=FLAGS.crop)
        trainer = Train(session)
        show_all_variables()

        if FLAGS.train:
            trainer.train(model, FLAGS)
        else:
            if not model.load(trainer, FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")
def main(_):
    """Entry point: run image completion with a trained DCGAN checkpoint."""
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    # Completion needs a trained model, so the checkpoint dir must exist.
    assert (os.path.exists(FLAGS.checkpoint_dir))

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as session:
        dcgan = DCGAN(session,
                      output_width=FLAGS.output_width,
                      output_height=FLAGS.output_height,
                      batch_size=FLAGS.batch_size,
                      dataset_name=FLAGS.dataset,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      lam=FLAGS.lam)
        dcgan.complete(FLAGS)
        show_all_variables()

        # Below is codes for visualization
        OPTION = 1
        visualize(session, dcgan, FLAGS, OPTION)
def main(_):
    """Entry point: train LAYOUTGAN or verify a saved checkpoint."""
    pp.pprint(flags.FLAGS.__flags)

    for directory in (FLAGS.checkpoint_dir, FLAGS.sample_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Let TF grow GPU memory on demand instead of reserving it all.
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as session:
        layoutgan = LAYOUTGAN(session,
                              batch_size=FLAGS.batch_size,
                              sample_num=FLAGS.batch_size,
                              dataset_name=FLAGS.dataset,
                              checkpoint_dir=FLAGS.checkpoint_dir,
                              sample_dir=FLAGS.sample_dir)
        show_all_variables()

        if FLAGS.train:
            layoutgan.train(FLAGS)
        else:
            if not layoutgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")
def __init__(self, config, laplacian, lmax):
    """Build the model from config plus externally supplied Laplacian data.

    Args:
        config: run configuration with architecture/optimiser fields.
        laplacian: graph Laplacian used by the model.
        lmax: largest Laplacian eigenvalue (imported alongside it).
    """
    # Mirror the relevant config fields as attributes.
    for field in ('model_type', 'batch_size', 'num_node', 'feat_in',
                  'num_time_steps', 'feat_out', 'num_hidden', 'num_kernel',
                  'classif_loss', 'learning_rate', 'optimizer'):
        setattr(self, field, getattr(config, field))

    ##Need to import laplacian, lmax
    self.laplacian = laplacian
    self.lmax = lmax

    # Gradient clipping is off unless a positive norm is configured.
    self.max_grad_norm = None
    if config.max_grad_norm > 0:
        self.max_grad_norm = config.max_grad_norm

    self._build_placeholders()
    self._build_model()
    self._build_steps()
    self._build_optim()

    show_all_variables()
def main():
    """Entry point: train a TripleGAN with per-network learning rates."""
    args = parse_args()
    if args is None:
        exit()

    # open session
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        gan = TripleGAN(
            session,
            epoch=args.epoch,
            batch_size=args.batch_size,
            unlabel_batch_size=args.unlabel_batch_size,
            z_dim=args.z_dim,
            dataset_name=args.dataset,
            nexamples=args.n,
            lr_d=args.lr_d,
            lr_g=args.lr_g,
            lr_c=args.lr_c,
            checkpoint_dir=args.checkpoint_dir,
            result_dir=args.result_dir,
            log_dir=args.log_dir)

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        gan.train()
        print(" [*] Training finished!")
def __init__(self, config, rng):
    """Set up the TSP trainer: config mirror, data loader, model, session.

    Args:
        config: parsed configuration; task is expected to be 'tsp'.
        rng: random number generator forwarded to the data loader.

    Raises:
        Exception: if ``config.task`` does not start with 'tsp'.
    """
    self.config = config
    self.rng = rng

    # Mirror commonly-used config fields (typical values noted in the
    # original source: log_step 50, max_step 1000000, num_log_samples 3,
    # checkpoint_secs 300, gpu_memory_fraction 1.0).
    for field in ('task', 'model_dir', 'gpu_memory_fraction', 'log_step',
                  'max_step', 'num_log_samples', 'checkpoint_secs'):
        setattr(self, field, getattr(config, field))

    if not config.task.lower().startswith('tsp'):
        raise Exception("[!] Unknown task: {}".format(config.task))
    self.data_loader = TSPDataLoader(config, rng=self.rng)

    self.model = Model(
        config,
        inputs=self.data_loader.x,
        labels=self.data_loader.y,
        enc_seq_length=self.data_loader.seq_length,
        dec_seq_length=self.data_loader.seq_length,
        mask=self.data_loader.mask)  # mask is a dict with 'train'/'test' keys

    self.build_session()
    show_all_variables()
def main(_):
    """Entry point: train a 7-class conditional DCGAN or visualise it."""
    pp.pprint(flags.FLAGS.__flags)

    for directory in (FLAGS.checkpoint_dir, FLAGS.sample_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as session:
        dcgan = DCGAN(
            session,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            y_dim=7,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir)
        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # Below is codes for visualization
        visualize(session, dcgan, FLAGS)
def main(_):
    """Entry point: build a 2-class conditional DCGAN and train it."""
    pp.pprint(flags.FLAGS.__flags)

    # Square images by default.
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as session:
        dcgan = DCGAN(session,
                      input_width=FLAGS.input_width,
                      input_height=FLAGS.input_height,
                      output_width=FLAGS.output_width,
                      output_height=FLAGS.output_height,
                      batch_size=FLAGS.batch_size,
                      sample_num=FLAGS.batch_size,
                      y_dim=2,
                      dataset_name=FLAGS.dataset,
                      input_fname_pattern=FLAGS.input_fname_pattern,
                      crop=FLAGS.crop,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir,
                      learning_rate=FLAGS.learning_rate)
        show_all_variables()
        dcgan.train(FLAGS)
def main_no_args():
    """Train the ACWGANGP model with hard-coded hyper-parameters.

    Bug fix: the final visualisation referenced ``args.epoch`` but this
    entry point takes no CLI arguments, so ``args`` was undefined and the
    call raised NameError. The epoch count is now a named local used in
    both places.
    """
    epoch = 400  # single source of truth for the epoch count
    models = [ACWGANGP]

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        gan = None
        for model in models:
            if 'ACWGANGP' == model.model_name:
                gan = model(sess,
                            epoch=epoch,
                            batch_size=5,
                            z_dim=100,
                            dataset_name='lld',
                            checkpoint_dir='checkpoint',
                            result_dir='results',
                            log_dir='logs')

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        gan.train()
        print(" [*] Training finished!")

        # visualize learned generator at the last epoch
        # (was ``args.epoch - 1`` — a NameError, since no args exist here)
        gan.visualize_results(epoch - 1)
        print(" [*] Testing finished!")
def main():
    """Entry point: build the requested conditional GAN, then train or infer."""
    #parse arguments
    args = parse_args()
    if args is None:
        exit()

    models = [CGAN, ACGAN, ACGAN_ResNet]

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        # Select the class whose declared model_name matches the CLI choice.
        gan_cls = next((m for m in models if m.model_name == args.gan_type),
                       None)
        if gan_cls is None:
            raise Exception("[!] There is no option for " + args.gan_type)

        gan = gan_cls(session,
                      epoch=args.epoch,
                      batch_size=args.batch_size,
                      z_dim=args.z_dim,
                      dataset_name=args.dataset,
                      checkpoint_dir=args.checkpoint_dir,
                      sample_dir=args.sample_dir,
                      log_dir=args.log_dir,
                      mode=args.mode)

        gan.build_model()
        show_all_variables()

        if args.mode == 'train':
            gan.train()
            print(" [*] Training finished!")
        elif args.mode == 'infer':
            gan.infer()
            print(" [*] Infer finished!")
def main():
    """Train TripleGAN on CIFAR-10 with fixed, hard-coded hyper-parameters."""
    # No CLI parsing in this variant: everything is pinned below.
    hyper = dict(
        epoch=1,
        batch_size=10,
        unlabel_batch_size=125,
        z_dim=100,
        dataset_name='cifar10',
        n=4000,
        gan_lr=2e-4,
        cla_lr=2e-3,
        checkpoint_dir='checkpoint',
        result_dir='results',
        log_dir='logs',
    )

    # open session
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        gan = TripleGAN(session, **hyper)

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        gan.train()
        print(" [*] Training finished!")

        # visualize the generator at epoch index 0 (the only epoch trained)
        gan.visualize_results(0)
        print(" [*] Testing finished!")
def __init__(self, config, data_loader):
    """Build the model around a prepared data loader.

    Args:
        config: run configuration with input geometry and optimiser fields.
        data_loader: object supplying input batches to the model.
    """
    self.data_loader = data_loader
    self.config = config

    # Mirror frequently-used config fields as attributes.
    for field in ('task', 'debug', 'input_height', 'input_width',
                  'input_channel', 'reg_scale', 'learning_rate',
                  'max_grad_norm', 'batch_size'):
        setattr(self, field, getattr(config, field))

    # Registry populated by the model-construction helpers below.
    self.layer_dict = {}

    self._build_placeholders()
    self._build_model()
    self._build_steps()
    self._build_optim()

    show_all_variables()
def __init__(self, config, rng):
    """Set up the TSP trainer, its summary writer, model, and session.

    Args:
        config: parsed run configuration; ``config.task`` must start
            with 'tsp'.
        rng: random number generator handed to the data loader.

    Raises:
        Exception: if the task is not recognised.
    """
    self.config = config
    self.rng = rng

    # Mirror frequently-used config fields as attributes.
    for field in ('task', 'model_dir', 'gpu_memory_fraction', 'log_step',
                  'max_step', 'num_log_samples', 'checkpoint_secs'):
        setattr(self, field, getattr(config, field))

    if not config.task.lower().startswith('tsp'):
        raise Exception("[!] Unknown task: {}".format(config.task))
    self.data_loader = TSPDataLoader(config, rng=self.rng)

    # TensorBoard summaries go next to the model checkpoints.
    self.summary_writer = tf.summary.FileWriter(self.model_dir)

    self.model = Model(config,
                       inputs=self.data_loader.x,
                       labels=self.data_loader.y,
                       enc_seq_length=self.data_loader.seq_length,
                       dec_seq_length=self.data_loader.seq_length,
                       mask=self.data_loader.mask)

    self.build_session()
    show_all_variables()
def main(_):
    """Entry point: train a DCGAN or visualise a trained checkpoint.

    The 'birds' dataset passes y_dim=None explicitly; every other dataset
    relies on the DCGAN constructor's default for y_dim.
    """
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    for directory in (FLAGS.checkpoint_dir, FLAGS.sample_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as session:
        # Both branches share everything except the explicit y_dim.
        shared_kwargs = dict(
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            z_dim=FLAGS.generate_test_images,
            dataset_name=FLAGS.dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
        )
        if FLAGS.dataset == 'birds':
            dcgan = DCGAN(session, y_dim=None, **shared_kwargs)
        else:
            dcgan = DCGAN(session, **shared_kwargs)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        OPTION = 1
        visualize(session, dcgan, FLAGS, OPTION)
def main(_):
    """Entry point: build a DCGAN (conditional for MNIST) and train/test it.

    Bug fix: the first line called ``pp.print(...)`` — ``pprint.PrettyPrinter``
    has no ``print`` method, so this raised AttributeError on startup. The
    intended call is ``pp.pprint``, as used by the sibling entry points.
    """
    pp.pprint(flags.FLAGS.__flags)

    # Square images by default: missing widths fall back to the heights.
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            # MNIST trains conditionally on its 10 digit classes.
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                y_dim=10,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir
            )
        else:
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
            )
        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception('[!] Train a model first, then run test mode')

        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)
def __init__(self, sess, config, dataset):
    """Assemble the full network: placeholders, graph, loss, optimiser.

    Args:
        sess: TensorFlow session the network will run in.
        config: run configuration (normalisation flags, patch sizes, ...).
        dataset: data module the network trains on.
    """
    # Save pointer to the tensorflow session
    self.sess = sess
    # Save pointer to config
    self.config = config
    # Save pointer to the data module
    self.dataset = dataset

    # # Summaries to compute for this network
    # self.summary = []

    # Normalizer for the input data (they are raw images)
    # Currently normalized to be between -1 and 1
    self.mean = {}
    self.std = {}
    for _module in ["kp", "ori", "desc"]:
        self.mean[_module] = 128.0
        self.std[_module] = 128.0

    if self.config.use_old_mean_std:
        # Per-module statistics; presumably measured by the old framework
        # (the flag name suggests legacy-checkpoint compatibility) — the
        # exact float reprs below must not be rounded. TODO confirm origin.
        self.mean[
            "kp"] = 116.4368117568544249706974369473755359649658203125
        self.std["kp"] = 88.083076379771597430590190924704074859619140625
        self.mean[
            "ori"] = 116.4368117568544249706974369473755359649658203125
        self.std["ori"] = 88.083076379771597430590190924704074859619140625
        self.mean["desc"] = 110.75389862060546875
        self.std["desc"] = 61.53688812255859375

    # Account for the keypoint scale change while augmenting rotations
    self.scale_aug = float(get_patch_size(self.config)) / \
        float(get_patch_size_no_aug(self.config))

    # Allocate placeholders
    with tf.variable_scope("placeholders"):
        self._build_placeholders()
    # Build the network
    with tf.variable_scope("network"):
        self._build_network()
    # Build loss
    with tf.variable_scope("loss"):
        self._build_loss()
    # Build the optimization op
    with tf.variable_scope("optimization"):
        self._build_optim()

    # Build the legacy component. This is only used for accessing old
    # framework weights. You can safely ignore this part
    build_legacy(self)

    # Show all variables in the network
    show_all_variables()

    # Add all variables into histogram summary
    for _module in ["kp", "ori", "desc"]:
        for _param in self.params[_module]:
            tf.summary.histogram(_param.name, _param)

    # Collect all summary (Lazy...)
    self.summary = tf.summary.merge_all()
def main(_):
    """Entry point: print flags, prepare directories, and build the DCGAN.

    Note: this variant only constructs the model and lists its variables;
    it does not call train().
    """
    # Echo the parsed flags; missing widths default to the heights so
    # images stay square unless both dimensions are given explicitly.
    pp.pprint(flags.FLAGS.__flags)
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    # Create checkpoint/sample directories on first run.
    for directory in (FLAGS.checkpoint_dir, FLAGS.sample_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Grow GPU memory on demand rather than reserving it all upfront.
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as session:
        # Both branches share everything except the conditional y_dim.
        shared_kwargs = dict(
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            z_dim=FLAGS.generate_test_images,
            dataset_name=FLAGS.dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            data_dir=FLAGS.data_dir,
        )
        if FLAGS.dataset == 'mnist':
            # MNIST gets a 10-class conditional model.
            dcgan = DCGAN(session, y_dim=10, **shared_kwargs)
        else:
            dcgan = DCGAN(session, **shared_kwargs)

        # List all trainable variables.
        show_all_variables()
def main(_):
    """Entry point: assemble a growGAN from FLAGS, then train or test it."""
    pp.pprint(flags.FLAGS.__flags)
    # The run directory name encodes the hyper-parameter choice so that
    # different configurations never share checkpoints.
    # NOTE(review): only some fields are wrapped in str(); the bare ones
    # (z_dims, epochs, g_layers, ...) must already be strings for this
    # concatenation to work — confirm against the flag definitions.
    model_dir = 'mixing_' + str(FLAGS.lr) + '_' + FLAGS.z_dims + '_' + \
        FLAGS.epochs + '_' + FLAGS.g_layers + '_' + FLAGS.d_layers + '_' + \
        FLAGS.output_dims + '_' + FLAGS.feature_map_shrink + \
        FLAGS.feature_map_growth + FLAGS.spatial_map_shrink + \
        FLAGS.spatial_map_growth + '_' + FLAGS.loss + '_' + FLAGS.z_distr + \
        '_' + FLAGS.activation + '_' + FLAGS.weight_init + '_' + \
        str(FLAGS.batch_size) + '_' + str(FLAGS.g_batchnorm) + '_' + \
        str(FLAGS.d_batchnorm) + '_' + str(FLAGS.normalize_z) + '_' + \
        str(FLAGS.minibatch_std) + '_' + str(FLAGS.use_wscale) + '_' + \
        str(FLAGS.use_pixnorm) + '_' + str(FLAGS.D_loss_extra)
    # Forward every flag into the model constructor.
    gan = growGAN(z_dims=FLAGS.z_dims,
                  epochs=FLAGS.epochs,
                  g_layers=FLAGS.g_layers,
                  d_layers=FLAGS.d_layers,
                  output_dims=FLAGS.output_dims,
                  useAlpha=FLAGS.useAlpha,
                  useBeta=FLAGS.useBeta,
                  useGamma=FLAGS.useGamma,
                  useTau=FLAGS.useTau,
                  feature_map_shrink=FLAGS.feature_map_shrink,
                  feature_map_growth=FLAGS.feature_map_growth,
                  spatial_map_shrink=FLAGS.spatial_map_shrink,
                  spatial_map_growth=FLAGS.spatial_map_growth,
                  stage=FLAGS.stage,
                  loss=FLAGS.loss,
                  z_distr=FLAGS.z_distr,
                  activation=FLAGS.activation,
                  weight_init=FLAGS.weight_init,
                  lr=FLAGS.lr,
                  beta1=FLAGS.beta1,
                  beta2=FLAGS.beta2,
                  epsilon=FLAGS.epsilon,
                  batch_size=FLAGS.batch_size,
                  sample_num=FLAGS.sample_num,
                  gpu=FLAGS.gpu,
                  g_batchnorm=FLAGS.g_batchnorm,
                  d_batchnorm=FLAGS.d_batchnorm,
                  normalize_z=FLAGS.normalize_z,
                  crop=FLAGS.crop,
                  trainflag=FLAGS.trainflag,
                  visualize=FLAGS.visualize,
                  model_dir=model_dir,
                  minibatch_std=FLAGS.minibatch_std,
                  use_wscale=FLAGS.use_wscale,
                  use_pixnorm=FLAGS.use_pixnorm,
                  D_loss_extra=FLAGS.D_loss_extra,
                  G_run_avg=FLAGS.G_run_avg)
    show_all_variables()
    if FLAGS.trainflag:
        gan.train()
    else:
        # Test mode requires an existing checkpoint.
        if not gan.load()[0]:
            raise Exception("[!] Train a model first, then run test mode")
        if FLAGS.visualize:
            # NOTE(review): 'sess' is not defined anywhere in this function,
            # so reaching this call raises NameError. Presumably the session
            # lives inside growGAN — verify what visualize() expects and
            # pass the correct session object.
            visualize(sess, gan, FLAGS)
def main(self):
    """Run a DCGAN training job driven by this object's stored config.

    Reads hyper-parameters from ``self._config``, prepares per-run
    checkpoint/sample directories under the work dir, optionally draws and
    persists a random seed, pre-generates fixed test noise, then trains.
    """
    FLAGS = Struct(**self._config)

    # Square images by default.
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    # Per-run output directories live under the job's work dir.
    FLAGS.checkpoint_dir = os.path.join(self._work_dir, "checkpoint")
    FLAGS.sample_dir = os.path.join(self._work_dir, "samples")
    for directory in (FLAGS.checkpoint_dir, FLAGS.sample_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)
    FLAGS.work_dir = self._work_dir

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    if FLAGS.random:
        # Draw and persist the seed so the run can be reproduced later.
        seed = random.randint(1, 100000)
        np.random.seed(seed)
        with open(os.path.join(self._work_dir, "seed.txt"), "w") as f:
            f.write("{}".format(seed))

    # Round the requested number of test samples up to a whole number of
    # batches, then draw the fixed latent vectors for them.
    t_num_test_samples = int(
        ceil(float(FLAGS.num_test_sample) /
             float(FLAGS.batch_size))) * FLAGS.batch_size
    test_samples = np.random.uniform(
        -1, 1, size=(t_num_test_samples, FLAGS.z_dim))

    with tf.Session(config=run_config) as session:
        dcgan = DCGAN(session,
                      input_width=FLAGS.input_width,
                      input_height=FLAGS.input_height,
                      output_width=FLAGS.output_width,
                      output_height=FLAGS.output_height,
                      batch_size=FLAGS.batch_size,
                      sample_num=FLAGS.batch_size,
                      dataset_name=FLAGS.dataset,
                      input_fname_pattern=FLAGS.input_fname_pattern,
                      crop=FLAGS.crop,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir,
                      packing_num=FLAGS.packing_num,
                      num_training_sample=FLAGS.num_training_sample,
                      num_test_sample=FLAGS.num_test_sample,
                      z_dim=FLAGS.z_dim,
                      test_samples=test_samples)
        show_all_variables()
        dcgan.train(FLAGS)
def main(_):
    """Entry point: train a DCGAN or mine collisions from a checkpoint."""
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    for directory in (FLAGS.checkpoint_dir, FLAGS.sample_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as session:
        # Branches differ only in y_dim (mnist) vs df_dim (everything else).
        shared_kwargs = dict(
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            dataset_name=FLAGS.dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            train=FLAGS.train,
        )
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(session, y_dim=10, **shared_kwargs)
        else:
            dcgan = DCGAN(session, df_dim=FLAGS.df_dim, **shared_kwargs)
        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            dcgan.get_collisions(FLAGS, nbatch=FLAGS.num_batches,
                                 topK=FLAGS.topK)
def main():
    """Entry point: build the selected generative model and train it.

    When ``--grid`` is set (anything other than -1 / [-1]), the model
    class is replaced by the flex wrapper before instantiation.
    """
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    # Echo the parsed configuration.
    for field in args.__dict__:
        print('{} : {}'.format(field, getattr(args, field)))

    models = [
        GAN, CGAN, infoGAN, ACGAN, EBGAN, WGAN, WGAN_GP, DRAGAN, LSGAN,
        BEGAN, VAE, CVAE
    ]

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        gan = None
        for candidate in models:
            if args.gan_type != candidate.model_name:
                continue
            if args.grid != [-1] and args.grid != -1:
                # Grid mode: the wrapper supplies the class and takes
                # extra grid-related constructor arguments.
                import flex_wrapper_DIEHARD
                wrapped = flex_wrapper_DIEHARD.init(args.gan_type)
                gan = wrapped(session,
                              epoch=args.epoch,
                              batch_size=args.batch_size,
                              dataset_name=args.dataset,
                              checkpoint_dir=args.checkpoint_dir,
                              result_dir=args.result_dir,
                              log_dir=args.log_dir,
                              grid=args.grid,
                              repetition_bol=args.repetition_bol)
            else:
                gan = candidate(session,
                                epoch=args.epoch,
                                batch_size=args.batch_size,
                                z_dim=args.z_dim,
                                dataset_name=args.dataset,
                                checkpoint_dir=args.checkpoint_dir,
                                result_dir=args.result_dir,
                                log_dir=args.log_dir)
        if gan is None:
            raise Exception("[!] There is no option for " + args.gan_type)

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        gan.train()
        print(" [*] Training finished!")

        # visualize learned generator
        gan.visualize_results(args.epoch - 1)
        print(" [*] Testing finished!")
def main():
    """Train WGAN-GP on the eyes TFRecord dataset with fixed settings."""
    with tf.Session() as session:
        gan = WGAN_GP(session,
                      epoch=10000,
                      batch_size=16,
                      dataset_name='eyes.tfrecords',
                      checkpoint_dir='checkpoint',
                      result_dir='results',
                      log_dir='logs')

        # build graph
        gan.build_model()
        show_all_variables()

        gan.train()
        print(" [*] Training finished!")

        # sample the generator at epoch index 19 (20 - 1)
        gan.visualize_results(19)
        print(" [*] Testing finished!")
def main():
    """Entry point: build the CASED model from CLI arguments and train it."""
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    # open session
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        model = CASED(session,
                      epoch=args.epoch,
                      batch_size=args.batch_size,
                      test_batch_size=args.test_batch_size,
                      num_gpu=args.num_gpu,
                      checkpoint_dir=args.checkpoint_dir,
                      result_dir=args.result_dir,
                      log_dir=args.log_dir)

        # build graph
        model.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        model.train()
        print(" [*] Training finished!")
def main():
    """Entry point: build infoGAN on GPU 0, train it, and visualise results."""
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    models = [infoGAN]

    # GPU Settings: pin device 0, allocate memory on demand.
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    gpu_options = tf.GPUOptions(allow_growth=True)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as session:
        # Select the class whose declared model_name matches the CLI choice.
        gan_cls = next((m for m in models if m.model_name == args.gan_type),
                       None)
        if gan_cls is None:
            raise Exception("[!] There is no option for " + args.gan_type)

        gan = gan_cls(session,
                      epoch=args.epoch,
                      batch_size=args.batch_size,
                      z_dim=args.z_dim,
                      dataset_name=args.dataset,
                      checkpoint_dir=args.checkpoint_dir,
                      result_dir=args.result_dir,
                      log_dir=args.log_dir)

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        gan.train()
        print(" [*] Training finished!")

        # visualize learned generator
        gan.visualize_results(args.epoch - 1)
        print(" [*] Testing finished!")
def main():
    """Entry point: instantiate the chosen generative model and train it."""
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    models = [GAN, CGAN, infoGAN, ACGAN, EBGAN, WGAN, WGAN_GP, DRAGAN,
              LSGAN, BEGAN, VAE, CVAE]

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        # Pick the class whose declared model_name matches the CLI choice.
        gan_cls = next((m for m in models if m.model_name == args.gan_type),
                       None)
        if gan_cls is None:
            raise Exception("[!] There is no option for " + args.gan_type)

        gan = gan_cls(session,
                      epoch=args.epoch,
                      batch_size=args.batch_size,
                      z_dim=args.z_dim,
                      dataset_name=args.dataset,
                      checkpoint_dir=args.checkpoint_dir,
                      result_dir=args.result_dir,
                      log_dir=args.log_dir)

        # build graph
        gan.build_model()
        # show network architecture
        show_all_variables()

        # launch the graph in a session
        gan.train()
        print(" [*] Training finished!")

        # visualize learned generator
        gan.visualize_results(args.epoch - 1)
        print(" [*] Testing finished!")
def main(_):
    """Entry point: build a DCGAN (10-class, 1-channel for MNIST), then
    train it or load a checkpoint for visualisation.

    Bug fix: the load check was ``if not dcgan.load(...)``. The sibling
    entry points index ``[0]`` for the success flag, which indicates
    ``load`` returns a (success, counter) tuple; a bare truthiness test on
    a non-empty tuple is always True, so the "train first" guard could
    never fire. The success flag is now checked explicitly.
    """
    pp.pprint(flags.FLAGS.__flags)

    # Square images by default: missing widths fall back to the heights.
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            # MNIST: 10 conditional classes, single-channel images.
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                y_dim=10,
                c_dim=1,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                is_crop=FLAGS.is_crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir)
        else:
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                c_dim=FLAGS.c_dim,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                is_crop=FLAGS.is_crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir)
        show_all_variables()

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            # was: `if not dcgan.load(...)` — see docstring.
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # Below is codes for visualization
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)