import argparse
import os

# Project-level imports assumed by this helper (following the usual
# SSGAN-Tensorflow layout; adjust the module paths if yours differ).
import datasets.hdf5_loader as dataset
from model import Model


def argparser(is_train=True):

    def str2bool(v):
        return v.lower() == 'true'

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--train_dir', type=str)
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--dataset', type=str, default='CIFAR10',
                        choices=['MNIST', 'SVHN', 'CIFAR10'])
    parser.add_argument('--dump_result', type=str2bool, default=False)
    # Model
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--n_z', type=int, default=128)
    parser.add_argument('--norm_type', type=str, default='batch',
                        choices=['batch', 'instance', 'None'])
    parser.add_argument('--deconv_type', type=str, default='bilinear',
                        choices=['bilinear', 'nn', 'transpose'])
    # Training config {{{
    # ========
    # log
    parser.add_argument('--log_step', type=int, default=10)
    parser.add_argument('--write_summary_step', type=int, default=100)
    parser.add_argument('--ckpt_save_step', type=int, default=10000)
    parser.add_argument('--test_sample_step', type=int, default=100)
    parser.add_argument('--output_save_step', type=int, default=1000)
    # learning
    parser.add_argument('--max_sample', type=int, default=5000,
                        help='num of samples the model can see')
    parser.add_argument('--max_training_steps', type=int, default=10000000)
    parser.add_argument('--learning_rate_g', type=float, default=1e-4)
    parser.add_argument('--learning_rate_d', type=float, default=1e-4)
    parser.add_argument('--update_rate', type=int, default=1)
    # }}}
    # Testing config {{{
    # ========
    parser.add_argument('--data_id', nargs='*', default=None)
    # }}}

    config = parser.parse_args()

    dataset_path = os.path.join('./datasets', config.dataset.lower())
    dataset_train, dataset_test = dataset.create_default_splits(dataset_path)

    # Infer image shape and number of classes from one training example.
    img, label = dataset_train.get_data(dataset_train.ids[0])
    config.h = img.shape[0]
    config.w = img.shape[1]
    config.c = img.shape[2]
    config.num_class = label.shape[0]

    # --- create model ---
    model = Model(config, debug_information=config.debug, is_train=is_train)

    return config, model, dataset_train, dataset_test
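
# Hypothetical wiring sketch (not part of the original file): how a driver
# script would typically consume argparser(). `Trainer` is an assumed class
# from the project's trainer module and may differ in your setup.
def run_training_sketch():
    from trainer import Trainer  # assumed module/class
    config, model, dataset_train, dataset_test = argparser(is_train=True)
    trainer = Trainer(config, model, dataset_train, dataset_test)  # assumed API
    trainer.train()                                                # assumed API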
def main():
    # Quick sanity check: load the MNIST splits with the project's dataset
    # loader module and print the dimensions of one example.
    dataset_path = "./datasets/mnist/"
    dataset_train, dataset_test = dataset.create_default_splits(dataset_path)
    print(dataset_train)
    print(len(dataset_train))

    img, label = dataset_train.get_data(dataset_train.ids[0])
    h = img.shape[0]
    w = img.shape[1]
    c = img.shape[2]
    num_class = label.shape[0]
    print(h, w, c)
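
# Assumed entry point so the sanity check above can be run directly as a script.
if __name__ == '__main__':
    main()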
# Variant of argparser() for the semi-supervised setting: it also builds an
# unlabeled training split and reads data from a Colab/Drive path (same
# imports as above are assumed).
def argparser(is_train=True):

    def str2bool(v):
        return v.lower() == 'true'

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--train_dir', type=str)
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--dataset', type=str, default=None,
                        choices=[
                            "mean_excep_nl_nn", "mean_std_all", "min_max_all",
                            "raw", "raw_excep_nl_n"
                        ])
    # Dump the images generated at checkpoints to an h5py file.
    parser.add_argument('--dump_result', type=str2bool, default=False)
    # Model
    parser.add_argument('--batch_size', type=int, default=32)
    # Dimensionality of the noise (latent) vector.
    parser.add_argument('--n_z', type=int, default=128)
    parser.add_argument('--norm_type', type=str, default='batch',
                        choices=['batch', 'instance', 'None'])
    parser.add_argument('--deconv_type', type=str, default='bilinear',
                        choices=['bilinear', 'nn', 'transpose'])
    # Training config {{{
    # ========
    # log
    parser.add_argument('--log_step', type=int, default=10)
    parser.add_argument('--write_summary_step', type=int, default=200)
    # parser.add_argument('--ckpt_save_step', type=int, default=1)
    # During training, the test set is used as the validation set.
    parser.add_argument('--test_sample_step', type=int, default=200)
    parser.add_argument('--output_save_step', type=int, default=500)
    # learning
    # parser.add_argument('--max_sample', type=int, default=200,
    #                     help='num of samples the model can see')
    parser.add_argument('--max_training_steps', type=int, default=30001)
    parser.add_argument('--learning_rate_g', type=float, default=2e-4)
    parser.add_argument('--learning_rate_d', type=float, default=1e-4)
    # Not a learning-rate change: this controls when the generator and
    # discriminator are trained relative to each other (e.g. one generator
    # update followed by one discriminator update).
    parser.add_argument('--update_rate', type=int, default=2)
    # }}}
    # Testing config {{{
    # ========
    parser.add_argument('--data_id', nargs='*', default=None)
    # }}}

    config = parser.parse_args()

    dataset_path = os.path.join(
        'drive/Colab_Notebooks/SSGAN-Tensorflow-master/datasets',
        config.dataset.lower())
    dataset_train, dataset_test = dataset.create_default_splits(dataset_path)
    dataset_train_unlabel, _ = dataset.create_default_splits_unlabel(
        dataset_path)

    img, label = dataset_train.get_data(dataset_train.ids[0])
    config.h = img.shape[0]
    config.w = img.shape[1]
    config.c = img.shape[2]
    config.num_class = label.shape[0]  # 5

    # --- create model ---
    model = Model(config, debug_information=config.debug, is_train=is_train)

    return config, model, dataset_train, dataset_train_unlabel, dataset_test
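
# Hypothetical sketch (not part of the original file) of how --update_rate is
# typically consumed in the training loop: it schedules generator updates
# relative to discriminator updates rather than changing any learning rate.
# `train_d_step`, `train_g_step`, and `next_batch` are assumed helpers.
def training_loop_sketch(config, model, dataset_train, train_d_step, train_g_step):
    for step in range(config.max_training_steps):
        batch = dataset_train.next_batch(config.batch_size)  # assumed helper
        train_d_step(model, batch)                            # discriminator update
        if step % config.update_rate == 0:
            train_g_step(model, batch)                        # generator update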
# Variant of argparser() for an unconditional image GAN (CelebA-style data,
# LSGAN / hinge / WGAN-GP losses); the dataset loader module is imported
# inside the function depending on the chosen dataset.
def argparser(is_train=True):

    def str2bool(v):
        return v.lower() == 'true'

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--train_dir', type=str)
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--checkpoint_g', type=str, default=None)
    parser.add_argument('--checkpoint_d', type=str, default=None)
    parser.add_argument('--dataset', type=str, default='celeba')
    parser.add_argument('--dataset_path', type=str, default=None)
    parser.add_argument('--img_h', type=int, default=256)
    parser.add_argument('--img_w', type=int, default=256)
    # Model
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--gan_type', type=str, default='wgan-gp',
                        choices=['lsgan', 'hinge', 'wgan-gp'])
    parser.add_argument('--n_z', type=int, default=128)
    parser.add_argument('--num_dis_conv', type=int, default=6)
    parser.add_argument('--num_g_res_block', type=int, default=3)
    parser.add_argument('--num_d_res_block', type=int, default=3)
    parser.add_argument('--g_norm_type', type=str, default='batch',
                        choices=['batch', 'instance', 'none'])
    parser.add_argument('--d_norm_type', type=str, default='none',
                        choices=['batch', 'instance', 'none'])
    parser.add_argument('--deconv_type', type=str, default='bilinear',
                        choices=['bilinear', 'nn', 'transpose'])
    # Training config {{{
    # ========
    # log
    parser.add_argument('--log_step', type=int, default=100)
    parser.add_argument('--write_summary_step', type=int, default=100)
    parser.add_argument('--ckpt_save_step', type=int, default=10000)
    # learning
    parser.add_argument('--max_training_steps', type=int, default=10000000)
    parser.add_argument('--learning_rate_g', type=float, default=1e-4)
    parser.add_argument('--learning_rate_d', type=float, default=1e-4)
    parser.add_argument('--adam_beta1', type=float, default=0.5)
    parser.add_argument('--adam_beta2', type=float, default=0.9)
    parser.add_argument('--lr_weight_decay', type=str2bool, default=False)
    parser.add_argument('--update_g', type=int, default=1)
    parser.add_argument('--update_d', type=int, default=1)
    parser.add_argument('--gamma', type=float, default=10)
    # }}}
    # Testing config {{{
    # ========
    parser.add_argument(
        '--output_file', type=str, default=None,
        help='dump all generated images to an HDF5 file with the filename specified here')
    parser.add_argument('--write_summary_image', type=str2bool, default=False)
    parser.add_argument('--summary_image_name', type=str, default='summary.png')
    parser.add_argument('--max_evaluation_steps', type=int, default=5)
    # }}}

    config = parser.parse_args()

    if config.dataset_path is None:
        dataset_path = os.path.join('./datasets', config.dataset.lower())
    else:
        dataset_path = config.dataset_path

    # HDF5-packaged benchmarks use the hdf5 loader; everything else (e.g.
    # celeba image folders) goes through the image loader.
    if config.dataset in ['CIFAR10', 'CIFAR100', 'SVHN', 'MNIST', 'Fashion_MNIST']:
        import datasets.hdf5_loader as dataset
    else:
        import datasets.image_loader as dataset

    dataset_train, dataset_test = dataset.create_default_splits(
        dataset_path, h=config.img_h, w=config.img_w)

    img = dataset_train.get_data(dataset_train.ids[0])
    config.h = img.shape[0]
    config.w = img.shape[1]
    config.c = img.shape[2]

    # --- create model ---
    model = Model(config, debug_information=config.debug, is_train=is_train)

    return config, model, dataset_train, dataset_test
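
# Hypothetical sketch of what the --gamma flag (default 10) usually weights
# when gan_type='wgan-gp': the gradient penalty added to the critic loss.
# Written TF1-style to match this project; `discriminator_fn`, `real_images`,
# and `fake_images` are assumed names, not defined in this file.
import tensorflow as tf

def gradient_penalty(discriminator_fn, real_images, fake_images, gamma, batch_size):
    # Random interpolation between real and generated samples.
    epsilon = tf.random_uniform([batch_size, 1, 1, 1], 0.0, 1.0)
    x_hat = epsilon * real_images + (1.0 - epsilon) * fake_images
    d_hat = discriminator_fn(x_hat)
    # Penalize the critic when its gradient norm at x_hat deviates from 1.
    grads = tf.gradients(d_hat, x_hat)[0]
    grad_norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-10)
    return gamma * tf.reduce_mean(tf.square(grad_norm - 1.0))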
# Variant of argparser() for the MRI classification data with 10-fold cross
# validation (same imports as above are assumed).
def argparser(is_train=True):

    def str2bool(v):
        return v.lower() == 'true'

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--train_dir', type=str)
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--dataset', type=str, default='MNIST',
                        choices=['MNIST', 'SVHN', 'CIFAR10', 'ulna', 'MRI'])
    parser.add_argument('--hdf5FileName', type=str,
                        default='MRIdata_3_AD_MCI_Normal.hdf5',
                        choices=[
                            'MRIdata_2_AD_MCI.hdf5', 'MRIdata_2_AD_Normal.hdf5',
                            'MRIdata_2_MCI_Normal.hdf5',
                            'MRIdata_3_AD_MCI_Normal.hdf5'
                        ])
    parser.add_argument('--idFileName', type=str,
                        default='MRIdata_3_AD_MCI_Normal_id.txt',
                        choices=[
                            'MRIdata_2_AD_MCI_id.txt', 'MRIdata_2_AD_Normal_id.txt',
                            'MRIdata_2_MCI_Normal_id.txt',
                            'MRIdata_3_AD_MCI_Normal_id.txt'
                        ])
    parser.add_argument('--dump_result', type=str2bool, default=False)
    # Model
    parser.add_argument('--batch_size', type=int, default=3)
    parser.add_argument('--n_z', type=int, default=128)
    parser.add_argument('--norm_type', type=str, default='batch',
                        choices=['batch', 'instance', 'None'])
    parser.add_argument('--deconv_type', type=str, default='bilinear',
                        choices=['bilinear', 'nn', 'transpose'])
    # Training config {{{
    # ========
    # log
    parser.add_argument('--log_step', type=int, default=10)
    parser.add_argument('--write_summary_step', type=int, default=100)
    parser.add_argument('--ckpt_save_step', type=int, default=50)
    parser.add_argument('--test_sample_step', type=int, default=100)
    parser.add_argument('--output_save_step', type=int, default=100)
    # learning
    parser.add_argument('--max_sample', type=int, default=5000,
                        help='num of samples the model can see')
    parser.add_argument('--max_training_steps', type=int, default=80000)
    parser.add_argument('--learning_rate_g', type=float, default=0.0025)
    parser.add_argument('--learning_rate_d', type=float, default=0.005)
    parser.add_argument('--update_rate', type=int, default=6)
    # }}}
    # Testing config {{{
    # ========
    parser.add_argument('--data_id', nargs='*', default=None)
    # }}}

    config = parser.parse_args()

    dataset_path = os.path.join(
        r"/media/wenyu/8d268d3e-37df-4af4-ab98-f5660b2e71a7/wenyu/PycharmProjects/SSGAN-original-Tensorflow/datasets",
        config.dataset.lower())
    # dataset_train and dataset_test each hold 10 cross-validation folds;
    # dataset_train[i] is the training data of the i-th fold.
    dataset_train, dataset_test = dataset.create_default_splits(
        dataset_path,
        hdf5FileName=config.hdf5FileName,
        idFileName=config.idFileName,
        cross_validation_number=10)
    print("step2")

    img, label = dataset_train[0].get_data(dataset_train[0].ids[0])
    print("step3")
    config.h = img.shape[0]
    config.w = img.shape[1]
    if len(img.shape) == 3:
        config.c = img.shape[2]
    else:
        config.c = 1
    config.num_class = label.shape[0]

    # --- create model ---
    model = Model(config, debug_information=config.debug, is_train=is_train)

    return config, model, dataset_train, dataset_test
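
# Hypothetical driver sketch for the cross-validation variant above: the
# returned dataset_train/dataset_test are lists of 10 folds, so each fold
# gets its own training run. `Trainer` is an assumed project class, as in
# the wiring sketch further up.
def cross_validation_sketch():
    config, model, dataset_train, dataset_test = argparser(is_train=True)
    for fold_idx in range(10):
        trainer = Trainer(config, model,
                          dataset_train[fold_idx], dataset_test[fold_idx])  # assumed API
        trainer.train()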