def arguments():
    """Parse command-line arguments for training the pix2pix-style paired translator.

    Returns:
        argparse.Namespace with post-processing applied:
        - ``args.out`` is extended with a timestamped subdirectory suffixed "_cgan"
        - ``args.dtype`` and the activation options are replaced by the objects
          looked up in the module-level ``dtypes`` / ``activation`` mappings
        - ``args.lrdecay_start`` / ``args.lrdecay_period`` are derived from the
          epoch count (decay starts at the halfway point)
        - ``args.wgan`` is forced to False

    NOTE(review): relies on module-level names ``dtypes``, ``activation``,
    ``save_args``, ``dt`` and ``os`` defined/imported elsewhere in this file.
    """
    parser = argparse.ArgumentParser(description='chainer implementation of pix2pix')
    # dataset specification
    parser.add_argument('--train', '-t', help='text file containing image pair filenames for training')
    parser.add_argument('--val', help='text file containing image pair filenames for validation')
    parser.add_argument('--imgtype', '-it', default="jpg", help="image file type (file extension)")
    parser.add_argument('--argfile', '-a', help="specify args file to read")
    parser.add_argument('--from_col', '-c1', type=int, nargs="*", default=[0], help='column index of FromImage')
    parser.add_argument('--to_col', '-c2', type=int, nargs="*", default=[1], help='column index of ToImage')
    # training schedule
    parser.add_argument('--batch_size', '-b', type=int, default=1, help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=400, help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=0, help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result', help='Directory to output the result')
    parser.add_argument('--root', '-R', default='.', help='directory containing image files')
    parser.add_argument('--learning_rate', '-lr', type=float, default=1e-4)
    parser.add_argument('--snapinterval', '-si', type=int, default=-1, help='take snapshot every this epoch')
    parser.add_argument('--display_interval', type=int, default=500, help='Interval of displaying log to console')
    parser.add_argument('--nvis', type=int, default=3, help='number of images in visualisation after each epoch')
    parser.add_argument('--crop_width', '-cw', type=int, default=128, help='better to have a value divisible by a large power of two')
    parser.add_argument('--crop_height', '-ch', type=int, default=128, help='better to have a value divisible by a large power of two')
    parser.add_argument('--grey', action='store_true', help='greyscale')
    # loss weights
    parser.add_argument('--lambda_rec_l1', '-l1', type=float, default=1.0)
    parser.add_argument('--lambda_rec_l2', '-l2', type=float, default=0.0)
    parser.add_argument('--lambda_dis', '-ldis', type=float, default=0.1)
    parser.add_argument('--lambda_tv', '-ltv', type=float, default=0.0)
    parser.add_argument('--lambda_mispair', '-lm', type=float, default=1.0)
    parser.add_argument('--tv_tau', '-tt', type=float, default=1e-3, help='smoothing parameter for total variation')
    # model loading
    parser.add_argument('--load_optimizer', '-op', action='store_true', help='load optimizer parameters')
    parser.add_argument('--model_gen', '-m', default='')
    parser.add_argument('--model_dis', '-md', default='')
    # network building blocks
    parser.add_argument('--dtype', '-dt', choices=dtypes.keys(), default='fp32', help='floating point precision')
    parser.add_argument('--eqconv', '-eq', action='store_true', help='Equalised Convolution')
    parser.add_argument('--spconv', '-sp', action='store_true', help='Separable Convolution')
    parser.add_argument('--weight_decay', '-wd', type=float, default=0,  # default: 1e-7
                        help='weight decay for regularization')
    parser.add_argument('--weight_decay_norm', '-wn', choices=['l1', 'l2'], default='l2',
                        help='norm of weight decay for regularization')
    parser.add_argument('--vis_freq', '-vf', type=int, default=4000, help='visualisation frequency in iteration')
    # data augmentation
    # NOTE(review): default=True with no type/action -- any value supplied on the
    # command line (even "False") is a non-empty string and therefore truthy;
    # confirm whether this flag is meant to be togglable from the CLI.
    parser.add_argument('--random', '-rt', default=True, help='random flip/crop')
    parser.add_argument('--noise', '-n', type=float, default=0, help='strength of noise injection')
    parser.add_argument('--noise_z', '-nz', type=float, default=0, help='strength of noise injection for the latent variable')
    # discriminator
    parser.add_argument('--dis_activation', '-da', default='lrelu', choices=activation.keys())
    parser.add_argument('--dis_basech', '-db', type=int, default=64, help='the base number of channels in discriminator')
    parser.add_argument('--dis_ksize', '-dk', type=int, default=4,  # default 4
                        help='kernel size for patchGAN discriminator')
    parser.add_argument('--dis_ndown', '-dl', type=int, default=3,  # default 3
                        help='number of down layers in discriminator')
    parser.add_argument('--dis_down', '-dd', default='down',
                        choices=['down', 'maxpool', 'maxpool_res', 'avgpool', 'avgpool_res', 'none'],  ## default down
                        help='type of down layers in discriminator')
    parser.add_argument('--dis_sample', '-ds', default='none',  ## default down
                        help='type of first conv layer for patchGAN discriminator')
    parser.add_argument('--dis_jitter', type=float, default=0, help='jitter for discriminator label for LSGAN')
    parser.add_argument('--dis_dropout', '-ddo', type=float, default=None, help='dropout ratio for discriminator')
    parser.add_argument('--dis_norm', '-dn', default='instance',
                        choices=['instance', 'batch', 'batch_aff', 'rbatch', 'fnorm', 'none'])
    # generator: G: A -> B, F: B -> A
    parser.add_argument('--gen_activation', '-ga', default='relu', choices=activation.keys())
    parser.add_argument('--gen_fc_activation', '-gfca', default='relu', choices=activation.keys())
    parser.add_argument('--gen_out_activation', '-go', default='tanh', choices=activation.keys())
    parser.add_argument('--gen_chs', '-gc', type=int, nargs="*", default=[64, 128, 256, 512],
                        help='Number of channels in down layers in generator; the first entry should coincide with the number of channels in the input images')
    parser.add_argument('--gen_fc', '-gfc', type=int, default=0, help='number of fc layers before convolutional layers')
    parser.add_argument('--gen_nblock', '-nb', type=int, default=9,  # default 9
                        help='number of residual blocks in generators')
    parser.add_argument('--gen_ksize', '-gk', type=int, default=3,  # default 4
                        help='kernel size for generator')
    parser.add_argument('--gen_sample', '-gs', default='none', help='first and last conv layers for generator')
    parser.add_argument('--gen_down', '-gd', default='down',
                        choices=['down', 'maxpool', 'maxpool_res', 'avgpool', 'avgpool_res', 'none'],
                        help='down layers in generator')
    parser.add_argument('--gen_up', '-gu', default='resize',
                        choices=['unpool', 'unpool_res', 'deconv', 'pixsh', 'resize', 'resize_res', 'none'],
                        help='up layers in generator')
    parser.add_argument('--gen_dropout', '-gdo', type=float, default=None, help='dropout ratio for generator')
    parser.add_argument('--gen_norm', '-gn', default='instance',
                        choices=['instance', 'batch', 'batch_aff', 'rbatch', 'fnorm', 'none'])
    parser.add_argument('--unet', '-u', default='none', choices=['none', 'no_last', 'with_last'],
                        help='use u-net for generator')

    args = parser.parse_args()

    # timestamped output directory, e.g. result/0131_1542_cgan
    args.out = os.path.join(args.out, dt.now().strftime('%m%d_%H%M') + "_cgan")
    # persist the parsed arguments to the output dir (helper defined elsewhere
    # in this module -- presumably writes an args file; verify against its def)
    save_args(args, args.out)
    print(args)
    print(args.out)
    args.wgan = False
    # replace the string choices with the actual objects from module-level maps
    args.dtype = dtypes[args.dtype]
    args.dis_activation = activation[args.dis_activation]
    args.gen_activation = activation[args.gen_activation]
    args.gen_fc_activation = activation[args.gen_fc_activation]
    args.gen_out_activation = activation[args.gen_out_activation]
    # learning rate decays over the second half of training
    args.lrdecay_start = args.epoch // 2
    args.lrdecay_period = args.epoch - args.lrdecay_start
    return(args)
def arguments():
    """Parse command-line arguments for the CT-reconstruction / GAN training script.

    Returns:
        argparse.Namespace with post-processing applied:
        - generator/discriminator channel lists are derived from base channel
          counts when not given, and capped at 512 channels per layer
        - data subdirectories (planCT / reconstructed / projection) default to
          subdirs of ``args.root``
        - projection/system matrix filenames are rewritten for non-256 crops
        - derived flags ``use_dis``/``use_enc`` and fixed ``ch``/``out_ch`` of 1
        - ``args.out`` is extended with a timestamped, hyperparameter-encoding
          subdirectory

    Fixes over the previous revision (help-text only, no behavior change):
    - closed the unbalanced parenthesis in the --gpu help string
    - "turn of discriminator" -> "turn off discriminator" in --dis_freq help

    NOTE(review): relies on module-level names ``optim``, ``dtypes``,
    ``activation_func``, ``norm_layer``, ``unettype``, ``dt`` and ``os``
    defined/imported elsewhere in this file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=0,
                        help='GPU IDs (currently, only single-GPU usage is supported)')
    parser.add_argument('--out', '-o', default='result', help='Directory to output the result')
    parser.add_argument('--unified_memory_pool', '-mp', action='store_true',
                        help='Use CPU memory to load big matrices into GPU')
    ## specify data file
    parser.add_argument('--root', '-R', default='data', type=str, help='Root directory for data')
    parser.add_argument('--planct_dir', '-Rp', default='', type=str,
                        help='dir containing planCT images for discriminator')
    parser.add_argument('--mvct_dir', '-Rm', default='', type=str,
                        help='dir containing reconstructed MVCT images for discriminator')
    parser.add_argument('--sinogram', '-Rs', default='', type=str, help='directory containing sinograms')
    parser.add_argument('--projection_matrix', '-pm', type=str,
                        default='projection_matrix_2d_512_1-074mm_Nr100_equalWeights_ver4-4.npz',
                        help='filename of the projection matrix')
    parser.add_argument('--system_matrix', '-sm', type=str,
                        default='systemMatrix_2d_256_1074mm.npz',
                        help='filename of the system matrix')
    parser.add_argument('--argfile', '-a', type=str, help="specify args file to load settings from")
    # initial models
    parser.add_argument('--model_gen', '-mg', help='pretrained model file for generator')
    parser.add_argument('--model_dis', '-md', help='pretrained model file for discriminator')
    parser.add_argument('--model_image', '-mi', default="", help='initial seed image')
    # OSEM added by S.O. in 2020/06/16
    parser.add_argument('--osem', '-os', type=int, default=1, help='number of thinning out for OSEM')
    # dicom related
    parser.add_argument('--HU_base', '-hub', type=int, default=-6000,  # -4500,
                        help='minimum HU value to be accounted for')
    parser.add_argument('--HU_range', '-hur', type=int, default=9000,  # 6000,
                        help='the maximum HU value to be accounted for will be HU_base+HU_range')  # 700
    parser.add_argument('--HU_range_vis', '-hurv', default=2000, type=int, help='HU range in the visualization')
    parser.add_argument('--crop_width', '-cw', type=int, default=512)
    parser.add_argument('--crop_height', '-ch', type=int, default=None)
    parser.add_argument('--scale_to', '-sc', type=int, default=-1)
    # data augmentation
    parser.add_argument('--random_translate', '-rt', type=int, default=4, help='random translation for planCT')
    parser.add_argument('--noise_dis', '-nd', type=float, default=0,
                        help='strength of noise injection for discriminator')
    parser.add_argument('--noise_gen', '-ng', type=float, default=0,
                        help='strength of noise injection for generator')
    ## training strategy
    parser.add_argument('--epoch', '-e', default=-1, type=int, help='number of reconstructions')
    parser.add_argument('--iter', '-i', default=20000, type=int,
                        help='number of iterations for each reconstruction')
    parser.add_argument('--batchsize', '-b', type=int, default=1)
    parser.add_argument('--weight_decay', '-wd', type=float, default=0,  # 1e-7,
                        help='weight decay for regularization')
    parser.add_argument('--optimizer', '-op', choices=optim.keys(), default='Adam_d', help='select optimizer')
    parser.add_argument('--optimizer_dis', '-opd', choices=optim.keys(), default='Adam_d', help='select optimizer')
    parser.add_argument('--max_reconst_freq', '-mf', default=1, type=int,  # 40
                        help='consistency loss will be considered one in every this number in the end')
    parser.add_argument('--reconst_freq_decay_start', '-rfd', default=400, type=int,
                        help='reconst_freq starts to increase towards max_reconst_freq after this number of iterations')
    parser.add_argument('--dis_freq', '-df', default=1, type=int,
                        help='discriminator update interval; set to negative to turn off discriminator')
    parser.add_argument('--no_train_dec', '-ntd', action='store_true',
                        help='not updating decoder during training')
    parser.add_argument('--no_train_enc', '-nte', action='store_true',
                        help='not updating encoder during training')
    parser.add_argument('--no_train_seed', '-nts', action='store_true',
                        help='not updating seed during training')
    parser.add_argument('--decoder_only', '-d', action='store_true', help='not using encoder')
    parser.add_argument('--clip', '-cl', action='store_true',
                        help='clip the seed array to [-1,1] at every iteration')
    # learning rate
    parser.add_argument('--lr_sd', '-lrs', default=1e-2, type=float,  # 1e-2 for conjugate (Adam)
                        help='learning rate for seed array')
    parser.add_argument('--lr_gen', '-lrg', default=1e-4, type=float,  # 1e-2
                        help='learning rate for generator NN')
    parser.add_argument('--lr_dis', '-lrd', default=1e-4, type=float,
                        help='learning rate for discriminator NN')
    parser.add_argument('--lr_drop', '-lrp', default=1, type=int, help='learning rate decay')
    ## structure of neural network
    parser.add_argument('--dp', action='store_true', help='Use an alternative network structure')
    parser.add_argument('--dtype', '-dt', choices=dtypes.keys(), default='fp32',
                        help='floating point precision')
    parser.add_argument('--eqconv', '-eq', action='store_true', help='Enable Equalised Convolution')
    parser.add_argument('--spconv', '-sp', action='store_true', help='Enable Separable Convolution')
    parser.add_argument('--senet', '-se', action='store_true', help='Enable Squeeze-and-Excitation mechanism')
    # discriminator
    parser.add_argument('--dis_activation', '-da', default='lrelu', choices=activation_func.keys())
    parser.add_argument('--dis_out_activation', '-do', default='none', choices=activation_func.keys())
    parser.add_argument('--dis_chs', '-dc', type=int, nargs="*", default=None,
                        help='Number of channels in down layers in discriminator')
    parser.add_argument('--dis_basech', '-db', type=int, default=32,
                        help='the base number of channels in discriminator (doubled in each down-layer)')
    parser.add_argument('--dis_ndown', '-dl', type=int, default=4,
                        help='number of down layers in discriminator')
    parser.add_argument('--dis_ksize', '-dk', type=int, default=4,
                        help='kernel size for patchGAN discriminator')
    parser.add_argument('--dis_down', '-dd', default='down',
                        help='type of down layers in discriminator')
    parser.add_argument('--dis_sample', '-ds', default='down',
                        help='type of first conv layer for patchGAN discriminator')
    parser.add_argument('--dis_jitter', type=float, default=0,
                        help='jitter for discriminator label for LSGAN')
    parser.add_argument('--dis_dropout', '-ddo', type=float, default=None,
                        help='dropout ratio for discriminator')
    parser.add_argument('--dis_norm', '-dn', default='group', choices=norm_layer)
    parser.add_argument('--dis_reg_weighting', '-dw', type=float, default=0,
                        help='regularisation of weighted discriminator. Set 0 to disable weighting')
    parser.add_argument('--dis_wgan', action='store_true', help='WGAN-GP')
    parser.add_argument('--dis_attention', action='store_true',
                        help='attention mechanism for discriminator')
    # generator: G: A -> B, F: B -> A
    parser.add_argument('--gen_activation', '-ga', default='relu', choices=activation_func.keys())
    parser.add_argument('--gen_out_activation', '-go', default='tanh', choices=activation_func.keys())
    parser.add_argument('--gen_fc_activation', '-gfca', default='relu', choices=activation_func.keys())
    parser.add_argument('--gen_chs', '-gc', type=int, nargs="*", default=None,
                        help='Number of channels in down layers in generator')
    parser.add_argument('--gen_ndown', '-gl', type=int, default=3,
                        help='number of down layers in generator')
    parser.add_argument('--gen_basech', '-gb', type=int, default=32,
                        help='the base number of channels in generator (doubled in each down-layer)')
    parser.add_argument('--gen_fc', '-gfc', type=int, default=0,
                        help='number of fc layers before convolutional layers')
    parser.add_argument('--gen_nblock', '-gnb', type=int, default=4,
                        help='number of residual blocks in generators')
    parser.add_argument('--gen_ksize', '-gk', type=int, default=3,
                        help='kernel size for generator')
    parser.add_argument('--gen_sample', '-gs', default='none',
                        help='first and last conv layers for generator')
    parser.add_argument('--gen_down', '-gd', default='down', help='down layers in generator')
    parser.add_argument('--gen_up', '-gu', default='deconv', help='up layers in generator')
    parser.add_argument('--gen_dropout', '-gdo', type=float, default=None,
                        help='dropout ratio for generator')
    parser.add_argument('--gen_norm', '-gn', default='batch_aff', choices=norm_layer)
    parser.add_argument('--unet', '-u', default='none', choices=unettype,
                        help='use u-net skip connections for generator')
    parser.add_argument('--skipdim', '-sd', type=int, default=4,
                        help='channel number for skip connections')
    parser.add_argument('--latent_dim', '-ld', default=-1, type=int,
                        help='dimension of the latent space between encoder and decoder')
    ## loss function
    parser.add_argument('--tv_tau', '-tt', type=float, default=1e-3,
                        help='smoothing parameter for total variation')
    parser.add_argument('--tv_method', '-tm', default='usual', choices=['abs', 'sobel', 'usual'],
                        help='method of calculating total variation')
    # NOTE(review): default=True with no type/action -- any command-line value
    # (even "False") parses as a truthy non-empty string; confirm intended usage.
    parser.add_argument('--log', default=True,
                        help='Do not take exponential (consider reconstruction loss in the image domain rather than in the projection domain)')
    # weights
    parser.add_argument('--lambda_tv', '-ltv', default=0, type=float,  # 2e+2 for 256x256, 5e+2 is strong
                        help='weight of total variation regularization for generator')
    parser.add_argument('--lambda_tvs', '-ltvs', default=0, type=float,
                        help='weight of total variation regularization for seed array')
    parser.add_argument('--lambda_adv', '-ladv', default=0.0, type=float,
                        help='weight of adversarial loss for generator')
    parser.add_argument('--lambda_advs', '-ladvs', default=0.0, type=float,
                        help='weight of adversarial loss for seed')
    parser.add_argument('--lambda_gan', '-lgan', default=0.0, type=float,
                        help='weight of random fake generation loss for generator')
    parser.add_argument('--lambda_sd', '-ls', default=0, type=float,
                        help='weight of reconstruction consistency loss for seed array')
    parser.add_argument('--lambda_nn', '-ln', default=0.0, type=float,
                        help='weight of reconstruction consistency loss for CNN')
    parser.add_argument('--lambda_ae1', '-lae1', default=0.0, type=float,
                        help='autoencoder L1 loss for generator')
    parser.add_argument('--lambda_ae2', '-lae2', default=0.0, type=float,
                        help='autoencoder L2 loss for generator')
    parser.add_argument('--lambda_reg', '-lreg', type=float, default=0,
                        help='weight for regularisation for generator')
    ## save and report
    parser.add_argument('--vis_freq', '-vf', default=1000, type=int, help='image output interval')
    parser.add_argument('--no_save_dcm', '-nodcm', action='store_true')
    parser.add_argument('--snapinterval', '-si', type=int, default=-1,
                        help='take snapshot every this reconstruction')

    args = parser.parse_args()

    # set defaults: derive channel lists from base channels, capped at 512
    if not args.gen_chs:
        args.gen_chs = [int(args.gen_basech) * (2**i) for i in range(args.gen_ndown)]
    args.gen_chs = [i if i < 512 else 512 for i in args.gen_chs]
    if not args.dis_chs:
        args.dis_chs = [int(args.dis_basech) * (2**i) for i in range(args.dis_ndown)]
    args.dis_chs = [i if i < 512 else 512 for i in args.dis_chs]
    # data subdirectories default to fixed names under args.root
    if not args.planct_dir:
        args.planct_dir = os.path.join(args.root, "planCT")
    if not args.mvct_dir:
        args.mvct_dir = os.path.join(args.root, "reconstructed")
    if not args.sinogram:
        args.sinogram = os.path.join(args.root, "projection")
    if not args.crop_height:
        args.crop_height = args.crop_width
    # matrix filenames encode the crop size; substitute for non-256 crops
    if args.crop_width != 256:
        args.projection_matrix = args.projection_matrix.replace('256', str(args.crop_width))
        args.system_matrix = args.system_matrix.replace('256', str(args.crop_width))
    # a positive latent dimension implies the decoder-only architecture
    if args.latent_dim > 0:
        args.decoder_only = True
    # skip connections only make sense with an encoder and u-net enabled
    if args.decoder_only or args.unet == "none":
        args.skipdim = 0
    if args.iter < args.vis_freq:
        args.vis_freq = args.iter
    # discriminator is used iff any adversarial weight is positive
    args.use_dis = (args.lambda_gan + args.lambda_adv + args.lambda_advs > 0)
    args.use_enc = (not args.decoder_only)
    args.ch = 1
    args.out_ch = 1
    # timestamped output dir encoding the key hyperparameters
    dtime = dt.now().strftime('%m%d_%H%M')
    args.out = os.path.join(
        args.out,
        '{}_ln{}_lgan{}_ladv{}_df{},dim{}_mg{}_md{}'.format(
            dtime, args.lambda_nn, args.lambda_gan, args.lambda_adv,
            args.dis_freq, args.latent_dim,
            (args.model_gen is not None), (args.model_dis is not None)))
    return (args)
def arguments():
    """Parse command-line arguments for paired image-to-image translation.

    Unlike the other parsers in this file, most options default to None here;
    after parsing, missing values are filled in from the args file given with
    --argfile (JSON) and then from the module-level ``default_values`` mapping.

    Returns:
        argparse.Namespace, post-processed: channel lists derived from base
        channels, clip ranges defaulted per image type, softmax output forced
        for classification, and args.ch / args.out_ch channel counts set.

    NOTE(review): relies on module-level names ``optim``, ``dtypes``,
    ``activation_func``, ``norm_layer``, ``default_values``, ``json`` and
    ``os`` defined/imported elsewhere in this file.  Also note the help-string
    typo "nomalisation" (runtime text, deliberately left unchanged here).
    """
    parser = argparse.ArgumentParser(
        description='Image-to-image translation using a paired training dataset')
    parser.add_argument('--argfile', '-a', help="specify args file to read")
    parser.add_argument('--out', '-o', help='Directory to output the result')
    # input image
    parser.add_argument('--root', '-R', help='directory containing image files')
    parser.add_argument('--btoa', action='store_true', help='convert in the opposite way (B to A)')
    parser.add_argument('--train', '-t', help='text file containing image pair filenames for training')
    parser.add_argument('--val', help='text file containing image pair filenames for validation')
    parser.add_argument('--from_col', '-c1', type=int, nargs="*", help='column index of FromImage')
    parser.add_argument('--to_col', '-c2', type=int, nargs="*", help='column index of ToImage')
    parser.add_argument('--imgtype', '-it', help="image file type (file extension)")
    parser.add_argument('--crop_width', '-cw', type=int,
                        help='this value may have to be divisible by a large power of two (if you encounter errors)')
    parser.add_argument('--crop_height', '-ch', type=int,
                        help='this value may have to be divisible by a large power of two (if you encounter errors)')
    parser.add_argument('--grey', action='store_true', help='load image (jpg/png) in greyscale')
    parser.add_argument('--clipA', '-ca', type=float, nargs=2,
                        help="lower and upper limit for pixel values of images in domain A")
    parser.add_argument('--clipB', '-cb', type=float, nargs=2,
                        help="lower and upper limit for pixel values of images in domain B")
    parser.add_argument('--class_num', '-cn', type=int,
                        help='number of classes for pixelwise classification (only for images in domain B)')
    # training
    parser.add_argument('--batch_size', '-b', type=int, help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--learning_rate', '-lr', type=float, help='Learning rate')
    parser.add_argument('--learning_rate_gen', '-lrg', type=float)
    parser.add_argument('--learning_rate_dis', '-lrd', type=float)
    parser.add_argument('--lr_drop', type=int, help='How many times the learning rate drops')
    parser.add_argument('--weight_decay', '-wd', type=float, help='weight decay for regularization')
    parser.add_argument('--weight_decay_norm', '-wn', choices=['l1', 'l2'],
                        help='norm of weight decay for regularization')
    # snapshot and evaluation
    parser.add_argument('--snapinterval', '-si', type=int, help='take snapshot every this epoch')
    parser.add_argument('--display_interval', type=int, help='Interval of displaying log to console')
    parser.add_argument('--nvis', type=int, help='number of images in visualisation after each epoch')
    parser.add_argument('--vis_freq', '-vf', type=int, help='visualisation frequency in iteration')
    parser.add_argument('--parameter_statistics', '-ps', action='store_true',
                        help='Log NN parameter statistics (very slow)')
    # weights
    parser.add_argument('--lambda_rec_l1', '-l1', type=float, help='weight for L1 reconstruction loss')
    parser.add_argument('--lambda_rec_l2', '-l2', type=float, help='weight for L2 reconstruction loss')
    parser.add_argument('--lambda_rec_ce', '-lce', type=float,
                        help='weight for softmax focal reconstruction loss')
    parser.add_argument('--lambda_dis', '-ldis', type=float, help='weight for adversarial loss')
    parser.add_argument('--lambda_tv', '-ltv', type=float, help='weight for total variation')
    parser.add_argument('--lambda_reg', '-lreg', type=float, help='weight for regularisation for encoders')
    parser.add_argument('--lambda_mispair', '-lm', type=float,
                        help='weight for discriminator rejecting mis-matched (real,real) pairs')
    parser.add_argument('--lambda_wgan_gp', '-lwgp', type=float,
                        help='lambda for the gradient penalty for WGAN')
    parser.add_argument('--tv_tau', '-tt', type=float, help='smoothing parameter for total variation')
    parser.add_argument('--loss_ksize', '-lk', type=int,
                        help='take average pooling of this kernel size before computing L1 and L2 losses')
    # data augmentation
    parser.add_argument('--random_translate', '-rt', type=int,
                        help='jitter input images by random translation')
    parser.add_argument('--noise', '-n', type=float, help='strength of noise injection')
    parser.add_argument('--noise_z', '-nz', type=float,
                        help='strength of noise injection for the latent variable')
    # load model/optimizer
    parser.add_argument('--load_optimizer', '-mo', action='store_true', help='load optimizer parameters')
    parser.add_argument('--model_gen', '-m', default='',
                        help='specify a learnt encoder/generator model file')
    parser.add_argument('--model_dis', '-md', default='',
                        help='specify a learnt discriminator model file')
    parser.add_argument('--optimizer', '-op', choices=optim.keys(), help='optimizer')
    # network
    parser.add_argument('--dtype', '-dt', choices=dtypes.keys(), help='floating point precision')
    parser.add_argument('--eqconv', '-eq', action='store_true', help='Equalised Convolution')
    parser.add_argument('--spconv', '-sp', action='store_true', help='Separable Convolution')
    parser.add_argument('--senet', '-se', action='store_true',
                        help='Enable Squeeze-and-Excitation mechanism')
    # discriminator
    parser.add_argument('--dis_activation', '-da', choices=activation_func.keys(),
                        help='activation of middle layers discriminators')
    parser.add_argument('--dis_out_activation', '-do', choices=activation_func.keys(),
                        help='activation of last layer of discriminators')
    parser.add_argument('--dis_ksize', '-dk', type=int, help='kernel size for patchGAN discriminator')
    parser.add_argument('--dis_chs', '-dc', type=int, nargs="*",
                        help='Number of channels in down layers in discriminator')
    parser.add_argument('--dis_basech', '-db', type=int,
                        help='the base number of channels in discriminator (doubled in each down-layer)')
    parser.add_argument('--dis_ndown', '-dl', type=int,
                        help='number of down layers in discriminator')
    parser.add_argument('--dis_down', '-dd', help='type of down layers in discriminator')
    parser.add_argument('--dis_sample', '-ds',
                        help='type of first conv layer for patchGAN discriminator')
    parser.add_argument('--dis_jitter', type=float, help='jitter for discriminator label for LSGAN')
    parser.add_argument('--dis_dropout', '-ddo', type=float, help='dropout ratio for discriminator')
    parser.add_argument('--dis_norm', '-dn', choices=norm_layer,
                        help='nomalisation layer for discriminator')
    parser.add_argument('--dis_reg_weighting', '-dw', type=float,
                        help='regularisation of weighted discriminator. Set 0 to disable weighting')
    parser.add_argument('--dis_wgan', '-wgan', action='store_true', help='WGAN-GP')
    parser.add_argument('--dis_attention', action='store_true',
                        help='attention mechanism for discriminator')
    # generator
    parser.add_argument('--gen_pretrained_encoder', '-gp', type=str, choices=["", "vgg", "resnet"],
                        help='Use pretrained ResNet/VGG as encoder')
    parser.add_argument('--gen_pretrained_lr_ratio', '-gpr', type=float,
                        help='learning rate multiplier for the pretrained part')
    parser.add_argument('--gen_activation', '-ga', choices=activation_func.keys(),
                        help='activation for middle layers of generators')
    parser.add_argument('--gen_out_activation', '-go', choices=activation_func.keys(),
                        help='activation for last layers of generators')
    parser.add_argument('--gen_chs', '-gc', type=int, nargs="*",
                        help='Number of channels in down layers in generator')
    parser.add_argument('--gen_ndown', '-gl', type=int, help='number of down layers in generator')
    parser.add_argument('--gen_basech', '-gb', type=int,
                        help='the base number of channels in generator (doubled in each down-layer)')
    parser.add_argument('--gen_fc', '-gfc', type=int,
                        help='number of fc layers before convolutional layers')
    parser.add_argument('--gen_fc_activation', '-gfca', choices=activation_func.keys(),
                        help='activation of fc layers before convolutional layers')
    parser.add_argument('--gen_nblock', '-gnb', type=int,
                        help='number of residual blocks in generators')
    parser.add_argument('--gen_ksize', '-gk', type=int, help='kernel size for generator')
    parser.add_argument('--gen_sample', '-gs', help='first and last conv layers for generator')
    parser.add_argument('--gen_down', '-gd', help='down layers in generator')
    parser.add_argument('--gen_up', '-gu', help='up layers in generator')
    parser.add_argument('--gen_dropout', '-gdo', type=float, help='dropout ratio for generator')
    parser.add_argument('--gen_norm', '-gn', choices=norm_layer,
                        help='nomalisation layer for generator')
    parser.add_argument('--unet', '-u', help='use u-net for generator')
    parser.add_argument('--skipdim', '-sd', type=int, help='channel number for skip connections')
    parser.add_argument('--latent_dim', type=int,
                        help='dimension of the latent space between encoder and decoder')
    ####
    args = parser.parse_args()

    # number of channels in input/output images: inferred from data or args file.
    args.ch = None
    args.out_ch = None

    ## set default values from file
    if args.argfile:
        with open(args.argfile, 'r') as f:
            larg = json.load(f)
    else:
        larg = []  # empty: `x in larg` below is then always False
    # fill every still-None option from the args file, else from default_values
    # NOTE(review): assumes default_values is a module-level mapping of option
    # name -> default (and presumably provides clipA/clipB as 2-element lists,
    # since clipA[0] is indexed below) -- verify against its definition.
    for x in default_values:
        if getattr(args, x) is None:
            if x in larg:
                setattr(args, x, larg[x])
            else:
                setattr(args, x, default_values[x])

    # a single --learning_rate overrides both per-network rates (dis at half)
    if args.learning_rate:
        args.learning_rate_gen = args.learning_rate
        args.learning_rate_dis = args.learning_rate / 2
    # pretrained encoders fix the channel progression
    if "resnet" in args.gen_pretrained_encoder:
        args.gen_chs = [64, 256, 512, 1024, 2048][:args.gen_ndown]
    elif "vgg" in args.gen_pretrained_encoder:
        args.gen_chs = [64, 128, 256, 512, 512][:args.gen_ndown]
    # derive channel lists from base channels, or depth from explicit lists
    if not args.gen_chs:
        args.gen_chs = [int(args.gen_basech) * (2**i) for i in range(args.gen_ndown)]
    else:
        args.gen_ndown = len(args.gen_chs)
    if not args.dis_chs:
        args.dis_chs = [int(args.dis_basech) * (2**i) for i in range(args.dis_ndown)]
    else:
        args.dis_ndown = len(args.dis_chs)
    # per-imgtype pixel-value clip defaults (HU range for dicom, 8-bit otherwise)
    if args.imgtype == "dcm":
        args.grey = True
        if args.clipA[0] is None:
            args.clipA = [-1024, 2000]
        if args.clipB[0] is None:
            args.clipB = [-1024, 2000]
    elif args.imgtype not in ['csv', 'txt', 'npy']:
        if args.clipA[0] is None:
            args.clipA = [0, 255]
        if args.clipB[0] is None:
            args.clipB = [0, 255]
    # fc layers need a fixed input size; abort the process if none was given
    if args.gen_fc > 0 and args.crop_width is None:
        print("Specify crop_width and crop_height!")
        exit()
    # pixelwise classification forces a softmax output over class_num channels
    if args.class_num > 0:
        args.gen_out_activation = 'softmax'
        print("the last activation is set to softmax for classification.")
        if args.out_ch is None:
            args.out_ch = args.class_num
    # convert.py
    if args.out_ch is None:
        args.out_ch = 1 if args.grey else 3
    if args.ch is None:
        args.ch = 1 if args.grey else 3
    print(args)
    return (args)
def arguments():
    """Parse command-line options for a two-domain image-translation model
    with generators G: A -> B and F: B -> A (cycle-consistency losses below).

    Returns:
        argparse.Namespace: parsed options.  After parsing, ``--epoch``
        (if given) overrides the lr-decay schedule, and ``--learning_rate``
        (if given) overrides both per-network learning rates.
    """
    parser = argparse.ArgumentParser()
    # ---- data / output locations ----
    parser.add_argument(
        '--root', '-R', default='data',
        help='Directory containing trainA, trainB, testA, testB')
    parser.add_argument('--batch_size', '-b', type=int, default=1)
    parser.add_argument('--gpu', '-g', type=int, nargs="*", default=[0],
                        help='GPU IDs')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--argfile', '-a', help="specify args file to read")
    parser.add_argument('--imgtype', '-it', default="jpg",
                        help="image file type (file extension)")
    # ---- optimisation schedule ----
    # --learning_rate (if set) overrides both -lrg and -lrd after parsing.
    parser.add_argument('--learning_rate', '-lr', type=float, default=None,
                        help='Learning rate')
    parser.add_argument(
        '--learning_rate_g', '-lrg', type=float,
        default=1e-4,  # 2e-4 in the original paper
        help='Learning rate for generator')
    parser.add_argument('--learning_rate_d', '-lrd', type=float, default=1e-4,
                        help='Learning rate for discriminator')
    parser.add_argument('--lrdecay_start', '-e1', type=int, default=25,
                        help='anneal the learning rate (by epoch)')
    parser.add_argument('--lrdecay_period', '-e2', type=int, default=25,
                        help='period to anneal the learning')
    parser.add_argument('--epoch', '-e', type=int, default=None, help='epoch')
    parser.add_argument('--snapinterval', '-si', type=int, default=-1,
                        help='take snapshot every this epoch')
    parser.add_argument('--weight_decay', '-wd', type=float, default=0,
                        help='weight decay for regularization')
    parser.add_argument('--weight_decay_norm', '-wn', choices=['l1', 'l2'],
                        default='l2',
                        help='norm of weight decay for regularization')
    #
    parser.add_argument(
        '--crop_width', '-cw', type=int, default=480,
        help='better to have a value divisible by a large power of two')
    parser.add_argument(
        '--crop_height', '-ch', type=int, default=384,
        help='better to have a value divisible by a large power of two')
    parser.add_argument('--grey', action='store_true', help='greyscale')
    # ---- model loading / numeric precision ----
    parser.add_argument('--load_optimizer', '-op', action='store_true',
                        help='load optimizer parameters')
    parser.add_argument('--load_models', '-m', default='',
                        help='load models: specify enc_x/gen_g model file')
    # `dtypes` is presumably a module-level dict mapping precision names --
    # defined outside this view; TODO confirm.
    parser.add_argument('--dtype', '-dt', choices=dtypes.keys(),
                        default='fp32', help='floating point precision')
    parser.add_argument('--eqconv', '-eq', action='store_true',
                        help='Equalised Convolution')
    parser.add_argument('--spconv', '-sp', action='store_true',
                        help='Separable Convolution')
    # options for converter
    parser.add_argument('--output_analysis', '-oa', action='store_true',
                        help='Output analysis images in conversion')
    # data augmentation
    parser.add_argument('--random_translate', '-rt', type=int, default=5,
                        help='jitter input images by random translation')
    parser.add_argument('--noise', '-n', type=float, default=0,
                        help='strength of noise injection')
    parser.add_argument(
        '--noise_z', '-nz', type=float, default=0.0,
        help='strength of noise injection for the latent variable')
    ## the parameters below are not used
    parser.add_argument('--HU_base', '-hub', type=int, default=-500,
                        help='minimum HU value to be accounted for')
    parser.add_argument(
        '--HU_range', '-hur', type=int, default=700,
        help='the maximum HU value to be accounted for will be HU_base+HU_range'
    )
    parser.add_argument('--slice_range', '-sr', type=float, nargs="*",
                        default=None, help='')
    parser.add_argument(
        '--forceSpacing', '-fs', type=float,
        default=-1,  # 0.7634,
        help='rescale B to match the specified spacing')
    # discriminator
    # `activation` maps activation names to functions -- defined outside
    # this view; TODO confirm.
    parser.add_argument('--dis_activation', '-da', default='lrelu',
                        choices=activation.keys())
    parser.add_argument('--dis_basech', '-db', type=int, default=64,
                        help='the base number of channels in discriminator')
    parser.add_argument('--dis_ksize', '-dk', type=int, default=4,
                        help='kernel size for patchGAN discriminator')
    parser.add_argument('--dis_ndown', '-dl', type=int, default=3,
                        help='number of down layers in discriminator')
    parser.add_argument(
        '--dis_down', '-dd', default='down',
        choices=[
            'down', 'maxpool', 'maxpool_res', 'avgpool', 'avgpool_res', 'none'
        ],  ## default down
        help='type of down layers in discriminator')
    parser.add_argument(
        '--dis_sample', '-ds', default='down',
        help='type of first conv layer for patchGAN discriminator')
    parser.add_argument('--dis_jitter', type=float, default=0,
                        help='jitter for discriminator label for LSGAN')
    parser.add_argument('--dis_dropout', '-ddo', type=float, default=None,
                        help='dropout ratio for discriminator')
    parser.add_argument(
        '--dis_norm', '-dn', default='instance',
        choices=['instance', 'batch', 'batch_aff', 'rbatch', 'fnorm', 'none'])
    parser.add_argument('--conditional_discriminator', '-cd',
                        action='store_true',
                        help='use paired dataset for training discriminator')
    parser.add_argument(
        '--n_critics', '-nc', type=int, default=1,
        help=
        'discriminator is trained this times during a single training of generators'
    )
    parser.add_argument('--wgan', action='store_true', help='WGAN-GP')
    # generator: G: A -> B, F: B -> A
    parser.add_argument('--gen_activation', '-ga', default='relu',
                        choices=activation.keys())
    parser.add_argument('--gen_out_activation', '-go', default='tanh',
                        choices=activation.keys())
    parser.add_argument('--gen_fc_activation', '-gfca', default='relu',
                        choices=activation.keys())
    parser.add_argument(
        '--gen_chs', '-gc', type=int, nargs="*", default=[32, 64, 128],
        help=
        'Number of channels in down layers in generator; the first entry should coincide with the number of channels in the input images'
    )
    parser.add_argument('--gen_fc', '-gfc', type=int, default=0,
                        help='number of fc layers before convolutional layers')
    parser.add_argument('--gen_nblock', '-nb', type=int, default=9,
                        help='number of residual blocks in generators')
    parser.add_argument(
        '--gen_ksize', '-gk', type=int,
        default=3,  # 4 in the original paper
        help='kernel size for generator')
    parser.add_argument('--gen_sample', '-gs', default='none-7',
                        help='first and last conv layers for generator')
    parser.add_argument('--gen_down', '-gd', default='down',
                        choices=[
                            'down', 'maxpool', 'maxpool_res', 'avgpool',
                            'avgpool_res', 'none'
                        ],
                        help='down layers in generator')
    parser.add_argument('--gen_up', '-gu', default='resize',
                        choices=[
                            'unpool', 'unpool_res', 'deconv', 'pixsh',
                            'resize', 'resize_res', 'none'
                        ],
                        help='up layers in generator')
    parser.add_argument('--gen_dropout', '-gdo', type=float, default=None,
                        help='dropout ratio for generator')
    parser.add_argument(
        '--gen_norm', '-gn', default='instance',
        choices=['instance', 'batch', 'batch_aff', 'rbatch', 'fnorm', 'none'])
    parser.add_argument('--unet', '-u', default='with_last',
                        choices=['none', 'no_last', 'with_last'],
                        help='use u-net for generator')
    parser.add_argument(
        '--gen_start', type=int, default=200,
        help=
        'start using discriminator for generator training after this number of iterations'
    )
    parser.add_argument(
        '--warmup', type=int, default=200,
        help='add loss L2(x,x_y)+L2(y,y_x) for warming-up iterations')
    ## loss function
    parser.add_argument('--lambda_A', '-lcA', type=float, default=10.0,
                        help='weight for cycle loss FG=Id:A -> B -> A')
    parser.add_argument('--lambda_B', '-lcB', type=float, default=10.0,
                        help='weight for cycle loss GF=Id:B -> A -> B')
    parser.add_argument('--cycle_ksize', '-ck', type=int, default=0,
                        help='kernel size for cycle consistency')
    parser.add_argument('--lambda_identity_x', '-lix', type=float, default=0,
                        help='lambda for perceptual loss for A -> B')
    parser.add_argument('--lambda_identity_y', '-liy', type=float, default=0,
                        help='lambda for perceptual loss for B -> A')
    parser.add_argument('--id_ksize', '-ik', type=int, default=0,
                        help='kernel size for G-Id')
    parser.add_argument('--lambda_grad', '-lg', type=float, default=0,
                        help='lambda for gradient loss')
    parser.add_argument('--lambda_air', '-la', type=float, default=0,
                        help='lambda for air comparison loss')
    parser.add_argument('--grad_norm', default='l2', choices=['l1', 'l2'],
                        help='norm for gradient loss')
    parser.add_argument(
        '--lambda_domain', '-ld', type=float, default=0,
        help=
        'lambda for domain preservation: G (resp. F) restricted on A (resp. B) should be Id'
    )
    parser.add_argument('--lambda_idempotence', '-lidm', type=float,
                        default=0, help='lambda for idempotence: G^2=F^2=Id')
    parser.add_argument('--lambda_dis_y', '-ly', type=float, default=1,
                        help='lambda for discriminator for domain B')
    parser.add_argument('--lambda_dis_x', '-lx', type=float, default=1,
                        help='lambda for discriminator for domain A')
    parser.add_argument(
        '--lambda_tv', '-ltv', type=float,
        default=0,  ## typically, 1e-3
        help='lambda for the total variation')
    parser.add_argument('--lambda_wgan_gp', '-lwgp', type=float, default=10,
                        help='lambda for the gradient penalty for WGAN')
    parser.add_argument('--tv_tau', '-tt', type=float, default=1e-3,
                        help='smoothing parameter for total variation')
    ## visualisation during training
    parser.add_argument(
        '--nvis_A', type=int, default=3,
        help='number of images in A to visualise after each epoch')
    parser.add_argument(
        '--nvis_B', type=int, default=3,
        help='number of images in B to visualise after each epoch')
    parser.add_argument('--vis_freq', '-vf', type=int, default=1000,
                        help='visualisation frequency in iteration')
    ## latent space model specific
    parser.add_argument('--lambda_reg', '-lreg', type=float, default=0,
                        help='weight for regularisation for encoders')
    parser.add_argument(
        '--lambda_dis_z', '-lz', type=float, default=0,
        help='weight for discriminator for the latent variable')
    parser.add_argument('--single_encoder', '-se', action='store_true',
                        help='enc_x = enc_y')
    parser.add_argument(
        '--z_ndown', type=int, default=2,
        help='number of down layers in discriminator for latent')
    parser.add_argument('--dis_z_start', type=int, default=1000,
                        help='start using dis_z after this iteration')
    args = parser.parse_args()
    # If a total epoch count is given, derive the lr-decay schedule from it:
    # decay over the second half of training.
    if args.epoch:
        args.lrdecay_period = args.epoch // 2
        args.lrdecay_start = args.epoch - args.lrdecay_period
    # A single --learning_rate overrides both per-network rates.
    if args.learning_rate:
        args.learning_rate_g = args.learning_rate
        args.learning_rate_d = args.learning_rate
    return (args)
def arguments():
    """Parse command-line options for the pix2pix-style (conditional GAN)
    trainer: paired A->B image translation driven by filename-pair lists.

    Side effects: appends a timestamped subdirectory ("<MMDD_HHMM>_cgan")
    to ``args.out``, saves the parsed args there via ``save_args``, and
    prints them.

    Returns:
        argparse.Namespace: parsed options, with ``gen_chs``/``dis_chs``
        filled in from the base channel counts when not given explicitly.
    """
    parser = argparse.ArgumentParser(
        description='chainer implementation of pix2pix')
    # ---- data ----
    parser.add_argument(
        '--train', '-t', default="__train__",
        help='text file containing image pair filenames for training')
    parser.add_argument(
        '--val', default="__test__",
        help='text file containing image pair filenames for validation')
    parser.add_argument('--btoa', action='store_true',
                        help='convert in the opposite way (B to A)')
    parser.add_argument('--imgtype', '-it', default="jpg",
                        help="image file type (file extension)")
    parser.add_argument('--argfile', '-a', help="specify args file to read")
    parser.add_argument('--from_col', '-c1', type=int, nargs="*", default=[0],
                        help='column index of FromImage')
    parser.add_argument('--to_col', '-c2', type=int, nargs="*", default=[1],
                        help='column index of ToImage')
    # ---- training schedule ----
    parser.add_argument('--batch_size', '-b', type=int, default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=400,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--root', '-R', default='.',
                        help='directory containing image files')
    parser.add_argument('--learning_rate_gen', '-lrg', type=float,
                        default=2e-4)
    parser.add_argument('--learning_rate_dis', '-lrd', type=float,
                        default=1e-4)
    parser.add_argument('--snapinterval', '-si', type=int, default=-1,
                        help='take snapshot every this epoch')
    parser.add_argument('--display_interval', type=int, default=500,
                        help='Interval of displaying log to console')
    parser.add_argument(
        '--nvis', type=int, default=3,
        help='number of images in visualisation after each epoch')
    parser.add_argument(
        '--crop_width', '-cw', type=int, default=None,
        help=
        'this value may have to be divisible by a large power of two (if you encounter errors)'
    )
    parser.add_argument(
        '--crop_height', '-ch', type=int, default=None,
        help=
        'this value may have to be divisible by a large power of two (if you encounter errors)'
    )
    parser.add_argument('--grey', action='store_true', help='greyscale')
    # ---- loss weights ----
    parser.add_argument('--lambda_rec_l1', '-l1', type=float, default=10.0,
                        help='weight for L1 reconstruction loss')
    parser.add_argument('--lambda_rec_l2', '-l2', type=float, default=0.0,
                        help='weight for L2 reconstruction loss')
    parser.add_argument('--lambda_dis', '-ldis', type=float, default=1.0,
                        help='weight for adversarial loss')
    parser.add_argument('--lambda_tv', '-ltv', type=float, default=0.0,
                        help='weight for total variation')
    parser.add_argument(
        '--lambda_mispair', '-lm', type=float, default=0,
        help='weight for discriminator rejecting mis-matched (real,real) pairs'
    )
    parser.add_argument('--tv_tau', '-tt', type=float, default=1e-3,
                        help='smoothing parameter for total variation')
    parser.add_argument(
        '--loss_ksize', '-lk', type=int, default=1,
        help=
        'take average pooling of this kernel size before computing L1 and L2 losses'
    )
    # ---- model loading / optimiser ----
    parser.add_argument('--load_optimizer', '-mo', action='store_true',
                        help='load optimizer parameters')
    parser.add_argument('--model_gen', '-m', default='')
    parser.add_argument('--model_dis', '-md', default='')
    # `optim` and `dtypes` are presumably module-level dicts defined outside
    # this view; TODO confirm.
    parser.add_argument('--optimizer', '-op', choices=optim.keys(),
                        default='Adam', help='optimizer')
    parser.add_argument(
        '--weight_decay', '-wd', type=float,
        default=1e-8,  #default: 1e-7
        help='weight decay for regularization')
    parser.add_argument('--weight_decay_norm', '-wn', choices=['l1', 'l2'],
                        default='l2',
                        help='norm of weight decay for regularization')
    parser.add_argument('--vis_freq', '-vf', type=int, default=None,
                        help='visualisation frequency in iteration')
    parser.add_argument('--dtype', '-dt', choices=dtypes.keys(),
                        default='fp32', help='floating point precision')
    parser.add_argument('--eqconv', '-eq', action='store_true',
                        help='Equalised Convolution')
    parser.add_argument('--spconv', '-sp', action='store_true',
                        help='Separable Convolution')
    parser.add_argument('--senet', '-se', action='store_true',
                        help='Enable Squeeze-and-Excitation mechanism')
    # data augmentation
    parser.add_argument('--random_translate', '-rt', type=int, default=4,
                        help='jitter input images by random translation')
    parser.add_argument('--noise', '-n', type=float, default=0,
                        help='strength of noise injection')
    parser.add_argument(
        '--noise_z', '-nz', type=float, default=0,
        help='strength of noise injection for the latent variable')
    # discriminator
    parser.add_argument('--dis_activation', '-da', default='lrelu',
                        choices=activation_func.keys())
    parser.add_argument(
        '--dis_ksize', '-dk', type=int,
        default=4,  # default 4
        help='kernel size for patchGAN discriminator')
    parser.add_argument(
        '--dis_chs', '-dc', type=int, nargs="*", default=None,
        help='Number of channels in down layers in discriminator')
    parser.add_argument(
        '--dis_basech', '-db', type=int, default=64,
        help=
        'the base number of channels in discriminator (doubled in each down-layer)'
    )
    parser.add_argument('--dis_ndown', '-dl', type=int, default=3,
                        help='number of down layers in discriminator')
    parser.add_argument('--dis_down', '-dd', default='down',
                        help='type of down layers in discriminator')
    parser.add_argument(
        '--dis_sample', '-ds', default='down',
        help='type of first conv layer for patchGAN discriminator')
    parser.add_argument('--dis_jitter', type=float, default=0,
                        help='jitter for discriminator label for LSGAN')
    parser.add_argument('--dis_dropout', '-ddo', type=float, default=None,
                        help='dropout ratio for discriminator')
    parser.add_argument('--dis_norm', '-dn', default='instance',
                        choices=norm_layer)
    parser.add_argument(
        '--dis_reg_weighting', '-dw', type=float, default=0,
        help=
        'regularisation of weighted discriminator. Set 0 to disable weighting')
    parser.add_argument('--dis_wgan', action='store_true', help='WGAN-GP')
    parser.add_argument('--dis_attention', action='store_true',
                        help='attention mechanism for discriminator')
    # generator
    parser.add_argument('--gen_activation', '-ga', default='relu',
                        choices=activation_func.keys())
    parser.add_argument('--gen_fc_activation', '-gfca', default='relu',
                        choices=activation_func.keys())
    parser.add_argument('--gen_out_activation', '-go', default='tanh',
                        choices=activation_func.keys())
    parser.add_argument('--gen_chs', '-gc', type=int, nargs="*", default=None,
                        help='Number of channels in down layers in generator')
    parser.add_argument('--gen_ndown', '-gl', type=int, default=3,
                        help='number of down layers in generator')
    parser.add_argument(
        '--gen_basech', '-gb', type=int, default=64,
        help=
        'the base number of channels in generator (doubled in each down-layer)'
    )
    parser.add_argument('--gen_fc', '-gfc', type=int, default=0,
                        help='number of fc layers before convolutional layers')
    parser.add_argument('--gen_nblock', '-gnb', type=int, default=9,
                        help='number of residual blocks in generators')
    parser.add_argument('--gen_ksize', '-gk', type=int, default=3,
                        help='kernel size for generator')
    parser.add_argument('--gen_sample', '-gs', default='none',
                        help='first and last conv layers for generator')
    parser.add_argument('--gen_down', '-gd', default='down',
                        help='down layers in generator')
    parser.add_argument('--gen_up', '-gu', default='resize',
                        help='up layers in generator')
    parser.add_argument('--gen_dropout', '-gdo', type=float, default=None,
                        help='dropout ratio for generator')
    parser.add_argument('--gen_norm', '-gn', default='instance',
                        choices=norm_layer)
    parser.add_argument('--unet', '-u', default='conv',
                        help='use u-net for generator')
    parser.add_argument('--skipdim', '-sd', type=int, default=4,
                        help='channel number for skip connections')
    args = parser.parse_args()
    # Results go to a timestamped subdirectory of --out.
    args.out = os.path.join(args.out,
                            dt.now().strftime('%m%d_%H%M') + "_cgan")
    # NOTE(review): wgan is unconditionally disabled here even though
    # --dis_wgan is parsed above -- looks intentional for this script,
    # but confirm against the trainer that reads args.wgan.
    args.wgan = False
    # Derive per-layer channel counts from the base count unless the user
    # gave an explicit list.
    if not args.gen_chs:
        args.gen_chs = [
            int(args.gen_basech) * (2**i) for i in range(args.gen_ndown)
        ]
    if not args.dis_chs:
        args.dis_chs = [
            int(args.dis_basech) * (2**i) for i in range(args.dis_ndown)
        ]
    save_args(args, args.out)
    print(args)
    print("\nresults are saved under: ", args.out)
    return (args)
def arguments():
    """Parse command-line options for the unpaired A<->B translation trainer
    (generators G: A -> B, F: B -> A) with DICOM/HU-specific options.

    Returns:
        argparse.Namespace: parsed options.  After parsing: the lr-decay
        schedule and ``--epoch`` are reconciled in both directions,
        ``--learning_rate`` (if set) overrides both per-network rates,
        ``gen_chs``/``dis_chs`` are derived from base channel counts when
        not given, and greyscale is forced for DICOM input.
    """
    parser = argparse.ArgumentParser()
    # ---- data / output locations ----
    parser.add_argument(
        '--root', '-R', default='data',
        help='Directory containing trainA, trainB, testA, testB')
    parser.add_argument('--batch_size', '-b', type=int, default=1)
    # NOTE(review): help text below is missing a closing ")".
    parser.add_argument(
        '--gpu', '-g', type=int, nargs="*", default=[0],
        help='GPU IDs (currently, only single-GPU usage is supported')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--argfile', '-a',
                        help="specify args file to load settings from")
    parser.add_argument('--imgtype', '-it', default="jpg",
                        help="image file type (file extension)")
    # ---- optimisation schedule ----
    parser.add_argument('--learning_rate', '-lr', type=float, default=None,
                        help='Learning rate')
    parser.add_argument(
        '--learning_rate_g', '-lrg', type=float, default=1e-4,
        help='Learning rate for generator')  # 2e-4 in the original paper
    parser.add_argument('--learning_rate_d', '-lrd', type=float, default=1e-4,
                        help='Learning rate for discriminator')
    parser.add_argument('--lrdecay_start', '-e1', type=int, default=25,
                        help='start lowering the learning rate (in epoch)')
    parser.add_argument('--lrdecay_period', '-e2', type=int, default=25,
                        help='period in epoch for lowering the learning rate')
    parser.add_argument('--epoch', '-e', type=int, default=None, help='epoch')
    parser.add_argument('--iteration', type=int, default=None,
                        help='number of iterations')
    parser.add_argument('--snapinterval', '-si', type=int, default=-1,
                        help='take snapshot every this epoch')
    parser.add_argument('--weight_decay', '-wd', type=float, default=0,
                        help='weight decay for regularization')  #1e-8
    # `optim`, `dtypes`, `activation_func`, `norm_layer`, `downlayer`,
    # `uplayer`, `unettype` are presumably module-level tables defined
    # outside this view; TODO confirm.
    parser.add_argument('--optimizer', '-op', choices=optim.keys(),
                        default='Adam', help='select optimizer')
    #
    parser.add_argument(
        '--crop_width', '-cw', type=int, default=None,
        help=
        'this value may have to be divisible by a large power of two (if you encounter errors)'
    )
    parser.add_argument(
        '--crop_height', '-ch', type=int, default=None,
        help=
        'this value may have to be divisible by a large power of two (if you encounter errors)'
    )
    parser.add_argument('--grey', action='store_true', help='greyscale')
    parser.add_argument('--load_optimizer', '-mo', action='store_true',
                        help='load optimizer parameters from file')
    parser.add_argument('--load_models', '-m', default='',
                        help='load models: specify enc_x/gen_g model file')
    parser.add_argument('--dtype', '-dt', choices=dtypes.keys(),
                        default='fp32', help='floating point precision')
    parser.add_argument('--eqconv', '-eq', action='store_true',
                        help='Enable Equalised Convolution')
    parser.add_argument('--spconv', '-sp', action='store_true',
                        help='Enable Separable Convolution')
    parser.add_argument('--senet', '-se', action='store_true',
                        help='Enable Squeeze-and-Excitation mechanism')
    # options for converter
    parser.add_argument('--output_analysis', '-oa', action='store_true',
                        help='Output analysis images in conversion')
    # data augmentation
    parser.add_argument('--random_translate', '-rt', type=int, default=4,
                        help='jitter input images by random translation')
    parser.add_argument('--noise', '-n', type=float, default=0,
                        help='strength of noise injection')
    parser.add_argument(
        '--noise_z', '-nz', type=float, default=0,
        help='strength of noise injection for the latent variable')
    ## DICOM specific
    parser.add_argument('--HU_base', '-hub', type=int, default=-500,
                        help='minimum HU value to be accounted for')
    parser.add_argument(
        '--HU_range', '-hur', type=int, default=700,
        help='the maximum HU value to be accounted for will be HU_base+HU_range'
    )
    parser.add_argument('--slice_range', '-sr', type=float, nargs="*",
                        default=None, help='')
    parser.add_argument(
        '--forceSpacing', '-fs', type=float, default=-1,
        help='rescale B to match the specified spacing')  # 0.7634,
    parser.add_argument('--num_slices', '-ns', type=int, default=1,
                        help='number of slices stacked together')
    # discriminator
    parser.add_argument('--dis_activation', '-da', default='lrelu',
                        choices=activation_func.keys())
    parser.add_argument(
        '--dis_chs', '-dc', type=int, nargs="*", default=None,
        help='Number of channels in down layers in discriminator')
    parser.add_argument(
        '--dis_basech', '-db', type=int, default=64,
        help=
        'the base number of channels in discriminator (doubled in each down-layer)'
    )
    parser.add_argument('--dis_ndown', '-dl', type=int, default=3,
                        help='number of down layers in discriminator')
    parser.add_argument('--dis_ksize', '-dk', type=int, default=4,
                        help='kernel size for patchGAN discriminator')
    parser.add_argument(
        '--dis_down', '-dd', default='down', choices=downlayer,
        help='type of down layers in discriminator')  ## default down
    parser.add_argument(
        '--dis_sample', '-ds', default='down',
        help='type of first conv layer for patchGAN discriminator')
    parser.add_argument('--dis_jitter', type=float, default=0,
                        help='jitter for discriminator label for LSGAN')
    parser.add_argument('--dis_dropout', '-ddo', type=float, default=None,
                        help='dropout ratio for discriminator')
    parser.add_argument('--dis_norm', '-dn', default='instance',
                        choices=norm_layer)
    parser.add_argument(
        '--dis_reg_weighting', '-dw', type=float, default=0,
        help=
        'regularisation of weighted discriminator. Set 0 to disable weighting')
    parser.add_argument('--dis_wgan', action='store_true', help='WGAN-GP')
    # generator: G: A -> B, F: B -> A
    parser.add_argument('--gen_activation', '-ga', default='relu',
                        choices=activation_func.keys())
    parser.add_argument('--gen_out_activation', '-go', default='tanh',
                        choices=activation_func.keys())
    parser.add_argument('--gen_fc_activation', '-gfca', default='relu',
                        choices=activation_func.keys())
    parser.add_argument('--gen_chs', '-gc', type=int, nargs="*", default=None,
                        help='Number of channels in down layers in generator')
    parser.add_argument('--gen_ndown', '-gl', type=int, default=3,
                        help='number of down layers in generator')
    parser.add_argument(
        '--gen_basech', '-gb', type=int, default=32,
        help=
        'the base number of channels in generator (doubled in each down-layer)'
    )
    parser.add_argument('--gen_fc', '-gfc', type=int, default=0,
                        help='number of fc layers before convolutional layers')
    parser.add_argument('--gen_nblock', '-gnb', type=int, default=9,
                        help='number of residual blocks in generators')
    parser.add_argument(
        '--gen_ksize', '-gk', type=int, default=3,
        help='kernel size for generator')  # 4 in the original paper
    parser.add_argument('--gen_sample', '-gs', default='none-7',
                        help='first and last conv layers for generator')
    parser.add_argument('--gen_down', '-gd', default='down',
                        choices=downlayer, help='down layers in generator')
    parser.add_argument('--gen_up', '-gu', default='resize', choices=uplayer,
                        help='up layers in generator')
    parser.add_argument('--gen_dropout', '-gdo', type=float, default=None,
                        help='dropout ratio for generator')
    parser.add_argument('--gen_norm', '-gn', default='instance',
                        choices=norm_layer)
    parser.add_argument('--unet', '-u', default='conv', choices=unettype,
                        help='use u-net skip connections for generator')
    parser.add_argument(
        '--single_encoder', '-senc', action='store_true',
        help='use the same encoder enc_x = enc_y for both domains')
    parser.add_argument(
        '--gen_start', type=int, default=0,
        help=
        'start using discriminator for generator training after this number of iterations'
    )
    parser.add_argument(
        '--report_start', type=int, default=1000,
        help='start reporting losses after this number of iterations')
    ## loss function
    parser.add_argument('--lambda_A', '-lcA', type=float, default=10.0,
                        help='weight for cycle loss FG=Id:A -> B -> A')
    parser.add_argument('--lambda_B', '-lcB', type=float, default=10.0,
                        help='weight for cycle loss GF=Id:B -> A -> B')
    parser.add_argument('--lambda_Az', '-lcAz', type=float, default=10.0,
                        help='weight for autoencoder loss Id:A -> Z -> A')
    parser.add_argument('--lambda_Bz', '-lcBz', type=float, default=10.0,
                        help='weight for autoencoder loss Id:B -> Z -> B')
    parser.add_argument('--lambda_identity_x', '-lix', type=float, default=0,
                        help='lambda for perceptual loss for A -> B')
    parser.add_argument('--lambda_identity_y', '-liy', type=float, default=0,
                        help='lambda for perceptual loss for B -> A')
    parser.add_argument(
        '--perceptual_layer', '-pl', type=str, default="conv4_2",
        help='The name of the layer of VGG16 used for perceptual loss')
    parser.add_argument('--lambda_grad', '-lg', type=float, default=0,
                        help='lambda for gradient loss')
    parser.add_argument('--lambda_air', '-la', type=float, default=0,
                        help='lambda for air comparison loss')
    parser.add_argument(
        '--lambda_domain', '-ld', type=float, default=0,
        help=
        'lambda for domain preservation: G (resp. F) restricted on A (resp. B) should be Id'
    )
    parser.add_argument('--lambda_idempotence', '-lidm', type=float,
                        default=0, help='lambda for idempotence: G^2=F^2=Id')
    parser.add_argument('--lambda_dis_y', '-ly', type=float, default=1,
                        help='lambda for discriminator for domain B')
    parser.add_argument('--lambda_dis_x', '-lx', type=float, default=1,
                        help='lambda for discriminator for domain A')
    parser.add_argument(
        '--lambda_tv', '-ltv', type=float, default=0,
        help='lambda for the total variation')  ## typically, 1e-3
    parser.add_argument('--lambda_wgan_gp', '-lwgp', type=float, default=10,
                        help='lambda for the gradient penalty for WGAN')
    parser.add_argument('--lambda_reg', '-lreg', type=float, default=0,
                        help='weight for regularisation for encoders')
    parser.add_argument(
        '--lambda_dis_z', '-lz', type=float, default=0,
        help='weight for discriminator for the latent variable')
    parser.add_argument('--tv_tau', '-tt', type=float, default=1e-3,
                        help='smoothing parameter for total variation')
    parser.add_argument('--tv_method', '-tm', default='abs',
                        choices=['abs', 'sobel', 'usual'],
                        help='method of calculating total variation')
    ## visualisation during training
    parser.add_argument(
        '--nvis_A', type=int, default=3,
        help='number of images in A to visualise after each epoch')
    parser.add_argument(
        '--nvis_B', type=int, default=3,
        help='number of images in B to visualise after each epoch')
    parser.add_argument('--vis_freq', '-vf', type=int, default=None,
                        help='visualisation frequency in iteration')
    args = parser.parse_args()
    # Reconcile --epoch with the lr-decay schedule: an explicit epoch count
    # derives the schedule (decay over the second half); otherwise the total
    # epoch count is derived from the schedule.
    if args.epoch:
        args.lrdecay_period = args.epoch // 2
        args.lrdecay_start = args.epoch - args.lrdecay_period
    else:
        args.epoch = args.lrdecay_start + args.lrdecay_period
    # A single --learning_rate overrides both per-network rates.
    if args.learning_rate:
        args.learning_rate_g = args.learning_rate
        args.learning_rate_d = args.learning_rate
    # Derive per-layer channel counts from the base count unless the user
    # gave an explicit list.
    if not args.gen_chs:
        args.gen_chs = [
            int(args.gen_basech) * (2**i) for i in range(args.gen_ndown)
        ]
    if not args.dis_chs:
        args.dis_chs = [
            int(args.dis_basech) * (2**i) for i in range(args.dis_ndown)
        ]
    # DICOM images are single-channel (HU values), so force greyscale.
    if args.imgtype == "dcm":
        args.grey = True
    return (args)