lambda_mse, lambda_adv, lambda_perc = args.lmse, args.ladv, args.lperc

itr_out_dir = args.expName + '-itrOut'
if os.path.isdir(itr_out_dir):
    shutil.rmtree(itr_out_dir)   # start from a clean output folder
os.mkdir(itr_out_dir)            # to save temp output

# redirect print to a file
sys.stdout = open('%s/%s' % (itr_out_dir, 'iter-prints.log'), 'w')

print('X train: {}\nY train: {}\nX test: {}\nY test: {}'.format(
    args.xtrain, args.ytrain, args.xtest, args.ytest))

# build minibatch data generator with prefetch
mb_data_iter = bkgdGen(data_generator=gen_train_batch_bg(x_fn=args.xtrain,
                                                         y_fn=args.ytrain,
                                                         mb_size=mb_size,
                                                         in_depth=in_depth,
                                                         img_size=img_size),
                       max_prefetch=16)

# build the generator and discriminator networks
generator = make_generator_model(input_shape=(None, None, in_depth), nlayers=args.lunet)
discriminator = make_discriminator_model(input_shape=(img_size, img_size, 1))

# pre-trained VGG19 (local weights, no classification top) as feature extractor for the perceptual loss
feature_extractor_vgg = tf.keras.applications.VGG19(weights='vgg19_weights_notop.h5',
                                                    include_top=False)

# helper to compute binary cross-entropy loss on logits (used by the adversarial losses)
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_output, fake_output):
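    # Sketch (assumption): the standard cross-entropy GAN discriminator loss,
    # built from the `cross_entropy` helper defined above; the original body
    # is not shown in this excerpt.
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)    # real patches labelled 1
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)   # generated patches labelled 0
    return real_loss + fake_loss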
Methodname = 'CPCE_3D'
lambda_p = 0.1
in_depth = 1
Networkfolder = Methodname
is_transfer_learning = False
###################################################
xtrain = 'npy_frames_split_1024_train/input_*'
ytrain = 'npy_frames_split_1024_train/target_*'
img_size = 1024

# build minibatch data generator with prefetch
mb_data_iter = bkgdGen(data_generator=gen_train_batch_bg(x_fn=xtrain,
                                                         y_fn=ytrain,
                                                         mb_size=input_width,
                                                         in_depth=in_depth,
                                                         img_size=img_size),
                       max_prefetch=16)

# Generator: placeholder for stacks of in_depth adjacent input slices
X = tf.placeholder(dtype=tf.float32,
                   shape=[batch_size, in_depth, input_width, input_height, 1])
with tf.variable_scope('generator_model') as scope:
    Y_ = CPCE_3D(X, padding='valid')

# ground-truth target patches
real_data = tf.placeholder(dtype=tf.float32,
                           shape=[batch_size, output_width, output_height, 1])
# per-sample interpolation coefficient (typically used for a WGAN-GP gradient penalty)
alpha = tf.random_uniform(shape=[batch_size, 1], minval=0., maxval=1.)

# Discriminator
with tf.variable_scope('discriminator_model') as scope:
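    # Sketch (assumption): a discriminator scope of this kind typically scores
    # real and generated patches and builds a WGAN-GP gradient penalty from the
    # `alpha` coefficient drawn above. The `discriminator` function and every
    # line below are hypothetical illustrations, not the original code.
    D_real = discriminator(real_data)       # critic score for ground-truth patches
    scope.reuse_variables()                 # share weights across the following passes
    D_fake = discriminator(Y_)              # critic score for generator output

    # interpolate real and generated patches per sample for the gradient penalty
    alpha_b = tf.reshape(alpha, [batch_size, 1, 1, 1])
    interpolates = alpha_b * real_data + (1. - alpha_b) * Y_
    D_interp = discriminator(interpolates)

    # penalize critic gradients whose norm deviates from 1
    grads = tf.gradients(D_interp, [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.))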