    new_sampling_rate=sampling_rate_test, \
    new_BATCH_SIZE=BATCH_SIZE, new_sigma_w=sigma_w, new_n=n, new_m=m, new_training=False,
    use_adaptive_weights=DenoiserbyDenoiser)
LDAMP.ListNetworkParameters()

# tf Graph input
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])

# Create handles for the measurement operator
[A_handle, At_handle, A_val, A_val_tf] = LDAMP.GenerateMeasurementOperators(measurement_mode)

## Initialize the variable theta which stores the weights and biases
if tie_weights:
    # One set of DnCNN weights shared across every unrolled iteration
    theta = [None]
    with tf.variable_scope("Iter" + str(0)):
        theta_thisIter = LDAMP.init_vars_DnCNN(init_mu, init_sigma)
    theta[0] = theta_thisIter
elif DenoiserbyDenoiser:
    # One denoiser per noise-level bin; these bins are currently hardcoded
    # within LearnedDAMP_functionhelper
    noise_min_stds = [0, 10, 20, 40, 60, 80, 100, 150, 300]
    noise_max_stds = [10, 20, 40, 60, 80, 100, 150, 300, 500]
    theta = [None] * len(noise_min_stds)
    for noise_level in range(len(noise_min_stds)):
        with tf.variable_scope("Adaptive_NL" + str(noise_level)):
            theta[noise_level] = LDAMP.init_vars_DnCNN(init_mu, init_sigma)
else:
    # Untied weights: a separate set of DnCNN weights for every DAMP layer
    n_layers_trained = n_DAMP_layers
    theta = [None] * n_layers_trained
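    # --- Sketch (assumption, not verbatim repository code): the untied case
    # presumably continues like the LDAMP training excerpt further below,
    # initializing one set of DnCNN weights per unrolled D-AMP layer:
    for iter in range(n_layers_trained):
        with tf.variable_scope("Iter" + str(iter)):
            theta[iter] = LDAMP.init_vars_DnCNN(init_mu, init_sigma)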
    new_filter_height=filter_height, new_filter_width=filter_width, new_num_filters=num_filters, \
    new_n_DnCNN_layers=n_DnCNN_layers, new_n_DAMP_layers=None, new_sampling_rate=None, \
    new_BATCH_SIZE=BATCH_SIZE, new_sigma_w=None, new_n=n, new_m=None, new_training=True)
LDAMP.ListNetworkParameters()

# tf Graph input
training_tf = tf.placeholder(tf.bool, name='training')
sigma_w_tf = tf.placeholder(tf.float32)
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])

## Construct the measurement model and handles/placeholders
y_measured = LDAMP.AddNoise(x_true, sigma_w_tf)

## Initialize the variable theta which stores the weights and biases
theta_dncnn = LDAMP.init_vars_DnCNN(init_mu, init_sigma)

## Construct the reconstruction model
# x_hat = LDAMP.DnCNN(y_measured, None, theta_dncnn, training=training_tf)
[x_hat, div_overN] = LDAMP.DnCNN_wrapper(y_measured, None, theta_dncnn, training=training_tf)

## Define loss and optimizer
nfp = np.float32(height_img * width_img)
if useSURE:
    cost = LDAMP.MCSURE_loss(x_hat, div_overN, y_measured, sigma_w_tf)
else:
    cost = tf.nn.l2_loss(x_true - x_hat) * 1. / nfp
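# --- Sketch (an assumption about the quantity MCSURE_loss estimates, not the
# repository's implementation). Stein's unbiased risk estimate lets the
# denoiser f be trained without clean images: for y = x + w with
# w ~ N(0, sigma_w^2 I) and N pixels per image,
#   SURE = ||y - f(y)||^2 / N - sigma_w^2 + 2 * sigma_w^2 * div(f(y)) / N,
# where div(f(y))/N is the Monte Carlo divergence estimate (div_overN above).
# In this graph's terms the estimate could be written as:
sure_cost_sketch = (tf.reduce_sum(tf.square(y_measured - x_hat)) / (nfp * BATCH_SIZE)
                    - sigma_w_tf ** 2
                    + 2. * sigma_w_tf ** 2 * tf.reduce_mean(div_overN))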
    new_BATCH_SIZE=BATCH_SIZE, new_sigma_w=sigma_w, new_n=n, new_m=m, new_training=True)
LDAMP.ListNetworkParameters()

# tf Graph input
training_tf = tf.placeholder(tf.bool, name='training')
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])

## Initialize the variable theta which stores the weights and biases
if tie_weights:
    n_layers_trained = 1  # a single weight set is shared across all layers
else:
    n_layers_trained = n_DAMP_layers
theta = [None] * n_layers_trained
for iter in range(n_layers_trained):
    with tf.variable_scope("Iter" + str(iter)):
        theta_thisIter = LDAMP.init_vars_DnCNN(init_mu, init_sigma)
        theta[iter] = theta_thisIter

## Construct the measurement model and handles/placeholders
[A_handle, At_handle, A_val, A_val_tf] = LDAMP.GenerateMeasurementOperators(measurement_mode)
y_measured = LDAMP.GenerateNoisyCSData_handles(x_true, A_handle, sigma_w, A_val_tf)

## Construct the reconstruction model
if alg == 'DAMP':
    (x_hat, MSE_history, NMSE_history, PSNR_history, r_final, rvar_final, div_overN) = \
        LDAMP.LDAMP(y_measured, A_handle, At_handle, A_val_tf, theta, x_true,
                    tie=tie_weights, training=training_tf, LayerbyLayer=LayerbyLayer)
elif alg == 'DIT':
    (x_hat, MSE_history, NMSE_history, PSNR_history) = \
        LDAMP.LDIT(y_measured, A_handle, At_handle, A_val_tf, theta, x_true,
                   tie=tie_weights, training=training_tf, LayerbyLayer=LayerbyLayer)
else:
    raise ValueError('alg was not a supported option')

## Define loss and determine which variables to train
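# --- Sketch (assumption, not verbatim repository code) of what this section
# might contain: a per-pixel MSE cost, minimized either end to end or, when
# LayerbyLayer is set, over only the newest layer's variable scope.
# learning_rate is assumed to be defined earlier in the script.
nfp = np.float32(height_img * width_img)
cost = tf.nn.l2_loss(x_true - x_hat) * 1. / nfp
if LayerbyLayer:
    # Train only the most recently added unrolled iteration
    vars_to_train = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                      scope="Iter" + str(n_DAMP_layers - 1))
else:
    vars_to_train = tf.trainable_variables()
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(
    cost, var_list=vars_to_train)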