## Initialize the variable theta, which stores the weights and biases
theta_dncnn = LDAMP.init_vars_DnCNN(init_mu, init_sigma)

## Construct the reconstruction model
# x_hat = LDAMP.DnCNN(y_measured, None, theta_dncnn, training=training_tf)
[x_hat, div_overN] = LDAMP.DnCNN_wrapper(y_measured, None, theta_dncnn, training=training_tf)

## Define loss and optimizer
nfp = np.float32(height_img * width_img)
if useSURE:
    # Train without ground truth using a Monte-Carlo SURE estimate of the MSE
    cost = LDAMP.MCSURE_loss(x_hat, div_overN, y_measured, sigma_w_tf)
else:
    # Supervised training: per-pixel MSE against the ground-truth images
    cost = tf.nn.l2_loss(x_true - x_hat) * 1. / nfp

LDAMP.CountParameters()

## Load and preprocess training data
# Training data was generated by GeneratingTrainingImages.m and ConvertImagestoNpyArrays.py
train_images = np.load('./TrainingData/TrainingData_patch' + str(height_img) + '.npy')
assert (len(train_images) >= n_Train_Images), "Requested too much training data"
train_images = train_images[range(n_Train_Images), 0, :, :]
val_images = np.load('./TrainingData/ValidationData_patch' + str(height_img) + '.npy')
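# For intuition, a minimal sketch of the MC-SURE objective used above when
# useSURE is set. This is an assumed form, not LDAMP.MCSURE_loss itself, and
# mcsure_loss_sketch is a hypothetical helper. For y = x + w with
# w ~ N(0, sigma_w^2 I), SURE estimates the denoiser's MSE without ground truth:
#   SURE(x_hat) = ||y - x_hat||^2/n - sigma_w^2 + 2*sigma_w^2*div(x_hat)/n
def mcsure_loss_sketch(x_hat, div_overN, y, sigma_w):
    # Per-image data fidelity ||y - x_hat||^2 / n (pixels along axis 0)
    fidelity = tf.reduce_mean(tf.square(y - x_hat), axis=0)
    # Sum the unbiased risk estimate over the batch
    return tf.reduce_sum(fidelity - sigma_w ** 2 + 2. * sigma_w ** 2 * div_overN)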
[A_handle, At_handle, A_val, A_val_tf] = LDAMP.GenerateMeasurementOperators(measurement_mode)
y_measured = LDAMP.GenerateNoisyCSData_handles(x_true, A_handle, sigma_w, A_val_tf)

## Construct the reconstruction model
if alg == 'DAMP':
    (x_hat, MSE_history, NMSE_history, PSNR_history, r_final, rvar_final, div_overN) = LDAMP.LDAMP(
        y_measured, A_handle, At_handle, A_val_tf, theta, x_true,
        tie=tie_weights, training=training_tf, LayerbyLayer=LayerbyLayer)
elif alg == 'DIT':
    (x_hat, MSE_history, NMSE_history, PSNR_history) = LDAMP.LDIT(
        y_measured, A_handle, At_handle, A_val_tf, theta, x_true,
        tie=tie_weights, training=training_tf, LayerbyLayer=LayerbyLayer)
else:
    raise ValueError('alg was not a supported option')

## Define loss and determine which variables to train
nfp = np.float32(height_img * width_img)
if loss_func == 'SURE':
    assert alg == 'DAMP', "Only LDAMP supports training with SURE"
    cost = LDAMP.MCSURE_loss(x_hat, div_overN, r_final, tf.sqrt(rvar_final))
elif loss_func == 'GSURE':
    assert alg == 'DAMP', "Only LDAMP currently supports training with GSURE"
    # Form the pseudoinverse A^+ = A^T (A A^T)^{-1} and the projection
    # P = A^+ A onto the row space of A
    temp0 = tf.matmul(A_val_tf, A_val_tf, transpose_b=True)
    temp1 = tf.matrix_inverse(temp0)
    pinv_A = tf.matmul(A_val_tf, temp1, transpose_a=True)
    P = tf.matmul(pinv_A, A_val_tf)
    # Treat LDAMP/LDIT as a function of A^t y to calculate the divergence
    Aty_tf = At_handle(A_val_tf, y_measured)
    # Overwrite the existing definition of x_hat
    (x_hat, _, _, _, _, _, _) = LDAMP.LDAMP_Aty(
        Aty_tf, A_handle, At_handle, A_val_tf, theta, x_true,
        tie=tie_weights, training=training_tf, LayerbyLayer=LayerbyLayer)
    if sigma_w == 0.:  # Not sure if TF is smart enough to avoid computing MCdiv when it doesn't have to
        MCdiv = 0.
    else:
        # Calculate the MC divergence of P*LDAMP(Aty)
        epsilon = tf.maximum(.001 * tf.reduce_max(Aty_tf, axis=0), .00001)
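        # (Sketch, not the toolbox's exact code.) epsilon sets the per-image step
        # size for a Monte-Carlo divergence estimate of f(.) = P * LDAMP(A^t y):
        #   div(f) ~= eta^T (f(Aty + epsilon*eta) - f(Aty)) / epsilon,  eta ~ N(0, I)
        # GSURE then combines this divergence with the projected residual to
        # estimate the MSE of P*x_hat without access to x_true.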