# NOTE(review): truncated scrape fragment — it begins mid-function (note the
# jump from column 0 to column 4) and ends mid-statement at
# `moving_variance = [`. Only comments are added; code is left byte-identical.
saver_best = tf.train.Saver()  # defaults to saving all variables
    saver_dict = {}  # checkpoint-key -> graph-variable map for the selective restore below
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        sess.run(
            tf.global_variables_initializer()
        )  #Seems to be necessary for the batch normalization layers for some reason.

        # if FLAGS.debug:
        #     sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        #     sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)

        start_time = time.time()
        print("Load Initial Weights ...")
        # Reload earlier weights either when explicitly resuming or when a
        # previous learning-rate stage already produced a checkpoint.
        if ResumeTraining or learning_rate != learning_rates[0]:
            ##Load previous values for the weights and BNs
            saver_initvars_name_chckpt = LDAMP.GenDnCNNFilename(
                sigma_w_min, sigma_w_max, useSURE=useSURE) + ".ckpt"
            # Register every conv-layer weight under its checkpoint key "l<i>/w".
            for l in range(0, n_DnCNN_layers):
                saver_dict.update({"l" + str(l) + "/w": theta_dncnn[0][l]})
            # Interior layers (1 .. n-2) additionally carry batch-norm
            # variables; resolve each one in the graph by its ":0" tensor name.
            for l in range(1, n_DnCNN_layers -
                           1):  #Associate variance, means, and beta
                gamma_name = "l" + str(l) + "/BN/gamma:0"
                beta_name = "l" + str(l) + "/BN/beta:0"
                var_name = "l" + str(l) + "/BN/moving_variance:0"
                mean_name = "l" + str(l) + "/BN/moving_mean:0"
                gamma = [
                    v for v in tf.global_variables() if v.name == gamma_name
                ][0]
                beta = [
                    v for v in tf.global_variables() if v.name == beta_name
                ][0]
                # Fragment is cut off here in this view of the source.
                moving_variance = [
Beispiel #2
0
             # NOTE(review): truncated scrape fragment — it begins mid-loop
             # (references an in-flight `iter`, `l`, and an unseen `gamma_name`
             # assignment) and is cut off after the last saver_dict.update.
             # Only comments are added; code is left byte-identical.
             beta_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/beta:0"
             var_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_variance:0"
             mean_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_mean:0"
             # Resolve each batch-norm variable in the current graph by tensor name.
             gamma = [v for v in tf.global_variables() if v.name == gamma_name][0]
             beta = [v for v in tf.global_variables() if v.name == beta_name][0]
             moving_variance = [v for v in tf.global_variables() if v.name == var_name][0]
             moving_mean = [v for v in tf.global_variables() if v.name == mean_name][0]
             # Register them under per-iteration checkpoint keys "Iter<k>/l<i>/BN/...".
             saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/BN/gamma": gamma})
             saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/BN/beta": beta})
             saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/BN/moving_variance": moving_variance})
             saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/BN/moving_mean": moving_mean})
         # Restore only the variables collected in saver_dict from the checkpoint.
         saver_initvars = tf.train.Saver(saver_dict)
         saver_initvars.restore(sess, saver_initvars_name_chckpt)
 if InitWeightsMethod=='denoiser':
     #load initial weights that were trained on a denoising problem
     saver_initvars_name_chckpt=LDAMP.GenDnCNNFilename(300./255.,500./255.)+".ckpt"
     iter = 0
     for l in range(0, n_DnCNN_layers):
         saver_dict.update({"l" + str(l) + "/w": theta[iter][0][l]})#, "l" + str(l) + "/b": theta[iter][1][l]})
     for l in range(1,n_DnCNN_layers-1):#Associate variance, means, and beta
         # Graph names carry an "Iter<k>/" prefix here, unlike the checkpoint keys.
         gamma_name = "Iter"+str(iter)+"/l" + str(l) + "/BN/gamma:0"
         beta_name="Iter"+str(iter)+"/l" + str(l) + "/BN/beta:0"
         var_name="Iter"+str(iter)+"/l" + str(l) + "/BN/moving_variance:0"
         mean_name="Iter"+str(iter)+"/l" + str(l) + "/BN/moving_mean:0"
         gamma = [v for v in tf.global_variables() if v.name == gamma_name][0]
         beta = [v for v in tf.global_variables() if v.name == beta_name][0]
         moving_variance = [v for v in tf.global_variables() if v.name == var_name][0]
         moving_mean = [v for v in tf.global_variables() if v.name == mean_name][0]
         # Map graph variables back to un-prefixed checkpoint keys "l<i>/BN/...".
         saver_dict.update({"l" + str(l) + "/BN/gamma": gamma})
         saver_dict.update({"l" + str(l) + "/BN/beta": beta})
         saver_dict.update({"l" + str(l) + "/BN/moving_variance": moving_variance})
         # Fragment is cut off here in this view of the source
         # (the moving_mean entry presumably follows — cannot confirm).
Beispiel #3
0
# NOTE(review): truncated scrape fragment — it ends mid-statement inside the
# `moving_variance = [` comprehension. Only comments are added; code is left
# byte-identical.
test_images = test_images[:, 0, :, :]
assert (len(test_images) >= n_Test_Images), "Requested too much Test data"

# Flatten each image into a column: x_test has one image per column
# after the transpose — assumes test_images is (n, height, width) here;
# TODO confirm against the loader.
x_test = np.transpose(
    np.reshape(test_images, (-1, height_img * width_img * channel_img)))

# with tf.Session() as sess:
#     y_test=sess.run(y_measured,feed_dict={x_true: x_test, A_val_tf: A_val})#All the batches will use the same measurement matrix

## Test the Model
saver = tf.train.Saver()  # defaults to saving all variables
saver_dict = {}  # checkpoint-key -> graph-variable map for selective restore

with tf.Session() as sess:
    if tie_weights == 1:  # Load weights from pretrained denoiser
        save_name = LDAMP.GenDnCNNFilename(80. / 255.) + ".ckpt"
        # Register conv weights under checkpoint keys "l<i>/w".
        for l in range(0, n_DnCNN_layers):
            saver_dict.update({"l" + str(l) + "/w": theta[0][0][l]
                               })  #, "l" + str(l) + "/b": theta[0][1][l]})
        for l in range(1, n_DnCNN_layers -
                       1):  # Associate variance, means, and beta
            # Graph variable names carry an "Iter0/" prefix; resolve by tensor name.
            gamma_name = "Iter" + str(0) + "/l" + str(l) + "/BN/gamma:0"
            beta_name = "Iter" + str(0) + "/l" + str(l) + "/BN/beta:0"
            var_name = "Iter" + str(0) + "/l" + str(
                l) + "/BN/moving_variance:0"
            mean_name = "Iter" + str(0) + "/l" + str(l) + "/BN/moving_mean:0"
            gamma = [v for v in tf.global_variables()
                     if v.name == gamma_name][0]
            beta = [v for v in tf.global_variables() if v.name == beta_name][0]
            # Fragment is cut off here in this view of the source.
            moving_variance = [
                v for v in tf.global_variables() if v.name == var_name
Beispiel #4
0
    # NOTE(review): truncated scrape fragment — it begins mid-function (the
    # commented `elif` chain below has no visible `if`, and `sess`, `saver`,
    # `x_hat`, `y_test` are defined outside this view) and is cut off right
    # after `fig2 = plt.figure()`. Only comments are added; code untouched.
    #     sigma_w_min = 80.
    #     sigma_w_max = 100.
    # elif 255.*sigma_w < 150.:
    #     sigma_w_min = 100.
    #     sigma_w_max = 150.
    # elif 255.*sigma_w < 300.:
    #     sigma_w_min = 150.
    #     sigma_w_max = 300.
    # else:
    #     sigma_w_min = 300.
    #     sigma_w_max = 500.
    # Current behavior: a single exact noise level (min == max) instead of
    # the bucketed range the commented-out chain above used to pick.
    sigma_w_min = sigma_w * 255.
    sigma_w_max = sigma_w * 255.

    save_name = LDAMP.GenDnCNNFilename(sigma_w_min / 255.,
                                       sigma_w_max / 255.,
                                       useSURE=useSURE)
    save_name_chckpt = save_name + ".ckpt"
    saver.restore(sess, save_name_chckpt)

    print("Reconstructing Signal")
    start_time = time.time()
    [reconstructed_test_images] = sess.run([x_hat],
                                           feed_dict={y_measured: y_test})
    time_taken = time.time() - start_time
    # Display the first test image, reshaped from its flattened column form.
    fig1 = plt.figure()
    plt.imshow(np.transpose(np.reshape(x_test[:, 0], (height_img, width_img))),
               interpolation='nearest',
               cmap='gray')
    plt.show()
    fig2 = plt.figure()