Example #1
0
                       new_n_DnCNN_layers=n_DnCNN_layers, new_n_DAMP_layers=None,
                       new_sampling_rate=None, \
                       new_BATCH_SIZE=BATCH_SIZE, new_sigma_w=sigma_w, new_n=n, new_m=None, new_training=False)
# Print the network hyperparameters configured by the (truncated) SetNetworkParams
# call above, for logging/inspection.
LDAMP.ListNetworkParameters()

# tf Graph input
# One flattened image (length n) per column, BATCH_SIZE columns.
# NOTE(review): TF1-style placeholder API — this script targets TensorFlow 1.x.
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])

## Construct the measurement model and handles/placeholders
# Denoising setup: the "measurement" is the clean signal corrupted by additive
# noise with standard deviation sigma_w (fixed at graph-build time here).
y_measured = LDAMP.AddNoise(x_true, sigma_w)

## Initialize the variable theta which stores the weights and biases
theta_dncnn = LDAMP.init_vars_DnCNN(init_mu, init_sigma)

## Construct the reconstruction model
# training=False: run the DnCNN denoiser in inference mode
# (presumably fixes batch-norm statistics — confirm in LDAMP.DnCNN).
x_hat = LDAMP.DnCNN(y_measured, None, theta_dncnn, training=False)

# Report the total number of trainable parameters in the model.
LDAMP.CountParameters()

## Load and Preprocess Test Data
# Large images use the standard test set; small patch sizes use the patch set.
if height_img > 50:
    test_im_name = "./TrainingData/StandardTestData_" + str(
        height_img) + "Res.npy"
else:
    test_im_name = "./TrainingData/TestData_patch" + str(height_img) + ".npy"

test_images = np.load(test_im_name)
# Drop axis 1 by taking its first slice: (N, 1, H, W) -> (N, H, W).
# NOTE(review): assumes axis 1 is a singleton channel axis — verify .npy layout.
test_images = test_images[:, 0, :, :]
assert (len(test_images) >= BATCH_SIZE), "Requested too much Test data"

x_test = np.transpose(
Example #2
0
                       new_sampling_rate=None, \
                       new_BATCH_SIZE=BATCH_SIZE, new_sigma_w=None, new_n=n, new_m=None, new_training=True)
# Print the network hyperparameters configured by the (truncated) SetNetworkParams
# call above, for logging/inspection.
LDAMP.ListNetworkParameters()

# tf Graph input
# sigma_w is a placeholder here (not a constant) so the noise level can be
# varied per training step. NOTE(review): TF1-style placeholder API.
sigma_w_tf = tf.placeholder(tf.float32)
# One flattened image (length n) per column, BATCH_SIZE columns.
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])

## Initialize the variable theta which stores the weights and biases
theta_dncnn = LDAMP.init_vars_DnCNN(init_mu, init_sigma)

## Construct the measurement model and handles/placeholders
# Training target: clean images corrupted with noise of std sigma_w_tf.
y_measured = LDAMP.AddNoise(x_true, sigma_w_tf)

## Construct the reconstruction model
# reuse=True: the network's variables were already created above in
# init_vars_DnCNN, so this graph pass must reuse them rather than recreate them.
x_hat = LDAMP.DnCNN(y_measured, None, theta_dncnn, reuse=True)

## Define loss and optimizer
# Normalized squared error ||x_true - x_hat||^2 / ||x_true||^2
# (tf.nn.l2_loss halves both norms, so the 1/2 factors cancel).
cost = tf.nn.l2_loss(x_true - x_hat) / tf.nn.l2_loss(x_true)

# Report the total number of trainable parameters in the model.
LDAMP.CountParameters()

## Load and Preprocess Training Data
#Training data was generated by GeneratingTrainingImages.m and ConvertImagestoNpyArrays.py
train_images = np.load('./TrainingData/TrainingData_patch' + str(height_img) +
                       '.npy')
# Keep the first n_Train_Images and drop axis 1: (N, 1, H, W) -> (N, H, W).
# NOTE(review): assumes axis 1 is a singleton channel axis — verify .npy layout.
train_images = train_images[range(n_Train_Images), 0, :, :]
assert (len(train_images) >=
        n_Train_Images), "Requested too much training data"

val_images = np.load('./TrainingData/ValidationData_patch' + str(height_img) +
Example #3
0
                       new_sampling_rate=None, \
                       new_BATCH_SIZE=BATCH_SIZE, new_sigma_w=sigma_w, new_n=n, new_m=None, new_training=False)
# Print the network hyperparameters configured by the (truncated) SetNetworkParams
# call above, for logging/inspection.
LDAMP.ListNetworkParameters()

# tf Graph input
# One flattened image (length n) per column, BATCH_SIZE columns.
# NOTE(review): TF1-style placeholder API — this script targets TensorFlow 1.x.
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])

## Initialize the variable theta which stores the weights and biases
theta_dncnn = LDAMP.init_vars_DnCNN(init_mu, init_sigma)

## Construct the measurement model and handles/placeholders
# Denoising setup: the "measurement" is the clean signal corrupted by additive
# noise with standard deviation sigma_w (fixed at graph-build time here).
y_measured = LDAMP.AddNoise(x_true, sigma_w)

## Construct the reconstruction model
x_hat = LDAMP.DnCNN(
    y_measured, None, theta_dncnn, reuse=True
)  #Set reuse=true because we initialized BN variables in init_vars_DnCNN

# Report the total number of trainable parameters in the model.
LDAMP.CountParameters()

## Load and Preprocess Test Data
# Large images use the standard test set; small patch sizes use the patch set.
if height_img > 50:
    test_im_name = "./TrainingData/StandardTestData_" + str(
        height_img) + "Res.npy"
else:
    test_im_name = "./TrainingData/TestData_patch" + str(height_img) + ".npy"

test_images = np.load(test_im_name)
# Drop axis 1 by taking its first slice: (N, 1, H, W) -> (N, H, W).
# NOTE(review): assumes axis 1 is a singleton channel axis — verify .npy layout.
test_images = test_images[:, 0, :, :]
assert (len(test_images) >= BATCH_SIZE), "Requested too much Test data"