Example 1
sigma_w_min = FLAGS.sigma_w_min / 255.  # Minimum noise std (8-bit scale normalized to [0, 1])
sigma_w_max = FLAGS.sigma_w_max / 255.  # Maximum noise std
n = channel_img * height_img * width_img

# Parameters to initialize weights. Won't be used if old weights are loaded
init_mu = 0
init_sigma = 0.1

train_start_time = time.time()

## Clear all the old variables, tensors, etc.
tf.reset_default_graph()

LDAMP.SetNetworkParams(new_height_img=height_img, new_width_img=width_img, new_channel_img=channel_img,
                       new_filter_height=filter_height, new_filter_width=filter_width, new_num_filters=num_filters,
                       new_n_DnCNN_layers=n_DnCNN_layers, new_n_DAMP_layers=None,
                       new_sampling_rate=None,
                       new_BATCH_SIZE=BATCH_SIZE, new_sigma_w=None, new_n=n, new_m=None, new_training=True)
LDAMP.ListNetworkParameters()

# tf Graph input
training_tf = tf.placeholder(tf.bool, name='training')
sigma_w_tf = tf.placeholder(tf.float32)
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])

## Construct the measurement model and handles/placeholders
y_measured = LDAMP.AddNoise(x_true, sigma_w_tf)

## Initialize the variable theta which stores the weights and biases
theta_dncnn = LDAMP.init_vars_DnCNN(init_mu, init_sigma)
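
## Hedged continuation (not part of the original snippet): build the
## reconstruction with the DnCNN call signature seen in Example 3, then attach
## an l2 training loss and an Adam step. The loss form and the learning rate
## are assumptions, not the repo's confirmed code.
x_hat = LDAMP.DnCNN(y_measured, None, theta_dncnn, training=training_tf)
cost = tf.nn.l2_loss(x_true - x_hat) / BATCH_SIZE
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # run batch-norm updates with each step
with tf.control_dependencies(update_ops):
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)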
Example 2
sampling_rate_test = 0.2  # Sampling rate used for testing
sampling_rate_train = 0.2  # Sampling rate that was used for training
sigma_w = 0.  # Noise std
n = channel_img * height_img * width_img
m = int(np.round(sampling_rate_test * n))
measurement_mode = 'Fast-JL'  # other options: 'coded-diffraction', 'gaussian', 'complex-gaussian'

# Parameters to initialize weights. Won't be used if old weights are loaded
init_mu = 0
init_sigma = 0.1

random.seed(1)

LDAMP.SetNetworkParams(new_height_img=height_img, new_width_img=width_img, new_channel_img=channel_img,
                       new_filter_height=filter_height, new_filter_width=filter_width, new_num_filters=num_filters,
                       new_n_DnCNN_layers=n_DnCNN_layers, new_n_DAMP_layers=n_DAMP_layers,
                       new_sampling_rate=sampling_rate_test,
                       new_BATCH_SIZE=BATCH_SIZE, new_sigma_w=sigma_w, new_n=n, new_m=m, new_training=False,
                       use_adaptive_weights=DenoiserbyDenoiser)
LDAMP.ListNetworkParameters()

# tf Graph input
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])

#Create handles for the measurement operator
[A_handle, At_handle, A_val, A_val_tf] = LDAMP.GenerateMeasurementOperators(measurement_mode)

## Initialize the variable theta which stores the weights and biases
if tie_weights:
    theta = [None]
    with tf.variable_scope("Iter" + str(0)):
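        ## Hedged sketch of the scope body (an assumption patterned on the
        ## init_vars_DnCNN call in Examples 1 and 3): with tied weights a
        ## single set of DnCNN variables is created under scope "Iter0" and
        ## reused at every D-AMP iteration.
        theta[0] = LDAMP.init_vars_DnCNN(init_mu, init_sigma)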
Example 3
## Problem Parameters
sigma_w = 25. / 255.  # Noise std
n = channel_img * height_img * width_img

# Parameters to initialize weights. Won't be used if old weights are loaded
init_mu = 0
init_sigma = 0.1

train_start_time = time.time()

## Clear all the old variables, tensors, etc.
tf.reset_default_graph()

LDAMP.SetNetworkParams(new_height_img=height_img, new_width_img=width_img, new_channel_img=channel_img,
                       new_filter_height=filter_height, new_filter_width=filter_width, new_num_filters=num_filters,
                       new_n_DnCNN_layers=n_DnCNN_layers, new_n_DAMP_layers=None,
                       new_sampling_rate=None,
                       new_BATCH_SIZE=BATCH_SIZE, new_sigma_w=sigma_w, new_n=n, new_m=None, new_training=False)
LDAMP.ListNetworkParameters()

# tf Graph input
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])

## Construct the measurement model and handles/placeholders
y_measured = LDAMP.AddNoise(x_true, sigma_w)

## Initialize the variable theta which stores the weights and biases
theta_dncnn = LDAMP.init_vars_DnCNN(init_mu, init_sigma)

## Construct the reconstruction model
x_hat = LDAMP.DnCNN(y_measured, None, theta_dncnn, training=False)
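
## Hedged evaluation sketch (not part of the original snippet): restore saved
## weights and measure per-image MSE/PSNR. `saved_model_path` and `test_batch`
## are hypothetical names, and the PSNR formula assumes pixels in [0, 1].
error = tf.reduce_sum((x_true - x_hat) ** 2, axis=0) / n  # per-image MSE, shape [BATCH_SIZE]
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, saved_model_path)
    mse = sess.run(error, feed_dict={x_true: test_batch})
    psnr = -10. * np.log10(mse)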
Example 4
n = channel_img * height_img * width_img
m = int(np.round(sampling_rate * n))
measurement_mode = 'gaussian'  # other option: 'coded-diffraction'

# Parameters to initialize weights. Won't be used if old weights are loaded
init_mu = 0
init_sigma = 0.1

train_start_time = time.time()
for n_DAMP_layers in range(start_layer, max_n_DAMP_layers + 1, 1):
    ## Clear all the old variables, tensors, etc.
    tf.reset_default_graph()

    LDAMP.SetNetworkParams(new_height_img=height_img, new_width_img=width_img, new_channel_img=channel_img,
                           new_filter_height=filter_height, new_filter_width=filter_width, new_num_filters=num_filters,
                           new_n_DnCNN_layers=n_DnCNN_layers, new_n_DAMP_layers=n_DAMP_layers,
                           new_sampling_rate=sampling_rate,
                           new_BATCH_SIZE=BATCH_SIZE, new_sigma_w=sigma_w, new_n=n, new_m=m, new_training=True)
    LDAMP.ListNetworkParameters()

    # tf Graph input
    training_tf = tf.placeholder(tf.bool, name='training')
    x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])

    ## Initialize the variable theta which stores the weights and biases
    if tie_weights:
        n_layers_trained = 1
    else:
        n_layers_trained = n_DAMP_layers
    theta = [None] * n_layers_trained
    for iter in range(n_layers_trained):
        with tf.variable_scope("Iter" + str(iter)):
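            ## Hedged sketch of the loop body (an assumption patterned on the
            ## init_vars_DnCNN call in Examples 1 and 3): with untied weights,
            ## each unrolled D-AMP layer gets its own DnCNN variables under its
            ## own "IterN" scope.
            theta[iter] = LDAMP.init_vars_DnCNN(init_mu, init_sigma)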