# Restore the chosen checkpoint's weights into the live session.
restorer.restore(sess, ckpt_states[model_ind])

# load Ops and variables according to old model and your need
graph = tf.get_default_graph()
# Recover the graph's placeholders/ops by their saved names:
inputs_ = graph.get_tensor_by_name("inputs/inputs_:0")      # network input images
mask_prob = graph.get_tensor_by_name("inputs/Placeholder_1:0")  # NOTE(review): unnamed placeholder — presumably a masking probability; confirm against the training script
targets_ = graph.get_tensor_by_name("inputs/targets_:0")    # supervision targets
keep_prob = graph.get_tensor_by_name("inputs/Placeholder:0")  # for dropout
#outputs_ = graph.get_tensor_by_name("outputs/outputs_:0")
#outputs_ = graph.get_tensor_by_name("outputs/conv2d/Relu:0")  # act_fun = relu
outputs_ = graph.get_tensor_by_name("outputs/conv2d/Tanh:0")  # act_fun = tanh (original comment wrongly said relu)
cost = graph.get_tensor_by_name("loss/Mean:0")  # scalar loss node (mean-reduced, per the 'loss' name scope)

# In[]:
# load data
# Load the test .mat file; its 'N_MNIST_pic_test' entry holds the test
# images, cast to float32 for the TF graph.
pic_test_data = my_io.load_mat(data_path)
pic_test_x = pic_test_data['N_MNIST_pic_test'].astype('float32')
print('pic_test_x: ', pic_test_x.shape)
# Feed the full test set to the network.
in_imgs = pic_test_x
# NOTE(review): dead code below once subsampled `num_selected` evenly
# spaced images (and their ground truth) instead of using the whole set.
#num_selected = 200
#test_idx = np.linspace(0,len(pic_test_x)-1,num_selected).astype('int32')
#in_imgs = pic_test_x[test_idx]
#gt_imgs = pic_test_y[test_idx]

# In[]:
# prediction
# NOTE(review): this fragment is corrupted. The `for` statement below is
# truncated mid-argument-list — the `my_io.batch_iter(...)` call is never
# closed and no loop body follows — and lines defining an MSE loss and an
# Adam optimizer (which belong to the graph-construction part of the file)
# have been spliced into the gap. The loop presumably iterated test batches,
# ran the restored graph, accumulated cost/time, and filled `reconstructed`.
# TODO: recover the missing lines from the original script; the code is kept
# verbatim here so nothing is silently invented.
ind = 0              # write offset into `reconstructed`
mean_cost = 0        # running accumulator for per-batch cost
time_cost = 0        # running accumulator for inference time
reconstructed = np.zeros(in_imgs.shape, dtype='float32')
for batch_x, _ in my_io.batch_iter(test_batch_size,
    # cross entropy loss
    #     xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits_)
    #     cost = tf.reduce_mean(xentropy)

    # mse loss
    mse = tf.losses.mean_squared_error(targets_, outputs_)
    cost = tf.reduce_mean(mse)

    tf.summary.scalar('cost', cost)

with tf.name_scope('train'):
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

# In[]:
# Load the training / evaluation .mat files.
train_data = my_io.load_mat(train_path)
test_data = my_io.load_mat(test_path)

# Inputs are always the 'data' arrays, cast to float32 for the graph.
train_x = train_data['data'].astype('float32')
test_x = test_data['data'].astype('float32')

# SUP_FLAG == 0 -> unsupervised (autoencoder) mode: targets are the inputs
# themselves; otherwise targets come from the ground-truth 'data_gt' arrays.
supervised = SUP_FLAG != 0
train_y = train_data['data_gt'].astype('float32') if supervised else train_x
test_y = test_data['data_gt'].astype('float32') if supervised else test_x

print('train_x: ', train_x.shape, '\ttrain_y: ', train_y.shape, '\ntest_x: ',
      test_x.shape, '\ttest_y: ', test_y.shape)
# Exemple #3  (scraper artifact: this marker and the stray "0" below it
# # 0          delimited a second pasted snippet — not Python code)
    # mse loss
    mse = tf.losses.mean_squared_error(targets_ , outputs_)
    cost = tf.reduce_mean(mse)
    
    tf.summary.scalar('cost', cost)
    
with tf.name_scope('train'):
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)



    
# In[]:
# Load the N-MNIST training / test .mat files.
train_data = my_io.load_mat(path1)
test_data = my_io.load_mat(path2)

# Inputs are always the noisy picture arrays, cast to float32.
train_x = train_data['N_MNIST_pic_train'].astype('float32')
test_x = test_data['N_MNIST_pic_test'].astype('float32')

# SUP_FLAG == 0 -> unsupervised (autoencoder) mode: targets are the inputs
# themselves; otherwise targets are the clean ground-truth pictures.
supervised = SUP_FLAG != 0
train_y = train_data['N_MNIST_pic_train_gt'].astype('float32') if supervised else train_x
test_y = test_data['N_MNIST_pic_test_gt'].astype('float32') if supervised else test_x

print('train_x: ', train_x.shape, '\ttrain_y: ', train_y.shape, 
     '\ntest_x: ', test_x.shape, '\ttest_y: ', test_y.shape)