Example #1
0
# Pull the N-MNIST test images out of the loaded .mat/.npz dict and cast to
# float32 so they can be fed to the TF graph directly.
# assumes pic_test_data['N_MNIST_pic_test'] is (N, H, W) — TODO confirm
pic_test_x = pic_test_data['N_MNIST_pic_test'].astype('float32')
print('pic_test_x: ', pic_test_x.shape)
# Evaluate on the full test set; uncomment the lines below to instead
# subsample `num_selected` evenly-spaced images (with matching ground truth).
in_imgs = pic_test_x
#num_selected = 200
#test_idx = np.linspace(0,len(pic_test_x)-1,num_selected).astype('int32')
#in_imgs = pic_test_x[test_idx]
#gt_imgs = pic_test_y[test_idx]

# In[]:
# prediction: run the network over all test images batch-by-batch,
# collect the reconstructions, and report the mean forward-pass time
# per image in milliseconds.
ind = 0
time_cost = 0
reconstructed = np.zeros(in_imgs.shape, dtype='float32')
for batch_x, _ in my_io.batch_iter(test_batch_size,
                                   in_imgs,
                                   in_imgs,
                                   shuffle=False):
    # Add the singleton channel axis the conv graph expects.
    x = batch_x.reshape((-1, *pic_size, 1))
    # keep_prob=1.0 / mask_prob=0.0 disable dropout and input masking at
    # inference time.
    feed_dict = {inputs_: x, keep_prob: 1.0, mask_prob: 0.0}

    time1 = time()
    res_imgs = sess.run(outputs_, feed_dict=feed_dict)
    time2 = time()
    time_cost += (time2 - time1)
    # Squeeze only the trailing channel axis (assumes outputs_ is
    # (batch, *pic_size, 1) like the input — TODO confirm). A bare
    # np.squeeze() would also drop the batch axis when the final batch
    # contains a single image, breaking the slice assignment below.
    res_imgs = np.squeeze(res_imgs, axis=-1)
    # shuffle=False guarantees batches arrive in order, so indexing by
    # batch number reassembles the full array correctly.
    reconstructed[ind * test_batch_size:(ind + 1) * test_batch_size] = res_imgs
    ind += 1
time_cost = time_cost / len(in_imgs)
print('\nmean time cost(ms):%f\n' % (time_cost * 1e3))
Example #2
0
# In[]:
# train: main training loop — iterate minibatches, run the optimizer,
# and track wall-clock time every few epochs.
# Feed dict for evaluation: dropout/masking disabled on the test split.
test_feed_dict = {
    inputs_: test_x1,
    targets_: test_y1,
    keep_prob: 1.0,
    mask_prob: 0.0
}

# Log the pre-training baseline at step 0.
# NOTE(review): test_feed_dict is passed for BOTH the train and test slots —
# presumably intentional for the initial baseline, but verify against the
# summaryWriter signature.
summaryWriter(writer_tr, writer_te, merged, cost, test_feed_dict,
              test_feed_dict, 0)
time_start = time()

for e in range(1, 1 + epochs):
    # throw_insufficient=True drops a final partial batch so every step
    # sees exactly batch_size samples.
    for batch_x, batch_y in my_io.batch_iter(batch_size,
                                             train_x,
                                             train_y,
                                             throw_insufficient=True):

        # Add the singleton channel axis the conv graph expects.
        x = batch_x.reshape((-1, *pic_size, 1))
        y = batch_y.reshape((-1, *pic_size, 1))

        train_feed_dict = {
            inputs_: x,
            targets_: y,
            keep_prob: keep_prob_v,
            mask_prob: mask_prob_v
        }
        sess.run(optimizer, feed_dict=train_feed_dict)

    # Every 5 epochs, record elapsed wall-clock time since training began.
    # (The rest of this periodic-reporting body continues past this chunk.)
    if e % 5 == 0:
        time_cost = time() - time_start
Example #3
0
# tensorboard: separate writers so train/test curves overlay in one plot;
# only the train writer stores the graph.
writer_tr = tf.summary.FileWriter(train_log_dir, sess.graph)
writer_te = tf.summary.FileWriter(test_log_dir)
merged = tf.summary.merge_all()

# saver
saver = tf.train.Saver()

# original performance: evaluate the restored checkpoint on the test set
# before any finetuning (dropout/masking disabled).
cost_v = sess.run(cost, feed_dict={inputs_: test_x1, targets_: test_y1, keep_prob: 1.0, mask_prob: 0.0})
print('\nfinetune mode:\n\nimport model: ', ckpt_states[model_ind],
    '\ncurrent test cost: ', cost_v, '\n\nfinetuning start!\n')

for e in range(1, 1+epochs):
    for batch_x, batch_y in my_io.batch_iter(batch_size, train_x, train_y):
        # Add the singleton channel axis the conv graph expects.
        x = batch_x.reshape((-1, *pic_size, 1))
        y = batch_y.reshape((-1, *pic_size, 1))
        sess.run(training_op, feed_dict={inputs_: x, targets_: y, keep_prob: keep_prob_v, mask_prob: mask_prob_v})

    # Log summaries every epoch. NOTE: the "train" cost is computed on the
    # last minibatch (x, y) only, so it is a noisy estimate of the epoch.
    if e % 1 == 0:
        tr, tr_cost = sess.run([merged, cost], feed_dict={inputs_: x, targets_: y, keep_prob: keep_prob_v, mask_prob: mask_prob_v})
        te, te_cost = sess.run([merged, cost], feed_dict={inputs_: test_x1, targets_: test_y1, keep_prob: 1.0, mask_prob: 0.0})

        writer_tr.add_summary(tr, e)
        writer_te.add_summary(te, e)

        print(e,"Train cost:",tr_cost,"Test cost",te_cost)

    # Checkpoint every 20 epochs; `e` starts at 1, so the original
    # `and e != 0` guard was redundant and has been dropped.
    if e % 20 == 0:
        saver.save(sess, model_path+'my_model', global_step=e, write_meta_graph=False)