def evaluate_checkpoint(filename):
    #sys.stdout = open(os.devnull, 'w')
    for i in range(30):
        g2 = tf.Graph()
        g3 = tf.Graph()
        epsilonc = 0
        fmnist_loop_list = np.zeros([2, int(math.ceil(num_eval_examples / eval_batch_size))])
        fmnist_loop_list_corr = np.zeros([2, int(math.ceil(num_eval_examples / eval_batch_size))])

        with tf.Session() as sess:
            # Restore the checkpoint
            model1 = Model()
            saver.restore(sess, filename)
            # with tf.device("/cpu:0"):
            # Iterate over the samples batch-by-batch
            num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
            total_xent_nat = 0.
            total_xent_adv = 0.
            total_corr_nat = 0
            total_corr_adv = 0
            x_batch_list = []
            y_batch_list = []
            x_adv_list = []
            for ibatch in range(num_batches):
                train_loader = get_data(BATCH_SIZE)
                trainiter = iter(train_loader)
                x_batch, y_batch = next(trainiter)
                x_batch = np.array(x_batch).reshape([BATCH_SIZE, img_size[0] * img_size[1]])
                y_batch_list.append(y_batch)
                x_batch_adv = attacks[ibatch].perturb(x_batch, y_batch, sess)
                x_batch_list.append(x_batch_adv)
                corruptedbatch = blur(x_batch_adv)
                x_adv_list.append(corruptedbatch)

        # Run the blurred adversarial batches through the SARGAN autoencoder
        with g2.as_default():
            with tf.Session() as sess2:
                sargan_model = SARGAN(img_size, BATCH_SIZE, img_channel=1)
                sargan_saver = tf.train.Saver()
                sargan_saver = tf.train.import_meta_graph(trained_model_path + '/sargan_mnist.meta')
                sargan_saver.restore(sess2, tf.train.latest_checkpoint(trained_model_path))
                for ibatch in range(num_batches):
                    processed_batch = sess2.run(
                        sargan_model.gen_img,
                        feed_dict={sargan_model.image: x_adv_list[ibatch],
                                   sargan_model.cond: x_adv_list[ibatch]})
                    processed_batch = np.array(processed_batch).reshape(
                        [len(processed_batch), img_size[0] * img_size[1]])
                    x_adv_list[ibatch] = processed_batch

        # Evaluate the classifier on the raw adversarial and autoencoder-processed batches
        with g3.as_default():
            model3 = Model()
            saver2 = tf.train.Saver()
            with tf.Session() as sess3:
                saver2.restore(sess3, filename)
                for ibatch in range(num_batches):
                    dict_nat = {model3.x_input: x_batch_list[ibatch],
                                model3.y_input: y_batch_list[ibatch]}
                    dict_adv = {model3.x_input: x_adv_list[ibatch],
                                model3.y_input: y_batch_list[ibatch]}
                    cur_corr_nat, cur_xent_nat = sess3.run(
                        [model3.num_correct, model3.xent], feed_dict=dict_nat)
                    cur_corr_adv, cur_xent_adv = sess3.run(
                        [model3.num_correct, model3.xent], feed_dict=dict_adv)
                    fmnist_loop_list[0, ibatch] = epsilonc
                    fmnist_loop_list[1, ibatch] = cur_corr_adv / eval_batch_size
                    fmnist_loop_list_corr[0, ibatch] = epsilonc
                    fmnist_loop_list_corr[1, ibatch] = cur_corr_nat / eval_batch_size
                    epsilonc += config['epsilon'] / num_batches
                '''total_xent_nat += cur_xent_nat
                total_xent_adv += cur_xent_adv
                total_corr_nat += cur_corr_nat
                total_corr_adv += cur_corr_adv

                avg_xent_nat = total_xent_nat / num_eval_examples
                avg_xent_adv = total_xent_adv / num_eval_examples
                acc_nat = total_corr_nat / num_eval_examples
                acc_adv = total_corr_adv / num_eval_examples
                '''
                '''summary = tf.Summary(value=[
                    tf.Summary.Value(tag='xent adv eval', simple_value= avg_xent_adv),
                    tf.Summary.Value(tag='xent adv', simple_value= avg_xent_adv),
                    tf.Summary.Value(tag='xent nat', simple_value= avg_xent_nat),
                    tf.Summary.Value(tag='accuracy adv eval', simple_value= acc_adv),
                    tf.Summary.Value(tag='accuracy adv', simple_value= acc_adv),
                    tf.Summary.Value(tag='accuracy nat', simple_value= acc_nat)])
                summary_writer.add_summary(summary, global_step.eval(sess3))'''
                '''
                #sys.stdout = sys.__stdout__
                print('natural: {:.2f}%'.format(100 * acc_nat))
                print('adversarial: {:.2f}%'.format(100 * acc_adv))
                print('avg nat loss: {:.4f}'.format(avg_xent_nat))
                print('avg adv loss: {:.4f}'.format(avg_xent_adv))'''
        np.save('loop_data/fmnist/fmnist_loop_list' + str(i + 15) + '.npy', fmnist_loop_list)
        np.save('loop_data/fmnist/fmnist_loop_list_corr' + str(i + 15) + '.npy', fmnist_loop_list_corr)
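
# Illustrative only: a minimal sketch (not part of the original scripts) of how the arrays
# saved above could be loaded and plotted as accuracy-vs-epsilon curves. It assumes matplotlib
# is installed; plot_fmnist_loops and its run_index argument are hypothetical names, chosen to
# match the str(i + 15) suffix used in the np.save calls above.
def plot_fmnist_loops(run_index):
    import matplotlib.pyplot as plt
    adv = np.load('loop_data/fmnist/fmnist_loop_list' + str(run_index) + '.npy')
    corr = np.load('loop_data/fmnist/fmnist_loop_list_corr' + str(run_index) + '.npy')
    # Row 0 holds the perturbation budget, row 1 the per-batch accuracy.
    plt.plot(adv[0], adv[1], label='autoencoder-processed adversarial')
    plt.plot(corr[0], corr[1], label='raw adversarial')
    plt.xlabel('epsilon')
    plt.ylabel('accuracy')
    plt.legend()
    plt.show()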
def evaluate_checkpoint(filename):
    #sys.stdout = open(os.devnull, 'w')
    # Different graphs for all the models
    gx1 = tf.Graph()
    gx2 = tf.Graph()
    gx3 = tf.Graph()
    gx4 = tf.Graph()
    g3 = tf.Graph()

    with tf.Session() as sess:
        # Restore the checkpoint
        saver.restore(sess, filename)
        # Iterate over the samples batch-by-batch
        # Number of batches
        num_batches = int(math.ceil(num_eval_examples / eval_batch_size)) - 1
        total_xent_nat = 0.
        total_xent_corr = 0.
        total_corr_nat = 0.
        total_corr_corr = 0.
        total_corr_adv = np.zeros([4]).astype(dtype='float32')
        total_corr_blur = np.zeros([4]).astype(dtype='float32')
        total_xent_adv = np.zeros([4]).astype(dtype='float32')
        total_xent_blur = np.zeros([4]).astype(dtype='float32')
        # Storing the various images
        x_batch_list = []
        x_corr_list = []
        x_blur_list1 = []
        x_adv_list1 = []
        x_blur_list2 = []
        x_adv_list2 = []
        x_blur_list3 = []
        x_adv_list3 = []
        x_blur_list4 = []
        x_adv_list4 = []
        # Storing y values
        y_batch_list = []

        train_loader = get_data(BATCH_SIZE)
        trainiter = iter(cycle(train_loader))
        for ibatch in range(num_batches):
            x_batch2, y_batch = next(trainiter)
            y_batch_list.append(y_batch)
            x_batch2 = np.array(x_batch2.data.numpy().transpose(0, 2, 3, 1))
            x_batch = np.zeros([len(x_batch2), img_size[0] * img_size[1]])
            for i in range(len(x_batch2)):
                x_batch[i] = x_batch2[i].reshape([img_size[0] * img_size[1]])
            x_batch_adv = attack.perturb(x_batch, y_batch, sess)
            x_batch2 = np.zeros([len(x_batch), img_size[0], img_size[1], img_size[2]])
            x_batch_adv2 = np.zeros([len(x_batch), img_size[0], img_size[1], img_size[2]])
            for k in range(len(x_batch)):
                x_batch2[k] = add_gaussian_noise(
                    x_batch[k].reshape([img_size[0], img_size[1], img_size[2]]),
                    sd=np.random.uniform(NOISE_STD_RANGE[1], NOISE_STD_RANGE[1]))
                x_batch_adv2[k] = add_gaussian_noise(
                    x_batch_adv[k].reshape([img_size[0], img_size[1], img_size[2]]),
                    sd=np.random.uniform(NOISE_STD_RANGE[1], NOISE_STD_RANGE[1]))
            x_batch_list.append(x_batch)
            x_corr_list.append(x_batch_adv)
            x_blur_list4.append(x_batch2)
            x_adv_list4.append(x_batch_adv2)

    # Running through first autoencoder
    with gx1.as_default():
        with tf.Session() as sess2:
            sargan_model = SARGAN(img_size, BATCH_SIZE, img_channel=1)
            sargan_saver = tf.train.Saver()
            sargan_saver = tf.train.import_meta_graph(trained_model_path + '/sargan_mnist.meta')
            sargan_saver.restore(sess2, tf.train.latest_checkpoint(trained_model_path))
            for ibatch in range(num_batches):
                processed_batch = sess2.run(
                    sargan_model.gen_img,
                    feed_dict={sargan_model.image: x_adv_list4[ibatch],
                               sargan_model.cond: x_adv_list4[ibatch]})
                x_adv_list4[ibatch] = processed_batch
                blurred_batch = sess2.run(
                    sargan_model.gen_img,
                    feed_dict={sargan_model.image: x_blur_list4[ibatch],
                               sargan_model.cond: x_blur_list4[ibatch]})
                x_blur_list4[ibatch] = blurred_batch
                # Adding images to first autoencoder data set
                x_blur_list1.append(blurred_batch.reshape([len(x_batch), img_size[0] * img_size[1]]))
                x_adv_list1.append(processed_batch.reshape([len(x_batch), img_size[0] * img_size[1]]))

            psnr = 0
            for jj in range(num_batches):
                next_psnr = 0
                psnr_value = tf.image.psnr(
                    np.array(x_batch_list[jj]).reshape([64, img_size[0], img_size[1], img_size[2]]),
                    np.array(x_adv_list4[jj]).reshape([64, img_size[0], img_size[1], img_size[2]]),
                    max_val=1)
                psnr_value = sess2.run(psnr_value)
                for i in range(64):
                    next_psnr += psnr_value[i]
                psnr += next_psnr / 64
            psnr /= num_batches
            print("Not actual PSNR= ", psnr, "\n\n\n")

    with gx2.as_default():
        with tf.Session() as sessx2:
            sargan_model2 = SARGAN(img_size, BATCH_SIZE, img_channel=1)
            sargan_saver2 = tf.train.Saver()
            sargan_saver2 = tf.train.import_meta_graph(trained_model_path2 + '/sargan_mnist.meta')
            sargan_saver2.restore(sessx2, tf.train.latest_checkpoint(trained_model_path2))
            for ibatch in range(num_batches):
                processed_batch = sessx2.run(
                    sargan_model2.gen_img,
                    feed_dict={sargan_model2.image: x_adv_list4[ibatch],
                               sargan_model2.cond: x_adv_list4[ibatch]})
                x_adv_list4[ibatch] = processed_batch
                blurred_batch = sessx2.run(
                    sargan_model2.gen_img,
                    feed_dict={sargan_model2.image: x_blur_list4[ibatch],
                               sargan_model2.cond: x_blur_list4[ibatch]})
                x_blur_list4[ibatch] = blurred_batch
                # Adding images to second autoencoder data set
                x_blur_list2.append(blurred_batch.reshape([len(x_batch), img_size[0] * img_size[1]]))
                x_adv_list2.append(processed_batch.reshape([len(x_batch), img_size[0] * img_size[1]]))

    with gx3.as_default():
        with tf.Session() as sessx3:
            sargan_model3 = SARGAN(img_size, BATCH_SIZE, img_channel=1)
            sargan_saver3 = tf.train.Saver()
            sargan_saver3 = tf.train.import_meta_graph(trained_model_path3 + '/sargan_mnist.meta')
            sargan_saver3.restore(sessx3, tf.train.latest_checkpoint(trained_model_path3))
            for ibatch in range(num_batches):
                processed_batch = sessx3.run(
                    sargan_model3.gen_img,
                    feed_dict={sargan_model3.image: x_adv_list4[ibatch],
                               sargan_model3.cond: x_adv_list4[ibatch]})
                x_adv_list4[ibatch] = processed_batch
                blurred_batch = sessx3.run(
                    sargan_model3.gen_img,
                    feed_dict={sargan_model3.image: x_blur_list4[ibatch],
                               sargan_model3.cond: x_blur_list4[ibatch]})
                x_blur_list4[ibatch] = blurred_batch
                # Adding images to third autoencoder data set
                x_blur_list3.append(blurred_batch.reshape([len(x_batch), img_size[0] * img_size[1]]))
                x_adv_list3.append(processed_batch.reshape([len(x_batch), img_size[0] * img_size[1]]))

    # Final autoencoder setup
    with gx4.as_default():
        with tf.Session() as sessx4:
            sargan_model4 = SARGAN(img_size, BATCH_SIZE, img_channel=1)
            sargan_saver4 = tf.train.Saver()
            sargan_saver4 = tf.train.import_meta_graph(trained_model_path4 + '/sargan_mnist.meta')
            sargan_saver4.restore(sessx4, tf.train.latest_checkpoint(trained_model_path4))
            for ibatch in range(num_batches):
                processed_batch = sessx4.run(
                    sargan_model4.gen_img,
                    feed_dict={sargan_model4.image: x_adv_list4[ibatch],
                               sargan_model4.cond: x_adv_list4[ibatch]})
                x_adv_list4[ibatch] = processed_batch.reshape([len(x_batch), img_size[0] * img_size[1]])
                blurred_batch = sessx4.run(
                    sargan_model4.gen_img,
                    feed_dict={sargan_model4.image: x_blur_list4[ibatch],
                               sargan_model4.cond: x_blur_list4[ibatch]})
                x_blur_list4[ibatch] = blurred_batch.reshape([len(x_batch), img_size[0] * img_size[1]])

            psnr = 0
            for jj in range(num_batches):
                next_psnr = 0
                psnr_value = tf.image.psnr(
                    np.array(x_batch_list[jj]).reshape([64, img_size[0], img_size[1], img_size[2]]),
                    np.array(x_adv_list4[jj]).reshape([64, img_size[0], img_size[1], img_size[2]]),
                    max_val=1)
                psnr_value = sessx4.run(psnr_value)
                for i in range(64):
                    next_psnr += psnr_value[i]
                psnr += next_psnr / 64
            psnr /= num_batches
            print("\n\nPSNR= ", psnr, "\n\n")

    with g3.as_default():
        model3 = Model()
        saver2 = tf.train.Saver()
        with tf.Session() as sess3:
            saver2.restore(sess3, filename)
            for ibatch in range(num_batches):
                cur_xent_adv = np.zeros([4]).astype(dtype='float32')
                cur_xent_blur = np.zeros([4]).astype(dtype='float32')
                cur_corr_adv = np.zeros([4]).astype(dtype='float32')
                cur_corr_blur = np.zeros([4]).astype(dtype='float32')
                dict_nat = {model3.x_input: x_batch_list[ibatch],
                            model3.y_input: y_batch_list[ibatch]}
                dict_corr = {model3.x_input: x_corr_list[ibatch],
                             model3.y_input: y_batch_list[ibatch]}
                # First autoencoder dictionary
                dict_adv1 = {model3.x_input: x_adv_list1[ibatch],
                             model3.y_input: y_batch_list[ibatch]}
                dict_blur1 = {model3.x_input: x_blur_list1[ibatch],
                              model3.y_input: y_batch_list[ibatch]}
                # Second autoencoder dictionary
                dict_adv2 = {model3.x_input: x_adv_list2[ibatch],
                             model3.y_input: y_batch_list[ibatch]}
                dict_blur2 = {model3.x_input: x_blur_list2[ibatch],
                              model3.y_input: y_batch_list[ibatch]}
                # Third autoencoder dictionary
                dict_adv3 = {model3.x_input: x_adv_list3[ibatch],
                             model3.y_input: y_batch_list[ibatch]}
                dict_blur3 = {model3.x_input: x_blur_list3[ibatch],
                              model3.y_input: y_batch_list[ibatch]}
                # Fourth autoencoder dictionary
                dict_adv4 = {model3.x_input: x_adv_list4[ibatch],
                             model3.y_input: y_batch_list[ibatch]}
                dict_blur4 = {model3.x_input: x_blur_list4[ibatch],
                              model3.y_input: y_batch_list[ibatch]}
                # Regular images
                cur_corr_nat, cur_xent_nat = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_nat)
                cur_corr_corr, cur_xent_corr = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_corr)
                # First autoencoder
                cur_corr_blur[0], cur_xent_blur[0] = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_blur1)
                cur_corr_adv[0], cur_xent_adv[0] = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_adv1)
                # Second autoencoder
                cur_corr_blur[1], cur_xent_blur[1] = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_blur2)
                cur_corr_adv[1], cur_xent_adv[1] = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_adv2)
                # Third autoencoder
                cur_corr_blur[2], cur_xent_blur[2] = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_blur3)
                cur_corr_adv[2], cur_xent_adv[2] = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_adv3)
                # Fourth autoencoder
                cur_corr_blur[3], cur_xent_blur[3] = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_blur4)
                cur_corr_adv[3], cur_xent_adv[3] = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_adv4)
                # Natural
                total_corr_nat += cur_corr_nat
                total_corr_corr += cur_corr_corr
                total_xent_nat += cur_xent_nat
                total_xent_corr += cur_xent_corr
                # Running accuracy
                total_corr_adv += cur_corr_adv
                total_corr_blur += cur_corr_blur
                total_xent_adv += cur_xent_adv
                total_xent_blur += cur_xent_blur

            # Regular images
            avg_xent_nat = total_xent_nat / num_eval_examples
            avg_xent_corr = total_xent_corr / num_eval_examples
            acc_nat = total_corr_nat / num_eval_examples
            acc_corr = total_corr_corr / num_eval_examples
            # Total accuracy
            acc_adv = total_corr_adv / num_eval_examples
            acc_blur = total_corr_blur / num_eval_examples
            avg_xent_adv = total_xent_adv / num_eval_examples
            avg_xent_blur = total_xent_blur / num_eval_examples

            #sys.stdout = sys.__stdout__
            print("No Autoencoder")
            print('natural: {:.2f}%'.format(100 * acc_nat))
            print('Corrupted: {:.2f}%'.format(100 * acc_corr))
            print('avg nat loss: {:.4f}'.format(avg_xent_nat))
            print('avg corr loss: {:.4f} \n'.format(avg_xent_corr))
            print("First Autoencoder")
            print('natural with blur: {:.2f}%'.format(100 * acc_blur[0]))
            print('adversarial: {:.2f}%'.format(100 * acc_adv[0]))
            print('avg nat with blur loss: {:.4f}'.format(avg_xent_blur[0]))
            print('avg adv loss: {:.4f} \n'.format(avg_xent_adv[0]))
            print("Second Autoencoder")
            print('natural with blur: {:.2f}%'.format(100 * acc_blur[1]))
            print('adversarial: {:.2f}%'.format(100 * acc_adv[1]))
            print('avg nat with blur loss: {:.4f}'.format(avg_xent_blur[1]))
            print('avg adv loss: {:.4f} \n'.format(avg_xent_adv[1]))
            print("Third Autoencoder")
            print('natural with blur: {:.2f}%'.format(100 * acc_blur[2]))
            print('adversarial: {:.2f}%'.format(100 * acc_adv[2]))
            print('avg nat with blur loss: {:.4f}'.format(avg_xent_blur[2]))
            print('avg adv loss: {:.4f} \n'.format(avg_xent_adv[2]))
            print("Fourth Autoencoder")
            print('natural with blur: {:.2f}%'.format(100 * acc_blur[3]))
            print('adversarial: {:.2f}%'.format(100 * acc_adv[3]))
            print('avg nat with blur loss: {:.4f}'.format(avg_xent_blur[3]))
            print('avg adv loss: {:.4f} \n'.format(avg_xent_adv[3]))
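
# add_gaussian_noise() is called above but is not defined in this listing. The helper below is
# a minimal sketch of one plausible implementation (an assumption, not the original code):
# zero-mean Gaussian noise with standard deviation sd, clipped back to the [0, 1] pixel range
# suggested by the NOISE_STD_RANGE values used elsewhere in these scripts.
def add_gaussian_noise_sketch(image, sd=0.3):
    """Return `image` with N(0, sd^2) noise added, clipped to [0, 1]."""
    noisy = image + np.random.normal(loc=0.0, scale=sd, size=image.shape)
    return np.clip(noisy, 0.0, 1.0)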
eval_batch_size = config['eval_batch_size']
eval_on_cpu = config['eval_on_cpu']
model_dir = config['model_dir-n']

# Set up the data, hyperparameters, and the model
img_size = [28, 28, 1]
img_size2 = (28, 28)
trained_model_path = 'trained_models/sargan_fmnist'
BATCH_SIZE = 64
NOISE_STD_RANGE = [0.1, 0.3]

attacks = []
epsilon = 0
model = Model()
# Build one PGD attack per evaluation batch
for i in range(int(math.ceil(num_eval_examples / eval_batch_size))):
    if eval_on_cpu:
        with tf.device("/cpu:0"):
            attack = LinfPGDAttack(model, epsilon,
                                   config['k'],
                                   config['a'],
                                   config['random_start'],
                                   config['loss_func'])
        attacks.append(attack)
    else:
        # Same attack parameters as the CPU branch above
        attack = LinfPGDAttack(model, epsilon,
                               config['k'],
                               config['a'],
                               config['random_start'],
                               config['loss_func'])
        attacks.append(attack)
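
# get_data() and cycle() are used by the evaluation functions but are not defined in this
# listing. The sketch below is an assumption of what get_data() might look like, based on the
# .data.numpy().transpose(0, 2, 3, 1) calls on its batches (which imply PyTorch NCHW tensors);
# cycle is presumably itertools.cycle. The dataset root and the name get_data_sketch are
# hypothetical, not the original implementation.
def get_data_sketch(batch_size):
    import torch
    from torchvision import datasets, transforms
    dataset = datasets.FashionMNIST(root='./data', train=False, download=True,
                                    transform=transforms.ToTensor())
    # drop_last keeps every batch at exactly batch_size, matching the fixed-size reshapes above.
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                       shuffle=True, drop_last=True)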
def evaluate_checkpoint(filename):
    #sys.stdout = open(os.devnull, 'w')
    g2 = tf.Graph()
    gx2 = tf.Graph()
    gx3 = tf.Graph()
    gx4 = tf.Graph()
    g3 = tf.Graph()
    loop_list_adv = np.zeros([2, number_of_runs])
    loop_list_auto = np.zeros([2, number_of_runs])
    epsilonc = starting_pert

    with tf.Session() as sess:
        # Restore the checkpoint
        saver.restore(sess, tf.train.latest_checkpoint(model_dir))
        # Iterate over the samples batch-by-batch
        num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
        x_corr_list = []
        x_adv_list = []
        y_batch_list = []

        train_loader = get_data(BATCH_SIZE)
        trainiter = iter(cycle(train_loader))
        for ibatch in range(num_batches):
            x_batch2, y_batch = next(trainiter)
            y_batch_list.append(y_batch)
            x_batch2 = np.array(x_batch2.data.numpy().transpose(0, 2, 3, 1))
            x_batch = np.zeros([len(x_batch2), img_size[0] * img_size[1]])
            for i in range(len(x_batch2)):
                x_batch[i] = x_batch2[i].reshape([img_size[0] * img_size[1]])
            x_batch_adv = attacks[ibatch].perturb(x_batch, y_batch, sess)
            x_batch_adv2 = np.zeros([len(x_batch), img_size[0], img_size[1], img_size[2]])
            for k in range(len(x_batch)):
                x_batch_adv2[k] = add_gaussian_noise(
                    x_batch_adv[k].reshape([img_size[0], img_size[1], img_size[2]]),
                    sd=np.random.uniform(NOISE_STD_RANGE[1], NOISE_STD_RANGE[1]))
            x_corr_list.append(x_batch_adv)
            x_adv_list.append(x_batch_adv2)

    # Chain the noisy adversarial batches through the four SARGAN autoencoders
    with g2.as_default():
        with tf.Session() as sess2:
            sargan_model = SARGAN(img_size, BATCH_SIZE, img_channel=1)
            sargan_saver = tf.train.Saver()
            sargan_saver = tf.train.import_meta_graph(trained_model_path + '/sargan_mnist.meta')
            sargan_saver.restore(sess2, tf.train.latest_checkpoint(trained_model_path))
            for ibatch in range(num_batches):
                processed_batch = sess2.run(
                    sargan_model.gen_img,
                    feed_dict={sargan_model.image: x_adv_list[ibatch],
                               sargan_model.cond: x_adv_list[ibatch]})
                x_adv_list[ibatch] = processed_batch

    with gx2.as_default():
        with tf.Session() as sessx2:
            sargan_model2 = SARGAN(img_size, BATCH_SIZE, img_channel=1)
            sargan_saver2 = tf.train.Saver()
            sargan_saver2 = tf.train.import_meta_graph(trained_model_path2 + '/sargan_mnist.meta')
            sargan_saver2.restore(sessx2, tf.train.latest_checkpoint(trained_model_path2))
            for ibatch in range(num_batches):
                processed_batch = sessx2.run(
                    sargan_model2.gen_img,
                    feed_dict={sargan_model2.image: x_adv_list[ibatch],
                               sargan_model2.cond: x_adv_list[ibatch]})
                x_adv_list[ibatch] = processed_batch

    with gx3.as_default():
        with tf.Session() as sessx3:
            sargan_model3 = SARGAN(img_size, BATCH_SIZE, img_channel=1)
            sargan_saver3 = tf.train.Saver()
            sargan_saver3 = tf.train.import_meta_graph(trained_model_path3 + '/sargan_mnist.meta')
            sargan_saver3.restore(sessx3, tf.train.latest_checkpoint(trained_model_path3))
            for ibatch in range(num_batches):
                processed_batch = sessx3.run(
                    sargan_model3.gen_img,
                    feed_dict={sargan_model3.image: x_adv_list[ibatch],
                               sargan_model3.cond: x_adv_list[ibatch]})
                x_adv_list[ibatch] = processed_batch

    with gx4.as_default():
        with tf.Session() as sessx4:
            sargan_model4 = SARGAN(img_size, BATCH_SIZE, img_channel=1)
            sargan_saver4 = tf.train.Saver()
            sargan_saver4 = tf.train.import_meta_graph(trained_model_path4 + '/sargan_mnist.meta')
            sargan_saver4.restore(sessx4, tf.train.latest_checkpoint(trained_model_path4))
            for ibatch in range(num_batches):
                processed_batch = sessx4.run(
                    sargan_model4.gen_img,
                    feed_dict={sargan_model4.image: x_adv_list[ibatch],
                               sargan_model4.cond: x_adv_list[ibatch]})
                x_adv_list[ibatch] = processed_batch.reshape(
                    [len(x_batch), img_size[0] * img_size[1]])

    # Evaluate the classifier on the raw adversarial and autoencoder-processed batches
    with g3.as_default():
        model3 = Model()
        saver2 = tf.train.Saver()
        with tf.Session() as sess3:
            saver2.restore(sess3, filename)
            for ibatch in range(num_batches):
                dict_corr = {model3.x_input: x_corr_list[ibatch],
                             model3.y_input: y_batch_list[ibatch]}
                dict_adv = {model3.x_input: x_adv_list[ibatch],
                            model3.y_input: y_batch_list[ibatch]}
                cur_corr_corr, cur_xent_corr = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_corr)
                cur_corr_adv, cur_xent_adv = sess3.run(
                    [model3.num_correct, model3.xent], feed_dict=dict_adv)
                loop_list_adv[0, ibatch] = epsilonc
                loop_list_adv[1, ibatch] = cur_corr_adv / eval_batch_size
                loop_list_auto[0, ibatch] = epsilonc
                loop_list_auto[1, ibatch] = cur_corr_corr / eval_batch_size
                epsilonc += change
            '''summary = tf.Summary(value=[
                tf.Summary.Value(tag='xent adv eval', simple_value= avg_xent_adv),
                tf.Summary.Value(tag='xent adv', simple_value= avg_xent_adv),
                tf.Summary.Value(tag='xent nat', simple_value= avg_xent_nat),
                tf.Summary.Value(tag='accuracy adv eval', simple_value= acc_adv),
                tf.Summary.Value(tag='accuracy adv', simple_value= acc_adv),
                tf.Summary.Value(tag='accuracy nat', simple_value= acc_nat)])
            summary_writer.add_summary(summary, global_step.eval(sess3))'''

    #sys.stdout = sys.__stdout__
    return loop_list_adv, loop_list_auto
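
# Illustrative driver (an assumption, not part of the original script): restore the most recent
# classifier checkpoint and save the two curves returned by the evaluate_checkpoint() defined
# directly above, mirroring the loop_data/fmnist naming used earlier. The output file names are
# hypothetical.
checkpoint = tf.train.latest_checkpoint(model_dir)
if checkpoint is not None:
    loop_list_adv, loop_list_auto = evaluate_checkpoint(checkpoint)
    np.save('loop_data/fmnist/loop_list_adv.npy', loop_list_adv)
    np.save('loop_data/fmnist/loop_list_auto.npy', loop_list_auto)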